Dataset columns:
  query:           string, length 9 to 3.4k
  document:        string, length 9 to 87.4k
  metadata:        dict
  negatives:       sequence, length 4 to 101
  negative_scores: sequence, length 4 to 101
  document_score:  string, length 3 to 10
  document_rank:   string, 102 distinct values
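Each row pairs a natural-language query with a positive code document, a list of mined negative documents, and per-negative similarity scores; document_score and document_rank appear to record how the positive itself was scored and ranked. A minimal sketch of loading and inspecting a row with the Hugging Face datasets library (the repository id below is a placeholder, not the actual dataset path):

from datasets import load_dataset

# Placeholder repository id; substitute the real dataset path.
ds = load_dataset("user/code-search-triplets", split="train")

row = ds[0]
print(row["query"])                # natural-language description
print(row["document"][:200])       # positive code snippet
print(len(row["negatives"]))       # 4 to 101 mined negatives per query
print(row["negative_scores"][:3])  # scores aligned with the negatives list
print(row["document_score"], row["document_rank"])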
query: Example of entry point
document:
def main(args=None):
    if args is None:
        args = sys.argv[1:]
    print('This is the main function of pycharm')
    """Read the args"""
    if len(args) > 0:
        for i in args:
            print(i)
        if args[0] == 'create_setup':
            remove = False
            if len(args) > 2:
                if args[2] == '-removeold':
                    remove = True
            from pymake.tools.create_setup import create_setup
            create_setup(args[1], remove)
    else:
        print('No parameters passed')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def entry_point():", "def entry_point():", "def entry_point():", "def main():\n pass", "def main():\n return", "def main(self) -> None:\n pass", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main(args=None):", "def main(args=None):", "def main():\n example()", "def main():\n example()", "def main():\n example()", "def main():\n example()", "def main(self):\r\n pass", "def main(self, **kwargs) -> None:\n ...", "def main() -> None:\n return", "def main(args):", "def main(args):", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():\n\tpass", "def entry_point():\n pass", "def main(self):", "def main(args=None):\n pass", "def main(cls):\n raise NotImplementedError", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n\n pass", "def main(ctx, verbose):\n return", "def main():\n Main()", "def entry_point():\n\n\n plac.call(main)", "def main(self):\n pass", "def main():\n pass", "def main():\n\n pass\n\n return None", "def main():\n\n pass\n\n return None", "def main():\n print(\"It works!!! ;-)\")\n ###TODO### do something with the various methods/functions of this file", "def main(self, params):\n pass", "def main(args=None):\n app()\n return 0", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def run():\n main()", "def entry_point() -> None:\n args = parse_args()\n print(hello(args.name))", "def main():\n print(\"is Running!\")", "def main():\n \n ##\n ## Don't forget to add comments to clarify what the code is doing\n ##\n print()\n # example class instantiation\n my_instance = My_class()\n print('This is info about my_instance:', my_instance)\n \n # example function call\n my_function1()\n print('This is what my_function1 returns:', my_function1())\n \n # example function call\n print('This is what my_function2 returns:', my_function2())\n \n ## More code/comments here, as needed", "def main():\n ...", "def train_entry_point():", "def main():\n print(\"Everythin is ok\")", "def main():\n\tcli = Cli()\n\tcli.run()", "def main():\n CLI_APP.run()", "def main():\n hello()", "def main():\n print(\"def main\")\n return APP.run()", "def main():\n app = App()\n app.run()", "def main():\n pass\n\n if __name__ == \"__main)__\":\n main()", "def main(argv: Sequence[Text]) -> None:\n\n\n print(\"TODO\")", "def main(source):\n pass" ]
[ "0.84055316", "0.84055316", "0.84055316", "0.8176242", "0.8088535", "0.80879116", "0.8034941", "0.8034941", "0.8034941", "0.8034941", "0.7982359", "0.7982359", "0.7979266", "0.7979266", "0.7979266", "0.7979266", "0.79315084", "0.7893428", "0.78562397", "0.7752687", "0.7752687", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.77410424", "0.773999", "0.7720627", "0.7712189", "0.7685353", "0.76688296", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.7605964", "0.75931734", "0.75535417", "0.75020355", "0.7498775", "0.7423902", "0.7411809", "0.7387743", "0.7387743", "0.7355293", "0.73229384", "0.7305621", "0.7305279", "0.7305279", "0.7305279", "0.7270138", "0.7264654", "0.7242488", "0.7225863", "0.7213753", "0.7213526", "0.7192058", "0.7134406", "0.70914793", "0.7062452", "0.7046695", "0.70444083", "0.70250225", "0.70107603", "0.70092607" ]
document_score: 0.0
document_rank: -1
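The metadata field in each row declares a triplet objective over (query, document, negatives). A minimal training sketch for that objective, assuming the sentence-transformers InputExample / TripletLoss API; the repository id, base model, and batch size are illustrative placeholders, not values taken from this dataset:

from datasets import load_dataset
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

ds = load_dataset("user/code-search-triplets", split="train")  # placeholder id
model = SentenceTransformer("all-MiniLM-L6-v2")                # illustrative base model

# Build one (anchor, positive, negative) triplet per query/negative pair.
examples = []
for row in ds:
    for neg in row["negatives"][:5]:  # cap negatives per query to keep the sketch small
        examples.append(InputExample(texts=[row["query"], row["document"], neg]))

loader = DataLoader(examples, shuffle=True, batch_size=16)
loss = losses.TripletLoss(model=model)
model.fit(train_objectives=[(loader, loss)], epochs=1, warmup_steps=100)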
query: Converts an array with WCS to altitude and azimuth coordinates
document:
def getAltAz(arr, header, time, location):
    # Build the WCS solution from the FITS header and enumerate every pixel coordinate
    soln = wcs.WCS(header)
    coords = cartesian([arange(arr.shape[1]), arange(arr.shape[0])])
    # Pixel -> ICRS (RA/Dec) -> horizontal (alt/az) at the given time and site
    world = soln.wcs_pix2world(coords, 0)
    radec = SkyCoord(ra=world[:, 0], dec=world[:, 1], frame='icrs', unit='deg')
    altaz = radec.transform_to(AltAz(obstime=time, location=location))
    return altaz.alt.deg, altaz.az.deg, coords[:, 0], coords[:, 1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def offset_to_altaz(xoff, yoff, azimuth, altitude):\n #Deal with situations where offset = 0?\n\n d = sqrt(xoff*xoff+yoff*yoff)\n pos = np.where(d==0)\n d=1e-12 * u.deg # add a very small offset to prevent math errors\n\n q = arctan(d.to(u.rad).value)\n\n sq = sin(q)\n xp1 = xoff * (sq/d)\n yp1 = yoff * (sq/d)\n zp1 = cos(q)\n\n cx = sin(altitude)\n sx = cos(altitude)\n\n xp0 = cx*xp1 - sx*zp1\n yp0 = yp1\n zp0 = sx*xp1 + cx*zp1\n\n obj_altitude = arcsin(zp0)\n obj_altitude[pos]=altitude\n obj_azimuth = arctan2(yp0,-xp0) + azimuth\n obj_azimuth[pos] = azimuth\n\n #if obj_azimuth.value < 0.:\n # obj_azimuth += 2.*pi\n #elif obj_azimuth.value >= (2.*pi ):\n # obj_azimuth -= 2.*pi\n\n return obj_altitude,obj_azimuth", "def geo_m_v2(data_array):\n r = 6378.137 #promien ziemi w km\n delta = np.zeros(data_array.size//7-1)\n alo = data_array[0][1]\n ala = data_array[0][2]\n count = 0\n for row in data_array[1:]:\n dLat = (row[2] - ala) * math.pi/180.0\n dLon = (row[1] - alo) * math.pi/180.0\n a = math.sin(dLat/2.0)**2 + math.cos(ala * math.pi/180.0) * math.cos(row[2] * math.pi/180.0)\\\n * math.sin(dLon/2.0)**2\n delta[count] = r * 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))#w km\n count += 1\n alo = row[1]\n ala = row[2]\n return delta", "def convert_coords(date, time_steps, azs, els, obs):\n coord_start_day = datetime(date.year, date.month, date.day)\n \n strategy = []\n for time_step, az, el in zip(time_steps, azs, els):\n if az % np.pi == 0.0: \n az += EPS\n \n ra, dec = sphere.altaz_to_ra_dec(coord_start_day + timedelta(hours=time_step), az, el, obs)\n strategy.append([ra, dec])\n \n return np.array(strategy)", "def wind_adjust_func(uz_array, zw):\n return uz_array * 4.87 / np.log(67.8 * zw - 5.42)", "def altaz_to_offset(obj_azimuth,obj_altitude,azimuth,altitude):\n\n daz = obj_azimuth - azimuth\n coa = cos(obj_altitude)\n\n xp0 = -cos(daz) * coa\n yp0 = sin(daz) * coa\n zp0 = sin(obj_altitude)\n\n cx = sin(altitude)\n sx = cos(altitude)\n\n xp1 = cx*xp0 + sx*zp0\n yp1 = yp0\n zp1 = -sx*xp0 + cx*zp0\n\n q = arccos(zp1)\n d = tan(q)\n alpha = arctan2(yp1,xp1)\n\n xoff = d * cos(alpha)\n yoff = d * sin(alpha)\n\n return xoff,yoff", "def ecef2LatLonAlt(x, y, z):\n\n # Calculate the polar eccentricity\n ep = np.sqrt((EARTH.EQUATORIAL_RADIUS**2 - EARTH.POLAR_RADIUS**2)/(EARTH.POLAR_RADIUS**2))\n\n # Calculate the longitude\n lon = np.arctan2(y, x)\n\n p = np.sqrt(x**2 + y**2)\n\n theta = np.arctan2( z*EARTH.EQUATORIAL_RADIUS, p*EARTH.POLAR_RADIUS)\n\n # Calculate the latitude\n lat = np.arctan2(z + (ep**2)*EARTH.POLAR_RADIUS*np.sin(theta)**3, \\\n p - (EARTH.E**2)*EARTH.EQUATORIAL_RADIUS*np.cos(theta)**3)\n\n # Get distance from Earth centre to the position given by geographical coordinates, in WGS84\n N = EARTH.EQUATORIAL_RADIUS/math.sqrt(1.0 - (EARTH.E**2)*math.sin(lat)**2)\n\n \n # Calculate the height in meters\n\n # Correct for numerical instability in altitude near exact poles (and make sure cos(lat) is not 0!)\n if((np.abs(x) < 1000) and (np.abs(y) < 1000)):\n alt = np.abs(z) - EARTH.POLAR_RADIUS\n\n else:\n # Calculate altitude anywhere else\n alt = p/np.cos(lat) - N\n\n\n return lat, lon, alt", "def read_elevation(filepath):\n h = 83 #distance between elevation measures\n N = 1201\n theta = np.pi / 6\n elev_array = np.zeros((N, N))\n grad_array = np.zeros((N, N, 2))\n I_array = np.zeros((N, N))\n # Read the elevation data as described in Question 3, and store in the elvation array\n f = open(filepath, \"rb\")\n for i in range(N):\n for j in range(N):\n buf = f.read(2)\n val = 
struct.unpack(\">h\", buf)[0]\n elev_array[i][j] = val\n f.close()\n # Populate the gradient array\n for i in range(N):\n for j in range(N):\n #This if statements handle the border cases\n if j == 0:\n grad_array[i][j][0] = (elev_array[i][j+1] - elev_array[i][j]) / h\n elif j == N - 1:\n grad_array[i][j][0] = (elev_array[i][j] - elev_array[i][j-1]) / h\n else:\n grad_array[i][j][0] = (elev_array[i][j+1] - elev_array[i][j-1]) / (2 * h)\n \n if i == 0:\n grad_array[i][j][1] = (elev_array[i][j] - elev_array[i-1][j]) / h\n elif i == N - 1:\n grad_array[i][j][1] = (elev_array[i-1][j] - elev_array[i][j]) / h\n else:\n grad_array[i][j][1] = (elev_array[i-1][j] - elev_array[i+1][j]) / (2 * h)\n \n # Populate intensities\n for i in range(N):\n for j in range(N):\n denom = np.sqrt(grad_array[i][j][0] ** 2 + grad_array[i][j][1] ** 2 + 1)\n numer = np.cos(theta) * grad_array[i][j][0] + np.sin(theta) * grad_array[i][j][1]\n I_array[i][j] = -1 * numer / denom\n \n return elev_array, I_array", "def lambert_azimuthal(coordinate_triples, longitude_offset=pi/8,\n latitude_offset=pi/8):\n latitudes, longitudes = cartesian_to_geographical(coordinate_triples)\n k = np.sqrt(2/(1 + np.cos(latitudes - latitude_offset)\n *np.cos(longitudes - longitude_offset)))\n x_projected = (k*np.cos(latitudes - latitude_offset)\n *np.sin(longitudes - longitude_offset))\n y_projected = k*np.sin(latitudes - latitude_offset)\n return np.array([x_projected, y_projected])", "def geo_m(data_array):\n earth_r = 12756.490 #srednica Ziemi na rowniku [km]\n delta = np.zeros(data_array.size//7-1)\n alo = data_array[0][1]\n ala = data_array[0][2]\n count = 0\n for row in data_array[1:]:\n a = (row[1] - alo) * math.cos(ala*math.pi/180.0)\n b = (row[2] - ala)\n delta[count] = math.sqrt(a*a + b*b)*math.pi*earth_r/36.0*100# wynik w m\n count += 1\n alo = row[1]\n ala = row[2]\n return delta", "def get_altitude(points):\n altitudes = np.zeros((len(points),), dtype=\"float64\")\n for i, point in tqdm(enumerate(points), desc=\"GETTING ALTITUDE\"):\n p = Point(point[0], point[1])\n altitudes[i] = alt.NM_COTA.iloc[\n np.argmin([p.distance(alt.geometry.iloc[j]) for j in range(alt.shape[0])])\n ]\n return altitudes", "def to_xyah(self):\n ret = self.tlwh.copy()\n ret[:2] += ret[2:] / 2\n ret[2] /= ret[3]\n return ret", "def to_xyah(self):\n ret = self.tlwh.copy()\n ret[:2] += ret[2:] / 2\n ret[2] /= ret[3]\n return ret", "def to_xyah(self):\n ret = self.tlwh.copy()\n ret[:2] += ret[2:] / 2\n ret[2] /= ret[3]\n return ret", "def benthos_psa916_dict(calib, signal):\n\n #array mode\n try:\n altitude = []\n for signal_x in signal:\n temp = (300 * signal_x / calib['ScaleFactor']) + calib['Offset']\n altitude.append(temp)\n #single mode\n except:\n altitude = (300 * signal / calib['ScaleFactor']) + calib['Offset']\n return altitude", "def horizontal_to_cartesian(altitude, azimuth):\n theta = math.pi / 2 - math.radians(altitude)\n phi = math.radians(-azimuth)\n x = math.sin(phi) * math.sin(-theta)\n y = math.sin(theta) * math.cos(phi)\n z = math.cos(theta)\n return x, y, z", "def change_altitude_cm_m(data_array):\n data_array[:, 3] = data_array[:, 3]*0.3048\n return data_array", "def azimuth(source):\n srcAzEl = subarrayControl.s.azel(source, 0.0);\n return srcAzEl[0];", "def toHEC(rts):\n pathname = rts.name\n values = rts.getYArray()\n import jarray\n times = jarray.zeros(len(values),'i')", "def test_az_za():\n Nside = 128\n obs = observatory.Observatory(latitude, longitude, fov=20, nside=Nside)\n center = [0, 0]\n lon, lat = [5, 0]\n ind0 = 
hp.ang2pix(Nside, lon, lat, lonlat=True)\n lon, lat = hp.pix2ang(Nside, ind0, lonlat=True)\n za, az, pix = obs.calc_azza(center, return_inds=True)\n ind = np.where(pix == ind0)\n # lon = longitude of the source, which is set to 5deg off zenith (hence, zenith angle)\n assert np.isclose(np.degrees(za[ind]), lon)\n assert np.isclose(np.degrees(az[ind]), 90.0)", "def AEH2LatLonAlt(azim, elev, h, lat, lon, alt):\n\n # Compute the range to the point\n r = AEH2Range(azim, elev, h, lat, lon, alt)\n\n\n # Compute lat/lon/alt of the point on the line of sight\n x, y, z = AER2ECEF(azim, elev, r, lat, lon, alt)\n lat2, lon2, alt2 = ecef2LatLonAlt(x, y, z)\n lat2, lon2 = np.degrees(lat2), np.degrees(lon2)\n\n\n return lat2, lon2, alt2", "def test_wcs_extras():\n data = np.ones([6, 6], dtype=np.float64)\n header = {'CRVAL1': 0,\n 'CRVAL2': 0,\n 'CRPIX1': 5,\n 'CRPIX2': 5,\n 'CDELT1': 10,\n 'CDELT2': 10,\n 'CUNIT1': 'arcsec',\n 'CUNIT2': 'arcsec',\n 'PC1_1': 0,\n 'PC1_2': -1,\n 'PC2_1': 1,\n 'PC2_2': 0,\n 'NAXIS1': 6,\n 'NAXIS2': 6,\n 'CTYPE1': 'HPLN-TAN',\n 'CTYPE2': 'HPLT-TAN',\n 'date-obs': '1970-01-01T00:00:00',\n 'obsrvtry': 'Foo',\n 'detector': 'bar',\n 'wavelnth': 10,\n 'waveunit': 'm',\n 'hglt_obs': 0,\n 'hgln_obs': 0,\n 'dsun_obs': 10,\n 'rsun_ref': 690000000}\n generic_map = sunpy.map.Map((data, header))\n\n wcs = generic_map.wcs\n\n assert wcs.heliographic_observer.lat.value == 0\n assert wcs.heliographic_observer.lon.value == 0\n assert wcs.heliographic_observer.radius.value == 10\n assert wcs.rsun.value == header['rsun_ref']\n\n result = solar_wcs_frame_mapping(wcs)\n\n assert isinstance(result, Helioprojective)\n assert result.observer.lat.value == 0\n assert result.observer.lon.value == 0\n assert result.observer.radius.value == 10\n assert result.rsun.value == header['rsun_ref']", "def raDec2AltAz(ra, dec, jd, lat, lon):\n\n # Compute azim and elev using a fast cython function\n azim, elev = cyraDec2AltAz(np.radians(ra), np.radians(dec), jd, np.radians(lat), np.radians(lon))\n \n\n # Convert alt/az to degrees\n azim = np.degrees(azim)\n elev = np.degrees(elev)\n\n return azim, elev", "def img2heliovec(bxImg,byImg,bzImg,lon,lat,lonc,latc,pAng):\n a11 = -np.sin(latc)*np.sin(pAng)*np.sin(lon - lonc) + np.cos(pAng)*np.cos(lon - lonc)\n a12 = np.sin(latc)*np.cos(pAng)*np.sin(lon - lonc) + np.sin(pAng)*np.cos(lon - lonc)\n a13 = -np.cos(latc)*np.sin(lon - lonc)\n a21 = -np.sin(lat)*(np.sin(latc)*np.sin(pAng)*np.cos(lon - lonc) + np.cos(pAng)*np.sin(lon - lonc)) - np.cos(lat)*np.cos(latc)*np.sin(pAng)\n a22 = np.sin(lat)*(np.sin(latc)*np.cos(pAng)*np.cos(lon - lonc) - np.sin(pAng)*np.sin(lon - lonc)) + np.cos(lat)*np.cos(latc)*np.cos(pAng)\n a23 = -np.cos(latc)*np.sin(lat)*np.cos(lon - lonc) + np.sin(latc)*np.cos(lat)\n a31 = np.cos(lat)*(np.sin(latc)*np.sin(pAng)*np.cos(lon - lonc) + np.cos(pAng)*np.sin(lon - lonc)) - np.sin(lat)*np.cos(latc)*np.sin(pAng)\n a32 = -np.cos(lat)*(np.sin(latc)*np.cos(pAng)*np.cos(lon - lonc) - np.sin(pAng)*np.sin(lon - lonc)) + np.sin(lat)*np.cos(latc)*np.cos(pAng)\n a33 = np.cos(lat)*np.cos(latc)*np.cos(lon - lonc) + np.sin(lat)*np.sin(latc)\n\n bxHelio = a11 * bxImg + a12 * byImg + a13 * bzImg\n byHelio = a21 * bxImg + a22 * byImg + a23 * bzImg\n bzHelio = a31 * bxImg + a32 * byImg + a33 * bzImg\n\n return bxHelio,byHelio,bzHelio", "def parse_azimuth_elevation(filename):\n match = REGEX.match(filename)\n return int(match.group(1)), int(match.group(2))", "def xywh_to_xyxy(boxes: np.array) -> np.array:\n boxes[..., 0] = boxes[..., 0] - boxes[..., 2]/2\n boxes[..., 
1] = boxes[..., 1] - boxes[..., 3]/2\n boxes[..., 2] = boxes[..., 0] + boxes[..., 2]\n boxes[..., 3] = boxes[..., 1] + boxes[..., 3]\n return boxes", "def extract_wind(source,la,lo,lats,lons,wd,ws):\r\n lat = source[la]\r\n lon = source[lo]\r\n wdir = []\r\n wspd = [] \r\n for coor in zip(lon,lat): \r\n in_lon = coor[0]\r\n in_lat = coor[1]\r\n # since lons are 0 thru 360, convert to -180 thru 180\r\n converted_lons = lons - ( lons.astype(np.int32) / 180) * 360\r\n # get cell of facility\r\n lat_idx = geo_idx(in_lat, lats)\r\n lon_idx = geo_idx(in_lon, converted_lons)\r\n #extract winddirection and wind speed from that cell\r\n d = wd[:,lat_idx,lon_idx][0]\r\n wdir.append(d)\r\n s = ws[:,lat_idx,lon_idx][0]\r\n wspd.append(s)\r\n \r\n return wdir,wspd", "def tlwh_to_xyah(tlwh):\n ret = np.asarray(tlwh).copy()\n ret[:2] += ret[2:] / 2\n ret[2] /= ret[3]\n return ret", "def azel2radec(az,el,mjd,lat=47.8781,lon=-87.6298):\n \n T_UT1 = (mjd-51544.5)/36525;\n ThetaGMST = 67310.54841 + (876600*3600 + 8640184.812866)*T_UT1 + \\\n .093104*(T_UT1**2) - (6.2e-6)*(T_UT1**3)\n ThetaGMST = np.mod((np.mod(ThetaGMST,86400*(ThetaGMST/np.abs(ThetaGMST)))/240),360)\n ThetaLST = ThetaGMST + lon\n \n DEC = asind(sind(el)*sind(lat)+cosd(el)*cosd(lat)*cosd(az))\n LHA = atand2(-sind(az)*cosd(el)/cosd(DEC), \n (sind(el)-sind(DEC)*sind(lat))/(cosd(DEC)*cosd(lat)))*(180/np.pi);\n RA = np.mod(ThetaLST-LHA,360);\n \n return RA,DEC", "def read_affine(file):\n data = open(file, 'r').read()\n data = data.split('\\n')\n for i in range(1, 5):\n data[i] = data[i].split(':')\n int_lon = np.fromstring(data[1][1], dtype='float', sep=',')\n int_lat = np.fromstring(data[2][1], dtype='float', sep=',')\n Nlon = len(int_lon) - 1\n Nlat = len(int_lat) - 1\n data[3][1] = data[3][1].split(',')\n data[4][1] = data[4][1].split(',')\n lon_transform = np.zeros((Nlon, 2))\n lat_transform = np.zeros((Nlat, 2))\n for i in range(Nlon):\n data[3][1][i] = data[3][1][i].split(' ')\n lon_transform[i] = [data[3][1][i][0], data[3][1][i][1]]\n for i in range(Nlat):\n data[4][1][i] = data[4][1][i].split(' ')\n lat_transform[i] = [data[4][1][i][0], data[4][1][i][1]]\n lon_transform = np.array(lon_transform).astype('float')\n lat_transform = np.array(lat_transform).astype('float')\n return int_lon, int_lat, lon_transform, lat_transform", "def GPSlatlon2XY(data_sheet, origin, theta):\n\n\tlon = np.array([[data_sheet.cell(row = i, column = 1).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tlat = np.array([[data_sheet.cell(row = i, column = 2).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\n\tlon_u = np.array([[data_sheet.cell(row = i, column = 5).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tlat_u = np.array([[data_sheet.cell(row = i, column = 6).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tUz = np.array([[data_sheet.cell(row = i, column = 4).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\n\tlon_in_km = (lon - origin[0])*111*np.cos(lat*np.pi/180)\n\tlat_in_km = (lat - origin[1])*111\n\t\n\trho_u = np.sqrt(np.power(lon_u,2) + np.power(lat_u,2))\n\ttheta_new_u = np.arctan2(lat_u,lon_u) - theta\n\n\trho = np.sqrt(np.power(lon_in_km,2) + np.power(lat_in_km,2))\n\ttheta_new = np.arctan2(lat_in_km,lon_in_km) - theta\n\n\tX, Y = rho*np.cos(theta_new), rho*np.sin(theta_new)\n\tUx, Uy = rho_u*np.cos(theta_new_u), rho_u*np.sin(theta_new_u)\n\n\treturn 1e3*X, 1e3*Y, 1e-3*Ux, 
1e-3*Uy, 1e-3*Uz", "def xyxy_to_xywh(boxes: np.array) -> np.array:\n boxes[..., 2] = boxes[..., 2] - boxes[..., 0]\n boxes[..., 3] = boxes[..., 3] - boxes[..., 1]\n boxes[..., 0] = boxes[..., 0] + boxes[..., 2]/2\n boxes[..., 1] = boxes[..., 1] + boxes[..., 3]/2\n return boxes", "def gps2tas3(GS, TK, verbose=0):\n x, y, b, m, hdg = [], [], [], [], []\n\n for (gs, tk) in zip(GS, TK):\n x.append(gs * M.sin(M.pi * (360.0 - tk) / 180.0))\n y.append(gs * M.cos(M.pi * (360.0 - tk) / 180.0))\n\n m.append(-1 * (x[1] - x[0]) / (y[1] - y[0]))\n m.append(-1 * (x[2] - x[0]) / (y[2] - y[0]))\n\n b.append((y[0] + y[1]) / 2 - m[0] * (x[0] + x[1]) / 2)\n b.append((y[0] + y[2]) / 2 - m[1] * (x[0] + x[2]) / 2)\n\n wind_x = (b[0] - b[1]) / (m[1] - m[0])\n wind_y = m[0] * wind_x + b[0]\n\n wind_speed = M.sqrt(wind_x ** 2 + wind_y ** 2)\n wind_dir = (540.0 - (180.0 / M.pi * M.atan2(wind_x, wind_y))) % 360.0\n\n TAS = M.sqrt((x[0] - wind_x) ** 2 + (y[0] - wind_y) ** 2)\n\n if verbose >= 2:\n hdg.append(\n (540.0 - (180.0 / M.pi * M.atan2(wind_x - x[0], wind_y - y[0]))) % 360.0\n )\n hdg.append(\n (540.0 - (180.0 / M.pi * M.atan2(wind_x - x[1], wind_y - y[1]))) % 360.0\n )\n hdg.append(\n (540.0 - (180.0 / M.pi * M.atan2(wind_x - x[2], wind_y - y[2]))) % 360.0\n )\n\n return TAS, (wind_speed, wind_dir), (hdg[0], hdg[1], hdg[2])\n\n elif verbose == 1:\n return TAS, (wind_speed, wind_dir)\n elif verbose == 0:\n return TAS\n else:\n raise ValueError(\"The value of verbose must be equal to 0, 1 or 2\")", "def transform_region_ascii(infile, outfile, wcs_in, wcs_out):\n\n with open(infile, 'r') as fh:\n regions = fh.readlines()\n\n with open(outfile, 'w') as ofh:\n for region in regions:\n if region.startswith('#'):\n ofh.write(region + '\\n')\n continue\n\n region = region.rstrip()\n post0 = 0\n post1 = region.find(\"(\")\n reg_type = region[post0:post1]\n\n if reg_type in ['polygon', 'Polygon']:\n # convert from a 1D array into a 2D one\n coords_in = [float(f)\n for f in region[post1 + 1:-1].split(',')]\n\n assert coords_in.size % 2 == 0\n # Use integer division here\n coords_in.resize(2, coords_in.size // 2)\n\n # The conversion can be applied to all the\n # pairs at once, but it requires the data be\n # in the \"right\" shape.\n #\n coords_cel = wcs_in.apply(coords_in.T)\n coords_out = wcs_out.invert(coords_cel)\n\n # The coords_out array is not transposed (to\n # match the input) since it makes it easier\n # to convert back to a string.\n coords_str = \",\".join([\"{:7.2f}\".format(c)\n for c in coords_out])\n\n out = reg_type + '(' + coords_str + ')'\n\n elif reg_type == 'rotbox':\n\n # Just need to convert the center of the box, since\n # the assumption is that the pixel scale is the\n # same in both the input and output systems.\n #\n toks = region[post1 + 1:].split(\",\")\n assert len(toks) > 2\n\n xphys_in = float(toks[0])\n yphys_in = float(toks[1])\n\n # The handling of nD arrays by the apply and invert\n # methods of transform objects is, at best, strange\n # to describe.\n #\n coords_cel = wcs_in.apply([[xphys_in, yphys_in]])\n coords_out = wcs_out.invert(coords_cel)\n\n xphys_out = coords_out[0][0]\n yphys_out = coords_out[0][1]\n coords_str = '{:7.2f},{:7.2f},'.format(xphys_out,\n yphys_out)\n\n # Hopefully this re-creates the remainded of the\n # string (i.e. 
after the center of the box).\n #\n out = reg_type + '(' + coords_str + \",\".join(toks[2:])\n\n else:\n # copy over the line\n out = region\n\n ofh.write(out + '\\n')", "def test_az_za_astropy():\n\n Nside = 128\n\n altitude = 0.0\n loc = EarthLocation.from_geodetic(longitude, latitude, altitude)\n\n obs = observatory.Observatory(latitude, longitude, nside=Nside)\n\n t0 = Time(2458684.453187554, format=\"jd\")\n obs.set_fov(180)\n\n zen = AltAz(alt=Angle(\"90d\"), az=Angle(\"0d\"), obstime=t0, location=loc)\n\n zen_radec = zen.transform_to(ICRS())\n center = [zen_radec.ra.deg, zen_radec.dec.deg]\n northloc = EarthLocation.from_geodetic(lat=\"90.d\", lon=\"0d\", height=0.0)\n north_radec = AltAz(\n alt=\"90.0d\", az=\"0.0d\", obstime=t0, location=northloc\n ).transform_to(ICRS())\n yvec = np.array([north_radec.ra.deg, north_radec.dec.deg])\n za, az, inds = obs.calc_azza(center, yvec, return_inds=True)\n\n ra, dec = hp.pix2ang(Nside, inds, lonlat=True)\n\n altaz_astropy = ICRS(\n ra=Angle(ra, unit=\"deg\"), dec=Angle(dec, unit=\"deg\")\n ).transform_to(AltAz(obstime=t0, location=loc))\n\n za0 = altaz_astropy.zen.rad\n az0 = altaz_astropy.az.rad\n\n if environ.get(\"VIS\", False):\n hmap = np.zeros(12 * Nside ** 2) + hp.UNSEEN\n hmap[inds] = np.unwrap(az0 - az)\n import IPython\n\n IPython.embed()\n\n print(np.degrees(za0 - za))\n assert np.allclose(za0, za, atol=1e-4)\n assert np.allclose(\n np.unwrap(az0 - az), 0.0, atol=3e-4\n ) # About 1 arcmin precision. Worst is at the southern horizon.", "def contour_extract(ds_array,\n z_values,\n ds_crs,\n ds_affine,\n output_shp,\n min_vertices=2,\n attribute_data=None,\n attribute_dtypes=None,\n dim='time',\n verbose=True):\n\n # Obtain affine object from either rasterio/xarray affine or a \n # gdal geotransform:\n if type(ds_affine) != affine.Affine:\n ds_affine = affine.Affine.from_gdal(*ds_affine)\n\n # If z_values is supplied is not a list, convert to list:\n z_values = z_values if isinstance(z_values, list) else [z_values]\n\n # If array has only one layer along the `dim` dimension (e.g. 
time), \n # remove the dim:\n try:\n ds_array = ds_array.squeeze(dim=dim)\n print(f\"Dimension '{dim}' has length of 1; removing from array\")\n\n except:\n pass\n\n ########################################\n # Single array, multiple z-values mode #\n ########################################\n\n # Output dict to hold contours for each offset\n contours_dict = collections.OrderedDict()\n\n # If array has only two dimensions, run in single array, \n # multiple z-values mode:\n if len(ds_array.shape) == 2:\n\n if verbose: print(f'Operating in single array, multiple z-values mode')\n\n # If no custom attributes given, default to including a single \n # z-value field based on `z_values`\n if not attribute_data:\n\n # Default field uses two decimal points by default\n attribute_data = {'z_value': z_values}\n attribute_dtypes = {'z_value': 'float:9.2'}\n\n # If custom attributes are provided, test that they are equal \n # in length to the number of `z-values`:\n else:\n\n for key, values in attribute_data.items():\n\n if len(values) != len(z_values):\n\n raise Exception(\n f\"Supplied attribute '{key}' has length of {len(values)} while z_values has \"\n f\"length of {len(z_values)}; please supply the same number of attribute values \"\n \"as z_values\")\n\n for z_value in z_values:\n\n # Extract contours and convert output array cell coords \n # into arrays of coordinate reference system coords.\n # We need to add (0.5 x the pixel size) to the x and y \n # values to correct coordinates to give the centre\n # point of pixels, rather than the top-left corner\n if verbose: print(f' Extracting contour {z_value}')\n ps_x = ds_affine[0] # Compute pixel x size\n ps_y = ds_affine[4] # Compute pixel y size\n contours_geo = [\n np.column_stack(ds_affine * (i[:, 1], i[:, 0])) +\n np.array([0.5 * ps_x, 0.5 * ps_y])\n for i in find_contours(ds_array, z_value)\n ]\n\n # For each array of coordinates, drop xy points that have NA\n contours_nona = [i[~np.isnan(i).any(axis=1)] for i in contours_geo]\n\n # Drop 0 length and add list of contour arrays to dict\n contours_withdata = [i for i in contours_nona \n if len(i) >= min_vertices]\n\n # If there is data for the contour, add to dict:\n if len(contours_withdata) > 0:\n contours_dict[z_value] = contours_withdata\n\n else:\n if verbose:\n print(f' No data for contour {z_value}; skipping')\n contours_dict[z_value] = None\n\n \n ########################################\n # Single z-value, multiple arrays mode #\n ########################################\n\n # For inputs with more than two dimensions, run in single z-value, \n # multiple arrays mode:\n else:\n\n # Test if only a single z-value is given when operating in \n # single z-value, multiple arrays mode\n print(f'Operating in single z-value, multiple arrays mode')\n if len(z_values) > 1:\n raise Exception('Please provide a single z-value when operating '\n 'in single z-value, multiple arrays mode')\n\n # If no custom attributes given, default to including one field \n # based on the `dim` dimension:\n if not attribute_data:\n\n # Default field is numbered from 0 to the number of arrays \n # along the `dim` dimension:\n attribute_data = {dim: range(0, len(ds_array[dim]))}\n attribute_dtypes = {dim: 'int'}\n\n # If custom attributes are provided, test that they are equal \n # in length to the number of arrays along `dim`:\n else:\n\n for key, values in attribute_data.items():\n\n if len(values) != len(ds_array[dim]):\n\n raise Exception(\n f\"Supplied attribute '{key}' has length of {len(values)} while there 
are \"\n f\"{len(ds_array[dim])} arrays along the '{dim}' dimension. Please supply \"\n f\"the same number of attribute values as arrays along the '{dim}' dimension\"\n )\n\n for z_value, _ in enumerate(ds_array[dim]):\n\n # Extract contours and convert output array cell coords into \n # arrays of coordinate reference system coords. We need to \n # add (0.5 x the pixel size) to the x and y values to \n # correct coordinates to give the centre point of pixels, \n # rather than the top-left corner\n if verbose: print(f' Extracting contour {z_value}')\n ps_x = ds_affine[0] # Compute pixel x size\n ps_y = ds_affine[4] # Compute pixel y size\n contours_geo = [\n np.column_stack(ds_affine * (i[:, 1], i[:, 0])) +\n np.array([0.5 * ps_x, 0.5 * ps_y]) for i in find_contours(\n ds_array.isel({dim: z_value}), z_values[0])\n ]\n\n # For each array of coordinates, drop any xy points that have NA\n contours_nona = [i[~np.isnan(i).any(axis=1)] for i in contours_geo]\n\n # Drop 0 length and add list of contour arrays to dict\n contours_withdata = [\n i for i in contours_nona if len(i) >= min_vertices\n ]\n\n # If there is data for the contour, add to dict:\n if len(contours_withdata) > 0:\n contours_dict[z_value] = contours_withdata\n\n else:\n if verbose:\n print(f' No data for contour {z_value}; skipping')\n contours_dict[z_value] = None\n\n #######################\n # Export to shapefile #\n #######################\n\n # If a shapefile path is given, generate shapefile\n if output_shp:\n\n if verbose: print(f'Exporting contour shapefile to {output_shp}')\n\n # Set up output multiline shapefile properties\n schema = {'geometry': 'MultiLineString', \n 'properties': attribute_dtypes}\n\n # Create output shapefile for writing\n with fiona.open(output_shp,\n 'w',\n crs={\n 'init': str(ds_crs),\n 'no_defs': True\n },\n driver='ESRI Shapefile',\n schema=schema) as output:\n\n # Write each shapefile to the dataset one by one\n for i, (z_value, contours) in enumerate(contours_dict.items()):\n\n if contours:\n\n # Create multi-string object from all contour coordinates\n contour_multilinestring = MultiLineString(contours)\n\n # Get attribute values for writing\n attribute_vals = {field_name: field_vals[i] \n for field_name, field_vals \n in attribute_data.items()}\n\n # Write output shapefile to file with z-value field\n output.write({\n 'properties': attribute_vals,\n 'geometry': mapping(contour_multilinestring)\n })\n\n # Return dict of contour arrays\n output_gdf = gpd.read_file(output_shp)\n return output_gdf", "def get_shower_trans_matrix (azimuth,altitude):\n\n cos_z = sin(altitude)\n sin_z = cos(altitude)\n cos_az = cos(azimuth)\n sin_az = sin(azimuth)\n\n trans = np.zeros([3,3])\n trans[0][0] = cos_z*cos_az\n trans[1][0] = sin_az\n trans[2][0] = sin_z*cos_az\n\n trans[0][1] = -cos_z*sin_az\n trans[1][1] = cos_az\n trans[2][1] = -sin_z*sin_az\n\n trans[0][2] = -sin_z\n trans[1][2] = 0.\n trans[2][2] = cos_z\n\n return trans", "def tlwh_to_xyah(self, tlwh):\n ret = np.asarray(tlwh).copy()\n ret[:2] += ret[2:] / 2\n ret[2] /= ret[3]\n return ret", "def to_azim_elev(vectors):\n\n azimuth = np.arctan2(vectors[..., 1], vectors[..., 0])\n elevation = np.arctan2(vectors[..., 2], np.sqrt((vectors[..., 1] ** 2) + (vectors[..., 0] ** 2)))\n\n if vectors.ndim == 1:\n return np.array([azimuth, elevation])\n else:\n return np.stack((azimuth, elevation), axis=-1)", "def calOffsets(self, Xi_arr, Vi_arr, hz):\n\n Wi_arr = [round(vi / hz, 6) for vi in Vi_arr] # tcptimestamps in seconds with microsecond precision\n Yi_arr 
= [(wi - xi) * 1000 for wi, xi in zip(Wi_arr, Xi_arr)] # offset in miliseconds\n offset_arr = [(round(x, 6), round(y, 6)) for x, y in zip(Xi_arr, Yi_arr)]\n return offset_arr", "def wac_to_dict(file_path: str) -> dict:\n\n weather_dict = {'longitude': '',\n 'latitude': '',\n 'altitude': '',\n 'time': [],\n 'temperature': [],\n 'relative_humidity': [],\n 'horizontal_global_solar_radiation': [],\n 'diffuse_horizontal_solar_radiation': [],\n 'air_pressure': [],\n 'vertical_rain': [],\n 'wind_direction': [],\n 'wind_speed': [],\n 'cloud_index': [],\n 'atmospheric_counter_horizontal_long_wave_radiation': [],\n 'atmospheric_horizontal_long_wave_radiation': [],\n 'ground_temperature': [],\n 'ground_reflectance': []\n }\n\n file_obj = open(file_path, 'r')\n file_lines = file_obj.readlines()\n file_obj.close()\n\n weather_dict['longitude'] = float(file_lines[4].split('\\t')[0].strip())\n weather_dict['latitude'] = float(file_lines[5].split('\\t')[0].strip())\n weather_dict['altitude'] = float(file_lines[6].split('\\t')[0].strip())\n\n for line in file_lines[12:]:\n splitted_line = line.split('\\t')\n weather_dict['time'].append(datetime.datetime.strptime(splitted_line[0].strip(), '%Y-%m-%d %H:%M'))\n weather_dict['temperature'].append(float(splitted_line[1].strip()))\n weather_dict['relative_humidity'].append(float(splitted_line[2].strip()))\n weather_dict['horizontal_global_solar_radiation'].append(float(splitted_line[3].strip()))\n weather_dict['diffuse_horizontal_solar_radiation'].append(float(splitted_line[4].strip()))\n weather_dict['air_pressure'].append(float(splitted_line[5].strip()))\n weather_dict['vertical_rain'].append(float(splitted_line[6].strip()))\n weather_dict['wind_direction'].append(float(splitted_line[7].strip()))\n weather_dict['wind_speed'].append(float(splitted_line[8].strip()))\n weather_dict['cloud_index'].append(float(splitted_line[9].strip()))\n weather_dict['atmospheric_counter_horizontal_long_wave_radiation'].append(float(splitted_line[10].strip()))\n weather_dict['atmospheric_horizontal_long_wave_radiation'].append(float(splitted_line[11].strip()))\n weather_dict['ground_temperature'].append(float(splitted_line[12].strip()))\n weather_dict['ground_reflectance'].append(float(splitted_line[13].strip()))\n\n return weather_dict", "def DARP2016_MicArray():\n\n M = 36 # number of mics\n array_height = -0.49 # [m] (ref. 
to airfoil height at z=0)\n\n # mic coordinates (corrected for DARP2016 configuration)\n XYZ_array = np.array([[0., 0.025, 0.08477, 0.12044, 0.18311, 0.19394,\n 0.01559, 0.08549, 0.16173, 0.19659, 0.24426, -0.00556,\n 0.02184, 0.08124, 0.06203, 0.11065, -0.02252, -0.05825,\n -0.06043, -0.11924, -0.10628, -0.02252, -0.09449, -0.15659,\n -0.21072, -0.24318, -0.00556, -0.05957, -0.13484, -0.14352,\n -0.19696, 0.01559, 0.02021, -0.01155, 0.03174, -0.00242],\n [-0., -0., 0.04175, 0.11082, 0.10542, 0.15776,\n -0.01955, -0.04024, -0.02507, -0.07743, -0.05327, -0.02437,\n -0.09193, -0.14208, -0.20198, -0.22418, -0.01085, -0.0744,\n -0.1521, -0.17443, -0.22628, 0.01085, -0.00084, -0.04759,\n -0.01553, -0.05799, 0.02437, 0.07335, 0.09276, 0.15506,\n 0.15397, 0.01955, 0.09231, 0.16326, 0.20889, 0.24999],\n array_height*np.ones(M)])\n\n # calibration factors\n array_cal = np.array([73.92182641429085, 96.84446743391487, 85.48777846463159,\n 85.24410968090712, 83.63917149322562, 68.94090765134432,\n 79.2385037527723, 112.77357210746612, 84.8483307868491,\n 87.18956628936178, 97.75046920293282, 89.2829545690508,\n 79.51644155562396, 90.39403884030057, 80.71754629014218,\n 89.4418210091059, 98.33634233056068, 79.2212022850229,\n 91.25543447201031, 89.55040012572815, 85.77495667666254,\n 82.74418222820202, 84.63061055646973, 77.01568014644964,\n 95.52764533324982, 92.16734812591154, 95.27123074600838,\n 87.93335310521428, 96.65066131188675, 93.58564782091074,\n 78.1446818728945, 101.3047738767648, 83.68569643491034,\n 84.7981031520437, 94.40796508430756, 83.52266614867919])\n\n return XYZ_array, array_cal", "def arrayTo3DPts(direction: str, arr: np.ndarray, ij2xy_func: Callable, xy2z_func: Callable) -> List[Point]:\n\n pts = []\n for i in range(arr.shape[0]):\n for j in range(arr.shape[1]):\n val = arr[i, j]\n if np.isfinite(val):\n if direction == 'i':\n i_int, j_int = i + val, j\n elif direction == 'j':\n i_int, j_int = i, j + val\n else:\n raise Exception('Unexpected array direction value: {}'.format(direction))\n x, y = ij2xy_func(i_int, j_int)\n z = xy2z_func(x, y)\n pts.append(Point(x, y, z))\n\n return pts", "def get_azimuth(self):\n self.degrees = self.azimuth_encoder.get_degrees()\n self.tele_azimuth = self.Calculations.convert_degrees(self.degrees)\n return self.tele_azimuth", "def convertToWCS(x, y, wcs_hdr):\n w = WCS(wcs_hdr)\n xy_coords = np.column_stack([x, y])\n \n # FITS convention, so use Fortran-like 1-based origin\n world = w.all_pix2world(xy_coords, 1)\n ra, dec = world[:, 0], world[:, 1]\n \n return ra, dec", "def lla_to_ecef(df):\n \n latitude = np.radians(df[0])\n longitude = np.radians(df[1])\n altitude = df[2]\n\n # WSG84 ellipsoid constants\n a = 6378137\n e = 8.1819190842622e-2\n\n # Prime vertical radius of curvature\n N = a / np.sqrt(1 - e**2 * np.sin(latitude)**2)\n \n x = (N + altitude) * np.cos(latitude) * np.cos(longitude)\n y = (N + altitude) * np.cos(latitude) * np.sin(longitude)\n z = ((1 - e**2) * N + altitude) * np.sin(latitude)\n\n df = np.hstack([np.expand_dims(x, axis=0), np.expand_dims(y, axis=0), np.expand_dims(z, axis=0)])\n \n return df", "def test_array2wkt(self):\n\n # Arrays first\n A = numpy.arange(10)\n A = A.reshape(5, 2)\n\n wkt = array2wkt(A, geom_type='POLYGON')\n assert wkt.startswith('POLYGON((')\n fields = wkt[9:-2].split(',')\n for i, field in enumerate(fields):\n x, y = field.split()\n assert numpy.allclose(A[i, :], [float(x), float(y)])\n\n # Then list\n wkt = array2wkt(A.tolist(), geom_type='POLYGON')\n assert wkt.startswith('POLYGON((')\n 
fields = wkt[9:-2].split(',')\n for i, field in enumerate(fields):\n x, y = field.split()\n assert numpy.allclose(A[i, :], [float(x), float(y)])\n\n # Then a linestring example (note one less bracket)\n wkt = array2wkt(A, geom_type='LINESTRING')\n assert wkt.startswith('LINESTRING(')\n fields = wkt[11:-1].split(',')\n for i, field in enumerate(fields):\n x, y = field.split()\n assert numpy.allclose(A[i, :], [float(x), float(y)])", "def cvt2array(tuples):\n rc = []\n for t in tuples:\n rc.append(point3d(np.float32(t[X]), np.float32(t[Y]), np.float32(t[Z])))\n return rc", "def parse_coordinates(self):\n header = self.header\n wcs = WCS()\n try:\n wcs.crval = header['crval1'], header['crval2']\n wcs.crpix = header['crpix1'] - 1, header['crpix2'] - 1\n wcs.cdelt = header['cdelt1'], header['cdelt2']\n except KeyError:\n msg = \"Coordinate system not specified in FITS\"\n logger.error(msg)\n raise TypeError(msg)\n try:\n wcs.ctype = header['ctype1'], header['ctype2']\n except KeyError:\n wcs.ctype = 'unknown', 'unknown'\n try:\n wcs.crota = float(header['crota1']), float(header['crota2'])\n except KeyError:\n wcs.crota = 0., 0.\n try:\n wcs.cunit = header['cunit1'], header['cunit2']\n except KeyError:\n # The \"Definition of the Flexible Image Transport System\", version\n # 3.0, tells us that \"units for celestial coordinate systems defined\n # in this Standard must be degrees\", so we assume that if nothing else\n # is specifiedj\n msg = \"WCS units unknown; using degrees\"\n logger.warning(msg)\n wcs.cunit = 'deg', 'deg'\n return wcs", "def get_accel_data(self):\n x = self.read_i2c_word(self.ACCEL_XOUT0)\n y = self.read_i2c_word(self.ACCEL_YOUT0)\n z = self.read_i2c_word(self.ACCEL_ZOUT0)\n\n accel_scale_modifier = None\n accel_range = self.read_accel_range(True)\n\n if accel_range == self.ACCEL_RANGE_2G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G\n elif accel_range == self.ACCEL_RANGE_4G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_4G\n elif accel_range == self.ACCEL_RANGE_8G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_8G\n elif accel_range == self.ACCEL_RANGE_16G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_16G\n else:\n print(\"Unkown range - accel_scale_modifier set to self.ACCEL_SCALE_MODIFIER_2G\")\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G\n\n x = x / accel_scale_modifier\n y = y / accel_scale_modifier\n z = z / accel_scale_modifier\n\n x = x * self.GRAVITIY_MS2\n y = y * self.GRAVITIY_MS2\n z = z * self.GRAVITIY_MS2\n return [x, y, z]", "def elevation(source):\n srcAzEl = subarrayControl.s.azel(source, 0.0);\n return srcAzEl[1];", "def point2wgs84_9603(self, datum):\n \"\"\"\n h is the height above the ellipsoid. This is the height value that is \n delivered by GPS satellite observations but is not the gravity-related height \n value which is normally used for national mapping and levelling operations. The\n gravity-related height (H) is usually the height above mean sea level or an \n alternative level reference for the country. If one starts with a gravity-related \n height H, it will be necessary to convert it to an ellipsoid height (h) before \n using the above transformation formulas. See section 4.11.1. 
For the WGS 84 \n ellipsoid the difference between ellipsoid and mean sea level can vary between \n values of -100m in the Sri Lanka area to +80m in the North Atlantic.)\n \"\"\"\n h=0\n # a is the semi-major axis of the ellipsoid of the given datum.\n a = datum.axis\n\n # f is the flattening of the ellipsoid of the given datum \n # (get_flattening actually returns the inverse flattening).\n f = 1.0/datum.flattening\n \n # dx, dy, dz are the x, y, z offset parameters for the given datum transformation\n # to WGS84\n dx = datum.dx\n dy = datum.dy\n dz = datum.dz\n \n # latr, lngr are the latitude and longitude in radians\n latr = math.radians(self.lat)\n lngr = math.radians(self.lng)\n\n # e is the eccentricity of the ellipsoid\n e_squared = f*(2-f)\n\n # nu is the prime vertical radius of curvature at latr\n nu = a/math.pow((1-e_squared*sqr(math.sin(latr))),0.5)\n\n X = (nu+h)*math.cos(latr)*math.cos(vlambda)\n Y = (nu+h)*math.cos(latr)*math.sin(vlambda)\n Z = ((1 - math.pow(e,2))*nu + h)*math.sin(phi)\n\n Xwgs84 = X+dx\n Ywgs84 = Y+dy\n Zwgs84 = Z+dz\n\n epsilon = e_squared/(1-e_squared)\n b = a*(1-f)\n p = math.pow(sqr(Xwgs84)+sqr(Ywgs84),0.5)\n q = math.atan2((Zwgs84*a),(p*b))\n\n latrwgs84 = math.atan2( (Zwgs84 + epsilon*b*math.pow(math.sin(q)),3)), \\\n (p - e_squared*a*math.pow(math.cos(q),3) )\n lngrwgs84 = math.atan2(Ywgs84, Xwgs84)\n hwgs84 = (p/math.cos(latrwgs84))-nu\n newlng = lng180(math.degrees(lngrwgs84))\n newlat = math.degrees(latrwgs84)\n return Point(float(truncate(newlng,DEGREE_DIGITS)), float(truncate(newlat,DEGREE_DIGITS)))", "def readaccl(self):\r\n\t\tdata0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_X_L_A)\r\n\t\tdata1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_X_H_A)\r\n\t\t\r\n\t\txAccl = data1 * 256 + data0\r\n\t\tif xAccl > 32767 :\r\n\t\t\txAccl -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from LSM330_OUT_Y_L_M(0x2A), 2 bytes\r\n\t\tY-Axis Mag LSB, Y-Axis Mag MSB\"\"\"\r\n\t\tdata0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Y_L_A)\r\n\t\tdata1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Y_H_A)\r\n\t\t\r\n\t\tyAccl = data1 * 256 + data0\r\n\t\tif yAccl > 32767 :\r\n\t\t\tyAccl -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from LSM330_OUT_Z_L_M(0x2C), 2 bytes\r\n\t\tZ-Axis Mag LSB, Z-Axis Mag MSB\"\"\"\r\n\t\tdata0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Z_L_A)\r\n\t\tdata1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Z_H_A)\r\n\t\t\r\n\t\tzAccl = data1 * 256 + data0\r\n\t\tif zAccl > 32767 :\r\n\t\t\tzAccl -= 65536\r\n\t\t\r\n\t\treturn {'x' : xAccl, 'y' : yAccl, 'z' : zAccl}", "def getAltitudeProfile(pass_length,terrain,uav_altitude,u,start_v,wind_angle):\n altitude_profile = []\n v = start_v\n for k in range(0,round(pass_length)):\n coord = convertCoords([[u,v]],wind_angle,'xy')\n x = coord[0][0]\n y = coord[0][1]\n x_points = [int(x),int(x),int(x)+1,int(x)+1]\n y_points = [int(y),int(y)+1,int(y)+1,int(y)]\n z_points = [terrain[int(y)][int(x)],terrain[int(y)+1][int(x)],\n terrain[int(y)+1][int(x)+1],terrain[int(y)][int(x)+1]]\n\n # For created terrain ONLY\n z = griddata((x_points,y_points),z_points,(x,y)) # Interpolate \n altitude = z + uav_altitude\n\n altitude_profile.append(altitude)\n v +=1\n return altitude_profile", "def dardar2era(dardar, ERA, p_grid):\n lon_d = dardar.get_data('longitude')\n lat_d = dardar.get_data('latitude')\n height_d = dardar.get_data('height')\n\n \n# convert longitude from -180-180 to 0-360\n if lon_d.min() < 0:\n lon_d = lon_d % 360\n \n# add extra pressure level in 
ERA5 data\n xlevel = 1200\n ERA.add_extra_level('temperature', xlevel)\n ERA.add_extra_level('geopotential', xlevel)\n \n# get ERA lat/lon/pressure grids\n \n lat = ERA.t.latitude.data\n lon = ERA.t.longitude.data\n level = ERA.t.level.data \n t = ERA.t.t[0].data\n z = ERA.z.z[0].data\n \n level = np.log(level) # convert pressure to log\n \n \n# add two extra dimension to longitudes to wrap around during interpolation\n \n lon, z = expand_lon(ERA.z.longitude.data, z )\n lon, t = expand_lon(ERA.t.longitude.data, t )\n \n #my_interpolating_function = RegularGridInterpolator((level, lat, lon), A)\n \n p_grid = np.arange(1, 1150, 10)\n points = []\n \n# interpolate ERA5 to DARDAR lat/lon locations\n \n for i in range(len(p_grid)):\n p = np.log(p_grid[i]) # convert pressure to log range\n pts = [[p, lat_d[j], lon_d[j]] for j in range(len(lat_d))] \n points.append(pts)\n \n my_interpolating_function = interpolate(level, lat, lon, t) \n grid_t = my_interpolating_function(points)\n \n my_interpolating_function = interpolate(level, lat, lon, z) \n grid_z = my_interpolating_function(points)\n \n return grid_t, grid_z", "def _GetHorizonAnglesLegacy(its_elev, height_cbsd, height_rx, refractivity):\n num_points = int(its_elev[0])\n step = its_elev[1]\n dist = num_points * step\n\n # Find the refractivity at the average terrain height\n start_avg = int(3.0 + 0.1 * num_points)\n end_avg = num_points - start_avg + 6\n zsys = np.mean(its_elev[start_avg-1:end_avg])\n refractivity *= np.exp(-zsys/9460.0)\n\n # Find the ray down-curvature per meter\n gma = 157e-9\n gme = gma*(1.0 - 0.04665 * np.exp(refractivity/179.3))\n\n alt_cbsd = its_elev[2] + height_cbsd\n alt_rx = its_elev[num_points+2] + height_rx\n qc = 0.5 * gme\n q = qc * dist\n # theta0 and theta1 the slopes, dl0 and dl1 the horizon distances\n theta1 = (alt_rx - alt_cbsd) / dist\n theta0 = theta1 - q\n theta1 = -theta1 - q\n dl0 = dist\n dl1 = dist\n\n if num_points >= 2:\n sa = 0.0\n sb = dist\n wq = True\n for i in range(1, num_points):\n sa += step\n sb -= step\n q = its_elev[i+2] - (qc*sa + theta0) * sa - alt_cbsd\n if q > 0.0:\n theta0 += q/sa\n dl0 = sa\n wq = False\n if not wq:\n q = its_elev[i+2] - (qc*sb + theta1) * sb - alt_rx\n if q > 0.0:\n theta1 += q/sb\n dl1 = sb\n\n return (np.arctan(theta0) * 180/np.pi,\n np.arctan(theta1) * 180/np.pi,\n dl0,\n dl1)", "def _getlats(self):\n lats = 90. 
- np.degrees(self.zeros)\n return lats", "def weyl_coordinates(U: np.ndarray) -> np.ndarray:\n import scipy.linalg as la\n\n pi2 = np.pi / 2\n pi4 = np.pi / 4\n\n U = U / la.det(U) ** (0.25)\n Up = transform_to_magic_basis(U, reverse=True)\n # We only need the eigenvalues of `M2 = Up.T @ Up` here, not the full diagonalization.\n D = la.eigvals(Up.T @ Up)\n\n d = -np.angle(D) / 2\n d[3] = -d[0] - d[1] - d[2]\n cs = np.mod((d[:3] + d[3]) / 2, 2 * np.pi)\n\n # Reorder the eigenvalues to get in the Weyl chamber\n cstemp = np.mod(cs, pi2)\n np.minimum(cstemp, pi2 - cstemp, cstemp)\n order = np.argsort(cstemp)[[1, 2, 0]]\n cs = cs[order]\n d[:3] = d[order]\n\n # Flip into Weyl chamber\n if cs[0] > pi2:\n cs[0] -= 3 * pi2\n if cs[1] > pi2:\n cs[1] -= 3 * pi2\n conjs = 0\n if cs[0] > pi4:\n cs[0] = pi2 - cs[0]\n conjs += 1\n if cs[1] > pi4:\n cs[1] = pi2 - cs[1]\n conjs += 1\n if cs[2] > pi2:\n cs[2] -= 3 * pi2\n if conjs == 1:\n cs[2] = pi2 - cs[2]\n if cs[2] > pi4:\n cs[2] -= pi2\n\n return cs[[1, 0, 2]]", "def set_cruise_altitudes(altitudes, cruise_indicies):\n if cruise_indicies:\n for index in range(0, len(cruise_indicies), 2):\n start = cruise_indicies[index]\n stop = cruise_indicies[index + 1]\n if start < stop:\n cruise_altitude = closest_cruising_altitude(altitudes[start])\n altitudes[start] = cruise_altitude\n altitudes[stop] = cruise_altitude\n\n return altitudes", "def altitude(self):\r\n pressure = self.pressure # in Si units for hPascal\r\n return 44330 * (1.0 - math.pow(pressure / self.sea_level_pressure, 0.1903))", "def altitude(p):\r\n \r\n R = 290 #specific gas constant \r\n T = 93.65 #surface temperature K from A.Coustenis book\r\n g = 1.354 #surface gravity from A.Coustenis book\r\n p0 = 1467 #surface pressure in hPa 6.1 for mars\r\n \r\n z = np.empty_like(p)\r\n \r\n for i in range(p.shape[0]):\r\n z[i] = (-1)*(R*T/g)*np.log((p[i])/p0)/(10**3)\r\n \r\n # Make into an xarray DataArray\r\n z_xr = xr.DataArray(z, coords=[z], dims=['pfull'])\r\n z_xr.attrs['units'] = 'km'\r\n \r\n #below is the inverse of the calculation\r\n #p[i] = p0*np.exp((-1)*z[i]*(10**3)/((R*T/g)))\r\n \r\n return z_xr", "def _lzw(self, tile: bytes) -> np.ndarray:\n decoded = self._reshape(np.frombuffer(imagecodecs.lzw_decode(tile), self.dtype))\n self._unpredict(decoded)\n return np.rollaxis(decoded, 2, 0)", "def fun_azimuth(self):\n\n energy_kev = self.energy_kev.get()\n hkl = self.hkl_magnetic.get()\n hkl = hkl.replace(',', ' ') # remove commas\n hkl = hkl.replace('(', '').replace(')', '') # remove brackets\n hkl = hkl.replace('[', '').replace(']', '') # remove brackets\n hkl = np.fromstring(hkl, sep=' ')\n\n azi = self.azim_zero.get()\n azi = azi.replace(',', ' ') # remove commas\n azi = azi.replace('(', '').replace(')', '') # remove brackets\n azi = azi.replace('[', '').replace(']', '') # remove brackets\n azi = np.fromstring(azi, sep=' ')\n\n pol = self.polval.get()\n if pol == u'\\u03c3-\\u03c3':\n pol = 's-s'\n elif pol == u'\\u03c3-\\u03c0':\n pol = 's-p'\n elif pol == u'\\u03c0-\\u03c3':\n pol = 'p-s'\n else:\n pol = 'p-p'\n\n F0 = self.resF0.get()\n F1 = self.resF1.get()\n F2 = self.resF2.get()\n\n isres = self.isres.get()\n if isres:\n # Resonant scattering\n self.xtl.Plot.simulate_azimuth_resonant(\n hkl,\n energy_kev=energy_kev,\n azim_zero=azi,\n polarisation=pol,\n F0=F0, F1=F1, F2=F2)\n plt.show()\n else:\n # Non-Resonant scattering\n self.xtl.Plot.simulate_azimuth_nonresonant(\n hkl,\n energy_kev=energy_kev,\n azim_zero=azi,\n polarisation=pol)\n plt.show()", "def convert(coordinates):\n 
center = np.mean(coordinates, axis=0, dtype=np.float32)\n x = np.subtract(np.array(coordinates, dtype=np.float32), center)\n rho, phi = cart2pol(x[:, 0], x[:, 1])\n result = np.swapaxes(np.array([rho, phi], dtype=np.float32), 0, 1)\n\n # normalize rho values to range[0-1]\n result[:, 0] = normalize(result[:, 0].reshape(1, -1), norm='max')\n return result", "def get_altitude(self):\n self.degrees = self.altitude_encoder.get_degrees()\n self.tele_altitude = self.Calculations.convert_degrees( self.degrees)\n return self.tele_altitude", "def get_orbit_data(self):\n orbit = self.orbit()\n\n return orbit.apoapsis_altitude, orbit.periapsis_altitude, orbit.inclination, orbit.eccentricity", "def get_altaz(ra,dec,jd=None,lat = 37.9183, lon = -122.1067, alt = 304, equinox='J2000'):\n if jd: t = ap.time.Time(jd,format='jd')\n else: t = ap.time.Time(time.time(),format='unix')\n l = ap.coordinates.EarthLocation(lat=lat*u.deg,\n lon=lon*u.deg,height=alt*u.m)\n f = ap.coordinates.AltAz(obstime=t,location=l)\n c = ap.coordinates.SkyCoord(ra, dec, frame='fk5',unit='deg',equinox=equinox)\n altaz = c.transform_to(f)\n return altaz.alt.deg, altaz.az.deg", "def altAz2RADec(azim, elev, jd, lat, lon):\n\n azim = np.radians(azim)\n elev = np.radians(elev)\n lat = np.radians(lat)\n lon = np.radians(lon)\n \n # Calculate hour angle\n ha = np.arctan2(-np.sin(azim), np.tan(elev)*np.cos(lat) - np.cos(azim)*np.sin(lat))\n\n # Calculate Local Sidereal Time\n lst = np.radians(JD2LST(jd, np.degrees(lon))[0])\n \n # Calculate right ascension\n ra = (lst - ha)%(2*np.pi)\n\n # Calculate declination\n dec = np.arcsin(np.sin(lat)*np.sin(elev) + np.cos(lat)*np.cos(elev)*np.cos(azim))\n\n return np.degrees(ra), np.degrees(dec)", "def interpolate_wx_from_gps(harbor_data):\n # print(harbor_data[\"gps_altitude\"])\n # print(harbor_data[\"gps_times\"])\n\n # Lists to hold the interpolated data\n harbor_data[\"wx_temp_up\"] = []\n harbor_data[\"wx_alt_up\"] = []\n harbor_data[\"wx_temp_down\"] = []\n harbor_data[\"wx_alt_down\"] = []\n \n altitude_peak = 0 # Holds peak altitude of balloon\n altitude_peak_time = 0 # Holds time balloon peaks\n\n # Finds peak altitude and peak altitude time\n for count, altitude in enumerate(harbor_data[\"gps_altitude\"]):\n if altitude > altitude_peak:\n altitude_peak = altitude\n else:\n altitude_peak_time = harbor_data[\"gps_times\"][count]\n break\n\n # Populates lists of temperatures up and temperatures down\n for count, time in enumerate(harbor_data[\"wx_times\"]):\n if time < altitude_peak_time:\n harbor_data[\"wx_temp_up\"].append(harbor_data[\"wx_temperatures\"][count])\n elif time > harbor_data[\"gps_times\"][len(harbor_data[\"gps_times\"])-1]:\n break\n else:\n harbor_data[\"wx_temp_down\"].append(harbor_data[\"wx_temperatures\"][count])\n\n # Populates lists of altitudes up and altitudes down\n harbor_data[\"wx_alt_up\"] = np.linspace(harbor_data[\"gps_altitude\"][0], altitude_peak, len(harbor_data[\"wx_temp_up\"]))\n harbor_data[\"wx_alt_down\"] = np.linspace(altitude_peak, harbor_data[\"gps_altitude\"][len(harbor_data[\"gps_altitude\"])-1], len(harbor_data[\"wx_temp_down\"]))", "def get_compass_dir_azimuth(azimuth, resolution='intercardinal', format='short'):\n if azimuth < 0:\n azimuth += 360\n if format not in ['short', 'long']:\n raise KeyError(f'Direction format {format} is not supported')\n if resolution not in ['cardinal', 'intercardinal', 'meteorological']:\n raise KeyError(f'Direction resolution {resolution} is not supported')\n if resolution == 'cardinal':\n angles = np.arange(0, 
360 + 90, 90)\n if format == 'long':\n points = LONG_CARDINAL_POINTS\n else:\n points = SHORT_CARDINAL_POINTS\n elif resolution == 'intercardinal':\n angles = np.arange(0, 360 + 45, 45)\n if format == 'long':\n points = LONG_INTERCARDINAL_POINTS\n else:\n points = SHORT_INTERCARDINAL_POINTS\n elif resolution == 'meteorological':\n angles = np.arange(0, 360 + 22.5, 22.5)\n if format == 'long':\n points = LONG_METEOROLOGICAL_POINTS\n else:\n points = SHORT_METEOROLOGICAL_POINTS\n\n adiff = abs(azimuth - angles)\n i = adiff.argmin()\n return points[i]", "def solar_angles(df, lat, lon, alt=0):\n\n jd = pd.Timestamp(df).to_julian_date()\n\n # offset (2451543.5)\n d_offset = pd.Timestamp('1999-12-31 00:00:00').to_julian_date()\n\n d = jd - d_offset\n\n\n # Keplerian elements for the sun (geocentric)\n w = 282.9404 + 4.70935E-5 * d # longitude of perihelion [degrees]\n a = 1.0 # mean distance [AU]\n e = 0.016709 - 1.151E-9 * d # eccentricity [-]\n M = np.mod(356.0470 + 0.9856002585 * d, 360.0) # mean anomaly [degrees]\n L = w + M # Sun's mean longitude [degrees]\n oblecl = 23.4393 - 3.563E-7 * d # Sun's obliquity of the eliptic [degrees]\n\n # Auxiliary angle [degrees]\n E = M + (180.0 / np.pi) * e * np.sin(np.deg2rad(M)) * (1.0 + e * np.cos(np.deg2rad(M)))\n\n # Rectangular coordinates in the plane of the ecliptic (x-axis toward perihelion)\n x = np.cos(np.deg2rad(E)) - e\n y = np.sin(np.deg2rad(E)) * np.sqrt(1 - (e ** 2))\n\n # Distance (r) and true anomaly (v)\n r = np.sqrt((x ** 2) + (y ** 2))\n v = np.rad2deg(np.arctan2(y, x))\n\n # Longitude of the sun\n lon_sun = v + w\n\n # Ecliptic rectangular coordinates\n xeclip = r * np.cos(np.deg2rad(lon_sun))\n yeclip = r * np.sin(np.deg2rad(lon_sun))\n zeclip = 0.0\n\n # Rotate coordinates to equatorial rectangular coordinates\n xequat = xeclip\n yequat = yeclip * np.cos(np.deg2rad(oblecl)) + zeclip * np.sin(np.deg2rad(oblecl))\n zequat = yeclip * np.sin(np.deg2rad(23.4406)) + zeclip * np.cos(np.deg2rad(oblecl))\n\n # Convert equatorial rectangular coordinates to right-ascension (RA) and declination\n r = np.sqrt(xequat ** 2 + yequat ** 2 + zequat ** 2) - (alt / 149598000.0)\n RA = np.rad2deg(np.arctan2(yequat, xequat))\n delta = np.rad2deg(np.arcsin(zequat / r))\n\n # Calculate local siderial time\n uth = df.hour + (df.minute / 60.0) + (df.second / 3600.0)\n gmst0 = np.mod(L + 180.0, 360.0) / 15.0\n sidtime = gmst0 + uth + (lon / 15.0)\n\n # Replace RA with hour-angle (HA)\n HA = sidtime * 15.0 - RA\n\n # Convert to rectangular coordinates\n x = np.cos(np.deg2rad(HA)) * np.cos(np.deg2rad(delta))\n y = np.sin(np.deg2rad(HA)) * np.cos(np.deg2rad(delta))\n z = np.sin(np.deg2rad(delta))\n\n # Rotate along an axis going East-West\n xhor = x * np.cos(np.deg2rad(90.0 - lat)) - z * np.sin(np.deg2rad(90.0 - lat))\n yhor = y\n zhor = x * np.sin(np.deg2rad(90.0 - lat)) + z * np.cos(np.deg2rad(90.0 - lat))\n\n # Find azimuthal and elevation angles\n azimuthal = np.rad2deg(np.arctan2(yhor, xhor)) + 180.0\n elevation = np.rad2deg(np.arcsin(zhor))\n\n zenith = 90.0 - elevation\n\n return np.column_stack((zenith, elevation, azimuthal))", "def convert_bbox_to_z(bbox):\n w = bbox[2] - bbox[0]\n h = bbox[3] - bbox[1]\n x = bbox[0] + w/2.\n y = bbox[1] + h/2.\n s = w * h #scale is just area\n r = w / float(h)\n return np.array([x, y, s, r]).reshape((4, 1))", "def get_ecmwf_lat_lon(nc_file):\n from netCDF4 import Dataset\n \n fh = Dataset(nc_file, mode='r')\n\n latitude_ecmwf = fh.variables['latitude_ecmwf'][:]\n longitude_ecmwf = 
fh.variables['longitude_ecmwf'][:]\n\n lonmesh_ecmwf,latmesh_ecmwf = np.meshgrid(longitude_ecmwf,latitude_ecmwf)\n\n print('latitude_ecmwf: ', latitude_ecmwf.shape)\n print('longitude_ecmwf: ', longitude_ecmwf.shape)\n \n return latitude_ecmwf, longitude_ecmwf, latmesh_ecmwf, lonmesh_ecmwf;", "def test_enh_xyz():\n\n layout = read_layout(layout_path=f\"{test_data}/test_mwa.txt\")\n xyz = enh_xyz(layout=layout, latitude=mwa_geo.latitude.radians)\n\n assert xyz.shape[0] == 3\n assert xyz.shape[1] == 3\n assert xyz[0, 0] == 456.25006328090495\n assert xyz[1, 0] == -149.785\n assert xyz[2, 0] == 68.04598792853452", "def extrapToZ(zc,(x0,y0,z0),(px,py,pz)):\n x = x0+ (px/pz)*(zc-z0)\n y = y0+ (py/pz)*(zc-z0)\n\n return (x,y)", "def __getPressureCalibrationCoefficients(self):\n src13 = self.read_byte_data(self.address, 0x13)\n src14 = self.read_byte_data(self.address, 0x14)\n src15 = self.read_byte_data(self.address, 0x15)\n src16 = self.read_byte_data(self.address, 0x16)\n src17 = self.read_byte_data(self.address, 0x17)\n src18 = self.read_byte_data(self.address, 0x18)\n src19 = self.read_byte_data(self.address, 0x19)\n src1A = self.read_byte_data(self.address, 0x1A)\n src1B = self.read_byte_data(self.address, 0x1B)\n src1C = self.read_byte_data(self.address, 0x1C)\n src1D = self.read_byte_data(self.address, 0x1D)\n src1E = self.read_byte_data(self.address, 0x1E)\n src1F = self.read_byte_data(self.address, 0x1F)\n src20 = self.read_byte_data(self.address, 0x20)\n src21 = self.read_byte_data(self.address, 0x21)\n c00 = (src13 << 12) | (src14 << 4) | (src15 >> 4)\n c00 = getTwosComplement(c00, 20)\n c10 = ((src15 & 0x0F) << 16) | (src16 << 8) | src17\n c10 = getTwosComplement(c10, 20)\n c20 = (src1C << 8) | src1D\n c20 = getTwosComplement(c20, 16)\n c30 = (src20 << 8) | src21\n c30 = getTwosComplement(c30, 16)\n c01 = (src18 << 8) | src19\n c01 = getTwosComplement(c01, 16)\n c11 = (src1A << 8) | src1B\n c11 = getTwosComplement(c11, 16)\n c21 = (src1E < 8) | src1F\n c21 = getTwosComplement(c21, 16)\n return c00, c10, c20, c30, c01, c11, c21", "def iterate_over_celestial_slices(array_in, array_out, wcs):\n\n # First put lng/lat as first two dimensions in WCS/last two in Numpy\n if wcs.wcs.lng == 0 and wcs.wcs.lat == 1:\n array_in_view = array_in\n array_out_view = array_out\n elif wcs.wcs.lng == 1 and wcs.wcs.lat == 0:\n array_in_view = array_in.swapaxes(-1, -2)\n array_out_view = array_out.swapaxes(-1, -2)\n else:\n array_in_view = array_in.swapaxes(-2, -1 - wcs.wcs.lat).swapaxes(-1, -1 - wcs.wcs.lng)\n array_out_view = array_out.swapaxes(-2, -1 - wcs.wcs.lat).swapaxes(-1, -1 - wcs.wcs.lng)\n\n # Flatten remaining dimensions to make it easier to loop over\n from operator import mul\n\n nx_in = array_in_view.shape[-1]\n ny_in = array_in_view.shape[-2]\n n_remaining_in = reduce(mul, array_in_view.shape, 1) // nx_in // ny_in\n\n nx_out = array_out_view.shape[-1]\n ny_out = array_out_view.shape[-2]\n n_remaining_out = reduce(mul, array_out_view.shape, 1) // nx_out // ny_out\n\n if n_remaining_in != n_remaining_out:\n raise ValueError(\"Number of non-celestial elements should match\")\n\n array_in_view = array_in_view.reshape(n_remaining_in, ny_in, nx_in)\n array_out_view = array_out_view.reshape(n_remaining_out, ny_out, nx_out)\n\n for slice_index in range(n_remaining_in):\n yield array_in_view[slice_index], array_out_view[slice_index]", "def get_heat_source_inlet_air_absolute_humidity(x_nac: np.ndarray) -> np.ndarray:\n\n return x_nac", "def uv2yz(self, coords, chop=0):\n coords = np.array(coords, 
float, order='c', copy=False)\n yz = np.empty_like(coords)\n distortion = self.distortion_yz.base.base.base\n tmf.pacs_uv2yz(coords.reshape((-1,2)).T, distortion, chop,\n yz.reshape((-1,2)).T)\n yz *= 3600\n return yz", "def get_lw_to_sw_array(self):\n if self.lw_to_sw_array is None:\n lw_to_sw_array = self.basis.get_dO_I_ddelta_alpha(self.sw_survey.geo,self.sw_survey.get_dO_I_ddelta_bar_array())\n else:\n lw_to_sw_array = self.lw_to_sw_array\n return lw_to_sw_array", "def nominal_to_altaz(norm_coord,altaz_coord):\n alt_norm,az_norm = norm_coord.array_direction\n\n if type(norm_coord.x.value).__module__ != np.__name__:\n x = np.zeros(1)\n x[0] = norm_coord.x.value\n x = x*norm_coord.x.unit\n y = np.zeros(1)\n y[0] = norm_coord.y.value\n y = y*norm_coord.y.unit\n else:\n x = norm_coord.x\n y = norm_coord.y\n print(type(norm_coord.x),x)\n\n alt,az = offset_to_altaz(x,y,az_norm,alt_norm)\n altaz_coord = AltAz(az=az.to(u.deg),alt = alt.to(u.deg))\n\n return altaz_coord", "def eventlist():\n\n infile = conf[\"run_path_derived\"] + 'LOCALIZED.txt'\n\n data = np.genfromtxt(infile, skip_header=1) \n\n mlt = cx.MAGtoMLT(data[:, 5], data[:, 0:5])\n\n # Swap mlat and mlon colums so in expected order (lat then long)\n data[:, [6,5]] = data[:, [5,6]]\n \n data = np.hstack((data, np.reshape(mlt, (mlt.shape[0], 1))))\n \n return data", "def sfcwind_2_uas_vas(\n sfcWind: xr.DataArray, sfcWindfromdir: xr.DataArray\n) -> Tuple[xr.DataArray, xr.DataArray]:\n # Converts the wind speed to m s-1\n sfcWind = convert_units_to(sfcWind, \"m/s\")\n\n # Converts the wind direction from the meteorological standard to the mathematical standard\n windfromdir_math = (-sfcWindfromdir + 270) % 360.0\n\n # TODO: This commented part should allow us to resample subdaily wind, but needs to be cleaned up and put elsewhere\n # if resample is not None:\n # wind = wind.resample(time=resample).mean(dim='time', keep_attrs=True)\n #\n # # nb_per_day is the number of values each day. This should be calculated\n # windfromdir_math_per_day = windfromdir_math.reshape((len(wind.time), nb_per_day))\n # # Averages the subdaily angles around a circle, i.e. 
mean([0, 360]) = 0, not 180\n # windfromdir_math = np.concatenate([[degrees(phase(sum(rect(1, radians(d)) for d in angles) / len(angles)))]\n # for angles in windfromdir_math_per_day])\n\n uas = sfcWind * np.cos(np.radians(windfromdir_math))\n vas = sfcWind * np.sin(np.radians(windfromdir_math))\n uas.attrs[\"units\"] = \"m s-1\"\n vas.attrs[\"units\"] = \"m s-1\"\n return uas, vas", "def test_cspad_xy_at_z():\n ## 'CxiDs1.0:Cspad.0)' or 'DscCsPad'\n basedir = '/reg/g/psdm/detector/alignment/cspad/calib-cxi-camera1-2014-09-24/'\n fname_geometry = basedir + '2016-06-03-geometry-cxi06216-r25-camera1-z175mm.txt'\n fname_data = basedir + '2016-06-03-chun-cxi06216-0025-DscCsPad-max.txt'\n\n geometry = GeometryAccess(fname_geometry, pbits=0o377)\n\n # get pixel coordinate index arrays:\n xyc = xc, yc = 1000, 1000\n #rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=xyc)\n #rows, cols = geometry.get_pixel_coord_indexes(do_tilt=True)\n #rows, cols = geometry.get_pixel_xy_inds_at_z(zplane=None, xy0_off_pix=xyc)\n rows, cols = geometry.get_pixel_xy_inds_at_z(zplane=150000)\n\n root, ext = os.path.splitext(fname_data)\n arr = np.load(fname_data) if ext == '.npy' else np.loadtxt(fname_data, dtype=np.float)\n\n #logger.info('arr.shape=', arr.shape\n arr.shape= (32,185,388)\n\n #ave, rms = arr.mean(), arr.std()\n #amp_range = (ave-rms, ave+3*rms)\n amp_range = (0, 1000)\n logger.info('amp_range:' + str(amp_range))\n\n logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))\n img = img_from_pixel_arrays(rows,cols,W=arr)\n\n axim = gg.plotImageLarge(img,amp_range=amp_range)\n gg.move(500,10)\n gg.show()", "def msl_to_wgs84(self, lon_lat_msl):\n geoid = rasterio.open(self.geoid)\n lon_lat_hae = np.zeros_like(lon_lat_msl)\n lon_lat_hae[:, 0:2] = lon_lat_msl[:, 0:2]\n gimg = geoid.read(1)\n geo = feat.wgs84_to_geo(\n geoid.crs, lon_lat_msl[:, 0], lon_lat_msl[:, 1])\n pix = feat.geo_to_pix(geoid.affine, geo[:, 0], geo[:, 1])\n pix = np.round(pix).astype(np.int)\n hae = lon_lat_msl[:, 2] + gimg[pix[:, 1], pix[:, 0]]\n lon_lat_hae[:, 2] = hae\n return lon_lat_hae", "def geo2desiredENU(self, lat, lon, h):\n\t\tlat0 = self.origin[0]\n\t\tlon0 = self.origin[1]\n\t\tx,y,z = pm.geodetic2enu(lat, lon, h, lat0, lon0, self.h0)\n\n\t\tx_L = cos(self.local_rot)*x + sin(self.local_rot)*y\n\t\ty_L = -1*sin(self.local_rot)*x + cos(self.local_rot)*y\n\n\t\tz = self.curr_z_enu - self.GND_ALT\n\t\treturn x_L, y_L, z", "def wvac2air(w):\n scalar = False\n if isinstance(w, (int, float)):\n w = [w]\n scalar = True\n w = np.array([w])\n wair = w.copy()\n\n mask = w > 2000. 
# Modify only wavelength above 2000 A\n\n s2 = (1e4/w[mask])**2\n f = 1.+0.05792105/(238.0185-s2)+0.00167917/(57.362-s2)\n wair[mask] = w[mask]/f\n return wair[0][0] if scalar else wair[0]", "def array_to_island(self):\n\n array_map = self.string_to_array()\n array_shape = np.shape(array_map) # type: tuple\n\n nested = list(np.zeros(array_shape))\n for i, e in enumerate(nested):\n nested[i] = list(e)\n\n for i in range(array_shape[0]):\n for j in range(array_shape[1]):\n if array_map[i, j] == 'J':\n nested[i][j] = Jungle()\n elif array_map[i, j] == 'S':\n nested[i][j] = Savannah()\n elif array_map[i, j] == 'D':\n nested[i][j] = Desert()\n elif array_map[i, j] == 'O':\n nested[i][j] = Ocean()\n elif array_map[i, j] == 'M':\n nested[i][j] = Mountain()\n else:\n raise SyntaxError(\"Island geography multi-line string \"\n \"must only have these letters: \"\n \"'J', 'S', 'D', 'O', 'M'\")\n\n self.cells = np.array(nested)", "def get_surface_elevation(wind_lat, wind_lon):\n # Load the NetCDF file containing the geopotential of Europe.\n nc = Dataset(path_join(era5_data_dir, geopotential_file_name))\n \n # Read the variables from the netCDF file.\n geopot_lat = nc.variables['latitude'][:]\n geopot_lon = nc.variables['longitude'][:]\n \n \n # Check if wind and geopotential data use same grid.\n assert np.array_equal(geopot_lat, wind_lat) and np.array_equal(geopot_lon, wind_lon), \\\n \"Requested latitudes and/or longitudes do not correspond to those in the NetCDF file.\"\n\n geopot_z = nc.variables['z'][0, :, :]\n nc.close()\n\n surface_elevation = geopot_z/9.81\n print(\"Minimum and maximum elevation found are respectively {:.1f}m and {:.1f}m, removing those below zero.\"\n .format(np.amin(surface_elevation), np.amax(surface_elevation)))\n\n # Get rid of negative elevation values.\n for i, row in enumerate(surface_elevation):\n for j, val in enumerate(row):\n if val < 0.:\n surface_elevation[i, j] = 0.\n\n return surface_elevation", "def haDecFromAzAlt (azAlt, lat):\n # convert spherical az/alt (deg) to direction cosines\n azAltDC = dcFromSC (azAlt)\n\n # convert az/alt direction cosines to -ha/dec direction cosines\n negHADecDC = Cnv.haDecFromAzAlt (azAltDC, lat)\n\n # convert -ha/dec direction cosines to spherical -ha/dec (deg)\n ((negHA, dec), atPole) = scFromDC (negHADecDC)\n\n return ((RO.MathUtil.wrapCtr(-negHA), dec), atPole)", "def get_A3():\n\n return array([[0.68557183+0.46550108j, 0.12934765-0.1622676j,\n 0.24409518+0.25335939j],\n [0.1531015 + 0.66678983j, 0.45112492+0.18206976j,\n -0.02633966+0.43477693j],\n [-0.10817164-1.16879196j, -0.18446849+0.03755672j,\n 0.06430325-0.44757084j]])", "def bboxToRaDec(bbox, wcs):\n corners = []\n for corner in bbox.getCorners():\n p = afwGeom.Point2D(corner.getX(), corner.getY())\n coord = wcs.pixelToSky(p).toIcrs()\n corners.append([coord.getRa().asDegrees(), coord.getDec().asDegrees()])\n ra, dec = zip(*corners)\n return ra, dec", "def find_cea_coord(header,phi_c,lambda_c,nx,ny,dx,dy):\n nx = int(nx)\n ny = int(ny)\n\n # Array of CEA coords\n x = []\n y = []\n\n for j in range(ny):\n col = []\n row = []\n for i in range(nx):\n col.append(np.radians((i-(nx-1)/2)*dx))\n row.append(np.radians((j-(ny-1)/2)*dy))\n x.append(col)\n y.append(row)\n\n x = np.array(x)\n y = np.array(y)\n\n # Relevant header values\n rSun = header['rsun_obs']/header['cdelt1'] #solar radius in pixels\n disk_latc = np.radians(header['CRLT_OBS'])\n disk_lonc = np.radians(header['CRLN_OBS'])\n disk_xc = header['CRPIX1'] - 1 #disk center wrt lower left of patch\n disk_yc = 
header['CRPIX2'] - 1\n pa = np.radians(header['CROTA2']*-1)\n\n latc = np.radians(lambda_c)\n lonc = np.radians(phi_c) - disk_lonc\n\n # Convert coordinates\n lat = []\n lon = []\n xi = []\n eta = []\n\n for j in range(ny):\n lat_col = []\n lon_col = []\n xi_col = []\n eta_col = []\n for i in range(nx):\n lat0,lon0 = plane2sphere(x[j,i],y[j,i],latc,lonc)\n lat_col.append(lat0)\n lon_col.append(lon0)\n\n xi0,eta0 = sphere2img(lat0,lon0,disk_latc,0.0,disk_xc,disk_yc,rSun,pa)\n xi_col.append(xi0)\n eta_col.append(eta0)\n lat.append(lat_col)\n lon.append(lon_col)\n xi.append(xi_col)\n eta.append(eta_col)\n\n lat = np.array(lat)\n lon = np.array(lon)\n xi = np.array(xi)\n eta = np.array(eta)\n\n return xi,eta,lat,lon", "def get_probeLocs_calib_setup(dir, num_probes = 16):\n position_vectors = [[0] * 3 for i in range(num_probes)]\n\n #every x postion\n\n # Convert to meters\n x_pos = [-4.25*1e-3*25.4, -4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4]\n y_pos = [-4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4, -4.25*1e-3*25.4]\n z_pos = [-2.25*1e-3*25.4, -0.75*1e-3*25.4, 0.75*1e-3*25.4, 2.25*1e-3*25.4]\n x = 0\n for i in range(num_probes):\n if(i%4 ==0 and i>0):\n x+=1\n position_vectors[i][0] =x_pos[x]\n position_vectors[i][1] = y_pos[x]\n position_vectors[i][2] =z_pos[i%4]\n # print(position_vectors[i][0])\n\n \"\"\" Now take into account the direction\n r shots : x,y,z - > r,t,z\n t shots : x,y,z - > r,t,z\n z shots : x,y,z - > r,t,z\n \"\"\"\n if dir ==2 :#r\n # don't need to switch anything\n return position_vectors\n if dir == 0:#t\n # looks like -90 degree rotation about y-axis of probe coil orientation, so switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n if dir ==1:#z\n # also like -90 degree rotation, switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n\n return position_vectors", "def calculate_psf_tilts():\n for order in [1, 2]:\n\n # Get the file\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n\n # Dimensions\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n\n # Get the wave map\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n\n # Get the y-coordinate of the trace polynomial in this column\n # (center of the trace)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n\n # Interpolate to get the wavelength value at the center\n wave = interp2d(X, Y, wave_map)\n\n # Get the wavelength of the trace center in each column\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n\n # For each column wavelength (defined by the wavelength at\n # the trace center) define an isowavelength contour\n angles = []\n for n, x in enumerate(X):\n\n w = trace_wave[x]\n\n # Edge cases\n try:\n w0 = trace_wave[x-1]\n except IndexError:\n w0 = 0\n\n try:\n w1 = trace_wave[x+1]\n except IndexError:\n w1 = 10\n\n # Define the width of the wavelength bin as half-way\n # between neighboring points\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n\n # Get the coordinates of all the pixels in that range\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n\n # Find the angle between the vertical and the tilted wavelength bin\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n\n # Don't flip them upside down\n angle = 
angle % 180\n\n # Add to the array\n angles.append(angle)\n\n # Save the file\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)", "def desiredENU2geo(self, x_L, y_L, z):\n\t\tx = cos(self.local_rot)*x_L - sin(self.local_rot)*y_L\n\t\ty = sin(self.local_rot)*x_L + cos(self.local_rot)*y_L\n\n\t\tlat0 = self.origin[0]\n\t\tlon0 = self.origin[1]\n\n\t\tlat, lon, alt = pm.enu2geodetic(x, y, z, lat0, lon0, self.h0)\n\t\treturn lat, lon, alt", "def convert_bbox_to_z(bbox):\n w = bbox[2] - bbox[0]\n h = bbox[3] - bbox[1]\n x = bbox[0] + w / 2.\n y = bbox[1] + h / 2.\n s = w * h # scale is just area\n r = w / float(h)\n return np.array([x, y, s, r]).reshape((4, 1))", "def _ArrayToThrustMoment(array):\n assert np.size(array) == 4\n\n return {'thrust': array[0],\n 'moment': [array[1], array[2], array[3]]}", "def gps2tas(GS, TK, verbose=0):\n # confirm GS and TK are valid lengths:\n if 2 < len(GS) < 5:\n pass\n else:\n raise ValueError(\"GS must be a list of three or four items\")\n\n if 2 < len(TK) < 5:\n pass\n else:\n raise ValueError(\"TK must be a list of three or four items\")\n\n if len(GS) != len(TK):\n raise ValueError(\n \"The ground speed and track arrays must have the same number of elements.\"\n )\n\n if len(GS) == 3:\n result = gps2tas3(GS, TK, verbose)\n return result\n else:\n gs_data_sets, tk_data_sets, results = [], [], []\n\n gs_data_sets.append([GS[0], GS[1], GS[2]])\n gs_data_sets.append([GS[1], GS[2], GS[3]])\n gs_data_sets.append([GS[2], GS[3], GS[0]])\n gs_data_sets.append([GS[3], GS[0], GS[1]])\n\n tk_data_sets.append([TK[0], TK[1], TK[2]])\n tk_data_sets.append([TK[1], TK[2], TK[3]])\n tk_data_sets.append([TK[2], TK[3], TK[0]])\n tk_data_sets.append([TK[3], TK[0], TK[1]])\n\n for (gs, tk) in zip(gs_data_sets, tk_data_sets):\n results.append(gps2tas3(gs, tk, 2))\n\n ave_TAS = 0\n ave_wind_x = 0\n ave_wind_y = 0\n sum2_TAS = 0\n\n for item in results:\n ave_TAS += item[0]\n sum2_TAS += item[0] ** 2\n ave_wind_x += item[1][0] * M.sin(M.pi * item[1][1] / 180.0)\n ave_wind_y += item[1][0] * M.cos(M.pi * item[1][1] / 180.0)\n\n ave_TAS /= 4.0\n std_dev_TAS = M.sqrt((sum2_TAS - 4 * ave_TAS ** 2) / 3)\n ave_wind_x /= 4\n ave_wind_y /= 4.0\n ave_wind_speed = M.sqrt(ave_wind_x ** 2 + ave_wind_y ** 2)\n ave_wind_dir = (720.0 - (180.0 / M.pi * M.atan2(ave_wind_x, ave_wind_y))) % 360\n # return results\n\n if verbose == 0:\n return ave_TAS\n elif verbose == 1:\n return ave_TAS, std_dev_TAS\n elif verbose == 2:\n return (\n ave_TAS,\n std_dev_TAS,\n (\n (results[0][1][0], results[0][1][1]),\n (results[1][1][0], results[1][1][1]),\n (results[2][1][0], results[2][1][1]),\n (results[3][1][0], results[3][1][1]),\n ),\n )\n else:\n raise ValueError(\"The value of verbose must be equal to 0, 1 or 2\")", "def _dwd_get_sun_zenith_angles_channel(self):\n LOGGER.info('Retrieve sun zenith angles')\n try:\n self.check_channels(\"SUN_ZEN_CHN\")\n if self[\"SUN_ZEN_CHN\"].data.shape != self.area.shape:\n self._data_holder.channels.remove(self[\"SUN_ZEN_CHN\"])\n raise Exception()\n except:\n if self.area.lons is None or self.area.lats is None:\n self.area.lons, self.area.lats = self.area.get_lonlats()\n sun_zen_chn_data = np.zeros(shape=self.area.lons.shape)\n q = 500\n for start in xrange(0, sun_zen_chn_data.shape[1], q):\n sun_zen_chn_data[\n :, start: start + q] = sza(\n get_first(self.time_slot), self.area.lons[:, start: start + q],\n self.area.lats[:, start: start + q])\n sun_zen_chn = Channel(name=\"SUN_ZEN_CHN\",\n data=sun_zen_chn_data)\n 
self._data_holder.channels.append(sun_zen_chn)\n\n return self[\"SUN_ZEN_CHN\"]", "def msl_to_wgs84(lon_lat_msl, geoid):\n\n if lon_lat_msl.shape[1] != 3:\n raise ValueError(\"pix must be an Nx3 length numpy.ndarray\")\n\n # Get the geometric transform for the Geoid\n gt_geoid = geoid.GetGeoTransform()\n\n # Load the geoid-shift as an \"image\"\n band = geoid.GetRasterBand(1)\n image = band.ReadAsArray(0, 0, band.XSize, band.YSize)\n pix = pix_from_geo(lon_lat_msl[:, 0:2], gt_geoid)\n\n # Lookup, pixel(x,y) is in image coordinate frame, so we need to reverse\n # when we do a numpy lookup into the array, as they're rotated/reversed\n # e.g. image frames run x right, y down and numpy array corresponding to\n # the mean pixel x is a column (y) index, and vice versa.\n lon_lat_hae = np.hstack((lon_lat_msl[:, 0:2], np.zeros((pix.shape[0], 1))))\n for ii in np.arange(pix.shape[0]):\n lon_lat_hae[ii, 2] = lon_lat_msl[ii, 2] + image[pix[ii, 1], pix[ii, 0]]\n return lon_lat_hae" ]
[ "0.55827683", "0.544639", "0.5397533", "0.53178024", "0.53166306", "0.52501696", "0.5224251", "0.52204317", "0.5159298", "0.5143273", "0.5093226", "0.5093226", "0.5093226", "0.50815976", "0.50683326", "0.506293", "0.50404876", "0.5039063", "0.501149", "0.5002514", "0.4954378", "0.49539453", "0.49454576", "0.49377787", "0.4935175", "0.49195683", "0.4906264", "0.4902345", "0.4895631", "0.48829803", "0.48767522", "0.48615822", "0.4861288", "0.48596844", "0.48547074", "0.4846554", "0.48243347", "0.48139414", "0.4801387", "0.47962558", "0.47921965", "0.4791991", "0.47825053", "0.47766098", "0.47755805", "0.47715837", "0.47635478", "0.47629094", "0.47500497", "0.4748994", "0.4748507", "0.47231743", "0.47127423", "0.46964535", "0.46861914", "0.46841782", "0.46821037", "0.46739587", "0.46649417", "0.46623835", "0.46600857", "0.46502975", "0.46460158", "0.46410772", "0.46337253", "0.4632782", "0.46312556", "0.46280438", "0.46228212", "0.46205956", "0.4597829", "0.45972478", "0.45968845", "0.45965117", "0.45913923", "0.45865163", "0.4583173", "0.45784292", "0.45754826", "0.45680115", "0.45589337", "0.45548365", "0.45537326", "0.45534852", "0.45528477", "0.45439383", "0.45365816", "0.4536502", "0.45360872", "0.45336396", "0.4532986", "0.45260254", "0.45159602", "0.45133752", "0.45098698", "0.4509161", "0.44956836", "0.4494243", "0.44934407", "0.4492104" ]
0.6210394
0
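The fields above close out the first row of the dump: the tail of its hard-negative list, the per-negative similarity scores, and finally the positive document's own score (0.6210394) and its document_rank (0). The sketch below shows one way such rows could be consumed; it assumes the rows are stored as JSON Lines with exactly these field names, and the rank semantics (number of negatives scoring above the positive) are inferred from the dump rather than documented, so treat both as assumptions. For the two complete rows visible in this section the inferred count does equal the stored document_rank (0 and 81).

    import json

    # Sketch only: "triplets.jsonl" is a hypothetical file holding rows shaped like
    # the ones in this dump; field names match the dump, semantics are inferred.
    def negatives_above_positive(row):
        # Scores are serialized as strings in the dump, hence the float() casts.
        doc_score = float(row["document_score"])
        return sum(float(s) > doc_score for s in row["negative_scores"])

    with open("triplets.jsonl") as fh:
        for line in fh:
            row = json.loads(line)
            print(row["query"][:60], negatives_above_positive(row), row["document_rank"])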
Gets all immediate subdirectories of directory
def getsubdir(directory): subdir=[name for name in os.listdir(directory) if os.path.isdir(os.path.join(directory, name))] subdir=['/'+name+'/' for name in subdir if '.' not in name] return subdir
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getImmediateSubdirectories(dir):", "def get_immediate_subdirectories(self, a_dir):\n return [name for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]", "def all_subdirs_of(dir='.'):\n result = []\n for item in os.listdir(dir):\n path = os.path.join(dir, item)\n if os.path.isdir(path):\n result.append(path)\n return result", "def get_subdirs(dir_path: str) -> list:\n res = list(x.name for x in os.scandir(dir_path) if x.is_dir())\n return res", "def subdirs(dir):\n return [dir + '/' + name for name in os.listdir(dir)\n if os.path.isdir(os.path.join(dir, name))]", "def _RecursiveDirectoryListing(dirpath):\n result = []\n for root, _, files in os.walk(dirpath):\n for f in files:\n result.append(os.path.relpath(os.path.join(root, f), dirpath))\n return result", "def get_subdirectories(a_dir):\n return [a_dir + name + \"/\" for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]", "def collect_subdirs(path_to_walk):\n root, subdirs, _ = next(os.walk(path_to_walk))\n\n return [os.path.join(root, d) for d in subdirs]", "def get_all_subdirs(dir_path):\n\tls = os.listdir(dir_path)\n\tdirs = []\n\tfor f in ls:\n\t\tif os.path.isdir(os.path.join(dir_path, f)):\n\t\t\tdirs.append(f)\n\treturn dirs", "def _subdirectories(self):\n for o in os.listdir(self.directory):\n if os.path.isdir(os.path.join(self.directory, o)):\n yield os.path.join(self.directory, o)", "def get_list_of_subdir_in_dir(directory):\n list_of_all_dirs = []\n for root, dirs, files in os.walk(directory):\n if not re.search('/$', root):\n root += os.sep # Add '/' to the end of root\n if '.ipynb_checkpoints' not in root:\n list_of_all_dirs.append(root)\n return list_of_all_dirs", "def _findAllSubdirs(self, parentDir):\n subDirs = [join(parentDir, d) for d in os.listdir(parentDir) if os.path.isdir(join(parentDir, d))]\n if not subDirs:\n subDirs = None\n else:\n # add the modify time for each directory\n subDirs = [[path, os.stat(path).st_mtime] for path in subDirs]\n\n # return the subdirectories\n return subDirs", "def get_directories_recursive(self, path) :\n\n if path.is_dir() :\n yield path\n for child in path.iterdir():\n yield from self.get_directories_recursive(child)\n elif path.is_file() :\n yield path", "def finddirs(root):\n retval = []\n for root, dirs, files in os.walk(root):\n for d in dirs:\n retval.append(os.path.join(root, d))\n return retval", "def get_dir_recursive(path: str) -> List[str]:\n files = []\n for dir_entry in os.scandir(path):\n if dir_entry.is_dir(follow_symlinks=True):\n files.extend(get_dir_recursive(dir_entry))\n else:\n files.append(dir_entry.path)\n return files", "def get_all_paths(dmt, directory_path=''):\n # Base case.\n if not dmt.children:\n return set()\n \n filesystem_items = set()\n for item in dmt.children.keys():\n filesystem_items.add(directory_path+item)\n # Also get the paths of subdirectory contents.\n if item[-1] == '/':\n subdir_name = item\n subdir_path = directory_path + subdir_name\n \n filesystem_items.add(subdir_path)\n filesystem_items.update(get_all_paths(dmt.children[subdir_name], subdir_path))\n \n return filesystem_items", "def GetDirFilesRecursive(directory):\n dirfiles = set()\n for dirpath, _, files in os.walk(directory):\n for name in files:\n dirfiles.add(os.path.normpath(os.path.join(dirpath, name)))\n return dirfiles", "def directories(self):\n for t in self.dirsIter():\n yield t\n for child in self.children:\n for t in child.directories():\n yield t", "def find_subdirectories(package):\n try:\n subdirectories = 
next(os.walk(package_to_path(package)))[1]\n except StopIteration:\n subdirectories = []\n return subdirectories", "def list_dir_recursively(dir: str) -> list:\n all_files = []\n for root, dirs, files in os.walk(dir):\n for name in files:\n file_path = os.path.join(root, name)\n file_path = os.path.relpath(file_path, dir)\n all_files.append(file_path)\n return all_files", "def get_dirs(root_dir, recursive=True):\n\n ret_dirs = []\n\n for root, dirs, _ in os.walk(root_dir, topdown=True):\n\n for name in dirs:\n ret_dirs.append(os.path.join(root, name))\n\n if not recursive:\n break\n\n return ret_dirs", "def immediate_children( path ):\n assert( os.path.isdir( path ) )\n CMD = [ \"find\", path, \"-mindepth\", \"1\", \"-maxdepth\", \"1\" ]\n return [ x for x in run_cmd( CMD ).split( \"\\n\" ) if len( x ) > 0 ]", "def get_dirs(source_dir):\n all_dirs = set()\n it = os.walk(source_dir)\n it.next()\n dirs = list(it)\n for d in dirs:\n if len(d[1])==0:\n all_dirs.add(d[0])\n return all_dirs", "def lsdirectorytree(directory):\n\t#init list to start, own start directory is included\n\tdirlist = [directory]\n\t#setting the first scan\n\tmoredirectories = dirlist\n\twhile len (moredirectories) != 0:\n\t\tnewdirectories = moredirectories\n\t\tmoredirectories = list ()\n\t\tfor element in newdirectories:\n\t\t\ttoadd = addchilddirectory(element)\n\t\t\tmoredirectories += toadd\n\t\tdirlist += moredirectories\n\treturn dirlist", "def getAllDirs(self):\n\n dirs = [ self ]\n for d in self._subdirs:\n if d.hasImages():\n dirs += d.getAllDirs()\n return dirs", "def list_subdir(a_dir):\n # https://stackoverflow.com/a/800201\n return [name for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]", "def get_subdirs(src_dir):\n img_dirs = sorted(next(os.walk(src_dir))[1])\n subdirs = [src_dir + img_dir for img_dir in img_dirs]\n return subdirs", "def get_subdirectories(self, physical_path):\n result = []\n for p in os.listdir(physical_path):\n if not os.path.isdir(os.path.join(physical_path, p)):\n continue\n result.append(os.path.join(physical_path, p))\n\n return result", "def scantree(path):\n for entry in os.scandir(path):\n if entry.is_dir(follow_symlinks=False):\n yield from scantree(entry.path)\n else:\n yield entry", "def listdir_full_path(directory):\n for f in os.listdir(directory):\n if not os.path.isdir(f):\n yield os.path.abspath(os.path.join(directory, f))", "def collect_paths_to_subdirectories(path_to_directory):\n paths_to_subdirectories = []\n for name_item in os.listdir(path_to_directory):\n path_to_item = os.path.join(path_to_directory,\n name_item)\n if os.path.isdir(path_to_item):\n paths_to_subdirectories.append(path_to_item)\n \n # `os.listdir` returns a list whose order depends\n # on the OS. 
To make `collect_paths_to_subdirectories`\n # independent of the OS, `paths_to_subdirectories` is\n # sorted.\n paths_to_subdirectories.sort()\n return paths_to_subdirectories", "def find_directories(root_directory):\n\n search_directories = []\n\n if os.path.isdir(root_directory):\n files_and_folders = os.listdir(root_directory)\n for item in files_and_folders:\n sub_directory = os.path.join(root_directory, item)\n if os.path.isdir(sub_directory):\n search_directories.append(sub_directory)\n return search_directories\n\n else:\n sys.exit(\"Error: {} is not a valid directory\".format(root_directory))", "def ls_dirs(self, path, recursive=False):\n if path != \"\" and not path.endswith(\"/\"):\n path += \"/\"\n\n blob_iter = self.client.list_blobs(name_starts_with=path)\n dirs = []\n for blob in blob_iter:\n relative_dir = os.path.dirname(os.path.relpath(blob.name, path))\n if (\n relative_dir\n and (recursive or \"/\" not in relative_dir)\n and relative_dir not in dirs\n ):\n dirs.append(relative_dir)\n\n return dirs", "def ls_dir(d):\n return [d for d in [os.path.join(d, f) for f in os.listdir(d)] if os.path.isdir(d)]", "def top_level_directories(self):\n return [d for d in self.directories if len([x for x in self.directories if x in d]) == 1]", "def getsubdirs(toppath, search_string = \".\"):\n if not search_string:\n return [toppath]\n reg_prog = re.compile(search_string)\n dirlist = []\n if search_string == \".\":\n dirlist.append(toppath)\n for root, dirs, files in os.walk(toppath):\n for fname in files:\n if reg_prog.search(os.path.join(root,fname)):\n dirlist.append(root)\n continue\n uniqueList = []\n for value in dirlist:\n if value not in uniqueList:\n uniqueList.append(value)\n return uniqueList", "def scantree(path):\n # type: (str) -> os.DirEntry\n for entry in scandir(path):\n if entry.is_dir(follow_symlinks=True):\n # due to python2 compat, cannot use yield from here\n for t in scantree(entry.path):\n yield t\n else:\n yield entry", "def test_GetFilesInDirectory_subdir_relpath(tempdir: pathlib.Path):\n # Create files: [ sub/a, sub/sub/b ]\n (tempdir / \"sub\").mkdir()\n (tempdir / \"sub\" / \"a\").touch()\n (tempdir / \"sub\" / \"sub\").mkdir()\n (tempdir / \"sub\" / \"sub\" / \"b\").touch()\n assert set(dpack.GetFilesInDirectory(tempdir, [])) == {\n pathlib.Path(\"sub/a\"),\n pathlib.Path(\"sub/sub/b\"),\n }", "def get_all_dirs(dirpath, base_dir=None):\n\tif not base_dir:\n\t\tpost = os.path.normpath(dirpath)\n\telif base_dir in dirpath:\n\t\t(pre, post) = dirpath.split(os.path.normpath(base_dir))\n\t\tpost = os.path.normpath(post)\n\telse:\n\t\treturn\n\tdirs = []\n\t(head, tail) = os.path.split(post)\n\twhile tail:\n\t\tdirs.append(tail)\n\t\t(head, tail) = os.path.split(head)\n\tdirs.reverse()\n\treturn dirs", "def scan_tree(path):\n list_of_file_paths = []\n for file_obj in scandir(path):\n if file_obj.is_dir(follow_symlinks=False):\n # yield from scan_tree(file_obj.path)\n list_of_file_paths.extend(scan_tree(file_obj.path))\n else:\n # yield file_path\n if 'DS_Store' not in file_obj.path:\n list_of_file_paths.append(file_obj.path)\n return list_of_file_paths", "def walk_directory(path: Path, suffix: Optional[str] = None) -> List[Path]:\n if not path.is_dir():\n return [path]\n paths = [path]\n locs = []\n seen = set()\n for path in paths:\n if str(path) in seen:\n continue\n seen.add(str(path))\n if path.parts[-1].startswith(\".\"):\n continue\n elif path.is_dir():\n paths.extend(path.iterdir())\n elif suffix is not None and not path.parts[-1].endswith(suffix):\n 
continue\n else:\n locs.append(path)\n # It's good to sort these, in case the ordering messes up cache.\n locs.sort()\n return locs", "def list_directory(project_tree, directory):\n _, subdirs, subfiles = next(project_tree.walk(directory.path))\n return DirectoryListing(directory,\n [Path(join(directory.path, subdir)) for subdir in subdirs\n if not subdir.startswith('.')],\n [Path(join(directory.path, subfile)) for subfile in subfiles])", "def recursive_iterdir(directory, include_hidden=False):\n dir_path = pathlib.Path(directory)\n content = dir_path.iterdir()\n if not include_hidden:\n content = (item for item in content if not is_hidden(item))\n for item in content:\n if item.is_dir():\n yield from recursive_iterdir(item)\n yield item", "def subdir_findall(dir, subdir):\n strip_n = len(dir.split('/'))\n path = '/'.join((dir, subdir))\n return ['/'.join(s.split('/')[strip_n:]) for s in setuptools.findall(path)]", "def walk_directory(self, path):\n files = []\n for dirpath, dirnames, filenames in os.walk(path):\n for filename in filenames:\n files.append(os.path.join(dirpath, filename))\n return files", "def listDir(path):\n filenames = []\n for root, dirs, files in os.walk(path):\n for i in files:\n filenames.append(os.path.join(root, i))\n return filenames", "def print_directory_contents(path):\n if os.path.isdir(path):\n children = os.listdir(path)\n for child in children:\n child_path = os.path.join(path, child)\n print_directory_contents(child_path)\n else:\n print(path)\n directories.append(path)\n\n return directories", "def get_files(dir: str) -> List[str]:\n ret = []\n for root, dirs, files in os.walk(dir):\n for name in dirs:\n ret.extend(get_files(os.path.join(root, name)))\n for name in files:\n ret.append(os.path.join(root, name))\n return ret", "def get_all_vdirs(path):\n items = glob.glob(path)\n return items", "def list_directory(self, path):\n dirent = self.lookup(path)\n if dirent and dirent.is_directory():\n best_fit = self.retrieve_catalog_for_path(path)\n return best_fit.list_directory(path)", "def get_directories(self):\n\t\tdirectories = []\n\t\tfor i in range(self.directoryModel.get_row_count()):\n\t\t\tdirectories.append((\n\t\t\t\t\tself.directoryModel.get_value(i, 'directoryTagName'),\n\t\t\t\t\tself.directoryModel.get_value(i, 'directory')\n\t\t\t\t\t))\n\t\treturn directories", "def recursive_glob(self, rootdir='.', suffix=''):\n return [os.path.join(rootdir, filename)\n for filename in sorted(os.listdir(rootdir)) if filename.endswith(suffix)]", "def list_folders_into_directory(directory_path: str) -> [str]:\n for root, directory_names, file_names in walk(directory_path):\n return directory_names", "def GetSubdirectories(self, directory):\n\n subdirs = []\n file_dir = self.GetSuitePath(directory)\n for entry in dircache.listdir(file_dir):\n if not self._AreLabelsPaths():\n root = os.path.splitext(entry)[0]\n else:\n root = entry\n if not self.IsValidLabel(root):\n continue\n entry_path = os.path.join(file_dir, entry)\n if (self._IsSuiteFile(entry_path)\n and os.path.isdir(entry_path)):\n subdirs.append(root)\n return subdirs", "def dirs_in_dir(path):\n listing = sorted(os.listdir(path))\n\n dirs = []\n for name in listing:\n longname = path + '/' + name\n if name[0] == '.':\n continue\n if not os.path.isdir(longname):\n continue\n dirs.append(name)\n\n return dirs", "def get_dirs(self, path):\n ds = []\n try:\n for d in os.listdir(path):\n if os.path.isdir(os.path.join(path, d)):\n ds.append(d)\n except OSError:\n pass\n ds.sort()\n return ds", "def 
get_directories(path):\n\n # Uses abspath as the directory\n absolute = os.path.dirname(abspath(path))\n all_files = os.listdir(absolute)\n\n # Get the absolute path of each file\n absolute_files = [\"/\".join([absolute, d]) for d in all_files]\n\n # Here we filter all non-directires out and return\n return [i for i in absolute_files if os.path.isdir(i)]", "def list_directories(dir_pathname, recursive=True, topdown=True,\n followlinks=False):\n for root, dir_names, _ in walk(dir_pathname, recursive, topdown, followlinks):\n for dir_name in dir_names:\n yield absolute_path(os.path.join(root, dir_name))", "def folders(self):\n for directory in self.directory:\n for path in os.listdir(directory):\n full_path = os.path.join(directory, path)\n if os.path.isdir(full_path):\n if not path.startswith('.'):\n self.filepaths.append(full_path)\n return self._get_filepaths()", "def getDirectoryList(path):\n dirList = [\"/\".join([path, object]) for object in os.listdir(path)]\n dirList = [object for object in dirList if os.path.isdir(object)]\n return dirList", "def get_dirs(hub: pop.hub.Hub, sub: pop.hub.Sub) -> List[str]:\n return sub._dirs", "def directories_in_dir_recursive(search_dir, ignored_regex_objects):\n\n dir_paths = [search_dir]\n\n for dirpath, dirnames, filenames in os.walk(search_dir):\n\n for dirname in dirnames:\n\n if expression_helper.is_string_matched_in_regular_expression_objects(dirpath, ignored_regex_objects):\n # ignore subdirectories of ignored directory\n continue\n\n if os.path.islink(dirname):\n # ignore symlink\n # http://stackoverflow.com/questions/15718006/check-if-directory-is-symlink\n continue\n\n if expression_helper.is_string_matched_in_regular_expression_objects(dirname, ignored_regex_objects):\n # ignore this directory\n continue\n\n full_name = os.path.join(dirpath, dirname)\n dir_paths.append(full_name)\n\n return dir_paths", "def list_sub(location=''):\n if location != '':\n pathloc = os.path.join(os.getcwd(), location)\n else:\n pathloc = os.getcwd()\n\n print(pathloc)\n\n directory_contents = os.listdir(pathloc)\n sub_directories = []\n for item in directory_contents:\n # list directories\n if os.path.isdir(os.path.join(pathloc, item)):\n sub_directories.append(item)\n sub_directories.sort()\n return sub_directories", "def get_search_subdirs(adir, afile, in_subdirs=[]):\n dirs = []\n search_subdirs = in_subdirs\n if not in_subdirs:\n search_subdirs = get_basic_search_subdirs(afile)\n for subdir in search_subdirs:\n path = os.path.join(adir, subdir)\n dirs.append(path)\n return dirs", "def segment_paths(root):\n directories = []\n history = history_path(root)\n for d in os.listdir(history):\n path = os.path.join(history, d)\n if os.path.isdir(path):\n directories.append(path)\n return sorted(directories)", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n return [\n os.path.join(looproot, filename)\n for looproot, _, filenames in os.walk(rootdir)\n for filename in filenames\n if filename.endswith(suffix)\n ]", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n return [\n os.path.join(looproot, filename)\n for looproot, _, filenames in os.walk(rootdir)\n for filename in filenames\n if filename.endswith(suffix)\n ]", "def list_dirs(folder):\n return sorted(\n child.name for child in folder.iterdir() if child.is_dir() and ('__pycache__' not in str(child.absolute())))", "def get_all_files(self):\n\t\tfiles_list = []\n\t\tfor path, subdirs, files in os.walk(self.root):\n\t\t for name in files:\n\t\t \tfiles_list.append(os.path.join(self.root, name))\n\t\treturn 
files_list[0:-1]", "def subdirs(path, name):\n f = lambda x: name is None or x.lower() == name.lower()\n return [file_path\n for file_name in os.listdir(path)\n if f(file_name) and not file_name.startswith('.')\n for file_path in (os.path.join(path, file_name),)\n if os.path.isdir(file_path)]", "def getAllDirs(self):\n\n\t\treturn Tagging.getTagsForTags(self, tagList = [])", "def gen_recursive_filelist(d):\n \n for root, directories, files in os.walk(d):\n for file in files:\n yield os.path.join(root, file)", "def get_dir_list(basepath):\n parent = ListDir(basepath=basepath)\n parent.contents = get_dir_list_recurse(basepath, parent=parent)\n return parent", "def get_all_folders_in_directory(directory: str):\n # print()\n # print()\n # print(\"[def] get_all_folders_in_directory(\")\n # print(\" directory : \")\n # print(\" \" + directory )\n # print(\" )\")\n # print(\"{\")\n\n all_files_and_folders = os.listdir(directory)\n absolute_path_to_all_files_and_folders = []\n for item in all_files_and_folders:\n # print(\"item: \" + item)\n absolute_path = directory + \"/\" + item\n absolute_path_to_all_files_and_folders.append(absolute_path)\n ...\n # print()\n # pretty_print_array(all_files_and_folders,\"all_files_and_folders\")\n # print()\n\n only_folders = []\n\n # print(\"[for] item in all_files_and_folders:\")\n for item in absolute_path_to_all_files_and_folders:\n # print(\"item: \" + item)\n # print()\n # print(\"[if] os.path.isdir(item): \" + str(os.path.isdir(item)))\n # print()\n if os.path.isdir(item):\n # print()\n # print(\"!!\")\n # print(\"!! adding \" + item + \" to only_folders\")\n # print(\"!!\")\n # print()\n only_folders.append(item)\n # pretty_print_array(only_folders,\"only_folders\")\n # print()\n ...\n # print(only_folders)\n\n # print()\n # pretty_print_array(only_folders,\"only_folders\")\n # print()\n\n # print(\"}\")\n # print()\n # print()\n\n return only_folders", "def get_dirs():\n # join glob matchers\n dirnames = [\n str(dir_path.relative_to(get_data_dir()))\n for dir_path in get_data_dir().rglob(\"*\")\n if dir_path.is_dir()\n ]\n\n return dirnames", "def get_files(path: str) -> List[str]:\n if not isdir(path):\n return [path] # its expected to return a list each time even if its a single element\n return [file for fileOrDir in listdir(path) for file in get_files(path + '/' + fileOrDir)]\n # return list of each file returned by the recursive call getFiles(fileOrDir) on\n # each fileOrDir in listdir(path)", "def getFilePaths(directory):\r\n\tfor folder, subs, files in os.walk(directory):\r\n\t\tfor filename in files:\r\n\t\t\tyield os.path.join(folder, filename)", "def get_patient_dirs(base_folder):\n patient_dirs = sorted([x for x in base_folder.iterdir() if x.is_dir()])\n return patient_dirs", "def get_files_in_dir(path):\n return [os.path.join(dir_name, file)\n for dir_name, subdirs, files in os.walk(path)\n for file in files]", "def get_all_files_and_nested(file_path):\n stack_dirs = list()\n all_files = list()\n first_level_files = listdir(file_path)\n for f in first_level_files:\n full_f_path = join(file_path, f)\n if isdir(full_f_path):\n stack_dirs.append(full_f_path)\n else:\n all_files.append(full_f_path)\n for d in stack_dirs:\n all_files.extend(get_all_files_and_nested(d))\n return all_files", "def get_sub_folders(session, ds_browser, ds_path):\n search_task = session._call_method(\n session._get_vim(),\n \"SearchDatastore_Task\",\n ds_browser,\n datastorePath=ds_path)\n try:\n task_info = session._wait_for_task(search_task)\n except 
error_util.FileNotFoundException:\n return set()\n # populate the folder entries\n if hasattr(task_info.result, 'file'):\n return set([file.path for file in task_info.result.file])\n return set()", "def get_files(root_dir, recursive=True):\n\n ret_files = []\n\n for root, _, files in os.walk(root_dir, topdown=True):\n\n for name in files:\n ret_files.append(os.path.join(root, name))\n\n if not recursive:\n break\n\n return ret_files", "def list_directories(path):\n dir_list = os.listdir(path)\n directories = [f for f in dir_list if os.path.isdir(os.path.join(path, f))]\n return directories", "def _traverse_path(path):\n path = Path(path)\n\n if path.is_dir():\n yield from path.rglob(\"*\")\n else:\n yield path", "def list_dir(self, path):", "def get_input_subdirs(rootdir='.', dirs_skip=[], recursive=True):\n dirs = []\n for item in os.scandir(rootdir):\n if item.is_dir() and (item.path not in dirs_skip):\n if item.name.endswith(options['subdir_suffix']):\n dirs.append(item.path)\n if recursive:\n subdirs = get_input_subdirs(item.path, dirs_skip, recursive)\n if subdirs:\n dirs.extend(subdirs)\n return dirs", "def _walk_to_root(path):\n if not os.path.exists(path):\n raise IOError('Starting path not found')\n\n if os.path.isfile(path):\n path = os.path.dirname(path)\n\n last_dir = None\n current_dir = os.path.abspath(path)\n while last_dir != current_dir:\n yield current_dir\n parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))\n last_dir, current_dir = current_dir, parent_dir", "def list_dir(*path):\n path = os.path.join(*path)\n return next(os.walk(path))[1]", "def directories(self):\n directories = list(set([\n '/'.join(f.split('/')[:-1]) for f in self.files\n ]))\n return sorted(directories)", "def listdir(self, path=None, recursive=False):\n actual = self.actual(path, recursive)\n if not actual:\n print('No files or directories found.')\n for n in actual:\n print(n)", "def get_parent_subfolders(self):\n return [x[0] for x in os.walk(self.parent_folder)]", "def get_all_files(cwd):\n return os.listdir(cwd)", "def getDirListing (dirPath, revert):\n dirList = []\n fileList = []\n for root, dirs, files in os.walk (dirPath, False):\n for name in files:\n fileList.append (os.path.join (root, name))\n for name in dirs:\n dirList.append (os.path.join (root, name))\n\n if revert == True:\n return fileList + dirList\n else:\n dirList.reverse ()\n return dirList + fileList", "def files_in_dir(root_dir):\n file_set = set()\n\n for dir_, _, files in os.walk(root_dir):\n for file_name in files:\n rel_dir = os.path.relpath(dir_, root_dir)\n rel_file = os.path.join(rel_dir, file_name)\n file_set.add(rel_file)\n\n return [Path(PureWindowsPath(f)) for f in file_set]", "def list_directory_tree(root_dir):\n if not os.path.exists(root_dir):\n logger.error(f\"Cannot list tree under non-existent directory: '{root_dir}'\")\n return\n cwd = os.getcwd()\n os.chdir(root_dir)\n res = sh.tree().stdout.decode(\"utf-8\")\n os.chdir(cwd)\n return res", "def get_directories(self, path):\n\n if self.name == 'dropbox':\n dbx = dropbox.get_dropbox()\n return dropbox.get_folders(dbx, path)", "def recursive_glob(rootdir='.', suffix=''):\n return [os.path.join(looproot, filename)\n for looproot, _, filenames in os.walk(rootdir)\n for filename in filenames if filename.endswith(suffix)]", "def get_all_files(pathdir: str) -> list:\n from os import path, walk\n\n '''\n os.walk(root_path) - directory tree generator.\n For each directory on root_path return a tuple:\n (path_for_dir, list_dirs_on_the_dir, 
list_files_on_the_dir)\n\n trash\n ├── dir1\n │   ├── dir2\n │   │   ├── dir3\n │   │   └── file3\n │   ├── file1\n │   └── file2\n └── dir4\n ├── dir5\n │   ├── file5\n │   └── file6\n └── file4\n\n >>> import os\n >>> list(os.walk('/home/myrequiem/trash'))\n [\n ('trash', ['dir1', 'dir4'], []),\n ('trash/dir1', ['dir2'], ['file2', 'file1']),\n ('trash/dir1/dir2', ['dir3'], ['file3']),\n ('trash/dir1/dir2/dir3', [], []),\n ('trash/dir4', ['dir5'], ['file4']),\n ('trash/dir4/dir5', [], ['file5', 'file6'])\n ]\n '''\n\n allfiles = []\n\n try:\n from tqdm import tqdm\n except ImportError:\n def tqdm(*args, **kwargs):\n if args:\n return args[0]\n return kwargs.get('iterable', None)\n\n for root, dirs, files in tqdm(walk(pathdir), leave=False,\n ncols=80, unit=''):\n del dirs\n for fls in files:\n allfiles.append(path.join(root, fls))\n\n return allfiles", "def get_dir_list_recurse(basepath, itempath=\"\", parent=None):\n total = []\n if not basepath.endswith(\"/\"):\n basepath = basepath + \"/\"\n if itempath and not itempath.endswith(\"/\"):\n itempath = itempath + \"/\"\n items = os.listdir(basepath + itempath)\n for itemname in items:\n curpath = basepath + itempath + itemname\n if os.path.isdir(curpath):\n dirobj = ListDir(\n basepath=basepath,\n itempath=itempath + itemname,\n itemname=itemname,\n parent=parent\n )\n dirobj.contents = get_dir_list_recurse(\n basepath,\n itempath=itempath+itemname,\n parent=dirobj\n )\n total.append(dirobj)\n else:\n fileobj = ListItem(\n parent,\n basepath=basepath,\n itempath=itempath + itemname,\n itemname=itemname\n )\n total.append(fileobj)\n return total", "def listdir(self, subdir=''):\n\n try:\n subdir = subdir.decode()\n except AttributeError:\n pass\n subdir = subdir.rstrip('\\\\')\n # cmd = '\"%s\" \"%s\" 0 ' % (self.ndc_path, self.filename)\n cmd = [\n self.ndc_path,\n self.filename,\n '0'\n ]\n if subdir:\n cmd.append(subdir)\n # cmd += '\"%s\"' % subdir\n\n logging.info(cmd)\n try:\n result = check_output(cmd)\n except CalledProcessError:\n raise FileNotFoundError('Subdirectory not found in disk', [])\n\n result = [r.split(b'\\t') for r in result.split(b'\\r\\n')]\n result = list(filter(lambda x: len(x) == 4, result))\n\n filenames = []\n subdirs = []\n for r in result:\n try:\n decoded = r[0].decode('shift_jis')\n if r[2] != b'<DIR>':\n filenames.append(decoded)\n elif r[2] == b'<DIR>' and len(r[0].strip(b'.')) > 0:\n subdirs.append(decoded)\n except UnicodeDecodeError:\n logging.info(\"Couldn't decode one of the strings in the folder: %s\" % subdir)\n continue\n\n return filenames, subdirs" ]
[ "0.8456564", "0.7849883", "0.75609636", "0.7355576", "0.72475153", "0.7239605", "0.7224201", "0.7196618", "0.71935165", "0.7171549", "0.7157886", "0.7133028", "0.71121323", "0.71053064", "0.706358", "0.7032585", "0.6998959", "0.69399023", "0.692623", "0.6925599", "0.68778616", "0.687129", "0.67787737", "0.67302805", "0.6729009", "0.6646888", "0.66233015", "0.6620263", "0.66130364", "0.66067165", "0.6589677", "0.6545661", "0.6528834", "0.65229064", "0.65092534", "0.6504459", "0.64948493", "0.64579386", "0.6457818", "0.6456339", "0.6454135", "0.6450041", "0.64455366", "0.64386153", "0.6431477", "0.6360927", "0.6357289", "0.63515276", "0.63302577", "0.6326713", "0.6324958", "0.63171357", "0.6315847", "0.6315394", "0.63142586", "0.6309803", "0.63079053", "0.6297661", "0.6295744", "0.6294625", "0.6285613", "0.6282646", "0.6281599", "0.6271252", "0.6265755", "0.6247892", "0.6247892", "0.62457293", "0.6242506", "0.62393856", "0.62345034", "0.6232093", "0.6221494", "0.6207778", "0.61980444", "0.6196045", "0.61920714", "0.6191108", "0.6178569", "0.61762065", "0.6162757", "0.6161953", "0.6160388", "0.6159948", "0.61543185", "0.61493796", "0.61450267", "0.61399704", "0.6136347", "0.6128364", "0.6116799", "0.6116492", "0.61141115", "0.61087", "0.6101966", "0.6097582", "0.60972124", "0.6090043", "0.6084935", "0.6078452" ]
0.6162301
81
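The second row pairs the query "Gets all immediate subdirectories of directory" with the small getsubdir helper in its document field; the positive scores 0.6162301 and its document_rank is 81, i.e. 81 of its 100 negative scores exceed it. A short usage sketch of that document follows; the function body is copied (lightly reformatted) from the row itself, while the "project" directory and its contents are hypothetical, chosen only to illustrate the return format.

    import os

    # Copied from the document field of the row above.
    def getsubdir(directory):
        subdir = [name for name in os.listdir(directory)
                  if os.path.isdir(os.path.join(directory, name))]
        subdir = ['/' + name + '/' for name in subdir if '.' not in name]
        return subdir

    # Hypothetical layout: project/ contains src/, docs/, .git/ and a README.md file.
    # Files are dropped by the isdir() check, names containing a dot (here .git) are
    # dropped by the filter, and each surviving name is wrapped in slashes,
    # e.g. ['/src/', '/docs/'] in os.listdir order.
    print(getsubdir("project"))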
Run source extractor on f, producing a catalogue, and object and background maps
def sexcall(f,ddi,odi,cdi,bdi): # Split to make file name for catalogue, # object map and background map filenames fname = f.split('.fits')[0] # Construct source extractor call objsexcall = 'sex -CATALOG_TYPE ASCII_HEAD -PARAMETERS_NAME photo.param -CATALOG_NAME '+cdi+fname+'.cat'+' -CHECKIMAGE_TYPE OBJECTS -CHECKIMAGE_NAME '+odi+fname+'_objects.fits '+ddi+f baksexcall = 'sex -CATALOG_TYPE ASCII_HEAD -PARAMETERS_NAME photo.param -CATALOG_NAME '+cdi+fname+'.cat'+' -CHECKIMAGE_TYPE BACKGROUND -CHECKIMAGE_NAME '+bdi+fname+'_background.fits '+ddi+f os.system(objsexcall) os.system(baksexcall)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, vcffiles):\n self.vcffilenames = vcffiles\n self.snpsites = {}\n self.snp_positions = {}", "def sources_extraction(image,sextractor_pars):\n\n cat_name, detect_minarea, detect_thresh, analysis_thresh, phot_aperture, satur_level, ZP, gain, pixelScale,seeing,back_type,back_value,back_size,backphoto_type,backphoto_thick,back_filterthresh,checkimage_type,checkimage_name= sextractor_pars\n sp.run('sex %s.fits -c gft.sex -CATALOG_NAME %s.cat -CATALOG_TYPE ASCII_HEAD -PARAMETERS_NAME gft.param -DETECT_TYPE CCD -DETECT_MINAREA %d -DETECT_THRESH %d -ANALYSIS_THRESH %d -PHOT_APERTURES %d -SATUR_LEVEL %d -MAG_ZEROPOINT %f -GAIN %f -PIXEL_SCALE %f -SEEING_FWHM %f -BACK_TYPE %s -BACK_VALUE %f -BACK_SIZE %d -BACKPHOTO_TYPE %s -BACKPHOTO_THICK %d -BACK_FILTTHRESH %f -CHECKIMAGE_TYPE %s -CHECKIMAGE_NAME %s.fits ' % (image,cat_name, detect_minarea, detect_thresh, analysis_thresh, phot_aperture, satur_level, ZP, gain, pixelScale,seeing,back_type,back_value,back_size,backphoto_type,backphoto_thick,back_filterthresh,checkimage_type,checkimage_name),shell=True)", "def mainFunction(f):\n\n #############################################################################\n \n \n # biomass hexagon\n predF = '/vol/v3/lt_stem_v3.1/models/biomassfiaald_20180708_0859/2000/biomassfiaald_20180708_0859_2000_mean.tif'\n trainF = '/vol/v2/datasets/biomass/nbcd/fia_ald/nbcd_fia_ald_biomass_clipped_to_conus.tif'\n shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'\n trainND = -32768\n predND = -9999\n trgField = 'id'\n descrField = 'id'\n outDir = '/vol/v3/lt_stem_v3.1/evaluation/biomassfiaald_20180708_0859/hexagon_correlation'\n xyLim = (500, 500)\n xLab = 'Reference (tons/ha)'\n yLab = 'Prediction (tons/ha)'\n annoXY = (15,420)\n \n \n \"\"\"\n # cc\n predF = '/vol/v3/lt_stem_v3.1/models/canopy_20180915_1631/2001/canopy_20180915_1631_2001_mean.tif'\n trainF = '/vol/v2/stem/conus/reference_rasters/nlcd_2001_canopy_clipped_to_conus_train.tif'\n #shpF = '/vol/v2/datasets/Eco_Level_III_US/us_eco_l3_no_states_multipart.shp'\n shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'\n trainND = 255\n predND = 255\n trgField = 'id'\n descrField = 'id'\n #trgField = 'US_L3CODE'\n #descrField = 'US_L3NAME'\n #outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/ecoregion_correlation'\n outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/hexagon_correlation'\n xyLim = (100, 100)\n xLab = 'Reference (%)'\n yLab = 'Prediction (%)'\n annoXY = (5,82)\n \"\"\"\n #############################################################################\n\n\n # get color setup\n norm = colors.Normalize(vmin=0, vmax=1)\n f2rgb = cm.ScalarMappable(norm=norm, cmap=cm.get_cmap('YlGnBu_r'))\n \n # open the shapefile\t\n vDriver = ogr.GetDriverByName(\"ESRI Shapefile\")\n vSrc = vDriver.Open(shpF, 0)\n vLayer = vSrc.GetLayer()\n \n commonBox = get_intersec([predF, trainF])\n\n#for f in range(vLayer.GetFeatureCount()):\n feature = vLayer[f]\n name = feature.GetField(trgField)\n print('f: '+str(f))\n outFig = os.path.join(outDir, (trgField.replace(' ','_').lower()+'_'+str(name)+'.png'))\n if os.path.exists(outFig):\n #break\n return\n \n descr = feature.GetField(descrField)\n \n predP, coords = get_zone_pixels(feature, shpF, predF, 1, [commonBox[0], commonBox[2], commonBox[3], commonBox[1]])#.compressed() [commonBox[0], commonBox[2], commonBox[3], commonBox[1]]\n trainP, coords = get_zone_pixels(feature, shpF, trainF, 1, 
[coords[0], coords[1], coords[2], coords[3]])#.compressed()\n \n predP = ma.masked_equal(predP, predND)\n trainP = ma.masked_equal(trainP, trainND)\n trainP = ma.masked_equal(trainP, 0)\n\n combMask = np.logical_not(np.logical_not(predP.mask) * np.logical_not(trainP.mask))\n predP[combMask] = ma.masked\n trainP[combMask] = ma.masked\n predP = predP.compressed()\n trainP = trainP.compressed()\n if (predP.shape[0] == 0) | (trainP.shape[0] == 0) | (predP==0).all() | (trainP==0).all():\n predP = np.array([0,0,1,1], dtype='float64')\n trainP = np.array([0,0,1,1], dtype='float64')\n mae = round(np.mean(np.absolute(np.subtract(predP, trainP))),1)\n rmse = round(np.sqrt(np.mean((predP-trainP)**2)),1)\n \n\n totPixs = trainP.shape[0]\n sampSize = round(totPixs*1)\n pickFrom = range(sampSize)\n #sampIndex = np.random.choice(pickFrom, size=sampSize)\n sampIndex = pickFrom\n\n r = round(np.corrcoef(trainP[sampIndex], predP[sampIndex])[0][1], 2)\n if (mae == 0) & (r == 1):\n r = 0.0\n rColor = f2hex(f2rgb, r)\n p = sns.jointplot(trainP[sampIndex], predP[sampIndex], kind=\"hex\", color='blue', xlim=(0,xyLim[0]), ylim=(0,xyLim[1]), size=5)\n p.ax_joint.set_xlabel(xLab)\n p.ax_joint.set_ylabel(yLab)\n p.ax_joint.annotate('r: '+str(r)+'\\nrmse: '+str(rmse)+'\\nmae: '+str(mae), annoXY)\n plt.tight_layout()\n outFig = os.path.join(outDir, (trgField.replace(' ','_').lower()+'_'+str(name)+'.png'))\n p.savefig(outFig)\n \n df = pd.DataFrame({'id':name, 'descr':descr, 'r':r, 'rmse':rmse, 'mae':mae, 'color':rColor, 'img':os.path.basename(outFig)}, index=[0])\n outCSV = outFig.replace('.png','.csv')\n df.to_csv(outCSV, ',', index=False)", "def readSources(self):\n for sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")):\n # shall we just read the UFO here?\n filename = sourceElement.attrib.get('filename')\n # filename is a path relaive to the documentpath. resolve first.\n sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))\n sourceName = sourceElement.attrib.get('name')\n if sourceName is None:\n # if the source element has no name attribute\n # (some authoring tools do not need them)\n # then we should make a temporary one. 
We still need it for reference.\n sourceName = \"temp_master.%d\"%(sourceCount)\n self.reportProgress(\"prep\", 'load', sourcePath)\n if not os.path.exists(sourcePath):\n raise MutatorError(\"Source not found at %s\"%sourcePath)\n sourceObject = self._instantiateFont(sourcePath)\n # read the locations\n sourceLocationObject = None\n sourceLocationObject = self.locationFromElement(sourceElement)\n\n if sourceLocationObject is None:\n raise MutatorError(\"No location defined for source %s\"%sourceName)\n\n # read lib flag\n for libElement in sourceElement.findall('.lib'):\n if libElement.attrib.get('copy') == '1':\n self.libSource = sourceName\n\n # read the groups flag\n for groupsElement in sourceElement.findall('.groups'):\n if groupsElement.attrib.get('copy') == '1':\n self.groupsSource = sourceName\n\n # read the info flag\n for infoElement in sourceElement.findall(\".info\"):\n if infoElement.attrib.get('copy') == '1':\n self.infoSource = sourceName\n if infoElement.attrib.get('mute') == '1':\n self.muted['info'].append(sourceName)\n\n # read the features flag\n for featuresElement in sourceElement.findall(\".features\"):\n if featuresElement.attrib.get('copy') == '1':\n if self.featuresSource is not None:\n self.featuresSource = None\n else:\n self.featuresSource = sourceName\n\n mutedGlyphs = []\n for glyphElement in sourceElement.findall(\".glyph\"):\n glyphName = glyphElement.attrib.get('name')\n if glyphName is None:\n continue\n if glyphElement.attrib.get('mute') == '1':\n if not sourceName in self.muted['glyphs']:\n self.muted['glyphs'][sourceName] = []\n self.muted['glyphs'][sourceName].append(glyphName)\n\n for kerningElement in sourceElement.findall(\".kerning\"):\n if kerningElement.attrib.get('mute') == '1':\n self.muted['kerning'].append(sourceName)\n\n # store\n self.sources[sourceName] = sourceObject, sourceLocationObject\n self.reportProgress(\"prep\", 'done')", "def cli(source_f, raster_f, output, verbose):\n with fiona.open(source_f, 'r') as source:\n source_driver = source.driver\n source_crs = source.crs\n sink_schema = source.schema.copy()\n\n source_geom = source.schema['geometry']\n if source_geom == 'Point':\n sink_schema['geometry'] = '3D Point'\n elif source_geom == 'LineString':\n sink_schema['geometry'] = '3D LineString'\n elif source_geom == '3D Point' or source_geom == '3D LineString':\n pass\n else:\n click.BadParameter(\"Source geometry type {} not implemented\".format(source_geom))\n\n with rasterio.open(raster_f) as raster:\n if source_crs != raster.crs:\n click.BadParameter(\"Features and raster have different CRS.\")\n if raster.count > 1:\n warnings.warn(\"Found {0} bands in {1}, expected a single band raster\".format(raster.bands, raster_f))\n supported = ['int16', 'int32', 'float32', 'float64']\n if raster.dtypes[0] not in supported:\n warnings.warn(\"Found {0} type in {1}, expected one of {2}\".format(raster.dtypes[0], raster_f, supported))\n with fiona.open(\n output, 'w',\n driver=source_driver,\n crs=source_crs,\n schema=sink_schema) as sink:\n\n for feature in source:\n try:\n feature_z = drapery.drape(raster, feature)\n sink.write({\n 'geometry': mapping(feature_z),\n 'properties': feature['properties'],\n })\n except Exception:\n logging.exception(\"Error processing feature %s:\", feature['id'])\n #print(sink.closed)\n #print(raster.closed)\n #print(source.closed)", "def main():\n\n (options, args) = parse_options(sys.argv)\n\n iterator = GFFParser.GFFAddingIterator() \n examiner = GFFParser.GFFExaminer()\n\n exon_map = dict()\n\n id_dict = 
examiner.available_limits(options.anno)['gff_id']\n intron_lists = dict()\n\n ### collect all available sources from gff-file\n source_dict = examiner.available_limits(options.anno)['gff_source_type']\n taken_sources = set()\n #types = ['gene', 'mRNA', 'exon', 'CDS']\n types = ['exon']\n\n ### parse only for exons and let the GFFparser \n ### infer the respective parents (otherwise doubled entries occured)\n ### we sanitize the structure later on anyways\n for key in [source[0] for source in source_dict.keys() if source[1] in types]:\n taken_sources.add(key)\n\n ### try different type, if sources are empty \n if len(taken_sources) == 0:\n types = ['CDS']\n for key in [source[0] for source in source_dict.keys() if source[1] in types]:\n taken_sources.add(key)\n\n ### print taken_sources\n if len(taken_sources) == 0:\n print >> sys.stderr, 'No suitable sources found!'\n sys.exit(-1)\n\n ### only show available sources - if neccessary\n if options.show_sources:\n print 'Parsed file %s\\n' % options.anno\n print 'Following sources are available:\\n'\n for source in taken_sources:\n print source \n print '\\nUse option -s to specify a comma-separated list of sources (-s source1,source2,source3), otherwise all sources are taken'\n sys.exit(0)\n\n if options.sources != '':\n user_sources = set(options.sources.split(','))\n taken_sources = taken_sources.intersection(user_sources)\n if len(taken_sources) == 0:\n print >> sys.stderr, 'The specified sources do not match any of the available sources - Please use option -S to get a list of available sources'\n sys.exit(-1)\n\n if options.verbose:\n print \"take sources %s\" % str(list(taken_sources))\n\n ### build up gff-parsing filter\n gff_sources = []\n for source in taken_sources:\n gff_sources.extend(zip([source] * len(types), types))\n\n ### parse gff-file\n for idx in id_dict.keys():\n print 'parsing chromosome %s' % idx\n if len(gff_sources) > 0:\n trans_dict = iterator.get_all_features(options.anno, {'gff_source_type':gff_sources, 'gff_id':idx})\n else:\n trans_dict = iterator.get_all_features(options.anno, {'gff_id':idx})\n ### since we parse only one chromosome, this loop is evaluated only once\n for chrm in trans_dict.keys():\n ### verify/sanitize the created dictionairy\n fix_structure(trans_dict[chrm])\n intron_lists[chrm] = dict()\n for gene in trans_dict[chrm].features:\n for trans in gene.sub_features:\n if trans.type == 'exon':\n print \"WARNING: Exon on transcript level:\"\n print trans\n print 'will continue\\n'\n continue\n elif len(trans.sub_features) > 1: ### at least two exons for one intron ...\n strand = trans.sub_features[0].strand\n contig_list = [(trans.sub_features[i].location.nofuzzy_start, trans.sub_features[i].location.nofuzzy_end) for i in range(len(trans.sub_features))]\n contig_list.sort(lambda u, v:u[0]-v[0])\n for exon in range(len(contig_list) - 1):\n ### update intron lists\n if contig_list[exon][1] - contig_list[exon + 1][0] == 0:\n continue\n try:\n assert(contig_list[exon][1] < contig_list[exon + 1][0])\n except AssertionError:\n print >> sys.stderr, 'exon_1 %i, exon_2 %i' % (contig_list[exon][1], contig_list[exon + 1][0]) \n print >> sys.stderr, contig_list[exon]\n print >> sys.stderr, contig_list[exon+1]\n print >> sys.stderr, exon\n sys.exit(-1)\n ### for now strand information is only dummy\n intron_lists[chrm][(0, contig_list[exon][1], contig_list[exon + 1][0])] = strand\n \n ### update exon map\n for exon in range(len(contig_list)):\n if not exon_map.has_key(chrm):\n exon_map[chrm] = dict()\n\n if not 
exon_map[chrm].has_key(trans.id):\n exon_map[chrm][trans.id] = dict()\n ### we assume, that an exon cannot occurr twice in the same transcript!\n ### the value in the dict is a binary encoding, if the left/right end is intronic 10 = 2 means, 5' end is intronic\n if len(contig_list) == 1:\n exon_map[chrm][trans.id][contig_list[exon]] = 0 ### 00 -> should never occurr\n elif exon == 0:\n exon_map[chrm][trans.id][contig_list[exon]] = 2 ### 10\n elif exon == len(contig_list) - 1:\n exon_map[chrm][trans.id][contig_list[exon]] = 1 ### 01\n else:\n exon_map[chrm][trans.id][contig_list[exon]] = 3 ### 11 \n\n outfile = open(options.outfile, 'w')\n cPickle.dump(intron_lists, outfile)\n outfile.close()\n \n outfile = open(options.outfile + '.' + 'cov', 'w')\n cPickle.dump(exon_map, outfile)\n outfile.close()", "def main():\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n parser.add_argument('-V', '--version', action='version', version=VERSION)\n\n\n file_group = parser.add_argument_group('Input Files')\n file_group.add_argument('-f', dest='traj', required=True, type=str,\n help='trajectory file (XTC/TRR/GRO/PDB ...)')\n file_group.add_argument('-s', dest='tpr', required=True, type=str,\n help='tpr file (TPR)')\n file_group.add_argument('-o', dest='outpath', type=str,\n help='name of the mapped trajectory (XTC/GRO)')\n file_group.add_argument('-m', dest='map_file', type=str,\n help='.mapping file or path to directory of .map files')\n\n mapping_group = parser.add_argument_group('Mapping Options')\n mapping_group.add_argument('-mode', dest='mode', required=False, type=str,\n help='COG or COM mapping', default='COG')\n mapping_group.add_argument('-pbc', action='store_true', required=False, dest='pbc_complete',\n help='complete pbc with MDAnalysis; this is slow!')\n mapping_group.add_argument('-mols', dest='mol_names', required=True, type=str, nargs='+',\n help='names of molecules to consider when mapping as in the [moleculetypes] directive')\n mapping_group.add_argument('-add_H', dest='h_association', nargs='+', type=lambda s: s.split(':'),\n default=[],\n help='atom-types corresponding to CH3, CH2, CH1 for aliphatic groups and CH2d for double bonds.')\n args = parser.parse_args()\n\n print(\"INFO - Loading universe\")\n # load trajectory\n init_universe = UniverseHandler(args.mol_names,\n args.tpr,\n args.traj,\n in_memory=True)\n if args.pbc_complete:\n print(\"INFO - PBC completing trajectory\")\n init_universe.pbc_complete()\n\n if args.h_association:\n print(\"INFO - Adding Hydrogen to united-atoms\")\n treated_atoms = init_universe.shift_united_atom_carbons(dict(args.h_association))\n else:\n treated_atoms = np.array([])\n\n print(\"INFO - Loading mapping files\")\n #determine if we have a single .mapping file or a directory of .map files\n map_path = pathlib.Path(args.map_file)\n if map_path.is_file() == True:\n with open(args.map_file, \"r\") as _file:\n lines = _file.readlines()\n elif map_path.is_dir() == True:\n l = []\n for i in map_path.glob('*.map'):\n with open(i, \"r\") as _file:\n l.append(_file.readlines())\n if len(l) > 0:\n lines = [item for sublist in l for item in sublist]\n else:\n msg = (\"Couldn't find any .map files in the directory given.\"\n \"Please check the -m argument!\")\n raise IOError(msg)\n else:\n msg = (\"\\nCannot determine if you have given me a single .mapping file\\n\"\n \"or a directory of .map files. 
Please check!\\n\")\n raise IOError(msg)\n\n mappings = read_mapping(lines)[0]\n\n print(\"INFO - Mapping universe - indices\")\n # first mapp the atom indices\n mapped_atoms, bead_idxs = forward_map_indices(init_universe,\n mappings)\n n_frames = len(init_universe.trajectory)\n\n print(\"INFO - Mapping universe - positions\")\n mapped_atoms = numba.typed.List(mapped_atoms)\n bead_idxs = numba.typed.List(bead_idxs)\n # extract the position array from universe\n # if it's not a trajectory we have to emulate\n # a single frame\n path = pathlib.Path(args.traj)\n file_extension = path.suffix.casefold()[1:]\n if file_extension in [\"xtc\", \"trr\"]:\n positions = init_universe.trajectory.coordinate_array\n else:\n positions = init_universe.atoms.positions\n positions = positions.reshape(1, -1, 3)\n\n mapped_trajectory = forward_map_positions(mapped_atoms,\n bead_idxs,\n positions,\n n_frames,\n args.mode,\n treated_atoms)\n\n print(\"INFO - Mapping universe - building pos-array\")\n cg_universe = create_new_universe(init_universe, mapped_trajectory, mappings)\n\n # write coordinate\n print(\"INFO - Writing CG trajectory\")\n if args.traj:\n path = pathlib.Path(args.traj)\n file_extension = path.suffix.casefold()[1:]\n else:\n file_extension = \"xtc\"\n\n if file_extension in [\"xtc\", \"trr\"]:\n cg_beads = cg_universe.atoms\n with mda.Writer(args.outpath,\n multiframe=True,\n n_atoms=len(cg_universe.atoms)) as mapped:\n for time_step in cg_universe.trajectory:\n mapped.write(cg_beads)\n else:\n cg_universe.atoms.positions = cg_universe.trajectory.coordinate_array[0]\n cg_beads = cg_universe.atoms\n cg_universe.atoms.dimensions = init_universe.atoms.dimensions\n with mda.Writer(args.outpath, n_atoms=len(cg_universe.atoms)) as mapped:\n mapped.write(cg_beads)", "def __init__( self, catalogs, **kwargs ):\n\n self.catalogs = catalogs\n self.init()\n self.__dict__.update(kwargs)\n\n if isinstance(catalogs,types.StringType):\n if not os.path.isfile(catalogs): print (\"Cannot open the catalog! Exiting ...\"); sys.exit()\n self.sourcelist = minidom.parse(catalogs).getElementsByTagName('source')\n elif isinstance(catalogs,types.ListType):\n self.sourcelist = []\n for catalog in catalogs:\n if not os.path.isfile(catalog): print (\"Cannot open %s! Exiting ...\" %catalog)\n self.sourcelist += minidom.parse(catalog).getElementsByTagName('source')\n else:\n raise Exception(\"Do not recognize the format! 
Exiting...\")\n\n # exception for diffuse sources\n self.DiffuseSourceList = []\n self.DiffuseSourceList += [{'name':'CenALobes.fits','ra':201.0,'dec':-43.5}]\n self.DiffuseSourceList += [{'name':'CygnusLoop.fits','ra':312.75,'dec':30.67}]\n self.DiffuseSourceList += [{'name':'IC443.fits','ra':94.31,'dec':22.58}]\n self.DiffuseSourceList += [{'name':'HESSJ1825-137.fits','ra':276.13,'dec':-13.8521}]\n self.DiffuseSourceList += [{'name':'MSH15-52.fits','ra':228.507,'dec':-59.256}]\n self.DiffuseSourceList += [{'name':'LMC.fits','ra':81.65,'dec':-68.42}]\n self.DiffuseSourceList += [{'name':'SMC.fits','ra':14.75,'dec':-72.7}]\n self.DiffuseSourceList += [{'name':'VelaX.fits','ra':128.287,'dec':-45.1901}]\n self.DiffuseSourceList += [{'name':'W28.fits','ra':270.34,'dec':-23.44}]\n self.DiffuseSourceList += [{'name':'W30.fits','ra':271.408,'dec':-21.6117}]\n self.DiffuseSourceList += [{'name':'W44.fits','ra':283.99,'dec':1.355}]\n self.DiffuseSourceList += [{'name':'W51C.fits','ra':290.818,'dec':14.145}]", "def main(config):\n file_paths_info = [('GLOFRIS','WATCH','ARG_inunriver_historical_000000000WATCH_1980_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP45','ARG_inunriver_rcp4p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP85','ARG_inunriver_rcp8p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('FATHOM','AR_fluvial_undefended_merged','AR-FU-1000.tif'),\n \t\t\t\t('FATHOM','AR_pluvial_undefended_merged','AR-PU-1000.tif')\n \t\t\t\t]\n figure_names = ['GLOFRIS-WATCH-fluvial','GLOFRIS-RCP45-fluvial','GLOFRIS-RCP85-fluvial','FATHOM-fluvial','FATHOM-pluvial']\n figure_titles = ['current fluvial flooding','RCP4.5 fluvial flooding','RCP8.5 fluvial flooding','current fluvial flooding','current pluvial flooding']\n for f_i in range(len(file_paths_info)):\n\t hazard_file = os.path.join(config['paths']['data'],'flood_data', file_paths_info[f_i][0],file_paths_info[f_i][1],file_paths_info[f_i][2])\n\t output_file = os.path.join(config['paths']['figures'], 'flood-map-{}.png'.format(figure_names[f_i]))\n\t ax = get_axes()\n\t plot_basemap(ax, config['paths']['data'])\n\t scale_bar(ax, location=(0.8, 0.05))\n\t plot_basemap_labels(ax, config['paths']['data'], include_regions=True,include_zorder=3)\n\n\t proj_lat_lon = ccrs.PlateCarree()\n\n\n\t # Create color map\n\t colors = plt.get_cmap('Blues')\n\n\t # Read in raster data\n\t data, lat_lon_extent = get_data(hazard_file)\n\t data[(data <= 0) | (data > 5)] = np.nan\n\t max_val = np.nanmax(data)\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val)\n\n\t # Plot population data\n\t im = ax.imshow(data, extent=lat_lon_extent,transform=proj_lat_lon, cmap=colors,norm =norm, zorder=2)\n\n\t # Add colorbar\n\t cbar = plt.colorbar(im, ax=ax,fraction=0.1, shrink=0.87,pad=0.01, drawedges=False, orientation='horizontal',\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val), ticks=list(np.linspace(0,max_val,3)))\n\t cbar.set_clim(vmin=0,vmax=max_val)\n\n\n\t cbar.outline.set_color(\"none\")\n\t cbar.ax.yaxis.set_tick_params(color='black')\n\t cbar.ax.set_xlabel('Flood depths (m)',fontsize=12,color='black')\n\n\t plt.title('1 in 1000 year {}'.format(figure_titles[f_i]), fontsize = 14)\n\t save_fig(output_file)\n\t plt.close()", "def __init__(self, x=0, y=0, flux=None, time=None, wcs=None, quality=None, mask=None, exposure=1800, sector=0,\n size=150,\n camera=1, ccd=1, cadence=None):\n super(Source, self).__init__()\n if cadence is None:\n cadence = []\n if quality is None:\n quality = []\n if wcs is None:\n wcs = []\n if time is None:\n time = []\n if flux is 
None:\n flux = []\n\n self.size = size\n self.sector = sector\n self.camera = camera\n self.ccd = ccd\n self.cadence = cadence\n self.quality = quality\n self.exposure = exposure\n self.wcs = wcs\n co1 = 38.5\n co2 = 116.5\n catalog_1 = self.search_gaia(x, y, co1, co1)\n catalog_2 = self.search_gaia(x, y, co1, co2)\n catalog_3 = self.search_gaia(x, y, co2, co1)\n catalog_4 = self.search_gaia(x, y, co2, co2)\n catalogdata = vstack([catalog_1, catalog_2, catalog_3, catalog_4], join_type='exact')\n catalogdata = unique(catalogdata, keys='DESIGNATION')\n coord = wcs.pixel_to_world([x + (size - 1) / 2 + 44], [y + (size - 1) / 2])[0].to_string()\n ra = float(coord.split()[0])\n dec = float(coord.split()[1])\n catalogdata_tic = tic_advanced_search_position_rows(ra=ra, dec=dec, radius=(self.size + 2) * 21 * 0.707 / 3600)\n # print(f'no_of_stars={len(catalogdata_tic)}, camera={camera}, ccd={ccd}: ra={ra}, dec={dec}, radius={(self.size + 2) * 21 * 0.707 / 3600}')\n self.tic = convert_gaia_id(catalogdata_tic)\n self.flux = flux[:, y:y + size, x:x + size]\n self.mask = mask[y:y + size, x:x + size]\n self.time = np.array(time)\n median_time = np.median(self.time)\n interval = (median_time - 388.5) / 365.25\n\n num_gaia = len(catalogdata)\n tic_id = np.zeros(num_gaia)\n x_gaia = np.zeros(num_gaia)\n y_gaia = np.zeros(num_gaia)\n tess_mag = np.zeros(num_gaia)\n in_frame = [True] * num_gaia\n for i, designation in enumerate(catalogdata['DESIGNATION']):\n ra = catalogdata['ra'][i]\n dec = catalogdata['dec'][i]\n if not np.isnan(catalogdata['pmra'].mask[i]): # masked?\n ra += catalogdata['pmra'][i] * np.cos(np.deg2rad(dec)) * interval / 1000 / 3600\n if not np.isnan(catalogdata['pmdec'].mask[i]):\n dec += catalogdata['pmdec'][i] * interval / 1000 / 3600\n pixel = self.wcs.all_world2pix(\n np.array([catalogdata['ra'][i], catalogdata['dec'][i]]).reshape((1, 2)), 0, quiet=True)\n x_gaia[i] = pixel[0][0] - x - 44\n y_gaia[i] = pixel[0][1] - y\n try:\n tic_id[i] = catalogdata_tic['ID'][np.where(catalogdata_tic['GAIA'] == designation.split()[2])[0][0]]\n except:\n tic_id[i] = np.nan\n if np.isnan(catalogdata['phot_g_mean_mag'][i]):\n in_frame[i] = False\n elif catalogdata['phot_g_mean_mag'][i] >= 25:\n in_frame[i] = False\n elif -4 < x_gaia[i] < self.size + 3 and -4 < y_gaia[i] < self.size + 3:\n dif = catalogdata['phot_bp_mean_mag'][i] - catalogdata['phot_rp_mean_mag'][i]\n tess_mag[i] = catalogdata['phot_g_mean_mag'][\n i] - 0.00522555 * dif ** 3 + 0.0891337 * dif ** 2 - 0.633923 * dif + 0.0324473\n if np.isnan(tess_mag[i]):\n tess_mag[i] = catalogdata['phot_g_mean_mag'][i] - 0.430\n if np.isnan(tess_mag[i]):\n in_frame[i] = False\n else:\n in_frame[i] = False\n\n tess_flux = 10 ** (- tess_mag / 2.5)\n t = Table()\n t[f'tess_mag'] = tess_mag[in_frame]\n t[f'tess_flux'] = tess_flux[in_frame]\n t[f'tess_flux_ratio'] = tess_flux[in_frame] / np.nanmax(tess_flux[in_frame])\n t[f'sector_{self.sector}_x'] = x_gaia[in_frame]\n t[f'sector_{self.sector}_y'] = y_gaia[in_frame]\n catalogdata = hstack([catalogdata[in_frame], t]) # TODO: sorting not sorting all columns\n catalogdata.sort('tess_mag')\n self.gaia = catalogdata", "def process(sources, output, force):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s [%(levelname)s] - %(message)s', datefmt=\"%H:%M:%S\")\n\n logging.getLogger('shapely.geos').setLevel(logging.WARNING)\n logging.getLogger('Fiona').setLevel(logging.WARNING)\n logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)\n 
requests.packages.urllib3.disable_warnings()\n # logging.getLogger('processing').setLevel(logging.DEBUG)\n\n catalog_features = []\n failures = []\n path_parts_to_skip = utils.get_path_parts(sources).index(\"sources\") + 1\n success = True\n for path in utils.get_files(sources):\n try:\n logging.info(\"Processing \" + path)\n pathparts = utils.get_path_parts(path)[path_parts_to_skip:]\n pathparts[-1] = pathparts[-1].replace('.json', '.geojson')\n\n outdir = os.path.join(output, *pathparts[:-1], pathparts[-1].replace('.geojson', ''))\n outfile = os.path.join(output, *pathparts)\n\n source = utils.read_json(path)\n urlfile = urlparse(source['url']).path.split('/')[-1]\n \n if not hasattr(adapters, source['filetype']):\n logging.error('Unknown filetype ' + source['filetype'])\n failures.append(path)\n continue\n \n read_existing = False\n if os.path.isfile(outfile):\n logging.info(\"Output file exists\")\n if os.path.getmtime(outfile) > os.path.getmtime(path):\n logging.info(\"Output file is up to date\")\n if not force:\n read_existing = True\n logging.warning('Skipping ' + path + ' since generated file exists. Use --force to regenerate.') \n else:\n logging.info(\"Output is outdated, {} < {}\".format(\n datetime.datetime.fromtimestamp(os.path.getmtime(outfile)),\n datetime.datetime.fromtimestamp(os.path.getmtime(path))))\n\n if read_existing:\n with open(outfile, \"rb\") as f:\n geojson = json.load(f)\n properties = geojson['properties']\n else:\n logging.info('Downloading ' + source['url'])\n \n try:\n fp = utils.download(source['url'])\n except IOError:\n logging.error('Failed to download ' + source['url'])\n failures.append(path)\n continue\n \n logging.info('Reading ' + urlfile)\n \n if 'filter' in source:\n filterer = BasicFilterer(source['filter'], source.get('filterOperator', 'and'))\n else:\n filterer = None\n \n try:\n geojson = getattr(adapters, source['filetype'])\\\n .read(fp, source['properties'],\n filterer=filterer,\n layer_name=source.get(\"layerName\", None),\n source_filename=source.get(\"filenameInZip\", None))\n except IOError as e:\n logging.error('Failed to read ' + urlfile + \" \" + str(e))\n failures.append(path)\n continue\n except zipfile.BadZipfile as e:\n logging.error('Unable to open zip file ' + source['url'])\n failures.append(path)\n continue\n finally:\n os.remove(fp.name)\n if(len(geojson['features'])) == 0:\n logging.error(\"Result contained no features for \" + path)\n continue\n excluded_keys = ['filetype', 'url', 'properties', 'filter', 'filenameInZip']\n properties = {k:v for k,v in list(source.items()) if k not in excluded_keys}\n properties['source_url'] = source['url']\n properties['feature_count'] = len(geojson['features'])\n logging.info(\"Generating demo point\")\n properties['demo'] = geoutils.get_demo_point(geojson)\n \n geojson['properties'] = properties\n \n utils.make_sure_path_exists(os.path.dirname(outfile))\n\n #cleanup existing generated files\n if os.path.exists(outdir):\n rmtree(outdir)\n filename_to_match, ext = os.path.splitext(pathparts[-1])\n output_file_dir = os.sep.join(utils.get_path_parts(outfile)[:-1])\n logging.info(\"looking for generated files to delete in \" + output_file_dir)\n for name in os.listdir(output_file_dir):\n base, ext = os.path.splitext(name)\n if base == filename_to_match:\n to_remove = os.path.join(output_file_dir, name)\n logging.info(\"Removing generated file \" + to_remove)\n os.remove(to_remove)\n\n utils.write_json(outfile, geojson)\n\n logging.info(\"Generating label points\")\n label_geojson = 
geoutils.get_label_points(geojson)\n label_path = outfile.replace('.geojson', '.labels.geojson')\n utils.write_json(label_path, label_geojson)\n\n logging.info('Done. Processed to ' + outfile)\n \n if not \"demo\" in properties:\n properties['demo'] = geoutils.get_demo_point(geojson)\n\n properties['path'] = \"/\".join(pathparts)\n catalog_entry = {\n 'type': 'Feature',\n 'properties': properties,\n 'geometry': geoutils.get_union(geojson)\n }\n catalog_features.append(catalog_entry)\n\n if not os.path.exists(outdir) or not os.path.exists(os.path.join(outdir, \"units.json\")):\n logging.info(\"Generated exploded GeoJSON to \" + outdir)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n # .json instead of .geojson, incase there is a unit named \"source\"\n utils.write_json(os.path.join(outdir, \"source.json\"), catalog_entry) \n units = []\n for feature in geojson['features']:\n feature_id = str(feature['properties']['id'])\n feature_id = feature_id.replace('/', '')\n feature_filename = os.path.join(outdir, feature_id + \".geojson\")\n utils.write_json(feature_filename, feature)\n units.append(feature['properties'])\n utils.write_json(os.path.join(outdir, \"units.json\"), units)\n else:\n logging.debug(\"exploded GeoJSON already exists, not generating\")\n\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Error processing file \" + path)\n failures.append(path)\n success = False\n\n catalog = {\n 'type': 'FeatureCollection',\n 'features': catalog_features\n }\n utils.write_json(os.path.join(output,'catalog.geojson'), catalog)\n\n if not success:\n logging.error(\"Failed sources: \" + \", \".join(failures))\n sys.exit(-1)", "def main(source):\n pass", "def process(self):\n self.extract()\n self.transform()\n self.load()", "def main():\n\n obj_lookup = interfaces_dir / \"FrameLib-obj-jlookup.json\"\n\n worker = jParseAndBuild()\n\n refpages = [x for x in refpages_dir.rglob(\"fl.*.xml\")]\n\n for ref in refpages:\n worker.extract_from_refpage(ref)\n\n write_json(obj_lookup, worker.j_master_dict)", "def extract(args):\n prism.extract.run(\n input_fp=args.input,\n output_fp=args.output,\n depth_cutoff=args.depth_cutoff,\n num_cpg_cutoff=args.num_cpg_cutoff,\n prepend_chr=args.prepend_chr,\n paired=args.paired,\n verbose=args.verbose,\n )", "def pre_process_source(source, sourcemag, sourcepb, sourcez, smooth=True):\n inspec = None\n inspecz = np.nan\n inspecmag = np.nan\n inspecpb = None\n\n source_table_file = os.path.join('sources', 'sourcetable.txt')\n source_table_file = io.get_pkgfile(source_table_file)\n source_table = at.Table.read(source_table_file, format='ascii')\n ind = (source_table['specname'] == source)\n nmatch = len(source_table['specname'][ind])\n if nmatch == 1:\n # load the file and the info\n inspec = source_table['specname'][ind][0]\n inspecz = source_table['redshift'][ind][0]\n inspecmag = source_table['g'][ind][0] # for now, just normalize the g-band mag\n elif nmatch == 0:\n message = 'Spectrum {} not listed in lookup table'.format(source)\n pass\n else:\n message = 'Spectrum {} not uniquely listed in lookup table'.format(source)\n pass\n\n if inspec is None:\n warnings.warn(message, RuntimeWarning)\n inspec = source\n inspecz = sourcez\n inspecmag = sourcemag\n inspecpb = sourcepb\n\n if not os.path.exists(inspec):\n message = 'Spectrum {} could not be found'.format(inspec)\n raise ValueError(message)\n\n try:\n spec = at.Table.read(inspec, names=('wave','flux'), format='ascii')\n except Exception as e:\n message = 'Could not read file 
{}'.format(source)\n raise ValueError(message)\n\n if hasattr(inspecpb,'wave') and hasattr(inspecpb, 'throughput'):\n pass\n else:\n pbs = passband.load_pbs([inspecpb], 0.)\n try:\n inspecpb = pbs[inspecpb][0]\n except KeyError as e:\n message = 'Could not load passband {}'.format(inspecpb)\n raise RuntimeError(message)\n\n try:\n inspecmag = float(inspecmag)\n except (TypeError, ValueError) as e:\n message = 'Source magnitude {} could not be interpreted as a float'.format(inspecmag)\n raise ValueError(message)\n\n try:\n inspecz = float(inspecz)\n except (TypeError, ValueError) as e:\n message = 'Source redshift {} could not be interpreted as a float'.format(inspecz)\n raise ValueError(message)\n\n if inspecz < 0 :\n message = 'Source must have positive definite cosmological redshift'\n raise ValueError(message)\n\n inspec = S.ArraySpectrum(spec['wave'], spec['flux'], fluxunits='flam')\n try:\n inspec = inspec.renorm(sourcemag, 'ABmag', inspecpb)\n inspec.convert('flam')\n except Exception as e:\n message = 'Could not renormalize spectrum {}'.format(inspec)\n raise RuntimeError(message)\n\n if inspecz > 0:\n zblue = 1./(1+inspecz) - 1.\n inspec_rest = inspec.redshift(zblue)\n inspec_rest.convert('flam')\n c = default_cosmology.get()\n mu = c.distmod(inspecz)\n out = inspec_rest*(10.**(0.4*mu.value))\n else:\n out = inspec\n # TODO renorm is basic and just calculates dmag = RNval - what the original spectrum's mag is\n # and renormalizes - there's some sanity checking for overlaps\n # we can do this without using it and relying on the .passband routines\n return out", "def extract(self):\n self.build_path_pairs()\n self.extract_field_blocks()\n self.assert_filenames()", "def preprocess_main():", "def createInstanceSource(pcol, path, nr_robots, smallest_robot_id):\n\n # prevent alphabet related bugs by including e and f objects in alphabet\n if (\"e\" not in pcol.A):\n pcol.A.append(\"e\")\n if (\"f\" not in pcol.A):\n pcol.A.append(\"f\")\n\n with open(path + \".c\", \"w\") as fout:\n fout.write(\"\"\"#include \"%s.h\"\n\n#ifdef NEEDING_WILDCARD_EXPANSION\n #include \"wild_expand.h\"\n#endif\n\n#ifdef PCOL_SIM\"\"\" % path.split(\"/\")[-1]) #only filename\n\n fout.write(\"\"\"\\n char* objectNames[] = {[NO_OBJECT] = \"no_object\", \"\"\")\n for obj in pcol.A:\n fout.write(\"\"\"[OBJECT_ID_%s] = \"%s\", \"\"\" % (obj.upper(), obj))\n\n fout.write(\"\"\"};\n char* agentNames[] = {\"\"\")\n for ag_name in pcol.B:\n fout.write(\"\"\"[AGENT_%s] = \"%s\", \"\"\" % (ag_name.upper(), ag_name))\n fout.write(\"\"\"};\n#endif\n\n//the smallest kilo_uid from the swarm\nconst uint16_t smallest_robot_uid = %d;\n//the number of robots that make up the swarm\nconst uint16_t nr_swarm_robots = %d;\n\nvoid lulu_init(Pcolony_t *pcol) {\"\"\" % (smallest_robot_id, nr_robots) )\n\n # call initPcolony()\n fout.write(\"\"\"\\n //init Pcolony with alphabet size = %d, nr of agents = %d, capacity = %d\n initPcolony(pcol, %d, %d, %d);\"\"\" % (len(pcol.A), len(pcol.B), pcol.n, len(pcol.A), len(pcol.B), pcol.n))\n fout.write(\"\"\"\\n //Pcolony.alphabet = %s\"\"\" % pcol.A)\n\n # init environment\n fout.write(\"\"\"\\n\\n //init environment\"\"\")\n counter = 0;\n for obj, nr in pcol.env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->env.items[%d].nr = %d;\\n\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init environment\"\"\")\n\n fout.write(\"\"\"\\n\\n //init 
global pswarm environment\"\"\")\n if (pcol.parentSwarm == None or len(pcol.parentSwarm.global_env) == 0):\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[0].id = OBJECT_ID_E;\"\"\")\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[0].nr = 1;\"\"\")\n else:\n counter = 0\n for obj, nr in pcol.parentSwarm.global_env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[%d].nr = %d;\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init global pswarm environment\"\"\")\n\n fout.write(\"\"\"\\n\\n //init INPUT global pswarm environment\"\"\")\n if (pcol.parentSwarm == None or len(pcol.parentSwarm.in_global_env) == 0):\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[0].id = OBJECT_ID_E;\"\"\")\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[0].nr = 1;\"\"\")\n else:\n counter = 0\n for obj, nr in pcol.parentSwarm.in_global_env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[%d].nr = %d;\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init INPUT global pswarm environment\"\"\")\n\n fout.write(\"\"\"\\n\\n //init OUTPUT global pswarm environment\"\"\")\n if (pcol.parentSwarm == None or len(pcol.parentSwarm.out_global_env) == 0):\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[0].id = OBJECT_ID_E;\"\"\")\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[0].nr = 1;\"\"\")\n else:\n counter = 0\n for obj, nr in pcol.parentSwarm.out_global_env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[%d].nr = %d;\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init OUTPUT global pswarm environment\"\"\")\n\n for ag_name in pcol.B:\n fout.write(\"\"\"\\n\\n //init agent %s\"\"\" % ag_name)\n #fout.write(\"\"\"\\n\\n initAgent(&pcol->agents[AGENT_%s], pcol, %d);\"\"\" % (ag_name.upper(), len(pcol.agents[ag_name].programs)))\n fout.write(\"\"\"\\n\\n initAgent(&pcol->agents[AGENT_%s], pcol, %d);\"\"\" % (ag_name.upper(), getNrOfProgramsAfterExpansion(pcol.agents[ag_name], nr_robots- 1)))\n\n fout.write(\"\"\"\\n //init obj multiset\"\"\")\n counter = 0;\n for obj, nr in pcol.agents[ag_name].obj.items():\n #replace %id and * with $id and $ respectively\n\n for i in range(nr):\n fout.write(\"\"\"\\n pcol->agents[AGENT_%s].obj.items[%d] = OBJECT_ID_%s;\"\"\" % (ag_name.upper(), counter, obj.upper()))\n counter += 1\n\n fout.write(\"\"\"\\n\\n //init programs\"\"\")\n for prg_nr, prg in enumerate(pcol.agents[ag_name].programs):\n fout.write(\"\"\"\\n\\n initProgram(&pcol->agents[AGENT_%s].programs[%d], %d);\"\"\" % (ag_name.upper(), prg_nr, getNrOfRulesWithoutRepetitions(prg)))\n fout.write(\"\"\"\\n //init program %d: < %s >\"\"\" % (prg_nr, prg.print()))\n\n rule_index = 0\n for rule_nr, rule in enumerate(prg):\n # skip rules that contain identical operands and thus have no effect\n if (rule.lhs == rule.rhs and rule.lhs == 'e' and rule.main_type != sim.RuleType.conditional):\n continue\n\n fout.write(\"\"\"\\n //init rule %d: %s\"\"\" % (rule_nr, rule.print(toString=True)) )\n if (rule.main_type != 
sim.RuleType.conditional):\n fout.write(\"\"\"\\n initRule(&pcol->agents[AGENT_%s].programs[%d].rules[%d], RULE_TYPE_%s, OBJECT_ID_%s, OBJECT_ID_%s, NO_OBJECT, NO_OBJECT);\"\"\" % (ag_name.upper(), prg_nr, rule_index, rule.type.name.upper(), rule.lhs.upper(), rule.rhs.upper()))\n else:\n fout.write(\"\"\"\\n initRule(&pcol->agents[AGENT_%s].programs[%d].rules[%d], RULE_TYPE_CONDITIONAL_%s_%s, OBJECT_ID_%s, OBJECT_ID_%s, OBJECT_ID_%s, OBJECT_ID_%s);\"\"\" % (ag_name.upper(), prg_nr, rule_index, rule.type.name.upper(), rule.alt_type.name.upper(), rule.lhs.upper(), rule.rhs.upper(), rule.alt_lhs.upper(), rule.alt_rhs.upper()))\n\n #increase rule_index\n rule_index += 1\n fout.write(\"\"\"\\n //end init program %d\n pcol->agents[AGENT_%s].init_program_nr++;\"\"\" % (prg_nr, ag_name.upper()))\n fout.write(\"\"\"\\n //end init programs\"\"\")\n\n fout.write(\"\"\"\\n //end init agent %s\"\"\" % ag_name)\n\n fout.write(\"\"\"\\n}\"\"\")\n fout.write(\"\"\"\\n\\nvoid lulu_destroy(Pcolony_t *pcol) {\n //destroys all of the subcomponents\n destroyPcolony(pcol);\n}\"\"\")\n fout.write(\"\"\"\\n\n#ifdef NEEDING_WILDCARD_EXPANSION\nuint16_t expand_pcolony(Pcolony_t *pcol, uint16_t my_id) {\n //used for a cleaner iteration through the P colony\n //instead of using agents[i] all of the time, we use just agent\n Agent_t *agent;\n\"\"\")\n\n fout.write(\"\"\"\\n uint8_t obj_with_id[] = {\"\"\")\n obj_with_id_size = 0\n for obj in pcol.A:\n if (\"_W_ID\" in obj):\n fout.write(\"OBJECT_ID_%s, \" % obj.upper())\n obj_with_id_size += 1\n fout.write(\"\"\"};\n uint8_t obj_with_id_size = %d;\"\"\" % (obj_with_id_size))\n\n fout.write(\"\"\"\\n uint8_t obj_with_any[] = {\"\"\")\n obj_with_any_size = 0\n is_obj_with_any_followed_by_id = []\n for i, obj in enumerate(pcol.A):\n if (obj.endswith(\"_W_ALL\")):\n fout.write(\"OBJECT_ID_%s, \" % obj.upper())\n # if we are at least 2 objects before the end of the list\n if (i < len(pcol.A) - 1):\n # check if this _$ wildcarded object is followed by a _$id object\n if (\"_W_ID\" in pcol.A[i+1]):\n is_obj_with_any_followed_by_id.append(1)\n else:\n is_obj_with_any_followed_by_id.append(0)\n else:\n # this (_$) object is the last one in the list\n is_obj_with_any_followed_by_id.append(0)\n obj_with_any_size += 1\n fout.write(\"\"\"};\n uint8_t obj_with_any_size = %d;\n uint8_t is_obj_with_any_followed_by_id[] = {%s};\"\"\" % (obj_with_any_size,\n str(is_obj_with_any_followed_by_id).replace(\"[\", \"\").replace(\"]\", \"\")))\n\n fout.write(\"\"\"\\n\\n uint16_t my_symbolic_id = my_id - smallest_robot_uid;\n\n //replace W_ID wildcarded objects with the object corresponding to the symbolic id\n // e.g.: B_W_ID -> B_0 for my_symbolic_id = 0\n replacePcolonyWildID(pcol, obj_with_id, obj_with_id_size, my_symbolic_id);\n\n //expand each obj_with_any[] element into nr_swarm_robots objects except my_symbolic id.\n // e.g.: B_W_ALL -> B_0, B_2 for nr_swarm_robots = 3 and my_symbolic_id = 1\n expandPcolonyWildAny(pcol, obj_with_any, is_obj_with_any_followed_by_id, obj_with_any_size, my_symbolic_id, nr_swarm_robots);\n\n return my_symbolic_id;\n}\n#endif\"\"\")", "def separateSource(self,compInfo):\n sourceInfo = {}\n source = []\n for eachline in compInfo:\n words = eachline.split() ##This line need to be confirmed with Manas\n if eachline[0] in ['f', 'h']:\n source.append(words[3])\n if len(source) > 0:\n for eachline in compInfo:\n words_s = eachline.split()\n if words_s[0] in source:\n sourceInfo[words_s[0]] = words_s[1:3]\n return sourceInfo", "def extract_features(self):\n 
self.extract_features_static()\n self.extract_features_dynamic()", "def _ExtractWithFilter(\n self, source_path_specs, destination_path, output_writer,\n filter_file_path, skip_duplicates=True):\n for source_path_spec in source_path_specs:\n file_system, mount_point = self._GetSourceFileSystem(\n source_path_spec, resolver_context=self._resolver_context)\n\n if self._knowledge_base is None:\n self._Preprocess(file_system, mount_point)\n\n display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(\n source_path_spec)\n output_writer.Write(\n u'Extracting file entries from: {0:s}\\n'.format(display_name))\n\n environment_variables = self._knowledge_base.GetEnvironmentVariables()\n find_specs = frontend_utils.BuildFindSpecsFromFile(\n filter_file_path, environment_variables=environment_variables)\n\n searcher = file_system_searcher.FileSystemSearcher(\n file_system, mount_point)\n for path_spec in searcher.Find(find_specs=find_specs):\n self._ExtractFileEntry(\n path_spec, destination_path, output_writer,\n skip_duplicates=skip_duplicates)\n\n file_system.Close()", "def process_source_code(source_dir, header_map):\n sources = get_source_files(source_dir)\n for filename in sources:\n process_file(filename, header_map)", "def annotate_intermediary(source_basename, content, fout, fmap):\n is_code = n = 0\n for line in content.split('\\n'):\n if not line:\n continue\n # cc65 outputs source code as three commented lines.\n if line[0] == ';':\n is_code += 1\n else:\n is_code = 0\n # cc65 disables debuginfo, turn it back on.\n if re.match(r'\\W*.debuginfo.*off', line):\n fout.write('.debuginfo on\\n')\n continue\n if re.match(r'\\W*.debuginfo.*-', line):\n fout.write('.debuginfo +\\n')\n continue\n fout.write(line + '\\n')\n # The source code appears on the middle commented line.\n if is_code == 2:\n code_text = line[1:]\n elif is_code == 3:\n # Don't output source code for raw assembly, because it's not needed.\n if '__asm__' in code_text:\n continue\n if n:\n fout.write('_Rsource_map__%s__%04d:\\n' % (source_basename, n))\n fmap.write('%s__%04d %s\\n' % (source_basename, n, code_text))\n n += 1", "def __init__(self, train, transform, data_path='../extracted', category_path=\"../all_fish.txt\"):\n self.transform = transform\n data_path = os.path.join(data_path, 'train' if train else 'test')\n fp = open(category_path, 'r')\n self.fish_dict = {i.split(\";\")[0] : i.split(\";\")[1][1:-1] for i in fp}\n \n #self.name_to_label = [i.split(\";\")[1][1:-1] for i in fp]\n self.image_paths = glob.glob(data_path + '/*.jpg')", "def main(args=None):\n if args is None:\n parser = create_parser()\n args = parser.parse_args()\n\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n input_module = input_mapping[args.input_reader]\n output_module = output_mapping[args.output_format]\n\n templates = []\n # Load templates from external folder if set.\n if args.template_folder:\n templates += read_templates(os.path.abspath(args.template_folder))\n\n # Load internal templates, if not disabled.\n if not args.exclude_built_in_templates:\n templates += read_templates()\n output = []\n for f in args.input_files:\n res = extract_data(f.name, templates=templates, input_module=input_module)\n if res:\n logger.info(res)\n output.append(res)\n if args.copy:\n filename = args.filename.format(\n date=res['date'].strftime('%Y-%m-%d'),\n invoice_number=res['invoice_number'],\n desc=res['desc'],\n )\n shutil.copyfile(f.name, join(args.copy, filename))\n if args.move:\n 
filename = args.filename.format(\n date=res['date'].strftime('%Y-%m-%d'),\n invoice_number=res['invoice_number'],\n desc=res['desc'],\n )\n shutil.move(f.name, join(args.move, filename))\n f.close()\n\n if output_module is not None:\n output_module.write_to_file(output, args.output_name, args.output_date_format)", "def __init__(self, fname):\n self.format = 2\n self.target = {}\n self.filters = {}\n self.comment = {}\n\n try:\n rec = re.compile('file\\s+object\\s+filter', re.I)\n rre = re.compile('(run\\d\\d\\d\\d?)(.*)')\n old = re.compile('\\s*(\\S+)\\s+(\\S+)\\s+(.*)$')\n oldii = re.compile('\\s*(\\S+)\\s*$')\n with open(fname) as f:\n for line in f:\n m = rec.search(line)\n if m:\n self.format = 1\n if len(self.comment):\n raise Exception('Error in night log = ' + fname + ', line = ' + line)\n\n mr = rre.match(line)\n if mr:\n run = mr.group(1)\n if self.format == 2:\n self.comment[run] = mr.group(2).strip()\n else:\n m = old.search(mr.group(2))\n if m:\n self.target[run] = m.group(1)\n self.filters[run] = m.group(2)\n self.comment[run] = m.group(3)\n else:\n m = oldii.search(mr.group(2))\n if m:\n self.target[run] = m.group(1)\n except FileNotFoundError:\n sys.stdout.write(f'Night log = {fname} does not exist\\n')\n except Exception as err:\n sys.stdout.write(f'Problem on night log = {fname}:' + str(err) + '\\n')", "def main():\n print \"=\" * 78\n print \"%s %s\" % (__prog_name__, __version__)\n debug, input_file_names = check_cli()\n if not input_file_names:\n _error(\"No input file name found!\\n\\n%s\" % __help__)\n for input_file_name in input_file_names:\n print \"* Reading\", input_file_name\n file_base_name = os.path.splitext(os.path.basename(input_file_name))[0]\n file_dir_name = os.path.dirname(input_file_name)\n sections = {}\n tex_map = {}\n with open(input_file_name, 'rU') as in_fd:\n sections = get_sections(in_fd.read())\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"sec\",\n {\"sections\": sections})\n\n if not sections:\n _error(\"Nothing could be read from '%s'.\\nIs this an Oolite .dat file?\" \\\n % input_file_name)\n\n # Magically call the 'check' functions\n for name in sections.keys():\n f_name = \"check_%s\" % name.lower()\n if f_name in globals().keys():\n if not globals()[f_name](sections):\n _error(\"Number of entries in '%s' section is different as declared!\" % name)\n\n def get_data(name, sections=sections):\n \"\"\"Returns the 'data' object from the 'name' one found in the\n 'sections' one.\n :sections: dictionary: Object returned by 'get_sections'.\n :name: string: The name of the section to get the 'data'.\n Returns a list of 'lines'.\n \"\"\"\n return sections.get(name, {}).get(\"data\", [])\n\n oti_file_name = build_file_path(file_dir_name, file_base_name, \"oti\")\n tex_map = parse_names(get_data(\"NAMES\"), oti_file_name)\n\n tex_refs, tex_lines_out = parse_textures(get_data(\"TEXTURES\"))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"tex\",\n {\"tex_refs\": tex_refs,\n \"tex_lines_out\": tex_lines_out})\n\n # Update the tex_map object if textures indexes and names are both\n # used in 'TEXTURES'.\n if sorted(tex_map.keys()) != sorted(tex_refs.get(\"named\").keys()):\n tex_map = update_tex_map(tex_map,\n set(tex_refs[\"named\"].keys()).difference(tex_map.keys()))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"txm\",\n {\"tex_map\": tex_map})\n\n n_verts, vertex_lines_out = parse_vertex(get_data(\"VERTEX\"))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"ver\",\n {\"n_verts\": 
n_verts,\n \"vertex_lines_out\": vertex_lines_out})\n\n n_normals, normals_lines_out = parse_normals(get_data(\"NORMALS\"))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"nor\",\n {\"n_normals\": n_normals,\n \"normals_lines_out\": normals_lines_out})\n\n n_faces, faces_groups = parse_faces(get_data(\"FACES\"), tex_refs,\n normals_lines_out)\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"fac\",\n {\"n_faces\": n_faces,\n \"faces_groups\": faces_groups})\n\n output_file_name = build_file_path(file_dir_name,\n file_base_name, 'obj')\n material_file_name = build_file_path(file_dir_name,\n file_base_name, 'mtl')\n mtl_lib_file = os.path.basename(material_file_name)\n\n write_obj(output_file_name, file_base_name, mtl_lib_file,\n tex_lines_out, tex_map, n_verts, vertex_lines_out,\n n_normals, normals_lines_out, n_faces, faces_groups)\n\n write_mtl(material_file_name, tex_map)\n\n _exit(\"* Done\")", "def parse_flt_files(files=[], info=None, uniquename=False, use_visit=False,\n get_footprint = False, \n translate = {'AEGIS-':'aegis-', \n 'COSMOS-':'cosmos-', \n 'GNGRISM':'goodsn-', \n 'GOODS-SOUTH-':'goodss-', \n 'UDS-':'uds-'}): \n \n if info is None:\n if not files:\n files=glob.glob('*flt.fits')\n \n if len(files) == 0:\n return False\n \n info = get_flt_info(files)\n else:\n info = info.copy()\n \n for c in info.colnames:\n if not c.islower(): \n info.rename_column(c, c.lower())\n\n if 'expstart' not in info.colnames:\n info['expstart'] = info['exptime']*0.\n\n so = np.argsort(info['expstart'])\n info = info[so]\n\n #pa_v3 = np.round(info['pa_v3']*10)/10 % 360.\n pa_v3 = np.round(info['pa_v3']) % 360.\n \n target_list = []\n for i in range(len(info)):\n #### Replace ANY targets with JRhRmRs-DdDmDs\n if info['targname'][i] == 'ANY': \n if use_visit:\n new_targname=info['file'][i][:6]\n else:\n new_targname = 'par-'+radec_to_targname(ra=info['ra_targ'][i],\n dec=info['dec_targ'][i])\n \n target_list.append(new_targname.lower())\n else:\n target_list.append(info['targname'][i])\n \n target_list = np.array(target_list)\n\n info['progIDs'] = [file[1:4] for file in info['file']]\n\n progIDs = np.unique(info['progIDs'])\n visits = np.array([os.path.basename(file)[4:6] for file in info['file']])\n dates = np.array([''.join(date.split('-')[1:]) for date in info['date-obs']])\n \n targets = np.unique(target_list)\n \n output_list = [] #OrderedDict()\n filter_list = OrderedDict()\n \n for filter in np.unique(info['filter']):\n filter_list[filter] = OrderedDict()\n \n angles = np.unique(pa_v3[(info['filter'] == filter)]) \n for angle in angles:\n filter_list[filter][angle] = []\n \n for target in targets:\n #### 3D-HST targname translations\n target_use = target\n for key in translate.keys():\n target_use = target_use.replace(key, translate[key])\n \n ## pad i < 10 with zero\n for key in translate.keys():\n if translate[key] in target_use:\n spl = target_use.split('-')\n try:\n if (int(spl[-1]) < 10) & (len(spl[-1]) == 1):\n spl[-1] = '{0:02d}'.format(int(spl[-1]))\n target_use = '-'.join(spl)\n except:\n pass\n\n for filter in np.unique(info['filter'][(target_list == target)]):\n angles = np.unique(pa_v3[(info['filter'] == filter) & \n (target_list == target)])\n \n for angle in angles:\n exposure_list = []\n exposure_start = []\n product='{0}-{1:05.1f}-{2}'.format(target_use, angle, filter) \n\n visit_match = np.unique(visits[(target_list == target) &\n (info['filter'] == filter)])\n \n this_progs = []\n this_visits = []\n \n for visit in visit_match:\n ix = (visits 
== visit) & (target_list == target) & (info['filter'] == filter)\n #this_progs.append(info['progIDs'][ix][0])\n #print visit, ix.sum(), np.unique(info['progIDs'][ix])\n new_progs = list(np.unique(info['progIDs'][ix]))\n this_visits.extend([visit]*len(new_progs))\n this_progs.extend(new_progs)\n \n for visit, prog in zip(this_visits, this_progs):\n visit_list = []\n visit_start = []\n visit_product = '{0}-{1}-{2}-{3:05.1f}-{4}'.format(target_use, prog, visit, angle, filter) \n \n use = ((target_list == target) & \n (info['filter'] == filter) & \n (visits == visit) & (pa_v3 == angle) &\n (info['progIDs'] == prog))\n \n if use.sum() == 0:\n continue\n\n for tstart, file in zip(info['expstart'][use],\n info['file'][use]):\n \n f = file.split('.gz')[0]\n if f not in exposure_list:\n visit_list.append(str(f))\n visit_start.append(tstart)\n \n exposure_list = np.append(exposure_list, visit_list)\n exposure_start.extend(visit_start)\n \n filter_list[filter][angle].extend(visit_list)\n \n if uniquename:\n print(visit_product, len(visit_list))\n so = np.argsort(visit_start)\n exposure_list = np.array(visit_list)[so]\n #output_list[visit_product.lower()] = visit_list\n \n d = OrderedDict(product=str(visit_product.lower()),\n files=list(np.array(visit_list)[so]))\n output_list.append(d)\n \n if not uniquename:\n print(product, len(exposure_list))\n so = np.argsort(exposure_start)\n exposure_list = np.array(exposure_list)[so]\n #output_list[product.lower()] = exposure_list\n d = OrderedDict(product=str(product.lower()),\n files=list(np.array(exposure_list)[so]))\n output_list.append(d)\n \n ### Get visit footprint from FLT WCS\n if get_footprint:\n from shapely.geometry import Polygon\n \n N = len(output_list)\n for i in range(N):\n for j in range(len(output_list[i]['files'])):\n flt_file = output_list[i]['files'][j]\n if (not os.path.exists(flt_file)) & os.path.exists('../RAW/'+flt_file):\n flt_file = '../RAW/'+flt_file\n \n flt_j = pyfits.open(flt_file)\n h = flt_j[0].header\n if (h['INSTRUME'] == 'WFC3') & (h['DETECTOR'] == 'IR'):\n wcs_j = pywcs.WCS(flt_j['SCI',1])\n else:\n wcs_j = pywcs.WCS(flt_j['SCI',1], fobj=flt_j)\n \n fp_j = Polygon(wcs_j.calc_footprint())\n if j == 0:\n fp_i = fp_j\n else:\n fp_i = fp_i.union(fp_j)\n \n output_list[i]['footprint'] = fp_i\n \n return output_list, filter_list", "def add_catalogs(self):\n n_exposures = len(self.info['Module'])\n self.info['point_source'] = [None] * n_exposures\n self.info['galaxyListFile'] = [None] * n_exposures\n self.info['extended'] = [None] * n_exposures\n self.info['convolveExtended'] = [False] * n_exposures\n self.info['movingTarg'] = [None] * n_exposures\n self.info['movingTargSersic'] = [None] * n_exposures\n self.info['movingTargExtended'] = [None] * n_exposures\n self.info['movingTargToTrack'] = [None] * n_exposures\n\n for i in range(n_exposures):\n if int(self.info['detector'][i][-1]) < 5:\n filtkey = 'ShortFilter'\n pupilkey = 'ShortPupil'\n else:\n filtkey = 'LongFilter'\n pupilkey = 'LongPupil'\n filt = self.info[filtkey][i]\n pup = self.info[pupilkey][i]\n\n if self.point_source[i] is not None:\n # In here, we assume the user provided a catalog to go with each filter\n # so now we need to find the filter for each entry and generate a list that makes sense\n self.info['point_source'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.point_source, 'point source')))\n else:\n self.info['point_source'][i] = None\n if self.galaxyListFile[i] is not None:\n self.info['galaxyListFile'][i] = 
os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.galaxyListFile, 'galaxy')))\n else:\n self.info['galaxyListFile'][i] = None\n if self.extended[i] is not None:\n self.info['extended'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.extended, 'extended')))\n else:\n self.info['extended'][i] = None\n if self.movingTarg[i] is not None:\n self.info['movingTarg'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTarg, 'moving point source target')))\n else:\n self.info['movingTarg'][i] = None\n if self.movingTargSersic[i] is not None:\n self.info['movingTargSersic'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargSersic, 'moving sersic target')))\n else:\n self.info['movingTargSersic'][i] = None\n if self.movingTargExtended[i] is not None:\n self.info['movingTargExtended'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargExtended, 'moving extended target')))\n else:\n self.info['movingTargExtended'][i] = None\n if self.movingTargToTrack[i] is not None:\n self.info['movingTargToTrack'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargToTrack, 'non-sidereal moving target')))\n else:\n self.info['movingTargToTrack'][i] = None\n if self.convolveExtended is True:\n self.info['convolveExtended'] = [True] * n_exposures", "def render_sources(self, src_dict):\n pass", "def run_extraction(self):\n self.background_estimator = ReflectedRegionsBackgroundEstimator(\n observations=self.observations, **self.config[\"background\"]\n )\n self.background_estimator.run()\n\n self.extraction = SpectrumExtraction(\n observations=self.observations,\n bkg_estimate=self.background_estimator.result,\n **self.config[\"extraction\"]\n )\n\n self.extraction.run()", "def main(argv):\n logging.basicConfig(format=\"%(message)s\", level=logging.INFO, stream=sys.stdout)\n logger = logging.getLogger(\"demo4\")\n\n # Define some parameters we'll use below and make directories if needed.\n cat_file_name = os.path.join('input','galsim_default_input.asc')\n if not os.path.isdir('output'):\n os.mkdir('output')\n multi_file_name = os.path.join('output','multi.fits')\n\n random_seed = galsim.BaseDeviate(8241573).raw()\n sky_level = 1.e6 # ADU / arcsec^2\n pixel_scale = 1.0 # arcsec / pixel (size units in input catalog are pixels)\n gal_flux = 1.e6 # arbitrary choice, makes nice (not too) noisy images\n gal_g1 = -0.009 #\n gal_g2 = 0.011 #\n\n # the fraction of flux in each component\n # 40% is in the bulge, 60% in a disk. 70% of that disk light is placed\n # into point sources distributed as a random walk\n\n bulge_frac = 0.4\n disk_frac = 0.6\n knot_frac = 0.42\n smooth_disk_frac = 0.18\n\n # number of knots of star formation. 
To simulate a nice irregular (all the\n # flux is in knots) we find ~100 is a minimum number needed, but we will\n # just use 10 here to make the demo run fast.\n\n n_knots = 10\n\n xsize = 64 # pixels\n ysize = 64 # pixels\n\n logger.info('Starting demo script 4 using:')\n logger.info(' - parameters taken from catalog %r',cat_file_name)\n logger.info(' - Moffat PSF (parameters from catalog)')\n logger.info(' - pixel scale = %.2f',pixel_scale)\n logger.info(' - Bulge + Disc galaxies (parameters from catalog)')\n logger.info(' - 100 Point sources, distributed as random walk')\n logger.info(' - Applied gravitational shear = (%.3f,%.3f)',gal_g1,gal_g2)\n logger.info(' - Poisson noise (sky level = %.1e).', sky_level)\n\n # Read in the input catalog\n cat = galsim.Catalog(cat_file_name)\n\n\n # save a list of the galaxy images in the \"images\" list variable:\n images = []\n for k in range(cat.nobjects):\n # Initialize the (pseudo-)random number generator that we will be using below.\n # Use a different random seed for each object to get different noise realizations.\n # Using sequential random seeds here is safer than it sounds. We use Mersenne Twister\n # random number generators that are designed to be used with this kind of seeding.\n # However, to be extra safe, we actually initialize one random number generator with this\n # seed, generate and throw away two random values with that, and then use the next value\n # to seed a completely different Mersenne Twister RNG. The result is that successive\n # RNGs created this way produce very independent random number streams.\n rng = galsim.BaseDeviate(random_seed+k+1)\n\n # Take the Moffat beta from the first column (called 0) of the input catalog:\n # Note: cat.get(k,col) returns a string. To get the value as a float, use either\n # cat.getFloat(k,col) or float(cat.get(k,col))\n beta = cat.getFloat(k,0)\n # A Moffat's size may be either scale_radius, fwhm, or half_light_radius.\n # Here we use fwhm, taking from the catalog as well.\n fwhm = cat.getFloat(k,1)\n # A Moffat profile may be truncated if desired\n # The units for this are expected to be arcsec (or specifically -- whatever units\n # you are using for all the size values as defined by the pixel_scale).\n trunc = cat.getFloat(k,4)\n # Note: You may omit the flux, since the default is flux=1.\n psf = galsim.Moffat(beta=beta, fwhm=fwhm, trunc=trunc)\n\n # Take the (e1, e2) shape parameters from the catalog as well.\n psf = psf.shear(e1=cat.getFloat(k,2), e2=cat.getFloat(k,3))\n\n # Galaxy is a bulge + disk(+knots) with parameters taken from the catalog:\n\n # put some fraction of the disk light into knots of star formation\n\n disk_hlr = cat.getFloat(k,5)\n disk_e1 = cat.getFloat(k,6)\n disk_e2 = cat.getFloat(k,7)\n bulge_hlr = cat.getFloat(k,8)\n bulge_e1 = cat.getFloat(k,9)\n bulge_e2 = cat.getFloat(k,10)\n\n smooth_disk = galsim.Exponential(flux=smooth_disk_frac, half_light_radius=disk_hlr)\n\n knots = galsim.RandomKnots(n_knots, half_light_radius=disk_hlr, flux=knot_frac, rng=rng)\n\n disk = galsim.Add([smooth_disk, knots])\n disk = disk.shear(e1=disk_e1, e2=disk_e2)\n\n # the rest of the light goes into the bulge\n bulge = galsim.DeVaucouleurs(flux=bulge_frac, half_light_radius=bulge_hlr)\n bulge = bulge.shear(e1=bulge_e1, e2=bulge_e2)\n\n # The flux of an Add object is the sum of the component fluxes.\n # Note that in demo3.py, a similar addition was performed by the binary operator \"+\".\n gal = galsim.Add([disk, bulge])\n\n # This flux may be overridden by withFlux. 
The relative fluxes of the components\n # remains the same, but the total flux is set to gal_flux.\n gal = gal.withFlux(gal_flux)\n gal = gal.shear(g1=gal_g1, g2=gal_g2)\n\n # The center of the object is normally placed at the center of the postage stamp image.\n # You can change that with shift:\n gal = gal.shift(dx=cat.getFloat(k,11), dy=cat.getFloat(k,12))\n\n final = galsim.Convolve([psf, gal])\n\n # Draw the profile\n image = galsim.ImageF(xsize, ysize)\n final.drawImage(image, scale=pixel_scale)\n\n # Add Poisson noise to the image:\n image.addNoise(galsim.PoissonNoise(rng, sky_level * pixel_scale**2))\n\n logger.info('Drew image for object at row %d in the input catalog'%k)\n \n # Add the image to our list of images\n images.append(image)\n \n # Now write the images to a multi-extension fits file. Each image will be in its own HDU.\n galsim.fits.writeMulti(images, multi_file_name)\n logger.info('Images written to multi-extension fits file %r',multi_file_name)", "def _push_one(self, f, **kwargs):\n\n # Copy the metadata for modifying and open the ann file\n meta = kwargs.copy()\n desc = read_InSar_annotation(f)\n\n # Expand the path for the geotiffs\n tiff_dir = abspath(expanduser(self.geotiff_dir))\n\n # form the pattern to look for and grab the tifs\n pattern = '.'.join(basename(f).split('.')[0:-1]) + '*.tif'\n rasters = glob.glob(join(tiff_dir, pattern))\n\n # Submit each geotif, modifying meta on the fly\n for r in rasters:\n # Grab information from the filename\n f_pieces = r.split('.')\n component = f_pieces[-2] # Real or imaginary component\n data_abbr = f_pieces[-3] # Key to the data name\n dname = self.dname_map[data_abbr] # Data type in db\n\n # For the data type\n meta['type'] = 'insar ' + dname.split(' ')[0]\n\n if dname == 'interferogram':\n meta['type'] += (' ' + component)\n\n # Assign the date for the respective flights\n if 'amplitude' in dname:\n meta['date'] = desc['start time of acquisition for pass {}'.format(\n dname.split(' ')[-1])]['value']\n\n # Derived products always receive the date of the last overpass\n else:\n meta['date'] = desc['start time of acquisition for pass 2']['value']\n\n # Assign only the date not the date and time\n meta['date'] = meta['date'].date()\n\n # Assign units\n meta['units'] = desc['{} units'.format(\n dname.split(' ')[0])]['value']\n\n # Flexibly form a comment for each of the products for dates\n comment = get_InSar_flight_comment(dname, desc)\n # add which dem was used which dictates the file name convert e.g.\n # ...VV_01.int.grd\n comment += ', DEM used = {}'.format(\n desc['dem used in processing']['value'])\n # Add the polarization to the the comments\n comment += ', Polarization = {}'.format(\n desc['polarization']['value'])\n meta['description'] = comment\n\n self.log.info('Uploading {} as {}...'.format(r, meta['type']))\n\n d = self.UploaderClass(r, **meta)\n\n # Submit the data to the database\n d.submit(self.session)\n\n # Uploaded set\n self.uploaded += 1", "def flyc_nofly_extract(po, fwmdlfile):\n (po.nfzone_pos, po.nfzone_count) = flyc_nofly_zone_pos_search(po, fwmdlfile, 0, po.expect_func_align, po.expect_data_align, po.min_match_accepted)\n if po.nfzone_pos < 0:\n raise ValueError(\"Flight controller no fly zones array signature not detected in input file.\")\n (po.nfcord_pos, po.nfcord_count) = flyc_nofly_cord_pos_search(po, fwmdlfile, 0, po.expect_func_align, po.expect_data_align, po.min_match_accepted)\n if po.nfcord_pos < 0:\n raise ValueError(\"Flight controller no fly coords array signature not detected in 
input file.\")\n nfzones = flyc_nofly_merged_zones_array(po, fwmdlfile)\n if (po.verbose > 0):\n print(\"{}: Creating JSON file...\".format(po.mdlfile))\n inffile = open(po.inffile, \"w\")\n inffile.write(\"{\\\"release_limits\\\":[\\n\")\n i = 0\n for parprop in nfzones:\n inffile.write(\"{\")\n for ppname in ('area_id','type','shape',):\n inffile.write(\"\\\"{:s}\\\":{:d}\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('lat','lng',):\n inffile.write(\"\\\"{:s}\\\":{:06f}\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('radius','warning','level','disable','updated_at','begin_at','end_at',):\n inffile.write(\"\\\"{:s}\\\":{:d}\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('name',):\n inffile.write(\"\\\"{:s}\\\":\\\"{:s}\\\"\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('storage','country',):\n inffile.write(\"\\\"{:s}\\\":{:d}\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('city',):\n inffile.write(\"\\\"{:s}\\\":\\\"{:s}\\\"\".format(ppname,parprop[ppname]))\n inffile.write(\",\")\n for ppname in ('points',):\n inffile.write(\"\\\"{:s}\\\":{:s}\".format(ppname,parprop[ppname] if parprop[ppname] is not None else \"null\"))\n if (i+1 < len(nfzones)):\n inffile.write(\"},\\n\")\n else:\n inffile.write(\"}\\n\")\n i += 1\n inffile.write(\"]}\\n\")\n inffile.close()\n if (po.verbose > 0):\n print(\"{}: Done exporting.\".format(po.mdlfile))", "def _init_extractor_from_source(self, source_name):\n try:\n source = [s for s in self.sources if s['id'] == source_name][0]\n except IndexError:\n source = None\n\n if source is None:\n return\n\n extractor_klass = load_object(source['extractor'])\n return extractor_klass(source)", "def MakeFieldmaps(self):\n if self.verbose:\n print 'Compute fieldmaps.'\n for entry in self.info:\n if self.info[entry]['type'] == 'fmap':\n if self.info[entry]['imgfile'] == None:\n# Fieldmap data not found.\n return\n# Make a magnitude image for use in checking registration.\n cmd = 'convert_file -f0 -m0 %s %s nii' % \\\n (entry, self.info[entry]['magfile'])\n self.CheckExec(cmd, [self.info[entry]['magfile'] + '.nii'])\n\n# Make fieldmap. 
Use separate loop in case make_fmap aborts.\n for entry in self.info:\n if self.info[entry]['type'] == 'fmap':\n fmapname = self.info[entry]['imgfile']\n if not os.path.exists('%s.nii' % fmapname) or self.redo:\n# Couldn't find or existing fmap, compute a new one.\n if self.verbose:\n extra_args = '-v'\n else:\n extra_args = ''\n if self.info[entry]['correct_fmap_phase'] == 'force':\n extra_args += ' --force-slicecorr'\n elif self.info[entry]['correct_fmap_phase'] == 'omit':\n extra_args += ' --omit-slicecorr'\n cmd = 'make_fmap %s %s %s' % (extra_args, entry, fmapname)\n# error = self.ExecCmd(cmd, halt_on_error=False)\n if self.no_fmapcorr:\n halt_on_error = False\n else:\n halt_on_error = True\n error = self.CheckExec(cmd, ['%s.nii' % fmapname], \\\n halt_on_error=halt_on_error)\n if error:\n self.info[entry]['valid'] = False\n del self.fmaps[entry]", "def __init__(self, filenum, source):\n self.source_body = {\n 'filenum': filenum,\n 'source': source\n }", "def main():\n print(\"Program version: 1.5\")\n StartTime = datetime.now()\n args = parseArguments()\n\n verbose = args.verbose\n images = args.images\n ignore_warnings = args.ignore_warnings\n if(args.silent):\n verbose = False\n images = False\n ignore_warnings = True\n\n if(args.images):\n plt.ioff()\n\n if(args.ignore_warnings):\n warnings.simplefilter('ignore', UserWarning)\n\n #sample header keywords\n # OBJECT = 'P016+03_P1_JKdeep' / Original target\n # RA = ' 01:06:37.759' / 01:06:37.7 RA (J2000) pointing\n # DEC = ' 03:32:36.096' / 03:32:36.0 DEC (J2000) pointing\n # EQUINOX = 2000. / Standard FK5 (years)\n # RADECSYS= 'FK5 ' / Coordinate reference frame\n # CRVAL1 = 16.65733 / 01:06:37.7, RA at ref pixel\n # CRVAL2 = 3.54336 / 03:32:36.0, DEC at ref pixel\n # CRPIX1 = 447. /Ref pixel in X\n # CRPIX2 = 452. / Ref pixel in Y\n # CDELT1 = -8.0000000000000E-5 / SS arcsec per pixel in RA\n # CDELT2 = 8.00000000000003E-5 / SS arcsec per pixel in DEC\n # CTYPE1 = 'RA---TAN' / pixel coordinate system\n # CTYPE2 = 'DEC--TAN' / pixel coordinate system\n # PC1_1 = 0.000000 / Translation matrix element\n # PC1_2 = 1.000000 / Translation matrix element\n # PC2_1 = -1.000000 / Translation matrix element\n # PC2_2 = 0.000000 / Translation matrix element\n\n fits_image_filenames = args.input\n\n #if directory given search for appropriate fits files\n\n if(os.path.isdir(fits_image_filenames[0])):\n print(\"detected a directory. 
Will search for fits files in it\")\n path = fits_image_filenames[0]\n fits_image_filenames = []\n for file in os.listdir(path):\n if file.endswith(\".fits\") and \"_astro\" not in file:\n fits_image_filenames.append(path+\"/\"+file)\n print(fits_image_filenames)\n\n multiple = False\n if(len(fits_image_filenames)>1):\n multiple = True\n not_converged = []\n converged_counter = 0\n for fits_image_filename in fits_image_filenames:\n\n result,_ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=0, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent =args.silent, sigma_threshold_for_source_detection= args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if((not result) and args.rotation_scaling):\n print(\"Did not converge. Will try again with full rotation and scaling\")\n result, _ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=args.rotation_scaling, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent=args.silent, sigma_threshold_for_source_detection=args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if(result):\n print(\"Astrometry was determined to be good.\")\n converged_counter = converged_counter+1\n else:\n print(\"Astrometry was determined to be bad.\")\n not_converged.append(fits_image_filename)\n if(args.save_bad_result):\n print(\"Result was saved anyway\")\n else:\n print(\"Result was not saved.\")\n # print(\"\")\n # print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n # print(\"> Astrometry for {} \".format(fits_image_filename))\n #\n # with fits.open(fits_image_filename) as hdul:\n # #print(hdul.info())\n # if(args.verbose):\n # print(\"if image is not at first position in the fits file the program will break later on\")\n # #print(hdul[0].header)\n #\n # hdu = hdul[0]\n # #hdu.verify('fix')\n # hdr = hdu.header\n #\n #\n # image_or = hdul[0].data.astype(float)\n # median = np.nanmedian(image_or)\n # image_or[np.isnan(image_or)]=median\n # image = image_or - median\n #\n # observation = find_sources(image, args.vignette)\n # #print(observation)\n #\n # positions = (observation['xcenter'], observation['ycenter'])\n # apertures = CircularAperture(positions, r=4.)\n #\n #\n # #world coordinates\n # print(\">Info found in the file -- (CRVAl: position of central pixel (CRPIX) on the sky)\")\n # print(WCS(hdr))\n #\n # hdr[\"NAXIS1\"] = image.shape[0]\n # hdr[\"NAXIS2\"] = image.shape[1]\n #\n # #wcsprm = Wcsprm(hdr.tostring().encode('utf-8')) #everything else gave me errors with python 3, seemed to 
make problems with pc conversios, so i wwitched to the form below\n # wcsprm = WCS(hdr).wcs\n # wcsprm_original = WCS(hdr).wcs\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n # wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR = read_additional_info_from_header(wcsprm, hdr, args.ra, args.dec, args.projection_ra, args.projection_dec)\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n #\n # #print(wcsprm)\n # #wcsprm.pc = [[2, 0],[0,1]]\n #\n #\n # #Possibly usefull examples of how to use wcsprm:\n # #print(wcsprm.set())\n # #print(wcsprm.get_pc())\n # #pc = wcsprm.get_pc()\n # #print(np.linalg.det(pc))\n # #print(wcsprm.get_cdelt())\n # #wcs.fix()\n # #print(wcsprm.print_contents())\n # #print(repr(hdr.update(wcsprm.to_header().encode('utf-8')))) #not working\n #\n # #hdu.verify(\"fix\")\n # #print(repr(hdr))\n # #wcs.wcs_pix2world(pixcrd, 1)\n # #wcs.wcs_world2pix(world, 1)\n # #wcs.wcs.crpix = [-234.75, 8.3393]\n # # wcs.wcs.cdelt = np.array([-0.066667, 0.066667])\n # # wcs.wcs.crval = [0, -90]\n # # wcs.wcs.ctype = [\"RA---AIR\", \"DEC--AIR\"]\n # # wcs.wcs.set_pv([(2, 1, 45.0)])\n # # For historical compatibility, three alternate specifications of the linear transformations\n # # are available in wcslib. The canonical PCi_ja with CDELTia, CDi_ja, and the deprecated CROTAia\n # # keywords. Although the latter may not formally co-exist with PCi_ja,\n # # the approach here is simply to ignore them if given in conjunction with PCi_ja.\n # # has_pc, has_cd and has_crota can be used to determine which of these alternatives are present in the header.\n # # These alternate specifications of the linear transformation matrix are translated immediately to PCi_ja by set\n # # and are nowhere visible to the lower-level routines. In particular, set resets cdelt to unity if CDi_ja is present\n # # (and no PCi_ja). If no CROTAia is associated with the latitude axis, set reverts to a unity PCi_ja matrix.\n #\n #\n #\n #\n #\n # #get rough coordinates\n # #print(hdr[\"RA\"])\n # #coord = SkyCoord(hdr[\"RA\"], hdr[\"DEC\"], unit=(u.hourangle, u.deg), frame=\"icrs\")\n # coord = SkyCoord(wcsprm.crval[0], wcsprm.crval[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # if(not PIXSCALE_UNCLEAR):\n # if(wcsprm.crpix[0] < 0 or wcsprm.crpix[1] < 0 or wcsprm.crpix[0] > image.shape[0] or wcsprm.crpix[1] > image.shape[1] ):\n # print(\"central value outside of the image, moving it to the center\")\n # coord_radec = wcsprm.p2s([[image.shape[0]/2, image.shape[1]/2]], 0)[\"world\"][0]\n # coord = SkyCoord(coord_radec[0], coord_radec[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # #print(wcsprm)\n #\n #\n #\n # #better: put in nice wrapper! 
with repeated tries and maybe try synchron!\n # print(\">Dowloading catalog data\")\n # radius = u.Quantity(fov_radius, u.arcmin)#will prob need more\n # catalog_data = query.get_data(coord, radius, args.catalog)\n # #reference = reference.query(\"mag <20\")\n # max_sources = 500\n # if(INCREASE_FOV_FLAG):\n # max_sources= max_sources*2.25 #1.5 times the radius, so 2.25 the area\n # if(catalog_data.shape[0]>max_sources):\n # catalog_data = catalog_data.nsmallest(400, \"mag\")\n #\n # if(args.catalog == \"GAIA\" and catalog_data.shape[0] < 5):\n # print(\"GAIA seems to not have enough objects, will enhance with PS1\")\n # catalog_data2 = query.get_data(coord, radius, \"PS\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n # elif(args.catalog == \"PS\" and (catalog_data is None or catalog_data.shape[0] < 5)):\n # print(\"We seem to be outside the PS footprint, enhance with GAIA data\")\n # catalog_data2 = query.get_data(coord, radius, \"GAIA\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n #\n # #remove duplicates in catalog?\n #\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n #\n # #plotting what we have, I keep it in the detector field, world coordinates are more painfull to plot\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Input for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Input - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n #\n # plt.xlim(-200,image.shape[0]+200)\n # plt.ylim(-200,image.shape[1]+200)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_before.pdf\")\n #\n # ###tranforming to match the sources\n # print(\"---------------------------------\")\n # print(\">Finding the transformation\")\n # if(args.rotation_scaling):\n # print(\"Finding scaling and rotation\")\n # wcsprm = register.get_scaling_and_rotation(observation, catalog_data, wcsprm, scale_guessed=PIXSCALE_UNCLEAR, verbose=args.verbose)\n # if(args.xy_transformation):\n # print(\"Finding offset\")\n # wcsprm,_,_ = register.offset_with_orientation(observation, catalog_data, wcsprm, fast=False , INCREASE_FOV_FLAG=INCREASE_FOV_FLAG, verbose= args.verbose)\n #\n # #correct subpixel error\n # obs_x, obs_y, cat_x, cat_y, distances = register.find_matches(observation, catalog_data, wcsprm, threshold=3)\n # rms = np.sqrt(np.mean(np.square(distances)))\n # best_score = len(obs_x)/(rms+10) #start with current best score\n # fine_transformation = False\n # if(args.fine_transformation):\n # for i in [2,3,5,8,10,6,4, 20,2,1,0.5]:\n # wcsprm_new, score = register.fine_transformation(observation, catalog_data, wcsprm, threshold=i)\n # if(score> best_score):\n # wcsprm 
= wcsprm_new\n # best_score = score\n # fine_transformation = True\n # if not fine_transformation:\n # print(\"Fine transformation did not improve result so will be discarded.\")\n # else:\n # print(\"Fine transformation applied to improve result\")\n # #register.calculate_rms(observation, catalog_data,wcs)\n #\n # #make wcsprim more physical by moving scaling to cdelt, out of the pc matrix\n # wcs =WCS(wcsprm.to_header())\n # if(args.verbose):\n # print(wcs)\n #\n # from astropy.wcs import utils\n # scales = utils.proj_plane_pixel_scales(wcs)\n # print(scales)\n # cdelt = wcsprm.get_cdelt()\n # print(cdelt)\n # scale_ratio = scales/cdelt\n # #print(scale_ratio)\n # pc = np.array(wcsprm.get_pc())\n # pc[0,0] = pc[0,0]/scale_ratio[0]\n # pc[1,0] = pc[1,0]/scale_ratio[1]\n # pc[0,1] = pc[0,1]/scale_ratio[0]\n # pc[1,1] = pc[1,1]/scale_ratio[1]\n # wcsprm.pc = pc\n # wcsprm.cdelt = scales\n # if(args.verbose):\n # print(\"moved scaling info to CDelt\")\n # print(WCS(wcsprm.to_header()))\n #\n # #WCS difference before and after\n # print(\"> Compared to the input the Wcs was changed by: \")\n # scales_original = utils.proj_plane_pixel_scales(WCS(hdr))\n # print(\"WCS got scaled by {} in x direction and {} in y direction\".format(scales[0]/scales_original[0], scales[1]/scales_original[1]))\n # #sources:\n # #https://math.stackexchange.com/questions/2113634/comparing-two-rotation-matrices\n # #https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249\n # def unit_vector(vector):\n # \"\"\" Returns the unit vector of the vector. \"\"\"\n # return vector / max(np.linalg.norm(vector), 1e-10)\n # def matrix_angle( B, A ):\n # \"\"\" comment cos between vectors or matrices \"\"\"\n # Aflat = A.reshape(-1)\n # Aflat = unit_vector(Aflat)\n # Bflat = B.reshape(-1)\n # Bflat = unit_vector(Bflat)\n # #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n # return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))\n # #print(matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360)\n # rotation_angle = matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360\n # if((wcsprm.get_pc() @ wcsprm_original.get_pc() )[0,1] > 0):\n # text = \"counterclockwise\"\n # else:\n # text = \"clockwise\"\n # print(\"Rotation of WCS by an angle of {} deg \".format(rotation_angle)+text)\n # old_central_pixel = wcsprm_original.s2p([wcsprm.crval], 0)[\"pixcrd\"][0]\n # print(\"x offset: {} px, y offset: {} px \".format(wcsprm.crpix[0]- old_central_pixel[0], wcsprm.crpix[1]- old_central_pixel[1]))\n #\n #\n # #check final figure\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Result for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Result - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_after.pdf\")\n #\n # print(\"--- Evaluate how good the transformation is ----\")\n # 
register.calculate_rms(observation, catalog_data,wcsprm)\n #\n #\n # #updating file\n # write_wcs_to_hdr(fits_image_filename, wcsprm)\n #\n #\n # print(\"overall time taken\")\n # print(datetime.now()-StartTime)\n # if(args.images):\n # plt.show()\n if(multiple):\n print(\">> Final report:\")\n print(\"Processed {} files, {} of them did converge. The following files failed:\".format(len(fits_image_filenames), converged_counter))\n print(not_converged)\n print(\"-- finished --\")", "def __init__(self, file_pattern, validate=True, **nucleus_kwargs):\n\n super(ReadGenomicsFile, self).__init__()\n self._source = self._source_class(\n file_pattern, validate=validate, **nucleus_kwargs)", "def crunch(self):\n while True:\n lst = self.want_line(r'\\s*\\.file\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.globl\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.ident\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.section\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.type\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.size\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.(bss)\\s+')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.(data)\\s+')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.(text)\\s+')\n if lst:\n self.erase(lst[0])\n continue\n break\n if osarch_is_amd64():\n self.crunch_amd64(lst)\n elif osarch_is_ia32():\n self.crunch_ia32(lst)\n self.__tag = None", "def main():\n tmp_dir = xm.constants['dir_tmp']\n exr_f = join(tmp_dir, 'test.exr')\n exr = EXR(exr_f)\n exr.extract_normal(join(tmp_dir, 'test.png'), vis=True)", "def Run(self, args):\n\n with RecoverFromDiagnosticException(args.image_name):\n img_name = util.GetDigestFromName(args.image_name)\n return util.TransformContainerAnalysisData(img_name,\n args.occurrence_filter)", "def get_source(f):\n\n if isinstance(f, types.FunctionType):\n\n # lambda function?\n # use inspect module\n # need to clean out lambda...\n if f.__name__ == '<lambda>':\n # interpreter in interactive mode or not?\n # beware jupyter notebook also returns true for interactive mode!\n if is_in_interactive_mode() and not in_jupyter_notebook() and not in_google_colab():\n\n # import here, avoids also trouble with jupyter notebooks\n from tuplex.utils.interactive_shell import TuplexShell\n\n # for this to work, a dummy shell has to be instantiated\n # through which all typing occurs. Thus, the history can\n # be properly captured for source code lookup.\n # shell is a borg object, i.e. singleton alike behaviour\n shell = TuplexShell()\n return shell.get_lambda_source(f)\n else:\n # does lambda have globals?\n # if yes, then extract won't work IFF there's more than one lambda per line!\n # => display warning then.\n # => change hashing method...\n f_globs = get_globals(f)\n f_filename = f.__code__.co_filename\n f_lineno = f.__code__.co_firstlineno\n f_colno = f.__code__.co_firstcolno if hasattr(f.__code__, 'co_firstcolno') else None\n\n src_info = inspect.getsourcelines(f)\n\n vault.extractAndPutAllLambdas(src_info,\n f_filename,\n f_lineno,\n f_colno,\n f_globs)\n return vault.get(f, f_filename, f_lineno, f_colno, f_globs)\n else:\n # works always, because functions can be only defined on a single line!\n return get_function_code(f)\n else:\n\n # TODO: for constants, create dummy source code, i.e. 
lambda x: 20\n # when desired to retrieve a constant or so!\n\n return ''", "def testProcessSource(self):\n test_artifacts_path = shared_test_lib.GetTestFilePath(['artifacts'])\n self._SkipIfPathNotExists(test_artifacts_path)\n\n test_engine = extraction_engine.ExtractionMultiProcessEngine(\n maximum_number_of_tasks=100)\n test_engine.BuildArtifactsRegistry(test_artifacts_path, None)\n\n test_file_path = self._GetTestFilePath(['ímynd.dd'])\n self._SkipIfPathNotExists(test_file_path)\n\n os_path_spec = path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)\n source_path_spec = path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_TSK, location='/',\n parent=os_path_spec)\n\n session = sessions.Session()\n\n processing_configuration = configurations.ProcessingConfiguration()\n processing_configuration.data_location = shared_test_lib.DATA_PATH\n processing_configuration.parser_filter_expression = 'filestat'\n processing_configuration.task_storage_format = (\n definitions.STORAGE_FORMAT_SQLITE)\n\n with shared_test_lib.TempDirectory() as temp_directory:\n temp_file = os.path.join(temp_directory, 'storage.plaso')\n storage_writer = sqlite_writer.SQLiteStorageFileWriter()\n storage_writer.Open(path=temp_file)\n\n try:\n system_configurations = test_engine.PreprocessSource(\n [source_path_spec], storage_writer)\n\n # The method is named ProcessSourceMulti because pylint 2.6.0 and\n # later gets confused about keyword arguments when ProcessSource\n # is used.\n processing_status = test_engine.ProcessSourceMulti(\n storage_writer, session.identifier, processing_configuration,\n system_configurations, [source_path_spec],\n storage_file_path=temp_directory)\n\n number_of_events = storage_writer.GetNumberOfAttributeContainers(\n 'event')\n number_of_extraction_warnings = (\n storage_writer.GetNumberOfAttributeContainers(\n 'extraction_warning'))\n number_of_recovery_warnings = (\n storage_writer.GetNumberOfAttributeContainers(\n 'recovery_warning'))\n\n parsers_counter = collections.Counter({\n parser_count.name: parser_count.number_of_events\n for parser_count in storage_writer.GetAttributeContainers(\n 'parser_count')})\n\n finally:\n storage_writer.Close()\n\n self.assertFalse(processing_status.aborted)\n\n self.assertEqual(number_of_events, 15)\n self.assertEqual(number_of_extraction_warnings, 0)\n self.assertEqual(number_of_recovery_warnings, 0)\n\n expected_parsers_counter = collections.Counter({\n 'filestat': 15,\n 'total': 15})\n self.assertEqual(parsers_counter, expected_parsers_counter)", "def preprocess(args):\n \n # Set up options\n src = args.src\n dest = args.dest\n collect_path = args.collect_path\n formats = args.formats\n ref_img_path = args.ref_img_path\n width = args.width\n debug = args.debug\n if debug:\n print args.__dict__\n # Make necessary directories if there is not.\n if not os.path.exists(dest):\n os.mkdir(dest)\n if not os.path.exists(collect_path):\n os.mkdir(collect_path)\n\n # Open referce image and trying to find the face in it.\n try:\n ref_img_origin = Image.open(os.path.abspath(ref_img_path))\n except IOError as e:\n print \"[IOError] Can't open the reference imgae: {}\".format(ref_img_path)\n print \"[Info] Terminating....\"\n return 1\n\n face_ref_coor, degree_ref = segment_tools.faces_positions(ref_img_origin)\n \n # Only one face is allowed in referece image. 
Raise error if it isn't.\n # Crop the origin image to get the face image.\n if face_ref_coor.shape[0] > 1:\n raise MultiFaceError(\"Detect multiple faces in reference image. There should be only one face.\")\n face_ref = segment_tools.crop_img(ref_img_origin, face_ref_coor[0], offset = True)\n\n # Adjust that image to make eyes lie on horizontal line.\n try:\n eye_angle = face_align_tools.eyes_horizon_angle(face_ref)\n except segment_tools.NotDetectedError:\n print \"[NotDetectedError] This reference image is not good enough. The program can't make the eyes horizontal.\"\n print \"[NotDetectedError] Pleas use another reference image.\"\n print \"Terminating....\"\n return 1\n\n total_degree = eye_angle + degree_ref\n img_ref_rotated = ref_img_origin.rotate(total_degree, resample = Image.CUBIC)\n face_ref_coor, _ = segment_tools.faces_positions(img_ref_rotated)\n face_ref = segment_tools.crop_img(img_ref_rotated, face_ref_coor[0], offset = True)\n \n # Resize the reference face to desired witdh (but preserve the width/heigh ratio.)\n ref_width, ref_heigh = face_ref.size\n face_ref = face_ref.resize((width, ref_heigh*width/ref_width))\n if debug:\n face_ref.show()\n \n ref_file_name = os.path.basename(ref_img_path)\n face_ref.save(os.path.join(os.path.abspath(dest), \"ref_\" + ref_file_name))\n print \"[Info] Complete preprocess of reference image.\"\n\n # Walk through the source directory.\n print \"[Info] Start processing files in {src}.\".format(src = os.path.abspath(src))\n for rel_path, dir_names, file_names in os.walk(os.path.abspath(src)):\n for filename in file_names:\n if np.any(map(filename.endswith, formats)):\n file_path = os.path.join(os.path.abspath(rel_path), filename)\n print \"[Info] Start processing {file_path}.\".format(file_path = file_path)\n try:\n target_img_origin = Image.open(file_path)\n except IOError as e:\n print \"[IOError] Can not open {}\".format(file_path)\n print \"[Info] Passing this image.\"\n continue\n \n # Try to find faces in target image. If don't, copy it to collection directory.\n try:\n faces_target_coors, degree_target = segment_tools.faces_positions(target_img_origin)\n except segment_tools.NotDetectedError as e:\n print \"[NotDetectedError] Does not find any face in {filename}. Collect it into {collect_path}\".format(filename = filename, collect_path = collect_path)\n target_img_origin.save(os.path.join(os.path.abspath(collect_path), filename))\n continue # Brake loop for not finding any face in the picture.\n\n # Adjust all found faces to make them just.\n target_img_rotated = target_img_origin.rotate(degree_target, resample = Image.CUBIC)\n for face_coor in faces_target_coors:\n temp_img = segment_tools.crop_img(target_img_rotated, face_coor, offset=True)\n try:\n eyes_degree = face_align_tools.eyes_horizon_angle(temp_img)\n except segment_tools.NotDetectedError:\n eyes_degree = 0\n face_target = temp_img.rotate(eyes_degree)\n temp_file_name = random_prefix() + filename\n if debug:\n face_target.show()\n face_target.save(os.path.join(os.path.abspath(dest), temp_file_name))\n temp_aligned_file_name = \"aligned_\" + temp_file_name\n try:\n face_target_aligned = face_align_tools.face_align(face_ref, face_target)\n face_target_aligned.save(os.path.join(os.path.abspath(dest), temp_aligned_file_name))\n except segment_tools.NotDetectedError:\n print \"[AlignError] Can't align face. 
Moving to {collection}.\".format(collection = collect_path)\n face_target.save(os.path.join(os.path.abspath(collect_path), \"not_aligned_\" + temp_file_name))\n print \"[Info] Saving {}\".format(os.path.join(os.path.abspath(collect_path), \"not_aligned_\" + temp_file_name))\n continue\n masked_target_img = segment_tools.mask_img(target_img_rotated, faces_target_coors)\n\n if debug:\n masked_target_img.show()\n masked_target_img.save(\"masked.jpg\")\n \n try:\n while True:\n temp_face_coors, temp_degree = segment_tools.faces_positions(masked_target_img)\n temp_img = masked_target_img.rotate(temp_degree, resample = Image.CUBIC)\n if debug:\n print \"temp_face_coors\", temp_face_coors\n print \"[Info] Multiple faces are found in {file_path}\".format(file_path = file_path)\n for face_coor in temp_face_coors:\n temp_face = segment_tools.crop_img(temp_img, face_coor, offset = True)\n eye_angle = face_align_tools.eyes_horizon_angle(temp_face)\n face_target = temp_face.rotate(eye_angle, resample = Image.CUBIC)\n if debug:\n face_target.show()\n face_target_aligned = face_align_tools.face_align(face_ref, face_target)\n temp_file_name = random_prefix() + filename\n temp_aligned_file_name = \"aligned_\" + temp_file_name\n print \"[Info] Sucessful aligned {}\".format(temp_file_name)\n if debug:\n masked_target_img.show()\n except segment_tools.NotDetectedError:\n file_path = os.path.join(os.path.abspath(rel_path), filename)\n print \"[Info] Complete searching faces in {file_path}\".format(file_path = file_path)", "def main():\n args = utils.read_arguments(__doc__)\n documents = []\n filenames = list(traverse_directory(args[\"input_dirpath\"],'*clean*.txt'))\n labels_dirname = args[\"labels_dirpath\"]\n labels_from_json = get_all_labels_from_json(labels_dirname)\n for filename in tqdm(filenames):\n with AnnotatedIBMFactory(filename) as instance_extractor:\n filename_key = filename.split(\"/\")[-1]\n document = instance_extractor.build_document(\n labels_from_json[filename_key])\n documents.append(document)\n utils.pickle_to_file(documents, args['output_file'])", "def render_source(self, filename, obj):\n raise NotImplementedError()", "def processImage(fpaths_src, label_map, fnames_src, img_idx):\n global counter\n \n n_imgs = len(fpaths_src)\n print(\"Processing %s -- %s/%s (%s%%)\"%(fnames_src[img_idx],counter,n_imgs,round(100.*counter/n_imgs)))\n \n path = fpaths_src[img_idx]\n src_image_raw = Image.open(path, 'r')\n \n # size normalization of the image\n if not (args.resize == None):\n src_image_raw = src_image_raw.resize(size=(int(args.resize), int(args.resize)), resample=Image.BILINEAR)\n \n # convert to writable numpy array\n src_image = np.asarray(src_image_raw, dtype=np.uint8)\n src_image.setflags(write=True)\n \n # some dummy label\n label = -99.99\n # the labels\n if not (label_map == {}):\n # let the label start at 1, instead of 0\n label = int(label_map[fnames_src[img_idx]])+1\n else:\n # add a dummy label (between 0 and 1)\n label = np.random.rand()\n \n image_features = []\n \n # add the original\n image_features.append(generateFeatures(src_image,label,args.knn))\n \n if args.augment == 1:\n print \"Augmenting dataset...\"\n # data augmentation techniques\n rotation_angles = [i for i in xrange(36,360,36)] # samples are transformed by these rotation angles\n \n flip_x = True # data augmentation by flipping around x axis\n flip_y = True # data augmentation by flipping around y axis\n flip_xy= True # data augmentation by flipping around x AND y axis\n \n for angle in rotation_angles:\n 
rot_matrix = cv2.getRotationMatrix2D(\n (src_image.shape[1]/2.,src_image.shape[0]/2.),\n angle,\n 1.0)\n rot_sample_crop = np.array([])\n rot_sample_crop = cv2.warpAffine(src_image,\n rot_matrix,\n (src_image.shape[1],src_image.shape[0]),\n rot_sample_crop,\n cv2.INTER_LINEAR,\n cv2.BORDER_REFLECT_101)\n \n # add the sample to the dataset\n image_features.append(generateFeatures(rot_sample_crop,label,args.knn))\n \n # add 3 flipped copies\n if flip_x:\n rot_sample_crop_x = cv2.flip(rot_sample_crop,0)\n image_features.append(generateFeatures(rot_sample_crop_x,label,args.knn))\n if flip_y:\n rot_sample_crop_y = cv2.flip(rot_sample_crop,1)\n image_features.append(generateFeatures(rot_sample_crop_y,label,args.knn))\n if flip_xy:\n rot_sample_crop_xy = cv2.flip(rot_sample_crop,-1)\n image_features.append(generateFeatures(rot_sample_crop_xy,label,args.knn))\n \n counter+=1\n\n # return a nx128 or nxk matrix for the features of all modifications of this image\n feat_matrix = np.asarray(image_features)\n return feat_matrix", "def test_filter_mapping_file_from_mapping_f(self):\r\n actual = filter_mapping_file_from_mapping_f(\r\n self.tutorial_mapping_f, [\"PC.354\", \"PC.355\"])\r\n expected = \"\"\"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tTreatment\tDOB\tDescription\r\nPC.354\tAGCACGAGCCTA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._354\r\nPC.355\tAACTCGTCGATG\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._355\"\"\"\r\n self.assertEqual(actual, expected)", "def _print_source(f):\n\n @_wraps(f)\n def wrapper(*args, **kwargs):\n source = _getsource(f)\n print(_clean_source(source))\n return f(*args, **kwargs)\n\n return wrapper", "def _run_file_contents(contents: dict, strict_fn, debug_print):\n\n default_fns_args = {\n \"adjust_focus\": {\"on\": \"hexbin\"},\n \"adjust_positioning\": True,\n \"adjust_opacity\": True\n }\n\n input_fns = contents.pop(\"functions\", {})\n for k, v in default_fns_args.items():\n if k not in input_fns:\n input_fns[k] = v\n\n output = contents.pop(\"output\", False)\n display = contents.pop(\"display\", True)\n builder = PlotBuilder.builder_from_dict(**contents)\n debug_print(\"* all layers loaded\")\n\n for k, v in input_fns.items():\n if v:\n args, kwargs = parse_args_kwargs(v)\n try:\n _fn_map[k](builder, *args, **kwargs)\n debug_print(f\"* invoked function '{_fn_map[k].__name__}'.\\nargs: {args}\\nkwargs: {kwargs}\")\n except Exception as e:\n try:\n debug_print(f\"* error while performing '{_fn_map[k].__name__}'.\\nerror: {e}\")\n strict_fn(e)\n except KeyError as f:\n debug_print(f\"* no such function as '{k}'.\")\n strict_fn(f)\n\n builder.finalize(raise_errors=False)\n\n if output:\n args, kwargs = parse_args_kwargs(output)\n builder.output(*args, **kwargs)\n debug_print(\"* figure output.\")\n\n if display:\n args, kwargs = parse_args_kwargs(display)\n builder.display(*args, **kwargs)\n debug_print(\"* figure displayed.\")\n\n return builder.get_plot_status()", "def extract_features_static(self):\n self.feature_static_metadata()\n self.feature_static_signature()\n self.feature_static_heuristic()\n self.feature_static_packer()\n self.feature_static_pef()\n self.feature_static_imports()", "def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = 
transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)", "def collect_ftmap(cls):\n for obj in pm.get_object_list():\n if obj.endswith(\".pdb\"):\n new_name = obj[:-4].replace(\"crosscluster\", \"consensus\")\n pm.set_name(obj, new_name)\n yield from cls.collect()", "def routines(args, infile):\n\n # -- Open the tarfile\n tar = tarfile.open(infile)\n\n # -- Set the model year string\n fyear = str(infile.split(\"/\")[-1].split(\".\")[0])\n print(\"Processing \" + fyear)\n\n # -- Get list of components to process\n comps = args.component\n\n # -- Atmospheric Fields\n modules = {\n \"atmos_month\": \"Atmos\",\n \"atmos_co2_month\": \"Atmos\",\n \"atmos_month_aer\": \"AtmosAer\",\n \"aerosol_month_cmip\": \"AeroCMIP\",\n }\n if any(comp in comps for comp in [\"atmos\", \"all\"]):\n averagers.cubesphere.xr_average(fyear, tar, modules)\n\n # -- Land Fields\n modules = {\"land_month\": \"Land\"}\n if any(comp in comps for comp in [\"land\", \"all\"]):\n averagers.land_lm4.xr_average(fyear, tar, modules)\n\n # -- Ice\n modules = {\"ice_month\": \"Ice\"}\n if any(comp in comps for comp in [\"ice\", \"all\"]):\n averagers.ice.xr_average(fyear, tar, modules)\n\n # -- Ocean\n fname = f\"{fyear}.ocean_scalar_annual.nc\"\n if any(comp in comps for comp in [\"ocean\", \"all\"]):\n if tar_member_exists(tar, fname):\n print(f\"{fyear}.ocean_scalar_annual.nc\")\n fdata = nctools.extract_from_tar(tar, fname, ncfile=True)\n extract_ocean_scalar.mom6(fdata, fyear, \"./\")\n fdata.close()\n\n # -- OBGC\n modules = {\n \"ocean_cobalt_sfc\": \"OBGC\",\n \"ocean_cobalt_misc\": \"OBGC\",\n \"ocean_cobalt_tracers_year\": \"OBGC\",\n \"ocean_cobalt_tracers_int\": \"OBGC\",\n \"ocean_bling\": \"OBGC\",\n \"ocean_bling_cmip6_omip_2d\": \"OBGC\",\n \"ocean_bling_cmip6_omip_rates_year_z\": \"OBGC\",\n \"ocean_bling_cmip6_omip_sfc\": \"OBGC\",\n \"ocean_bling_cmip6_omip_tracers_month_z\": \"OBGC\",\n \"ocean_bling_cmip6_omip_tracers_year_z\": \"OBGC\",\n }\n if any(comp in comps for comp in [\"obgc\", \"all\"]):\n averagers.tripolar.xr_average(fyear, tar, modules)\n\n # -- AMOC\n if any(comp in comps for comp in [\"amoc\", \"all\"]):\n diags.amoc.mom6_amoc(fyear, tar)\n\n # -- Close out the tarfile handle\n tar.close()\n\n # -- Do performance timing\n #try:\n # infile = infile.replace(\"/history/\", \"/ascii/\")\n # infile = infile.replace(\".nc.tar\", \".ascii_out.tar\")\n # label = \"Timing\"\n # if os.path.exists(infile):\n # diags.fms.timing(infile, fyear, \"./\", label)\n #except RuntimeError:\n # pass", "def main(unused_argv):\n\n # Read the scene file.\n with open(FLAGS.scene_path, 'r') as file_id:\n scenes = json.load(file_id)\n\n # Read the synonyms file.\n with open(FLAGS.synonym_path, 'r') as file_id:\n synonyms = json.load(file_id)\n sorter = lambda x: len(x[0].split(' '))\n\n # Read the metainformation file.\n with open(FLAGS.metainfo_path, 'r') as file_id:\n gvars.METAINFO = json.load(file_id)\n tag_inv_map = {attr: tag for tag, attr in gvars.METAINFO['tag_map'].items()\n if tag != '<P>'}\n gvars.METAINFO['tag_inv_map'] = tag_inv_map\n 
gvars.METAINFO['synonym_keys'] = sorted(synonyms.items(),\n key=sorter, reverse=True)\n\n # Add ids to objects.\n scenes = utils.add_object_ids(scenes)\n scenes = utils.clean_object_attributes(scenes)\n\n # Read the caption templates.\n template_paths = os.listdir(FLAGS.caption_template_root)\n cap_templates = []\n for ii in template_paths:\n with open(os.path.join(FLAGS.caption_template_root, ii), 'r') as file_id:\n cur_templates = json.load(file_id)\n cap_templates.extend(cur_templates)\n #utils.pretty_print_templates(cap_templates, 1)\n\n # Read the question templates.\n template_paths = os.listdir(FLAGS.question_template_root)\n ques_templates = []\n for ii in template_paths:\n with open(os.path.join(FLAGS.question_template_root, ii), 'r') as file_id:\n cur_templates = json.load(file_id)\n ques_templates.extend(cur_templates)\n #utils.pretty_print_templates(ques_templates, 1)\n\n # 1. Check if there a scene_id_file specified.\n # 2. Check if num_images is -1\n if FLAGS.scene_id_file != '':\n with open(FLAGS.scene_id_file, 'r') as file_id:\n missing_ids = [int(ii.strip('\\n')) for ii in file_id.readlines()]\n print('Dialogs missing for scenes: %d' % len(missing_ids))\n\n # Create a image_index -> scenes list index dictionary\n image_list_id_dict = {ii['image_index']: index\n for index, ii in enumerate(scenes['scenes'])}\n scenes_subset = [scenes['scenes'][image_list_id_dict[scene_id]]\n for scene_id in missing_ids]\n\n elif FLAGS.num_images == -1:\n scenes_subset = scenes['scenes']\n\n else:\n scenes_subset = scenes['scenes'][0: FLAGS.num_images]\n\n # BFS for each scene.\n if FLAGS.num_workers == 1:\n # Single thread version.\n dialogs = []\n for index, scene in enumerate(scenes_subset):\n cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime())\n print('Generating [ %s ] [ Worker: %d, Progress: %d/%d Scene: %d ]' %\\\n (cur_time, 0, index, len(scenes_subset), scene['image_index']))\n gen_dialog = generate_dialog_bfs(scene, cap_templates, ques_templates)\n dialogs.append(gen_dialog)\n\n else:\n # Multithread version.\n output_q = multiprocessing.Queue()\n jobs = []\n for worker_id in range(FLAGS.num_workers):\n allotment = scenes_subset[worker_id::FLAGS.num_workers]\n inputs = (allotment, cap_templates, ques_templates)\n inputs += (worker_id, output_q)\n\n process = multiprocessing.Process(target=worker, args=inputs)\n jobs.append(process)\n process.start()\n\n # Wait for all the jobs to finish and collect the output.\n final_results = {}\n for _ in jobs:\n final_results.update(output_q.get())\n for job in jobs:\n job.join()\n\n # Flatten and sort.\n final_results = [jj for _, ii in final_results.items() for jj in ii]\n dialogs = sorted(final_results, key=lambda x: x['image_index'])\n # utils.pretty_print_dialogs(dialogs)\n\n # Save the dialogs.\n print('Saving dialog at: %s' % FLAGS.save_path)\n with open(FLAGS.save_path, 'w') as file_id:\n json.dump(dialogs, file_id)", "def process(self):\n # Opening and preprocessing of the input file\n if self.options.mbtiles_fromdisk or self.options.mbtiles_todisk:\n if self.options.mbtiles_fromdisk:\n i_parm=10\n if self.options.mbtiles_todisk:\n i_parm=11\n if self.options.verbose:\n print \"GDAL2MbTiles :mbtiles from/to disk [\",i_parm,\"] mbtiles_fromdisk[\",self.options.mbtiles_fromdisk,\"] mbtiles_todisk[\",self.options.mbtiles_todisk,\"]\"\n self.mbtiles_setup(i_parm)\n return\n else:\n if self.options.verbose:\n print \"GDAL2MbTiles :tile creation mbtiles[\",self.options.mbtiles,\"]\"\n self.open_input()\n # Generation of main metadata 
files and HTML viewers\n self.generate_metadata()\n # Generation of the lowest tiles\n self.generate_base_tiles()\n # Generation of the overview tiles (higher in the pyramid)\n self.generate_overview_tiles()\n # Generating of KML\n self.generate_kml()", "def phase_two_data():\n from pathlib import Path\n try:\n import cPickle as pickle\n except ImportError:\n import pickle\n \n from annotation import parse_fulltext\n from features import ALL_FEATURES\n \n from feature_template import apply_templates\n from feature_selection import filter_by_frequency\n from feature_encoding import encode\n\n # Feature templates considered if heading by 1:\n # ----------------------------\n # Position + Voice\n # Path length + Clause layer\n # 1 Predicate + Path\n # Path + Position + Voice\n # Path + Position + Voice + Predicate\n # 1 Head word stem + Predicate\n # 1 Head word stem + Predicate + Path\n # 1 Head word stem + Phrase\n # Clause layer + Position + Predicate\n templates = [tuple([f.name]) for f in ALL_FEATURES] + \\\n [('path_to_frame', 'frame'), ('head_stem', 'frame'), ('head_stem', 'frame', 'path_to_frame'), ('head_stem', 'phrase_type')]\n \n size = 40\n instances = []\n for i, p in enumerate(Path(\"/cs/fs2/home/hxiao/Downloads/fndata-1.5/fulltext/\").glob(\"*.xml\")):\n if i == size:\n break\n sys.stderr.write(\"Processing file: '%s'\\n\" %p.absolute())\n annotations = parse_fulltext(str(p.absolute()))\n instances += make_training_data(ALL_FEATURES, annotations)\n\n sys.stderr.write(\"Feature selection...\\n\")\n x, y = zip(*instances)\n x = apply_templates(x, templates)\n features = filter_by_frequency(x, 5)\n sys.stderr.write(\"Feature encoding...\\n\")\n x, feature_map = encode(x, features)\n \n sys.stderr.write(\"Dumping data...\\n\") \n pickle.dump((x, y, ALL_FEATURES, templates, feature_map), open('dump/test_data.pkl', 'w'))\n import pdb\n pdb.set_trace()\n print len(instances)", "def run_pipeline(directory):\n\n # io = IO(path)\n # df = io.load_cleaned_file(download_always=False)\n # df = add_choke_events(df)\n\n # Add calls to features.Xxx here\n\n #directory = main_directory\n site=os.listdir(directory)\n site_dicom={}\n site_dicom_sub={}\n site_sub_files={}\n i,k,j=0,0,0\n for filename in site:\n site_dicom[i]=directory+'/'+filename+'/DICOM-raw'\n temporary_path=os.listdir(site_dicom[i])\n\n for another_file in temporary_path:\n site_dicom_sub[j]=site_dicom[i]+'/'+another_file+'/scans'\n temporary_path_1 = os.listdir(site_dicom_sub[j])\n for another_file_1 in temporary_path_1:\n site_sub_files[k]=site_dicom_sub[j]+'/'+another_file_1+'/'\n k=k+1\n j = j + 1\n i=i+1\n splitted={}\n output_mif={}\n for i in range (len(site_sub_files)):\n splitted[i]=site_sub_files[i].split('/')\n output_mif[i]=directory+'/'+splitted[i][5]+'/MIF-raw/'+splitted[i][5]+'_'+splitted[i][7]+'_'+splitted[i][9]+'.mif'\n\n\n # save (or return) dataframe here?\n return site_sub_files,output_mif", "def process_info(args):\n fname, opts = args\n \n with open(fname, 'r') as f:\n ann = json.load(f)\n f.close()\n examples = []\n skipped_instances = 0\n\n for instance in ann:\n components = instance['components']\n\n if 'class_filter'in opts.keys() and instance['label'] not in opts['class_filter']:\n continue\n \n candidates = [c for c in components if len(c['poly']) >= opts['min_poly_len']]\n\n if 'sub_th' in opts.keys():\n total_area = np.sum([c['area'] for c in candidates])\n candidates = [c for c in candidates if c['area'] > opts['sub_th']*total_area]\n\n candidates = [c for c in candidates if c['area'] >= 
opts['min_area']]\n\n if opts['skip_multicomponent'] and len(candidates) > 1:\n skipped_instances += 1\n continue\n\n instance['components'] = candidates\n if candidates:\n examples.append(instance)\n\n return examples, skipped_instances", "def __init__(self, inifile, dry_run, output):\n\n config = ConfigParser()\n config.read(inifile)\n sequence = config['dithersequence']\n\n # Set up the output.\n self._output = output\n\n # Set up the file type and exposure sequence.\n self._location = sequence['location']\n self._filetype = sequence['filetype']\n self._date = sequence['date']\n self._exposures = [int(e) for e in sequence['exposures'].split()]\n\n if 'coordinates' not in config:\n raise ValueError('no coordinates set for dither!')\n \n coords = config['coordinates']\n self._dithertype = coords['dithertype']\n \n self._wcs = fits.getdata(coords['wcsfile'], 2)\n self._wcs = self._wcs[np.argsort(self._wcs['mjd_obs'])]\n self._central_exposure = int(sequence['centralexposure'])\n\n if coords['dithertype'] == 'telescope':\n fadir = coords['fiberassigndir']\n self._ditherfa = fits.getdata(os.path.join(\n fadir, 'fiberassign-%s.fits' % coords['ditheredtilenum']))\n self._unditherfa = fits.getdata(os.path.join(\n fadir, 'fiberassign-%s.fits' % coords['unditheredtilenum']))\n expnum = [int(fn.split('-')[1]) for fn in self._wcs['filename']]\n centralind = expnum.index(self._central_exposure)\n self._central_wcs = self._wcs[centralind]\n\n # Set the Tile ID for the output metadata.\n self._tileid = coords['unditheredtilenum']\n else:\n raise ValueError('not implemented')\n\n # Extract the list of exposures on disk.\n self._exposure_files = self._getfilenames()\n\n if not dry_run:\n # Construct fiber output.\n self._exposure_table = self._buildtable()", "def lect_ca(dir_cat,SourceRA,SourceDec,SourceROI,distmin,name,outputfile,namesource):\n\tprint \"SourceROI=\"+str(SourceROI)\n\tfcal= open(outputfile,\"w\");\n\t\n\t#fcat=pyfits.open(dir_cat)\n\n\tdonnees=pyfits.getdata(dir_cat,1)\n\tnames=donnees.field(name_col)\n\tprint names\n\tra=donnees.field(ra_col)\n\tdec=donnees.field(dec_col)\n\tind=donnees.field(ind_col)\n\tcour=donnees.field(cour_col)\n\tEpiv=donnees.field(Epivot_col)\t\n\tInteg=donnees.field(Inte_col)\n\tPrefact=donnees.field(pref_col)\n\tErr_prefact=donnees.field(err_pref_col)\n\tvariabilite=donnees.field(varia_cut)\n\n #Loop on the LAT Catalog\r\n\tfor p in range(0,len(names)-1):\n\t\t\t#Calcul de la distance angulaire separant la source p et la source que l'on etudie\r\n\t\tdist = (180./3.14159)*acos(cos(3.14159/2. - float(SourceDec)*3.14159/180.)*cos(3.14159/2.- float(dec[p])*3.14159/180.)+sin(3.14159/2. - float(SourceDec)*3.14159/180.)*sin(3.14159/2.- float(dec[p])*3.14159/180.)*cos((float(SourceRA) - float(ra[p]))*3.14159/180.))\n\t\t\t\n\t\t\t#These Marie-Helene Grondin p.\n\t\t#nom=names[p].split(\" \")\n\t\t#names[p]=nom[0]+\"_\"+nom[1]\n\n\n#\t\tif (dist< float(SourceROI)):\n#\t\t\tprint str(dist)+str(names[p])\r \r\n\t\tif (dist < float(SourceROI) and dist > float(distmin) ): #SI la source est dans la region d'interet mais n'est pas confondu avec la source elle meme. 0.2 est subjectif, compromis qui marche dans la pluspart des cas mais pas dans tous. 
Faire attention parfois il faut remplacer ce 0.2 par 0.3\r\n\t\t\tif float(cour[p]!=\"NULL\") and float(Integ[p]) > 1e-8 and dist < 5: #cut-off\r\n\t\t\t\tprint \"curvature = \", float(cour[p])\r\n\t\t\t\ttxt = str(names[p])+\" \"+str(ra[p])+\" \"+str(dec[p])+\" \"+str(Integ[p])+\" \"+str(ind[p])+\" \"+str(Prefact[p])+\" \"+str(Epiv[p])+\" 1\"\n#\t\t\t\tif variabilite[p]>varia_seuil:\n txt+=\" 1 \"+str(dist)\n#\t\t\t\telse :\n#\t\t\t txt+=\" 0 \"+str(dist)\r\n\t\t\telse: #no cut off with flux level high enough\r\n\t\t\t\ttxt = str(names[p])+\" \"+str(ra[p])+\" \"+str(dec[p])+\" \"+str(Integ[p])+\" \"+str(ind[p])+\" \"+str(Prefact[p])+\" \"+str(Epiv[p])+\" 0\"\n#\t\t\t\tif variabilite[p]>varia_seuil:\n\t\t\t \ttxt+=\" 1 \"+str(dist)\n#\t\t\t\telse :\n#\t\t\t\t\ttxt+=\" 0 \"+str(dist)\n\t\t\tfcal.write(txt)\r\n\t\t\tfcal.write(\"\\n\")\n\t\telif (dist < float(raymax) and names[p]!=namesource) :\n\n\t\t\tif float(cour[p]) > 11.34 and float(Integ[p]) > 1e-8 and dist < 5: #cut-off\n\t\t\t\tprint \"curvature = \", float(cour[p])\r\n\t\t\t\ttxt = str(names[p])+\" \"+str(ra[p])+\" \"+str(dec[p])+\" \"+str(Integ[p])+\" \"+str(ind[p])+\" \"+str(Prefact[p])+\" \"+str(Epiv[p])+\" 1 2 \"+str(dist)\n\r\n\t\t\telse: #no cut off with flux level high enough\r\n\t\t\t\ttxt = str(names[p])+\" \"+str(ra[p])+\" \"+str(dec[p])+\" \"+str(Integ[p])+\" \"+str(ind[p])+\" \"+str(Prefact[p])+\" \"+str(Epiv[p])+\" 0 2 \"+str(dist) \r\n\t\t\tfcal.write(txt)\r\n\t\t\tfcal.write(\"\\n\")\n\t#On ajoute l'objet de l'etude\r\n\tpwn = name\r\n\ttxt = str(pwn)+\" \"+str(SourceRA)+\" \"+str(SourceDec)+\" 1e-10 2 0\"\r\n\tfcal.write(txt)\r\n\tfcal.write(\"\\n\")\n\n\tfcal.close()", "def main():\r\n option_parser, opts, args = parse_command_line_parameters(**script_info)\r\n\r\n data = {}\r\n\r\n fasta_file = opts.input_fasta_fp\r\n\r\n # load the input alignment\r\n data['aln'] = SequenceCollection.from_fasta_records(\r\n parse_fasta(open(fasta_file)), DNA)\r\n\r\n # Load the otu file\r\n otu_path = opts.otu_map_fp\r\n otu_f = open(otu_path, 'U')\r\n otus = fields_to_dict(otu_f)\r\n otu_f.close()\r\n\r\n data['otus'] = otus\r\n # Determine which which samples to extract from representative seqs\r\n # and from otus file\r\n if opts.samples_to_extract:\r\n prefs = process_extract_samples(opts.samples_to_extract)\r\n\r\n filepath = opts.input_fasta_fp\r\n filename = filepath.strip().split('/')[-1]\r\n filename = filename.split('.')[0]\r\n\r\n if opts.output_dir:\r\n if os.path.exists(opts.output_dir):\r\n dir_path = opts.output_dir\r\n else:\r\n try:\r\n os.mkdir(opts.output_dir)\r\n dir_path = opts.output_dir\r\n except OSError:\r\n pass\r\n else:\r\n dir_path = './'\r\n\r\n try:\r\n action = filter_samples\r\n except NameError:\r\n action = None\r\n # Place this outside try/except so we don't mask NameError in action\r\n if action:\r\n action(prefs, data, dir_path, filename)", "def make_segmap(f, overwrite=True):\n \n # Make segmaps for each SCI extension\n for i in [1,4]:\n # See if segmap already exists\n outfile = f.replace('.fits', '_seg_ext_{}.fits'.format(i))\n if (os.path.exists(outfile)) & (overwrite is False):\n pass\n\n else:\n # Get the data\n data = fits.getdata(f,i)\n \n # Detector sources; Make segmap\n threshold = detect_threshold(data, snr=1.0)\n sigma = 3.0 * gaussian_fwhm_to_sigma # FWHM = 3.\n kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)\n kernel.normalize()\n segm = detect_sources(data, threshold, npixels=3, filter_kernel=kernel)\n fits.writeto(outfile, segm.data, overwrite=overwrite)", "def 
map_func(h, configs, args):\n\tif args.verbose:\n\t\tcmd = \"python {} -i {}/threshold{}.tif -o {}/threshold{}.shp -v\".format(\n\t\t\tconfigs[\"path\"][\"polygons\"],\n\t\t\tconfigs[\"path\"][\"output\"],\n\t\t\th,\n\t\t\tconfigs[\"path\"][\"output\"],\n\t\t\th\n\t\t)\n\t\tprint cmd\n\telse:\n\t\tcmd = \"python {} -i {}/threshold{}.tif -o {}/threshold{}.shp\".format(\n\t\t\tconfigs[\"path\"][\"polygons\"],\n\t\t\tconfigs[\"path\"][\"output\"],\n\t\t\th,\n\t\t\tconfigs[\"path\"][\"output\"],\n\t\t\th\n\t\t)\n\tcmd_args = shlex.split(cmd)\n\tstdout,stderr = sp.Popen(\n\t\tcmd_args,\n\t\tstdin = sp.PIPE,\n\t\tstdout = sp.PIPE,\n\t\tstderr = sp.PIPE\n\t).communicate()\n\tif args.verbose:\n\t\tprint stdout, stderr\n\treturn True", "def source_positions(self, dist_cutoff=None):\n \n crds = {}\n for fn in self.srclists:\n \n print(\"OM::source_positions -- checking: \",fn) \n ff = pyfits.open(fn)\n idf = fn[-20:-14]\n obsid = ff[0].header[\"OBS_ID\"]\n print(obsid, idf)\n try:\n ra, dec = ff[1].data[\"RA\"], ff[1].data[\"DEC\"]\n except:\n print(fn,\" does not contain RA, Dec coordinates.\")\n continue\n rate = ff[1].data[\"CORR_RATE\"]\n coords = C.SkyCoord(ra, dec, unit=(\"deg\", \"deg\"), frame='icrs')\n tc = []\n ci = []\n for i, c in enumerate(coords):\n #print(c)\n if len(tc) == 0: \n tc.append(c.to_string(\"hmsdms\",sep=':', precision=2, pad=True))\n ci.append(i)\n continue\n #print(c, tc, sc)\n dist = c.separation(coords[ci]).arcsec\n #print(\"dist: \",dist)\n gi = np.where(dist>2)[0]\n for j in gi:\n tc.append(c.to_string(\"hmsdms\",sep=':', precision=2, pad=True))\n #ci.append(j)\n if len(tc)>0:\n crds[idf] = tc\n #else:\n if len(crds) == 0:\n return None\n return {obsid:crds}", "def test_extractors(gtf_simple):\n tssp = get(\"tss\")(gtf_simple)\n assert np.all(tssp.position == [100, 250])\n assert np.all(tssp.strand == [\"+\", \"-\"])\n\n pa = get(\"polya\")(gtf_simple)\n assert np.all(pa.position == [200, 150])\n assert np.all(pa.strand == [\"+\", \"-\"])\n\n ie = get(\"intron_exon\")(gtf_simple)\n # only one as we need to throw it out (due to transcript...)\n assert np.all(ie.position == [400])\n assert np.all(ie.strand == [\"-\"])\n\n ei = get(\"exon_intron\")(gtf_simple)\n assert np.all(ei.position == [300])\n assert np.all(ei.strand == [\"-\"])", "def _generate_examples(self, archive):\n\n for fname, fobj in archive:\n image_dir, image_file = os.path.split(fname)\n d = os.path.basename(image_dir)\n record = {'image': fobj, 'label': d}\n yield \"%s/%s\" % (image_file, d), record", "def process(self, source, dest):\n\n if os.path.isfile(dest):\n print(\"File %s exists -> aborting\" % dest)\n exit(1)\n print(dest)\n \n fin = open(source)\n fout = open(dest, 'w')\n for l in fin.readlines():\n l = l.replace(\"AUTHOR\", self.author)\n l = l.replace(\"DESCRIPTION\", self.description)\n l = l.replace(\"NAMESPACE\", self.namespace)\n l = l.replace(\"MyComponent\", self.className)\n l = l.replace(\"INCDIR\", self.hDir)\n l = l.replace(\"CXXDIR\", self.cxxDir)\n l = l.replace(\"YEAR\", str(self.now.year))\n l = l.replace(\"DATE\", \"%d %s %d\" % (self.now.day, self.now.strftime(\"%b\"), self.now.year))\n fout.write(l)\n fout.close()\n fin.close()", "def _load_sources(self):\n self.point_sources= []\n if os.path.exists(os.path.join(self.folder,'pickle.zip')):\n pzip = zipfile.ZipFile(os.path.join(self.folder,'pickle.zip'))\n files = ['pickle/HP12_%04d.pickle' %i for i in range(1728)]\n assert all(f in pzip.namelist() for f in files), 'Improper model zip file'\n opener = pzip.open\n else:\n 
files = glob.glob(os.path.join(self.folder, 'pickle', '*.pickle'))\n files.sort()\n opener = open\n self.nside = int(np.sqrt(len(files)/12))\n if len(files) != 12*self.nside**2:\n msg = 'Number of pickled ROI files, %d, found in folder %s, not consistent with HEALpix' \\\n % (len(files),os.path.join(self.folder, 'pickle'))\n raise Exception(msg)\n \n ####self.global_sources = sources.GlobalSourceList() # allocate list to index parameters for global sources\n self.extended_sources=[] # list of unique extended sources\n self.changed=set() # to keep track of extended models that are different from catalog\n moved=0\n nfreed = 0\n self.tagged=set()\n source_names =[]\n for i,file in enumerate(files):\n p = pickle.load(opener(file))\n index = int(os.path.splitext(file)[0][-4:])\n assert i==index, 'logic error: file name %s inconsistent with expected index %d' % (file, i)\n roi_sources = p.get('sources', {}) # don't know why this needed\n extended_names = {} if (self.__dict__.get('extended_catalog') is None) else self.extended_catalog.names\n for key,item in roi_sources.items():\n if key in extended_names: continue\n if key in source_names:\n #if not self.quiet: print ('SkyModel warning: source with name %s in ROI %d duplicates previous entry: ignored'%(key, i))\n continue\n source_names.append(key)\n skydir = item['skydir']\n if self.update_positions is not None:\n ellipse = item.get('ellipse', None)\n ts = item['ts']\n if ellipse is not None and not np.any(np.isnan(ellipse)) :\n fit_ra, fit_dec, a, b, ang, qual, delta_ts = ellipse\n if qual<5 and a < 0.2 and \\\n ts>self.update_positions and delta_ts>0.1:\n skydir = SkyDir(float(fit_ra),float(fit_dec))\n moved +=1\n self.tagged.add(i)\n \n ps = sources.PointSource(name=key,\n skydir=skydir, model= sources.convert_model(item['model']),\n ts=item['ts'],band_ts=item['band_ts'], index=index)\n if sources.validate(ps,self.nside, self.filter):\n self._check_position(ps) # check that it is not coincident with previous source(warning for now?)\n self.point_sources.append( ps)\n # make a list of extended sources used in the model \n names = p.get('diffuse_names')\n for name, oldmodel in zip(names, p['diffuse']):\n model = sources.convert_model(oldmodel) # convert from old Model version if necessary \n key = name.split('_')[0]\n if key in self.diffuse_dict:\n self.diffuse_dict.add_model(index, name, model)\n elif self.extended_catalog_name=='ignore': \n continue\n else:\n try:\n es = self.extended_catalog.lookup(name) if self.extended_catalog is not None else None\n except Exception as msg:\n print ('Skymodel: Failed to create model for %s' %name)\n raise\n if es is None:\n #raise Exception( 'Extended source %s not found in extended catalog' %name)\n print ('SkyModel warning: Extended source %s not found in extended catalog, removing' %name)\n continue\n if self.hpindex(es.skydir)!=index: continue\n \n if es.model.name!=model.name:\n if name not in self.changed:\n if not self.quiet: print ('SkyModel warning: catalog model %s changed from %s for source %s: keeping change'%\\\n (es.model.name, model.name, name))\n self.changed.add(name)\n es.smodel=es.model=model #update with current fit values always\n if sources.validate(es,self.nside, self.filter): #lambda x: True): \n self.extended_sources.append(es)\n # check for new extended sources not yet in model\n self._check_for_extended()\n if self.update_positions and moved>0:\n print ('updated positions of %d sources, healpix ids in tagged' % moved)", "def feature_extraction(args, what_roi):\n print(\"Start 
feature extraction...\")\n descr_dict = {}\n file_list = []\n for root, dirs, files in os.walk(args.imdir):\n for n, filename in enumerate(files):\n if n == 0:\n print(\"\\nCurrent directory: %s\" % root)\n filepath = os.path.join(root, filename)\n img = cv2.imread(filepath, cv2.IMREAD_COLOR)\n\n if type(img) != np.ndarray:\n sys.stderr.write(\"--> %d File %s is not an img: will be skipped.\\n\" % (n, filename))\n continue\n else:\n print(\"%d Image %s is being processed...\" % (n, filename))\n file_list.append(filename)\n\n img = apply_roi(args, what_roi, img)\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n ret, thresh = cv2.threshold(img_gray, 170, 255, cv2.THRESH_TRUNC)\n des = surf_keypoint_detection(thresh)\n descr_dict[filename] = des\n\n print(\"\\nAll images have been processed.\")\n return descr_dict, file_list", "def get_matches(self, file_map) -> dict:\r\n get_file_dict = {}\r\n match_dict = {\r\n 'GIF': re.findall(b'(?s)(\\x47\\x49\\x46\\x38\\x39\\x61.{80})', file_map),\r\n 'RTF': re.findall(b'(?s)(.{20}\\x35\\x30\\x34\\x65\\x34\\x37\\x30.{80}|.{20}\\x66\\x66\\x64\\x38\\x66\\x66.{80}|'\r\n b'.{20}\\x66\\x66\\x64\\x38\\x66\\x66\\x65\\x30\\x30\\x30\\x31\\x30.{80})', file_map),\r\n }\r\n if self.jpgsos:\r\n match_dict['JPG_SOS'] = jpg_sos(file_map)\r\n elif self.sof2sos:\r\n match_dict['JPG_SOF2SOS'] = jpg_sof2sos(file_map)\r\n elif self.jump:\r\n match_dict['JPG_JUMP'] = jpg_jump(file_map)\r\n else:\r\n match_dict['JPG'] = re.findall(b'(?s)(\\xff\\xd8\\xff\\xe0\\x00\\x10.{80})', file_map)\r\n match_dict['JPG2'] = re.findall(b'(?s)(\\xff\\xd8\\xff.{80})', file_map)\r\n if self.idat:\r\n match_dict['PNG_IDAT'] = idat(file_map)\r\n else:\r\n match_dict['PNG'] = re.findall(b'(?s)(\\x89\\x50\\x4e\\x47.{82})', file_map)\r\n m = re.match(br'^(?P<magic_beans>\\x49\\x49\\x2a\\x00[^\\x00\\x00]{2}.{80})',file_map,re.S)\r\n if m:\r\n match_dict['TIF'] = [m.group('magic_beans')]\r\n for file_type, regex_match in match_dict.items():\r\n if len(regex_match) > 0:\r\n get_file_dict[file_type] = regex_match\r\n return get_file_dict", "def generate_siaf_pre_flight_reference_files_fgs(verbose=False, mode='siaf'):\n instrument = 'FGS'\n\n center_offset_x = 1023.5\n center_offset_y = 1023.5\n\n # hardcoded pixelscale, reference?\n scale = 0.06738281367 # arcsec/pixel\n\n if mode == 'siaf':\n # write focal plane alignment reference file\n outfile = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}_siaf_alignment.txt'.format(instrument.lower()))\n oss_flags = [False, True]\n elif mode == 'fsw':\n outfile = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}_fsw_coefficients.txt'.format(instrument.lower()))\n oss_flags = [True]\n\n if os.path.isfile(outfile):\n os.remove(outfile)\n\n siaf_alignment = None\n counter = 0\n\n for aperture_id in 'FGS1 FGS2'.split():\n\n if aperture_id == 'FGS1':\n V2Ref = 207.1900\n V3Ref = -697.5000\n\n # coefficients copied from Cox' makeSIAF.py to reproduce PRDOPSSOC-H-015\n # February 2015 FGS delivery\n # these numbers match the `To be Updated for CV3` column in the Tables on page 6ff\n # of an unpublished word document entitled `FGS Transformation for CV3.docx` by\n # Julia Zhou, e.g. 
C = IDEALPTOREALPXCOE_N\n\n # Initialize the parameters\n A = np.array(\n [-2.33369320E+01, 9.98690490E-01, 1.05024970E-02, 2.69889020E-06, 6.74362640E-06,\n 9.91415010E-07, 1.21090320E-09, -2.84802930E-11, 1.27892930E-09, -1.91322470E-11,\n 5.34567520E-14, 9.29791010E-14, 8.27060020E-14, 9.70576590E-14, 1.94203870E-14])\n\n B = np.array(\n [-2.70337440E+01, -2.54596080E-03, 1.01166810E+00, 2.46371870E-06, 2.08880620E-06,\n 9.32489680E-06, -4.11885660E-11, 1.26383770E-09, -7.60173360E-11, 1.36525900E-09,\n 2.70499280E-14, 5.70198270E-14, 1.43943080E-13, 7.02321790E-14, 1.21579450E-13])\n\n C = np.array(\n [2.31013520E+01, 1.00091800E+00, -1.06389620E-02, -2.65680980E-06, -6.51704610E-06,\n -7.45631440E-07, -1.29600400E-09, -4.27453220E-12, -1.27808870E-09, 5.01165140E-12,\n 2.72622090E-15, 5.42715750E-15, 3.46979980E-15, 2.49124350E-15, 1.22848570E-15])\n\n D = np.array(\n [2.67853100E+01, 2.26545910E-03, 9.87816850E-01, -2.35598140E-06, -1.91455620E-06,\n -8.92779540E-06, -3.24201520E-11, -1.30056630E-09, -1.73730700E-11,\n -1.27341590E-09, 1.84205730E-15, 3.13647160E-15, -2.99705840E-16, 1.98589690E-15,\n -1.26523200E-15])\n\n elif aperture_id == 'FGS2':\n V2Ref = 24.4300\n V3Ref = -697.5000\n\n A = np.array(\n [-3.28410900E+01, 1.03455010E+00, 2.11920160E-02, -9.08746430E-06, -1.43516480E-05,\n -3.93814140E-06, 1.60956450E-09, 5.82814640E-10, 2.02870570E-09, 2.08582470E-10,\n -2.79748590E-14, -8.11622820E-14, -4.76943000E-14, -9.01937740E-14,\n -8.76203780E-15])\n\n B = np.array(\n [-7.76806220E+01, 2.92234710E-02, 1.07790000E+00, -6.31144890E-06, -7.87266390E-06,\n -2.14170580E-05, 2.13293560E-10, 2.03376270E-09, 6.74607790E-10, 2.41463060E-09,\n -2.30267730E-14, -3.63681270E-14, -1.35117660E-13, -4.22207660E-14,\n -1.16201020E-13])\n\n C = np.array(\n [3.03390890E+01, 9.68539030E-01, -1.82288450E-02, 7.72758330E-06, 1.17536430E-05,\n 2.71516870E-06, -1.28167820E-09, -6.34376120E-12, -1.24563160E-09, -9.26192040E-12,\n 8.14604260E-16, -5.93798790E-16, -2.69247540E-15, -4.05196100E-15, 2.14529600E-15])\n\n D = np.array(\n [7.13783150E+01, -2.55191710E-02, 9.30941560E-01, 5.01322910E-06, 5.10548510E-06,\n 1.68083960E-05, 9.41565630E-12, -1.29749490E-09, -1.89194230E-11, -1.29425530E-09,\n -2.81501600E-15, -1.73025000E-15, 2.57732600E-15, 1.75268080E-15, 2.95238320E-15])\n\n number_of_coefficients = len(A)\n polynomial_degree = int((np.sqrt(8 * number_of_coefficients + 1) - 3) / 2)\n\n # generate distortion coefficient files\n siaf_index = []\n exponent_x = []\n exponent_y = []\n for i in range(polynomial_degree + 1):\n for j in np.arange(i + 1):\n siaf_index.append('{:d}{:d}'.format(i, j))\n exponent_x.append(i-j)\n exponent_y.append(j)\n\n\n print('*'*100)\n aperture_name = '{}_FULL'.format(aperture_id)\n for oss in oss_flags:\n\n if oss:\n aperture_name = aperture_name + '_OSS'\n oss_factor = 1.\n else:\n oss_factor = -1.\n\n print('{}'.format(aperture_name))\n\n if mode == 'fsw':\n (AX, BX, CX, DX) = (A, B, C, D)\n\n AS = polynomial.shift_coefficients(AX, center_offset_x, center_offset_y)\n BS = polynomial.shift_coefficients(BX, center_offset_x, center_offset_y)\n\n AS0 = copy.deepcopy(AS[0])\n BS0 = copy.deepcopy(BS[0])\n AS[0] = 0.0\n BS[0] = 0.0\n\n betaY = np.arctan2(AS[2], BS[2])\n print('Beta Y', np.degrees(betaY))\n print('Shift zeros', AS0, BS0)\n\n AR = AS * np.cos(betaY) - BS * np.sin(betaY)\n BR = AS * np.sin(betaY) + BS * np.cos(betaY)\n\n\n AR[0] = center_offset_x\n BR[0] = center_offset_y\n\n AF = polynomial.shift_coefficients(AR, -center_offset_x, -center_offset_y)\n BF 
= polynomial.shift_coefficients(BR, -center_offset_x, -center_offset_y)\n\n # Inverse matrices\n xc = polynomial.poly(AX, center_offset_x, center_offset_y)\n yc = polynomial.poly(BX, center_offset_x, center_offset_y)\n # CS1 = 1.0*C1 # Force a real copy\n CS = polynomial.shift_coefficients(CX, xc, yc)\n DS = polynomial.shift_coefficients(DX, xc, yc)\n CS0 = copy.deepcopy(CS[0])\n DS0 = copy.deepcopy(DS[0])\n\n CS[0] = 0.0\n DS[0] = 0.0\n CR = polynomial.prepend_rotation_to_polynomial(CS, np.degrees(betaY))\n DR = polynomial.prepend_rotation_to_polynomial(DS, np.degrees(betaY))\n CR[0] = CS0\n DR[0] = DS0\n CF = polynomial.shift_coefficients(CR, -center_offset_x, -center_offset_y)\n DF = polynomial.shift_coefficients(DR, -center_offset_x, -center_offset_y)\n distortion_reference_table = Table((siaf_index, exponent_x, exponent_y, AF, BF, CF, DF),\n names=('siaf_index', 'exponent_x', 'exponent_y', 'Sci2IdlX', 'Sci2IdlY', 'Idl2SciX','Idl2SciY'))\n\n V3angle = 0\n betaX = 0\n\n\n else:\n # Scale to arcsec\n (AX, BX, CX, DX) = polynomial.rescale(A, B, C, D, scale)\n\n\n V2c = polynomial.poly(AX, center_offset_x, center_offset_y)\n V3c = polynomial.poly(BX, center_offset_x, center_offset_y)\n\n AS = polynomial.shift_coefficients(AX, center_offset_x, center_offset_y)\n AS[0] = 0.0\n BS = polynomial.shift_coefficients(BX, center_offset_x, center_offset_y)\n BS[0] = 0.0\n CS = polynomial.shift_coefficients(CX, V2c, V3c)\n CS[0] = 0.0\n DS = polynomial.shift_coefficients(DX, V2c, V3c)\n DS[0] = 0.0\n\n if aperture_id == 'FGS1':\n if oss is False:\n AF = -polynomial.flip_x(polynomial.flip_y(AS))\n BF = -polynomial.flip_x(polynomial.flip_y(BS))\n CF = -polynomial.flip_x(polynomial.flip_y(CS))\n DF = -polynomial.flip_x(polynomial.flip_y(DS))\n else:\n AF = AS # For OSS detector and science are identical\n BF = -BS\n CF = polynomial.flip_y(CS)\n DF = polynomial.flip_y(DS)\n elif aperture_id == 'FGS2':\n if oss is False:\n AF = -polynomial.flip_x(AS)\n BF = polynomial.flip_x(BS)\n CF = -polynomial.flip_x(CS)\n DF = polynomial.flip_x(DS)\n else:\n AF = AS # For OSS detector and science are identical\n BF = BS\n CF = CS\n DF = DS\n\n betaX = np.arctan2(oss_factor * AF[1], BF[1])\n betaY = np.arctan2(oss_factor * AF[2], BF[2])\n\n V3angle = copy.deepcopy(betaY)\n if (abs(V3angle) > np.pi/2):\n V3angle = V3angle - np.copysign(np.pi, V3angle)\n\n (AR,BR) = polynomial.add_rotation(AF, BF, -1 * oss_factor * np.rad2deg(V3angle))\n\n # take out the rotation, carried separately in V3IdlYangle\n CR = polynomial.prepend_rotation_to_polynomial(CF, oss_factor * np.rad2deg(V3angle))\n DR = polynomial.prepend_rotation_to_polynomial(DF, oss_factor * np.rad2deg(V3angle))\n distortion_reference_table = Table((siaf_index, exponent_x, exponent_y, AR, BR, CR, DR),\n names=('siaf_index', 'exponent_x', 'exponent_y', 'Sci2IdlX', 'Sci2IdlY', 'Idl2SciX','Idl2SciY'))\n\n print('{} {}'.format(aperture_name, np.rad2deg(betaY)))\n # if aperture_name == 'FGS1_FULL': # first in loop\n if counter == 0: # first in loop\n siaf_alignment = Table()\n siaf_alignment['AperName'] = ['{:>30}'.format(aperture_name)]\n siaf_alignment['V3IdlYAngle'] = [np.rad2deg(V3angle)]\n siaf_alignment['V3SciXAngle'] = [np.rad2deg(betaX)]\n siaf_alignment['V3SciYAngle'] = [np.rad2deg(betaY)]\n siaf_alignment['V2Ref'] = [V2Ref]\n siaf_alignment['V3Ref'] = [V3Ref]\n else:\n siaf_alignment.add_row(['{:>30}'.format(aperture_name), np.rad2deg(V3angle), np.rad2deg(betaX), np.rad2deg(betaY), V2Ref, V3Ref])\n\n counter += 1\n\n\n 
distortion_reference_table.add_column(Column([aperture_name] * len(distortion_reference_table), name='AperName'), index=0)\n if mode == 'fsw':\n distortion_reference_file_name = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, 'fgs_fsw_distortion_{}.txt'.format(aperture_name.lower()))\n else:\n distortion_reference_file_name = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, 'fgs_siaf_distortion_{}.txt'.format(aperture_name.lower()))\n\n comments = []\n comments.append('FGS distortion reference file for SIAF\\n')\n comments.append('')\n comments.append('Based on coefficients delivered to STScI in February 2015.')\n comments.append('These parameters are stored in PRDOPSSOC-H-014.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n distortion_reference_table.meta['comments'] = comments\n distortion_reference_table.write(distortion_reference_file_name, format='ascii.fixed_width', delimiter=',', delimiter_pad=' ', bookend=False, overwrite=True)\n\n comments = []\n comments.append('{} alignment parameter reference file for SIAF'.format(instrument))\n comments.append('')\n comments.append('This file contains the focal plane alignment parameters calibrated during FGS-SI alignment.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n siaf_alignment.meta['comments'] = comments\n siaf_alignment.write(outfile, format='ascii.fixed_width', delimiter=',',\n delimiter_pad=' ', bookend=False, overwrite=True)", "def source_info(self,fname):\n\t\t# Has this source file already been parsed?\n\t\tif fname in self.src_info:\n\t\t\t# if yes return the previous parse-result\n\t\t\treturn self.src_info[fname]\n\t\t\n\t\t# Create a source file parse-info-container and ast-parse the sourcefile\n\t\tself.src_info[fname] = {}\n\t\tsrc_fp = open(fname,'rb')\n\t\tsrc = src_fp.read()\n\t\tsrc_fp.close()\n\t\tsrc_encoding = detect(src)\n\t\ta = ast.parse(src)\n\t\tdel src\n\t\tself.src_encoding[fname] = src_encoding['encoding']\n\t\t\n\t\t# Analyse the ast\n\t\tfor obj in a.body:\n\t\t\tif type(obj)==ast.ClassDef:\n\t\t\t\tc = obj\n\t\t\t\tfirstlineno = c.lineno\n\t\t\t\tlastlineno = c.lineno\n\t\t\t\tclass_doc_lines = []\n\t\t\t\tfirst_class_obj = True\n\t\t\t\tfor obj in c.body:\n\t\t\t\t\t# Detect documentation for class\n\t\t\t\t\tif first_class_obj and type(obj)==ast.Expr and type(obj.value)==ast.Str:\n\t\t\t\t\t\tfor doc_line in obj.value.s.strip().replace('\\r\\n','\\n').split('\\n'):\n\t\t\t\t\t\t\tclass_doc_lines += [doc_line.strip()]\n\t\t\t\t\t# Detect class methods\n\t\t\t\t\tif type(obj)==ast.FunctionDef:\n\t\t\t\t\t\tlastlineno = obj.lineno\n\t\t\t\t\tfirst_class_obj = False\n\t\t\t\tself.src_info[fname][c.name] = (firstlineno,lastlineno,class_doc_lines)\n\t\t\n\t\t# return the parse-info-container\n\t\treturn self.src_info[fname]", "def main():\n create_fc(\"layers/WAPR.lyr\",\n \"TravelerInfo.gdb/HighwayAlerts\")", "def process_source(pk_type, path, file_flag, out_path, skip_lines=None, encoding=\"utf-8\"):\n if file_flag:\n process_singles(pk_type, path, out_path, skip_lines, encoding)\n else:\n process_dirs(pk_type, path, out_path, skip_lines, encoding)", "def loadSourceCatalog(self, filename):\n sourceCat = afwTable.SourceCatalog.readFits(filename)\n aliasMap = sourceCat.schema.getAliasMap()\n aliasMap.set(\"slot_ApFlux\", \"base_PsfFlux\")\n instFluxKey = 
sourceCat.schema[\"slot_ApFlux_instFlux\"].asKey()\n instFluxErrKey = sourceCat.schema[\"slot_ApFlux_instFluxErr\"].asKey()\n\n # print(\"schema=\", sourceCat.schema)\n\n # Source x,y positions are ~ (500,1500) x (500,1500)\n centroidKey = sourceCat.table.getCentroidSlot().getMeasKey()\n for src in sourceCat:\n adjCentroid = src.get(centroidKey) - lsst.geom.Extent2D(500, 500)\n src.set(centroidKey, adjCentroid)\n src.set(instFluxKey, 1000)\n src.set(instFluxErrKey, 1)\n\n # Set catalog coord\n for src in sourceCat:\n src.updateCoord(self.wcs)\n return sourceCat", "def world_feature_extract(wav_list, args):\n # define feature extractor\n feature_extractor = FeatureExtractor(\n analyzer=\"world\",\n fs=args.fs,\n shiftms=args.shiftms,\n minf0=args.minf0,\n maxf0=args.maxf0,\n fftl=args.fftl)\n\n for i, wav_name in enumerate(wav_list):\n logging.info(\"now processing %s (%d/%d)\" % (wav_name, i + 1, len(wav_list)))\n\n # load wavfile and apply low cut filter\n fs, x = wavfile.read(wav_name)\n if x.dtype != np.int16:\n logging.warning(\"wav file format is not 16 bit PCM.\")\n x = np.array(x, dtype=np.float64)\n if args.highpass_cutoff != 0:\n x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)\n\n # check sampling frequency\n if not fs == args.fs:\n logging.error(\"sampling frequency is not matched.\")\n sys.exit(1)\n\n # extract features\n f0, _, _ = feature_extractor.analyze(x)\n uv, cont_f0 = convert_to_continuos_f0(f0)\n cont_f0_lpf = low_pass_filter(cont_f0, int(1.0 / (args.shiftms * 0.001)), cutoff=20)\n codeap = feature_extractor.codeap()\n mcep = feature_extractor.mcep(dim=args.mcep_dim, alpha=args.mcep_alpha)\n\n # concatenate\n cont_f0_lpf = np.expand_dims(cont_f0_lpf, axis=-1)\n uv = np.expand_dims(uv, axis=-1)\n feats = np.concatenate([uv, cont_f0_lpf, mcep, codeap], axis=1)\n\n # save to hdf5\n hdf5name = args.hdf5dir + \"/\" + os.path.basename(wav_name).replace(\".wav\", \".h5\")\n write_hdf5(hdf5name, \"/world\", feats)\n\n # overwrite wav file\n if args.highpass_cutoff != 0 and args.save_wav:\n wavfile.write(args.wavdir + \"/\" + os.path.basename(wav_name), fs, np.int16(x))", "def run_from_file(f):\n #set defaults\n x_loops=1;max_steps=0;display_on=True;max_fps=10;garden_size=13;tako_number=20\n pop_max=40;max_width=1800;max_height=900;collect_data=True;export_all=False\n rand_nets=False;max_gen=0;genetic_mode=\"Plain\";learning_on=False\n seeds=None;garden_mode=\"Diverse Static\";family_detection=None;family_mod=0\n record_inbreeding=True;inbreed_lim=1.1;filename=\"default file\"\n hla_genes=0;binary_health=0;carrier_percentage=40;two_envs=False\n diff_envs=False;migration_rate=0;phen_pref=False\n\n \n atr_dict = {\"x_loops\": x_loops, \"max_steps\": max_steps,\n \"display_on\": display_on, \"max_fps\": max_fps,\n \"garden_size\": garden_size,\n \"tako_number\": tako_number, \"pop_max\": pop_max,\n \"max_width\": max_width, \"max_height\": max_height,\n \"collect_data\": collect_data, \"export_all\": export_all,\n \"rand_nets\": rand_nets, \"max_gen\": max_gen,\n \"genetic_mode\": genetic_mode, \"learning_on\": learning_on,\n \"seeds\": seeds, \"garden_mode\": garden_mode,\n \"family_detection\": family_detection, \"family_mod\": family_mod,\n \"record_inbreeding\": record_inbreeding,\n \"inbreed_lim\": inbreed_lim, \"filename\": filename,\n \"hla_genes\": hla_genes, \"binary_health\": binary_health,\n \"carrier_percentage\": carrier_percentage,\n \"two_envs\": two_envs, \"diff_envs\": diff_envs,\n \"migration_rate\": migration_rate, \"phen_pref\": phen_pref}\n \n ints = 
[\"x_loops\", \"max_steps\", \"garden_size\", \"tako_number\", \"pop_max\",\n \"max_width\", \"max_height\", \"max_gen\", \"hla_genes\",\n \"binary_health\", \"carrier_percentage\", \"max_fps\"]\n floats = [\"family_mod\", \"inbreed_lim\", \"migration_rate\"]\n strs = [\"genetic_mode\", \"garden_mode\", \"filename\"]\n bools = [\"display_on\", \"collect_data\", \"export_all\", \"rand_nets\",\n \"learning_on\", \"record_inbreeding\", \"two_envs\", \"diff_envs\",\n \"phen_pref\"]\n\n #then sets all user-defined settings from the file f\n with open(f) as exp_file:\n for line in exp_file:\n #comments\n if line[0] == \"#\":\n pass\n #blank line = run what we have, then continue\n #to read the file for a new set of parameters\n elif line == \"\\n\":\n run_experiment(atr_dict[\"x_loops\"], atr_dict[\"max_steps\"],\n atr_dict[\"display_on\"], atr_dict[\"max_fps\"],\n atr_dict[\"garden_size\"],\n atr_dict[\"tako_number\"], atr_dict[\"pop_max\"],\n atr_dict[\"max_width\"], atr_dict[\"max_height\"],\n atr_dict[\"collect_data\"], atr_dict[\"export_all\"],\n atr_dict[\"rand_nets\"], atr_dict[\"max_gen\"],\n atr_dict[\"genetic_mode\"],\n atr_dict[\"learning_on\"],\n atr_dict[\"seeds\"], atr_dict[\"garden_mode\"],\n atr_dict[\"family_detection\"],\n atr_dict[\"family_mod\"],\n atr_dict[\"record_inbreeding\"],\n atr_dict[\"inbreed_lim\"],\n atr_dict[\"hla_genes\"], atr_dict[\"binary_health\"],\n atr_dict[\"carrier_percentage\"],\n atr_dict[\"filename\"],\n atr_dict[\"two_envs\"],\n atr_dict[\"diff_envs\"],\n atr_dict[\"migration_rate\"],\n atr_dict[\"phen_pref\"])\n #reset defaults\n atr_dict = {\"x_loops\": x_loops, \"max_steps\": max_steps,\n \"display_on\": display_on, \"max_fps\": max_fps,\n \"garden_size\": garden_size,\n \"tako_number\": tako_number, \"pop_max\": pop_max,\n \"max_width\": max_width, \"max_height\": max_height,\n \"collect_data\": collect_data, \"export_all\": export_all,\n \"rand_nets\": rand_nets, \"max_gen\": max_gen,\n \"genetic_mode\": genetic_mode, \"learning_on\": learning_on,\n \"seeds\": seeds, \"garden_mode\": garden_mode,\n \"family_detection\": family_detection,\n \"family_mod\": family_mod,\n \"record_inbreeding\": record_inbreeding,\n \"inbreed_lim\": inbreed_lim, \"filename\": filename,\n \"hla_genes\": hla_genes, \"binary_health\": binary_health,\n \"carrier_percentage\": carrier_percentage,\n \"two_envs\": two_envs, \"diff_envs\": diff_envs,\n \"migration_rate\": migration_rate, \"phen_pref\": phen_pref}\n else:\n #get rid of newline character\n line = line[:-1]\n line = line.split(\": \")\n if line[0] in ints:\n val = int(line[1])\n elif line[0] in floats:\n val = float(line[1])\n elif line[0] in bools:\n val = True if line[1] == \"True\" else False\n elif line[0] in strs:\n val = line[1]\n elif line[0] == \"family_detection\":\n if line[1] == \"None\":\n val = None\n else:\n val = line[1]\n elif line[0] == \"seeds\":\n val = line[1].split(\" \")\n atr_dict[line[0]] = val\n #run the last one in the file\n run_experiment(atr_dict[\"x_loops\"], atr_dict[\"max_steps\"],\n atr_dict[\"display_on\"], atr_dict[\"max_fps\"],\n atr_dict[\"garden_size\"],\n atr_dict[\"tako_number\"], atr_dict[\"pop_max\"],\n atr_dict[\"max_width\"], atr_dict[\"max_height\"],\n atr_dict[\"collect_data\"], atr_dict[\"export_all\"],\n atr_dict[\"rand_nets\"], atr_dict[\"max_gen\"],\n atr_dict[\"genetic_mode\"],\n atr_dict[\"learning_on\"],\n atr_dict[\"seeds\"], atr_dict[\"garden_mode\"],\n atr_dict[\"family_detection\"],\n atr_dict[\"family_mod\"],\n atr_dict[\"record_inbreeding\"],\n 
atr_dict[\"inbreed_lim\"], atr_dict[\"hla_genes\"],\n atr_dict[\"binary_health\"], atr_dict[\"carrier_percentage\"],\n atr_dict[\"two_envs\"], atr_dict[\"diff_envs\"],\n atr_dict[\"migration_rate\"], atr_dict[\"phen_pref\"],\n atr_dict[\"filename\"])", "def postprocess_cga(lines, outfile):\n pattern = re.compile(\"^\\s*([0-9,]+)\\s+\\([ 0-9.]+%\\)\\s+Source/(\\S+):(\\S+)\\(.*\\).*$\")\n\n totalCost = 0.0\n functionTable = []\n functionMap = {}\n\n for line in lines:\n line = line.strip()\n match = pattern.match(line)\n if not match:\n continue\n\n cost = float(match.group(1).replace(\",\", \"\"))\n sourceFile = match.group(2)\n function = match.group(3)\n\n # Filter out library code we don't want to change\n if function.startswith(\"stbi__\"):\n continue\n\n totalCost += cost\n\n # Accumulate the scores from functions in multiple call chains\n if function in functionMap:\n index = functionMap[function]\n functionTable[index][1] += cost\n functionTable[index][2] += cost\n # Else add new functions to the end of the table\n else:\n functionMap[function] = len(functionTable)\n functionTable.append([function, cost, cost])\n\n # Sort the table by accumulated cost\n functionTable.sort(key=lambda x: 101.0 - x[2])\n\n for function in functionTable:\n function[2] /= totalCost\n function[2] *= 100.0\n\n with open(outfile, \"w\") as fileHandle:\n\n totals = 0.0\n for function in functionTable:\n # Omit entries less than 1% load\n if function[2] < 1:\n break\n\n totals += function[2]\n fileHandle.write(\"%5.2f%% %s\\n\" % (function[2], function[0]))\n\n fileHandle.write(\"======\\n\")\n fileHandle.write(f\"{totals:5.2f}%\\n\")", "def source_files(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___DebuggedSourceFile]:", "def pattern_matching(pattern_base, cc_pattern_base):\n papers = [os.path.join(target_folder, paper) for paper in os.listdir(target_folder) if \".xml\" in paper]\n \n for paper in papers:\n paper_text = open(paper[:paper.index('.')]+\".txt\", 'r').read()\n \n annotator = detect_change_events(paper, pattern_base, paper_text) \n annotator = detect_cause_correlation(paper_text, cc_pattern_base, annotator)\n \n # Write the annotations to file\n with open(paper[:paper.index('.')]+\".ann\", 'w') as annfile:\n for annotation in annotator.annotations:\n annfile.write(annotation+\"\\n\")", "def _map_source(source):\n for pattern, replacement in \\\n settings.REFINERY_FILE_SOURCE_MAP.iteritems():\n translated_source = re.sub(pattern, replacement, source)\n if translated_source != source:\n return translated_source\n return source", "def calib_map_gen(calib_file_dir: str, pattern='*.poni') -> list:\n fns = sorted([str(x) for x in pathlib.Path(calib_file_dir).glob(pattern)])\n rv = []\n for i, fn in enumerate(fns):\n print('{} --> {}'.format(i, fn))\n calib_md = dict(pyFAI.load(fn).getPyFAI())\n rv.append(calib_md)\n return rv", "def fetch_and_extract(self, filename):\n # type: (Text) -> None\n raise NotImplementedError(\"\")", "def run_analysis(self):\n ### skip some snapshots for testing purposes\n nskip = 199\n read_char.skip_snapshots(self.hfile, self.ifile, nskip)\n ### read in the first two steps (required for velocity related computations\n xs_old, ys_old, lx_old, ly_old, tstep_old, natoms_old = read_char.read_snapshot(self.hfile, self.ifile)\n x_old = xs_old*lx_old\n y_old = ys_old*ly_old\n xs,ys,lx,ly,tstep,natoms = read_char.read_snapshot(self.hfile, self.ifile)\n x = xs*lx\n y = ys*ly\n ### loop over all steps of the input file\n for step in 
range(nskip+1,self.nsteps-1):\n print step\n ### read in coordinates (as required)\n xs_new,ys_new,lx_new,ly_new,tstep_new,natoms_new = read_char.read_snapshot(self.hfile, self.ifile)\n x_new = xs_new*lx_new\n y_new = ys_new*ly_new\n ### compute further current per/atom quantities\n phi = misc_tools.compute_orientation(x,y,lx,ly,self.npol)\n vx,vy = misc_tools.compute_velocity(x_old,y_old, x_new, y_new, lx, ly, tstep_old, tstep_new, natoms)\n ### start desired analysis methods\n # density\n if self.density_flag:\n self.density.compute(step,x,y,lx,ly,natoms, plot = 'False')\n # number fluctuations\n if self.nf_flag:\n self.numberfluctuation.compute(step,xs,ys, plot = 'False')\n # voronoi density\n if self.voronoi_flag:\n self.voronoidensity.compute(step,x,y,lx,ly,natoms, plot = 'False')\n # velocity / worticity\n if self.velocity_flag:\n self.velocityworticity.compute(step,x,y,vx,vy,natoms,lx,ly, plot = 'False')\n # orientation / velocity\n if self.orientvel_flag:\n self.orientvel.compute(step,x,y,vx,vy,phi,natoms, plot = 'False')\n # defect points\n if self.pointdefects_flag:\n self.pointdefects.compute(step,x,y,phi,lx,ly,natoms)\n ### move coordinate arrays\n xs_old = np.copy(xs)\n ys_old = np.copy(ys)\n x_old = np.copy(x)\n y_old = np.copy(y)\n tstep_old = tstep\n xs = np.copy(xs_new)\n ys = np.copy(ys_new)\n x = np.copy(x_new)\n y = np.copy(y_new)\n tstep = tstep_new\n return", "def sources(obj, reftype):", "def main():\n parser = argparse.ArgumentParser(\n description='Compute photometry.')\n parser.add_argument('filename', metavar='filename', nargs='+',\n help='Path to one or more input files to '\n 'modify in place.')\n parser.add_argument('-n', '--new', dest='overwrite',\n action='store_false', default=True,\n help='Set to write to _new.fits file instead '\n 'of overwriting the input.')\n parser.add_argument('-l', '--loglevel', dest='loglevel', type=str,\n action='store', default='INFO',\n help='Log level.')\n parser.add_argument('-z', '--fitsize', dest='fitsize', type=int,\n action='store', default=None,\n help='Fit subimage size (pix).')\n parser.add_argument('-s', '--srcpos', dest='srcpos', type=str,\n action='store', default=None,\n help='Estimated source position (x,y).')\n parser.add_argument('-f', '--fwhm', dest='fwhm', type=float,\n action='store', default=None,\n help='Estimated FWHM (pix).')\n parser.add_argument('-p', '--profile', dest='profile', type=str,\n action='store', default='moffat',\n help='Profile function (moffat, gaussian, '\n 'or lorentzian).')\n parser.add_argument('-r', '--aprad', dest='aprad', type=float,\n action='store', default=None,\n help='Aperture radius (pix).')\n parser.add_argument('-b', '--skyrad', dest='skyrad', type=str,\n action='store', default=None,\n help='Sky radii in pix (inner,outer).')\n parser.add_argument('-u', '--raw_units', dest='runits', type=str,\n action='store', default=None,\n help='Raw data units before calibration, '\n 'to use in header comments.')\n args = parser.parse_args()\n\n if args.srcpos is not None:\n try:\n srcpos = [float(x) for x in args.srcpos.split(',')]\n if len(srcpos) != 2:\n raise ValueError\n except ValueError:\n srcpos = None\n parser.error(\"Invalid srcpos argument.\")\n else:\n srcpos = None\n if args.skyrad is not None:\n try:\n skyrad = [float(x) for x in args.skyrad.split(',')]\n if len(skyrad) != 2:\n raise ValueError\n except ValueError:\n skyrad = None\n parser.error(\"Invalid skyrad argument.\")\n else:\n skyrad = None\n\n log.setLevel(args.loglevel.upper())\n for fname in args.filename:\n 
log.info('Running: {}'.format(fname))\n pipecal_applyphot(fname, srcpos=srcpos,\n fitsize=args.fitsize, fwhm=args.fwhm,\n profile=args.profile, aprad=args.aprad,\n skyrad=skyrad, runits=args.runits,\n overwrite=args.overwrite)\n log.info('')", "def main():\n\takpPoints,chpPoints = extractSupporterCities(\"Data/PreprocessedAkpTweets.csv\",\n\t\t\t\t\t\t\t\t\t\t\t \"Data/PreprocessedChpTweets.csv\")\n\tgenerateMapPoints(akpPoints,chpPoints)\n\tgenerateCitySentimentData(akpPoints,chpPoints)\n\tgenerateChoroplethMap(\"Data/tr_cities_modified.json\",\"Data/city_ratio.csv\")", "def detail_matching(self):\n paradic = self.cfg['param']['paradic']\n work_dir = self.work_dir\n \n x = float(self.cfg['param']['x']) # selected pixel in the first image\n y = float(self.cfg['param']['y'])\n \n # sift parameters\n # number of bins in the orientation histogram\n n_bins = int(paradic['n_bins']) \n n_hist = int(paradic['n_hist']) \n # descriptor of n_hist X n_hist weighted histograms with n_ori\n n_ori = int(paradic['n_ori']) \n delta_min = float(paradic['delta_min'])\n sigma_min = float(paradic['sigma_min'])\n sigma_in = float(paradic['sigma_in'])\n lambda_ori = float(paradic['lambda_ori'])\n lambda_descr = float(paradic['lambda_descr'])\n #threshold defining reference orientations\n n_spo = int(paradic['n_spo'])\n \n # Read feature vectors from output files\n if (os.path.getsize(work_dir+'OUTmatches.txt') > 0 ):\n pairdata = find_nearest_keypoint(work_dir+'OUTmatches.txt', y, x)\n \n illustrate_pair(pairdata, n_bins, n_hist, n_ori, work_dir)\n\n \n # Read keys coordinates.\n d = 6+n_bins+n_hist*n_hist*n_ori # size of keydata inside pairdata\n v = n_hist*n_hist*n_ori\n [x1, y1, sigma1, theta1] = [float(x) for x in pairdata[0:4]]\n [o1, s1] = [float(x) for x in pairdata[4+v:4+v+2]]\n [x2a, y2a, sigma2a, theta2a] = [float(x) for x in pairdata[d:d+4]]\n [o2a, s2a] = [float(x) for x in pairdata[d+4+v:d+4+v+2]]\n [x2b, y2b, sigma2b, theta2b] = \\\n [float(x) for x in pairdata[2*d:2*d+4]]\n [o2b, s2b] = [float(x) for x in pairdata[2*d+4+v:2*d+4+v+2]]\n \n draw_one_match(pairdata,\n work_dir+'input_0.png',\n work_dir+'input_1.png',\n d,\n lambda_ori,\n lambda_descr,\n n_hist,\n work_dir+'OUTonepair.png')\n \n \n # Extract thumbnails.\n # keypoint 1 (image 1)\n print ' '.join(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n self.wait_proc(proc, timeout=self.timeout)\n \n # keypoint 2a (nearest neighbor in image 2)\n print ' '.join(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2a\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2a\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n # keypoint 2b (second nearest 
neighbor in image 2)\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2b), str(y2b), str(sigma2b), str(theta2b), str(o2b), str(s2b),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2b\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n \n return 1", "def run(self, exposure, sources):\n with self.distortionContext(exposure, sources) as bbox:\n results = self.astrometry(exposure, sources, bbox=bbox)\n\n if results.matches:\n self.refitWcs(exposure, sources, results.matches)\n\n return results", "def test_tb_full_mapping_iter_01():\n resource_path = os.path.join(os.path.dirname(__file__), \"data/\")\n gem_file = resource_path + \"tb.Human.GCA_000001405.22_gem.fasta.gem\"\n\n fastq_file_1 = resource_path + \"tb.Human.SRR1658573_1.fastq\"\n\n files = [\n gem_file,\n fastq_file_1\n ]\n\n metadata = {\n 'assembly': 'test',\n # 'enzyme_name': 'MboI',\n 'windows': ((1, 25), (1, 50), (1, 75), (1, 100))\n }\n\n gem_file = files[1]\n\n print(gem_file)\n\n tfm1 = tbFullMappingTool()\n tfm1_files, tfm1_meta = tfm1.run(files, [], metadata) # pylint: disable=unused-variable\n\n map25 = resource_path + \"tb.Human.SRR1658573_1_full_1-25.map\"\n map50 = resource_path + \"tb.Human.SRR1658573_1_full_1-50.map\"\n map75 = resource_path + \"tb.Human.SRR1658573_1_full_1-75.map\"\n map100 = resource_path + \"tb.Human.SRR1658573_1_full_1-100.map\"\n\n assert os.path.isfile(map25) is True\n assert os.path.getsize(map25) > 0\n assert os.path.isfile(map50) is True\n assert os.path.getsize(map50) > 0\n assert os.path.isfile(map75) is True\n assert os.path.getsize(map75) > 0\n assert os.path.isfile(map100) is True\n assert os.path.getsize(map100) > 0", "def _init_extractors(self):\n @self.extractors_wrapper(\"networkx\")\n def get_nx_extractor(graph):\n \"\"\"\n :param graph: networkx.Graph\n :returns: projx.nx_extractor\n \"\"\"\n return nx_xtrct.nx_extractor(\n self.extractor_json[self.extractor_name], graph\n )\n\n @self.extractors_wrapper(\"neo4j\")\n def get_neo4j_extractor(graph):\n \"\"\"\n :returns: projx.nx_extractor\n \"\"\"\n return neo4j_xtrct.neo4j_extractor(\n self.extractor_json[self.extractor_name], graph\n )\n\n @self.extractors_wrapper(\"edgelist\")\n def get_edgelist_extractor(graph):\n \"\"\"\n :returns: projx.nx_extractor\n \"\"\"\n return edgelist_xtrct.edgelist_extractor(\n self.extractor_json[self.extractor_name], graph\n )", "def test_tb_full_mapping_iter_02():\n resource_path = os.path.join(os.path.dirname(__file__), \"data/\")\n gem_file = resource_path + \"tb.Human.GCA_000001405.22_gem.fasta.gem\"\n\n fastq_file_2 = resource_path + \"tb.Human.SRR1658573_2.fastq\"\n\n files = [\n gem_file,\n fastq_file_2\n ]\n\n metadata = {\n 'assembly': 'test',\n # 'enzyme_name': 'MboI',\n 'windows': ((1, 25), (1, 50), (1, 75), (1, 100))\n }\n\n gem_file = files[1]\n\n print(gem_file)\n\n tfm2 = tbFullMappingTool()\n tfm2_files, tfm2_meta = tfm2.run(files, [], metadata) # pylint: disable=unused-variable\n\n map25 = resource_path + \"tb.Human.SRR1658573_2_full_1-25.map\"\n map50 = resource_path + \"tb.Human.SRR1658573_2_full_1-50.map\"\n map75 = resource_path + \"tb.Human.SRR1658573_2_full_1-75.map\"\n map100 = resource_path + \"tb.Human.SRR1658573_2_full_1-100.map\"\n\n assert os.path.isfile(map25) is True\n assert os.path.getsize(map25) > 0\n assert os.path.isfile(map50) is True\n assert os.path.getsize(map50) > 0\n assert os.path.isfile(map75) is True\n assert os.path.getsize(map75) > 0\n 
assert os.path.isfile(map100) is True\n assert os.path.getsize(map100) > 0", "def generate_source():\n \"\"\"their dependencies\"\"\"\n global dictionary_names, dictionary_slices\n src = \"\"\n for s in dictionary_slices:\n src += deconstruct(s)\n src += \" '\" + pointer_to_name(s)\n src += \"' define\\n\"\n return src + \"\\n\"", "def main():\n import shutil\n import json\n\n if not os.path.isdir(args.cache):\n # creation dossier cache\n os.mkdir(args.cache)\n\n if not os.path.exists(args.cache+'/overviews.json'):\n # creation fichier overviews.json a partir d'un fichier ressource\n shutil.copy2(args.overviews, args.cache+'/overviews.json')\n\n with open(args.cache+'/overviews.json') as json_overviews:\n overviews_dict = json.load(json_overviews)\n if not (\"list_OPI\" in overviews_dict):\n overviews_dict[\"list_OPI\"] = []\n\n out_raster_srs = gdal.osr.SpatialReference()\n out_raster_srs.ImportFromEPSG(overviews_dict['crs']['code'])\n conn_string = \"PG:host=\"+host+\" dbname=\"+database+\" user=\"+user+\" password=\"+password\n db_graph = gdal.OpenEx(conn_string, gdal.OF_VECTOR)\n if db_graph is None:\n raise ValueError(\"Connection to database failed\")\n list_filename = glob.glob(args.input)\n if verbose > 0:\n print(len(list_filename), \"fichier(s) a traiter\")\n\n try:\n with open(args.cache+'/cache_mtd.json', 'r') as inputfile:\n mtd = json.load(inputfile)\n except:\n mtd = {}\n\n cliche_dejaTraites = []\n for filename in list_filename:\n cliche = Path(filename).stem\n \n if (cliche in overviews_dict['list_OPI']):\n # OPI déja traitée\n cliche_dejaTraites.append(cliche)\n else:\n print('nouvelle image: ', filename)\n color = [randrange(255), randrange(255), randrange(255)]\n while (color[0] in mtd) and (color[1] in mtd[color[0]]) and (color[2] in mtd[color[0]][color[1]]):\n color = [randrange(255), randrange(255), randrange(255)]\n if color[0] not in mtd:\n mtd[color[0]] = {}\n if color[1] not in mtd[color[0]]:\n mtd[color[0]][color[1]] = {}\n mtd[color[0]][color[1]][color[2]] = cliche\n process_image(overviews_dict, db_graph, filename, color, out_raster_srs)\n # on ajout l'OPI traitée a la liste\n overviews_dict[\"list_OPI\"].append(cliche)\n\n with open(args.cache+'/cache_mtd.json', 'w') as outfile:\n json.dump(mtd, outfile)\n\n with open(args.cache+'/overviews.json', 'w') as outfile:\n json.dump(overviews_dict, outfile)\n\n print(\"\\n\", len(list_filename) - len(cliche_dejaTraites),\"/\",len(list_filename),\"OPI(s) ajoutée(s)\")\n if len(cliche_dejaTraites) > 0:\n print(cliche_dejaTraites, \"déjà traitées : OPI non recalculée(s)\")", "def __main__() :\n try :\n poly = Polyhedre(sys.argv[1])\n \n name = sys.argv[2]\n \n createAllFiles(poly, name)\n\n createAllImageFiles(poly, name)\n \n except FileNotFoundError :\n print(\"Use an existing file\")", "def run_main():\n # Matching lines against a matcher function.\n matched_lines = match_file(file_names, matcher)\n\n # Will contain data sorted by file.\n binned_data = {}\n\n # Looking through the lines that were inserted into the metrics file via the metrics component.\n for key in matched_lines:\n\n # Grabbing matched lines by the file or orgination.\n buffer = matched_lines[key]\n\n # This will contain dictionaries converted from JSON.\n data = []\n\n # Loop through the collection, appending data converted from JSON entries.\n for line in buffer:\n data.append(extract_data(line))\n\n # Sort the data by file.\n binned_data[key] = sort_data(data)\n\n # Output the final results.\n generate_statistics(binned_data)\n return 0", "def 
parse_main(self):\n self.decompiler = self.createDecompiler(self.compilerEnv)\n self.nf = IRFactory(self)\n self.currentScriptOrFn = self.nf.createScript()\n sourceStartOffset = self.decompiler.getCurrentOffset();\n self.encodedSource = None;\n self.decompiler.addToken(Token.SCRIPT);\n \n self.currentFlaggedToken = Token.EOF;\n self.syntaxErrorCount = 0;\n\n baseLineno = self.ts.getLineno(); #// line number where source starts\n\n #/* so we have something to add nodes to until\n # * we've collected all the source */\n pn = self.nf.createLeaf(Token.BLOCK);\n try:\n while(True):\n tt = self.peekToken()\n if (tt <= Token.EOF):\n break\n n = None\n \n if tt == Token.FUNCTION:\n self.consumeToken()\n try: \n n = self.function( \\\n (FunctionNode.FUNCTION_EXPRESSION,\\\n FunctionNode.FUNCTION_STATEMENT)[self.calledByCompileFunction])\n except ParserException:\n raise NotImplementedError()# should this have to raise?\n break;\n #raise NotImplementedError <- I think this is now implemented - TW\n \n else:\n n = self.statement()\n \n self.nf.addChildToBack(pn, n)\n \n \n except RuntimeError:\n # Was StackOverflowError\n raise\n # TODO: exception handling\n \n if (self.syntaxErrorCount != 0) :\n msg = str(self.syntaxErrorCount)\n #msg = ScriptRuntime.getMessage1(\"msg.got.syntax.errors\", msg);\n #throw errorReporter.runtimeError(msg, sourceURI, baseLineno,\n # null, 0);\n\n self.currentScriptOrFn.setSourceName(self.sourceURI);\n self.currentScriptOrFn.setBaseLineno(baseLineno);\n self.currentScriptOrFn.setEndLineno(self.ts.getLineno());\n\n sourceEndOffset = self.decompiler.getCurrentOffset();\n self.currentScriptOrFn.setEncodedSourceBounds(sourceStartOffset,\n sourceEndOffset);\n\n self.nf.initScript(self.currentScriptOrFn, pn);\n\n if (self.compilerEnv.isGeneratingSource()):\n self.encodedSource = self.decompiler.getEncodedSource();\n \n del self.decompiler# comment was //\"It helps GC\" \n # - can't do any harm on CPython either\n\n return self.currentScriptOrFn;" ]
[ "0.5765961", "0.5687466", "0.5643927", "0.5641508", "0.56028205", "0.5564399", "0.5480887", "0.5477202", "0.53990245", "0.53960085", "0.5374448", "0.5361827", "0.532035", "0.53024715", "0.5297381", "0.52936006", "0.52374905", "0.52320105", "0.5228987", "0.5202783", "0.5200074", "0.5184582", "0.5178871", "0.51451534", "0.51267326", "0.5111365", "0.511052", "0.5109168", "0.50874734", "0.5078105", "0.505946", "0.5047446", "0.5043495", "0.5031954", "0.5028479", "0.50279105", "0.5025863", "0.5017813", "0.5010856", "0.5002078", "0.49956533", "0.49929816", "0.49919832", "0.4977698", "0.49711236", "0.4967343", "0.4966087", "0.49651998", "0.49585643", "0.49524823", "0.49491155", "0.4944219", "0.49433994", "0.49430162", "0.49427974", "0.49414417", "0.49227723", "0.49205172", "0.49150994", "0.49118114", "0.49056485", "0.490412", "0.4896442", "0.48918238", "0.48890033", "0.48842576", "0.4882018", "0.48806953", "0.4878883", "0.48786178", "0.48757976", "0.4875461", "0.48734784", "0.4871273", "0.48699835", "0.48669788", "0.4863503", "0.48634163", "0.48576477", "0.4852751", "0.48510015", "0.48496857", "0.48466182", "0.48399785", "0.48373252", "0.4835372", "0.4827993", "0.48272973", "0.48250434", "0.48197785", "0.48165423", "0.48159474", "0.48153898", "0.48145956", "0.4812524", "0.48111418", "0.48092946", "0.48075578", "0.48075098", "0.4804608" ]
0.51135045
25
Creates a 2D histogram from data given by numpy's histogram
def hist2d(x,y,nbins = 50 ,maskval = 0,saveloc = '',labels=[],slope = 1,sloperr = 0): # Remove NANs and masked values good = where((isnan(x) == False) & (isnan(y) == False) & (x != maskval) & (y != maskval)) x = x[good] y = y[good] # Create histogram H,xedges,yedges = histogram2d(x,y,bins=nbins) # Reorient appropriately H = rot90(H) H = flipud(H) # Mask zero value bins Hmasked = ma.masked_where(H==0,H) # Find average values in y: yavgs = [] ystds = [] xposs = [] for j in range(len(xedges)-1): toavg = where((x > xedges[j]) & (x < xedges[j+1])) xpos = np.mean(x[toavg]) yavg = np.median(y[toavg]) ystd = np.std(y[toavg])/len(y[toavg]) xposs.append(xpos) yavgs.append(yavg) ystds.append(ystd) # Begin creating figure plt.figure(figsize=(12,10)) # Make histogram pixels with logscale plt.pcolormesh(xedges,yedges,Hmasked, norm = LogNorm(vmin = Hmasked.min(), vmax = Hmasked.max()), cmap = plt.get_cmap('Spectral_r')) # Create fit line x-array uplim = nmax(x)+5 dolim = nmin(x)-5 x_range = arange(dolim,uplim) # Plot fit line plt.plot(x_range,slope*x_range,color = 'royalblue',linewidth = 3,label = 'Slope = {0}, Uncertainty = {1}'.format(slope,sloperr)) # Plot average points plt.errorbar(xposs,yavgs,yerr = ystds,fmt = 'D',color='k',markersize = 5) # Set plot limits plt.xlim(dolim+5,uplim-5) plt.ylim(nmin(y),nmax(y)) # Add colourbar cbar = plt.colorbar() # Add labels if labels != []: title,xlabel,ylabel,zlabel = labels plt.xlabel(xlabel) plt.ylabel(ylabel) plt.title(title) cbar.ax.set_ylabel(zlabel) plt.legend(loc = 'best',fontsize = 15) # Save plot if saveloc != '': plt.savefig(saveloc) plt.close() # Return histogram return xedges,yedges,Hmasked
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fast_hist_2d(data, bin_edges):\n # Yes, I've tested this against histogramdd().\n xassign = np.digitize(data[:,0], bin_edges[1:-1]) \n yassign = np.digitize(data[:,1], bin_edges[1:-1])\n nbins = len(bin_edges) - 1\n flatcount = np.bincount(xassign + yassign * nbins, minlength=nbins*nbins)\n return flatcount.reshape((nbins, nbins))", "def histogram2d(xflat, yflat, xbins, ybins):\n if len(xflat) - len(yflat): raise ValueError('')\n H, X, Y = np.histogram2d(x = xflat, y = yflat, bins=(xbins, ybins), normed=True)\n\n H[np.isnan(H)] = 0.\n return X, Y, H.T", "def histogram2d(x,y, bins=10, range=None, normed=False, weights=None):\r\n from numpy import histogramdd\r\n\r\n try:\r\n N = len(bins)\r\n except TypeError:\r\n N = 1\r\n\r\n if N != 1 and N != 2:\r\n xedges = yedges = asarray(bins, float)\r\n bins = [xedges, yedges]\r\n hist, edges = histogramdd([x,y], bins, range, normed, weights)\r\n return hist, edges[0], edges[1]", "def np_histogram(data, title, bins=\"auto\"):\n figure = plt.figure()\n canvas = figure.canvas\n plt.hist(data, bins=bins)\n plt.title(title)\n\n canvas.draw()\n w, h = canvas.get_width_height()\n np_hist = np.fromstring(canvas.get_renderer().tostring_rgb(), dtype=np.uint8).reshape(h, w, 3)\n plt.close(figure)\n util.np_info(np_hist)\n return np_hist", "def histogram2d(x, y, bins=10, range=None, weights=None, density=None):\n try:\n n = len(bins)\n except TypeError:\n n = 1\n\n if n != 1 and n != 2:\n if isinstance(bins, cupy.ndarray):\n xedges = yedges = bins\n bins = [xedges, yedges]\n else:\n raise ValueError('array-like bins not supported in CuPy')\n\n hist, edges = histogramdd([x, y], bins, range, weights, density)\n return hist, edges[0], edges[1]", "def get2d(infile, histname, subdir='',verbose=False): \n\n ## 2d Histogram\n Hist = getter(infile,histname,subdir,verbose)\n\n nbinsX, nbinsY = Hist.GetNbinsX(), Hist.GetNbinsY()\n Arr = np.zeros((nbinsY,nbinsX))\n dArr = np.zeros((nbinsY,nbinsX))\n axesX = np.zeros(nbinsX)\n axesY = np.zeros(nbinsY)\n edgesX = np.zeros(nbinsX+1)\n edgesY = np.zeros(nbinsY+1)\n for j in xrange(0,nbinsX):\n axesX[j] = Hist.GetXaxis().GetBinCenter(j+1)\n edgesX[j] = Hist.GetXaxis().GetBinLowEdge(j+1)\n edgesX[nbinsX] = Hist.GetXaxis().GetBinLowEdge(nbinsX+1)\n\n for j in xrange(0,nbinsY):\n axesY[j] = Hist.GetYaxis().GetBinCenter(j+1)\n edgesY[j] = Hist.GetYaxis().GetBinLowEdge(j+1)\n edgesY[nbinsY] = Hist.GetYaxis().GetBinLowEdge(nbinsY+1)\n\n axes = [axesX, axesY]\n edges = [edgesX, edgesY]\n \n for j in xrange(0,nbinsX):\n for k in xrange(0,nbinsY):\n Arr[k,j] = Hist.GetBinContent(j+1,k+1)\n dArr[k,j] = Hist.GetBinError(j+1,k+1)\n \n return axes, edges, Arr, dArr", "def make_2d_histogram(xx, yy, dx, dy):\n i_color1 = np.round(xx/dx).astype(int)\n i_color2 = np.round(yy/dy).astype(int)\n dex_reverse = np.array([i_color1, i_color2])\n dex_arr = dex_reverse.transpose()\n # see http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array\n dex_raw = np.ascontiguousarray(dex_arr).view(np.dtype((np.void, dex_arr.dtype.itemsize*dex_arr.shape[1])))\n _, unique_rows, unique_counts = np.unique(dex_raw, return_index=True, return_counts=True)\n\n return unique_rows, unique_counts", "def histogram(data,binwidth, xmin,xmax):\n bins = arange(xmin,xmax, binwidth)\n binsc = bins + (0.5 * binwidth)\n try: #FIXME: use updated numpy.histogram\n histo = numpyhisto(data, bins, new=False)[0]\n except:\n histo = numpyhisto(data, bins)[0]\n return binsc[:len(histo)], histo", "def create_histogram(points,nums,range=None):\n if range != 
None:\n range=[range[1],range[0]]\n z, y, x = np.histogram2d(\n points[:,0],points[:,1],\n bins=np.subtract([nums[1],nums[0]],1), # convert nums to bins\n range=range\n )\n#TODO: delete\n# print points.shape\n# print np.vstack({tuple(row) for row in points}).shape\n return z, x, y", "def test_histogram2d(device):\n torch.manual_seed(0)\n\n # set up\n N = 10000\n bins = (10, 5)\n\n x_data = torch.rand(N, device=device)\n y_data = torch.rand(N, device=device)\n\n # all values inside\n epsilon = 1e-6\n _range = (\n (x_data.min().item() - epsilon, x_data.max().item() + epsilon),\n (y_data.min().item() - epsilon, y_data.max().item() + epsilon),\n )\n\n # run\n sample = torch.stack((x_data, y_data))\n\n torch_hist, _ = histogram2d(sample, bins=bins, range=_range)\n\n numpy_hist, _, _ = numpy.histogram2d(\n x_data.cpu().numpy(), y_data.cpu().numpy(), bins=bins, range=_range\n )\n\n # compare\n torch_hist = torch_hist.cpu().int()\n numpy_hist = torch.from_numpy(numpy_hist).int()\n\n wrong = 0\n for idx, (h1, h2) in enumerate(zip(torch_hist.flatten(), numpy_hist.flatten())):\n if not torch.allclose(h1, h2):\n print(f\"{idx}: {h1} ≠ {h2}\")\n wrong += 1\n\n print(f\"Mismatches: {wrong}\")\n\n print(torch_hist.flatten()[:20])\n print(numpy_hist.flatten()[:20])\n\n assert torch.allclose(torch_hist, numpy_hist)", "def _make_histogram(values, bins):\n values = values.reshape(-1)\n counts, limits = np.histogram(values, bins=bins)\n limits = limits[1:]\n\n sum_sq = values.dot(values)\n return HistogramProto(min=values.min(),\n max=values.max(),\n num=len(values),\n sum=values.sum(),\n sum_squares=sum_sq,\n bucket_limit=limits,\n bucket=counts)", "def addHistogram2D(self, name, title, n_bins_x, minimum_x, maximum_x, n_bins_y, minimum_y, maximum_y):\n\t\tself.histograms[ name ] = ROOT.TH2F(name, title, n_bins_x, minimum_x, maximum_x, n_bins_y, minimum_y, maximum_y)", "def histogram2d(x, y, bins_x, bins_y):\n # x-range\n x_max, x_min = x.max(), x.min()\n delta_x = 1 / ((x_max - x_min) / bins_x)\n # y-range\n y_max, y_min = y.max(), y.min()\n delta_y = 1 / ((y_max - y_min) / bins_y)\n # compute histogram 2d\n xy_bin = np.zeros((np.int64(bins_x), np.int64(bins_y)), dtype=np.int64)\n for t in range(len(x)):\n i = (x[t] - x_min) * delta_x\n j = (y[t] - y_min) * delta_y\n if 0 <= i < bins_x and 0 <= j < bins_y:\n xy_bin[int(i), int(j)] += 1\n return xy_bin", "def histogram2d(data1, data2, bins=None, **kwargs):\n # TODO: currently very unoptimized! 
for non-dasks\n if \"axis_names\" not in kwargs:\n if hasattr(data1, \"name\") and hasattr(data2, \"name\"):\n kwargs[\"axis_names\"] = [data1.name, data2.name]\n if not hasattr(data1, \"dask\"):\n data1 = dask.array.from_array(data1, chunks=data1.size() / 100)\n if not hasattr(data2, \"dask\"):\n data2 = dask.array.from_array(data2, chunks=data2.size() / 100)\n\n data = dask.array.stack([data1, data2], axis=1)\n kwargs[\"dim\"] = 2\n return histogramdd(data, bins, **kwargs)", "def hist(bins, y, /, axis=0):\n if bins.ndim != 1:\n raise ValueError('Bins must be 1-dimensional.')\n\n with quack._ArrayContext(y, push_right=axis) as context:\n # Get flattened data\n y = context.data\n yhist = np.empty((y.shape[0], bins.size - 1))\n\n # Take histogram\n for k in range(y.shape[0]):\n yhist[k, :] = np.histogram(y[k, :], bins=bins)[0]\n\n # Replace data\n context.replace_data(yhist)\n\n # Return unflattened data\n return context.data", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):", "def test_histogramdd(device):\n torch.manual_seed(0)\n\n # set up\n N = 1000\n bins = 20\n\n x_data = torch.rand(N, device=device)\n y_data = torch.rand(N, device=device)\n\n # all values inside\n epsilon = 1e-6\n x_edges = torch.linspace(\n x_data.min() - epsilon, x_data.max() + epsilon, steps=bins + 1, device=device\n )\n y_edges = torch.linspace(\n y_data.min() - epsilon, y_data.max() + epsilon, steps=bins + 1, device=device\n )\n\n # run\n sample = torch.stack((x_data, y_data))\n\n torch_hist, _ = histogramdd(sample, bins=(x_edges, y_edges))\n\n numpy_hist, _, _ = numpy.histogram2d(\n x_data.cpu().numpy(),\n y_data.cpu().numpy(),\n bins=(x_edges.cpu().numpy(), y_edges.cpu().numpy()),\n )\n\n # compare\n torch_hist = torch_hist.cpu().int()\n numpy_hist = torch.from_numpy(numpy_hist).int()\n\n assert torch.allclose(torch_hist, numpy_hist)", "def makeHist(data, bins, wgt=None, factor=1.0, pdf=False):\n n_arr, bins = np.histogram(data, bins, weights=wgt)\n ctr_bins = centerOfBins(bins)\n \n if pdf == True:\n n_arr = asFloat(n_arr) / (float(sum(n_arr)) * (bins[1:] - bins[:-1]))\n else:\n n_arr = asFloat(n_arr) * factor\n \n return n_arr, ctr_bins", "def test_make_histograms(self):\r\n raw_lengths = [90, 100, 110, 110, 130, 135]\r\n pre_lengths = [100, 110, 105, 130, 135]\r\n post_lengths = [130, 135]\r\n raw_hist, pre_hist, post_hist, bin_edges = \\\r\n make_histograms(raw_lengths, pre_lengths, post_lengths)\r\n assert_almost_equal(pre_hist, array([0, 2, 1, 0, 2]))\r\n assert_almost_equal(post_hist, array([0, 0, 0, 0, 2]))\r\n assert_almost_equal(bin_edges, array([90, 100, 110, 120, 130, 140]))", "def hist(data):\n\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n plt.hold(True)\n for x in xrange(len(data[:,0,0])):\n counts, edges = np.histogram(data[x,:,:],bins=100)\n centers = [(edges[i]+edges[i+1])/2.0 for i,v in enumerate(edges[:-1])]\n ax1.plot(centers,counts)\n plt.hold(False)\n\n plt.show(block=False)\n\n # return fig", "def makeHistogram(x, y):\n if not len(x) == len(y):\n raise ValueError(\n \"Cannot make a histogram unless the x and y lists are the same size.\"\n + \"len(x) == {} and len(y) == {}\".format(len(x), len(y))\n )\n n = len(x)\n xHistogram = numpy.zeros(2 * n)\n yHistogram = numpy.zeros(2 * n)\n for i in range(n):\n lower = 2 * i\n upper = 2 * i + 1\n xHistogram[lower] = x[i - 1]\n xHistogram[upper] = x[i]\n yHistogram[lower] = y[i]\n yHistogram[upper] = y[i]\n xHistogram[0] = x[0] / 2.0\n return xHistogram, yHistogram", "def make_hist(filename, density_bounds, 
temperature_bounds, bins):\n\n density_bins = np.logspace(\n np.log10(density_bounds[0]), np.log10(density_bounds[1]), bins\n )\n temperature_bins = np.logspace(\n np.log10(temperature_bounds[0]), np.log10(temperature_bounds[1]), bins\n )\n\n dens, temps, metals = get_data(filename)\n\n H, density_edges, temperature_edges = np.histogram2d(\n dens, temps, bins=[density_bins, temperature_bins], weights=metals\n )\n\n H_norm, _, _ = np.histogram2d(dens, temps, bins=[density_bins, temperature_bins])\n\n # Avoid div/0\n mask = H_norm == 0.0\n H[mask] = -25\n H_norm[mask] = 1.0\n\n return np.ma.array((H / H_norm).T, mask=mask.T), density_edges, temperature_edges", "def hist2d(ax, x, y, *args, **kwargs):\n\n extent = [[x.min(), x.max()], [y.min(), y.max()]]\n bins = 50\n color = \"k\"\n linewidths = 0.8\n\n cmap = cm.get_cmap(\"gray\")\n cmap._init()\n cmap._lut[:-3, :-1] = 0.\n cmap._lut[:-3, -1] = np.linspace(1, 0, cmap.N)\n\n X = np.linspace(extent[0][0], extent[0][1], bins + 1)\n Y = np.linspace(extent[1][0], extent[1][1], bins + 1)\n try:\n H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=(X, Y))\n except ValueError:\n raise ValueError(\"It looks like at least one of your sample columns \"\n \"have no dynamic range. You could try using the \"\n \"`extent` argument.\")\n\n # V = 1.0 - np.exp(-0.5 * np.array([1.0, 2.0, 3.0]) ** 2)\n V = 1.0 - np.exp(-0.5 * np.array([1.0, 2.0]) ** 2)\n #V = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)\n Hflat = H.flatten()\n inds = np.argsort(Hflat)[::-1]\n Hflat = Hflat[inds]\n sm = np.cumsum(Hflat)\n sm /= sm[-1]\n\n for i, v0 in enumerate(V):\n try:\n V[i] = Hflat[sm <= v0][-1]\n except:\n V[i] = Hflat[0]\n\n X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])\n X, Y = X[:-1], Y[:-1]\n\n # Plot the contours\n # ax.pcolor(X, Y, H.max() - H.T, cmap=cmap)\n ax.contour(X1, Y1, H.T, V, colors=color, linewidths=linewidths)\n\n ax.set_xlim(extent[0])\n ax.set_ylim(extent[1])", "def hist(img):\n bottom_half = img[img.shape[0]//2:,:] # 0:img.shape[0]//2 is the top half\n histogram = bottom_half.sum(axis=0) \n \n return histogram", "def calc_histogram(self, img_data):\n\n histogram = [0] * self.color_depth\n\n for w in range(img_data.shape[0]):\n for h in range(img_data.shape[1]):\n pixel = img_data[w][h]\n histogram[pixel] += 1\n\n return histogram", "def histogram_data(xydata):\r\n\r\n\twith open(xydata, \"rb\") as csvfile:\r\n\t\treader = csv.reader(csvfile, delimiter=' ')\r\n\r\n\t\tnpnts = sum(1 for row in reader)\r\n\t\tnbins = npnts/10\r\n\t\txmax=5.0;xmin=-xmax;ymax=xmax;ymin=-ymax\r\n\t\tbinwidth = 2*xmax / nbins\r\n\t\txbins = np.linspace(xmin,xmax,nbins) \r\n\t\tybins = np.linspace(ymin,ymax,nbins)\r\n\r\n\t\thistarr = np.zeros([nbins,nbins])\r\n\t\tfor row in reader:\r\n\t\t\tbin = (int(floor(row[0]/binwidth)),int(floor(row[1]/binwidth)))\r\n\t\t\thistarr[bin] += 1\r\n\t\t\r\n\treturn (histarr,xbins,ybins)", "def build_hist(concept_values: np.ndarray, num_bins: int = 100) -> np.ndarray:\n hist, _ = np.histogram(concept_values, bins=num_bins, range=(0., 1.), density=True)\n return hist", "def histogram(data, bins=50, nmb_data_to_use=None, ignored_row=0,\n col_to_read=1, output_file=None, verbose=0):\n # prepare arguments\n args = \"-x{} -c{} -b{} -V{}\" \\\n .format(ignored_row, col_to_read, bins, verbose)\n if nmb_data_to_use is not None:\n args += \"-l{}\".format(nmb_data_to_use)\n args = args.split(\" \")\n # run command\n res, msg = tisean('histogram', args, input_data=data,\n output_file=output_file)\n # return\n if msg != \"\":\n 
print(msg)\n return res", "def draw_histogram(data, # type: thelper.typedefs.ArrayType\n bins=50, # type: Optional[int]\n xlabel=\"\", # type: Optional[thelper.typedefs.LabelType]\n ylabel=\"Proportion\", # type: Optional[thelper.typedefs.LabelType]\n show=False, # type: Optional[bool]\n block=False, # type: Optional[bool]\n ): # type: (...) -> thelper.typedefs.DrawingType\n fig, ax = plt.subplots()\n ax.hist(data, density=True, bins=bins)\n if len(ylabel) > 0:\n ax.set_ylabel(ylabel)\n if len(xlabel) > 0:\n ax.set_xlabel(xlabel)\n ax.set_xlim(xmin=0)\n if show:\n fig.show()\n if block:\n plt.show(block=block)\n return fig\n plt.pause(0.5)\n return fig, ax", "def test_format_histograms_two_bins(self):\r\n self.assertEqual(format_histograms_two_bins(array([0, 1, 0, 2, 2, 3]),\r\n array(\r\n [2, 1, 0, 2, 0, 0]), array(\r\n [100, 110, 120, 130, 140, 150, 160])),\r\n \"\"\"Length\\tBefore\\tAfter\\n100\\t0\\t2\\n110\\t1\\t1\\n120\\t0\\t0\\n130\\t2\\t2\\n140\\t2\\t0\\n150\\t3\\t0\"\"\")", "def numpy_gw_hist(data, bins, scale):\n data = np.atleast_1d(data)\n bins = np.atleast_1d(bins)\n nbins, ndata = bins.size, data.size\n\n scale = np.zeros(ndata) + scale\n\n logsm_bin_matrix = np.repeat(\n bins, ndata).reshape((nbins, ndata)).astype('f4')\n data_matrix = np.tile(data, nbins).reshape((nbins, ndata)).astype('f4')\n smoothing_kernel_matrix = np.tile(\n scale, nbins).reshape((nbins, ndata)).astype('f4')\n\n cdf_matrix = norm.cdf(\n logsm_bin_matrix, loc=data_matrix, scale=smoothing_kernel_matrix)\n\n prob_bin_member = np.diff(cdf_matrix, axis=0) # Shape (nbins-1, ndata)\n\n total_num_bin_members = np.sum(\n prob_bin_member, axis=1) # Shape (nbins-1, )\n\n return total_num_bin_members", "def _create_histogram(self, histogram_data, feature):\n hist_source = self._create_histogram_source(histogram_data, feature)\n hist_plot = self._create_histogram_plot(hist_source)\n return hist_source, hist_plot", "def histogram2d(x, y, bins=10, range=None, weights=None, flow=False, cons_var=False):\n try:\n N = len(bins)\n except TypeError:\n N = 1\n\n x = np.asarray(x)\n y = np.asarray(y)\n if weights is not None:\n weights = np.asarray(weights)\n if N != 1 and N != 2:\n bins = np.asarray(bins)\n return var2d(x, y, bins, bins, weights=weights, flow=flow, cons_var=cons_var)\n\n elif N == 1:\n return fix2d(\n x,\n y,\n bins=bins,\n range=range,\n weights=weights,\n flow=flow,\n cons_var=cons_var,\n )\n\n elif N == 2:\n if isinstance(bins[0], int) and isinstance(bins[1], int):\n return fix2d(x, y, bins=bins, range=range, weights=weights, flow=flow)\n else:\n b1 = np.asarray(bins[0])\n b2 = np.asarray(bins[1])\n return var2d(x, y, b1, b2, weights=weights, flow=flow)\n\n else:\n raise ValueError(\"bins argument is not compatible\")", "def h3(data, bins=None, **kwargs):\n return histogramdd(data, bins, **kwargs)", "def get_test_histograms2():\n # dummy dataset with mixed types\n # convert timestamp (col D) to nanosec since 1970-1-1\n import pandas as pd\n import histogrammar as hg\n\n df = pd.util.testing.makeMixedDataFrame()\n\n # building 1d-, 2d-histogram (iteratively)\n hist1 = hg.Categorize(unit('C'))\n hist2 = hg.Bin(5, 0, 5, unit('A'), value=hist1)\n hist3 = hg.Bin(5, 0, 5, unit('A'))\n hist4 = hg.Categorize(unit('C'), value=hist3)\n\n # fill them\n hist1.fill.numpy(df)\n hist2.fill.numpy(df)\n hist3.fill.numpy(df)\n hist4.fill.numpy(df)\n\n return df, hist1, hist2, hist3, hist4", "def histogram(self, data):\n histogram_keys = ['bins', 'color', 'alpha', 'label']\n histogram_config = 
self.config.filter(histogram_keys, prefix='histogram_')\n\n _, _, bar = self.ax.hist(data, **histogram_config)\n\n return [bar]", "def make_histograms(lengths, binwidth=10):\r\n min_len = min(lengths)\r\n max_len = max(lengths)\r\n floor = (min_len / binwidth) * binwidth\r\n ceil = ((max_len / binwidth) + 2) * binwidth\r\n bins = arange(floor, ceil, binwidth)\r\n hist, bin_edges = histogram(lengths, bins)\r\n return hist, bin_edges", "def histogram2d(self, cond1, cond2, numbins1, numbins2, min1=None, max1=None, min2=None, max2=None):\n colname1 = \"axs_hist_col1\"\n colname2 = \"axs_hist_col2\"\n res = self._df.select(cond1.alias(colname1), cond2.alias(colname2))\n\n if min1 is None or max1 is None or min2 is None or max2 is None:\n mm = res.select(F.min(res[colname1]).alias(\"min1\"), F.max(res[colname1]).alias(\"max1\"),\n F.min(res[colname2]).alias(\"min2\"), F.max(res[colname2]).alias(\"max2\")).\\\n collect()\n (min1, max1, min2, max2) = (mm[0][\"min1\"], mm[0][\"max1\"], mm[0][\"min2\"], mm[0][\"max2\"])\n\n rng1 = float(max1 - min1)\n rng2 = float(max2 - min2)\n step1 = rng1 / numbins1\n step2 = rng2 / numbins2\n\n hist2d = res.withColumn(\"bin1\", ((res[colname1]-min1)/step1).cast(\"int\")*step1+min1) \\\n .withColumn(\"bin2\", ((res[colname2]-min2)/step2).cast(\"int\")*step2+min2).\\\n groupBy(\"bin1\", \"bin2\").count()\n hist2data = hist2d.orderBy(hist2d.bin1, hist2d.bin2).collect()\n bin1 = list(map(lambda row: row.bin1, hist2data))\n bin2 = list(map(lambda row: row.bin2, hist2data))\n vals = list(map(lambda row: row[\"count\"], hist2data))\n\n x, y = np.mgrid[slice(min1, max1 + step1, step1),\n slice(min2, max2 + step2, step2)]\n z = np.zeros(x.shape)\n for b1, b2, v in zip(bin1, bin2, vals):\n z[int((b1-min1)/step1)][int((b2-min2)/step2)] = v\n return x, y, z", "def _fast_hist(data, bin_edges):\n # Yes, I've tested this against histogram().\n return np.bincount(np.digitize(data, bin_edges[1:-1]), minlength=len(bin_edges) - 1)", "def hist_bins(bins, vals):\r\n\r\n hist = zeros(len(bins))\r\n j = 0\r\n for i in vals:\r\n while bins[j] < i:\r\n j += 1\r\n hist[j] += 1\r\n\r\n return asarray(bins), hist", "def plot2dhist(xdata,ydata,cmap='binary',interpolation='nearest',\n fig=None,logscale=True,xbins=None,ybins=None,\n nbins=50,pts_only=False,**kwargs):\n\n setfig(fig)\n if pts_only:\n plt.plot(xdata,ydata,**kwargs)\n return\n\n ok = (~np.isnan(xdata) & ~np.isnan(ydata) & \n ~np.isinf(xdata) & ~np.isinf(ydata))\n if ~ok.sum() > 0:\n logging.warning('{} x values and {} y values are nan'.format(np.isnan(xdata).sum(),\n np.isnan(ydata).sum()))\n logging.warning('{} x values and {} y values are inf'.format(np.isinf(xdata).sum(),\n np.isinf(ydata).sum()))\n\n if xbins is not None and ybins is not None:\n H,xs,ys = np.histogram2d(xdata[ok],ydata[ok],bins=(xbins,ybins))\n else:\n H,xs,ys = np.histogram2d(xdata[ok],ydata[ok],bins=nbins) \n H = H.T\n\n if logscale:\n H = np.log(H)\n\n extent = [xs[0],xs[-1],ys[0],ys[-1]]\n plt.imshow(H,extent=extent,interpolation=interpolation,\n aspect='auto',cmap=cmap,origin='lower',**kwargs)", "def vis_data(data, f1,f2):\n hist = np.histogram2d(data[:, f2], data[:, f1], bins=100, range=[lims, lims])\n plt.pcolormesh(hist[1], hist[2], hist[0], alpha=0.5)", "def data_hist(xvar, yvar, datahist, nbins=95):\n hists = [datahist[j].createHistogram(\n 'hdata{0}{1}'.format(c, i),\n xvar, RooFit.Binning(nbins),\n RooFit.YVar(yvar, RooFit.Binning(nbins))\n ) for j, (i, c) in enumerate(ic)]\n return hists", "def get_histogram(self):\n\n values_array = 
np.array(self.values)\n for bin0 in range(self.bins[0].size):\n bin_inf0 = self.bins[0][bin0]\n try: bin_sup0 = self.bins[0][bin0 + 1]\n except IndexError: bin_sup0 = self.vmax[0]\n values = values_array[\n (values_array[:, 0] >= bin_inf0)\n *(values_array[:, 0] < bin_sup0)][:, 1]\n for bin1 in range(self.bins[1].size):\n bin_inf1 = self.bins[1][bin1]\n try: bin_sup1 = self.bins[1][bin1 + 1]\n except IndexError: bin_sup1 = self.vmax[1]\n self.hist[bin0*self.Nbins[1] + bin1, 2] = (\n np.sum((values >= bin_inf1)*(values < bin_sup1)))\n\n if np.sum(self.hist[:, 2]) > 0: # there are binned values\n self.hist[:, 2] /= np.sum(self.hist[:, 2])\n return self.hist", "def histogram(img):\n BINS = 8\n RANGE = np.tile(np.array([0, 255]), (3, 1))\n\n # histogram of the first image\n r = np.ravel(img[:, :, 0])\n g = np.ravel(img[:, :, 1])\n b = np.ravel(img[:, :, 2])\n hist, endpoints = np.histogramdd([r, g, b], bins = BINS, range = RANGE)\n\n # normalize the images\n return hist/np.sum(hist)", "def compute_histogram(self, image):\n\n hist = [0] * 256\n x, y = image.shape[:2]\n #print(image.shape)\n for i in range(x):\n for j in range(y):\n hist[image[i, j]] += 1\n\n return hist", "def createHistogram(data,cumulative=False,**kargs):\n y,x = histogram(data,**kargs)\n if cumulative:\n y = y.cumsum()\n return y,x", "def histograma(p):\n img = read_img(p)\n show_histograma(img.reshape((-1)))", "def h2(self, column1: Any = None, column2: Any = None, bins=None, **kwargs) -> Histogram2D:\n if self._df.shape[1] < 2:\n raise ValueError(\"At least two columns required for 2D histograms.\")\n if column1 is None and column2 is None and self._df.shape[1] == 2:\n column1, column2 = self._df.columns\n elif column1 is None or column2 is None:\n raise ValueError(\"Arguments `column1` and `column2` must be set.\")\n return cast(Histogram2D, self.histogram([column1, column2], bins=bins, **kwargs))", "def hog_histograms(*args, **kwargs): # real signature unknown\n pass", "def _make_hist(self, oned_arr):\n hist_ = np.histogram(\n a=oned_arr,\n bins=self.null_distributions_[\"histogram_bins\"],\n range=(\n np.min(self.null_distributions_[\"histogram_bins\"]),\n np.max(self.null_distributions_[\"histogram_bins\"]),\n ),\n density=False,\n )[0]\n return hist_", "def compute_histogram(image, n_bins, color_space=\"RGB\"):\n\n n_channels = 1 if color_space == \"GRAY\" else image.shape[2]\n\n hist_channels = list(range(n_channels))\n hist_bins = [n_bins,]*n_channels\n hist_range = [0, 256]*n_channels\n\n hist = cv.calcHist([image], hist_channels, None, hist_bins,\n hist_range)\n hist = cv.normalize(hist, hist, alpha=0, beta=1,\n norm_type=cv.NORM_MINMAX).flatten() # change histogram range from [0,256] to [0,1]\n return hist", "def crazy_histogram2d(x, y, bins=10, weights=None, reduce_w=None, NULL=None, reinterp=None):\n # define the bins (do anything you want here but needs edges and sizes of the 2d bins)\n try:\n nx, ny = bins\n except TypeError:\n nx = ny = bins\n\n #values you want to be reported\n if weights is None:\n weights = np.ones(x.size)\n\n if reduce_w is None:\n reduce_w = np.sum\n else:\n if not hasattr(reduce_w, '__call__'):\n raise TypeError('reduce function is not callable')\n\n # culling nans\n finite_inds = (np.isfinite(x) & np.isfinite(y) & np.isfinite(weights))\n _x = np.asarray(x)[finite_inds]\n _y = np.asarray(y)[finite_inds]\n _w = np.asarray(weights)[finite_inds]\n\n if not (len(_x) == len(_y)) & (len(_y) == len(_w)):\n raise ValueError('Shape mismatch between x, y, and weights: {}, {}, 
{}'.format(_x.shape, _y.shape, _w.shape))\n\n xmin, xmax = _x.min(), _x.max()\n ymin, ymax = _y.min(), _y.max()\n dx = (xmax - xmin) / (nx - 1.0)\n dy = (ymax - ymin) / (ny - 1.0)\n\n # Basically, this is just doing what np.digitize does with one less copy\n xyi = np.vstack((_x, _y)).T\n xyi -= [xmin, ymin]\n xyi /= [dx, dy]\n xyi = np.floor(xyi, xyi).T\n\n #xyi contains the bins of each point as a 2d array [(xi,yi)]\n\n d = {}\n for e, k in enumerate(xyi.T):\n key = (k[0], k[1])\n\n if key in d:\n d[key].append(_w[e])\n else:\n d[key] = [_w[e]]\n\n _xyi = np.array(d.keys()).T\n _w = np.array([ reduce_w(v) for v in d.values() ])\n\n # exploit a sparse coo_matrix to build the 2D histogram...\n _grid = sparse.coo_matrix((_w, _xyi), shape=(nx, ny))\n\n if reinterp is None:\n #convert sparse to array with filled value\n ## grid.toarray() does not account for filled value\n ## sparse.coo.coo_todense() does actually add the values to the existing ones, i.e. not what we want -> brute force\n if NULL is None:\n B = _grid.toarray()\n else: # Brute force only went needed\n B = np.zeros(_grid.shape, dtype=_grid.dtype)\n B.fill(NULL)\n for (x, y, v) in zip(_grid.col, _grid.row, _grid.data):\n B[y, x] = v\n else: # reinterp\n xi = np.arange(nx, dtype=float)\n yi = np.arange(ny, dtype=float)\n B = griddata(_grid.col.astype(float), _grid.row.astype(float), _grid.data, xi, yi, interp=reinterp)\n\n return B, (xmin, xmax, ymin, ymax), (dx, dy)", "def just_histogram(*args, **kwargs):\n return np.histogram(*args, **kwargs)[0].astype(float)", "def _histogram(image,\n min,\n max,\n bins):\n\n return numpy.histogram(image, bins, (min, max))[0]", "def hist(a,bins):\n n=searchsorted(sort(a),bins)\n n=concatenate([n,[len(a)]])\n n=array(list(map(float,n)))\n# n=array(n)\n return n[1:]-n[:-1]", "def histogram(self, data, global_step, bins=1000, tag='histogram'):\n\n data = np.array(data)\n\n # Create histogram using numpy\n counts, bin_edges = np.histogram(data, bins=bins)\n\n # Fill fields of histogram proto\n hist = tf.HistogramProto()\n hist.min = float(np.min(data))\n hist.max = float(np.max(data))\n hist.num = int(np.prod(data.shape))\n hist.sum = float(np.sum(data))\n hist.sum_squares = float(np.sum(data ** 2))\n\n # Requires equal number as bins, where the first goes from -DBL_MAX to bin_edges[1]\n # See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto#L30\n # Thus, we drop the start of the first bin\n bin_edges = bin_edges[1:]\n\n # Add bin edges and counts\n for edge in bin_edges:\n hist.bucket_limit.append(edge)\n for c in counts:\n hist.bucket.append(c)\n\n # Create and write Summary\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])\n self.summary_writer.add_summary(summary, global_step)\n self.summary_writer.flush()", "def make_histograms(raw_lengths, pre_lengths, post_lengths, binwidth=10):\r\n if post_lengths:\r\n min_len = min([min(post_lengths), min(raw_lengths)])\r\n else:\r\n min_len = min(raw_lengths)\r\n max_len = max(raw_lengths)\r\n floor = (min_len / binwidth) * binwidth\r\n ceil = ((max_len / binwidth) + 2) * binwidth\r\n bins = arange(floor, ceil, binwidth)\r\n raw_hist = histogram(raw_lengths, bins)[0]\r\n pre_hist = histogram(pre_lengths, bins)[0]\r\n post_hist, bin_edges = histogram(post_lengths, bins)\r\n return raw_hist, pre_hist, post_hist, bin_edges", "def histogram(x, bins):\n hist = np.histogram(x, bins=bins)[0]\n return hist", "def crazy_histogram2d(x, y, bins=10, weights=None, reduce_w=None, NULL=None, 
reinterp=None):\n # define the bins (do anything you want here but needs edges and sizes of the 2d bins)\n try:\n nx, ny = bins\n except TypeError:\n nx = ny = bins\n\n # values you want to be reported\n if weights is None:\n weights = np.ones(x.size)\n\n if reduce_w is None:\n reduce_w = np.sum\n else:\n if not hasattr(reduce_w, '__call__'):\n raise TypeError('reduce function is not callable')\n\n # culling nans\n finite_inds = (np.isfinite(x) & np.isfinite(y) & np.isfinite(weights))\n _x = np.asarray(x)[finite_inds]\n _y = np.asarray(y)[finite_inds]\n _w = np.asarray(weights)[finite_inds]\n\n if not (len(_x) == len(_y)) & (len(_y) == len(_w)):\n raise ValueError('Shape mismatch between x, y, and weights: {}, {}, {}'.format(_x.shape, _y.shape, _w.shape))\n\n xmin, xmax = _x.min(), _x.max()\n ymin, ymax = _y.min(), _y.max()\n dx = (xmax - xmin) / (nx - 1.0)\n dy = (ymax - ymin) / (ny - 1.0)\n\n # Basically, this is just doing what np.digitize does with one less copy\n xyi = np.vstack((_x, _y)).T\n xyi -= [xmin, ymin]\n xyi /= [dx, dy]\n xyi = np.floor(xyi, xyi).T\n\n # xyi contains the bins of each point as a 2d array [(xi,yi)]\n\n d = {}\n for e, k in enumerate(xyi.T):\n key = (k[0], k[1])\n\n if key in d:\n d[key].append(_w[e])\n else:\n d[key] = [_w[e]]\n\n _xyi = np.array(d.keys()).T\n _w = np.array([ reduce_w(v) for v in d.values() ])\n\n # exploit a sparse coo_matrix to build the 2D histogram...\n _grid = sparse.coo_matrix((_w, _xyi), shape=(nx, ny))\n\n if reinterp is None:\n # convert sparse to array with filled value\n # grid.toarray() does not account for filled value\n # sparse.coo.coo_todense() does actually add the values to the existing ones, i.e. not what we want -> brute force\n if NULL is None:\n B = _grid.toarray()\n else: # Brute force only went needed\n B = np.zeros(_grid.shape, dtype=_grid.dtype)\n B.fill(NULL)\n for (x, y, v) in zip(_grid.col, _grid.row, _grid.data):\n B[y, x] = v\n else: # reinterp\n xi = np.arange(nx, dtype=float)\n yi = np.arange(ny, dtype=float)\n B = griddata(_grid.col.astype(float), _grid.row.astype(float), _grid.data, xi, yi, interp=reinterp)\n\n return B, (xmin, xmax, ymin, ymax), (dx, dy)", "def make_histogram(outpath, plotdata_y, bins=None, color='red',\n xlabel='', ylabel='', x_range=None):\n if bins is None:\n bins = get_optimum_bins(plotdata_y)\n pyplot.hist(plotdata_y, bins=bins, color=color, range=x_range)\n pyplot.grid(True, which='major', linestyle='-')\n pyplot.grid(True, which='minor')\n pyplot.xlabel(xlabel, fontsize=20)\n pyplot.ylabel(ylabel, fontsize=20)\n pyplot.tick_params(axis='both', which='major', labelsize=16)\n pyplot.tick_params(axis='both', which='minor', labelsize=8)\n pyplot.tight_layout()\n pyplot.savefig(outpath)\n pyplot.close()\n return outpath", "def histograms(probs, actual, bins=100):\n actual = actual.astype(np.bool)\n edges, step = np.linspace(0., 1., bins, retstep=True, endpoint=False)\n idx = np.digitize(probs, edges) - 1\n top = np.bincount(idx, weights=actual, minlength=bins)\n bot = np.bincount(idx, weights=(~actual), minlength=bins)\n return top, bot, edges, step", "def add_histogram(self, tag, values, global_step=None, bins='tensorflow'):\n values = make_np(values)\n self.vis.histogram(make_np(values), opts={'title': tag})", "def bin_data(x, N_bins=100, xmin=0.0, xmax=1.0, density=False):\n\n hist_y, hist_edges = np.histogram(x, bins=N_bins, range=(xmin, xmax), density=density)\n hist_x = 0.5 * (hist_edges[1:] + hist_edges[:-1])\n hist_sy = np.sqrt(hist_y)\n hist_mask = hist_y > 0\n\n return hist_x, 
hist_y, hist_sy, hist_mask", "def np_rgb_b_histogram(rgb):\n hist = np_rgb_channel_histogram(rgb, 2, \"B\")\n return hist", "def get3d(infile, histname, subdir='',verbose=False): \n\n ## 2d Histogram\n Hist = getter(infile,histname,subdir,verbose)\n\n nbinsX, nbinsY, nbinsZ = Hist.GetNbinsX(), Hist.GetNbinsY(), Hist.GetNbinsZ()\n Arr = np.zeros((nbinsZ,nbinsY,nbinsX))\n dArr = np.zeros((nbinsZ,nbinsY,nbinsX))\n axesX = np.zeros(nbinsX)\n axesY = np.zeros(nbinsY)\n axesZ = np.zeros(nbinsZ)\n edgesX = np.zeros(nbinsX+1)\n edgesY = np.zeros(nbinsY+1)\n edgesZ = np.zeros(nbinsZ+1)\n for j in xrange(0,nbinsX):\n axesX[j] = Hist.GetXaxis().GetBinCenter(j+1)\n edgesX[j] = Hist.GetXaxis().GetBinLowEdge(j+1)\n edgesX[nbinsX] = Hist.GetXaxis().GetBinLowEdge(nbinsX+1)\n\n for j in xrange(0,nbinsY):\n axesY[j] = Hist.GetYaxis().GetBinCenter(j+1)\n edgesY[j] = Hist.GetYaxis().GetBinLowEdge(j+1)\n edgesY[nbinsY] = Hist.GetYaxis().GetBinLowEdge(nbinsY+1)\n\n for j in xrange(0,nbinsZ):\n axesZ[j] = Hist.GetZaxis().GetBinCenter(j+1)\n edgesZ[j] = Hist.GetZaxis().GetBinLowEdge(j+1)\n edgesZ[nbinsZ] = Hist.GetZaxis().GetBinLowEdge(nbinsZ+1)\n\n axes = [axesX, axesY, axesZ]\n edges = [edgesX, edgesY, edgesZ]\n \n for j in xrange(0,nbinsX):\n for k in xrange(0,nbinsY):\n for l in xrange(0,nbinsZ):\n Arr[l,k,j] = Hist.GetBinContent(j+1,k+1,l+1)\n dArr[l,k,j] = Hist.GetBinError(j+1,k+1,l+1)\n \n return axes, edges, Arr, dArr", "def histogram(\n *args,\n bins=None,\n range=None,\n dim=None,\n weights=None,\n density=False,\n block_size=\"auto\",\n keep_coords=False,\n bin_dim_suffix=\"_bin\",\n):\n\n args = list(args)\n N_args = len(args)\n\n # TODO: allow list of weights as well\n N_weights = 1 if weights is not None else 0\n\n for a in args:\n if not isinstance(a, xr.DataArray):\n raise TypeError(\n \"xhistogram.xarray.histogram accepts only xarray.DataArray \"\n + f\"objects but a {type(a).__name__} was provided\"\n )\n\n for a in args:\n assert a.name is not None, \"all arrays must have a name\"\n\n # we drop coords to simplify alignment\n if not keep_coords:\n args = [da.reset_coords(drop=True) for da in args]\n if N_weights:\n args += [weights.reset_coords(drop=True)]\n # explicitly broadcast so we understand what is going into apply_ufunc\n # (apply_ufunc might be doing this by itself again)\n args = list(xr.align(*args, join=\"exact\"))\n\n # what happens if we skip this?\n # args = list(xr.broadcast(*args))\n a0 = args[0]\n a_coords = a0.coords\n\n # roll our own broadcasting\n # now manually expand the arrays\n all_dims = [d for a in args for d in a.dims]\n all_dims_ordered = list(OrderedDict.fromkeys(all_dims))\n args_expanded = []\n for a in args:\n expand_keys = [d for d in all_dims_ordered if d not in a.dims]\n a_expanded = a.expand_dims({k: 1 for k in expand_keys})\n args_expanded.append(a_expanded)\n\n # only transpose if necessary, to avoid creating unnecessary dask tasks\n args_transposed = []\n for a in args_expanded:\n if a.dims != all_dims_ordered:\n args_transposed.append(a.transpose(*all_dims_ordered))\n else:\n args.transposed.append(a)\n args_data = [a.data for a in args_transposed]\n\n if N_weights:\n weights_data = args_data.pop()\n else:\n weights_data = None\n\n if dim is not None:\n dims_to_keep = [d for d in all_dims_ordered if d not in dim]\n axis = [args_transposed[0].get_axis_num(d) for d in dim]\n else:\n dims_to_keep = []\n axis = None\n\n h_data, bins = _histogram(\n *args_data,\n weights=weights_data,\n bins=bins,\n range=range,\n axis=axis,\n density=density,\n 
block_size=block_size,\n )\n\n # create output dims\n new_dims = [a.name + bin_dim_suffix for a in args[:N_args]]\n output_dims = dims_to_keep + new_dims\n\n # create new coords\n bin_centers = [0.5 * (bin[:-1] + bin[1:]) for bin in bins]\n new_coords = {\n name: ((name,), bin_center, a.attrs)\n for name, bin_center, a in zip(new_dims, bin_centers, args)\n }\n\n # old coords associated with dims\n old_dim_coords = {name: a0[name] for name in dims_to_keep if name in a_coords}\n\n all_coords = {}\n all_coords.update(old_dim_coords)\n all_coords.update(new_coords)\n # add compatible coords\n if keep_coords:\n for c in a_coords:\n if c not in all_coords and set(a0[c].dims).issubset(output_dims):\n all_coords[c] = a0[c]\n\n output_name = \"_\".join([\"histogram\"] + [a.name for a in args[:N_args]])\n\n da_out = xr.DataArray(h_data, dims=output_dims, coords=all_coords, name=output_name)\n\n return da_out\n\n # we need weights to be passed through apply_func's alignment algorithm,\n # so we include it as an arg, so we create a wrapper function to do so\n # this feels like a hack\n # def _histogram_wrapped(*args, **kwargs):\n # alist = list(args)\n # weights = [alist.pop() for n in _range(N_weights)]\n # if N_weights == 0:\n # weights = None\n # elif N_weights == 1:\n # weights = weights[0] # squeeze\n # return _histogram(*alist, weights=weights, **kwargs)", "def histograms(self, *args, **kwargs):\n return _image.image_histograms(self, *args, **kwargs)", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def get_extended_hist(img, sid_bin_edges):\n extended_bin_edges = np.append(sid_bin_edges.numpy(), float('inf'))\n img_hist, _ = np.histogram(img, bins=extended_bin_edges)\n return img_hist", "def hist2d(ax, x, y, sigs=[1], color=\"k\", pcolor=\"grey\", *args, **kwargs):\n\n extent = kwargs.get(\"extent\", None)\n if extent is None:\n extent = [[x.min(), x.max()], [y.min(), y.max()]]\n\n bins = 45\n linewidths = 0.8\n\n # Instead of this, create a color map with the peak color.\n\n if pcolor != \"grey\":\n # print(pcolor)\n r,g,b = pcolor\n # print(r, g, b)\n\n # Make our custom intensity scale\n dict_cmap = {'red':[(0.0, r, r),\n (1.0, 1.0, 1.0)],\n\n 'green': [(0.0, g, g),\n (1.0, 1.0, 1.0)],\n\n 'blue': [(0.0, b, b),\n (1.0, 1.0, 
1.0)]}\n\n cmap = LSC(\"new\", dict_cmap)\n else:\n cmap = cm.get_cmap(\"gray\")\n\n cmap._init()\n\n # The only thing he's changing here is the alpha interpolator, I think\n\n # He's saying that we will have everything be black, and change alpha from 1 to 0.0\n\n # cmap._lut[:-3, :-1] = 0.\n cmap._lut[:-3, -1] = np.linspace(1, 0, cmap.N)\n\n # N is the number of levels in the colormap\n # Dunno what _lut is\n # look up table\n # Is he setting everything below some value to 0?\n\n\n X = np.linspace(extent[0][0], extent[0][1], bins + 1)\n # Y = np.linspace(extent[1][0], extent[1][1], bins + 1)\n Y = np.logspace(np.log10(extent[1][0]), np.log10(extent[1][1]), bins + 1)\n\n try:\n H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=(X, Y))\n except ValueError:\n raise ValueError(\"It looks like at least one of your sample columns \"\n \"have no dynamic range. You could try using the \"\n \"`extent` argument.\")\n\n # V = 1.0 - np.exp(-0.5 * np.array([1.0, 2.0, 3.0]) ** 2)\n V = 1.0 - np.exp(-0.5 * np.array(sigs) ** 2)\n #V = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)\n Hflat = H.flatten()\n inds = np.argsort(Hflat)[::-1]\n Hflat = Hflat[inds]\n sm = np.cumsum(Hflat)\n sm /= sm[-1]\n\n for i, v0 in enumerate(V):\n try:\n V[i] = Hflat[sm <= v0][-1]\n except:\n V[i] = Hflat[0]\n\n X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])\n X, Y = X[:-1], Y[:-1]\n\n # Plot the contours\n ax.pcolor(X, Y, H.max() - H.T, cmap=cmap)\n ax.contour(X1, Y1, H.T, V, colors=color, linewidths=linewidths)\n\n # ax.set_xlim(extent[0])\n # ax.set_ylim(extent[1])", "def histograms(data, xlabel=None, ylabel=None, title=None):\n mpl_fig = plt.figure()\n plt.hist(data)\n if xlabel:\n plt.set_xlabel(xlabel)\n if ylabel:\n plt.set_ylabel(ylabel)\n if title:\n plt.set_title(title)\n return get_div_from_data(mpl_fig)", "def OF1_CalculateRawHistogram(image):\n h = np.zeros(256, np.float_)\n for i in np.nditer(image):\n h[i - 1] = h[i - 1] + 1\n\n return h", "def push_histogram(self, data):\n # Loop through bands of this tile\n for i, dat in enumerate(data):\n # Create histogram for new data with the same bins\n new_hist = numpy.histogram(dat['data'], bins=self.hist_bins[i])\n # Add counts of this tile to band metadata histogram\n self.hist_values[i] += new_hist[0]", "def color_hist(im, col_bins):\n assert im.ndim == 3 and im.shape[2] == 3, \"image should be rgb\"\n arr = np.concatenate((im, color.rgb2lab(im)), axis=2).reshape((-1, 6))\n desc = np.zeros((col_bins * 6,), dtype=np.float)\n for i in range(3):\n desc[i * col_bins:(i + 1) * col_bins], _ = np.histogram(\n arr[:, i], bins=col_bins, range=(0, 255))\n desc[i * col_bins:(i + 1) * col_bins] /= np.sum(\n desc[i * col_bins:(i + 1) * col_bins]) + (\n np.sum(desc[i * col_bins:(i + 1) * col_bins]) < 1e-4)\n\n # noinspection PyUnboundLocalVariable\n i += 1\n desc[i * col_bins:(i + 1) * col_bins], _ = np.histogram(\n arr[:, i], bins=col_bins, range=(0, 100))\n desc[i * col_bins:(i + 1) * col_bins] /= np.sum(\n desc[i * col_bins:(i + 1) * col_bins]) + (\n np.sum(desc[i * col_bins:(i + 1) * col_bins]) < 1e-4)\n for i in range(4, 6):\n desc[i * col_bins:(i + 1) * col_bins], _ = np.histogram(\n arr[:, i], bins=col_bins, range=(-128, 127))\n desc[i * col_bins:(i + 1) * col_bins] /= np.sum(\n desc[i * col_bins:(i + 1) * col_bins]) + (\n np.sum(desc[i * col_bins:(i + 1) * col_bins]) < 1e-4)\n return desc", "def histo ( self ,\n xbins = 20 , xmin = None , xmax = None ,\n ybins = 20 , ymin = None , ymax = None ,\n hpars = () , \n histo = None ,\n integral = False ,\n 
errors = False , \n density = False ) :\n \n \n histos = self.make_histo ( xbins = xbins , xmin = xmin , xmax = xmax ,\n ybins = ybins , ymin = ymin , ymax = ymax ,\n hpars = hpars ,\n histo = histo )\n\n # loop over the historgam bins \n for ix,iy,x,y,z in histo.items() :\n\n xv , xe = x.value() , x.error()\n yv , ye = y.value() , y.error()\n \n # value at the bin center \n c = self ( xv , yv , error = errors ) \n\n if not integral : \n histo[ix,iy] = c\n continue\n\n # integral over the bin \n v = self.integral( xv - xe , xv + xe , yv - ye , yv + ye )\n \n if errors :\n if 0 == c.cov2 () : pass\n elif 0 != c.value() and 0 != v : \n v = c * ( v / c.value() )\n \n histo[ix,iy] = v \n\n ## coovert to density historgam, if requested \n if density : histo = histo.density()\n \n return histo", "def equalise_hist(image, bin_count=256):\n # TODO: your histogram equalization code\n #define arrays\n image = img_as_ubyte(image)\n row,col = image.shape\n new_image = np.zeros((row,col),dtype='uint8') \n\n # compute the value of each grayscale,and save in image_hist \n image_hist = np.bincount(image.flatten(), minlength=(bin_count))\n\n # normalise n[]\n norm_arr = (np.cumsum(image_hist)/(image.size))*(bin_count-1)\n norm_arr = norm_arr.astype('uint8')\n \n #Compute a normalized cumulative histogram\n for x in range(row):\n for y in range(col):\n new_image[x,y] = norm_arr[image[x,y]]\n \n return new_image", "def _make_histogram(\n dict_,\n data,\n bins=25,\n show_output=False,\n figsize=(10, 6),\n fontsize=15,\n plot_title=False,\n):\n indicator = dict_[\"ESTIMATION\"][\"indicator\"]\n\n treated = data[[indicator, \"prop_score\"]][data[indicator] == 1].values\n untreated = data[[indicator, \"prop_score\"]][data[indicator] == 0].values\n\n treated = treated[:, 1].tolist()\n untreated = untreated[:, 1].tolist()\n\n # Make the histogram using a list of lists\n fig = plt.figure(figsize=figsize)\n hist = plt.hist(\n [treated, untreated],\n bins=bins,\n weights=[\n np.ones(len(treated)) / len(treated),\n np.ones(len(untreated)) / len(untreated),\n ],\n density=0,\n alpha=0.55,\n label=[\"Treated\", \"Unreated\"],\n )\n\n if show_output is True:\n plt.tick_params(axis=\"both\", labelsize=14)\n plt.legend(loc=\"upper right\", prop={\"size\": 14})\n plt.xticks(np.arange(0, 1.1, step=0.1))\n plt.grid(axis=\"y\", alpha=0.25)\n plt.xlabel(\"$P$\", fontsize=fontsize)\n plt.ylabel(\"$f(P)$\", fontsize=fontsize)\n\n if plot_title is True:\n plt.title(r\"Support of $P(\\hat{Z})$ for $D=1$ and $D=0$\")\n\n else:\n plt.close(fig)\n\n return hist, treated, untreated", "def get_2D_bins(x, y, bins, same_bins=False):\n \n # precalculated bins [np.ndarray, np.ndarray]: do nothing and return the same bins\n if isinstance(bins, list):\n if isinstance(bins[0], np.ndarray) and isinstance(bins[1], np.ndarray):\n pass\n elif 'uniform_counts' in bins:\n try:\n n = int(bins[1])\n\n bins_x = np.fromiter(\n (np.nanpercentile(x, (i / n) * 100) for i in range(1, n + 1)),\n dtype=float)\n bins_y = np.fromiter(\n (np.nanpercentile(y, (i / n) * 100) for i in range(1, n + 1)),\n dtype=float)\n bins = [bins_x, bins_y] \n except:\n raise ValueError(f\"Please define number of bins for binning method uniform_counts: bins = ['uniform_bins', n_bins]\")\n else:\n # calculate bins with np.histogram_bin_edges(), even_width option == int\n if bins in ['fd', 'doane', 'scott', 'stone', 'rice', 'sturges', 'sqrt'] or isinstance(bins, int):\n if same_bins:\n bins_xy = np.histogram_bin_edges([x, y], bins)\n bins = [bins_xy, bins_xy]\n else:\n bins_x = 
np.histogram_bin_edges(x, bins)\n bins_y = np.histogram_bin_edges(y, bins)\n bins = [bins_x, bins_y]\n elif bins == 'uniform_counts':\n raise ValueError(f\"Please define number of bins for binning method uniform_bins: bins = ['uniform_bins', n_bins]\") \n elif bins == 'unique_values':\n if same_bins:\n bins_xy = np.unique([x, y])\n bins = [bins_xy, bins_xy]\n else:\n bins_x = np.unique(x)\n bins_y = np.unique(y)\n bins = [bins_x, bins_y]\n else:\n raise ValueError(f\"Binning option {bins} not know.\")\n \n # always return bins as bin edges: [np.ndarray, np.ndarray] \n return bins", "def getHists(img,bins=50):\n hists = np.array([])\n for i in range(3):#Images are loaded as three-dimensional matrices with three channels\n hists = np.append(hists,np.histogram(img[:,:,i], bins, density = True)[0])\n return hists", "def make_2d_hist_plot(self, zvals, xbins, ybins, xlabel,\n ylabel, zlabel, xunits=None, yunits=None,\n zunits=None, cmap=None, xticks=None,\n yticks=None, xxvals=None, xyvals=None,\n yxvals=None, yyvals=None):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n if (xxvals is not None) or (xyvals is not None):\n if not ((xxvals is not None) and (xyvals is not None)):\n raise ValueError(\n \"When specifying projections, both xx and \"\n \"xy vals must be specified.\"\n )\n if (yxvals is not None) or (yxvals is not None):\n if not ((yxvals is not None) and (yyvals is not None)):\n raise ValueError(\n \"When specifying projections, both yx and \"\n \"yy vals must be specified.\"\n )\n fig, axes = plt.subplots(\n nrows=2,\n ncols=2,\n gridspec_kw={\n 'width_ratios': [4, 1],\n 'height_ratios': [1, 4],\n 'wspace': 0.025,\n 'hspace': 0.025\n }\n )\n if zlabel == 'contour':\n X, Y = np.meshgrid(xbins, ybins)\n im = axes[1, 0].contour(\n X,\n Y,\n zvals.T,\n levels=[0, 4.605],\n colors=\"k\",\n linewidths=3,\n origin=\"lower\"\n )\n else:\n if cmap is None:\n cmap = \"Blues\"\n im = axes[1, 0].pcolormesh(xbins, ybins, zvals.T, cmap=cmap)\n cax = fig.add_axes([0.15, 0.13, 0.03, 0.595])\n nice_zlabel = self.make_label(zlabel, zunits)\n cb = fig.colorbar(im, cax=cax)\n cb.set_label(\n label=nice_zlabel,\n fontsize=24\n )\n cb.ax.yaxis.set_ticks_position('left')\n cb.ax.yaxis.set_label_position('left')\n axes[0, 1].set_visible(False)\n axes[1, 0].set_xlim(xbins[0], xbins[-1])\n axes[1, 0].set_ylim(ybins[0], ybins[-1])\n axes[0, 0].plot(xxvals, xyvals)\n if zlabel == 'contour':\n axes[0, 0].set_ylim(0.0, 2.0)\n axes[0, 0].set_ylabel(\n self.tex_axis_label('delta_%s'%self.metric_name)\n )\n axes[0, 0].set_xlim(xbins[0], xbins[-1])\n axes[0, 0].tick_params(\n axis='x',\n which='both',\n bottom='off',\n top='off',\n labelbottom='off'\n )\n axes[0, 0].grid(zorder=0, linestyle='--')\n axes[1, 1].plot(yyvals, yxvals)\n if zlabel == 'contour':\n axes[1, 1].set_xlim(0.0, 2.0)\n axes[1, 1].set_xlabel(\n self.tex_axis_label('delta_%s'%self.metric_name)\n )\n axes[1, 1].set_ylim(ybins[0], ybins[-1])\n axes[1, 1].tick_params(\n axis='y',\n which='both',\n left='off',\n right='off',\n labelleft='off'\n )\n axes[1, 1].grid(zorder=0, linestyle='--')\n if xlabel is not None:\n nice_xlabel = self.make_label(xlabel, xunits)\n axes[1, 0].set_xlabel(\n nice_xlabel,\n fontsize=24\n )\n if ylabel is not None:\n nice_ylabel = self.make_label(ylabel, yunits)\n axes[1, 0].set_ylabel(\n nice_ylabel,\n fontsize=24\n )\n return axes[1, 0]\n else:\n if zlabel == 'contour':\n X, Y = np.meshgrid(xbins, ybins)\n im = plt.contour(\n X,\n Y,\n zvals.T,\n levels=[0, 4.605],\n colors=\"k\",\n linewidths=3,\n 
origin=\"lower\"\n )\n # Save contour data to a file\n contour_data = {}\n contour_data['label'] = self.make_data_label()\n contour_data['contour'] = im.allsegs[1][0]\n if self.best_fit_data is not None:\n contour_data['best_fit'] = self.best_fit_point\n contour_data['vars'] = [xlabel, ylabel]\n contour_data['color'] = 'k'\n contour_data['linestyle'] = '-'\n contour_file = \"%s_%s_2D_%s_scan_contour_data.json\"%(\n xlabel, ylabel, self.metric_name)\n to_file(\n contour_data,\n os.path.join(self.outdir, contour_file),\n warn=False\n )\n else:\n if cmap is None:\n cmap = \"Blues\"\n im = plt.pcolormesh(xbins, ybins, zvals.T, cmap=cmap)\n nice_zlabel = self.make_label(zlabel, zunits)\n plt.colorbar(im).set_label(\n label=nice_zlabel,\n fontsize=24\n )\n plt.xlim(xbins[0], xbins[-1])\n plt.ylim(ybins[0], ybins[-1])\n if xlabel is not None:\n nice_xlabel = self.make_label(xlabel, xunits)\n plt.xlabel(\n nice_xlabel,\n fontsize=24\n )\n if ylabel is not None:\n nice_ylabel = self.make_label(ylabel, yunits)\n plt.ylabel(\n nice_ylabel,\n fontsize=24\n )\n if xticks is not None:\n if len(xticks) != (len(xbins)-1):\n raise ValueError(\n \"Got %i ticks for %i bins.\"%(len(xticks), len(xbins)-1)\n )\n plt.xticks(\n np.arange(len(xticks)),\n xticks,\n rotation=45,\n horizontalalignment='right'\n )\n if yticks is not None:\n if len(yticks) != (len(ybins)-1):\n raise ValueError(\n \"Got %i ticks for %i bins.\"%(len(yticks), len(ybins)-1)\n )\n plt.yticks(\n np.arange(len(xticks)),\n yticks,\n rotation=0\n )", "def color_histogram_hsv(im, nbin=10, xmin=0, xmax=255, normalized=True):\n ndim = im.ndim\n bins = np.linspace(xmin, xmax, nbin + 1)\n hsv = matplotlib.colors.rgb_to_hsv(im / xmax) * xmax\n imhist, bin_edges = np.histogram(hsv[:, :, 0],\n bins=bins,\n density=normalized)\n imhist = imhist * np.diff(bin_edges)\n\n return imhist", "def genHistArrays(df,csname,bins=50):\n #initiate matrix which will contain values of histograms\n allpixV = np.zeros((df.shape[0],bins*3))\n #attain histograms\n hists = df['SKImage'].apply(lambda x: getHists(x,bins))\n \n #Generate column names for result dataframe\n fullnames = []\n for chs in ['CH1', 'CH2', 'CH3']:\n fullnames.extend([chs+'-'+str(j) for j in range(bins)])\n fullnames = [csname+'-'+str(j) for j in fullnames]\n \n #extract histograms\n for rowi, histArr in enumerate(hists):\n allpixV[rowi,:] = np.array(histArr).flatten()\n \n return allpixV,fullnames", "def histogramdd(sample, bins=10, range=None, weights=None, density=False):\n if isinstance(sample, cupy.ndarray):\n # Sample is an ND-array.\n if sample.ndim == 1:\n sample = sample[:, cupy.newaxis]\n nsamples, ndim = sample.shape\n else:\n sample = cupy.stack(sample, axis=-1)\n nsamples, ndim = sample.shape\n\n nbin = numpy.empty(ndim, int)\n edges = ndim * [None]\n dedges = ndim * [None]\n if weights is not None:\n weights = cupy.asarray(weights)\n\n try:\n nbins = len(bins)\n if nbins != ndim:\n raise ValueError(\n 'The dimension of bins must be equal to the dimension of the '\n ' sample x.'\n )\n except TypeError:\n # bins is an integer\n bins = ndim * [bins]\n\n # normalize the range argument\n if range is None:\n range = (None,) * ndim\n elif len(range) != ndim:\n raise ValueError('range argument must have one entry per dimension')\n\n # Create edge arrays\n for i in _range(ndim):\n if cupy.ndim(bins[i]) == 0:\n if bins[i] < 1:\n raise ValueError(\n '`bins[{}]` must be positive, when an integer'.format(i)\n )\n smin, smax = _get_outer_edges(sample[:, i], range[i])\n num = int(bins[i] + 1) # 
synchronize!\n edges[i] = cupy.linspace(smin, smax, num)\n elif cupy.ndim(bins[i]) == 1:\n if not isinstance(bins[i], cupy.ndarray):\n raise ValueError('array-like bins not supported')\n edges[i] = bins[i]\n if (edges[i][:-1] > edges[i][1:]).any(): # synchronize!\n raise ValueError(\n '`bins[{}]` must be monotonically increasing, when an '\n 'array'.format(i)\n )\n else:\n raise ValueError(\n '`bins[{}]` must be a scalar or 1d array'.format(i)\n )\n\n nbin[i] = len(edges[i]) + 1 # includes an outlier on each end\n dedges[i] = cupy.diff(edges[i])\n\n # Compute the bin number each sample falls into.\n ncount = tuple(\n # avoid cupy.digitize to work around NumPy issue gh-11022\n cupy.searchsorted(edges[i], sample[:, i], side='right')\n for i in _range(ndim)\n )\n\n # Using digitize, values that fall on an edge are put in the right bin.\n # For the rightmost bin, we want values equal to the right edge to be\n # counted in the last bin, and not as an outlier.\n for i in _range(ndim):\n # Find which points are on the rightmost edge.\n on_edge = sample[:, i] == edges[i][-1]\n # Shift these points one bin to the left.\n ncount[i][on_edge] -= 1\n\n # Compute the sample indices in the flattened histogram matrix.\n # This raises an error if the array is too large.\n xy = cupy.ravel_multi_index(ncount, nbin)\n\n # Compute the number of repetitions in xy and assign it to the\n # flattened histmat.\n hist = cupy.bincount(xy, weights, minlength=numpy.prod(nbin))\n\n # Shape into a proper matrix\n hist = hist.reshape(nbin)\n\n # This preserves the (bad) behavior observed in NumPy gh-7845, for now.\n hist = hist.astype(float) # Note: NumPy uses casting='safe' here too\n\n # Remove outliers (indices 0 and -1 for each dimension).\n core = ndim * (slice(1, -1),)\n hist = hist[core]\n\n if density:\n # calculate the probability density function\n s = hist.sum()\n for i in _range(ndim):\n shape = [1] * ndim\n shape[i] = nbin[i] - 2\n hist = hist / dedges[i].reshape(shape)\n hist /= s\n\n if any(hist.shape != numpy.asarray(nbin) - 2):\n raise RuntimeError('Internal Shape Error')\n return hist, edges", "def getHistogram(self, var, idx = None, translation = None, other = None,\\\n verbose = 1, ab = [], bins = 100, minmax = None):\n \n if idx is None: idx = np.arange(self.atoms.shape[0])\n if translation is None: translation = [0]\n if isinstance(translation, (int, np.integer)): translation = [translation]\n\n data, lbl, leg = self.getData(var = var, idx = idx, translation = translation,\\\n verbose = verbose, ab = ab, other = other)\n x = []\n y = []\n for i, item in enumerate(data):\n cnt, bin = np.histogram(item, bins = bins, range = minmax)\n x.append((bin[:-1] + bin[1:]) / 2)\n y.append(cnt)\n\n return x, y, lbl, leg", "def simetrize_3dhistogram(histogram):\n N =len(histogram)\n n_histogram = np.zeros((N,N,N))\n for i in range(N):\n for j in range(i,N):\n for k in range(j, N):\n S = histogram[i][j][k] + histogram[k][i][j] + histogram[j][k][i] + histogram[i][k][j] + histogram[j][i][k] + histogram[k][j][i]\n n_histogram[i][j][k] = S\n n_histogram[k][i][j] = S\n n_histogram[j][k][i] = S\n n_histogram[i][k][j] = S\n n_histogram[j][i][k] = S\n n_histogram[k][j][i] = S\n #a[i][j][k], a[k][i][j], a[j][k][i], a[i][k][j], a[j][i][k], a[k][j][i]\n return n_histogram", "def GetHistogram(self, label: 'unsigned char') -> \"itkHistogramD_Pointer\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUC2_GetHistogram(self, label)", "def getHistogramData(self):\n return (self.numBins, 
self.binWidth, self.minFreq, self.maxFreq)", "def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)", "def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)", "def make_histogram(points, bucket_size):\r\n return Counter(bucketize(point, bucket_size) for point in points)", "def GetHistogram(self, label: 'unsigned char') -> \"itkHistogramD_Pointer\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2IUC2_GetHistogram(self, label)", "def hist_from_images(img1: np.ndarray, img2: np.ndarray):\n\n # turn the 2D images images into 1D histograms\n hist1 = img1.reshape((np.prod(img1.shape, -1)))\n hist2 = img2.reshape((np.prod(img2.shape, -1)))\n\n return (hist1, hist2)", "def color_histogram_hsv(img, nbin=10, xmin=0, xmax=255, normalized=True):\n ndim = img.ndim\n bins = np.linspace(xmin, xmax, nbin+1)\n hsv = matplotlib.color.rgb_to_hsv(img/xmax) * xmax\n imhist, bin_edges = np.histogram(hsv[:, :, 0], bins=bins, density=normalized)\n imhist = imhist * np.diff(bin_edges)\n return imhist", "def get_test_histograms1():\n # dummy dataset with mixed types\n # convert timestamp (col D) to nanosec since 1970-1-1\n import pandas as pd\n import histogrammar as hg\n\n df = pd.util.testing.makeMixedDataFrame()\n df['date'] = df['D'].apply(to_ns)\n df['boolT'] = True\n df['boolF'] = False\n\n # building 1d-, 2d-, and 3d-histogram (iteratively)\n hist1 = hg.Categorize(unit('C'))\n hist2 = hg.Bin(5, 0, 5, unit('A'), value=hist1)\n hist3 = hg.SparselyBin(origin=pd.Timestamp('2009-01-01').value, binWidth=pd.Timedelta(days=1).value,\n quantity=unit('date'), value=hist2)\n # fill them\n hist1.fill.numpy(df)\n hist2.fill.numpy(df)\n hist3.fill.numpy(df)\n\n return df, hist1, hist2, hist3", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):\r\n pylab.hist(values, bins = numBins)\r\n pylab.xlabel(xLabel)\r\n pylab.ylabel(yLabel)\r\n if title != None:\r\n pylab.title(title)\r\n pylab.show()", "def plot_features(data: np.array)->None:\n n_rows = np.size(data, 0)\n n_cols = np.size(data, 1)\n for i in range(n_cols):\n plt.hist(data[:,i])\n plt.show()", "def hist(self, nBins, vmin1=None, vmax1=None, vmin2=None, vmax2=None):\n\n if vmin1 == None: vmin1 = self.min1\n if vmax1 == None: vmax1 = self.max1\n if vmin2 == None: vmin2 = self.min2\n if vmax2 == None: vmax2 = self.max2\n histogram = Histogram3D(nBins, (vmin1, vmin2), (vmax1, vmax2),\n log=False)\n histogram.values = np.array(list(\n zip(self.valuesArray1, self.valuesArray2)))\n\n return histogram.get_histogram()", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):\n pylab.hist(values, bins = numBins)\n pylab.xlabel(xLabel)\n pylab.ylabel(yLabel)\n if not title == None:\n pylab.title(title)\n pylab.show()" ]
[ "0.7927253", "0.71789026", "0.7173408", "0.71437126", "0.70881116", "0.7070723", "0.70457834", "0.6966444", "0.69234824", "0.68995667", "0.6850321", "0.6839149", "0.68292195", "0.6809204", "0.6803134", "0.66806215", "0.66756934", "0.66571724", "0.6654381", "0.66289043", "0.6619386", "0.6608173", "0.65912485", "0.64458555", "0.64204836", "0.64028955", "0.63899964", "0.63896006", "0.6353574", "0.6351246", "0.63414764", "0.6287997", "0.62851864", "0.62794733", "0.6274912", "0.6270379", "0.62565655", "0.6228388", "0.62032443", "0.6201778", "0.61676425", "0.61493397", "0.6145913", "0.6121803", "0.6113425", "0.60966325", "0.60867184", "0.60857236", "0.6075036", "0.6072473", "0.6061711", "0.6022794", "0.60223", "0.6007431", "0.60031664", "0.60016435", "0.5999745", "0.5991009", "0.5988331", "0.5987614", "0.5984159", "0.59814095", "0.59798807", "0.59635746", "0.5961083", "0.5956328", "0.59477466", "0.594664", "0.5943214", "0.5943214", "0.5943214", "0.59357923", "0.5918857", "0.5916699", "0.59156513", "0.5909891", "0.5902389", "0.5886933", "0.588675", "0.58839464", "0.5882767", "0.5879111", "0.586464", "0.5855232", "0.58431536", "0.583922", "0.5838707", "0.58287936", "0.58240616", "0.5822581", "0.5810791", "0.5810791", "0.58040553", "0.57992905", "0.5791844", "0.5785909", "0.5780735", "0.5772857", "0.5770442", "0.57663137", "0.576261" ]
0.0
-1
Convert a hexadecimal string with leading hash into a three item list of values between [0, 1]. E.g. 00ff00 > [0, 1, 0]
def convert_hex_to_rgb(hex_string):
    hex_string = hex_string.lstrip('#')
    return [int(hex_string[i:i + 2], 16) / 255.0 for i in (0, 2, 4)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hex_to_RGB(hex_code: str) -> list:\n\n hex_code = hex_code.lstrip('#')\n return [int(hex_code[i:i + 2], 16) for i in (0, 2, 4)]", "def hex_list(self):\r\n return [''.join(['{:02X}'.format(b) for b in data]) for data in self.buffers()]", "def _string_to_bitlist(self, data):\n l = len(data) * 8\n result = [0] * l\n pos = 0\n for ch in data:\n i = 7\n while i >= 0:\n # bit-wise operation\n if ch & (1 << i) != 0:\n result[pos] = 1\n else:\n result[pos] = 0\n pos += 1\n i -= 1\n return result", "def hex2color(s):\n hexColorPattern = re.compile(\"\\A#[a-fA-F0-9]{6}\\Z\")\n if not isinstance(s, basestring):\n raise TypeError('hex2color requires a string argument')\n if hexColorPattern.match(s) is None:\n raise ValueError('invalid hex color string \"%s\"' % s)\n return tuple([int(n, 16)/255.0 for n in (s[1:3], s[3:5], s[5:7])])", "def hex_str_to_bytes_arr(bytes_str: str) -> List[int]:\n return eval(f\"[{bytes_str}]\")", "def hx(i):\n a = hex(i)[2:]\n if len(a)<2: a = ''.join(['0',a])\n return a", "def hex_form(hash):\n final_hash = ''\n for i in range(len(hash)):\n final_hash += format(hash[i], '02x')\n return final_hash", "def _hex2rgb(c_hex):\n # Pass 16 to the integer function for change of base\n return [int(c_hex[i:i + 2], 16) for i in range(1, 6, 2)]", "def HexToRGB(hex_str):\r\n hexval = hex_str\r\n if hexval[0] == u\"#\":\r\n hexval = hexval[1:]\r\n ldiff = 6 - len(hexval)\r\n hexval += ldiff * u\"0\"\r\n # Convert hex values to integer\r\n red = int(hexval[0:2], 16)\r\n green = int(hexval[2:4], 16)\r\n blue = int(hexval[4:], 16)\r\n return [red, green, blue]", "def hex2dec_on_list(lst):\n data = []\n for i, val in enumerate(lst):\n data.append(hex2dec(val))\n return data", "def decompose_byte(data: str, nibble: bool = False) -> list:\n _bytes = int(len(sanatize_hex(data)) / 2)\n mem_size = 8\n if nibble:\n mem_size = 4\n binary_data = format(int(str(data), 16), f\"0{_bytes*8}b\")\n return [\n format(int(binary_data[mem_size * x : mem_size * (x + 1)], 2), f\"#0{int(mem_size/2)}x\")\n for x in range(0, int(len(binary_data) / mem_size))\n ]", "def hex_to_rgb(value):\n\n values = value.lstrip('#')\n lv = len(values)\n rgb = list(int(values[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))\n return rgb", "def hex_to_rgb(hex_string):\n return tuple(int(hex_string[i:i + 2], 16)\n for i in range(1, len(hex_string), 2))", "def getHexwords(msg):\n hexwords = []\n for i in range(0, len(msg), 8):\n msgBlock = msg[i:i+8]\n m = stringToHex(msgBlock)\n hexwords.append(m)\n\n last = hexwords[-1]\n hexwords[-1] += ''.join(['0'] * (16-len(last)))\n return hexwords", "def hash_reduce(ascii_array):\r\n\r\n if len(ascii_array) % 16 != 0:\r\n raise ValueError(\"Array size not equally divisible by 16.\")\r\n\r\n output = []\r\n for i in range(0, int(len(ascii_array) / 16)):\r\n\r\n val = 0\r\n for numb in ascii_array[i*16:i*16+16]:\r\n val ^= numb\r\n output.append(val)\r\n\r\n return output", "def hex2rgb(hex_value: str) -> List[int, int, int]:\n assert len(hex_value) == 7, \"Supported only HEX RGB string format!\"\n color = _hex2color(hex_value)\n _validate_color(color)\n return color", "def bin_to_nibbles(s):\n return [hti[c] for c in encode_hex(s)]", "def _parse_hex_profiles(lines: list) -> list:\n n_profiles = len(lines[0])\n return [\n \"\".join([lines[m][n][3:].strip() for m in range(16)])\n for n in range(n_profiles)\n ]", "def get_color_map_in_hex(rgb_colors):\n list_of_hex_colors = []\n # Iterating through the list of colors given\n for i in range(len(rgb_colors)):\n rgb = []\n # Iterating 
through each rgb to get them into a range of 0-255\n for j in range(3):\n num = int(rgb_colors[i][j] * 255)\n rgb.append(num)\n # Converting the rgb to hex and appending them to a new list\n list_of_hex_colors.append(rgb_to_hex(rgb))\n return list_of_hex_colors", "def string_features_hex(hexstr):\n out = dict([(x,0) for x in hexabet])\n ct = dict(Counter(hexstr.split()))\n N = len(hexstr.split())\n for k in out.keys():\n if k in ct.keys():\n out[k] += ct[k]\n out = [v[1] for v in sorted(out.iteritems(), key=lambda (k,v): k)]\n out = [float(x)/N for x in out]\n return out", "def hex_to_rgb(value):\n value = value.lstrip('#')\n hex_total_length = len(value)\n rgb_section_length = hex_total_length // 3\n return tuple(int(value[i:i + rgb_section_length], 16)\n for i in range(0, hex_total_length, rgb_section_length))", "def hex_to_rgb(self,value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def to_dense_hash(hash):\n dense_hash = []\n for i in range(0, len(hash), 16):\n block = reduce(lambda x, y: x ^ y, hash[i:i+16])\n dense_hash.append(block)\n return dense_hash", "def set24_to_list(v):\n return [x for x in range(24) if v & (1 << x)]", "def string_to_bit_array(text_string: str) -> list:\n\n array = list()\n for char in text_string:\n # Get the char value on one byte\n bin_val = Des.bin_value(char, 8)\n # Add the bits to the final list\n array.extend([int(x) for x in list(bin_val)])\n return array", "def hex_to_rgb(value):\r\n lv = len(value)\r\n out = tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))\r\n out = tuple([x/256.0 for x in out])\r\n return out", "def format_hex(self, list_converted):\n dict_hex = {10: 'A', 11: 'B', 12: 'C', 13: 'D', 14: 'E', 15: 'F'}\n list_converted = [dict_hex[n] if n in dict_hex.keys() else str(n) for n in list_converted]\n return list_converted", "def hex_to_rgb(hex_str):\n hex_str = hex_str.strip(\"#\")\n return tuple(int(hex_str[i : i + 2], 16) for i in (0, 2, 4))", "def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def StrToList(val):\n return [ord(c) for c in val]", "def hex2rgb(hexcode):\n\treturn tuple(map(ord, hexcode[1:].decode('hex')))", "def hex_to_rgba(h, alpha):\n return tuple([int(h.lstrip('#')[i:i + 2], 16) for i in (0, 2, 4)] + [alpha])", "def crcJK232(byteData):\n CRC = 0\n for b in byteData:\n CRC += b\n crc_low = CRC & 0xFF\n crc_high = (CRC >> 8) & 0xFF\n return [crc_high, crc_low]", "def string2bits(s=''):\n return [bin(ord(x))[2:].zfill(8) for x in s]", "def hex_to_rgb(hexa):\n return tuple(int(hexa[i:i+2], 16) for i in (0, 2, 4))", "def hex2rgb(hx):\n if hx[0] == '#':\n return int(hx[1:3], 16), int(hx[3:5], 16), int(hx[5:7], 16)\n else:\n return int(hx[0:2], 16), int(hx[2:4], 16), int(hx[4:6], 16)", "def render_list_as_hex(self, data):\n s = '[ '\n for c in data:\n s += '%02x ' % c\n s += ']'\n return s", "def hex2rgb( hex ):\n\n hex = hex.lstrip( '#' )\n hlen = len( hex )\n hlen3 = int( hlen / 3 )\n\n 
return np.asarray( tuple(\n int( hex[ i : i + hlen3 ], 16 ) / 255. for i in range( 0, hlen, hlen3 ) ) )", "def unH(s):\n return ''.join([chr(int(s[i:i+2],16)) for i in range(2, len(s),2)])", "def ReadHashes(self):\n len = self.ReadVarInt()\n items = []\n for i in range(0, len):\n ba = bytearray(self.ReadBytes(32))\n ba.reverse()\n items.append(ba.hex())\n return items", "def _string_to_colors(self):\n string = self.str_colors\n colors_three = [string[c:c+3] for c in range(0, len(string), 3)]\n colors_three = [list(color) for color in colors_three]\n pixels = [[ord(rgb) for rgb in color] for color in colors_three]\n return pixels", "def getpalette(data):\n\tpalette = []\n\tstring = StringIO(data)\n\twhile True:\n\t\ttry:\n\t\t\tpalette.append(unpack(\"<4B\", string.read(4)))\n\t\texcept StructError:\n\t\t\tbreak\n\treturn palette", "def hex_color(s):\n\n if s.startswith(\"#\"):\n s = s[1:]\n valid = len(s) in [1, 2, 3, 4, 6, 12] and set(s) <= set(string.hexdigits)\n if not valid:\n raise ValueError(\"colour must be 1,2,3,4,6, or 12 hex-digits\")\n\n # For the 4-bit RGB, expand to 8-bit, by repeating digits.\n if len(s) == 3:\n s = \"\".join(c + c for c in s)\n\n if len(s) in [1, 2, 4]:\n # Single grey value.\n return (int(s, 16),)\n\n if len(s) in [6, 12]:\n w = len(s) // 3\n return tuple(int(s[i : i + w], 16) for i in range(0, len(s), w))", "def from_hex(x):\n\treturn numpy.fromstring(\n\t\tbinascii.a2b_hex(x),\n\t\tdtype=numpy.uint8\n\t)", "def string_to_bit_array(text):\n array = list()\n for char in text:\n bin_val = bin_value(char, 8) # Get value of char in one byte\n array.extend([int(x) for x in list(bin_val)]) # Add the bits to the list\n return array", "def hexify_word(word):\r\n\r\n return ''.join([str(hex(ord(c))[2::]) for c in word])", "def hex_to_rgb(hex):\n hex = hex.lstrip('#')\n hlen = len(hex)\n return tuple(int(hex[i:i + hlen // 3], 16) for i in range(0, hlen, hlen // 3))", "def get_channels(hexcode):\n assert len(hexcode) in (7, 9)\n assert hexcode[0] == \"#\"\n rgb = hexcode[1:3], hexcode[3:5], hexcode[5:7], hexcode[7:]\n rgb = [int(x, 16) for x in rgb if x != \"\"]\n return np.array(rgb, dtype=np.uint8)", "def hex_to_spins(self, hex_spins):\n \n # purely alphanumeric strings only\n assert(hex_spins.isalnum())\n \n binary = '{:0{}b}'.format(int(hex_spins,16), self.size)\n spins = bitarray.bitarray(binary)\n\n return spins", "def hexify(buffer):\n return ''.join('%02x' % ord(c) for c in buffer)", "def convert_to_hex(input_string: str):\n return \" \".join([hex(ord(ch))[2:] for ch in input_string])", "def bits_list(number):\n\n # https://wiki.python.org/moin/BitManipulation\n if number == 0:\n return [0]\n else:\n # binary_literal string e.g. 
'0b101'\n binary_literal = bin(number)\n bits_string = binary_literal.lstrip('0b')\n # list comprehension\n bits = [int(bit_character) for bit_character in bits_string]\n return bits", "def hex_to_rgb(hex: str, normalize: bool = True) -> np.ndarray:\n hex = hex.lstrip('#')\n hlen = len(hex)\n rgb = np.array([int(hex[i:i + hlen // 3], 16) for i in range(0, hlen, hlen // 3)])\n if normalize is True:\n rgb = rgb / 255\n return rgb", "def splitint(ba):\n l = []\n # TODO: Endianness\n for ba in WeirdBytes.split(ba):\n l.append(ba[0] * 256 + ord(ba[1:].tobytes()))\n\n return l", "def Read2000256List(self):\n items = []\n for i in range(0, 2000):\n data = self.ReadBytes(64)\n ba = bytearray(binascii.unhexlify(data))\n ba.reverse()\n items.append(ba.hex().encode('utf-8'))\n return items", "def crc32_table() -> List[int]:\n\n table: List[int] = []\n for i in range(256):\n k: int = i\n for j in range(8):\n if k & 1:\n k ^= 0x1db710640\n k >>= 1\n table.append(k)\n return table", "def hash_load(db: Redis[bytes], short_hash: str) -> list[hash_t]:\n if len(short_hash) > 40:\n raise AttributeError(f\"hash {short_hash} has length {len(short_hash)} > 40\")\n\n data = db.zrangebylex(HASH_INDEX, f\"[{short_hash}\", \"+\")\n out = []\n for key in data:\n k = key.decode()\n if k[: len(short_hash)] == short_hash:\n out += [hash_t(k)]\n\n if len(out) == 0:\n logger.error(f\"{short_hash} not found\")\n elif len(out) > 1:\n logger.warning(f\"{len(data)} possible values for {short_hash}\")\n return out", "def int2color(x):\n # r = int(1000 * x % 255)\n # g = int(10000 * x % 255)\n # b = int(100000 * x % 255)\n x = 0 if x == 0 else int(1/x)\n b = x & 0xff\n g = (x >> 8) & 0xff\n r = (x >> 16) & 0xff\n return [r, g, b]", "def hex_to_rgb(hex_val):\n h_len = len(hex_val)\n tupl = tuple(int(hex_val[i:i + h_len // 3], 16) for i in range(0, h_len, h_len // 3))\n final = ','.join(map(str, tupl))\n return final", "def dec2hex(x):\n return hex(x)[2:]", "def unpack_varint_list(data: bytes) -> List[int]:\n result = []\n pos = 0\n while pos < len(data):\n val, n_read = unpack_varint(data[pos:])\n pos += n_read\n result.append(val)\n return result", "def convert_selection(selection):\n\n\n bits = selection.split(\",\")\n all_bits = []\n for bit in bits:\n small_bits = bit.split(\"-\")\n if len(small_bits) == 1:\n all_bits.append(int(small_bits[0]))\n else:\n range_of_bits = range(int(small_bits[0]), int(small_bits[1])+1)\n for tiny_bit in range_of_bits:\n all_bits.append(tiny_bit)\n\n return all_bits", "def add_hash(self, lst):\n self.__data = []\n num = len(lst) + 1\n self.add_data((\"\", 4, 1))\n self.add_data((\"\", 4, num))\n self.add_data((\"\", 4, num - 1))\n self.add_data((\"\", 4, 0))\n if 1 < num:\n for ii in range(num - 1):\n self.add_data((\"\", 4, ii))", "def _bytestringToValuelist(bytestring, numberOfRegisters):\n _checkInt(numberOfRegisters, minvalue=1, description='number of registers')\n numberOfBytes = _NUMBER_OF_BYTES_PER_REGISTER * numberOfRegisters\n _checkString(bytestring, 'byte string', minlength=numberOfBytes, maxlength=numberOfBytes)\n\n values = []\n for i in range(numberOfRegisters):\n offset = _NUMBER_OF_BYTES_PER_REGISTER * i\n substring = bytestring[offset : offset + _NUMBER_OF_BYTES_PER_REGISTER]\n values.append(_twoByteStringToNum(substring))\n\n return values", "def bitstr_to_hex(a):\n return hex(bitstr_to_int(a))", "def hex_to_rgb(cls, hex_value):\n hex_value = hex_value.lstrip(\"#\")\n r,g,b = tuple(int(hex_value[i:i+2], 16) for i in (0, 2 ,4))\n return (r,g,b)", "def 
get_binary_sha256_hash(hash: str) -> str:\n result = \"\"\n\n for character in hash:\n character_number = int(character, base=16)\n binary_number = bin(character_number)\n # CAVEAT: each hash character is 4 bit size since SHA256 hash is hexidecimal string, so 4 * 64 = 256 bit\n formatted_binary_number = binary_number[2:].ljust(4, \"0\")\n result += formatted_binary_number\n\n return result", "def to_bit_list(val, width=16):\n return [(1 if val & (1<<n) else 0) for n in range(width)]", "def hex_to_rgb_hash(value):\n if not value:\n value = \"#EFEFEF\"\n\n value = value.lstrip('#')\n\n if len(value) != 6:\n raise Exception(\"hex_to_rgb_hash expects hex color of length, eg #FF0000, but got %s\" % value)\n\n round(int('EF', 16) / 255.0, 2)\n\n return {\n 'red': round(int(value[:2], 16) / 255.0, 1),\n 'green': round(int(value[2:4], 16) / 255.0, 1),\n 'blue': round(int(value[4:], 16) / 255.0, 1),\n }", "def elf_hash(s):\n h = 0\n for c in s:\n h = (h << 4) + ord(c)\n t = (h & 0xF0000000)\n if t != 0:\n h = h ^ (t >> 24)\n h = h & ~t\n return h", "def contracter(s):\r\n index = 0\r\n chain = 1\r\n status = True\r\n current = ''\r\n beep = 0\r\n l=[[],[]]\r\n while status:\r\n try:\r\n int(s[0])\r\n break\r\n except ValueError:\r\n pass\r\n current = s[index]\r\n if s[index+1] == current:\r\n chain += 1\r\n else:\r\n s = s[chain:] + `chain`+current\r\n l[1].append(chain)\r\n chain = 1\r\n index = -1\r\n index += 1\r\n l[0] = s\r\n return l", "def binary_list(dec_number, width):\n bin_str = bin(dec_number)[2:].zfill(width)\n return [int(x) for x in bin_str]", "def _geohash2bits(geohash):\r\n bits = ''.join([_char2bits(c) for c in geohash])\r\n return bits", "def hex_hash(s):\n if not s:\n return '0'\n s = s.encode('utf-8')\n return '{:x}'.format(adler32(s) & 0xffffffff)", "def hexify(text):\r\n return ' '.join([hexify_word(word) for word in text.split()])", "def bitlist(n):\n return [n >> i & 1 for i in range(7,-1,-1)]", "def read_file(filename):\n with open(filename, 'r') as file_handle:\n list_of_hex_strings = file_handle.read().split('\\n')\n return list_of_hex_strings", "def convert_uint32_to_array(value):\n return [\n (value >> 0 & 0xFF),\n (value >> 8 & 0xFF),\n (value >> 16 & 0xFF),\n (value >> 24 & 0xFF)\n ]", "def get_hexcode(rgb):\n return \"#\" + \"\".join(f\"{hex(int(x))[2:]:0>2}\" for x in rgb)", "def bits(data):\n\treturn [format(ord(c),'08b') for c in data]", "def a2b_hex(string):\n\n if len(string) % 2 == 1:\n string = '0' + string\n\n try:\n return binascii.a2b_hex(string.encode('ascii'))\n except TypeError:\n raise Error('Invalid hexadecimal string')", "def part_two(data):\r\n\r\n tl = TwistList(256)\r\n\r\n ascii_codes = [ord(char) for char in data]\r\n ascii_codes += [17, 31, 73, 47, 23] #add padding\r\n\r\n for _ in range(64):\r\n for length in ascii_codes:\r\n tl.twist(int(length))\r\n\r\n return to_hex_str(hash_reduce(tl.list))", "def decode(self, s):\n i = 0\n strs = []\n while i < len(s):\n l = int(s[i:i+8], 16)\n strs.append(s[i+8:i+8+l])\n i += 8+l\n return strs", "def bin2hex(x):\n return hex(int(x, 2))[2:]", "def parse_hex_little_endian_positive(hex_string):\n hex_string = hex_string[2:len(hex_string) + 1]\n clean_string = \"\"\n\n while len(hex_string) > 0:\n\n if len(hex_string) > 1:\n cur_nybble = hex_string[\n len(hex_string) - 2:len(hex_string) + 1]\n hex_string = hex_string[0:len(hex_string) - 2]\n upper_first = cur_nybble[0].upper()\n upper_second = cur_nybble[1].upper()\n else:\n upper_first = \"0\"\n upper_second = hex_string[0].upper()\n hex_string = \"\"\n\n 
clean_string = upper_first + upper_second + clean_string\n\n if len(hex_string) > 0:\n clean_string = \" \" + clean_string\n return clean_string", "def unphred_string(phred):\n arr = [(ord(c) - 33) / 30. for c in phred]\n return arr", "def hex2rgb(hexcolors):\n hexcolors = toiter(hexcolors)\n rgb = []\n for s in hexcolors:\n s = s[len(s)-6:len(s)] # get last 6 characters\n r, g, b = s[0:2], s[2:4], s[4:6]\n r, g, b = int(r, base=16), int(g, base=16), int(b, base=16)\n rgb.append((r, g, b))\n return np.uint8(rgb)", "def bytify(binary):\n\tbytes = [0,0,0,0]\n\ti = 3\n\twhile binary:\n\n\t\tbytes[i] = binary&255\n\t\tbinary >>= 8\n\t\ti -= 1 \n\treturn bytes", "def string_to_rgb(s):\r\n orig_s = s\r\n s = s.strip()\r\n if s.startswith('#'):\r\n s = s[1:]\r\n if not len(s) == 6:\r\n raise ValueError(\"String %s doesn't look like a hex string\" % orig_s)\r\n return int(s[:2], 16), int(s[2:4], 16), int(s[4:], 16)", "def binary_encode(x: int) -> List[int]:\n return [x >> i & 1 for i in range(10)]", "def binary_encode(x: int) -> List[int]:\n return [x >> i & 1 for i in range(10)]", "def hex2rgb(cls, hex):\r\n valid_char = '#1234567890abcdef'\r\n conditions = (hex[0] == '#',\r\n len(hex) == 7,\r\n all(c in valid_char for c in hex))\r\n if not all(conditions):\r\n raise ValueError\r\n return int(hex[1:3], 16), int(hex[3:5], 16), int(hex[5:], 16)", "def hexToRed(self, hex):\n return int(hex[0:2], 16)", "def format_hex(hex):\n octets = [hex[i:i+2] for i in range(0, len(hex), 2)]\n pairs = [\" \".join(octets[i:i+2]) for i in range(0, len(octets), 2)]\n return \"\\n\".join(pairs)", "def int_list(data: bytes) -> list:\n byte_data = BytesIO(data)\n byte_list = []\n single_byte = byte_data.read(1)\n while single_byte != b\"\" and single_byte != \"\":\n single_int = byte_to_int(single_byte)\n byte_list.append(single_int)\n single_byte = byte_data.read(1)\n return byte_list", "def list_to_set24(l):\n res = 0\n for x in l: res ^= 1 << x\n return res & 0xffffff", "def int_to_bin_three_bit(matches: int) -> List[bool] :\n bin_converter = {\n 0: [False, False, False], # 000\n 1: [False, False, True], # 001\n 2: [False, True, False], # 010\n 3: [False, True, True],\n 4: [True , False, False],\n 5: [True, False, True],\n 6: [True, True, False], # 110\n 7: [True, True, True] # 111\n }\n dic_length = len(bin_converter) # how many entries in the dictionary\n matches = matches % dic_length # modulo avoids out of bounds\n return bin_converter[matches]" ]
[ "0.6689272", "0.6465506", "0.63749844", "0.63519144", "0.6346904", "0.63243943", "0.62050515", "0.61755127", "0.6154368", "0.615235", "0.61361694", "0.60561454", "0.603794", "0.6002749", "0.60014415", "0.5985288", "0.5950709", "0.5918056", "0.58994037", "0.58917654", "0.58843315", "0.5878982", "0.5856554", "0.5855643", "0.5828479", "0.58199096", "0.5814149", "0.58095413", "0.57886565", "0.57886565", "0.57886565", "0.57886565", "0.5782912", "0.5759997", "0.5747877", "0.5737635", "0.5720731", "0.57002705", "0.5684855", "0.5680797", "0.5676694", "0.5674026", "0.56651574", "0.56606853", "0.5657059", "0.56558424", "0.5652133", "0.5646923", "0.56453925", "0.5643536", "0.56280327", "0.5625714", "0.560811", "0.56068486", "0.55815804", "0.5580477", "0.55788976", "0.5572862", "0.55628544", "0.5556428", "0.55543387", "0.55271196", "0.55216664", "0.55124784", "0.5499697", "0.5497579", "0.549474", "0.5469304", "0.54674023", "0.54652256", "0.54611623", "0.5455685", "0.5454938", "0.5450757", "0.5449439", "0.54210013", "0.54174346", "0.5408693", "0.5401492", "0.53931373", "0.53927225", "0.5390401", "0.53879327", "0.53874695", "0.5385813", "0.5385383", "0.53845", "0.5380762", "0.5379553", "0.53635263", "0.5359168", "0.53589326", "0.5357871", "0.5357871", "0.5344481", "0.5341488", "0.53399265", "0.53315175", "0.5331121", "0.53293854" ]
0.625158
6
Return the element of an atom as defined in its label.
def xd_element(name):
    try:
        name = name[:2]
    except:
        pass
    try:
        covalence_radius[name]
    except:
        name = name[0]
    return name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_item_from_label(self, label):\n idx = self.labels.index(label)\n item = self[idx][0]\n return item", "def get_label(urs):\n return assign_term(urs)[1]", "def findLabel(self, label):\n return self.root._findLabel(label)", "def fromLabel(name):\n return Data.labels.index(name)", "def __getitem__(self, atom_name):\n return self.atoms_by_name[atom_name]", "def getAtom(self, atomname):\n if self.hasAtom(atomname):\n return self.atoms[atomname]\n else:\n return None", "def atom(self, atom_name, resnum, chain_id, icode=' ', alt=' ', model_num=0):\n return self.struct[model_num][chain_id][(alt, resnum, icode)][atom_name]", "def getElementSymbol(self):\n dataDict = self.__dict__\n yy = self\n while yy is not None:\n xx = yy\n yy = xx.findFirstChemAtomSet()\n \n result = xx.findFirstChemAtom().elementSymbol\n return result", "def get_equivalent_atom(self, atom):\n try:\n return self.atom_dict[atom.name]\n except KeyError:\n return None", "def getAtom(t, swipl):\n return Atom.fromTerm(t, swipl)", "def get_element(self, index):\n return self.name[index], self.label[index], self.meta[index]", "def element_by_atom_type(atom_type, verbose=False):\n matched_element = None\n\n if matched_element is None and atom_type.mass:\n matched_element = element_by_mass(\n atom_type.mass, exact=False, verbose=verbose\n )\n if matched_element is None and atom_type.name:\n matched_element = element_by_symbol(atom_type.name, verbose=verbose)\n if matched_element is None and atom_type.definition:\n matched_element = element_by_smarts_string(\n atom_type.definition, verbose=verbose\n )\n\n if matched_element is None:\n raise GMSOError(\n f\"Failed to find an element from atom type\"\n \"{atom_type} with \"\n \"properties mass: {atom_type.mass}, name:\"\n \"{atom_type.name}, and \"\n \"definition: {atom_type.definition}\"\n )\n\n return matched_element", "def label(tree):\n return tree[0]", "def extract_label(node):\n if (isinstance(node, UnaryOp) and\n isinstance(node.op, USub) and\n isinstance(node.operand, UnaryOp) and\n isinstance(node.operand.op, USub) and\n isinstance(node.operand.operand, Name)):\n return node.operand.operand.id\n else:\n return None", "def lookup_element(self, name: str) -> ElementNSEntry:\n for i, scope in enumerate(reversed(self.element_ns_stack)):\n if name in scope:\n el, parent_def = scope[name]\n if i == 0:\n # Return anything from local namespace\n return (el, parent_def)\n elif isinstance(el, comp.Signal):\n # Signals are allowed to be found in parent namespaces\n return (el, parent_def)\n elif self.parent_parameters_visible and isinstance(el, Parameter):\n # Parameters are allowed to be found in parent namespaces,\n # except in some contexts\n return (el, parent_def)\n return (None, None)", "def index(self, atom):\n return self.atom_list.index(atom)", "def __determineElement(self, atom):\n\n\t\tc1 = atom.name[0:1]\n\t\tc2 = atom.name[1:2]\n\n\t\t# virtual atoms\n\t\tif c1 == \"V\":\n\t\t\tatom.element = \"V\"\n\t\t\tatom.radius = 0.0\n\t\t\treturn\n\n\t\tfor element in self.periodic.element_name:\n\t\t\tif c2 == element:\n\t\t\t\tatom.element = c2\n\t\t\t\tatom.radius = self.periodic.element_radius[c2]\n\t\t\t\treturn\n\n\t\tfor element in self.periodic.element_name:\n\t\t\tif c1 == element:\n\t\t\t\tatom.element = c1\n\t\t\t\tatom.radius = self.periodic.element_radius[c1]\n\t\t\t\treturn\n\n\t\tif atom.name in _molelement.keys():\n\t\t\tatom.element = _molelement[atom.name]\n\t\t\tatom.radius = self.periodic.element_radius[atom.element]", "def value_for(cls, name: str) -> 
t.Any:\n for key, value in list(cls.__labels__.items()):\n if isinstance(value, NameTitle) and value.name == name:\n return key\n return None", "def get_equivalent_atom(self, atom):\n try:\n return self.fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None", "def nodeWithLabel(self, label):\r\n for nt in self.listNodes.keys(): \t# for all kind of nodes...\r\n for node in self.listNodes[nt]: \t# for all nodes of type <nt>\r\n if node.GGLabel.getValue() == label: # check if the node's label is what we are looking for...\r\n return node # a node has been found!\r\n return None # no appropriate node has been found \r", "def get_equivalent_atom(self, atom):\n try:\n return self.chain_dict[atom.chain_id].fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None", "def get(self, seq):\n return self._get_node(seq).element", "def get_atom(self, name, alt_loc = None):\n if alt_loc:\n if self.alt_loc_dict.has_key(name):\n altloc = self.alt_loc_dict[name]\n if altloc.has_key(alt_loc):\n return altloc[alt_loc]\n return None\n else:\n if not self.atom_dict.has_key(name):\n return None\n return self.atom_dict[name]", "def get_equivalent_atom(self, atom):\n try:\n return self.model_dict[atom.model_id].chain_dict[atom.chain_id].fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None", "def get_num(elem):\n if isinstance(elem, str):\n return _find_index(elem)\n else:\n for atm in elem:\n if atm not in sym and atm[0] not in ['X', 'D']:\n raise ValueError('Unrecognized atomic symbol \\'' + atm +\n '\\'. Use X prefix for dummy atoms.')\n return np.array([_find_index(atm) for atm in elem])", "def get_label(self, name):\n label_list = self.wls_board.get_labels()\n for label in label_list:\n if name in label.name: \n return label", "def get_element_by_name(self, name):\n for e in self.E:\n if e.name == name:\n return e", "def _find_first(self, ast, label):\n res = self._find_all(ast, label, max_results=1)\n if len(res):\n return res[0]\n return None", "def getEntry(self, x):\n return self.entries[x]", "def get_element(self, index):\n return self.name[index], self.meta[index]", "def _get_label(obj):\n # NOTE: BarContainer and StemContainer are instances of tuple\n while not hasattr(obj, 'get_label') and isinstance(obj, tuple) and len(obj) > 1:\n obj = obj[-1]\n label = getattr(obj, 'get_label', lambda: None)()\n return label if label and label[:1] != '_' else None", "def get_input(label):\r\n\r\n return input(label + ' > ')", "def symbol(self) -> Optional[str]:\n if self._is_a() or self._is_label():\n return self._cur().split(\"@\")[1]", "def get_element_from_id(self, identifier):\n classification, org, rel, com = classify_id(identifier)\n if classification == id_classification.org:\n return self.get_org_question(org)\n elif classification == id_classification.rel:\n return self.get_rel_question(org, rel)\n elif classification == id_classification.com:\n return self.get_rel_comment(org, rel, com)\n return None", "def get_computed_label(self, element):\n pass", "def value_of(char: str) -> str:\n for value, c in _ELEMENTS.items():\n if char == c:\n return value\n else:\n raise AttributeError(\"No such Element: {}\".format(char))", "def first(self, name: str) -> etree.Element:\n return self.from_name(name)[0]", "def element(self):\n return self._node._element", "def get1(node: md.Document, name: str) -> md.Element | None:\n s = get(node, name)\n if s:\n return s[0]\n else:\n return None", "def element_name(argument):\n\n try:\n 
element = atomic_symbol(argument)\n name = Elements[element][\"name\"]\n except ValueError:\n raise ValueError(\"Unable to identify element in element_name\")\n except TypeError:\n raise TypeError(\"Invalid input to element_name\")\n\n return name", "def atomic_number2element_symbol(atomic_number):\n return ATOMIC_NUMBER2SYMBOL[atomic_number]", "def find_node(self, node_name):\n return self.labeled_nodes[node_name]", "def getelem(self,num):\n #return self.M.conf()['elements'][num]\n return self.lat[num]", "def element(self):\n return self.node.element", "def element(self) -> str:\n return self._particle.element", "def __getitem__(self, name_idx):\n if isinstance(name_idx, str):\n return self.atom_dict[name_idx]\n elif isinstance(name_idx, int):\n return self.atom_list[name_idx]\n raise TypeError, name_idx", "def an_element(self):\n return self.semigroup_generators()[0]", "def element(self):\n return self._node.element", "def get_label(self, ):\n return self.attrs.get(self.AttributeNames.LABEL, None)", "def get_element_by_element_name(self, element_name):\n for element in self.iterate():\n if element.get_name() == element_name:\n return element", "def get_by_label(self, label):\n # label = label.replace(\"-\", \"\") FLB: problem with - in variable\n # Check for name in all categories in self\n for category in categories:\n method = getattr(self, category)\n for entity in method():\n if label in entity.label:\n return entity\n # Check for special names\n d = {\n 'Nothing': Nothing,\n }\n if label in d:\n return d[label]\n # Check whether `label` matches a Python class name of any category\n # l = [cls for cls in itertools.chain.from_iterable(\n # getattr(self, category)() for category in categories)\n # if hasattr(cls, '__name__') and cls.__name__ == label]\n # if len(l) == 1:\n # return l[0]\n # elif len(l) > 1:\n # raise NoSuchLabelError('There is more than one Python class with '\n # 'name %r'%label)\n # # Check imported ontologies\n # for onto in self.imported_ontologies:\n # onto.__class__ = self.__class__ # magically change type of onto\n # try:\n # return onto.get_by_label(label)\n # except NoSuchLabelError:\n # pass", "def _findLabel(self, label):\n if self.label == label:\n return self\n else:\n for i in range(self.nChildren()):\n found = self.children[i]._findLabel(label)\n if found:\n return found\n return None", "def get_atom1(self):\n return self.atom1", "def get_label(self, offset):\n self.ret = idc.GetDisasm(offset).replace(\"extrn \", \"\").split(\":\")[0]\n return self.ret", "def extract_label(selector):\n return selector.split('=')[-1][:-1]", "def atom_name_or(default, molecule, index):\n try:\n return molecule.atom(index).name\n except (TypeError, IndexError):\n return default", "def an_element(self):\n return self.a_realization().an_element()", "def element(self):\n return self._node._element", "def element(self):\n return self._node._element", "def element(self):\n return self._node._element", "def element(self):\n return self._node._element", "def element(self):\n return self._node._element", "def atom(token):\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n return Symbol(token) # Equivalent to str(token)", "def element_id_by_label(browser, label):\r\n for_id = browser.find_elements_by_xpath(str('//label[contains(., \"%s\")]' %\r\n label))\r\n if not for_id:\r\n return False\r\n return for_id[0].get_attribute('for')", "def element_by_symbol(symbol, verbose=False):\n symbol_trimmed = sub(r\"[0-9 -]\", \"\", 
symbol).capitalize()\n\n if symbol_trimmed != symbol and verbose:\n msg = (\n f\"Numbers and spaces are not considered when searching by element symbol.\\n\"\n f\"{symbol} became {symbol_trimmed}\"\n )\n warnings.warn(msg)\n\n matched_element = symbol_dict.get(symbol_trimmed)\n return matched_element", "def __getitem__(self, item):\n return self.elements[item]", "def atom(token):\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n return Symbol(token)", "def get_atom_by_locant(self, locant):\n return self._atom_index[locant]", "def first_label(self):\n if self.labels:\n return self.labels[0]\n else:\n return None", "def get(self, element):\n bucket_index = self._bucket_index(element)\n return self.buckets[bucket_index].find(lambda value: value == element)", "def get_semantic_label(self, numeric_label):\n return self.semantic_labels[numeric_label]", "def get_unknown_atom(connection, atom, exists_only=False):\n cookie = connection.core.InternAtom(\n exists_only,\n len(atom),\n atom\n )\n reply = cookie.reply()\n return reply.atom", "def parse(cls, label) -> Any:\n return label", "def __getitem__(self, tag):\n return self.get(tag)", "def getElementName(self):\n return _libsbml.DefaultTerm_getElementName(self)", "def get_element(self):\n return self.element", "def get_element(self):\n return self.element", "def get_atom_labels(self, full=False):\n import numpy\n\n labels = self.get_attr(\"atom_labels\")\n if full:\n return labels\n return numpy.array(labels)[self._get_equivalent_atom_list()].tolist()", "def __getitem__(self, label):\n return self.subset_map[label]", "def getElement(self):\n length = self.getInt()\n element = self._getStr(length)\n \n if length == _NONE_LEN and element == _NONE:\n element = None\n \n return element", "def get_element_by_name(self, name):\n for element in self._elements:\n if element.get_name() == name:\n return element", "def getElementName(self):\n return _libsbml.ReactionGlyph_getElementName(self)", "def get_element_name(ielem):\n\n return interrogate_element_name(ielem)", "def get_atomized_operand(self, context=None):\n selector = iter(self.atomization(context))\n try:\n value = next(selector)\n except StopIteration:\n return\n else:\n try:\n next(selector)\n except StopIteration:\n if isinstance(value, UntypedAtomic):\n value = str(value)\n if isinstance(context, XPathSchemaContext):\n return value\n if self.xsd_types and isinstance(value, str):\n xsd_type = self.get_xsd_type(context.item)\n if xsd_type is None:\n pass\n elif xsd_type.name in XSD_SPECIAL_TYPES:\n value = UntypedAtomic(value)\n else:\n try:\n value = xsd_type.decode(value)\n except (TypeError, ValueError):\n msg = \"Type {!r} is not appropriate for the context\"\n self.wrong_context_type(msg.format(type(value)))\n return value\n else:\n self.wrong_context_type(\"atomized operand is a sequence of length greater than one\")", "def fetch(self, name, implicit_extrn=False):\n if name not in self.symbols:\n if implicit_extrn:\n self.extrn(name)\n else:\n self.label(name, None)\n return self.symbols[name]", "def label_from_example(example):\n val = example.features.feature['label'].int64_list.value\n if val:\n return int(val[0])\n else:\n return None", "def _eval_atom(self, vars, word):\n if word == \"null\":\n return None\n elif word == \"true\":\n return True\n elif word == \"false\":\n return False\n elif word.startswith(\"$\"):\n return vars[word[1:]]\n (instr, arg) = word.split(\":\")\n if instr == \"int\":\n return int(arg)\n elif instr == 
\"float\":\n return float(arg)\n elif instr == \"id\":\n return uuid.UUID(arg.zfill(32))\n elif instr == \"blob\":\n return bytearray.fromhex(arg)\n elif instr == \"str\":\n return arg\n else:\n raise NotAnAtom()", "def atom(token):\n if REGEX_INTEGER.match(token):\n return int(token)\n else:\n return token", "def find_label(self, *args):\n return _ida_hexrays.cfunc_t_find_label(self, *args)", "def get_node(self, name):\n return self._node_reg[name]", "def getChemElement(self):\n dataDict = self.__dict__\n result = self.root.currentChemElementStore.findFirstChemElement(symbol=self.elementSymbol)\n return result", "def get_element(self, pos):\n curr = self.head\n count = 1\n\n while curr != None:\n if count == pos:\n return curr.data\n\n curr = curr.link\n count += 1\n return None", "def get_manifest_label(label_uuid, tag_manifest):\n try:\n return (\n Label.select(Label, LabelSourceType)\n .join(LabelSourceType)\n .where(Label.uuid == label_uuid)\n .switch(Label)\n .join(TagManifestLabel)\n .where(TagManifestLabel.annotated == tag_manifest)\n .get()\n )\n except Label.DoesNotExist:\n return None", "def __getitem__(self, name):\n tag = self._find(name)\n if tag is not None:\n return tag.text\n raise KeyError(name)", "def get_label(repo, title, verbose=None):\n if verbose:\n print \"Checking for label...\"\n label = None\n label_text = None\n try:\n label_start = 1 + title.index('(')\n label_end = title.index(')')\n label_text = title[label_start:label_end]\n except ValueError, e:\n print \"Warning: This tile has no embeded label. {0}\".format(e)\n if label_text:\n try:\n label = [repo.get_label(label_text)]\n if verbose:\n print \"Found label: {0}\".format(label)\n except UnknownObjectException, e:\n print \"Error: The label '{0}' does not exist on \" \\\n \"Github. {1}\".format(label_text, e)\n return label", "def __getitem__(self, name):\n return self.entry[name]", "def get_element(self):\n return self._element", "def get_label(cls, name_or_numeric):\n # type: (Union[str, int, Enum]) -> Optional[str]\n value = cls.get(name_or_numeric)\n if value is not None:\n return value.label\n return None", "def get_closest_atom_of_element(element, atom, exclude=None):\n for atom2 in atom.partner:\n if (element == atom2.element or not element) and not atom2 == exclude:\n return atom2", "def get_parameter_by_label(self, label):\n \n for attr in self.parm_list:\n if attr.label == label:\n return attr\n\n self.logging.error(\"Can't find topic: \"+label)\n return None" ]
[ "0.64875007", "0.6234612", "0.6180619", "0.60778284", "0.6046715", "0.60378295", "0.6036052", "0.59951407", "0.59556556", "0.5930658", "0.59231937", "0.58469766", "0.5837059", "0.57520235", "0.5750243", "0.564846", "0.561729", "0.56039554", "0.55868757", "0.5572615", "0.5556304", "0.5545935", "0.55415994", "0.5516912", "0.5512488", "0.55000865", "0.5500017", "0.5471906", "0.54594916", "0.54577386", "0.5455217", "0.5441049", "0.5438462", "0.5432392", "0.54253286", "0.54217076", "0.5415649", "0.53865325", "0.53740585", "0.5371924", "0.5371609", "0.5367002", "0.53647625", "0.5363343", "0.5356394", "0.535191", "0.5342844", "0.5338211", "0.53262705", "0.5317794", "0.5312941", "0.53064597", "0.53055596", "0.530044", "0.5297487", "0.52909833", "0.52887404", "0.52854854", "0.52854854", "0.52854854", "0.52854854", "0.52854854", "0.5275516", "0.5269839", "0.5269316", "0.52590764", "0.525019", "0.52493757", "0.5246886", "0.52342564", "0.52298975", "0.5224745", "0.5212646", "0.5210337", "0.52036357", "0.5200131", "0.5200131", "0.51987296", "0.5194796", "0.51936215", "0.5192588", "0.5189135", "0.51840895", "0.51792246", "0.51752895", "0.5173778", "0.5165067", "0.51610875", "0.515694", "0.51552236", "0.5140126", "0.5134507", "0.51310736", "0.5130793", "0.5128876", "0.51243633", "0.5123513", "0.512344", "0.5121068", "0.5118837" ]
0.52610904
65
Reads a .FChk file and returns a list containing the charge of the compound, the number of electrons in the compound, the overall length of the dipole moment vector and the total HF energy.
def get_compound_properties(path):
    filepointer = open(path)
    charge = None
    NE = None
    E_HF = None
    dipole = None
    read_dipole = False
    for line in filepointer:
        if read_dipole:
            read_dipole = False
            dipole = [float(value) for value in line.split(' ') if '.' in value]
            dipole = np.linalg.norm(dipole)
        elif 'Charge' in line and not charge:
            charge = line.split(' ')[-1].rstrip('\n')
        elif 'Number of electrons' in line and not NE:
            NE = line.split(' ')[-1].rstrip('\n')
        elif 'Total Energy' in line and not E_HF:
            E_HF = line.split(' ')[-1].rstrip('\n')
        elif 'Dipole Moment' in line and not dipole:
            read_dipole = True
        if charge and NE and E_HF and dipole:
            break
    return [charge, NE, dipole, E_HF]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_forces(self):\n #\"\"\"Read Forces from dftb output file (results.tag).\"\"\"\n\n from ase.units import Hartree, Bohr\n\n myfile = open(os.path.join(self.directory, 'detailed.out'), 'r')\n self.lines = myfile.readlines()\n myfile.close()\n\n # Force line indexes\n for iline, line in enumerate(self.lines):\n fstring = 'Total Forces'\n if line.find(fstring) >= 0:\n index_force_begin = iline + 1\n index_force_end = iline + 1 + len(self.atoms)\n break\n\n gradients = []\n for j in range(index_force_begin, index_force_end):\n word = self.lines[j].split()\n gradients.append([float(word[k]) for k in range(1, 4)])\n\n return np.array(gradients) * Hartree / Bohr", "def read_charges_and_energy(self):\n infile = open(os.path.join(self.directory, 'detailed.out'), 'r')\n lines = infile.readlines()\n infile.close()\n\n #for line in lines:\n # if line.strip().startswith('Total energy:'):\n # energy = float(line.split()[2]) * Hartree\n # break\n\n # for finite-temperature DFT, 0K energy is needed\n for line in lines:\n if line.strip().startswith('Extrapolated to 0:'):\n energy = float(line.split()[3]) * Hartree\n break\n\n # for hellman-feynman force, need force-related free energy\n for line in lines:\n if line.strip().startswith('Force related energy:'):\n free_energy = float(line.split()[3]) * Hartree\n break\n\n qm_charges = []\n for n, line in enumerate(lines):\n if ('Atom' and 'Charge' in line):\n chargestart = n + 1\n break\n else:\n # print('Warning: did not find DFTB-charges')\n # print('This is ok if flag SCC=No')\n return None, energy\n\n lines1 = lines[chargestart:(chargestart + len(self.atoms))]\n for line in lines1:\n qm_charges.append(float(line.split()[-1]))\n\n return np.array(qm_charges), energy, free_energy", "def read_force_field(ff_file, units=\"Angstrom\", fragment_id=AtomicData.atomic_number):\n fh = open(ff_file)\n line = fh.readline()\n # read number of atoms\n nat = int(line.split()[0])\n # skip comment\n fh.readline()\n #\n atomlist = []\n atomtypes = []\n partial_charges = []\n lattice_vectors = []\n for i in xrange(nat+3):\n # read nat atoms and at most 3 lattice vectors\n line = fh.readline()\n if not line:\n # end of file reached\n break\n words = line.split()\n x,y,z = map(float, words[1:4])\n if units == \"Angstrom\":\n x,y,z = map(lambda c: c/AtomicData.bohr_to_angs, [x,y,z])\n if words[0] == \"Tv\":\n lattice_vectors.append( [x,y,z] )\n continue\n atno = fragment_id(words[0])\n atomlist.append((atno, [x,y,z]))\n atomtypes.append(int(words[4]))\n if len(words) > 5:\n # 6th column contains partial charges\n partial_charges.append( float(words[5]) )\n else:\n partial_charges.append( 0.0 ) \n fh.close()\n if lattice_vectors == []:\n print \"No lattice vectors found\"\n # no lattice vectors were provided\n # HACK: By setting a lattice vectors to 0, we tell\n # the function 'build_force_field' that we do not want\n # a periodic calculation in this direction.\n # If all lattice vectors are 0, only the atoms in the central\n # unit cell are included (number of replica cells == 0)\n lattice_vectors = [ [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0] ]\n\n assert len(lattice_vectors) == 3, \"Need 3 lattice vectors, got %d!\" % len(lattice_vectors)\n return atomlist, atomtypes, partial_charges, lattice_vectors", "def cfdReadFacesFile(self): \r\n\r\n with open(self.facesFile,\"r\") as fpid:\r\n print('Reading faces file ...')\r\n self.faceNodes=[]\r\n \r\n for linecount, tline in enumerate(fpid):\r\n \r\n if not io.cfdSkipEmptyLines(tline):\r\n continue\r\n \r\n if 
not io.cfdSkipMacroComments(tline):\r\n continue\r\n \r\n if \"FoamFile\" in tline:\r\n dictionary=io.cfdReadCfdDictionary(fpid)\r\n continue\r\n \r\n if len(tline.split()) ==1:\r\n if \"(\" in tline:\r\n continue\r\n if \")\" in tline:\r\n continue\r\n else:\r\n \r\n self.numberOfFaces = int(tline.split()[0])\r\n continue\r\n \r\n tline=tline.replace(\"(\",\" \")\r\n tline=tline.replace(\")\",\"\")\r\n faceNodesi=[]\r\n for count, node in enumerate(tline.split()):\r\n if count == 0:\r\n continue\r\n #faceNodesi.append(int(node))\r\n else:\r\n faceNodesi.append(float(node))\r\n \r\n self.faceNodes.append(faceNodesi)\r\n \r\n ## (array) with the nodes for each face\r\n self.faceNodes=np.asarray(self.faceNodes)\r\n print(self.faceNodes)", "def f2c_file_read_write_function():\n with open('Fdeg.dat', 'r') as infile:\n data = [i.strip().split() for i in infile] # store data as list\n\n data = data[3:] # get lines with numerical values only\n\n F_list = [float(line[-1]) for line in data]\n C_list = [5/9.0*F - 32 for F in F_list]\n\n for i in range(len(C_list)):\n print(\"{:6g}F {:10.2f}C\".format(F_list[i], C_list[i]))\n\n return F_list", "def read_fr_cards(line_split, temp_data):\n fr_list = temp_data.setdefault('fr_list', [])\n try:\n num_vals_dict = {'SAV': 1, 'URV': 2, 'EDO': 4, 'ICE': 2, 'BRD': 1}\n record = [float('NaN') for i in range(8)]\n record[0] = line_split[0]\n record[1] = line_split[1]\n record[2] = int(line_split[2])\n record[3] = float(line_split[3])\n if record[1] in num_vals_dict:\n for x in range(num_vals_dict[record[1]]):\n record[4+x] = float(line_split[4+x])\n except:\n raise IOError(\"Error reading friction control from *.bc file.\")\n\n fr_list.append(record)", "def read_forces(filename):\n f=open(filename,\"r\")\n castep_forces = f.readlines()\n f.close() \n nruter = []\n for index, line in enumerate(castep_forces):\n if 'Total number of ions in cell' in line:\n n_atoms = int(line.split()[7])\n if 'Cartesian components (eV/A)' in line:\n starting_line = index + 4\n for i in range(n_atoms):\n f = starting_line + i\n nruter.append([float(castep_forces[f].split()[m]) for m in range(3,6)]) \n nruter=np.array(nruter,dtype=np.double)\n return nruter", "def readFamily(Ped_File, vcfIndivs, unaff_Flag):\n\n family = {} # family hash table Key = ID Value = (Father ID, Mother ID, Sex)\n indivs = {} # individuals in VCF Key = ID Value = index in the vcf\n\n indivSet = Set(vcfIndivs) # Convert array to Set to decrease lookup time.\n\n with open(Ped_File) as file:\n for line in file:\n field = line.strip().split('\\t')\n\n family_ID = field[0]\n indiv_ID = field[1]\n father_ID = field[2]\n mother_ID = field[3]\n\n if indiv_ID not in indivSet:\n sys.stderr.write('Individual {} is not in vcf.\\n'.format(indiv_ID))\n continue\n\n if field[4] == '1':\n sex = 'male'\n elif field[4] == '2':\n sex = 'female'\n else:\n sex = 'NA'\n\n # Parents, cases, and controls will not have parental IDs.\n if(father_ID == '0' or mother_ID == '0'):\n continue\n\n # Check to see if the parents are in the vcf.\n if father_ID not in indivSet or mother_ID not in indivSet:\n sys.stderr.write('Family {} is incomplete.\\n'.format(family_ID))\n continue\n\n # If we only want affected probands.\n if not unaff_Flag:\n if field[5] != '2':\n continue\n # If we are only looking at unaffected probands.\n else:\n if field[5] != '1':\n continue\n\n # Family dictionary is in the form: {child_ID} = [Dad_ID, Mom_ID, Sex]\n family[indiv_ID] = (father_ID, mother_ID, sex)\n indivs[indiv_ID] = vcfIndivs.index(indiv_ID)\n 
indivs[father_ID] = vcfIndivs.index(father_ID)\n indivs[mother_ID] = vcfIndivs.index(mother_ID)\n\n print 'Number of families in hash table = {}.'.format(len(family))\n return family, indivs", "def read_forces(self, atoms, all=False):\n\n file = open('OUTCAR','r')\n lines = file.readlines()\n file.close()\n n=0\n if all:\n all_forces = []\n for line in lines:\n if line.rfind('TOTAL-FORCE') > -1:\n forces=[]\n for i in range(len(atoms)):\n forces.append(np.array([float(f) for f in lines[n+2+i].split()[3:6]]))\n if all:\n all_forces.append(np.array(forces)[self.resort])\n n+=1\n if all:\n return np.array(all_forces)\n else:\n return np.array(forces)[self.resort]", "def f2c_file_read_function():\n with open('data.txt', 'r') as infile:\n data = [i.strip().split() for i in infile] # store data as list\n\n F = float(data[-1][-1]) # last item in data should be value\n C = 5/9.0*F - 32\n print(\"The temperatire in Celcius is {:g}\".format(C))", "def read_file_allocation_table(self):\n\n def construct_fat_format(num_clusters):\n return self.endian_fmt + (\n ('H' if self.fat16x else 'L') * num_clusters)\n\n fat_offset = self.byte_offset_to_physical_offset(self.fat_byte_offset)\n self.infile.seek(fat_offset)\n fat_format = construct_fat_format(self.max_clusters)\n fat_length = struct.calcsize(fat_format)\n\n LOG.debug(\"FAT Offset: %s\", POSITION)\n LOG.debug(\"FAT Length: %08x\", fat_length)\n\n fat_table = self.infile.read(fat_length)\n return [entry for entry in struct.unpack(fat_format, fat_table)]", "def read_forces(self, fname):\n outfile = open(fname)\n lines = outfile.readlines()\n outfile.close()\n nats = len(self.atoms)\n forces = np.zeros((nats, 3), float)\n infinite_force=\"*****\"\n if 'mozyme' in self.str_params['job_type'].lower():\n for i, line in enumerate(lines):\n if line.find('FINAL POINT AND DERIVATIVES') != -1:\n for j in range(nats):\n gline = lines[i + j + 5]\n pre_force=gline[8:35]\n if(infinite_force in pre_force):\n forces[j] = [999999999.9999,999999999.9999,999999999.9999]\n else:\n forces[j] = [float( pre_force[0:9].strip()),float( pre_force[9:18].strip()),float( pre_force[18:27].strip())]\n else:\n for i, line in enumerate(lines):\n if line.find('GRADIENT\\n') != -1:\n for j in range(nats * 3):\n gline = lines[i + j + 1]\n pre_force=gline[49:62]\n if(infinite_force in pre_force):\n forces[int(j/3), int(j%3)] =999999999.9999\n else:\n forces[int(j/3), int(j%3)] = float(pre_force)\n break\n#do not change unit for mopac\n forces *= - (kcal / mol)\n return forces", "def read_forces(self):\n\n # Initialise the indices so their scope\n # reaches outside of the for loop\n index_force_begin = -1\n index_force_end = -1\n \n # Force line indexes\n fstring = 'forces '\n for iline, line in enumerate(self.lines):\n if line.find(fstring) >= 0:\n index_force_begin = iline + 1\n line1 = line.replace(':', ',')\n index_force_end = iline + 1 + \\\n int(line1.split(',')[-1])\n break\n gradients = []\n for j in range(index_force_begin, index_force_end):\n word = self.lines[j].split()\n gradients.append([float(word[k]) for k in range(0, 3)])\n gradients = np.array(gradients)* Hartree / Bohr\n\n return gradients", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = 
f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). \n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def readenergy(self, filelist):\r\n \r\n energy=[]\r\n tmpenergy=[]\r\n for filename in filelist:\r\n if not(os.path.exists(filename)):\r\n if self._resultfile: self._resultfile.write('Output file: \"'+filename+'\" does not exist. Restart your calculation. \\n')\r\n else: print 'Output file: \"'+filename+'\" does not exist. Restart your calculation. \\n'\r\n sys.exit()\r\n else:\r\n tmpdat=[]\r\n for key in self._keydict:\r\n infile=open(filename)\r\n lenstart=len(key['start'])\r\n lenblock=len(key['startblock'])\r\n if lenblock:\r\n tmp=''\r\n readlist=[]\r\n readout=0\r\n startblock=0\r\n startcol=[]\r\n lcount=0\r\n count=0\r\n for tmpc in infile:\r\n if tmp>'':\r\n # We are in the area that we should read.\r\n if tmpc.count(key['stop']):\r\n tmp=''\r\n else:\r\n # We are currently in a data block.\r\n if readout:\r\n if (ischar and tmpc.count(key['stopblock'])) or (not(ischar) and len(tmpc)<=1):\r\n readout=readout-1\r\n else:\r\n # Read the columns.\r\n if lcount==0:\r\n for i in range(0, len(startcol)):\r\n tmpenergy.append([float(key['factor'])*float(tmpc[startcol[i][0]:startcol[i][0]+startcol[i][1]]),''])\r\n ilast=startlen\r\n count=0\r\n # Get the labeling information form the block.\r\n if key['type']=='label' or key['type']=='': \r\n # For type=label we read the symmetry of the orbitals. And put it at the end of the list containing the energy values.\r\n for i in range(ilast, len(tmpc)-1):\r\n if tmpc[i:i+1]==' ':\r\n if i-ilast>1:\r\n if int(tmpc[ilast:i]): \r\n if len(tmpenergy[ecount][1]): tmpenergy[ecount][1]=tmpenergy[ecount][1]+'<->'+readlist[lcount+count]\r\n else: tmpenergy[ecount][1]=readlist[lcount+count]\r\n count=count+1\r\n ilast=i\r\n ecount=ecount+1\r\n else:\r\n if self._resultfile: self._resultfile.write('ERROR: (parsedirac.dat). Type of data block is not implemented. '+key['type']+' key= '+key['key']+' \\n')\r\n else: print 'ERROR: (parsedirac.dat). Type of data block is not implemented. 
type=\"'+key['type']+'\" key= '+key['key']+' \\n'\r\n sys.exit()\r\n elif tmpc.count(key['startblock']):\r\n try:\r\n readout=float(key['stopblock'])\r\n ischar=0\r\n except:\r\n readout=1\r\n ischar=1\r\n # If readout=0 we only need to read to the end of this line.\r\n if readout==0:\r\n ilast=tmpc.index(key['startblock'])+len(key['startblock'])\r\n for i in range(ilast, len(tmpc)):\r\n if tmpc[i:i+1] in (' ', '\\n'):\r\n if (i-ilast)>1:\r\n try:\r\n tmpdat.append(key['factor']*float(tmpc[ilast+1:i]))\r\n except:\r\n if self._resultfile: self.resultfile.write('ERROR (parsedirac.py): Numirical data expected. File= '+filename+'\\n'+tmpc+'\\n')\r\n else: print 'ERROR (parsedirac.py): Numirical data expected. File= '+filename+'\\n'+tmpc+'\\n'\r\n ilast=i\r\n else:\r\n # Read the positions of the columns containing energy information.\r\n for col in key['columns']:\r\n if tmpc.count(col):\r\n startcol.append((tmpc.index(col),len(col))) \r\n startlen=tmpc.index(key['startblock'])+len(key['startblock'])\r\n ilast=startlen\r\n lcount=lcount+count\r\n count=0\r\n ecount=0\r\n # Read the labels after the block start commando.\r\n for i in range(ilast, len(tmpc)-1):\r\n if tmpc[i:i+1]==' ':\r\n if i-ilast>1:\r\n readlist.append(tmpc[ilast+1:i])\r\n count=count+1\r\n ilast=i\r\n elif tmpc.count(key['start']):\r\n tmp=key['start']\r\n tmpenergy=[]\r\n tmpdat.append(tmpenergy)\r\n else:\r\n # If we read only one line, find it and read all numbers on it.\r\n for tmpc in infile: \r\n tmpn=tmpc.count(key['start'])\r\n if tmpn:\r\n ilast=tmpc.index(key['start'])+len(key['start'])\r\n for i in range(ilast, len(tmpc)):\r\n if tmpc[i:i+1] in (' ', '\\n'):\r\n if (i-ilast)>1:\r\n try:\r\n tmpdat.append(key['factor']*float(tmpc[ilast+1:i]))\r\n except:\r\n if self._resultfile: self.resultfile.write('ERROR (parsedirac.py): Numirical data expected. File= '+filename+'\\n'+tmpc+'\\n')\r\n else: print 'ERROR (parsedirac.py): Numirical data expected. 
File= '+filename+'\\n'+tmpc+'\\n'\r\n ilast=i\r\n infile.close()\r\n energy.append(tmpdat)\r\n return energy", "def readHeader(self, filename):\n f = Data.Usrxxx.readHeader(self, filename)\n# self.sayHeader()\n \n while True:\n data = fortran.read(f)\n if data is None: break\n size = len(data)\n# print(\"size: \", size)\n\n if size == 14 and data[:10] == \"STATISTICS\":\n self.statpos = f.tell()\n for det in self.detector:\n data = Data.unpackArray(fortran.read(f))\n det.total = data[0]\n det.totalerror = data[1]\n# for j in range(6):\n# fortran.skip(f)\n break\n\n if size != 50: raise IOError(\"Invalid USRTRACK/USRCOLL file\")\n\n header = struct.unpack(\"=i10siiififfif\", data)\n\n det = Data.Detector()\n det.nb = header[0]\n det.name = header[1].strip() # titutc - track/coll name\n det.type = header[2] # itustc - type of binning: 1 - linear energy etc\n det.dist = header[3] # idustc = distribution to be scored\n det.reg = header[4] # nrustc = region\n det.volume = header[5] # vusrtc = volume (cm**3) of the detector\n det.lowneu = header[6] # llnutc = low energy neutron flag\n det.elow = header[7] # etclow = minimum energy [GeV]\n det.ehigh = header[8] # etchgh = maximum energy [GeV]\n det.ne = header[9] # netcbn = number of energy intervals\n det.de = header[10] # detcbn = energy bin width\n\n self.detector.append(det)\n\n if det.lowneu:\n data = fortran.read(f)\n det.ngroup = struct.unpack(\"=i\",data[:4])[0]\n det.egroup = struct.unpack(\"=%df\"%(det.ngroup+1), data[4:])\n print(\"Low energy neutrons scored with %d groups\" % det.ngroup)\n else:\n\t\tdet.ngroup = 0\n\t\tdet.egroup = []\n\n\t size = (det.ngroup+det.ne) * 4\n\t if size != fortran.skip(f):\n\t\traise IOError(\"Invalid USRTRACK file\")\n f.close()", "def read_ZCORN(self, fp):\r\n Z = []\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0][0] != \"*\":\r\n for zz in item:\r\n if \"*\" in zz:\r\n item = zz.split(\"*\")\r\n for i in range(0, int(item[0])):\r\n Z.append(float(item[1]))\r\n else:\r\n Z.append(float(zz))\r\n else:\r\n break\r\n return Z", "def retrieve_additional_files(input_qchem, data_fchk, work_dir, scratch_read_level=0):\n\n additional_data = {}\n\n natom = len(input_qchem.molecule.get_coordinates())\n file_list = os.listdir(work_dir)\n\n # OLD_DIMENSIONS\n if '819.0' in file_list:\n with open(work_dir + '819.0', 'r') as f:\n data = np.fromfile(f, dtype=np.int32)\n norb_alpha, norb_beta = data[0:2]\n norb = norb_alpha\n nbas = norb # assumption\n else:\n norb = np.shape(data_fchk['coefficients']['alpha'])[0]\n nbas = np.shape(data_fchk['coefficients']['alpha'])[1]\n\n\n # MO_COEFS (Already in fchk) in internal order\n if '53.0' in file_list and 'coefficients' in data_fchk:\n with open(work_dir + '53.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n mo_alpha = data[:norb*nbas].reshape(-1, norb).tolist()\n mo_beta = data[norb*nbas: 2*norb_beta*nbas].reshape(-1, norb_beta).tolist()\n # additional_data['coefficients_internal'] = {'alpha': mo_alpha, 'beta': mo_beta}\n\n # obtain the order indices between fchk order and Q-Chem internal order of basis functions\n diff_square = get_sdm(data_fchk['coefficients']['alpha'], mo_alpha)\n\n # get non-repeating indices\n indices = []\n for row in diff_square.T:\n for i in np.argsort(row):\n if i not in indices:\n indices.append(int(i))\n break\n\n # indices = np.argmin(diff_square, axis=0).tolist()\n\n # store q-chem index order for later use (e.g guess)\n data_fchk['coefficients']['qchem_order'] = indices\n else:\n indices = 
list(range(nbas))\n\n # FOCK_MATRIX\n if '58.0' in file_list:\n with open(work_dir + '58.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n fock_alpha = data[:nbas*nbas].reshape(-1, nbas)\n fock_beta = data[nbas*nbas: 2*nbas*nbas].reshape(-1, nbas)\n\n # set basis functions in fchk order\n fock_alpha = fock_alpha[:, indices]\n fock_alpha = fock_alpha[indices, :]\n fock_beta = fock_beta[:, indices]\n fock_beta = fock_beta[indices, :]\n\n additional_data['fock_matrix'] = {'alpha': fock_alpha.tolist(), 'beta': fock_beta.tolist()}\n\n if scratch_read_level == -1:\n # FILE_ENERGY (Not really worth to read it)\n if '99.0' in file_list:\n with open(work_dir + '99.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n\n # FILE_DENSITY_MATRIX (Already in fchk)\n if '54.0' in file_list:\n with open(work_dir + '54.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n density_alpha = data[:nbas*nbas].reshape(-1, nbas)\n density_beta = data[nbas*nbas: 2*nbas*nbas].reshape(-1, nbas)\n # set basis functions in fchk order\n density_alpha = density_alpha[:, indices]\n density_alpha = density_alpha[indices, :]\n density_beta = density_beta[:, indices]\n density_beta = density_beta[indices, :]\n additional_data['scf_density_internal'] = {'alpha': density_alpha.tolist(), 'beta': density_beta.tolist()}\n\n # HESSIAN_MATRIX\n if '132.0' in file_list:\n with open(work_dir + '132.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n hessian = data.reshape(-1, natom*3)\n additional_data['hessian'] = hessian.tolist()\n\n # AO_INTS_DEBUG\n if '21.0' in file_list:\n with open(work_dir + '21.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n ao_integrals = data.reshape(-1, nbas, nbas, nbas)\n\n # set basis functions in fchk order\n ao_integrals = ao_integrals[:, :, :, indices]\n ao_integrals = ao_integrals[:, :, indices, :]\n ao_integrals = ao_integrals[:, indices, :, :]\n ao_integrals = ao_integrals[indices, :, :, :]\n\n additional_data['ao_integrals'] = ao_integrals.tolist()\n\n if scratch_read_level > 0:\n # FILE_RAS_AMP\n if '704.0' in file_list:\n with open(work_dir + '705.0', 'r') as f:\n ras_energies = np.fromfile(f, dtype=float)\n n_ras_roots = len(ras_energies)\n\n with open(work_dir + '704.0', 'r') as f:\n data = np.fromfile(f, dtype=float)\n ras_amplitudes = data.reshape(n_ras_roots, -1)\n additional_data['ras_amplitudes'] = ras_amplitudes.tolist()\n\n return additional_data", "def _get_magtot(self, file):\n #TODO implement\n return []", "def cfdReadBoundaryFile(self):\r\n \r\n with open(self.boundaryFile,\"r\") as fpid:\r\n print('Reading boundary file ...')\r\n \r\n ## (dict) key for each boundary patch\r\n self.cfdBoundaryPatchesArray={}\r\n for linecount, tline in enumerate(fpid):\r\n \r\n if not io.cfdSkipEmptyLines(tline):\r\n continue\r\n \r\n if not io.cfdSkipMacroComments(tline):\r\n continue\r\n \r\n if \"FoamFile\" in tline:\r\n dictionary=io.cfdReadCfdDictionary(fpid)\r\n continue\r\n \r\n count=0\r\n if len(tline.split()) ==1:\r\n if \"(\" in tline:\r\n continue\r\n if \")\" in tline:\r\n continue\r\n \r\n if tline.strip().isdigit():\r\n \r\n self.numberOfBoundaryPatches = tline.split()[0]\r\n continue\r\n \r\n boundaryName=tline.split()[0]\r\n \r\n self.cfdBoundaryPatchesArray[boundaryName]=io.cfdReadCfdDictionary(fpid)\r\n ## number of faces for the boundary patch\r\n self.cfdBoundaryPatchesArray[boundaryName]['numberOfBFaces']= int(self.cfdBoundaryPatchesArray[boundaryName].pop('nFaces'))\r\n \r\n ## start face index of the boundary patch in the self.faceNodes\r\n 
self.cfdBoundaryPatchesArray[boundaryName]['startFaceIndex']= int(self.cfdBoundaryPatchesArray[boundaryName].pop('startFace'))\r\n count=count+1\r\n\r\n ## index for boundary face, used for reference\r\n self.cfdBoundaryPatchesArray[boundaryName]['index']= count", "def test_CFCalculation_txt_files():\n from masci_tools.tools.cf_calculation import CFCalculation, CFCoefficient\n\n #Make sure new script produces the same result as old one\n expected_results = [\n CFCoefficient(l=2,\n m=0,\n spin_up=-419.7891726292168,\n spin_down=-414.7152560307904,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=4,\n m=0,\n spin_up=-35.92607948104669,\n spin_down=-26.384951772020756,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=6, m=0, spin_up=6.522900740505054, spin_down=5.488104692050172, unit='K', convention='Stevens')\n ]\n\n cf = CFCalculation(reference_radius='cdn')\n cf.readPot('files/cf_calculation/VKS.2.0.dat',\n 'files/cf_calculation/VKS.4.0.dat',\n 'files/cf_calculation/VKS.6.0.dat',\n lm=[(2, 0), (4, 0), (6, 0)])\n cf.readCDN('files/cf_calculation/Nd.dat', header=3)\n cf.cdn['RMT'] = 3.138049652\n results = cf.performIntegration()\n\n assert results == expected_results", "def _read_chk_is(self):\n fname = os.path.splitext(self.filename)[0] + '.chk_is'\n table = []\n bg_before = []\n bg_after = []\n with open(fname, mode='rt') as fh:\n for line in fh:\n if line.startswith('FC Background before acq'):\n bg_before = line.split(':')[1].strip().split()\n elif line.startswith('FC Background after acq'):\n bg_after = line.split(':')[1].strip().split()\n elif line.startswith('|'):\n table.append(line)\n\n # Parse analysis background\n det = ''\n for part in bg_before:\n if 'Det' in part:\n det = part.replace('Det', 'Detector ').strip('= ')\n continue\n try:\n bg = float(part.strip())\n except ValueError:\n bg = 0\n self.header['Detectors'][det]['fc background before analysis'] = bg\n\n for part in bg_after:\n if 'Det' in part:\n det = part.replace('Det', 'Detector ').strip('= ')\n continue\n try:\n bg = float(part.strip())\n except ValueError:\n bg = 0\n self.header['Detectors'][det]['fc background after analysis'] = bg\n\n # Parse baseline background if found\n if table:\n background = table[0].strip().strip('|').split('|')\n background = [float(b.strip()) for b in background[1:]]\n detectors = table[2].strip().strip('|').split('|')\n detectors = [i.strip().replace('Mass#', 'Detector ') for i in detectors[2:]]\n for det, bg in zip(detectors, background):\n detdict = self.header['Detectors'][det]\n key = '{} background baseline'.format(detdict['detector'].lower())\n detdict[key] = bg", "def readCC(Ped_File, vcfIndivs):\n\n case = {} # case hash table: Key = ID Value = Sex\n control = {} # control hash table: Key = ID Value = Sex\n caseControl = {} # cases and controls hash table: Key = ID Value = index in vcf\n\n indivSet = Set(vcfIndivs) # convert array to Set to decrease lookup time.\n\n with open(Ped_File) as file:\n for line in file:\n field = line.strip().split('\\t')\n\n indiv_ID = field[1]\n father_ID = field[2]\n mother_ID = field[3]\n ptype = field[5] # case/control status: 1=control, 2=case\n\n if indiv_ID not in indivSet:\n sys.stderr.write('Individual {} is not in vcf.\\n'.format(indiv_ID))\n continue\n\n if field[4] == '1':\n sex = 'male'\n elif field[4] == '2':\n sex = 'female'\n else:\n sex = 'NA'\n\n if(father_ID != '0' or mother_ID != '0'):\n continue\n\n elif(ptype == '2'):\n case[indiv_ID] = sex\n caseControl[indiv_ID] = vcfIndivs.index(indiv_ID)\n\n elif(ptype == 
'1'):\n control[indiv_ID] = sex\n caseControl[indiv_ID] = vcfIndivs.index(indiv_ID)\n\n print 'Number of cases in hash table = {}.'.format(len(case))\n print 'Number of controls in hash table = {}.'.format(len(control))\n return case, control, caseControl", "def read_fermi(self):\n E_f=None\n for line in open('OUTCAR', 'r'):\n if line.rfind('E-fermi') > -1:\n E_f=float(line.split()[2])\n return E_f", "def _read_born_charges(self, line):\n line = self.file_descriptor.readline()\n self.born_charges = []\n for i in range(self.nions):\n b = []\n line = self.file_descriptor.readline()\n b.append([float(f) for f in line.split()[2:5]])\n line = self.file_descriptor.readline()\n b.append([float(f) for f in line.split()[0:3]])\n line = self.file_descriptor.readline()\n b.append([float(f) for f in line.split()[0:3]])\n B = np.array(b)\n C = B.T\n self.born_charges.append(C.tolist())\n return", "def read_pdb(self, fname, ff='WK'):\n\n atoms = symmpdb(fname)\n self.atom_pos = atoms[:, 0:3] / 10 ** 10 # convert unit from Angstroms to m\n tmp = (100 * atoms[:, 3] + atoms[:, 4]).astype(\n int) # hack to get split idx from the sorted atom array\n atom_type, idx = np.unique(np.sort(tmp), return_index=True)\n self.num_atom_types = len(atom_type)\n self.split_idx = np.append(idx, [len(tmp)])\n\n if ff == 'WK':\n \"\"\"\n Here, one tries to calculate the form factor from formula and tables.\n Therefore, one needs to setup some reference points for interpolation.\n Here, the qs variable is such a variable containing the momentum length\n at which one calculate the reference values.\n \"\"\"\n # set up q samples and compton\n qs = np.linspace(0, 10, 101) / (2.0 * np.pi * 0.529177206 * 2.0)\n self.q_sample = qs\n self.compton_q_sample = qs\n self.num_q_samples = len(qs)\n self.num_compton_q_samples = len(qs)\n self.sBound = np.zeros(self.num_q_samples)\n self.nFree = np.zeros(self.num_q_samples)\n\n # calculate form factor using WaasKirf coeffs table\n wk_dbase = load_waaskirf_database()\n for i in idx:\n if i == 0:\n zz = int(atoms[i, 3]) # atom type\n qq = int(atoms[i, 4]) # charge\n idx = np.where(wk_dbase[:, 0] == zz)[0]\n flag = True\n for j in idx:\n if wk_dbase[j, 1] == qq:\n [a1, a2, a3, a4, a5, c, b1, b2, b3, b4, b5] = wk_dbase[j, 2:]\n self.ff_table = (a1 * np.exp(-b1 * self.q_sample ** 2) +\n a2 * np.exp(-b2 * self.q_sample ** 2) +\n a3 * np.exp(-b3 * self.q_sample ** 2) +\n a4 * np.exp(-b4 * self.q_sample ** 2) +\n a5 * np.exp(-b5 * self.q_sample ** 2) + c)\n flag = False\n break\n if flag:\n print('Atom number = ' + str(zz) + ' with charge ' + str(qq))\n raise ValueError('Unrecognized atom type!')\n else:\n zz = int(atoms[i, 3]) # atom type\n qq = int(atoms[i, 4]) # charge\n idx = np.where(wk_dbase[:, 0] == zz)[0]\n flag = True\n for j in idx:\n if wk_dbase[j, 1] == qq:\n [a1, a2, a3, a4, a5, c, b1, b2, b3, b4, b5] = wk_dbase[j, 2:]\n\n ff = (a1 * np.exp(-b1 * self.q_sample ** 2) +\n a2 * np.exp(-b2 * self.q_sample ** 2) +\n a3 * np.exp(-b3 * self.q_sample ** 2) +\n a4 * np.exp(-b4 * self.q_sample ** 2) +\n a5 * np.exp(-b5 * self.q_sample ** 2) + c)\n\n self.ff_table = np.vstack((self.ff_table, ff))\n flag = False\n break\n if flag:\n print('Atom number = ' + str(zz) + ' with charge ' + str(qq))\n raise ValueError('Unrecognized atom type!')\n\n elif ff == 'pmi':\n # set up ff table\n ffdbase = load_ff_database()\n for i in idx:\n if i == 0:\n zz = int(atoms[i, 3]) # atom type\n qq = int(atoms[i, 4]) # charge\n self.ff_table = ffdbase[:, zz] * (zz - qq) / (zz * 1.0)\n else:\n zz = int(atoms[i, 3]) # 
atom type\n qq = int(atoms[i, 4]) # charge\n self.ff_table = np.vstack(\n (self.ff_table, ffdbase[:, zz] * (zz - qq) / (zz * 1.0)))\n\n # set up q samples and compton\n self.q_sample = ffdbase[:, 0] / (2.0 * np.pi * 0.529177206 * 2.0)\n self.compton_q_sample = ffdbase[:, 0] / (2.0 * np.pi * 0.529177206 * 2.0)\n self.num_q_samples = len(ffdbase[:, 0])\n self.num_compton_q_samples = len(ffdbase[:, 0])\n self.sBound = np.zeros(self.num_q_samples)\n self.nFree = np.zeros(self.num_q_samples)\n else:\n raise ValueError('Unrecognized form factor source!')", "def test_CFCalculation_hdf_files():\n from masci_tools.tools.cf_calculation import CFCalculation, CFCoefficient\n\n expected_results = [\n CFCoefficient(l=2, m=0, spin_up=-571.68845386399, spin_down=-558.2336974657351, unit='K', convention='Stevens'),\n CFCoefficient(l=4,\n m=0,\n spin_up=-34.982539807305045,\n spin_down=-21.850435868549834,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=6, m=0, spin_up=3.8503494779930776, spin_down=2.168215129491561, unit='K',\n convention='Stevens'),\n CFCoefficient(l=6,\n m=-6,\n spin_up=110.50156137060345,\n spin_down=85.58558990378205,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=6, m=6, spin_up=110.50156137060345, spin_down=85.58558990378205, unit='K', convention='Stevens')\n ]\n\n cf = CFCalculation()\n cf.readPot('files/cf_calculation/CFdata.hdf')\n cf.readCDN('files/cf_calculation/CFdata.hdf')\n results = cf.performIntegration()\n\n assert results == expected_results", "def read_formations():\n\n with resource_stream('component_contribution',\n FullTrainingData.FORMATION_ENERGY_FNAME) as fp:\n formation_df = pd.read_csv(gzip.GzipFile(fileobj=fp))\n\n cids_that_dont_decompose = set(\n formation_df.loc[formation_df['decompose'] == 0, 'cid'])\n\n for col in [\"dG'0\", \"T\", \"I\", \"pH\", \"pMg\"]:\n formation_df[col] = formation_df[col].apply(float)\n\n formation_df = formation_df[~pd.isnull(formation_df[\"dG'0\"])]\n formation_df['reaction'] = formation_df['cid'].apply(\n lambda c: Reaction({c: 1}))\n\n formation_df['balance'] = False\n formation_df['description'] = formation_df['name'] + ' formation'\n formation_df.rename(columns={'compound_ref': 'reference'},\n inplace=True)\n formation_df.drop(['name', 'cid', 'remark', 'decompose'],\n axis=1, inplace=True)\n\n logger.debug('Successfully added %d formation energies' %\n formation_df.shape[0])\n return formation_df, cids_that_dont_decompose", "def readFLO(file):\r\n\r\n tag_float = 202021.25\r\n with open(file) as f:\r\n nbands = 2\r\n tag = np.fromfile(f, np.float32, 1)[0]\r\n\r\n if tag != tag_float:\r\n raise ValueError('wrong tag possibly due to big-endian machine?')\r\n\r\n width = np.fromfile(f, np.int32, 1)[0]\r\n height = np.fromfile(f, np.int32, 1)[0]\r\n\r\n tmp = np.fromfile(f, np.float32)\r\n tmp = tmp.reshape(height, width * nbands)\r\n\r\n flow = np.zeros((height, width, 2))\r\n flow[:, :, 0] = tmp[:, 0::2]\r\n flow[:, :, 1] = tmp[:, 1::2]\r\n\r\n return flow", "def read_forces_on_pointcharges(self):\n from ase.units import Hartree, Bohr\n infile = open(os.path.join(self.directory, 'detailed.out'), 'r')\n lines = infile.readlines()\n infile.close()\n\n external_forces = []\n for n, line in enumerate(lines):\n if ('Forces on external charges' in line):\n chargestart = n + 1\n break\n else:\n raise RuntimeError(\n 'Problem in reading forces on MM external-charges')\n lines1 = lines[chargestart:(chargestart + len(self.mmcharges))]\n for line in lines1:\n external_forces.append(\n [float(i) for i in line.split()])\n 
return np.array(external_forces) * Hartree / Bohr", "def part2():\n input_list = read_input('input.txt')\n fuel_list = []\n for mass_in in input_list:\n helper = True\n total_fuel = 0\n mass = mass_in\n while helper or mass > 0:\n helper = False\n mass = fuel_required(mass)\n if mass > 0:\n total_fuel += mass\n fuel_list.append(total_fuel)\n return sum(fuel_list)", "def _parse_nscf(self) -> None:\n alat = 0\n lattice = np.zeros((3,3))\n recip = np.zeros((3,3))\n nbnd = 0\n natom = 0\n positions = []\n nk = 0\n symbols = []\n k_frac = []\n efermi = 0\n\n energy = {\"spinup\" : [],\n \"spindown\" : []\n }\n\n which = \"spinup\" # remember if we are reading spin up or spin down\n \n with open(self.output,'r') as f:\n aline=f.readline()\n\n while aline:\n # read information by checking the flags\n if \"lattice parameter (alat) =\" in aline:\n data = aline.split('=')[1]\n data = data.split()\n alat = float(data[0]) # in Bohr\n\n if \"number of Kohn-Sham states\" in aline:\n data = aline.split()[-1]\n nbnd = int(data)\n\n if \"number of atoms/cell\" in aline:\n data = aline.split()[-1]\n natom = int(data)\n\n if \"crystal axes: (cart. coord. in units of alat)\" in aline:\n for i in range(3):\n data = f.readline().split()[3:6]\n lattice[i] = np.array(data, dtype = float) \n lattice *= alat * Bohr2A\n\n if \"reciprocal axes: (cart. coord. in units 2 pi/alat)\" in aline:\n for i in range(3):\n data = f.readline().split()[3:6]\n recip[i] = np.array(data, dtype = float)\n recip *= 2 * np.pi / (alat * Bohr2A)\n\n if \"site n. atom positions (cryst. coord.)\" in aline:\n for i in range(natom):\n data = f.readline()\n symbols.append(re.findall(r'[A-Z][a-z]*', data)[0])\n positions.append(np.array(re.findall('-?\\d+\\.\\d+', data), dtype = float))\n \n if \"number of k points= \" in aline:\n nk = int( re.findall(r'\\d+', aline)[0] )\n k_frac = np.zeros((nk,3))\n\n if re.search(r'k\\(.+\\)\\s+=\\s+\\(.+\\)', aline) != None:\n parts = aline.split('=')\n ik = int( re.findall(r'\\d+', parts[0])[0] )\n pos = np.array(re.findall(r'-?\\d+\\.\\d+', parts[1]), dtype = float)\n k_frac[ik-1] = pos\n\n if \"the Fermi energy is\" in aline:\n efermi = float(re.findall(r'-?\\d+\\.\\d+', aline)[0])\n\n if \"------ SPIN UP ------------\" in aline:\n which = \"spinup\"\n\n if \"------ SPIN DOWN ----------\" in aline:\n which = \"spindown\"\n\n if re.search('k\\s+=\\s*-?\\d+\\.\\d+\\s*-?\\d+\\.\\d+\\s*-?\\d+\\.\\d+\\s',aline) != None:\n kstr=re.findall(r'-?\\d+\\.\\d+',aline)\n\n f.readline()\n\n lenergy = [] # local energy for each k point\n while len(lenergy) < nbnd:\n aline = f.readline()\n data = np.array(aline.split(), dtype = float)\n for d in data:\n lenergy.append(d)\n\n if len(lenergy) > nbnd:\n raise \"length of energy > nbnd\"\n\n energy[which].append(lenergy)\n \n aline = f.readline()\n\n self.efermi = efermi\n self.lattice = lattice\n self.symbols = symbols \n self.positions = np.array(positions)\n self.reciprocal = recip\n self.kpoints = k_frac\n\n self.eig = {}\n self.eig[Spin.up] = np.array(energy[\"spinup\"]).T\n\n if energy[\"spindown\"]:\n self.spin_polarized = True\n self.eig[Spin.down] = np.array(energy[\"spindown\"]).T", "def readFST(self):\n\n fname = self.fst_file\n print \"reading FAST template file\", fname\n try:\n fh = open(fname,'r')\n self.lines_fast = fh.readlines()\n fh.close()\n except:\n sys.stdout.write (\"Error opening master FAST input file %s\\n\" % fname)\n return 0\n\n for line in self.lines_fast:\n f = line.lstrip().split()\n if (len(f) < 2):\n continue\n\n if (f[1] == 'PtfmFile' 
and self.ptfm_file == None):\n self.ptfm_file = f[0][1:-1]\n if (f[1] == 'TwrFile' and self.twr_file == None):\n self.twr_file = f[0][1:-1]\n if (f[1] == 'ADAMSFile' and self.adams_file == None):\n self.adams_file = f[0][1:-1]\n if (f[1] == 'BldFile(1)' and self.blade1_file == None):\n self.blade1_file = f[0][1:-1]\n if (f[1] == 'BldFile(2)' and self.blade2_file == None):\n self.blade2_file = f[0][1:-1]\n if (f[1] == 'BldFile(3)' and self.blade3_file == None):\n self.blade3_file = f[0][1:-1]\n if (f[1] == 'ADFile' and self.ad_file == None):\n self.ad_file = f[0][1:-1]\n if (f[1] == 'NoiseFile' and self.noise_file == None):\n self.noise_file = f[0][1:-1]\n \n print \"FAST subfiles:\"\n print \"ptfm \", self.ptfm_file\n print \"twr \", self.twr_file\n print \"blades \", self.blade1_file, self.blade2_file, self.blade3_file\n print \"ad \", self.ad_file\n print \"noise \", self.noise_file", "def calculate_total_fuel(filename):\n return sum([calculate_fuel_from_mass(mass) for mass in read_mass_from_file(filename)])", "def testReadCifFile(self):\n\n try:\n blockNameList = []\n cf = CifFile(self.__pathPdbxDataFile)\n myReader = cf.getCifFile()\n blockNameList = myReader.GetBlockNames(blockNameList)\n logger.debug(\"Block list %r\", repr(blockNameList))\n #\n for blockName in blockNameList:\n block = myReader.GetBlock(blockName)\n tableNameList = []\n tableNameList = block.GetTableNames(tableNameList)\n for tableName in tableNameList:\n table = block.GetTable(tableName)\n columnNameList = table.GetColumnNames()\n logger.debug(\"Column list %r\", repr(columnNameList))\n numRows = table.GetNumRows()\n rowList = []\n for iRow in range(0, numRows):\n row = table.GetRow(iRow)\n rowList.append(row)\n cf.write(self.__pathOutputPdbxFile)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def readData(f):\n taxa_lis = []\n num_lis = []\n for n, line in enumerate(open(f)):\n if line.startswith('#'):\n continue\n\n line = line.rstrip()\n if line == '':\n continue\n\n taxa, num = line.split('\\t')\n skip = False\n for word in EXCLUDE:\n if word in taxa:\n skip = True\n break\n\n if skip:\n continue \n\n taxa = taxa.rstrip(';')\n lis = taxa.split(';')\n lis2 = []\n for item in lis:\n item = item.strip()\n if item.endswith(')'):\n item = item.split('(')[0].strip()\n\n # remove taxon level prefix, e.g. 
'p__Firmicutes'\n if '__' in item:\n item = item.split('__', 1)[1]\n\n #item = item.strip('\"')\n\n item = item.lower()\n if 'unclassified' in item:\n item = 'Unclassifed'\n elif 'unknown' in item:\n item = 'Unclassifed'\n elif 'other' in item:\n item = 'Unclassifed'\n elif 'unassigned' in item:\n item = 'Unclassifed'\n\n item = item.capitalize()\n lis2.append(item)\n\n taxa_lis.append(lis2)\n num_lis.append(float(num))\n\n return taxa_lis, num_lis", "def readListData( self, file, bQuiet = True ):\n nSizeOfSectionChunck = struct.unpack_from( \"i\", file.read( 4 ) )[0]\n logging.debug( \"nSizeOfSectionChunck: %d\" % nSizeOfSectionChunck )\n data = file.read( 4 )\n #print debug.dumpHexa( data )\n if( data[0] == 'I' and data[3] == 'O' ):\n self.readInfoData( file, nSizeOfSectionChunck - 4, bQuiet = bQuiet )\n return nSizeOfSectionChunck\n return 0", "def read_oncokb_fusions(mypath):\n tmp = pd.read_csv(mypath, sep='\\t')\n is_fus = tmp['Alteration'].str.contains('Fusion')\n tmp = tmp[is_fus].copy()\n\n # fetch cases where both fusion partners specified\n oncokb_fusions_both = tmp[\"Alteration\"].str.extract('(.+) Fusion', expand=False)\n oncokb_fusions_both = oncokb_fusions_both.str.replace('-', '--').dropna()\n\n # get cases where any fusion partner is allowed\n oncokb_fusions_one = tmp[tmp['Alteration']=='Fusions']['Gene'].unique()\n\n return oncokb_fusions_both, oncokb_fusions_one", "def lo_cf(self):\n cf1 = self._read(0x12, 0, 0x1F)\n cf2 = self._read(0x13, 0, 0x1F)\n return (cf1, cf2)", "def read_input_files(input_file: str) -> list[Food]:\n with open(input_file) as input_fobj:\n foods = [Food.from_raw(line.strip()) for line in input_fobj]\n return foods", "def readprimitive(f): \n \n ## read in lines from input file and ignore blank lines and comment lines\n lines = [line.rstrip() for line in f if line.rstrip() if line[0] != '#']\n\n # a1,a2,a3\n A = np.array([[float(lines[0].split()[0]),float(lines[0].split()[1]),float(lines[0].split()[2])],\n [float(lines[1].split()[0]),float(lines[1].split()[1]),float(lines[1].split()[2])],\n [float(lines[2].split()[0]),float(lines[2].split()[1]),float(lines[2].split()[2])]]).T\n \n # number of basis atoms\n num_basis = int(lines[3].split()[0]) \n\n # basis atom positions in unit cell\n unitcell_pos = []\n for i in range(num_basis): \n unitcell_pos.append([float(lines[4+i].split()[0]),float(lines[4+i].split()[1]),float(lines[4+i].split()[2])]) \n \n return (A,unitcell_pos)", "def readMol2TotalCharge(self, mol2File):\n charge = 0.0\n ll = []\n cmd = '%s -i %s -fi mol2 -o tmp -fo mol2 -c wc -cf tmp.crg -pf y' % \\\n (self.acExe, mol2File)\n if self.debug:\n self.printMess(\"Debugging...\")\n cmd = cmd.replace('-pf y', '-pf n')\n\n self.printDebug(cmd)\n\n log = getoutput(cmd)\n\n if log.isspace():\n tmpFile = open('tmp.crg', 'r')\n tmpData = tmpFile.readlines()\n for line in tmpData:\n ll += line.split()\n charge = sum(map(float,ll))\n elif self.debug:\n self.printQuoted(log)\n\n self.printDebug(\"readMol2TotalCharge: \" + str(charge))\n\n return charge", "def read_corr(no_of_specs,base_name=\"spec\"):\n count_r=[]\n corr_fact=[]\n for i in range(no_of_specs):\n path=\"./\"+base_name+str(i)+\"/\"+base_name+str(i)+\"pc_fit.fit\"\n # path=\"./spec0/spec0pc_fit.fit\"\n \n temp_count_r=linecache.getline(path,9).strip().replace('File count rate: ','')\n count_r.append(temp_count_r)\n temp_corr_fact=linecache.getline(path,10).strip().replace('Corr factor:\\t','')\n corr_fact.append(temp_corr_fact)\n \n count_r=np.array(count_r,dtype=np.float32)\n 
corr_fact=np.array(corr_fact,dtype=np.float32)\n return [count_r,corr_fact]", "def get_float_list(gene_file, c):\n\tfile = open(gene_file,'r')\n\tList = []\n\tfor line in file:\n\t\tif not re.match(\"#\", line):\n\t\t\tline = line.strip()\n\t\t\tsline = line.split()\n\t\t\tList.append(atof(sline[c]))\n\tfile.close()\n\treturn List", "def read_hf():\n hf = main_dir + \"height_file.txt\"\n height_list = []\n with open(hf, 'r') as f:\n for line in f:\n line = line.strip()\n column = line.split()\n if len(column) == 1:\n height_list.append(float(column[0]))\n else:\n print \"Error: height file has wrong format!\"\n return\n\n return np.array(height_list)", "def read_forces(self, file_path=None, labels=None):\n if not file_path:\n file_path = os.path.join(self.directory, 'forces.txt')\n print('[info] reading forces ...'),\n with open(file_path, 'r') as infile:\n data = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)\n times = data[0]\n if not labels:\n labels = ['f_x', 'f_z', 'f_z'] # default labels\n self.forces = []\n for index, values in enumerate(data[1:]):\n self.forces.append(Force(times, values, label=labels[index]))\n print('done')", "def CARDAMOM_READ_BINARY_FILEFORMAT(filename,cbrfrac=0.5,INFO=[]):\n \n if type(filename) is str:\n \n if filename.split(\".\")[-1] == \"cbf\":\n VARRETURN = read_cbf_file(filename)\n \n elif filename.split(\".\")[-1] == \"cbr\":\n PARS = read_cbr_file(filename,INFO)\n VARRETURN = PARS[int((PARS.shape[0])*cbrfrac):]\n \n \n elif type(filename) is list:\n \n filename.sort()\n tmplist = []\n allcbrlist = []\n for fn in filename:\n \n if fn.split(\".\")[-1] == \"cbr\":\n PARS = read_cbr_file(fn,INFO)\n tmplist.append(PARS[int((PARS.shape[0])*cbrfrac):])\n allcbrlist.append(True)\n else:\n allcbrlist.append(False)\n \n VARRETURN = np.concatenate(tmplist,axis=0)\n \n if not all(allcbrlist):\n print('not all files where .cbr files')\n \n return VARRETURN", "def sample_vcf():\n file_content = 
b\"\"\"##fileformat=VCFv4.2\n##hailversion=0.2.100-2ea2615a797a\n##INFO=<ID=QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=SB,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_pab_max,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SB_TABLE,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=AS_VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=transmitted_singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=omni,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=mills,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=monoallelic,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=AS_VQSLOD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=InbreedingCoeff,Number=1,Type=Float,Description=\"\">\n##FILTER=<ID=AC0,Description=\"Allele count is zero after filtering out low-confidence genotypes (GQ < 20; DP < 10; and AB < 0.2 for het calls)\">\n##FILTER=<ID=AS_VQSR,Description=\"Failed VQSR filtering thresholds of -2.7739 for SNPs and -1.0606 for indels\">\n##contig=<ID=chr1,length=248956422,assembly=GRCh38>\n#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\nchr1\t10330\t.\tCCCCTAACCCTAACCCTAACCCTACCCTAACCCTAACCCTAACCCTAACCCTAA\tC\t.\tPASS\tQUALapprox=21493;SB=325,1077,113,694;MQ=32.1327;MQRankSum=0.720000;VarDP=2236;AS_ReadPosRankSum=-0.736000;AS_pab_max=1.00000;AS_QD=5.17857;AS_MQ=29.5449;QD=9.61225;AS_MQRankSum=0.00000;FS=8.55065;AS_FS=.;ReadPosRankSum=0.727000;AS_QUALapprox=145;AS_SB_TABLE=325,1077,2,5;AS_VarDP=28;AS_SOR=0.311749;SOR=1.48100;singleton;AS_VQSLOD=13.4641;InbreedingCoeff=-0.000517845\"\"\"\n file = io.BytesIO(file_content)\n return file", "def read_collector_config(cfg_file):\n hpifreqs = []\n linefreq = None\n if op.isfile(cfg_file):\n with open(cfg_file, 'r') as f:\n flines = f.read().splitlines()\n for line in flines:\n lit = line.split()\n if len(lit) > 1:\n if lit[0].find('hpiFreq') == 0:\n hpifreqs.append(float(lit[1]))\n elif lit[0].find('lineFreq') == 0:\n linefreq = float(lit[1])\n return linefreq, hpifreqs", "def main():\n regexham = r'\\s+\\((\\d+,\\s*\\d+)\\)\\s+([\\-+]?\\d+\\.\\d+[eEdD]?[\\-+]?\\d+)' #to extract the Hamiltonian.\n root = '.'\n #fname = 'output_files/'\n ciffci = CIFlow_Reader('testfci.dat', regexp = regexham , read_ham= True)\n ciffcipar = CIFlow_Reader( 'psi0_output10outputfci.dat', regexp = regexham , read_ham = True)\n #print ciffci.calc_overlap(cifdoci)\n #print e.get_groundstate('00000000000011|00000000000011') \n\n psir = rp.PsiReader('psi0_output10.dat', isbig = False, numorbs = -1 , read_ints = False)\n\n detlist = dw.cimain(psir.values['nalpha'],psir.values['nbeta'], psir.values['norb'], [range(1,psir.values['nalpha']+psir.values['nbeta']), []], [] , 
fname = 'determinants.dat' ,ref = [lambda x , y , z : psir.get_hf_orbs()] , add_frozen = 0, write = False) #CISDDOCI\n count = 0\n for det in detlist:\n for det2 in detlist:\n #+ because the eigenvectors have already a different phasefactor of 1.\n if abs(ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) - ciffcipar.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) ) > 1e-10 :\n print 'difference in hamiltonian row: ' , det[0]+'|'+det[1] , \" col: \" , det2[0]+'|'+det2[1] , 'fci: ', ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) , 'fciaddres: ' , ciffcipar.get_mat_element(det[0]+'|'+det[1],det2[0]+'|'+det2[1]) \n count += 1\n print 'There were ' , count , ' different elements'", "def read_energy(self, fname):\n outfile = open(fname)\n lines = outfile.readlines()\n outfile.close()\n\n energy = None\n for line in lines:\n if line.find('HEAT OF FORMATION') != -1:\n words = line.split()\n energy = float(words[5])\n if line.find('H.o.F. per unit cell') != -1:\n words = line.split()\n energy = float(words[5])\n if line.find('UNABLE TO ACHIEVE SELF-CONSISTENCE') != -1:\n energy = None\n if energy is None:\n raise RuntimeError('MOPAC: could not find total energy')\n### do not change unit for mopac\n energy *= (kcal / mol)\n return energy", "def hessian(self):\n\n with open('lig.fchk', 'r') as fchk:\n\n lines = fchk.readlines()\n hessian_list = []\n\n for count, line in enumerate(lines):\n if line.startswith('Cartesian Force Constants'):\n start_pos = count + 1\n if line.startswith('Dipole Moment'):\n end_pos = count\n\n if not start_pos and end_pos:\n raise EOFError('Cannot locate Hessian matrix in lig.fchk file.')\n\n for line in lines[start_pos: end_pos]:\n # Extend the list with the converted floats from the file, splitting on spaces and removing '\\n' tags.\n hessian_list.extend([float(num) * 0.529 for num in line.strip('\\n').split()])\n\n hess_size = 3 * len(self.molecule.molecule['input'])\n\n hessian = zeros((hess_size, hess_size))\n\n # Rewrite Hessian to full, symmetric 3N * 3N matrix rather than list with just the non-repeated values.\n m = 0\n for i in range(hess_size):\n for j in range(i + 1):\n hessian[i, j] = hessian_list[m]\n hessian[j, i] = hessian_list[m]\n m += 1\n\n check_symmetry(hessian)\n\n return hessian", "def parseLcalcfile(self, filecontents):\n \n lines = filecontents.split('\\n',6)\n self.coefficient_type = int(lines[0])\n self.quasidegree = int(lines[4])\n lines = self.lcalcfile.split('\\n',8+2*self.quasidegree)\n self.Q_fe = float(lines[5+2*self.quasidegree])\n self.sign = pair2complex(lines[6+2*self.quasidegree])\n\n self.kappa_fe = []\n self.lambda_fe = []\n self.mu_fe = []\n self.nu_fe = []\n\n for i in range(self.quasidegree):\n localdegree = float(lines[5+2*i])\n self.kappa_fe.append(localdegree)\n locallambda = pair2complex(lines[6+2*i])\n self.lambda_fe.append(locallambda)\n if math.fabs(localdegree-0.5)<0.00001:\n self.mu_fe.append(2*locallambda)\n elif math.fabs(localdegree-1)<0.00001:\n self.nu_fe.append(locallambda)\n else:\n self.nu_fe.append(locallambda)\n self.langlands = False\n\n \"\"\" Do poles here later\n \"\"\"\n \n self.degree = int(round(2*sum(self.kappa_fe)))\n\n self.level = int(round(math.pi**float(self.degree) * 4**len(self.nu_fe) * self.Q_fe**2 ))\n # note: math.pi was not compatible with the sage type of degree\n\n self.dirichlet_coefficients = splitcoeff(lines[-1])", "def read_results():\r\n with open(\"packing.nfo\", \"r\") as fin:\r\n fin.readline()\r\n fin.readline()\r\n por_theory = 
float(fin.readline().split()[2])\r\n por_final = float(fin.readline().split()[2])\r\n print('Theoretical porosity:', por_theory)\r\n print('Final porosity:', por_final)\r\n with open(\"packing.xyzd\", \"rb\") as fin:\r\n btxt = fin.read()\r\n txt = list(struct.unpack(\"<\" + \"d\" * (len(btxt) // 8), btxt))\r\n data = array(zip(*[iter(txt)] * 4))\r\n data[:, 3] = data[:, 3] * \\\r\n ((1 - por_final) / (1 - por_theory))**(1 / 3)\r\n return data", "def cfdReadOwnerFile(self): \r\n\r\n with open(self.ownerFile,\"r\") as fpid:\r\n print('Reading owner file ...')\r\n\r\n ## (list) 1D, indices refer to faces, list value is the face's owner cell\r\n self.owners=[]\r\n start=False\r\n \r\n for linecount, tline in enumerate(fpid):\r\n \r\n if not io.cfdSkipEmptyLines(tline):\r\n continue\r\n \r\n if not io.cfdSkipMacroComments(tline):\r\n continue\r\n \r\n if \"FoamFile\" in tline:\r\n dictionary=io.cfdReadCfdDictionary(fpid)\r\n continue\r\n \r\n if len(tline.split()) ==1:\r\n \r\n #load and skip number of owners\r\n if not start:\r\n nbrOwner=tline\r\n start=True\r\n continue\r\n \r\n if \"(\" in tline:\r\n continue\r\n if \")\" in tline:\r\n break\r\n else:\r\n self.owners.append(int(tline.split()[0]))", "def harvest_corr_fact(self):\n # Search in the file #\n lines = [line for line in self.paths.sas if \"if _2='\" in str(line)]\n # Do each line #\n query = \"if _2='([A-Z][A-Z])' then CF=([0-9].[0-9]+);\"\n extract = lambda line: re.findall(query, str(line))\n result = list(map(extract, lines))\n result = [found[0] for found in result if found]\n # Make a data frame #\n df = pandas.DataFrame(result, columns=['forest_type', 'corr_fact'])\n # Write back into a CSV #\n df.to_csv(str(self.paths.corr_fact), index=False)", "def read(read_file) -> list:\n result = []\n try:\n with open(read_file) as file:\n for lines in file:\n line = decode(lines.strip(\"\"))\n result.append(extract_information(line))\n global header\n header = result[0:2]\n result = result[3:]\n for word in result:\n if \"None\" in word[0:3]:\n raise InvalidPrincessException(\"Invalid princess!\")\n continue\n return result\n except FileNotFoundError:\n raise Exception(\"File not found!\")", "def _scfconv_from_ccdata(self):\n\n lines = [f\"scf-first 1 THROUGH {len(self.ccdata.scfenergies)}\"]\n\n for scfenergy in self.ccdata.scfenergies:\n lines.append(f\"{scfenergy:15.6f}\")\n\n return lines", "def openFullProfFile(self, filename):\n handle = open(filename)\n lines = handle.readlines()\n handle.close()\n atoms = []\n bonds = []\n conns = []\n for line in lines:\n if line[0:4] == \"CELL\":\n #format of line: CELL a b c alpha beta gamma\n vals = line.split()\n print vals\n a = float(vals[1])\n b = float(vals[2])\n c = float(vals[3])\n alpha = float(vals[4])\n gamma = float(vals[5])\n beta = float(vals[6])\n elif line[0:6] == \"SPACEG\":\n #this is the space group in Hermann-Mauguin notation.\n hm_spacegroup = (line[6:]).strip().upper()\n space_group = GetSpaceGroup(hm_spacegroup)\n elif line[0:3] == \"BOX\":\n #Format: xmin xmax ymin ymax zmin zmax\n #In this program, however, xmin, ymin, zmin = 0,0,0 always.\n vals = line.split()\n a_diff = float(vals[2]) - float(vals[1])\n b_diff = float(vals[4]) - float(vals[3])\n c_diff = float(vals[6]) - float(vals[5])\n a_cutoff = int(a_diff)\n b_cutoff = int(b_diff)\n c_cutoff = int(c_diff)\n if a_diff - a_cutoff > 0:\n a_cutoff += 1\n if b_diff - b_cutoff > 0:\n b_cutoff += 1\n if c_diff - c_cutoff > 0:\n c_cutoff += 1\n elif line[0:4] == \"ATOM\":\n vals = line.split()\n label = vals[1]\n 
symbol = vals[2]\n a_coord = float(vals[3])\n b_coord = float(vals[4])\n c_coord = float(vals[5])\n position = (a_coord, b_coord, c_coord)\n #Get the radius which is right after the word \"RADIUS\"\n for i in range(len(vals)):\n if vals[i] == \"RADIUS\":\n radius = float(vals[i+1])\n break\n else:\n radius = None\n #Get the color which is right after the word \"COLOR\"\n for i in range(len(vals)):\n if vals[i] == \"COLOR\":\n color = [float(vals[i+1]), float(vals[i+2]), float(vals[i+3])]\n break\n else:\n color = None\n #atomData format (each line):\n #label massNum aPos bPos cPos anisotropy_a anisotropy_b anistropy_c spin valence\n atoms.append([label, symbol, position, radius, color])\n elif line[0:4] == \"BOND\":\n #Format: BOND label1 label2 min_dist max_dist RADIUS rad COLOR r g b t\n #The color and radius need not be there and will be ignored for now since\n #the color and radius of bonds is hardcoded in right now.\n vals = line.split()\n bonds.append([vals[1], vals[2], vals[3], vals[4]])\n elif line[0:4] == \"CONN\":\n #Format: BOND symbol1 symbol2 min_dist max_dist RADIUS rad COLOR r g b t\n #The color and radius need not be there and will be ignored for now since\n #the color and radius of bonds is hardcoded in right now.\n vals = line.split()\n conns.append([vals[1], vals[2], vals[3], vals[4]])\n \n \n self.newCell(space_group.number, a, b, c, alpha, beta, gamma, 1, 1, 1,\n a_cutoff, b_cutoff, c_cutoff)\n \n for atom in atoms:\n #FPStudio does not seem to support isotopes\n massNum = None\n self.addAtom(atom[1], atom[2], massNum = massNum, radius = atom[3], rgb = atom[4])\n \n for bond in bonds:\n self.createBonds(label1 = bonds[0], label2 = bonds[1],\n minDist = bonds[2], maxDist = bonds[3])\n for conn in conns:\n self.createBonds(symbol1 = conns[0], symbol2 = conns[1],\n minDist = conns[2], maxDist = conns[3])\n \n self.refreshGUI()\n #self.cellChange(space_group.number, a, b, c, alpha, beta, gamma, magNa = 1, magNb = 1, magNc = 1, cutNa = a_cutoff, cutNb = b_cutoff, cutNc = c_cutoff, atomData = atoms)\n #self.updateCell(space_group.number, a, b, c, alpha, beta, gamma, magNa = 1, magNb = 1, magNc = 1, cutNa = a_cutoff, cutNb = b_cutoff, cutNc = c_cutoff, atomData = atoms)\n #self.refreshGUI()\n \n #send signal to the cell window to show the info that has been loaded and to vtkWindow to draw it\n send(signal = \"File Load\", sender = \"Session\",\n spaceGroup = space_group.number, a = a, b = b, c = c,\n alpha = alpha, beta = beta, gamma = gamma, magNa = a_cutoff,\n magNb = b_cutoff, magNc = c_cutoff, cutNa = a_cutoff,\n cutNb = b_cutoff, cutNc = c_cutoff)\n \n \n #TODO: use these values extracted. 
You could combine the three file opening functions.\n #Each function would have to extract values form it's format and then a single function\n #could be used for all three to construct the model from the extracted values.e", "def read_file(infile_name):\n chr_list = [0]*13 \n for i in range(len(chr_list)):\n chr_list[i] = [] \n infile = open(infile_name)\n for line in infile:\n if line.startswith('SL2.40'):\n info = line.strip().split()\n chr = int(info[0][-2:])\n chr_list[chr].append(map(int,info[1:3])+[[info[-1]]])\n else:\n pass\n infile.close()\n return chr_list", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def cfdReadNeighbourFile(self): \r\n with open(self.neighbourFile,\"r\") as fpid:\r\n print('Reading neighbour file ...')\r\n\r\n ## (list) 1D, indices refer to faces, list value is the face's neighbour cell\r\n self.neighbours=[]\r\n start=False\r\n \r\n for linecount, tline in enumerate(fpid):\r\n \r\n if not io.cfdSkipEmptyLines(tline):\r\n continue\r\n \r\n if not io.cfdSkipMacroComments(tline):\r\n continue\r\n \r\n if \"FoamFile\" in tline:\r\n dictionary=io.cfdReadCfdDictionary(fpid)\r\n continue\r\n \r\n if len(tline.split()) ==1:\r\n \r\n #load and skip number of owners\r\n if not start:\r\n self.numberOfInteriorFaces=int(tline)\r\n start=True\r\n continue\r\n \r\n if \"(\" in tline:\r\n continue\r\n if \")\" in tline:\r\n break\r\n else:\r\n 
self.neighbours.append(int(tline.split()[0]))", "def readenergyfile(filename):\n def parsemeta(metalines):\n \"\"\"Parse metadata lines to get metadata object (ordered dict)\n\n Allow only numbers, lists of numbers and strings\n \"\"\"\n def parseline(line):\n res = [val.strip() for val in line[5:].split(u':', 1)]\n key, value = (res[0], res[1]) if len(res) == 2 else (res[0], u'')\n if re.match(r'^-?\\d*[\\.|,]?\\d+$', value):\n value = float(value)\n elif re.match(r'^\\[(.*)\\]', value):\n value = [val.strip() for val in value[1:-1].split(u',')]\n value = [float(val) if re.match(r'^-?\\d*[\\.|,]?\\d+$', val) else val for val in value]\n return key, value\n return OrderedDict(parseline(line) for line in metalines if line.startswith(u'#CTE_'))\n\n with io.open(filename, 'r') as datafile:\n components, meta = [], []\n for ii, line in enumerate(datafile):\n line = line.strip()\n if (line == '') or line.startswith('vector'):\n continue\n elif line.startswith('#'):\n meta.append(line)\n else:\n fields = line.split('#', 1)\n data = [x.strip() for x in fields[0].split(',')]\n comment = fields[1] if len(fields) > 1 else ''\n carrier, ctype, originoruse = data[0:3]\n values = [float(v.strip()) for v in data[3:]]\n\n if ctype not in ('PRODUCCION', 'CONSUMO'):\n raise ValueError(\"Carrier type is not 'CONSUMO' or 'PRODUCCION' in line %i\\n\\t%s\" % (ii+2, line))\n if originoruse not in ('EPB', 'NEPB', 'INSITU', 'COGENERACION'):\n raise ValueError((\"Origin or end use is not 'EPB', 'NEPB', 'INSITU' or 'COGENERACION'\"\n \" in line %i\\n\\t%s\" % (ii+2, line)))\n\n components.append({ \"carrier\": carrier, \"ctype\": ctype,\n \"originoruse\": originoruse,\n \"values\": values, \"comment\": comment })\n numsteps = [len(c['values']) for c in components]\n if max(numsteps) != min(numsteps):\n raise ValueError(\"All input must have the same number of timesteps.\")\n return (parsemeta(meta), components)", "def parse(self, ergFilePath):\n infoFilePath = ergFilePath + '.info'\n quantityList = self.getQuantityList(infoFilePath)\n\n # get block size\n blockSize = sum(quantity.bytes for quantity in quantityList)\n\n with open(ergFilePath, 'rb') as ergFile:\n\n # read the first 16 bytes, as they are not needed now\n ergFile.read(16)\n\n # now read the rest of the file block wise\n block = ergFile.read(blockSize)\n while len(block) == blockSize:\n for q in quantityList:\n q.values.append(np.fromstring(block[:q.bytes], q.dataType)[0])\n block = block[q.bytes:]\n block = ergFile.read(blockSize)\n\n self.quantityList = quantityList\n return quantityList", "def get_data(dataf):\n with open(dataf) as f:\n label = []\n e_val = []\n for line in f:\n label.append(float(line.split()[1]))\n e_val.append(-1 * float(line.split()[0]))\n return label, e_val", "def readqFile(self, PFC, t):\n if self.qFilePath[-1] != '/':\n base = self.qFilePath + '/'\n else:\n base = self.qFilePath\n\n f = base + '{:06d}/'.format(t) + PFC.name + '/' + self.qFileTag\n try:\n df = pd.read_csv(f, names=['X','Y','Z','HF'], skiprows=[0])\n if len(df['HF'].values) != len(PFC.centers):\n print('HF file mesh is not same length as STL file mesh.')\n print('Will not assign HF to mismatched mesh')\n print(\"qFile length: {:d}\".format(len(df['HF'].values)))\n print(\"PFC STL mesh length: {:d}\".format(len(PFC.centers)))\n val = -1\n else:\n PFC.qDiv = df['HF'].values\n PFC.powerFrac = self.getDivertorPowerFraction(PFC.DivCode)\n PFC.qOpticalList.append(PFC.qDiv)\n print(\"Loaded heat flux from file: \"+f)\n val = 0\n except:\n print(\"COULD NOT READ qFILE 
PATH: \"+f)\n print(\"Please point HEAT to a valid qFilePath and qFileTag,\")\n print(\"which should be a .csv file with (X,Y,Z,HF)\")\n val = -1\n\n return val", "def gen_fchks(list_mols):\n #scratch, local_home = os.environ['GAUSS_SCRATCH'], os.path.realpath(os.environ['ASE_HOME'])\n scratch, local_home = config.get('gaussian', 'gauss_scratch'), os.path.realpath(config.get('ase', 'ase_home'))\n\n try:\n active_dir = os.getcwd().split(local_home)[1]\n scratch_dir = scratch + active_dir\n except IndexError:\n raise RuntimeError('Not running from within ASE_HOME')\n\n home_files = [mol.calc.label for mol in list_mols]\n serv_files = [scratch_dir + '/' + fn for fn in home_files]\n\n ssh = remote.connect_server(ssh=True)\n fchk_out = [gen_fchk(serv_f, ssh) for serv_f in serv_files]\n ssh.close()\n\n return fchk_out", "def file_read(file_name):\n \n #open specified file in read mode\n in_file = open(file_name, \"r\")\n \n #create data lists\n sp_length_v3 = []\n sp_period_v3 = [] \n\n #save header to string and split into list\n header_string = in_file.readline()\n header_v3 = header_string.split()\n \n #save revelent data to respective lists\n for line in in_file:\n values = line.split()\n sp_length_v3.append(float(values[1]))\n sp_period_v3.append(float(values[2]))\n \n #close the file\n in_file.close()\n \n #return 3D lists of lists containing data\n ans = [sp_length_v3, sp_period_v3, header_v3]\n \n return ans", "def read_cleaned(file):\n wvlen, band, lamFlam, elamFlam, flamFlam, beam, odate, ref = [],[],[],[],[],[],[],[]\n with open(file, 'r') as f_in:\n for line in f_in:\n try:\n # ensure line contains data:\n a = float(line[0])\n except ValueError:\n a = 'dummy'\n try:\n # ensure mag or flux entry is not '--'\n m = float(line.split(' ')[2])\n except ValueError:\n m = 'dummy'\n \n if isinstance(a, float) and isinstance(m, float):\n wvlen.append(float(line.strip().split(' ')[0])) # in metres\n band.append(line.strip().split(' ')[1])\n lamFlam.append(float(line.strip().split(' ')[2]))\n elamFlam.append(line.strip().split(' ')[3])\n flamFlam.append(line.strip().split(' ')[4])\n beam.append(line.strip().split(' ')[5])\n odate.append(line.strip().split(' ')[6])\n ref.append(line.strip().split(' ')[7])\n \n return wvlen, band, lamFlam, elamFlam, flamFlam, beam, odate, ref", "def readcif(filename, **kwds):\n \n # Read the unit cell parameters\n a, b, c, alf, bet, gam = [[]]*6\n with open(filename, 'r') as f:\n \n for line in f:\n if \"length_a\" in line:\n a = numgrab(line)\n elif \"length_b\" in line:\n b = numgrab(line)\n elif \"length_c\" in line:\n c = numgrab(line)\n elif \"angle_alpha\" in line:\n alf = numgrab(line)\n elif \"angle_beta\" in line:\n bet = numgrab(line)\n elif \"angle_gamma\" in line:\n gam = numgrab(line)\n \n crystVec = a + b + c + alf + bet + gam\n \n # Read atomic coordinates\n cifdata = pd.read_csv(filename, delim_whitespace=True, header=None, **kwds)\n atomLabels = np.array(cifdata.values[:,0], dtype='str')\n coords = np.array(cifdata.values[:,1:4]).astype('float64')\n\n return atomLabels, coords, crystVec", "def read_fx_data(self):\n\n dirStr = self.dir\n\n formatSpec1 = '%Y-%m-%d %H:%M:%S'\n formatSpec2 = '%m/%d/%Y %H:%M'\n\n dirN = os.fsencode(dirStr)\n data = []\n labels = {}\n fileIdx = 0\n\n for file in os.listdir(dirN):\n filename = os.fsdecode(file)\n if filename.endswith('.csv'):\n try:\n fileData, label = self.read_fx_data_from_file(os.path.join(dirStr, filename), formatSpec=formatSpec1)\n except:\n fileData, label = 
self.read_fx_data_from_file(os.path.join(dirStr, filename), formatSpec=formatSpec2)\n\n labels[fileIdx] = label\n fileIdx += 1\n data.append(fileData)\n\n # Drop columns where not all data are present\n scatData = pd.concat([df['Close'] for df in data], axis=1)\n for df in data:\n df.drop(scatData.index[scatData.isnull().any(1).nonzero()[0]], errors='ignore', inplace=True)\n\n return data, labels", "def read_list(f, nb_freqs):\n alist = []\n while len(alist) < nb_freqs:\n line = f.readline()\n splitted = line.split()\n well_splitted = True\n for entry in splitted:\n well_splitted = well_splitted and entry.count('.') <= 1\n if well_splitted:\n entries = splitted\n else:\n if line.count('-') > 0:\n # Probably coming from an SDSS spectrum.\n entries = [line[i:i+12] for i in range(0, len(line) - 1, 12)]\n else:\n entries = [line[i:i+8] for i in range(0, len(line) - 1, 8)]\n for entry in entries:\n try:\n alist.append(float(entry))\n except ValueError:\n # If conversion to float fails, put 0 instead.\n alist.append(0)\n return numpy.array(alist)", "def getForcefields():\n\n gmxlib = os.getenv('GMXLIB') \n if gmxlib == None:\n raise RuntimeError('Error: Please set GMXLIB environment variable to point to location of force fields before running.')\n else:\n try:\n fin = open( os.path.join( gmxlib, 'FF.dat'), 'r')\n except IOError:\n raise RuntimeError('Error: FF.dat cannot be found in GMXLIB=%s...' % gmxlib)\n sys.exit(1)\n forcefields = {}\n forcefieldCodes = {}\n lines = fin.readlines()\n numff = int(lines.pop(0))\n for i in range(0, numff):\n fields = lines[i].split()\n key = fields.pop(0)\n forcefields[ key ] = string.joinfields(fields)\n forcefieldCodes[ key ] = str(i)\n fin.close()\n return forcefields, forcefieldCodes", "def read_fcidump(filename, symmetry=8, verbose=True):\n assert(symmetry==1 or symmetry==4 or symmetry==8)\n if verbose:\n print (\"# Reading integrals in plain text FCIDUMP format.\")\n with open(filename) as f:\n while True:\n line = f.readline()\n if 'END' in line or '/' in line:\n break\n for i in line.split(','):\n if 'NORB' in i:\n nbasis = int(i.split('=')[1])\n elif 'NELEC' in i:\n nelec = int(i.split('=')[1])\n elif 'MS2' in i:\n ms2 = int(i.split('=')[1])\n if verbose:\n print(\"# Number of orbitals: {}\".format(nbasis))\n print(\"# Number of electrons: {}\".format(nelec))\n h1e = numpy.zeros((nbasis, nbasis), dtype=numpy.complex128)\n h2e = numpy.zeros((nbasis, nbasis, nbasis, nbasis), dtype=numpy.complex128)\n lines = f.readlines()\n for l in lines:\n s = l.split()\n # ascii fcidump uses Chemist's notation for integrals.\n # each line contains v_{ijkl} i k j l\n # Note (ik|jl) = <ij|kl>.\n if len(s) == 6:\n # FCIDUMP from quantum package.\n integral = float(s[0]) + 1j*float(s[1])\n s = s[1:]\n else:\n try:\n integral = float(s[0])\n except ValueError:\n ig = ast.literal_eval(s[0].strip())\n integral = ig[0] + 1j*ig[1]\n i, k, j, l = [int(x) for x in s[1:]]\n if i == j == k == l == 0:\n ecore = integral\n elif j == 0 and l == 0:\n # <i|k> = <k|i>\n h1e[i-1,k-1] = integral\n h1e[k-1,i-1] = integral.conjugate()\n elif i > 0 and j > 0 and k > 0 and l > 0:\n # Assuming 8 fold symmetry in integrals.\n # <ij|kl> = <ji|lk> = <kl|ij> = <lk|ji> =\n # <kj|il> = <li|jk> = <il|kj> = <jk|li>\n # (ik|jl)\n h2e[i-1,k-1,j-1,l-1] = integral\n if symmetry == 1:\n continue\n # (jl|ik)\n h2e[j-1,l-1,i-1,k-1] = integral\n # (ki|lj)\n h2e[k-1,i-1,l-1,j-1] = integral.conjugate()\n # (lj|ki)\n h2e[l-1,j-1,k-1,i-1] = integral.conjugate()\n if symmetry == 4:\n continue\n # (ki|jl)\n 
h2e[k-1,i-1,j-1,l-1] = integral\n # (lj|ik)\n h2e[l-1,j-1,i-1,k-1] = integral\n # (ik|lj)\n h2e[i-1,k-1,l-1,j-1] = integral\n # (jl|ki)\n h2e[j-1,l-1,k-1,i-1] = integral\n if symmetry == 8:\n if numpy.any(numpy.abs(h1e.imag)) > 1e-18:\n print(\"# Found complex numbers in one-body Hamiltonian but 8-fold\"\n \" symmetry specified.\")\n if numpy.any(numpy.abs(h2e.imag)) > 1e-18:\n print(\"# Found complex numbers in two-body Hamiltonian but 8-fold\"\n \" symmetry specified.\")\n nalpha = (nelec + ms2) // 2\n nbeta = nalpha - ms2\n return h1e, h2e, ecore, (nalpha, nbeta)", "def read_flt(input_file):\n\n if input_file.endswith('.flt') or input_file.endswith('.hdr'):\n input_file = input_file[:-4]\n else:\n print 'Incorrect filename'\n return 0,0 #exits module gracefully\n\n headers = read_headers(input_file)\n\n #read the data as a 1D array and reshape it to the dimensions in the header\n raster_array = read_bin(input_file).reshape(int(headers[1]), int(headers[0]))\n raster_array = raster_array.reshape(int(headers[1]), int(headers[0])) #rows, columns\n\n return raster_array, headers", "def test_read_with_cclib():\n main([\"-g\", \"/tmp/fnord.Gaussian.gjf\"])\n main([\"/tmp/fnord.Gaussian.gjf\", \"data/benzene.out\"])\n assert_equals(\n open(\"data/benzene.gjf\").read(),\n \"\"\"#Put Keywords Here, check Charge and Multiplicity.\n\n data/benzene.out\n\n0 1\nC 1.7458930000 1.7957530000 -1.0597530000\nC 0.9484120000 2.8689700000 -1.4311180000\nC 1.4480470000 1.0743490000 0.0876540000\nC -0.1470660000 3.2206120000 -0.6552520000\nC 0.3525690000 1.4259910000 0.8635200000\nC -0.4449120000 2.4992080000 0.4921550000\nH 2.5997410000 1.5203090000 -1.6651660000\nH 1.1810280000 3.4311430000 -2.3262420000\nH 2.0700040000 0.2375420000 0.3781170000\nH -0.7690240000 4.0574200000 -0.9457150000\nH 0.1199530000 0.8638180000 1.7586440000\nH -1.2987600000 2.7746520000 1.0975680000\n\n\"\"\",\n )", "def read_reference(conformation_fname):\r\n\r\n #Create empty set\r\n reference_atoms = []\r\n\r\n #Try/catch if file cannot be found. Open file in read mode\r\n #For eveyr line in the text file, strip all white spaces from front and back\r\n #If not empty line, split line on commas and put integers in set. 
These correspond to atom numbers of the key atoms\r\n #Return this list\r\n\r\n try:\r\n with open(conformation_fname, \"r\") as fin :\r\n num = 1\r\n for line in fin:\r\n if num < 10:\r\n num = num + 1\r\n continue\r\n content = line.strip()\r\n if content == '':\r\n continue\r\n else:\r\n reference_atoms.append(content.split())\r\n #reference_atom_num.update([int(i) for i in content.split(',')])\r\n return reference_atoms\r\n #Catch OS error\r\n except OSError:\r\n print('OS error')\r\n sys.exit()\r\n #Catch value error (not appropriate values to be converted to int)\r\n except ValueError:\r\n print('Could not convert data to integer')\r\n sys.exit()", "def fens_from_file(filename):\n file = open(filename, \"r\")\n fens = []\n\n if is_pgn(filename): # pgn input detected we will skip through last position\n print(\"PGN input detected we will only analyze from last position(s) reached.\")\n\n game = chess.pgn.read_game(file)\n while game != None: # there is still at least one game to parse\n board = game.board() # set up board to initial position\n\n for move in game.main_line(): # iterate through mainline\n board.push(move)\n\n fens += [board.fen()]\n game = chess.pgn.read_game(file) # try to \n else: #standard .fen or .epd\n raw_lines = file.readlines()\n for l in raw_lines: #extract all fen\n fen = extract_fen(l)\n \n if fen != None: # If we found a fen\n fens += [fen]\n\n return fens", "def _read_hc(directory):\n fname, found = _make_ctf_name(directory, \"hc\", raise_error=False)\n if not found:\n logger.info(\" hc data not present\")\n return None\n s = list()\n with open(fname, \"rb\") as fid:\n while True:\n p = _read_one_coil_point(fid)\n if p is None:\n # First point bad indicates that the file is empty\n if len(s) == 0:\n logger.info(\"hc file empty, no data present\")\n return None\n # Returns None if at EOF\n logger.info(\" hc data read.\")\n return s\n if p[\"valid\"]:\n s.append(p)", "def mel_gff_list():\n\tmod_gff3 = sys.argv[1]\n\twith open(mod_gff3, 'r') as f:\n\t\tgff = [line.strip().split('\\t') for line in f]\n\t\tf.close()\n\treturn gff\n\t#gff_list ex/:\n\t#['2L', 'FlyBase', 'gene', '7529', '9484', '.', '+', '.', 'ID=FBgn0031208;Name=CG11023;Ontology_term=SO:0000010,SO:0000087,GO:0016929,GO:0016926;Dbxref=FlyBase:FBan0011023,FlyBase_Annotation_IDs:CG11023,GB_protein:ACZ94128,GB_protein:AAO41164,GB:AI944728,GB:AJ564667,GB_protein:CAD92822,GB:BF495604,UniProt/TrEMBL:Q86BM6,INTERPRO:IPR003653,GB_protein:AGB92323,UniProt/TrEMBL:M9PAY1,OrthoDB7_Drosophila:EOG796K1P,OrthoDB7_Diptera:EOG7X1604,EntrezGene:33155,UniProt/TrEMBL:E1JHP8,UniProt/TrEMBL:Q6KEV3,OrthoDB7_Insecta:EOG7Q8QM7,OrthoDB7_Arthropoda:EOG7R5K68,OrthoDB7_Metazoa:EOG7D59MP,InterologFinder:33155,BIOGRID:59420,FlyAtlas:CG11023-RA,GenomeRNAi:33155;gbunit=AE014134;derived_computed_cyto=21A5-21A5'], ['2L', 'FlyBase', 'gene', '9839', '21376', '.', '-', '.', 'ID=FBgn0002121;Name=l(2)gl;fullname=lethal (2) giant larvae;Alias=Lgl,lgl,lethal giant larvae,lethal giant larve,lethal giant larva,lethal(2)giant larvae,Complementation group 2.1,Lethal Giant Larvae,dlgl,p127l(2)gl,LGL,l(2) giant larva,CG2671,L(2)GL,p127,l(2)giant larvae,D-LGL,l(2),gl,l[[2]]gl,l-gl,lethal-giant-larvae,Lethal giant larvae,Lethal (2) giant larvae,L(2)gl,Lethal (2) giant larva,Lethal-giant-larvae,MENE (2L)-B,lethal(2) giant larvae,p127[l(2)gl],lethal(2)-giant larvae,lethal-2-giant larvae,l(2) giant larvae,lethal- giant-larvae,Lethal(2)giant larvae,Lethal-2-giant 
larvae;Ontology_term=SO:0000010,SO:0000087,GO:0005578,GO:0005886,GO:0007269,GO:0016082,GO:0008021,GO:0008283,GO:0016334,GO:0016336,GO:0016333,GO:0016335,GO:0016327,GO:0005829,GO:0045175,GO:0016332,GO:0045184,GO:0007399,GO:0005938,GO:0005737,GO:0007179,GO:0045197,GO:0045196,GO:0002009,GO:0005918,GO:0008105,GO:0045167,GO:0008104,GO:0045746,GO:0007423,GO:0008285,GO:0001738,GO:0016323,GO:0007391,GO:0005856,GO:0030154,GO:0042127,GO:0005614,GO:0045159,GO:0035072,GO:0007559,GO:0045200,GO:0008360,GO:0019991,GO:0007406,GO:0051726,GO:0051668,GO:0007314,GO:0016325,GO:0030036,GO:0030863,GO:0035070,GO:0055059,GO:0035212,GO:0035293,GO:0090163,GO:0048730,GO:0000132,GO:0098725,GO:0060429,GO:0007293,GO:0045176,GO:0072697,GO:0000149,SO:0000548,GO:0005920,GO:0017022,GO:0004860,GO:0006469;Dbxref=FlyBase:FBan0002671,FlyBase_Annotation_IDs:CG2671,INTERPRO:IPR015943,GB_protein:AAN10503,GB_protein:AAG22256,GB_protein:AAN10502,GB_protein:AAN10501,GB_protein:AAF51570,GB_protein:AAG22255,INTERPRO:IPR017986,GB:AA246243,GB:AW942062,GB:AY051654,GB_protein:AAK93078,GB:BH809482,GB:CZ471313,GB:CZ482024,GB:CZ484691,GB:M17022,GB_protein:AAA28671,GB_protein:AAA28672,GB:X05426,GB_protein:CAA29007,UniProt/Swiss-Prot:P08111,INTERPRO:IPR000664,INTERPRO:IPR001680,INTERPRO:IPR013577,GB_protein:AGB92324,UniProt/TrEMBL:M9NCX1,UniProt/TrEMBL:M9PBJ2,OrthoDB7_Drosophila:EOG7CW2GT,OrthoDB7_Diptera:EOG7DRVK2,GB_protein:AFH03479,GB_protein:AFH03478,GB_protein:AFH03481,GB_protein:AFH03480,EntrezGene:33156,INTERPRO:IPR013905,BDGP_clone:PC00404,OrthoDB7_Insecta:EOG7SRGKH,OrthoDB7_Arthropoda:EOG7ZDD82,OrthoDB7_Metazoa:EOG79W94C,InterologFinder:33156,FlyAtlas:CG2671-RB,BIOGRID:59421,Fly-FISH:CG2671,GenomeRNAi:33156,INTERACTIVEFLY:/cytoskel/lethl2g1.htm;gbunit=AE014134;derived_computed_cyto=21A5-21A5'],\n\t# ['2L', 'FlyBase', 'ncRNA', '286383', '288292', '.', '+', '.', 'ID=FBtr0347595;Name=CR46263-RA;Parent=FBgn0267996;Dbxref=FlyBase_Annotation_IDs:CR46263-RA;score_text=Weakly Supported;score=0'], ['2L', 'FlyBase', 'gene', '287252', '289144', '.', '-', '.', 'ID=FBgn0025686;Name=Amnionless;fullname=Amnionless ortholog;Alias=FBgn0031246,CG11592,CK02467,BEST:CK02467,dAMN,Amnionless;Ontology_term=SO:0000010,SO:0000087,GO:0046331,GO:0097206,GO:0016021,GO:0097017;Dbxref=FlyBase:FBan0011592,FlyBase_Annotation_IDs:CG11592,GB_protein:AAF51514,GB:AA141784,GB:CZ468687,UniProt/TrEMBL:Q9VPN2,GB_protein:AGB92350,OrthoDB7_Drosophila:EOG7CGKJK,EntrezGene:33199,BDGP_clone:IP03221,OrthoDB7_Diptera:EOG774804,INTERPRO:IPR026112,OrthoDB7_Insecta:EOG7G266G,OrthoDB7_Arthropoda:EOG7P65FW,OrthoDB7_Metazoa:EOG7ZGX2W,InterologFinder:33199,FlyAtlas:CG11592-RA,GenomeRNAi:33199;gbunit=AE014134;derived_computed_cyto=21B7-21B7'], ['2L', 'FlyBase', 'gene', '292419', '293222', '.', '+', '.', 'ID=FBgn0031247;Name=CG11562;Alias=FBgn0063011,BcDNA:RE44650;Ontology_term=SO:0000010,SO:0000087,GO:0005739,GO:0003674,GO:0008150;Dbxref=FlyBase:FBan0011562,FlyBase_Annotation_IDs:CG11562,GB_protein:AAF51513,GB:AI520524,GB:AI945841,GB:AY119645,GB_protein:AAM50299,GB:BE662187,GB:BI358003,UniProt/TrEMBL:Q9VPN3,OrthoDB7_Drosophila:EOG7HTW3H,OrthoDB7_Diptera:EOG7200K9,EntrezGene:33200,BDGP_clone:RE44650,OrthoDB7_Insecta:EOG7B9454,OrthoDB7_Arthropoda:EOG7RK278,OrthoDB7_Metazoa:EOG78H3X3,FlyAtlas:CG11562-RA,INTERPRO:IPR031568,Fly-FISH:CG11562,GenomeRNAi:33200;gbunit=AE014134;derived_computed_cyto=21B7-21B7'], ['2L', 'FlyBase', 'gene', '292959', '294681', '.', '-', '.', 'ID=FBgn0017457;Name=U2af38;fullname=U2 small nuclear riboprotein auxiliary factor 
38;Alias=FBgn0010626,U2AF38,U2AF,dU2AF38,DU2AF38,CG3582,dU2AF[38],l(2)06751,u2af38,U2AF 38;Ontology_term=GO:0089701,SO:0000010,SO:0000087,GO:0000398,GO:0008187,GO:0005681,GO:0005686,GO:0000381,GO:0005634,GO:0003729,GO:0007052,GO:0071011,GO:0008380,GO:0000166,GO:0046872;Dbxref=FlyBase:FBan0003582,FlyBase_Annotation_IDs:CG3582,GB_protein:AAF51512,GB:AA264081,GB:AA820431,GB:AC004115,GB:AC008371,GB:AI061776,GB:AI455418,GB:AI944553,GB:AQ026079,GB:AY058537,GB_protein:AAL13766,GB:U67066,GB_protein:AAB17271,UniProt/Swiss-Prot:Q94535,INTERPRO:IPR000504,INTERPRO:IPR000571,INTERPRO:IPR009145,INTERPRO:IPR012677,GB_protein:AGB92351,UniProt/TrEMBL:M9PBM1,OrthoDB7_Drosophila:EOG7FRM2M,OrthoDB7_Diptera:EOG700KS6,EntrezGene:33201,BDGP_clone:LD24048,OrthoDB7_Insecta:EOG76QSHP,OrthoDB7_Arthropoda:EOG7KMJ7T,OrthoDB7_Metazoa:EOG70089G,apodroso:10448-U2af38[k14504],InterologFinder:33201,FlyAtlas:CG3582-RA,BIOGRID:59457,Fly-FISH:CG3582,GenomeRNAi:33201;gbunit=AE014134;derived_computed_cyto=21B7-21B8']]", "def vcf_samples(vcffile):\n try:\n vcf_reader = vcf.Reader(open(vcffile, 'r'))\n return vcf_reader.samples\n except Exception as error:\n print(f\"Could not read vcffile {vcffile}: continuing without vcf data: {str(error)}\")\n\n return []", "def readAllfromFile(self):\n with open(self._fname, 'r') as f:\n lines = f.readlines()\n readList = []\n for line in lines:\n line = line.strip()\n if len(line) > 1:\n gra = self._readGrafromLine(line)\n readList.append(gra)\n f.close()\n return readList", "def read_file(infile_name):\n chr_list = [0]*13 \n for i in range(len(chr_list)):\n chr_list[i] = [] \n infile = open(infile_name)\n for line in infile:\n if line.startswith('SL2.40'):\n chr = int(line.strip().split()[0][-2:])\n loci = int(line.strip().split()[1])\n chr_list[chr] += [loci]\n else:\n pass\n infile.close()\n return chr_list", "def comb_fc(filepatt):\r\n\r\n\r\n files = glob.glob(os.path.join(savefolder, filepatt))\r\n print(filepatt)\r\n \r\n if files == []:\r\n imgs = []\r\n print('No files found for ' + filepatt)\r\n return\r\n\r\n\r\n imgs = np.empty((600, 600, 3))\r\n for fi, file in enumerate(files):\r\n img = cv2.imread(file)\r\n \r\n if fi == 0:\r\n imgs = img\r\n else:\r\n imgs = np.concatenate((imgs, img), axis = 2)\r\n\r\n idx = filepatt.find('freq')\r\n comb_fcGraph = os.path.join(savefolder, 'comb_' + filepatt[idx: -len('.mat')])\r\n cv2.imwrite(comb_fcGraph, imgs)\r\n print(comb_fcGraph)\r\n\r\n # find lowweight\r\n pvals_vec = []\r\n ciCOH_vec = []\r\n pvals = pvals_fc_overtime(ciCOH = ciCOH, ntrials = ntrials, ntemp = ntemp, f = f, t = t)\r\n\r\n reject, pval_corr = fdr_correction(pvals, alpha=0.05, method='indep')\r\n \r\n\r\n \r\n lowweight = min(ciCOH_vec[rejs])\r\n\r\n return lowweight", "def readblock(fileObj):\n data = []\n\n p = re.compile('ORDINATE')\n q = re.compile('0LINEAR COEFFICIENTS')\n for line in fileObj:\n if q.search(line) is not None:\n break\n if p.search(line) is None:\n dataContent = line[0:31]\n dataContent = dataContent.replace('D', 'E')\n datarow = list(map(float, dataContent.split()))\n data.append(datarow)\n\n return np.array(data)", "def read_rf_file(filename: str):\n header_dt = np.dtype([('nfft', np.uint32), ('frame_period', np.float64),\n ('t0_int_s', np.uint64), ('t0_frac_s', np.float64)])\n\n with open(filename, \"rb\") as f:\n header = np.fromfile(f, dtype=header_dt, count=1)\n\n row_dt = np.dtype([(\"sync\", np.uint32), (\"frame_num\", np.uint32),\n (\"fft_bins\", np.float32, (1, header[0][\"nfft\"]))])\n\n rows = np.fromfile(f, dtype=row_dt)\n\n return 
header[0], rows", "def calHet( inFile, varType ):\n names = []\n print(\"Sample\\tfracHet\\thetCt\\thomCt\") # print header\n \n with open( inFile, 'r') as files: # open sample name file\n for i in files:\n i = i.rstrip()\n vcf = i + \".\" + varType + \".vcf\" \n with open( vcf, 'r' ) as data:\n hom = 0.0 # count homozygous sites\n het = 0.0 # count heterozygous sites\n fractionHet = 0.0 # fraction heterozygous\n \n for var in data:\n if var.startswith(\"#\"): # skip header\n continue\n else: \n var = var.rstrip()\n line = var.split(\"\\t\")\n stats = line[9].split(':') # \n alleles = list( map( int, stats[1].split(',') ) ) # create list of allele counts\n check = [ i for i in alleles if i > 0] # put any counts > 0 into a list\n if not check: # if all allele counts == 0\n continue # all alleles are set to zero wtf? Result of a quality score that is low.\n elif len(check) > 1: # multiple allele counts , must be heterozygous\n het += 1 # more than one allele \n elif len(check) == 1: # only one allele has a count\n hom += 1\n #print(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\" %(i, line[0], line[1], stats[0], stats[1], check ) ) \n if hom == 0:\n fractionHet = 100\n else:\n fractionHet = het/(hom + het) # calculate fraction heterozygous\n print(\"%s\\t%f\\t%f\\t%f\" %(i, fractionHet, het,hom )) \n \n files.close()", "def _read_libffm_file(self, filename):\n\n X_true = np.zeros((self.num_rows, self.num_features))\n y_true = np.zeros((self.num_rows, 1))\n field_true = np.zeros((self.num_features, 1))\n with open(filename, 'r') as f:\n i = 0\n for line in f:\n tmp_row = line.replace('\\n', '').split(' ')\n\n # extract label\n y_true[i] = int(tmp_row[0])\n\n # extract data and fields\n for k in range(1, len(tmp_row)):\n if len(tmp_row[k]) > 0:\n tmp_str = tmp_row[k].split(':')\n j = int(tmp_str[1])\n field_true[j] = int(tmp_str[0])\n tmp_data = float(tmp_str[2])\n X_true[i, j] = tmp_data\n i = i + 1\n\n return X_true, y_true, field_true", "def cfdReadPointsFile(self):\r\n\r\n with open(self.pointsFile,\"r\") as fpid:\r\n \r\n print('Reading points file ...')\r\n points_x=[]\r\n points_y=[]\r\n points_z=[]\r\n \r\n for linecount, tline in enumerate(fpid):\r\n \r\n if not io.cfdSkipEmptyLines(tline):\r\n continue\r\n \r\n if not io.cfdSkipMacroComments(tline):\r\n continue\r\n \r\n if \"FoamFile\" in tline:\r\n dictionary=io.cfdReadCfdDictionary(fpid)\r\n continue\r\n \r\n if len(tline.split()) ==1:\r\n if \"(\" in tline:\r\n continue\r\n if \")\" in tline:\r\n continue\r\n else:\r\n self.numberOfNodes = int(tline.split()[0])\r\n continue\r\n \r\n tline=tline.replace(\"(\",\"\")\r\n tline=tline.replace(\")\",\"\")\r\n tline=tline.split()\r\n \r\n points_x.append(float(tline[0]))\r\n points_y.append(float(tline[1]))\r\n points_z.append(float(tline[2]))\r\n \r\n ## (array) with the mesh point coordinates \r\n self.nodeCentroids = np.array((points_x, points_y, points_z), dtype=float).transpose()", "def parseOcrFile(self, filePath):\n conditionedFileList = []\n f = open(filePath, 'r')\n ocrNumeralsListofLists = []\n\n #First strip newlines from each line of file\n for line in f:\n conditionedFileList.extend([line.rstrip(\"\\n\")])\n\n ocrSeqLists = list(chunked(conditionedFileList, 4))\n\n for ocrSeq in ocrSeqLists:\n resultList = self.parseOcrLines(ocrSeq)\n ocrNumeralsListofLists.extend([resultList])\n \n return ocrNumeralsListofLists", "def _readBTS(self,fname):\n with BinaryFile(fname) as f:\n #\n # read header info\n #\n if self.verbose: print('Reading header information from',fname)\n\n ID = f.read_int2()\n 
assert( ID==7 or ID==8 )\n if ID==7: filetype = 'non-periodic'\n elif ID==8: filetype = 'periodic'\n else: filetype = 'UNKNOWN'\n if self.verbose:\n print(' id= {:d} ({:s})'.format(ID,filetype))\n\n # - read resolution settings\n self.NZ = f.read_int4()\n self.NY = f.read_int4()\n self.Ntower = f.read_int4()\n if self.verbose:\n print(' NumGrid_Z,_Y=',self.NZ,self.NY)\n print(' ntower=',self.Ntower)\n self.N = f.read_int4()\n self.dz = f.read_float(dtype=self.realtype)\n self.dy = f.read_float(dtype=self.realtype)\n self.dt = f.read_float(dtype=self.realtype)\n self.period = self.realtype(self.N * self.dt)\n self.Nsize = 3*self.NY*self.NZ*self.N\n if self.verbose:\n print(' nt=',self.N)\n print(' (problem size: {:d} points)'.format(self.Nsize))\n print(' dz,dy=',self.dz,self.dy)\n print(' TimeStep=',self.dt)\n print(' Period=',self.period)\n\n # - read reference values\n self.uhub = f.read_float(dtype=self.realtype)\n self.zhub = f.read_float(dtype=self.realtype) # NOT USED\n self.zbot = f.read_float(dtype=self.realtype)\n if self.Umean is None:\n self.Umean = self.uhub\n if self.verbose:\n print(' Umean = uhub =',self.Umean,\n '(for calculating fluctuations)')\n else: # user-specified Umean\n if self.verbose:\n print(' Umean =',self.Umean,\n '(for calculating fluctuations)')\n print(' uhub=',self.uhub,' (NOT USED)')\n if self.verbose:\n print(' HubHt=',self.zhub,' (NOT USED)')\n print(' Zbottom=',self.zbot)\n\n # - read scaling factors\n self.Vslope = np.zeros(3,dtype=self.realtype)\n self.Vintercept = np.zeros(3,dtype=self.realtype)\n for i in range(3):\n self.Vslope[i] = f.read_float(dtype=self.realtype)\n self.Vintercept[i] = f.read_float(dtype=self.realtype)\n if self.verbose:\n # output is float64 precision by default...\n print(' Vslope=',self.Vslope)\n print(' Vintercept=',self.Vintercept)\n\n # - read turbsim info string\n nchar = f.read_int4()\n version = f.read(N=nchar)\n if self.verbose: print(version)\n\n #\n # read normalized data\n #\n # note: need to specify Fortran-order to properly read data using np.nditer\n t0 = time.process_time()\n if self.verbose: print('Reading normalized grid data')\n\n self.U = np.zeros((3,self.NY,self.NZ,self.N),order='F',dtype=self.realtype)\n self.T = np.zeros((self.N,self.NY,self.NZ))\n if self.verbose:\n print(' U size :',self.U.nbytes/1024.**2,'MB')\n\n for val in np.nditer(self.U, op_flags=['writeonly']):\n val[...] = f.read_int2()\n self.U = self.U.swapaxes(3,2).swapaxes(2,1) # new shape: (3,self.N,self.NY,self.NZ)\n\n if self.Ntower > 0:\n if self.verbose:\n print('Reading normalized tower data')\n self.Utow = np.zeros((3,self.Ntower,self.N),\n order='F',dtype=self.realtype)\n if self.verbose:\n print(' Utow size :',self.Utow.nbytes/1024.**2,'MB')\n for val in np.nditer(self.Utow, op_flags=['writeonly']):\n val[...] 
= f.read_int2()\n\n if self.verbose:\n print(' Read velocitiy fields in',time.process_time()-t0,'s')\n \n #\n # calculate dimensional velocity\n #\n if self.verbose:\n print('Calculating velocities from normalized data')\n for i in range(3):\n self.U[i,:,:,:] -= self.Vintercept[i]\n self.U[i,:,:,:] /= self.Vslope[i]\n if self.Ntower > 0:\n self.Utow[i,:,:] -= self.Vintercept[i]\n self.Utow[i,:,:] /= self.Vslope[i]\n self.U[0,:,:,:] -= self.Umean # uniform inflow w/ no shear assumed\n\n print(' u min/max [',np.min(self.U[0,:,:,:]),\n np.max(self.U[0,:,:,:]),']')\n print(' v min/max [',np.min(self.U[1,:,:,:]),\n np.max(self.U[1,:,:,:]),']')\n print(' w min/max [',np.min(self.U[2,:,:,:]),\n np.max(self.U[2,:,:,:]),']')\n\n self.scaling = np.ones((3,self.NZ))\n\n #\n # calculate coordinates\n #\n if self.verbose:\n print('Calculating coordinates')\n #self.y = -0.5*(self.NY-1)*self.dy + np.arange(self.NY,dtype=self.realtype)*self.dy\n self.y = np.arange(self.NY,dtype=self.realtype)*self.dy\n self.z = self.zbot + np.arange(self.NZ,dtype=self.realtype)*self.dz\n #self.ztow = self.zbot - np.arange(self.NZ,dtype=self.realtype)*self.dz #--NOT USED\n\n self.t = np.arange(self.N,dtype=self.realtype)*self.dt\n if self.verbose:\n print('Read times [',self.t[0],self.t[1],'...',self.t[-1],']')", "def Read_RMCA_basic(Complete_Path):\n fid = open(Complete_Path,'r')\n S = []\n while 1: \n line = fid.readline()\n if line =='': \n break \n else :\n S.append(float(line))\n #R.append(float(line[27:-2]))\n return np.array(S)", "def _read_rmf(file):\n\n with fits.open(file) as hdul:\n data = hdul[2].data\n\n return data['energ_lo'], data['energ_hi'], data['n_grp'], data['f_chan'], data['n_chan'], data['matrix']", "def process_spacecraft(spacecraft_file_abspath):\n\n total_fuel = 0\n with open(spacecraft_file_abspath, 'r') as f:\n for mass in f.readlines():\n total_fuel += process_module_mass(int(mass))\n\n return total_fuel", "def test_fc(self):\n self.assertEqual(self.nhf.metadata[\"ndim\"], 3)\n self.assertEqual(self.nhf.metadata[\"ngroup\"], 4)\n self.assertEqual(self.nhf.metadata[\"ninti\"], 5)\n self.assertEqual(self.nhf.metadata[\"nintj\"], 5)\n self.assertEqual(self.nhf.metadata[\"nintk\"], 6)\n self.assertEqual(self.nhf.metadata[\"nSurf\"], 6)\n self.assertEqual(self.nhf.metadata[\"nMom\"], 5)\n self.assertEqual(self.nhf.metadata[\"nintxy\"], 19)\n self.assertEqual(self.nhf.metadata[\"npcxy\"], 144)\n self.assertEqual(self.nhf.metadata[\"iaprx\"], 4)\n self.assertEqual(self.nhf.metadata[\"iaprxz\"], 3)\n\n variantControlInfo = nhflux.FILE_SPEC_1D_KEYS_VARIANT11\n for info in variantControlInfo:\n self.assertTrue(info not in self.nhf.metadata)", "def list_of_file_boundaries(self):\n res = []\n list_of_file_lengths = [file['length'] for file in self.file_meta]\n partial_sum = 0\n for i in range(len(list_of_file_lengths)):\n partial_sum += list_of_file_lengths[i]\n res.append(partial_sum)\n return res[:]", "def read_file(fname):\n recs=[]\n logger.info('Start read file %s', fname)\n with open(fname) as inf:\n for line in inf:\n recs.append(line.strip().split())\n logger.info('End reading with recnumber %d', len(recs))\n return recs", "def ccd_to_list(file_path: str) -> list:\n\n file = open(file_path, 'r')\n lines = file.readlines()\n file.close()\n\n data = []\n\n for index, line in enumerate(lines):\n try:\n if line.startswith(' '):\n data.append(float(line.split(' ')[-1].strip()))\n elif isinstance(int(line[0]), int):\n split_line = line.split('\\t')\n if len(split_line) > 1:\n 
data.append(float(split_line[-1].strip()))\n if split_line[1].startswith('23'):\n if int(split_line[0])+1 != int(lines[index+1].split('\\t')[0]):\n print('DAY\\tHOUR\\tVALUE')\n print(line, lines[index+1])\n else:\n if int(split_line[1][:2])+1 != int(lines[index+1].split('\\t')[1][:2]):\n print('\\nDAY\\tHOUR\\t\\tVALUE')\n print(line.strip())\n print(f'--- MISSING DATA UNTIL ---')\n print(lines[index+1].strip())\n else:\n split_line = line.strip().split(' ')\n data.append(float(split_line[-1]))\n if split_line[1].startswith('23'):\n if int(split_line[0])+1 != int(lines[index+1].split(' ')[0]):\n print('DAY\\tHOUR\\tVALUE')\n print(line, lines[index+1])\n else:\n if int(split_line[1][:2])+1 != int(lines[index+1].split(' ')[1][:2]):\n print('\\nDAY\\tHOUR\\t\\tVALUE')\n print(line.strip())\n print(f'--- MISSING DATA UNTIL ---')\n print(lines[index+1].strip())\n except ValueError:\n pass\n\n return data", "def parse_cif(filename, header=False, spg=False, eng=False, csd=False, sim=False):\n strings = []\n headers = []\n spgs = []\n engs = []\n csds = []\n sims = []\n with open(filename, 'r') as f:\n lines = f.readlines()\n start = None\n end = None\n for i in range(len(lines)):\n if lines[i].find(\"data_\") == 0:\n if sim:\n sims.append(float(lines[i].split(':')[-1]))\n end = i\n if start is not None:\n tmp = []\n for l in lines[start:end-1]:\n if len(re.findall(r\"[0-9][B-C]\", l))>0 or \\\n len(re.findall(r\"[A-Z][0-9]\\' [0-9]\", l))>0:\n #print(l) #; import sys; sys.exit()\n continue\n tmp.append(l)\n cif = listToString(tmp)\n strings.append(cif)\n start = i\n headers.append(lines[i])\n elif lines[i].find(\"_symmetry_Int_Tables_number\") == 0:\n spgs.append(int(lines[i].split()[-1]))\n elif lines[i].find(\"#Energy\") == 0:\n engs.append(float(lines[i].split()[1]))\n elif lines[i].find(\"_database_code\") == 0:\n tmp = lines[i].split()[-1]\n csds.append(tmp[:-1])\n\n #Last one\n tmp = []\n for l in lines[start:]:\n if len(re.findall(r\"[0-9][B-D]\", l))>0 or \\\n len(re.findall(r\"[A-Z][0-9]\\' [0-9]\", l))>0:\n #print(l);\n continue\n tmp.append(l)\n cif = listToString(tmp)\n strings.append(cif)\n\n if header:\n return strings, headers\n elif spg:\n return strings, spgs\n elif eng:\n return strings, engs\n elif csd:\n return strings, csds\n elif sim:\n return strings, sims\n else:\n return strings", "def parse_vcf(filename, gff_contigs):\n with open(filename, \"r\") as f:\n for line in f:\n if line[0] == \"#\": # ignore comment lines in vcf file\n continue\n\n elements = line.rstrip().split(\"\\t\")\n name = elements[0] # name of the contig / chromosome SNP found on\n pos = elements[1] # position of SNP in contig\n ref = elements[3] # reference basepair(s)\n alt = elements[4] # SNP basepair(s) at same location\n qual = float(elements[5]) # SNP quality, might want to filter this\n contig = gff_contigs[name] # grab contig where SNP is located\n\n # iterate over GFF annotations on his contig, printing those that\n # overlap with the position of the SNP.\n for feature in contig:\n if pos >= feature.start and pos <= feature.stop:\n extract_feature_notes(feature.contig, feature.type, feature.start, feature.stop, feature.direction, feature.notes, pos, ref, alt) # prints info" ]
[ "0.61628425", "0.6071108", "0.5977757", "0.5936052", "0.5908581", "0.5907671", "0.5888301", "0.5856573", "0.5693422", "0.5663114", "0.56423414", "0.5607148", "0.55911905", "0.550648", "0.55034184", "0.5458108", "0.544729", "0.54410064", "0.54253936", "0.5411036", "0.538077", "0.5372545", "0.53587973", "0.5357465", "0.53333944", "0.5324789", "0.5319221", "0.529981", "0.5299186", "0.5296077", "0.52953786", "0.52560085", "0.52419275", "0.5240033", "0.52346325", "0.52299714", "0.5225511", "0.5210736", "0.5202591", "0.51972675", "0.5195811", "0.518433", "0.5181529", "0.5172706", "0.5168701", "0.51587653", "0.51527405", "0.5136566", "0.5124188", "0.5122215", "0.5121301", "0.51128596", "0.5110975", "0.5110806", "0.5108491", "0.5107992", "0.5097166", "0.5088332", "0.50782186", "0.506007", "0.5059102", "0.5047931", "0.5046195", "0.5045558", "0.50390315", "0.50260496", "0.50212604", "0.50166047", "0.50079757", "0.5006693", "0.5003529", "0.50016123", "0.50006676", "0.49961123", "0.49927664", "0.4990203", "0.49783328", "0.4974604", "0.49709317", "0.49688774", "0.4963998", "0.4956407", "0.4954851", "0.49514276", "0.49512598", "0.49452862", "0.4938546", "0.49381024", "0.49344978", "0.49344128", "0.4931231", "0.4931011", "0.49254325", "0.49066997", "0.48983023", "0.48964417", "0.48959556", "0.48948818", "0.4892368", "0.48884574" ]
0.5689103
9
Returns the bond order between two atoms.
def bond_order(bondxi, threshold_single_meso=0.0847,
               # ================================================================
               # threshold_meso_double=0.184,
               #================================================================
               threshold_meso_double=0.0847,
               threshold_double_triple=0.27):
    if bondxi < threshold_single_meso:
        order = '1'
    elif bondxi < threshold_meso_double:
        order = '1.5'
    elif bondxi < threshold_double_triple:
        order = '2'
    else:
        order = '3'
    return order
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bond_order(molecule, bond_index):\n return molecule.GetBondOrder(bond_index)", "def get_bond_order(bond_type):\n if bond_type == 'single':\n return 1\n elif bond_type == 'aromatic':\n return 1.5\n elif bond_type == 'double':\n return 2\n elif bond_type == 'triple':\n return 3\n else:\n raise ValueError(f'Unexpected bond type: {bond_type.upper()}')", "def bond_distances_v2(molmod_atoms, bonds=None, ignored_elements=None):\n if not ignored_elements:\n ignored_elements = []\n\n m=molmod_atoms\n\n if not bonds:\n bonds = m.graph.edges\n\n bond_dists = []\n indices = []\n\n for ind1, ind2 in bonds:\n if not m.symbols[ind1] in ignored_elements and not m.symbols[ind2] in ignored_elements:\n bond_dists.append(m.distance_matrix[ind1,ind2]/molmod.angstrom)\n indices.append((ind1, ind2))\n\n #we sort by bond index so that comparison between two bdist_inds objects is possible (without sorting we can get variation in the order)\n bdist_inds = zip(bond_dists, indices)\n bdist_inds.sort(key=lambda e: e[1])\n\n return bdist_inds", "def order(self):\n\n return np.array([bond.order for bond in self])", "def test_order_atoms(self):\n mol1 = converter.s_bonds_mol_from_xyz(self.xyz10['dict'])\n mol2 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order1'])\n mol3 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order2'])\n converter.order_atoms(ref_mol=mol1, mol=mol2)\n for atom1, atom2 in zip(mol1.atoms, mol2.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n converter.order_atoms(ref_mol=mol3, mol=mol1)\n for atom1, atom2 in zip(mol3.atoms, mol1.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n\n ref_mol = Molecule(smiles='[CH](CC[CH]c1ccccc1)c1ccccc1')\n mol_list = ref_mol.copy(deep=True).generate_resonance_structures(keep_isomorphic=False, filter_structures=True)\n for mol in mol_list:\n converter.order_atoms(ref_mol=ref_mol, mol=mol)\n bond_dict = dict()\n for index1, atom1 in enumerate(ref_mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = ref_mol.atoms.index(atom2)\n if index1 < index2:\n if index1 not in bond_dict:\n bond_dict[index1] = [index2]\n else:\n bond_dict[index1].append(index2)\n for index1, atom1 in enumerate(mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = mol.atoms.index(atom2)\n if index1 < index2:\n self.assertIn(index2, bond_dict[index1]) # check that these atoms are connected in all mols", "def set_bond_order(molecule, bond_index, bond_order):\n return molecule.SetBondOrder(bond_index, bond_order)", "def get_all_bond_orders(molecule):\n return nps.vtk_to_numpy(molecule.GetBondOrdersArray())", "def getOrder(self):\n return _libsbml.CompartmentGlyph_getOrder(self)", "def bond_dist_delta(ase_mol1, ase_mol2):\n #convert to molmod\n mol1 = to_molmod(ase_mol1)\n mol2 = to_molmod(ase_mol2)\n\n #get bond distances between neighbouring carbon atoms\n mol1_bdists_inds = bond_distances_v2(mol1)\n #seperate the bond distances and the atom indices the bonds correspond to\n #nb indexes are python_like so start at zero programs (e.g. 
pyMol/Avogadro) often number atoms starting at 1\n mol1_bdists, mol1_inds = zip(*mol1_bdists_inds)\n\n mol2_bdists_inds = bond_distances_v2(mol2, bonds=mol1_inds)\n mol2_bdists, mol2_inds = zip(*mol2_bdists_inds)\n\n if mol1_inds != mol2_inds:\n raise RuntimeError('Comparison of bond distances for different molecules not yet implemented')\n\n mol1_bdists = np.array(mol1_bdists)\n mol2_bdists = np.array(mol2_bdists)\n\n delta_bdists = mol1_bdists - mol2_bdists\n return np.array([mol1_inds, delta_bdists])", "def _canonical_order(node_chunk_a: node_chunk, node_chunk_b: node_chunk) -> int:\n na, prec_a, slotsA = node_chunk_a\n nb, prec_b, slotsB = node_chunk_b\n\n # compare based on node precedence\n if prec_a > prec_b:\n return -1\n elif prec_b > prec_a:\n return 1\n\n # compare based on slots\n else:\n # slots are equivalent\n if slotsA == slotsB:\n return 0\n\n # a is subset of b\n aWithoutB = slotsA - slotsB\n if not aWithoutB:\n return 1\n\n # b is subset of a\n bWithoutA = slotsB - slotsA\n if not bWithoutA:\n return -1\n\n # compare based on slots\n aMin = min(aWithoutB)\n bMin = min(bWithoutA)\n return -1 if aMin < bMin else 1", "def _bond_dist(geom, a1, a2):\n if isinstance(geom, np.ndarray):\n geom = geom.flatten().tolist()\n a13 = a1 * 3\n a23 = a2 * 3\n\n xd = (geom[a13] - geom[a23])**2\n yd = (geom[a13 + 1] - geom[a23 + 1])**2\n zd = (geom[a13 + 2] - geom[a23 + 2])**2\n\n return (xd + yd + zd)**0.5", "def get_symbol_name_order(gdbval):\n return (symtab_node_name (gdbval), int(gdbval[\"order\"]))", "def add_bond(molecule, atom1_index, atom2_index, bond_order=1):\n molecule.AppendBond(atom1_index, atom2_index, bond_order)", "def get_bond_id(atom1, atom2):\n id1 = [str(atom1.atom_name) + str(atom2.atom_name) + str(atom1.residue_name) + str(atom2.residue_name), str(atom2.atom_name) + str(atom1.atom_name) + str(atom2.residue_name) + str(atom1.residue_name)]\n return id1", "def test_order_atoms_in_mol_list(self):\n ref_mol = Molecule(smiles='[CH](CC[CH]c1ccccc1)c1ccccc1')\n mol_list = ref_mol.copy(deep=True).generate_resonance_structures(keep_isomorphic=False, filter_structures=True)\n success = converter.order_atoms_in_mol_list(ref_mol=ref_mol, mol_list=mol_list)\n self.assertTrue(success)\n bond_dict = dict()\n for index1, atom1 in enumerate(ref_mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = ref_mol.atoms.index(atom2)\n if index1 < index2:\n if index1 not in bond_dict:\n bond_dict[index1] = [index2]\n else:\n bond_dict[index1].append(index2)\n for mol in mol_list:\n for index1, atom1 in enumerate(mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = mol.atoms.index(atom2)\n if index1 < index2:\n self.assertIn(index2, bond_dict[index1]) # check that these atoms are connected in all mols", "def order ( self ) :\n return self.__order", "def Order(self) -> int:", "def order(self):\n return self.__order", "def naive_order_calculation(self):\n\t\torder = 0\n\t\tfor pt in self.enumerate_points():\n\t\t\torder += 1\n\t\treturn order", "def modules_cmp(m1, m2):\n\n if m1.endswith('stubs'):\n return 1\n if m2.endswith('stubs'):\n return -1\n \n return len(m1) - len(m2)", "def _bond_orders(self, control, bond_order_matrix, configuration):\n text = \"\"\n n_atoms = configuration.n_atoms\n bond_i = []\n bond_j = []\n bond_order = []\n bond_order_str = []\n orders = []\n ij = 0\n for j in range(n_atoms):\n for i in range(j + 1):\n if i != j:\n order = bond_order_matrix[ij]\n if order > 0.5:\n bond_i.append(i)\n bond_j.append(j)\n if order > 1.3 and order < 1.7:\n 
bond_order.append(5)\n bond_order_str.append(\"aromatic\")\n else:\n bond_order.append(round(order))\n bond_order_str.append(str(round(order)))\n orders.append(order)\n ij += 1\n\n symbols = configuration.atoms.symbols\n options = self.parent.options\n text_lines = []\n if len(symbols) <= int(options[\"max_atoms_to_print\"]):\n if \"name\" in configuration.atoms:\n name = configuration.atoms.get_column_data(\"name\")\n else:\n name = []\n count = {}\n for symbol in symbols:\n if symbol not in count:\n count[symbol] = 1\n else:\n count[symbol] += 1\n name.append(f\"{symbol}{count[symbol]}\")\n table = {\n \"i\": [name[i] for i in bond_i],\n \"j\": [name[j] for j in bond_j],\n \"bond order\": orders,\n \"bond multiplicity\": bond_order_str,\n }\n tmp = tabulate(\n table,\n headers=\"keys\",\n tablefmt=\"pretty\",\n disable_numparse=True,\n colalign=(\"center\", \"center\", \"right\", \"center\"),\n )\n length = len(tmp.splitlines()[0])\n text_lines.append(\"\\n\")\n text_lines.append(\"Bond Orders\".center(length))\n text_lines.append(\n tabulate(\n table,\n headers=\"keys\",\n tablefmt=\"psql\",\n colalign=(\"center\", \"center\", \"decimal\", \"center\"),\n )\n )\n text += \"\\n\\n\"\n text += textwrap.indent(\"\\n\".join(text_lines), self.indent + 7 * \" \")\n\n if control == \"yes, and apply to structure\":\n ids = configuration.atoms.ids\n iatoms = [ids[i] for i in bond_i]\n jatoms = [ids[j] for j in bond_j]\n configuration.bonds.delete()\n configuration.bonds.append(i=iatoms, j=jatoms, bondorder=bond_order)\n text2 = (\n \"\\nReplaced the bonds in the configuration with those from the \"\n \"calculated bond orders.\\n\"\n )\n\n text += str(__(text2, indent=self.indent + 4 * \" \"))\n\n return text", "def order(self):\n return self.n", "def order(self):\n return self._order", "def order(self):\n return self._order", "def order(self):\n return self._order", "def test_fractional_bondorder_multiple_same_mol(self):\n\n mol = create_ethanol()\n mol2 = create_reversed_ethanol()\n\n forcefield = ForceField(\n get_data_file_path(\"test_forcefields/test_forcefield.offxml\"),\n xml_ff_bo,\n )\n topology = Topology.from_molecules([mol, mol2])\n\n with pytest.raises(ValueError):\n forcefield.create_openmm_system(\n topology,\n charge_from_molecules=[mol],\n partial_bond_orders_from_molecules=[mol, mol2],\n )", "def order(self) -> float:\n return self._order", "def canonical_order(match):\n\n # match[0][0:3] contains the ID numbers of the 4 atoms in the match\n atom0 = match[0][0]\n atom1 = match[0][1]\n atom2 = match[0][2]\n atom3 = match[0][3]\n # match[1][0:2] contains the ID numbers of the the 3 bonds\n bond0 = match[1][0]\n bond1 = match[1][1]\n bond2 = match[1][2]\n if atom0 < atom3:\n # return ((atom0, atom1, atom2, atom3), (bond0, bond1, bond2)) same\n # as:\n return match\n else:\n return ((atom3, atom2, atom1, atom0), (bond2, bond1, bond0))", "def order(self):\n return self._degree + 1", "def _tab_order_sorter(app1, app2):\n app1_order = int(app1[\"taborder\"]) if \"taborder\" in app1 else sys.maxsize\n app2_order = int(app2[\"taborder\"]) if \"taborder\" in app2 else sys.maxsize\n return app1_order - app2_order", "def order(self):\n return self._order + 1", "def correct_ordering_of_component_masses(mass_1, mass_2):\n if mass_1 > mass_2:\n return mass_1, mass_2\n else:\n return mass_2, mass_1", "def getOrderList(self):\r\n\t\treturn self.pair.orders", "def get_interactions(list_atoms1, list_atoms2, dist):\n beta_carbons1 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms1))\n 
beta_carbons2 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms2))\n ns = NeighborSearch(beta_carbons1)\n interactions = []\n\n for atom in beta_carbons2:\n interact = ns.search(atom.get_coord(), dist)\n interactions.extend(\n [tuple(sorted([str(atom.get_parent().resname), str(x.get_parent().resname)])) for x in interact])\n return interactions", "def edge_direction(a, b):\n if a[0] == b[0]:\n return -1, 1\n elif a[0] == b[1]:\n return -1, -1\n elif a[1] == b[0]:\n return 1, 1\n elif a[1] == b[1]:\n return 1, -1\n else:\n constants.log.debug('\\n'.join([\n 'edges not connected!',\n 'vertex path %s',\n 'entity path: %s',\n 'entity[a]: %s,',\n 'entity[b]: %s']),\n vertex_path,\n entity_path,\n entities[ea].points,\n entities[eb].points)\n\n return None, None", "def bond_atoms(atom_list):\n pass", "def nqbyorder(self, order):\n idx = find_nearest(AVAILABLEORDERS, order)\n return AVAILABLEORDERS[idx], NUMBERQUADPOINTS[idx]", "def gather_referring_orders (gdbval):\n# TODO: Somehow also note speculative references and attributes in\n# general\n vec = gdbval[\"referring\"]\n return [int(i[\"referring\"][\"order\"]) for i in vec_iter(vec)]", "def _get_relevant_bond(self, atom1, atom2):\n bonds_1 = set(atom1.bonds)\n bonds_2 = set(atom2.bonds)\n relevant_bond_set = bonds_1.intersection(bonds_2)\n relevant_bond = relevant_bond_set.pop()\n if relevant_bond.type is None:\n return None\n relevant_bond_with_units = self._add_bond_units(relevant_bond)\n\n check_dimensionality(relevant_bond_with_units.type.req, unit.nanometers)\n check_dimensionality(relevant_bond_with_units.type.k, unit.kilojoules_per_mole/unit.nanometers**2)\n return relevant_bond_with_units", "def order(self):\n return reduce(lcm,[1]+[len(cycle) for cycle in self.cyclic_form])", "def order (self):\n order = 1\n result = self * self\n while result != self:\n result *= self\n order += 1\n return order", "def getOrder(self):\n return self._order", "def Order(self) -> int:\n return self.m_order", "def find_point_order(point, field, a_value, b_value):\n order = int(2)\n try:\n s_point = add_points(point, point, field, a_value, b_value)\n except ValueError:\n return order\n while True:\n try:\n s_point = add_points(s_point, point, field, a_value, b_value)\n order += 1\n except ValueError:\n return order", "def test_bond_order_method_passing(self, model, toolkit):\n mol = Molecule.from_smiles(\"CCO\")\n\n # Test that default model works\n mol.assign_fractional_bond_orders()\n\n mol.assign_fractional_bond_orders(\n bond_order_model=model,\n )\n\n mol.assign_fractional_bond_orders(\n bond_order_model=model,\n toolkit_registry=toolkit(),\n )\n\n mol.assign_fractional_bond_orders(\n bond_order_model=model,\n toolkit_registry=ToolkitRegistry([toolkit()]),\n )", "def order(a):\n order = 2\n circ = circlfunc(a,a)\n while True:\n if ifidentity(circ):\n return order\n else:\n circ= circlfunc(a,circ)\n order = order+1", "def column_order(self):\n return ((1, 2), (1, 0), (1, 1))", "def get_order(self):\n order = getattr(self, 'method_order', None)\n if order is None: # User-supplied method in MyRungeKutta\n order = _calculate_order_1_level(self.butcher_tableau)\n return order", "def get_order(self, order, fields_name, many_to_many_fields):\n next_direction = '' if order[:1] == '-' else '-'\n real_order = ''\n field = ''\n if order[1:] == 'pk' or order == 'pk':\n real_order = order\n field = 'pk'\n else:\n if order[1:] in fields_name or order in fields_name:\n if order[1:] in many_to_many_fields or order in many_to_many_fields:\n real_order = 
'pk'\n field = 'pk'\n else:\n real_order = order\n field = order if next_direction == '-' else order[1:]\n else:\n real_order = 'pk'\n field = 'pk'\n return next_direction, field, real_order", "def order(self):\n return len(self.coeff)-1", "def get_bond(self, atom):\n assert isinstance(atom, Atom)\n assert atom != self\n\n for bond in self.bond_list:\n if atom == bond.atom1 or atom == bond.atom2:\n return bond\n return None", "def get_quote_orders(self, quote:str):\n\n\t\tconn = sqlite3.connect(self.name, detect_types=sqlite3.PARSE_DECLTYPES)\n\t\tconn.row_factory = sqlite3.Row\n\t\tc = conn.cursor()\n\n\t\tc.execute('SELECT * FROM orders WHERE quote=?', (quote, ))\n\t\torders = c.fetchall()\n\t\t\t\t\t\t\t\t\t\t\t\t\t# list(orders) = [<sqlite3.Row object at 0x0000020664E28670>, <sqlite3.Row object at 0x0000020664E9BA70>, ...]\n\t\treturn orders \t# We need to return None if there is no bot on the pair, so no dict(orders)", "def angle_between_two(self, other):\n # angle = math.atan2(other.position.y - self.position.y,\n # other.position.x - self.position.x)\n minus = other.position - self.position\n angle = math.atan2(minus.y, minus.x)\n return angle", "def distance_pbc(cls, atom_1, atom_2):\n\t\tif atom_1.box_dim is None or atom_2.box_dim is None:\n\t\t\traise Exception(\"simulation box size has not been specified\")\n\t\tif atom_1.box_dim != atom_2.box_dim:\n\t\t\traise Exception(\"simulation box size does not match\")\n\t\t\n\t\t[lx,ly,lz] = [atom_2.box_dim[0],atom_2.box_dim[1],atom_2.box_dim[2]]\n\t\t\n\t\t_pair_list = np.array([[0,0,0],[lx,0,0],[-lx,0,0],[0,ly,0],[0,-ly,0],[0,0,lz],[0,0,-lz]])\n\t\t\n\t\t_pair_distance = []\n\t\t\n\t\tfor _pair in _pair_list:\n\t\t\t_curr_pair_distance = Atom.distance(atom_1, Atom((np.array(atom_2.atom_loc) + _pair).tolist()))\n\t\t\t_pair_distance.append(_curr_pair_distance)\n\t\treturn min(_pair_distance)", "def objects_order(o: model.Documentable) -> Tuple[int, int, str]:\n return (-o.privacyClass.value, -o.kind.value if o.kind else 0, o.fullName().lower())", "def bond(self, i, j):\n i_covr = qcel.covalentradii.get(self.sym[i], units='angstrom')\n j_covr = qcel.covalentradii.get(self.sym[j], units='angstrom')\n r = np.linalg.norm(self.xyz[i] - self.xyz[j])\n if r < 1.1*(i_covr + j_covr):\n return int(1)\n return int(0)", "def get_bond_connectivity(self):\n m, connectivity = self.owner, []\n for index, i in enumerate(self.rix):\n for j in self.rix[index + 1:]:\n b1 = m.rings[i].bix\n b2 = m.rings[j].bix\n if set(b1).intersection(b2):\n connectivity.append((i, j))\n return tuple(connectivity)", "def degrees_of_separation(self, n1, n2):\n\t\t# Nodes aren't in graph\n\t\tif n1 not in self.nodes or n2 not in self.nodes:\n\t\t\traise ValueError\n\n\t\t# Get node\n\t\ta = self.nodes[n1]\n\t\tb = self.nodes[n2]\n\n\t\t# Get shortest distance using BFS\n\t\tpath = bfs(a, b)\n\n\t\t# Return path length or -1\n\t\tif path is not None:\n\t\t\treturn len(path) - 1\n\t\telse:\n\t\t\treturn -1", "def get_angle_between(self, other):\n cross = self.x*other[1] - self.y*other[0]\n dot = self.x*other[0] + self.y*other[1]\n return math.atan2(cross, dot)", "def remove_charge_and_bond_order_from_guanidinium(offmol):\n for atom in offmol.atoms:\n if atom.element.symbol != \"C\":\n continue\n nitrogen_neighbors = 0\n for neighbor in atom.bonded_atoms:\n if neighbor.element.symbol == \"N\":\n nitrogen_neighbors += 1\n if nitrogen_neighbors != 3:\n continue\n atom.formal_charge = 0\n for neighbor in atom.bonded_atoms:\n neighbor.formal_charge = 0\n for bond in atom.bonds:\n 
# Set bond order 4, which will produce a \"$\" character. We later replace this with \"~\".\n bond.bond_order = 4", "def check_bonds(self, force0, force1):\n\n assert type(force0) == type(force1), \"Error: force0 and force1 must be the same type.\"\n assert type(force0) == mm.HarmonicBondForce, \"Error: forces must be HarmonicBondForces\"\n\n n_bonds0 = force0.getNumBonds()\n n_bonds1 = force1.getNumBonds()\n\n dict0, dict1 = {}, {}\n\n i0, i1, r0, k0 = force0.getBondParameters(0)\n unit_r = u.angstrom\n #unit_k = k0.unit\n unit_k = u.kilojoules_per_mole/(u.angstrom)**2\n\n for k in range(n_bonds0):\n i0, i1, r0, k0 = force0.getBondParameters(k)\n i0, i1 = reorder_bonds(i0, i1)\n if k0 / k0.unit != 0.0: # Skip forces with strength 0.0\n dict0[i0, i1] = ((r0 / unit_r, k0 / unit_k))\n\n for k in range(n_bonds1):\n i0, i1, r0, k0 = force1.getBondParameters(k)\n i0, i1 = reorder_bonds(i0, i1)\n if k0 / k0.unit != 0.0: # Skip forces with strength 0.0\n dict1[i0, i1] = ((r0 / unit_r, k0 / unit_k))\n\n keys0 = set(dict0.keys())\n keys1 = set(dict1.keys())\n logger.info(\"Bonds0 - Bonds1 = %s\" % (keys0.difference(keys1)))\n logger.info(\"Bonds1 - Bonds0 = %s\" % (keys1.difference(keys0)))\n assert set(dict0.keys()) == set(dict1.keys()), \"Systems have different HarmonicBond Forces\"\n\n for k, parameter_name in enumerate([\"r0\", \"k0\"]):\n for (i0, i1) in dict0.keys():\n val0 = dict0[i0, i1][k]\n val1 = dict1[i0, i1][k]\n if parameter_name=='r0':\n assert compare(val0, val1), \"Error: Harmonic Bond distance (%d, %d) has equilibrium distances of %f and %f angstroms, respectively.\" % (i0, i1, val0, val1)\n else:\n assert compare(val0, val1), \"Error: Harmonic Bond force constant (%d, %d) has values of %f and %f kJ/mol, respectively.\" % (i0, i1, val0, val1)", "def gather_references_orders (gdbval):\n# TODO: Somehow also note speculative references and attributes in\n# general\n vec = gdbval[\"references\"]\n return [int(i[\"referred\"][\"order\"]) for i in vec_iter(vec)]", "def get_bond_info(self):\n return", "def sidechain(self):\n\n return self.atoms - self.backbone()", "def entityOrder(address_book):\n return zope.component.getUtility(IEntityOrder)", "def chemist_ordered(fermion_operator):\n # Make sure we're dealing with a fermion operator from a molecule.\n if not fermion_operator.is_two_body_number_conserving():\n raise OperatorSpecificationError(\n 'Operator is not two-body number conserving.')\n\n # Normal order and begin looping.\n normal_ordered_input = normal_ordered(fermion_operator)\n chemist_ordered_operator = FermionOperator()\n for term, coefficient in normal_ordered_input.terms.items():\n if len(term) == 2 or not len(term):\n chemist_ordered_operator += FermionOperator(term, coefficient)\n else:\n # Possibly add new one-body term.\n if term[1][0] == term[2][0]:\n new_one_body_term = (term[0], term[3])\n chemist_ordered_operator += FermionOperator(\n new_one_body_term, coefficient)\n # Reorder two-body term.\n new_two_body_term = (term[0], term[2], term[1], term[3])\n chemist_ordered_operator += FermionOperator(new_two_body_term,\n -coefficient)\n return chemist_ordered_operator", "def dmc_order(self):\n return sorted(self.lookup_table, key=lambda clr: int(clr.id) if clr.id.isdigit() else 0)", "def orders(self):\n return self._orders", "def orders(self) -> List[MetatraderOrder]:\n return self._orders", "def calculate_dihedral_atom_equivalences(mol1, mol2):\n\n # Check that the mols are identical-ish\n if mol1.GetNumHeavyAtoms() != mol2.GetNumHeavyAtoms():\n raise 
EqualityError('Molecules are not identical (Num Atoms) {!s} != {!s}.\\n{!s}\\n{!s}'.format(mol1.GetNumHeavyAtoms(),mol2.GetNumHeavyAtoms(),Chem.MolToSmiles(mol1),Chem.MolToSmiles(mol2)))\n if mol1.GetNumBonds() != mol2.GetNumBonds():\n raise EqualityError('Molecules are not identical (Num Bonds) {!s} != {!s}:\\n{!s}\\n{!s}'.format(mol1.GetNumBonds(),mol2.GetNumBonds(),Chem.MolToSmiles(mol1), Chem.MolToSmiles(mol2)))\n\n # Gets a list of lists of atoms in mol1 (12,16,3, ...) that match the atoms in mol2 (1,2,3, ...)\n match_patterns = mol1.GetSubstructMatches(mol2, uniquify=False)\n # Get the quadruplets to calculate the dihedrals from for mol1\n mol1_atom_sets = identify_rotatable_bond_atom_pairs(mol1)\n num_atms = mol1.GetNumHeavyAtoms()\n # List for returning\n paired_atom_sets = []\n # Iterate through the different ways of overlaying the molecule (ensures we get the minimum rmsd)\n for match_pattern in match_patterns:\n # Translate from the atoms in mol1 to the atoms in mol2 (for this match_pattern)\n trans_dict = dict(zip(match_pattern, range(0,num_atms)))\n # Translate the atoms in mol1 to the atoms in mol2\n mol2_atom_sets = [ tuple([trans_dict[atm] for atm in bond_set]) for bond_set in mol1_atom_sets]\n # Add to list\n paired_atom_sets.append((mol1_atom_sets, mol2_atom_sets))\n # Check that the atom types are identical (test)\n mol1_atom_types = [ tuple([mol1.GetAtomWithIdx(atm).GetAtomicNum() for atm in bond_set]) for bond_set in mol1_atom_sets]\n mol2_atom_types = [ tuple([mol2.GetAtomWithIdx(atm).GetAtomicNum() for atm in bond_set]) for bond_set in mol2_atom_sets]\n assert mol1_atom_types == mol2_atom_types, \"ATOM TYPES ARE NOT THE SAME ON THE DIHEDRAL ANGLE TO BE CALCULATED - THERE'S BEEN A MATCHING ERROR\"\n # Return the list of lists of paired atoms between the structures\n return paired_atom_sets", "def orders(self):\n return(self._d_orders['trades'])", "def _compute_hydrogen_bonds(self, entity):\n\n for (aa1, aa2) in combinations(entity, 2):\n\n # do not consider this pair if the number of atoms of the\n # residues is not sufficient\n if not (validate(aa1) and validate(aa2)):\n continue\n\n # stores both potentials between aa1 and aa2\n potentials = []\n\n segid1 = get_pos(aa1)\n segid2 = get_pos(aa2)\n\n # distance\n dist = np.abs(segid1 - segid2)\n\n # take care of the minimal sequence distance criterion\n # between aa1 and aa2\n if dist < self.min_seq_distance:\n continue\n\n # extract atoms from both amino acids\n atoms = [aa1.get_unpacked_list(),\n aa2.get_unpacked_list()]\n\n for i in range(0, len(atoms)):\n c_carboxyl = np.array(atoms[i][2].get_coord())\n o_carboxyl = np.array(atoms[i][3].get_coord())\n\n nitrogen = np.array(atoms[1-i][0].get_coord())\n hydrogen = None\n for atom in atoms[1-i]:\n if atom.get_name().strip() == 'H':\n hydrogen = np.array(atom.get_coord())\n\n if hydrogen is None:\n potentials.append(0)\n continue\n\n # compute relevant distances\n r_ON = np.linalg.norm(o_carboxyl - nitrogen)\n r_CH = np.linalg.norm(c_carboxyl - hydrogen)\n r_OH = np.linalg.norm(o_carboxyl - hydrogen)\n r_CN = np.linalg.norm(c_carboxyl - nitrogen)\n\n # compute potential\n pot = potential(r_ON, r_CH, r_OH, r_CN)\n\n potentials.append(pot if pot < co.HBOND_THRESHOLD else 0)\n\n # we return this as an result if at least one potential\n # is below the threshold , so they are not both 0\n if sum(potentials) != 0:\n yield (aa1, aa2, potentials[0], potentials[1])", "def cmplines(a, b):\n # we know that all lines that reach here have actions\n # make set actions first\n # 
depend actions last\n # rest in alpha order\n\n def typeord(a):\n if a.name == \"set\":\n return 1\n if a.name == \"depend\":\n return 3\n return 2\n c = cmp(typeord(a[0]) , typeord(b[0]))\n if c:\n return c\n c = cmp(a[0].name, b[0].name)\n if c:\n return c\n\n if a[0].name == \"set\" and a[0].attrs[\"name\"] == \"pkg.fmri\":\n return -1\n\n if b[0].name == \"set\" and b[0].attrs[\"name\"] == \"pkg.fmri\":\n return 1\n\n\n if a[0].name == \"set\" and a[0].attrs[\"name\"].startswith(\"pkg.\") and \\\n not b[0].attrs[\"name\"].startswith(\"pkg.\"):\n return -1\n\n if b[0].name == \"set\" and b[0].attrs[\"name\"].startswith(\"pkg.\") and \\\n not a[0].attrs[\"name\"].startswith(\"pkg.\"):\n return 1\n\n\n key_attr = a[0].key_attr\n if key_attr:\n c = cmp(a[0].attrs[key_attr], b[0].attrs[key_attr])\n if c:\n return c\n\n return cmp(str(a[0]), str(b[0]))", "def compare_entities(e1, e2):\n sp1 = e1.sorting_priority\n sp2 = e2.sorting_priority\n if sp1 > sp2:\n return 1\n elif sp1 == sp2:\n return 0\n else:\n return -1", "def _calculate_order(self, world: World) -> float:\n raise NotImplementedError()", "def generator_orders(self):\n return tuple(self._gen_orders)", "def get_orderings(self):\n if self._orderings is Undefined:\n self._orderings = self.normalize_orderings(self.ordering)\n return self._orderings", "def getOrder(f):\n order = 0\n while (2 ** order) ** 2 < len(f):\n order += 1\n return order", "def getOrder(self):\n return len(self.vertices)", "def iter_bonded_atoms(self):\n for bond in self.iter_bonds():\n partner = bond.get_partner(self)\n assert partner is not None\n yield partner", "def sortEdge(cls,x,y):\n return( Sentence.sortById(x[2],y[2]) )", "def assert_molecules_match_after_remap(self, mol1, mol2):\n for atoms in zip(mol1.atoms, mol2.atoms):\n assert atoms[0].to_dict() == atoms[1].to_dict()\n # bonds will not be in the same order in the molecule and the atom1 and atom2 indecies could be out of\n # order make a dict to compare them both\n remapped_bonds = dict(\n ((bond.atom1_index, bond.atom2_index), bond) for bond in mol2.bonds\n )\n for bond in mol1.bonds:\n key = (bond.atom1_index, bond.atom2_index)\n if key not in remapped_bonds:\n key = tuple(reversed(key))\n assert key in remapped_bonds\n # now compare each attribute of the bond except the atom indexes\n bond_dict = bond.to_dict()\n del bond_dict[\"atom1\"]\n del bond_dict[\"atom2\"]\n remapped_bond_dict = remapped_bonds[key].to_dict()\n del remapped_bond_dict[\"atom1\"]\n del remapped_bond_dict[\"atom2\"]\n assert mol1.n_bonds == mol2.n_bonds\n assert mol1.n_angles == mol2.n_angles\n assert mol1.n_propers == mol2.n_propers\n assert mol1.n_impropers == mol2.n_impropers\n assert mol1.total_charge == mol2.total_charge\n assert mol1.partial_charges.all() == mol2.partial_charges.all()", "def _reltype_ordering(self):\n return self.__reltype_ordering", "def get_angle_rad_between_joints(joint_a: Joint2D, joint_b: Joint2D) -> float:\n return math.atan2(joint_a.y - joint_b.y, joint_a.x - joint_b.x)", "def Hbond_donors(self):\n if not self.hasBonds:\n self.buildBondsByDistance()\n num_donors = 0\n for a in self.allAtoms:\n if a.element == 'H':\n num_donors += a.bonds[0].neighborAtom(a).element in ('O', 'N')\n return num_donors", "def get_angle(p1, p2):\n return math.atan2(p2[1] - p1[1], p2[0] - p1[0])", "def cmp_position(self, other):\n if self.position.data == other.position.data:\n return 0\n elif self.position.data < other.position.data:\n return 1\n else:\n return -1", "def assert_molecules_match_after_remap(mol1, mol2):\n 
for atoms in zip(mol1.atoms, mol2.atoms):\n assert atoms[0].to_dict() == atoms[1].to_dict()\n # bonds will not be in the same order in the molecule and the atom1 and atom2 indecies could be out of order\n # make a dict to compare them both\n remapped_bonds = dict(\n ((bond.atom1_index, bond.atom2_index), bond) for bond in mol2.bonds\n )\n for bond in mol1.bonds:\n key = (bond.atom1_index, bond.atom2_index)\n if key not in remapped_bonds:\n key = tuple(reversed(key))\n assert key in remapped_bonds\n # now compare each attribute of the bond except the atom indexes\n bond_dict = bond.to_dict()\n del bond_dict[\"atom1\"]\n del bond_dict[\"atom2\"]\n remapped_bond_dict = remapped_bonds[key].to_dict()\n del remapped_bond_dict[\"atom1\"]\n del remapped_bond_dict[\"atom2\"]\n assert mol1.n_bonds == mol2.n_bonds\n assert mol1.n_angles == mol2.n_angles\n assert mol1.n_propers == mol2.n_propers\n assert mol1.n_impropers == mol2.n_impropers\n assert mol1.total_charge == mol2.total_charge\n assert mol1.partial_charges.all() == mol2.partial_charges.all()", "def addDihedralBond(a1, a2, length, angleInfo, dihedInfo):\n\n\tif a1.molecule == a2.molecule:\n\t\traise ValueError(\"Atoms to be bonded must be in different models\")\n\n\t# first, get the distance correct\n\tfrom chimera import Xform, cross, angle, Point\n\tdvector = a1.xformCoord() - a2.xformCoord()\n\tdvector.length = dvector.length + length\n\topenState = a2.molecule.openState\n\topenState.globalXform(Xform.translation(dvector))\n\n\t# then angle\n\tif angleInfo:\n\t\tatoms, angleVal = angleInfo\n\t\tp1, p2, p3 = [a.xformCoord() for a in atoms]\n\t\taxis = cross(p1-p2, p2-p3)\n\t\tcurAngle = angle(p1, p2, p3)\n\t\tdelta = angleVal - curAngle\n\t\tv2 = p2 - Point(0.0, 0.0, 0.0)\n\t\ttrans1 = Xform.translation(v2)\n\t\tv2.negate()\n\t\ttrans2 = Xform.translation(v2)\n\t\ttrans1.multiply(Xform.rotation(axis, delta))\n\t\ttrans1.multiply(trans2)\n\t\topenState.globalXform(trans1)", "def calc_mainchain_bond_angle(self):\n aN = self.get_atom('N')\n aCA = self.get_atom('CA')\n aC = self.get_atom('C')\n aO = self.get_atom('O')\n aCB = self.get_atom('CB')\n\n naN = None\n naCA = None\n next_res = self.get_offset_residue(1)\n if next_res:\n naN = next_res.get_atom('N')\n naCA = next_res.get_atom('CA')\n\n N_CA_C = AtomMath.calc_angle(aN, aCA, aC)\n CA_C_O = AtomMath.calc_angle(aCA, aC, aO)\n N_CA_CB = AtomMath.calc_angle(aN, aCA, aCB)\n CB_CA_C = AtomMath.calc_angle(aCB, aCA, aC)\n CA_C_nN = AtomMath.calc_angle(aCA, aC, naN)\n C_nN_nCA = AtomMath.calc_angle(aC, naN, naCA)\n\n return (N_CA_C, N_CA_CB, CB_CA_C, CA_C_O, CA_C_nN, C_nN_nCA)", "def get_bend_port_distances(bend: Component) -> Tuple[float64, float64]:\n p0, p1 = bend.ports.values()\n return abs(p0.x - p1.x), abs(p0.y - p1.y)", "def get_atoms(self):\n\n return sorted(self.atoms, key=lambda atom: atom.get_symmetry_class())", "def order(self):\n return len(self.vertices())", "def determine_proposal_order(self):\n heavy_atoms_torsions, heavy_logp = self._propose_atoms_in_order(self._heavy)\n hydrogen_atoms_torsions, hydrogen_logp = self._propose_atoms_in_order(self._hydrogens)\n proposal_order = heavy_atoms_torsions + hydrogen_atoms_torsions\n\n if len(proposal_order) == 0:\n msg = 'NetworkXProposalOrder: proposal_order is empty\\n'\n raise Exception(msg)\n\n #Check that no atom is placed until each atom in the corresponding torsion is in the set of atoms with positions\n _set_of_atoms_with_positions = set(self._atoms_with_positions)\n\n # Now iterate through the proposal_order, ensuring that each 
atom in the corresponding torsion list is in the _set_of_atoms_with_positions (appending to the set after each placement)\n for torsion in proposal_order:\n assert set(torsion[1:]).issubset(_set_of_atoms_with_positions), \"Proposal Order Issue: a torsion atom is not position-defined\"\n _set_of_atoms_with_positions.add(torsion[0])\n\n # Ensure lists are not ill-defined\n assert heavy_logp + hydrogen_logp != [], \"logp list of log_probabilities from torsion choices is an empty list\"\n assert len(heavy_logp + hydrogen_logp) == len(proposal_order), \"There is a mismatch in the size of the atom torsion proposals and the associated logps\"\n\n #create a list of omitted_bonds tuples\n omitted_bonds = []\n omitted_bonds_forward_pass = [edge for edge in self._residue_graph.edges() if edge not in list(self._reference_connectivity_graph.edges())]\n for omitted_bond in omitted_bonds_forward_pass:\n if omitted_bond[::-1] not in list(self._reference_connectivity_graph.edges()):\n omitted_bonds.append(omitted_bond)\n\n #delete the residue graph and reference connectivity graph since they cannot be pickled...\n del self._residue_graph\n del self._reference_connectivity_graph\n\n return proposal_order, heavy_logp + hydrogen_logp, omitted_bonds", "def order(self):\n p = self._pants_decomposition\n g = p.genus()\n if g < 2 or p.num_punctures() > 0:\n raise NotImplementedError(\n \"The order computation currently \"\n \"only works for closed surfaces of genus 2 and higher.\")\n for n in range(1, 4*g+3):\n power = self**n\n if power.is_identity():\n if g > 2 or g == 2 and power.is_in_torelli():\n return n\n return 0", "def entry_orders(self):\n return store.orders.get_entry_orders(self.exchange, self.symbol)", "def compareFunction( self, first, second ):\n for ascending,column in self.sortOrder:\n aValue,bValue = column.get(first),column.get(second)\n diff = cmp(aValue,bValue)\n if diff:\n if not ascending:\n return - diff \n else:\n return diff \n return 0", "def _cmp(x, y):\n if x[1].count > y[1].count:\n return CmpRelation.GREATER\n if x[1].count < y[1].count:\n return CmpRelation.LESS\n if x[1].ptn_length < y[1].ptn_length:\n return CmpRelation.GREATER\n if x[1].ptn_length > y[1].ptn_length:\n return CmpRelation.LESS\n return CmpRelation.EQUAL", "def elementCom(Paire1,Paire2) :\n elem_com=\" \"\n elementPaire1=\" \"\n elementPaire2=\" \"\n p1 = Paire1[1]\n p2 = Paire2[1]\n if p1 != p2 :\n for i in range (2):\n for j in range (2):\n if p1[i] == p2[j]:\n elem_com = p1[i] \n elementPaire1 = p1[1-i] \n elementPaire2 = p2[1-j] \n return elem_com, elementPaire1, elementPaire2", "def bordasOf(self, bundle):\n\t\treturn sorted([self.borda[item] for item in bundle], reverse=True)" ]
[ "0.7267477", "0.66433895", "0.61120665", "0.61120355", "0.5785895", "0.5775953", "0.563049", "0.5494583", "0.5392209", "0.5384048", "0.5254621", "0.52386093", "0.5236131", "0.5225658", "0.52222705", "0.51403373", "0.51322603", "0.5125893", "0.508684", "0.5077968", "0.50548816", "0.5052437", "0.50480247", "0.50480247", "0.50480247", "0.50471795", "0.5034616", "0.5033629", "0.5008301", "0.5005437", "0.4980239", "0.4965152", "0.4961552", "0.49593604", "0.49365786", "0.49318787", "0.4919777", "0.48809665", "0.48803613", "0.48651606", "0.4849822", "0.48470536", "0.48467317", "0.48276943", "0.48175898", "0.47937495", "0.4782658", "0.47531348", "0.47283906", "0.4728352", "0.47177923", "0.47129187", "0.47079098", "0.47072282", "0.4705767", "0.47019526", "0.46910888", "0.46760046", "0.4661575", "0.46614352", "0.46612233", "0.46525252", "0.4640619", "0.46170005", "0.46164843", "0.4612827", "0.46036488", "0.45999077", "0.45997763", "0.45856652", "0.4581762", "0.45794988", "0.45767188", "0.45583907", "0.45576504", "0.45568597", "0.45455047", "0.45428738", "0.45390847", "0.45387828", "0.45256025", "0.45253485", "0.45194796", "0.45161617", "0.44971707", "0.4492381", "0.4485933", "0.44821778", "0.44820997", "0.44800213", "0.44788566", "0.44788542", "0.44718367", "0.44681817", "0.44668588", "0.4461016", "0.44580215", "0.44547078", "0.44509563", "0.44499916" ]
0.5774481
6
Rotates the ADP of 'atom' to match the orientation of 'source_atom'.
def rotate_3D(atom, source_atom):
    from lauescript.cryst.match import get_transform
    lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]
    lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]
    matrix = get_transform(lst1, lst2, matrix=True)
    adp = source_atom.adp['cart_int']
    atom.adp['cart_int'] = rotate_adp(adp, matrix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotation_alignment(referent_shape, current_shape):\n numerator = 0.\n denominator = 0.\n\n for i in range(len(referent_shape.points)):\n numerator += current_shape.points[i, 0] * referent_shape.points[i, 1] - current_shape.points[i, 1] * referent_shape.points[i, 0]\n denominator += current_shape.points[i, 0] * referent_shape.points[i, 0] + current_shape.points[i, 1] * referent_shape.points[i, 1]\n\n return math.atan2(numerator, denominator)", "def rotate_ADP_about_axis(ADP, angle, axisDirection):\n adp = get_adp_as_matrix(ADP)\n u, v = np.linalg.eig(adp)\n startPoints = [v[:, i].flatten().tolist()[0] for i in xrange(3)]\n endPoints = [rotate_point_about_axis(point, angle, axisDirection, (0, 0, 0)) for point in startPoints]\n rotMat = get_transform(startPoints, endPoints, matrix=True).transpose()\n newadp = np.dot(rotMat.transpose(), np.dot(adp, rotMat))\n return newadp[0, 0], newadp[1, 1], newadp[2, 2], newadp[0, 1], newadp[0, 2], newadp[1, 2]", "def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)", "def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._normal = matrix.dot(self._normal)\n self._position = matrix.dot(self._position)", "def align(image):\n angle = find_angle(image)\n image = rotate(image, angle)\n return image", "def setAzEncoderOffset(ant) :\n \n # Retrieve current azimuth offset (arcmin), elevation (degrees) \n # and az encoder pointing offset (implementation specific).\n\n azOffMpName = \"Control.Antenna%d.azimuthOffset\"%ant\n pointingConstants = pointingSetup( ant )\n\n if device.CarmaAnt().isOvro(ant):\n actualElMpName = \"Ovro%d.AntennaCommon.Drive.Track.actualElevation\"%ant\n elif device.CarmaAnt().isBima(ant): \n bimaAntNo = ant - 6\n actualElMpName = \"Bima%d.AntennaCommon.Drive.Track.actualElevation\"%bimaAntNo\n [azOffset,actualEl ] = queryMpValues([azOffMpName, actualElMpName])\n elif device.CarmaAnt().isSza(ant): \n szaAntNo = ant - 15\n actualElMpName = \"Sza%d.AntennaCommon.Drive.Track.actualElevation\"%szaAntNo\n [azOffset,actualEl ] = queryMpValues([azOffMpName, actualElMpName])\n else:\n raise Exception, \"Invalid ant\"\n\n [azOffset,actualEl ] = queryMpValues([azOffMpName, actualElMpName])\n cosEl = math.cos( actualEl * math.pi / 180.0 )\n\n if device.CarmaAnt().isOvro(ant):\n pointingConstants[0] = pointingConstants[0] + azOffset/cosEl\n ovroMountPointingConstants( pointingConstants[0],\n pointingConstants[1],\n pointingConstants[2],\n pointingConstants[3],\n pointingConstants[4], ant )\n elif device.CarmaAnt().isBima(ant): \n pointingConstants[0][0] = pointingConstants[0][0] + azOffset/cosEl\n bimaMountPointingConstants( pointingConstants[0], pointingConstants[1], ant )\n elif device.CarmaAnt().isSza(ant): \n # For SZA, the az zero (term 7 in the pointing constants) is in degrees \n pointingConstants[6] += ( ( azOffset/cosEl ) / 60.0 );\n # Avoid having to spell out all 19 arguments by using the special \n # form '*args' with a list of ordered args.\n args = pointingConstants \n args.append( ant )\n szaMountPointingConstants( *args )\n else:\n raise Exception, \"Invalid ant\"\n\n return offset(0, 0, ant)", "def _rotate_about_origin(self, angle, axis):\n print 'Invoked abstract {}._rotate_about_origin({}, {})'.format(\n self, angle, axis)\n return", "def orientate(self, node):\n\t\tfor i in self.SM(node):\n\t\t\tif node in self.E[i]:\n\t\t\t\tself.directArc(i,node)", "def _rotate(self, angle):\n if self.undobuffer:\n self.undobuffer.push((\"rot\", angle, 
self._degreesPerAU))\n angle *= self._degreesPerAU\n neworient = self._orient.rotate(angle)\n tracing = self.screen._tracing\n if tracing == 1 and self._speed > 0:\n anglevel = 3.0 * self._speed\n steps = 1 + int(abs(angle)/anglevel)\n delta = 1.0*angle/steps\n for _ in range(steps):\n self._orient = self._orient.rotate(delta)\n self._update()\n self._orient = neworient\n self._update()", "def addOffsetRotation(self, point):\n\n ox, oy, oz = OpenMaya.MVector(0.0, 0.0, 0.0)\n px, py, pz = point\n\n # Z Rotation\n if self.offset_rotation.z != 0.0:\n point = self.rotateZ(point)\n\n # Y Rotation\n if self.offset_rotation.y != 0.0:\n point = self.rotateY(point)\n\n # X Rotation\n if self.offset_rotation.x != 0.0:\n point = self.rotateX(point)\n\n return point", "def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._center = matrix.dot(self._center)", "def test_align_invert(self):\n al = align(self.amp1, self.amp2, inverse=False)\n\n al_inv = align(self.amp2, self.amp1, inverse=True)\n\n print(al.R)\n print(al_inv.R)\n\n print(al.T)\n print(al_inv.T)", "def rotate_local(self, angle, axis=(0., 0., 1.)):\n self.rotation *= aa2q(angle, glm.vec3(axis))", "def move_to_angle(alpha, theta, pan_pos = 0, tilt_pos = 0, base_rate = 500, object_distance_hint = -1):\n\n if abs(alpha) > absolute_pan_limit or abs(theta) > absolute_tilt_limit:\n return (-1, -1)\n\n # First calculate pan movement\n # TODO Account for displacement perpendicular to pan axis.\n # Similar calculation to tilt displacement but will have\n # to take into account left or right of axis.\n pan_steps = int(alpha / pan_deg_per_step) - pan_pos\n\n # Calculate compensation for sensor displacement\n # if object distance hint is specified.\n theta_comp_deg = 0.0\n\n if object_distance_hint > 0:\n # Cannot look \"back\"\n if object_distance_hint < sensor_displacement:\n return (-1, -1, 0, 0)\n # Compute angle compensation and compare to system's step resolution.\n # No need to bother correcting an angle that the motors cannot reach.\n angle_sensitivity = deg_per_step / gear_ratio / micro_steps\n theta_comp = math.asin(sensor_displacement / object_distance_hint)\n theta_comp_deg = theta_comp * 180.0 / math.pi\n #print(f'sensitivity={angle_sensitivity}, comp={theta_comp}[rad]/{theta_comp_deg}[deg]')\n if theta_comp_deg < angle_sensitivity:\n theta_comp_deg = 0.0\n\n # Calculate tilt movement\n tilt_steps = pan_steps + (int(round((theta - theta_comp_deg) / tilt_deg_per_step)) - tilt_pos)\n\n # Calculate relative step rate per motor and output as list\n max_delta = max(abs(pan_steps), abs(tilt_steps))\n\n if max_delta > 0:\n return (abs(pan_steps), abs(tilt_steps), int(round(base_rate * pan_steps / max_delta)), int(round(base_rate * tilt_steps / max_delta)))\n else:\n return (-1, -1, 0, 0)", "def set_angel(self):\n self.angle = math.degrees(math.atan2(self.next.y - self.y, self.next.x - self.x)\n - math.atan2(self.prev.y - self.y, self.prev.x - self.x))\n\n if self.angle < 0:\n self.angle += 360", "def rotate(self):\n\n self.pins = self.pins[1:] + list(self.pins[0])\n self.mapping = self.mapping[1:] + list(self.mapping[0])", "def initRelativeRotation(self):\n self.__relRotationStartValue = self.rotation()", "def align(self):\n number_of_Xs = 0\n xFront = \"\"\n xEnd = \"\"\n dashFront = \"\"\n dashEnd = \"\"\n\n # Determining if variable amino acids (\"X\") need to be added to the\n\t # beginning of the sequence:\n z = self.hmmStart-self.seqStart\n number_of_Xs = (self.hmmStart-1)-z\n if z > 0:\n dashFront = 
\"-\"*z\n xFront = \"X\"*number_of_Xs\n elif self.hmmStart-1<=self.seqStart-1:\n xFront = \"X\"*(self.hmmStart-1) \n\n # Determining if variable amino acids (\"X\") need to be added to the \n # end of the sequence:\n number_of_Xs_end = self.hmmLength - self.hmmEnd\n\n # The original sequence length; SPA format includes this\n delimeter = \"|\" #Need to fix can be \"_\" or \"|\" or something else...\n \n distToSeqEnd = self.origSeqLength - seqTo\n if distToSeqEnd >= number_of_Xs_end and number_of_Xs_end != self.hmmLength:\n xEnd = 'X'*number_of_Xs_end\n else:\n if distToSeqEnd < number_of_Xs_end:\n xEnd = 'X'*distToSeqEnd\n \tdashEnd += \"-\"*(number_of_Xs_end-distToSeqEnd)\n \t\n begin = \"{}{}\".format(dashFront, xFront)\n end = \"{}{}\".format(xEnd, dashEnd)\n self.addToFront(begin)\n self.data.extend(end)\n self.original = str(self)", "def anatomical_reorient_workflow(workflow, resource_pool, config, name=\"_\"):\n\n import nipype.pipeline.engine as pe\n from nipype.interfaces.afni import preprocess\n\n if \"anatomical_scan\" not in resource_pool.keys():\n return workflow, resource_pool\n\n anat_deoblique = pe.Node(interface=preprocess.Refit(),\n name='anat_deoblique%s' % name)\n\n anat_deoblique.inputs.in_file = resource_pool[\"anatomical_scan\"]\n anat_deoblique.inputs.deoblique = True\n\n anat_reorient = pe.Node(interface=preprocess.Resample(),\n name='anat_reorient%s' % name)\n\n anat_reorient.inputs.orientation = 'RPI'\n anat_reorient.inputs.outputtype = 'NIFTI_GZ'\n\n workflow.connect(anat_deoblique, 'out_file', anat_reorient, 'in_file')\n\n resource_pool[\"anatomical_reorient\"] = (anat_reorient, 'out_file')\n\n return workflow, resource_pool", "def apply_rotation_x(self, eta=0.0 ):\n \n eta = radians(eta)\n new_rotation_matrix = [[ 1 , 0 , 0 ],\n [ 0 , +cos(eta) , -sin(eta) ],\n [ 0 , +sin(eta) , +cos(eta) ]] \n \n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def orientation_ras_lps(affine: NdarrayTensor) -> NdarrayTensor:\n sr = max(affine.shape[0] - 1, 1) # spatial rank is at least 1\n flip_d = [[-1, 1], [-1, -1, 1], [-1, -1, 1, 1]]\n flip_diag = flip_d[min(sr - 1, 2)] + [1] * (sr - 3)\n if isinstance(affine, torch.Tensor):\n return torch.diag(torch.as_tensor(flip_diag).to(affine)) @ affine # type: ignore\n return np.diag(flip_diag).astype(affine.dtype) @ affine # type: ignore", "def alignment_start_angle(angle=0.10):\n\n smi = SMI_Beamline()\n yield from smi.modeAlignment()\n\n # Set reflected beam ROI\n yield from smi.setReflectedBeamROI(total_angle=angle, technique=\"gisaxs\")", "def obInit(position, angle, center):\n\tif angle > 360.0:\n\t angle = angle - 360\n\tif angle < - 360:\n\t angle = -angle - 360\n\tif angle > -360 and angle < 0:\n\t angle = -angle\n\tadjPosition = position - center\n\tnewposition = adjPosition.rotate(angle) + center\n\treturn newposition", "def rotate(self, increment):\n\n # the aiden rule works for positive OR negative\n adjusted_index = (increment + self.head) % len(self.array)\n self.head = adjusted_index", "def align_one(ptcl,ref,prefilt,align,aligncmp,ralign,raligncmp):\n\n\tif prefilt : ref=ref.process(\"filter.matchto\",{\"to\":ptcl})\n\n\t# initial alignment\n\tif align!=None :\n\t\tali=ptcl.align(align[0],ref,align[1],aligncmp[0],aligncmp[1])\n\n\t# refine alignment if requested\n\tif ralign!=None:\n\t\tralign[1][\"xform.align2d\"] = ali.get_attr(\"xform.align2d\")\n\t\tali=ptcl.align(ralign[0],ref,ralign[1],raligncmp[0],raligncmp[1])\n\n\treturn ali", "def rotate(self, angle, reshape=False):\n 
return IntensityMap.rotate(self, angle, reshape=reshape)", "def adjustToNewAngle(self):\n\n self.a,self.b,self.c = parametersFromPointAngle( 0.5*(self.point1+self.pointN), self.newAngle)\n\n #print 'adjustToNewAngle ', self, self.angle, self.newAngle\n self.angle = self.newAngle\n self.normalv = numpy.array( [ self.a, self.b ])\n self.unitv = numpy.array( [ self.b, -self.a ])\n if abs(self.angle) > numpy.pi/2 :\n if self.b > 0: self.unitv *= -1\n elif self.b<0 : self.unitv *= -1\n\n self.point1 = self.projectPoint(self.point1) # reset point1 \n if self.next is None or not self.next.isSegment():\n # move the last point (no intersect with next)\n\n pN = self.projectPoint(self.pointN)\n dirN = pN - self.point1 \n lN = length(pN, self.point1)\n self.pointN = dirN/lN*self.length + self.point1\n #print ' ... adjusting last seg angle ',p.dump() , ' normalv=', p.normalv, 'unitv ', p.unitv\n else:\n self.setIntersectWithNext()", "def do_altangle(self):\n nave = 10000\n x, y, z, angle = cbp.phidget.main(nave)\n current_angle = angle\n #print(current_angle)\n self.altangle = current_angle\n return current_angle", "def rotate_adp2(adp, rotmat, cell):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmat = np.linalg.inv(rotmat)\n rotmatT = np.transpose(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0],\n [0, 1 / cell[1], 0],\n [0, 0, 1 / cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]", "def get_inplane_angle(ima,ref, iring=1, fring=-1, ringstep=1, xtransSearch=0, ytransSearch=0, stp=1, center=1):\n\n\tfrom alignment import Numrinit, ringwe, Applyws, ormq\n\tfrom filter import fshift\n\n\tfirst_ring=int(iring); last_ring=int(fring); rstep=int(ringstep); xrng=int(xtransSearch); yrng=int(ytransSearch); step=int(stp)\t\n\tnx=ima.get_xsize()\n\tif(last_ring == -1): last_ring=int(nx/2)-2\n\tcnx = int(nx/2)+1\n \tcny = cnx\n \tmode = \"F\"\n \t#precalculate rings\n\tnumr = Numrinit(first_ring, last_ring, rstep, mode)\n \twr = ringwe(numr, mode)\n\tif(center==1):\n\t\tcs = [0.0]*2 # additio\n\t\tcs = ref.phase_cog()\n\t\tref1 = fshift(ref, -cs[0], -cs[1])\n\t\tcimage=Util.Polar2Dm(ref1, cnx, cny, numr, mode)\n\t\tcs = ima.phase_cog()\n\t\tima1 = fshift(ima, -cs[0], -cs[1])\n\telse:\n\t\tima1=ima.copy()\n\t\tcimage=Util.Polar2Dm(ref, cnx, cny, numr, mode)\n\tUtil.Frngs(cimage, numr)\n\tApplyws(cimage, numr, wr)\n\t[angt, sxst, syst, mirrort, peakt]=ormq(ima1, cimage, xrng, yrng, step, mode, numr, cnx, cny)\n\treturn angt,sxst, syst, mirrort, peakt", "def rotate(self, angle):\n perp = TwoDV(-self[1], self[0])\n angle = angle * math.pi / 180.0\n c, s = math.cos(angle), math.sin(angle)\n return TwoDV(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s)", "def orientate_reversed(self, node):\n\t\tfor i in self.PM(node):\n\t\t\tif i in self.E[node]:\n\t\t\t\tself.directArc(node,i)", "def rotate_adp3(adp, rotmat, cell):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmati = np.matrix(rotmat)\n rotmatiT = np.transpose(rotmati)\n rotmat = np.linalg.inv(rotmat)\n\n Nmat = np.matrix([[1 / cell[0], 0, 0],\n [0, 1 / cell[1], 0],\n [0, 0, 1 / cell[2]]])\n Nmat = 
np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n adp = np.dot(rotmati, adp)\n adp = np.dot(adp, rotmatiT)\n\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]", "def srotate(self, angle):\n\n self.angle = self.angle + angle", "def update_angle(self, mouse):\n offset = (mouse[1]-self.player.rect.centery, mouse[0]-self.player.rect.centerx)\n self.angle = degrees(atan2(*offset))\n print(\"angle:\", self.angle)", "def rotate_arcs(self):\n\n if self.arc_direction:\n self.thick_arc_start_angle -= 5\n self.thick_arc_end_angle -= 5\n\n self.thin_arc_start_angle += 5\n self.thin_arc_end_angle += 5\n else:\n self.thick_arc_start_angle += 5\n self.thick_arc_end_angle += 5\n\n self.thin_arc_start_angle -= 5\n self.thin_arc_end_angle -= 5", "def fix_rotation(self):\n self.rotate(self.rotation)\n self.annotations.rotate(self.rotation)\n self.rotation = 0", "def rotate(self,center, angle):\n \n self.coord = [x-np.repeat([[center[0],center[1]]],[x.shape[0]],axis = 0) for x in self.coord]\n\n alpha = angle\n R = np.array([[np.cos(alpha),-np.sin(alpha)],[np.sin(alpha),np.cos(alpha)]])\n \n for i in range(len(self.coord)):\n self.coord[i] = np.squeeze([np.dot([x],R) for x in self.coord[i]])\n\n self.coord = [x+np.repeat([[center[0],center[1]]],[x.shape[0]],axis = 0) for x in self.coord]\n\n return self", "def _altaz_rotation(self, jd):\n R_lon = rot_z(- self.longitude.radians - jd.gast * TAU / 24.0)\n return einsum('ij...,jk...,kl...->il...', self.R_lat, R_lon, jd.M)", "def adjAngle(self, amt):\n \n self.angle = self.angle+radians(amt)\n self.redraw()", "def _align(self):\n\n shape = np.shape(self.x)\n\n # Get angle of direction (cbi: center beam index)\n # NOTE: This implementation seems to be unstable, because the shot with the center beam index can be NaN\n # cbi = np.median(np.arange(len(self.x[0, :]))).astype(int)\n # vec1 = [self.x[0, cbi], self.y[0, cbi], 0.0]\n # vec2 = [self.x[-1, cbi], self.y[-1, cbi], 0.0]\n\n # Alternative implementation with mean over all entries within the line.\n # -> should be a good approximation of the line center\n # NOTE: 2019-05-30: Relaxed the criterion even further (mean of first and last 10 scan lines)\n vec1 = [np.nanmedian(self.x[0:10, :]), np.nanmedian(self.y[0:10, :]), 0.0]\n vec2 = [np.nanmedian(self.x[-11:-1, :]), np.nanmedian(self.y[-11:-1, :]), 0.0]\n angle = -1.0*np.arctan((vec2[1]-vec1[1])/(vec2[0]-vec1[0]))\n\n # validity check -> Do not rotate if angle is nan\n if np.isnan(angle):\n return\n\n # Get center point\n xc = np.nanmedian(self.x)\n yc = np.nanmedian(self.y)\n\n # Reform points\n points = [self.x.flatten()-xc, self.y.flatten()-yc]\n\n # Execute the rotation\n rot_matrix = np.array([[np.cos(angle), -np.sin(angle)],\n [np.sin(angle), np.cos(angle)]])\n points_rotated = rot_matrix.dot(points)\n self.x = np.reshape(points_rotated[0, :], shape)\n self.y = np.reshape(points_rotated[1, :], shape)\n\n # Save conversion parameters for reuse\n self._align_parameters = {'center_point': (xc, yc),\n 'angle': angle,\n 'rotation_matrix': rot_matrix}", "def rotate2(x, angle, origin=(0, 0)):\n origin = np.asarray(origin)\n x = np.asarray(x) - origin\n r = rotation_matrix2(angle)\n return x.dot(r.T) + origin", "def rotate_adp(adp, rotmat):\n\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmatT, 
adp)\n adp = np.dot(adp, rotmat)\n # print '=\\n',adp,'\\n-------------------------------------------------\\n\\n\\n\\n\\n\\n'\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]", "def applyrotate(destOr, owner):\n smoothness = 10\n currOr = owner.worldOrientation\n dZ = [0.0,0.0,0.0]\n\n dZ = currOr.to_euler()[2] - destOr.to_euler()[2]\n\n # Blender allows multiples of 360 deg and negative angles\n # this is to get rid of those\n while(dZ < math.pi):\n dZ = dZ + 2 * math.pi\n while(dZ > math.pi):\n dZ = dZ - 2 * math.pi\n\n owner.worldOrientation = (owner.worldOrientation *\n Matrix.Rotation(-dZ/(smoothness), 3, 'Z'))\n # turn around a bit", "def point_to_node_azimuth(self, point, node=None, out=None):\n return point_to_point_azimuth(point, self._get_coord_at_node(node), out=out)", "def rz(self, angle: float) -> \"Mate\":\n a = angle / 180 * pi\n self.x_dir = Mate._rotate(self.x_dir, self.z_dir, a)\n self.y_dir = Mate._rotate(self.y_dir, self.z_dir, a)\n return self", "def set_pan_angle(intent, session):\n\n card_title = intent['name']\n session_attributes = {}\n should_end_session = False\n\n if 'angle' in intent['slots']:\n pan_angle = intent['slots']['angle']['value']\n\n speech_output = \"I am setting the pan angle to \" + pan_angle\n \n # update the shadow device\n set_thing_state(\"cam0\", \"panAngle\", pan_angle)\n\n reprompt_text = None\n else:\n speech_output = \"I'm not sure what your pan angle is. \" \\\n \"Please try again.\"\n reprompt_text = \"I'm not sure what your pan angle is. \" \\\n \"You can tell me your pan color by saying, \" \\\n \"set pan angle to 90.\"\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))", "def rotate(self, angle):\n n, a = Vector.polar([self.x, self.y])\n a += angle\n self.x = n * cos(a)\n self.y = n * sin(a)", "def rotate_shape(shape, xy_center, angle_degrees):", "def angle_to(self, target_pos):\n return angle_to(self.tonp(), target_pos.tonp())", "def rotate(self, angle, aspeed):\n current_pose = [self.px, self.py, self.pth]\n initial_pose = current_pose\n # final pose is the final angle that the robot moves to about z\n final_angle = self.pth+angle\n if final_angle < self.pth:\n aspeed=aspeed*(-1)\n\n final_pose = [self.px, self.py, final_angle]\n \ttolerance = 0.01\n\n self.send_speed(0.0, aspeed)\n while abs(final_pose[2]-current_pose[2]) > tolerance:\n current_pose = [self.px, self.py, self.pth]\n self.send_speed(0.0, 0.0)", "def do_azangle(self):\n angle_1, angle_2 = cbp.potentiometer.main()\n current_angle = angle_2\n #print(current_angle)\n self.azangle = current_angle\n return current_angle", "def rotate(X):\n return X", "def transposeRelative(token, lastPitch):\n p = Pitch.fromToken(token, tokenizer)\n if not p:\n return lastPitch\n # absolute pitch determined from untransposed pitch of lastPitch\n octaveCheck = p.octaveCheck is not None\n p.absolute(lastPitch)\n if source.inSelection:\n # we may change this pitch. Make it relative against the\n # transposed lastPitch.\n try:\n last = lastPitch.transposed\n except AttributeError:\n last = lastPitch\n # transpose a copy and store that in the transposed\n # attribute of lastPitch. 
Next time that is used for\n # making the next pitch relative correctly.\n copy = p.copy()\n transposer.transpose(copy)\n p.transposed = copy # store transposed copy in new lastPitch\n new = copy.relative(last)\n if octaveCheck:\n new.octaveCheck = copy.octave\n if relPitchToken:\n # we are allowed to change the pitch after the\n # \\relative command. lastPitch contains this pitch.\n lastPitch.octave += new.octave\n new.octave = 0\n changes.replaceToken(relPitchToken[0], lastPitch.output(tokenizer.language))\n del relPitchToken[:]\n changes.replaceToken(token, new.output(tokenizer.language))\n return p", "def adjustRotor(self):\n #Ensures next positions are within alphabet range\n for k, v in self.alphabet.items():\n self.alphabet[k] = (v - 1)% 26 #Loops the alphabet from Z (26th letter) back to A (1st letter)\n \n #Swaps dictionary format from {‘Key’: value} to {value: ‘Key’} so we can change the characters\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet\n \n #Changes internal wiring contact to its previous contact\n for k, v in self.alphabet.items():\n charNum = self.ordChar(v)\n prevCharNum = (charNum - 1)% 26\n prevChar = self.chrNum(prevCharNum)\n self.alphabet[k] = prevChar\n \n #Swaps dictionary format from {value: ‘Key’} back to original {‘Key’: value} format\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet", "def rotate(self):\n\n last_center = self.rect.center\n self.image = pg.transform.rotate(self.image_copy,self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = last_center\n self.angle +=self.rotate_by", "def nominal_to_altaz(altaz_coord,norm_coord):\n alt_norm,az_norm = norm_coord.array_direction\n az = altaz_coord.az\n alt = altaz_coord.alt\n x,y = altaz_to_offset(az,alt,az_norm,alt_norm)\n x=x*u.rad\n y=y*u.rad\n representation = CartesianRepresentation(x.to(u.deg),y.to(u.deg),0*u.deg)\n\n return norm_coord.realize_frame(representation)", "def calcScatterAngleOld(R, PHI, THETA, sun_rotation):\n \n H_rot = atmo_utils.calcRotationMatrix(sun_rotation)\n\n X_ = R * np.sin(THETA) * np.cos(PHI)\n Y_ = R * np.sin(THETA) * np.sin(PHI)\n Z_ = R * np.cos(THETA)\n \n XYZ_dst = np.vstack((X_.ravel(), Y_.ravel(), Z_.ravel(), np.ones(R.size)))\n XYZ_src_ = np.dot(H_rot, XYZ_dst)\n \n Z_rotated = XYZ_src_[2, :]\n R_rotated = np.sqrt(np.sum(XYZ_src_[:3, :]**2, axis=0))\n \n angle = np.arccos(Z_rotated/(R_rotated+amitibo.eps(R_rotated)))\n \n return angle", "def at_a (self, dx, dy, dth): \n self.xa = self.a[0] - self.cg_ant[0]\n self.ya = self.a[1] - self.cg_ant[1]\n self.za = self.a[2]\n self.Rz = rotation.matrix([0,0,1],dth) \n \n self.aux = np.array([self.xa,self.ya,self.za]) \n self.aux = np.dot(self.Rz,self.aux)\n \n self.aux[0] = self.aux[0] + self.cg_ant[0] + dx\n self.aux[1] = self.aux[1] + self.cg_ant[1] + dy \n\n self.a = np.array([self.aux[0],self.aux[1],self.aux[2]])", "def rotate_x(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [1,0,0,0],\n [0,math.cos(a),math.sin(a),0],\n [0,-math.sin(a),math.cos(a),0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def getAngle(self):\n return self.articulateEncoder.getDistance()+self.angleOffset", "def ADP (self):", "def rotate(self):\n pass", "def rx(self, angle: float) -> \"Mate\":\n a = angle / 180 * pi\n self.y_dir = Mate._rotate(self.y_dir, self.x_dir, a)\n self.z_dir = Mate._rotate(self.z_dir, self.x_dir, a)\n return 
self", "def atan(self, x):\n return self.arctan(x)", "def adjAngle(self, amt): \r\n\r\n self.angle = self.angle + radians(amt)\r\n self.redraw()", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def rotate_a(origin, point, angle):\n ox, oy = origin\n px, py = point\n\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return qx, qy", "def atan (cls, x) :\n return Angle_R (math.atan (x))", "def rotMatrix( source = None ):\n if source is None:\n return None,None\n else:\n (x,y,z, a) = source\n if a % TWOPI:\n return tmatrixaccel.rotMatrix( x,y,z,a ),tmatrixaccel.rotMatrix( x,y,z,-a )\n return None,None", "def GetAngle(self,basePnt=Apoint(0,0,0),prompt=''):\n\t\treturn self.acad.ActiveDocument.Utility.GetAngle(Point=basePnt,Prompt=prompt)", "def azimuth(source):\n srcAzEl = subarrayControl.s.azel(source, 0.0);\n return srcAzEl[0];", "def rotate(self, angle):\n image_center = np.array(self.img.shape[1::-1]) / 2\n rot_mat = cv2.getRotationMatrix2D(tuple(image_center), angle, 1.0)\n\n self.img = cv2.warpAffine(\n self.img, rot_mat, self.img.shape[1::-1], flags=cv2.INTER_LINEAR\n )\n\n self.edits.append(f\"rotate:{angle}\")\n return self", "def rotate_global(self, angle, axis=(0., 0., 1.)):\n self.rotation = aa2q(angle, glm.vec3(axis)) * self.rotation", "def q2aa(rotation, deg=False):\n \n if not rotation or rotation == (1., 0., 0., 0.):\n return 0, glm.vec3(0, 0, 1)\n \n c, xs, ys, zs = rotation #glm.conjugate(rotation)\n\n angle = math.acos(c) * 2\n s = math.sin(angle / 2)\n\n if s == 0:\n return 0, glm.vec3(0, 0, 1)\n\n if deg:\n angle = round(180 * angle / math.pi, 3)\n \n return angle, glm.vec3(xs / s, ys / s, zs / s)", "def alignment_org(angle=0.1):\n proposal_id('2023_2', '311564_test')\n yield from alignement_gisaxs_multisample(angle=angle)\n RE.md['ai_0'] = piezo.th.user_setpoint.get()\n proposal_id('2023_2', '311564_Pettersson')", "def set_scan_rotation(self, angle):\n raise NotImplementedError", "def setArticulateAngle(self, angle):\n self.articulatePID.setSetpoint(angle)", "def arc_to(self, position):\n ### EXTRA CREDIT\n # TODO\n pass # delete this when you implement your code", "def reorient_obj(obj, step_ang, plane):\n start_angle = 0\n end_angle = math.pi / 2\n min_area = math.inf\n best_angle = 0\n start_axis = array.array(\"d\", obj.Centroid)\n end_axis = []\n index = [0] * 3\n\n if plane == \"xy\":\n index = [1, 1, 0]\n end_axis = array.array(\"d\", [obj.Centroid[0], obj.Centroid[1], obj.Centroid[2] + 1])\n elif plane == \"xz\":\n index = [1, 0, 1]\n end_axis = array.array(\"d\", [obj.Centroid[0], obj.Centroid[1] + 1, obj.Centroid[2]])\n elif plane == \"yz\":\n index = [0, 1, 1]\n end_axis = array.array(\"d\", [obj.Centroid[0] + 1, obj.Centroid[1], obj.Centroid[2]])\n\n min_pt, max_pt = obj.GetBoundingBox()\n # projecting the points to the plane\n project_points_to_plane(min_pt, max_pt, index)\n while start_angle <= end_angle:\n obj.Rotate3D(start_axis, end_axis, step_ang)\n # compute the area\n dims = [(max_pt[0] - min_pt[0]), (max_pt[1] - min_pt[1]), (max_pt[2] - min_pt[2])]\n curr_area = 1\n for dim in dims:\n if dim > 0:\n curr_area *= dim\n if curr_area < min_area:\n min_area = curr_area\n best_angle = start_angle\n start_angle += step_ang\n min_pt, max_pt = obj.GetBoundingBox()\n # projecting the points to the plane\n 
project_points_to_plane(min_pt, max_pt, index)\n # rotate the object using the best angle\n obj.Rotate3D(start_axis, end_axis, best_angle)", "def startAngMovementX(self):\n self.boolrot[0] = True", "def angle_to(self, other):\n return other.angle - self.angle", "def rel_angle(self, angle):\n steps = int(angle / 360 * self.steps_per_rev)\n self.steps(steps)", "def reflect_adp(adp, planev):\n M = np.identity(4)\n M[:3, :3] -= 2.0 * np.outer(planev, planev)\n M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev\n\n return rotate_adp(adp, M[:3, :3])", "def rotateZ(self, angle):\r\n if angle:\r\n c = cos(radians(angle))\r\n s = sin(radians(angle))\r\n self.mtrx = dot([[c, s, 0, 0],\r\n [-s, c, 0, 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 0, 1]],\r\n self.mtrx)\r\n self.rtn[2] = angle\r\n self.was_moved = True", "def rotate(self, angle, point=None):\n if not point:\n point = self.middle\n self.p1.rotate(angle, point)\n self.p2.rotate(angle, point)", "def label_rotation(angle, both_directions):\n angle_label = angle\n anchor_label = \"south\"\n if angle > 90 or angle <= -90:\n angle_label = angle + 180\n if both_directions:\n # if transitions in both directions, the transition to the\n # left has its label below the transition, otherwise above\n anchor_label = \"north\"\n if hasattr(angle_label, 'n'):\n # we may need to convert symbolic expressions to floats,\n # but int does not have .n()\n angle_label = angle_label.n()\n return \"rotate=%.2f, anchor=%s\" % (angle_label, anchor_label)", "def transform_angle_by_quadrant(self, initial_angle, x_diff, y_diff):\n\t\tif x_diff > 0 and y_diff > 0:\n\t\t\tprint(\"p1 in quadrant: {}\".format(1))\n\t\t\t# Point B in quadrant 1..\n\t\t\treturn degrees(initial_angle)\n\t\telif x_diff < 0 and y_diff > 0:\n\t\t\tprint(\"p1 in quadrant: {}\".format(2))\n\t\t\t# Point B in quadrant 2..\n\t\t\treturn 180 - degrees(initial_angle)\n\t\telif x_diff < 0 and y_diff < 0:\n\t\t\tprint(\"p1 in quadrant: {}\".format(3))\n\t\t\t# Point B in quadrant 3..\n\t\t\treturn 180 + degrees(initial_angle)\n\t\telif x_diff > 0 and y_diff < 0:\n\t\t\tprint(\"p1 in quadrant: {}\".format(4))\n\t\t\t# Point B in quadrant 4..\n\t\t\treturn 360 - degrees(initial_angle)\n\t\telse:\n\t\t\traise \"Error occurred in basic_drive_3/transform_angle_by_quadrant func..\"", "def _transfer_adp(self):\n toleratedAtoms = []\n for atom in self['exp'].atoms:\n tolerated = atom.transfer_adp()\n if tolerated:\n toleratedAtoms.append(tolerated)\n for atom in toleratedAtoms:\n atom.averageADP()", "def adp_to_atp (self, num) :\n self.adp -= num\n self.atp += num", "def rotate(self, angle):\n perp = Vec2D(-self[1], self[0])\n angle = angle * math.pi / 180.0\n c, s = math.cos(angle), math.sin(angle)\n return Vec2D(self[0] * c + perp[0] * s, self[1] * c + perp[1] * s)", "def rotate(self,angle):\n radians = (angle * math.pi)/180\n self.direction += angle\n for object in self.objects:\n y = object.position[0]\n x = object.position[1]\n\n object.position[0] = x * math.sin(radians) + y * math.cos(radians)\n object.position[1] = x * math.cos(radians) - y * math.sin(radians)", "def rotation_angle_to_object(object_info, point):\n object_coords = object_info['axisAlignedBoundingBox']['center'] if 'axisAlignedBoundingBox' in object_info else \\\n object_info['position']\n\n x_delta = object_coords['x'] - point['x']\n z_delta = object_coords['z'] - point['z']\n r = np.sqrt(np.square(x_delta) + np.square(z_delta))\n\n angle = np.arctan2(x_delta / r, z_delta / r) * 180 / np.pi\n\n if angle < 0:\n angle += 360\n if angle 
> 360.0:\n angle -= 360.0\n return angle", "def relativeRotation(self):\n return self.rotation()", "def rotate_waypoint(self, direction: str, argument: int):\n if direction == \"R\":\n angle = radians(argument)\n else:\n angle = -1 * radians(argument)\n y = self.waypoint_vector[0]\n x = self.waypoint_vector[1]\n self.waypoint_vector[0] = int(round(x * sin(angle) + y * cos(angle)))\n self.waypoint_vector[1] = int(round(x * cos(angle) - y * sin(angle)))", "def calculate_attitude_angle(self):\n return np.arctan(np.pi * (1 - self.eccentricity_ratio ** 2) / (4 * self.eccentricity_ratio))", "def arctan(self):\n return type(self)(self.parent(),\n self._simplify(self._express.arctan()))", "def arctan(self):\t\t\n\t\tval = np.arctan(self.val)\n\t\tif len(self.der.shape):\n\t\t\tto_multiply = 1 / (1 + (self.val) ** 2)\n\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\tder = to_multiply * self.der\n\t\telse:\n\t\t\tder = None\n\t\treturn Var(val, der)", "def rotate(self, a):\n ca = cos(a)\n sa = sin(a)\n rM = Matrix([\n [ca, -sa],\n [sa, ca]\n ])\n p0 = self.p0\n self.c = p0 + rM @ (self.c - p0)\n dp = p0 - self.c\n self.a0 = atan2(dp.y, dp.x)\n return self", "def arctan(obj):\n\tif isinstance(obj, Variable):\n\t\tval = np.arctan(obj.val)\n\t\tder = 1 / (1 + (obj.val) ** 2)\n\t\tif len(obj.der.shape)>len(der.shape):\n\t\t\tder = np.expand_dims(der,1)\n\t\tder = np.multiply(der, obj.der)\n\t\treturn Variable(val,der)\n\telse:\n\t\treturn np.arctan(obj)" ]
[ "0.54740787", "0.54063845", "0.53933036", "0.5287055", "0.5184366", "0.5183668", "0.5156844", "0.50909144", "0.50790036", "0.50740695", "0.50218683", "0.50043344", "0.499874", "0.49680153", "0.49113643", "0.49104938", "0.4907334", "0.489211", "0.48913074", "0.48873633", "0.48745838", "0.48705223", "0.48688522", "0.48675933", "0.4849631", "0.48493025", "0.48284987", "0.48226345", "0.48057982", "0.4765026", "0.47611034", "0.475089", "0.47395355", "0.47341695", "0.47204044", "0.4705675", "0.46993393", "0.4697737", "0.46975237", "0.46934712", "0.46912432", "0.4680001", "0.4676256", "0.4675645", "0.46700993", "0.4666941", "0.46488693", "0.46402156", "0.46322086", "0.4631448", "0.46312946", "0.46230307", "0.46217588", "0.46211812", "0.46067443", "0.46066347", "0.46038383", "0.46012205", "0.46008095", "0.45999435", "0.45973217", "0.4597118", "0.45850536", "0.45844156", "0.4577088", "0.45664108", "0.4565556", "0.45628738", "0.45608464", "0.4558212", "0.45578665", "0.45566663", "0.45552605", "0.45506743", "0.4550217", "0.453733", "0.45340082", "0.45326087", "0.4532395", "0.45296133", "0.45254406", "0.4523005", "0.4522412", "0.45223486", "0.45169964", "0.4514793", "0.45127788", "0.45125243", "0.45097163", "0.45058933", "0.45037833", "0.45022154", "0.4502103", "0.44983128", "0.44947276", "0.44920287", "0.44878197", "0.44819278", "0.4463382", "0.44622397" ]
0.6927287
0
Calculates the bond distinguishing parameter Xi.
def xi(element1, element2, distance):
    return (float(covalence_radius[element1]) + float(covalence_radius[element2]) -
            (0.08 * float(abs(electro_negativ[element1] - electro_negativ[element2]))) - distance)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Insurance(Md,X):\n u = X[iu]\n b = Md.b()\n return u/b - u/(1-u+u*b)", "def Insurance(Md,X):\n if VERSION == 16:\n utemp = 0.0*X[iu]+1.0*Md.ubar\n elif VERSION == 31:\n utemp = 1.25*(X[iu]-Md.ubar)+1.0*Md.ubar\n else:\n utemp = X[iu]\n\n Mom = Md.IP.get_Moments(utemp,Md.ubar,Md.tau)\n return beta*(-Mom[1]+Mom[3]/Mom[0])", "def bond(self, i, j):\n i_covr = qcel.covalentradii.get(self.sym[i], units='angstrom')\n j_covr = qcel.covalentradii.get(self.sym[j], units='angstrom')\n r = np.linalg.norm(self.xyz[i] - self.xyz[j])\n if r < 1.1*(i_covr + j_covr):\n return int(1)\n return int(0)", "def calculate_condensate_params(Rvi, Bgi):\n\n Rsi = 1 / Rvi\n Boi = Rsi * Bgi\n\n return(Rsi, Boi)", "def calculate_ndvi(self):\n self.ndvi = (self.bands[\"n\"].astype(float) - self.bands[\"r\"].astype(float)) \\\n / (self.bands[\"n\"].astype(float) + self.bands[\"r\"].astype(float))", "def isi_calc(self):\n arg = erfinv(0.8)*1.0E6/(self.speedup*self.br_nominal)\n print('arg: ', arg)\n\n # calculate center eye opening with no additional impairments\n self.isi_center = 2.0*erf(arg/self.tc) - self.l_1 # column Z\n\n # calculate center eye opening with residual DJ (DJ - DCD)\n self.isi_dj_center = (erf(arg*(1.0+self.dj_ui)/self.tc) + erf(arg*(1.0-self.dj_ui)/self.tc) - self.l_1) # column AD\n\n # calculate eye closing induced by interferometric effects from link end reflections\n mean_reflection = math.pow(10.0,0.05*(self.rx_reflection + self.tx_reflection)) # cell AB5\n er_lin = math.pow(10.0,0.1*self.er_dB_min) # cell AB7\n\n\n arg1 = np.sqrt(2.0*er_lin*self.isi_dj_center*(er_lin-1.0) + (er_lin+1.0)*self.l_1)\n print('arg1: ', arg1)\n arg2 = np.divide(arg1,self.isi_dj_center)\n arg3 = (2.0*self.ref_nf*np.power(10.0,-0.1*self.chil)*mean_reflection)\n self.isi_reflection = self.l_1-np.multiply(arg2,arg3)\n\n # calculate center eye opening with both residual DJ and reflection degradations included\n self.isi_dj_refl_closed = np.multiply(self.isi_dj_center, self.isi_reflection) # column AA\n print('isi_dj_refl_closed (AA) : ', self.isi_dj_refl_closed)\n \n # calculate eye opening at the corners with no additional impairments\n eff_rx_eye = 2.0*(0.5-self.X2)*self.speedup\n self.isi_corners = (erf(arg*(1.0+eff_rx_eye)/self.tc) + erf(arg*(1.0-eff_rx_eye)/self.tc) - self.l_1) # column AB\n\n # calculate eye opening at the corners with residual DJ impairment\n self.isi_dj_corners = (erf(arg*(1.0+eff_rx_eye+self.dj_ui)/self.tc) + erf(arg*(1.0-eff_rx_eye-self.dj_ui)/self.tc) - self.l_1) # column AC\n self.isi_tp4_rx = (erf(arg*(1.0+eff_rx_eye)/self.rx_1090_rise) + erf(arg*(1.0-eff_rx_eye)/self.rx_1090_rise) - 1) # cell AG5\n\n # end of GbE10.isi_calcdef isi_calc(self):", "def param(self,name,i):\n state = self.getstate(name)\n x,C = state.vec,state.cov\n cc = C[i,i]\n if (cc>0.): cc=sqrt(cc)\n xx,cc = x[i],cc\n debug('kfnode.param ',(name,xx,cc))\n return xx,cc", "def impurityBis(x,iw,size):\n Vi = 0\n ai =2*a0+iw*1.5*a0+0.5*(iw-1)*a0\n \n if (x > ai-size and x < ai):\n Vi = 300/Eh\n elif(x == ai or x == ai-size):\n Vi = 150/Eh\n \n return Vi", "def simplebattfunc(x, i):\n i0, i1 = i\n di = i1\n d2i = a*io*(n*F)/(R*T)*(-I/s + i0*(1/s + 1/K))\n return di, d2i", "def gini_coefficient(x):\n diffsum = 0\n for i, xi in enumerate(x[:-1], 1):\n diffsum += np.sum(np.abs(xi - x[i:]))\n return diffsum / (len(x) ** 2 * np.mean(x))", "def bic(self, X):\n raise NotImplementedError", "def linearbattfunc(x, IV):\n i1, i2, V1, V2 = IV\n di2 = a*io*(n*F)/(R*T)*(V1 - V2)\n #Kinetics\n di1 = -di2\n #charge neutrality\n dV1 = 
-i1/s\n #solids ohms law\n dV2 = -i2/K\n #liquids ohms law\n return di1, di2, dV1, dV2", "def res_bond_distance(self, resi):\n return self.peptide_bond_distances[resi]", "def calc_xi(self):\n\t\n\tk_dot_x = self.k[0]*self.x[0,:,:] + self.k[1]*self.x[1,:,:] + self.k[2]*self.x[2,:,:]\n\n\tself.xi = self.t.reshape((1,self.N)) - k_dot_x/l.Clight\n\n\treturn", "def icx(self) -> int:\n return self._icx", "def get_dilations(onnx_node): # type: (NodeWrapper) -> Tuple[int, int, int]\n dil_h, dil_w, dil_d = 1, 1, 1 # default values\n dilations = onnx_node.get_attribute_value('dilations', ()) # dilation along each filter axis\n\n if len(dilations) == 2: # ONNX input axes order NCHW\n dil_h, dil_w = dilations\n elif len(dilations) == 3: # ONNX input axes order NCHWD\n dil_h, dil_w, dil_d = dilations\n\n return dil_h, dil_w, dil_d", "def get_atomic_virial(self):\n return self.get_bond_virial() + self.get_coulomb_virial() + self.get_dispersion_virial()", "def get_bond_virial(self):\n if self._bond_virial is None:\n self._bond_virial = self._get_potential(self._system._bonded)\n return self._bond_virial", "def _get_gradient_delta(self, Xi, yi):\n\n z = sum(wi * xij for wi, xij in zip(self.weights, Xi)) + self.bias\n y_hat = 1 / (1 + exp(-z))\n bias_grad_delta = yi - y_hat\n weights_grad_delta = [bias_grad_delta * Xij for Xij in Xi]\n return bias_grad_delta, weights_grad_delta", "def bla_ipr(x):\n phi = x / np.sqrt(np.sum(x**2))\n return np.sum(phi**4)\n # if s2 < MACH_EPSILON:\n # # Zero sum. Could happen for veeery small overall prevalence.\n # return 0.\n # else:\n # return np.sum(x2 * x2 / (s2 * s2))", "def xi(element1, element2, distance):\n return (float(covalence_radius[element1]) + float(covalence_radius[element2]) -\n (0.08 * float(abs(electro_negativity[element1] - electro_negativity[element2]))) - distance)", "def derivative_sigmoid(x):\n return x * (1 - x)", "def derivative_sigmoid(x):\n return x * (1 - x)", "def calculateXi(self, rk=None):\n if rk is None:\n return self.xi, self.varxi\n else:\n return self.xi - rk.xi, self.varxi + rk.varxi", "def interaction(self):\n\n Fint1 = self._calc_Fint(self._mu_0, self._mu_ex, self.p_0, self.p_ex)\n Fint2 = self._calc_Fint(self._mu_ex, self._mu_0, self.p_ex, self.p_0)\n\n Iint = (self.I0 * self._mu_0 * self.V.omega *\n (np.exp(-self.V.tau / self._mu_ex) * Fint1 +\n np.exp(-self.V.tau / self._mu_0) * Fint2))\n\n return self.SRF.NormBRDF * (1. 
- self.bsf) * Iint", "def calculate_BIC(self): \n hmm_ll_calculator = LikelihoodInfEngineHMM(\n dbn=self.model.dbn, hidden_node_index=0, check_dbn=False)\n ll_full = hmm_ll_calculator.calc_ll(self.seq_list, self.mismask_list) \n return 2 * ll_full - self._get_parameter_count() * math.log(\n self._get_observation_count())", "def get_irr_boundary_and_psis(self, i):\n M, p1, p2 = self.space.irr_boundary_as_space()\n return (M, p1, p2), psi_class(M, p1) ** i * psi_class(M, p2) ** (self.degree - i - 1)", "def get_bond_info(self):\n return", "def best_split1(self,X,attributes):\n if (self.criterion==\"information_gain\"):\n global_if = float('-inf') # the highest value of varience seen so far\n attr , val = None, None\n for attribute in attributes[::-1]:\n attr_val = pd.Series(X[attribute].unique()).sort_values(ignore_index=True)\n last_val = attr_val[0]\n for i in range(1,attr_val.size):\n cur_val = attr_val[i]\n valc = round((last_val+cur_val)/2,4)\n last_val = cur_val\n cur_if = information_gain1(valc,X[attribute],X[\"Output\"],self.type)\n if (cur_if>global_if):\n global_if,attr,val = cur_if,attribute,valc\n return attr,val\n else:\n global_if = float('inf') # the lowest value of varience seen so far\n attr , val = None, None\n for attribute in attributes[::-1]:\n attr_val = pd.Series(X[attribute].unique()).sort_values(ignore_index=True)\n last_val = attr_val[0]\n for i in range(1,attr_val.size):\n cur_val = attr_val[i]\n valc = round((last_val+cur_val)/2,4)\n last_val = cur_val\n cur_if = gini_gain1(X[\"Output\"],X[attribute], valc)\n if (global_if>cur_if):\n global_if,attr,val = cur_if,attribute,valc\n return attr,val", "def get_xi_threshold(Mu,Y_max,Effective_Impedance,Phi_max):\n return (np.pi*Phi_max**5*ImZ_over_k)/(32*Mu*(Mu+1)*chi(Mu,Y_max)*Effective_Impedance)", "def stereomatic_descriptor(atoms_pair, x, database=new_bond_data_dict):\n \n try:\n data_to_import = database[atoms_pair]\n except KeyError:\n return 0\n bond_length_average, bond_orders, bond_range_list = prepare_data(data_to_import)\n \n ret_value = 0\n for i in range(len(bond_length_average)):\n ret_value += sigmoid_function(bond_length_average[i], 100, bond_orders[i]-1, x) * box_function(bond_range_list[i][0], bond_range_list[i][1], -bond_length_average[i], 0.0001, x)\n \n return ret_value", "def sigmoidPrime(self, x):\n # Derivada da sigmoid \n return x * (1 - x)", "def get_nuisance_parameters(self):\n pass", "def psi(self, i):\n res = self.all_residues[i]\n\n if i == len(self.all_residues) or not self.connected_to_next(i):\n return 0.0\n\n try:\n n = res['N'].get_vector()\n ca = res['CA'].get_vector()\n c = res['C'].get_vector()\n res_plus_one = self.all_residues[i + 1]\n\n nn = res_plus_one['N'].get_vector()\n psi = calc_dihedral(n, ca, c, nn)\n return psi\n except Exception:\n print \"Could not get psi for \"+repr(i)\n raise LookupError", "def ipi(self):\n return self._ipi", "def I(x):\n if abs(x-L/2.0) > 0.1:\n return 0\n else:\n return 1", "def delta_xi(xis, cco2, n_alts = 40):\n\n fu = 0.\n for atm in allatms:\n hr = all_coeffs[(atm, cco2, 'hr_ref')][:n_alts]\n hr_somma = hr_from_xi(xis, atm, cco2)\n fu += atmweigths[atm] * np.sum((hr - hr_somma)**2)\n\n return fu", "def get_icdf(self, xx):\n return self.parent.ppf(xx)", "def _subdiff_b(self, i, compensate_class_balance=False):\n if 1 - self._data.train_y[i] * self._f(self._data.train_X[:, i]) > 0:\n if compensate_class_balance:\n return - self._data.train_y[i] * self._data.importance(self._data.train_y[i])\n else:\n return - self._data.train_y[i]\n else:\n return 
0", "def indicator_func(self):\n if self.indicator_type == IndicatorType.Logistic:\n self.indicator_score = scipy.special.expit(\n (self.predict - self.kappa) / self.sigma)\n self.indicator_derivative = self.indicator_score * (\n 1 - self.indicator_score) / self.sigma\n elif self.indicator_type == IndicatorType.Relu:\n self.indicator_score = 0.5 * (1 + np.minimum(\n 1, np.maximum(-1, (self.predict - self.kappa) / self.delta)))\n self.indicator_derivative = (\n (self.kappa - self.delta < self.predict) &\n (self.predict < self.kappa + self.delta)) / (2 * self.delta)\n elif self.indicator_type == IndicatorType.Flat:\n raise NotImplementedError(\"Flat indicator not implemented\")\n elif self.indicator_type == IndicatorType.Hard:\n raise NotImplementedError(\"Hard indicator not implemented\")\n else:\n raise RuntimeError(\"Unkown inidicator function\")", "def elemental_descriptor(A1_ion, A2_ion, B_ion):\n ele_A1 = mg.Element(A1_ion)\n ele_A2 = mg.Element(A2_ion)\n ele_B = mg.Element(B_ion)\n ele_O = mg.Element('O') \n # A/B ion oxidation state \n common_oxidation_states_A1 = ele_A1.common_oxidation_states[0]\n common_oxidation_states_A2 = ele_A2.common_oxidation_states[0]\n common_oxidation_states_A = np.mean(common_oxidation_states_A1 + common_oxidation_states_A2)\n common_oxidation_states_B = ele_B.common_oxidation_states[0]\n # ionic radius property\n ionic_radius_A1 = float(str(ele_A1.average_ionic_radius)[:-4])\n ionic_radius_A2 = float(str(ele_A2.average_ionic_radius)[:-4])\n ionic_radius_A = (ionic_radius_A1+ ionic_radius_A2)/2\n ionic_radius_B = float(str(ele_B.average_ionic_radius)[:-4])\n ionic_radius_O = float(str(ele_O.average_ionic_radius)[:-4])\n # Tolerance factor \n TF = (ionic_radius_A + ionic_radius_O)/(np.sqrt(2)*(ionic_radius_B + ionic_radius_O))\n # Octahedral factor\n OF = ionic_radius_B/ionic_radius_O \n # ionic_radius ratios\n ionic_ration_AO = ionic_radius_A / ionic_radius_O\n ionic_ration_BO = ionic_radius_B / ionic_radius_O\n # averaged electronegativity for A and B atoms\n Pauling_electronegativity_A1 = ele_A1.X\n Pauling_electronegativity_A2 = ele_A2.X\n Pauling_electronegativity_A = (Pauling_electronegativity_A1 + Pauling_electronegativity_A2)/2\n Pauling_electronegativity_B = ele_B.X\n Pauling_electronegativity_O = ele_O.X\n # Difference in the electronegativity for A-O and B-O\n Diff_A_O = Pauling_electronegativity_A - Pauling_electronegativity_O\n Diff_B_O = Pauling_electronegativity_B - Pauling_electronegativity_O\n return [common_oxidation_states_A, common_oxidation_states_B, Pauling_electronegativity_A, Pauling_electronegativity_B, TF, OF, ionic_ration_AO, ionic_ration_BO, Diff_A_O, Diff_B_O]", "def get_bond_connectivity(self):\n m, connectivity = self.owner, []\n for index, i in enumerate(self.rix):\n for j in self.rix[index + 1:]:\n b1 = m.rings[i].bix\n b2 = m.rings[j].bix\n if set(b1).intersection(b2):\n connectivity.append((i, j))\n return tuple(connectivity)", "def amorphous(x,nw,sizes):\n v = 0\n \n for i in range(1,nw+1):\n v = impurityBis(x,i,sizes[i-1])\n \n return v", "def bic(self):\n return np.log(self.sample_size) * self.number_of_parameters() - 2*self.ll[-1]", "def getIR2() -> int:\n pass", "def Incentives(Md,X,EEnvPrime):\n u = X[iu]\n b = Md.b()\n\n\n\n CplusG = 1./X[iMU] * (1+SSGRatio*X[ieG])\n\n SD = SkillDeriv(Md,X)\n SkillRisk = beta * ( EEnvPrime[iEnv_EAlph] *(1-delta)*X[iEAlph]*SD[0]\n + EEnvPrime[iEnv_ElogAlpha]*(1-delta)*SD[1])\n\n dWdu = (log(b) + (1-b)/(1-u+u*b) - (1+chi)*X[iA]*X[ih]/CplusG/X[iSA]\n + 
(1+chi)*psi_1*X[iM]**psi_2/CplusG+gamma_0*X[ih]**(1+gamma)/(1+gamma)-psy + SkillRisk)\n dqdb_M = (1./kappa)*X[iq] * X[idVndb] / X[iVn]\n dudq_M = -X[iM]*(X[iulag]+upsilon*(1-X[iulag]))\n dudb_M = dudq_M * dqdb_M\n\n dWdh = ( (1+chi)*X[iA]/CplusG/X[iSA] - gamma_0 * X[ih]**gamma ) * (1-u)\n\n # dhdq = -zeta_2/(1+gamma)*X[ih]/(1-X[iu])*dudq_M\n # dhdubar = zeta_2/(1+gamma)*X[ih]/(1-Md.ubar)\n # dhdb = dhdq/kappa*X[iq]/X[iVn]*X[idVndb] + dhdubar * Md.dubardb\n\n dhdu = -zeta_2/(1+gamma)*X[ih]/(1-X[iu])\n\n\n XSS = Md.XSS\n dhdubar = zeta_2/(1+gamma)*X[ih]/(1-XSS[iu])\n dubardq_M = -XSS[iM]*(XSS[iulag]+upsilon*(1-XSS[iulag]))\n dqbardb_M = (1./kappa)*XSS[iq] * XSS[idVndb] / XSS[iVn]\n\n\n dhdb = dhdu*dudq_M*dqdb_M + dhdubar * dubardq_M*dqbardb_M\n\n\n return dWdu * dudb_M - (X[iulag]+upsilon*(1-X[iulag]))*X[iM]*X[iVn]*dqdb_M + dWdh * dhdb + beta*dudb_M*EEnvPrime[iEnv_ulag]", "def calculate_ndvi ( red_filename, nir_filename ):\n\n g_red = gdal.Open ( red_filename )\n red = g_red.ReadAsArray()\n g_nir = gdal.Open ( nir_filename )\n nir = g_nir.ReadAsArray()\n if ( g_red.RasterXSize != g_nir.RasterXSize ) or \\\n ( g_red.RasterYSize != g_nir.RasterYSize ):\n print \"ERROR: Input datasets do't match!\"\n print \"\\t Red data shape is %dx%d\" % ( red.shape )\n print \"\\t NIR data shape is %dx%d\" % ( nir.shape )\n\n sys.exit ( -1 )\n passer = np.logical_and ( red > 1, nir > 1 )\n ndvi = np.where ( passer, (1.*nir - 1.*red ) / ( 1.*nir + 1.*red ), -999 )\n return ndvi", "def marginal_p(self, xi, thetai):\n if self.marginal_description == 'gaussian':\n mu, sig = thetai # mu, sig have size m by k\n xi = xi.reshape((-1, 1, 1))\n return (-(xi - mu)**2 / (2. * sig) - 0.5 * np.log(2 * np.pi * sig)).transpose((1, 0, 2)) # log p(xi|yj)\n\n elif self.marginal_description == 'discrete':\n # Discrete data: should be non-negative integers starting at 0: 0,...k. k < 32 because of np.choose limits\n logp = [theta[np.newaxis, ...] for theta in thetai] # Size dim_visible by n_hidden by dim_hidden\n return np.choose(xi.reshape((-1, 1, 1)), logp).transpose((1, 0, 2))\n\n else:\n print('Marginal description \"%s\" not implemented.' 
% self.marginal_description)\n sys.exit()", "def ci(self):\n var_assumptions = self.var_assumptions if self.var_assumptions == \"pooled\" else \"unequal\"\n ci_vals = self.comparison.zconfint_diff(self.alpha, self.hypothesis_sm, var_assumptions)\n\n return [ci_vals, self.ci_percents]", "def ndwi(self,\n img):\n return img.normalizedDifference(['NIR', 'SWIR2']).select([0], ['NDWI']).multiply(self.scale_factor)", "def _predict(self, Xi):\n\n z = sum(wi * Xij for wi, Xij in zip(self.weights, Xi)) + self.bias\n return 1 / (1 + exp(-z))", "def get_IC(self, x, dx, **kwargs):\n raise NotImplementedError(\"IC model %s invalid: must define the get_IC function\" % self.__class__.__name__)", "def _get_dP(ip, op) -> 'psi':\n if ('l' in ip and 'g' in op) or ('g' in ip and 'l' in op):\n # Latent fluid (boiling or condensing)\n return 1.5\n elif ip == 'l':\n # Sensible liquid\n return 5\n elif op == 'g':\n # Sensible vapor\n return 3", "def Idiode(Isat, Vdiode, Vt, n):\n return Isat * (np.exp(Vdiode / n / Vt) - 1.)", "def get_lig_bonds(np_xyz, lig_ndx, close_ndxs, inp):\n n_at1, n_at2 = np.sum(inp.lig1_n_per_bead), np.sum(inp.lig2_n_per_bead)\n n_core = int(len(np_xyz) - inp.lig1_num*n_at1 - inp.lig2_num*n_at2)\n core_xyz = np_xyz[:n_core]\n\n lig1_bonds, lig2_bonds = [], []\n\n for i in range(inp.lig1_num):\n ndx0 = n_core + i*n_at1\n ndx1 = ndx0*1\n ndx2 = close_ndxs[lig_ndx[0][i]]#np.argsort(cdist([np_xyz[ndx0]], core_xyz))[0,0]\n bond = [ndx1, ndx2]\n lig1_bonds.append(bond)\n for j in range(n_at1-1):\n ndx1 = ndx0 + j\n ndx2 = ndx1 + 1\n bond = [ndx1, ndx2]\n lig1_bonds.append(bond)\n\n for i in range(inp.lig2_num):\n ndx0 = n_core + n_at1*inp.lig1_num + i*n_at2\n ndx1 = ndx0*1\n ndx2 = close_ndxs[lig_ndx[1][i]]#np.argsort(cdist([np_xyz[ndx0]], core_xyz))[0,0]\n bond = [ndx1, ndx2]\n lig2_bonds.append(bond)\n for j in range(n_at2-1):\n ndx1 = ndx0 + j\n ndx2 = ndx1 + 1\n bond = [ndx1, ndx2]\n lig2_bonds.append(bond)\n return (lig1_bonds, lig2_bonds)", "def _construct_nll_costs(self, si, xo):\n # average log-likelihood over the refinement sequence\n xh = self.obs_transform(si)\n if self.x_type == 'bernoulli':\n ll_costs = log_prob_bernoulli(xo, xh)\n else:\n ll_costs = log_prob_gaussian2(xo, xh, \\\n log_vars=self.bounded_logvar)\n nll_costs = -ll_costs\n return nll_costs", "def get_P_1obs_xi(self, obsname, dataID):\n covmat = self.covmat[obsname]\n\n ##### Get the follow-up observable, obsintr is used for setting up mass range\n if obsname=='Yx':\n obsmeas, obsintr, obserr = self.catalog['Yx_fid'][dataID], self.scaling['Dx'], self.catalog['Yx_err'][dataID]\n elif obsname=='Mgas':\n obsmeas, obsintr, obserr = self.catalog['Mg_fid'][dataID], self.scaling['Dx'], self.catalog['Mg_err'][dataID]\n elif obsname=='WLMegacam':\n LSSnoise = self.WLcalib['Megacam_LSS'][0] + self.scaling['MegacamScatterLSS'] * self.WLcalib['Megacam_LSS'][1]\n obsmeas, obserr, obsintr = .8*self.scaling['bWL_Megacam']*self.obs2mass('zeta', self.xi2zeta(self.catalog['xi'][dataID]), self.catalog['redshift'][dataID]), .3, self.scaling['DWL_Megacam']\n elif obsname=='WLHST':\n LSSnoise = self.WLcalib['HST_LSS'][0] + self.scaling['HSTscatterLSS'] * self.WLcalib['HST_LSS'][1]\n obsmeas, obserr, obsintr = .8*self.scaling['bWL_HST']*self.obs2mass('zeta', self.xi2zeta(self.catalog['xi'][dataID]), self.catalog['redshift'][dataID]), .3, self.scaling['DWL_HST']\n\n ##### Define reasonable mass range\n # xi -> M(xi)\n xi_minmax = np.array([max(2.6,self.catalog['xi'][dataID]-5), self.catalog['xi'][dataID]+3])\n M_xi_minmax = 
self.obs2mass('zeta', self.xi2zeta(xi_minmax), self.catalog['redshift'][dataID])\n if M_xi_minmax[0]>self.HMF['M_arr'][-1]:\n print \"cluster mass exceeds HMF mass range\", self.catalog['SPT_ID'][dataID],\\\n M_xi_minmax[0], self.HMF['M_arr'][-1]\n return 0\n\n # obs: prediction\n lnobs0 = np.log(self.mass2obs(obsname, self.obs2mass('zeta', self.xi2zeta(self.catalog['xi'][dataID]), self.catalog['redshift'][dataID]), self.catalog['redshift'][dataID]))\n SZscatterobs = self.dlnM_dlnobs('zeta') / self.dlnM_dlnobs(obsname, self.SZmPivot, self.catalog['redshift'][dataID]) * self.scaling['Dsz']\n intrscatter = (SZscatterobs**2 + obsintr**2)**.5\n obsthminmax = np.exp(np.array([lnobs0-5.*intrscatter, lnobs0+3.5*intrscatter]))\n M_obsth_minmax = self.obs2mass(obsname, obsthminmax, self.catalog['redshift'][dataID])\n # obs: measurement\n if obsname in ('Mgas', 'Yx'):\n obsmeasminmax = np.amax((.1, obsmeas-3*obserr)), obsmeas+3*obserr\n else:\n obsmeasminmax = np.exp(np.log(obsmeas)-4*obserr), np.exp(np.log(obsmeas)+3*obserr)\n M_obsmeas_minmax = self.obs2mass(obsname, np.array(obsmeasminmax), self.catalog['redshift'][dataID])\n\n ##### Define grid in mass\n Mmin, Mmax = min(M_xi_minmax[0], M_obsth_minmax[0], M_obsmeas_minmax[0]), max(M_xi_minmax[1], M_obsth_minmax[1], M_obsmeas_minmax[1])\n Mmin, Mmax = max(.5*Mmin, self.HMF['M_arr'][0]), min(Mmax, self.HMF['M_arr'][-1])\n lenObs = 54\n M_obsArr = np.logspace(np.log10(Mmin), np.log10(Mmax), lenObs)\n\n ##### Observable arrays\n lnzeta_arr = np.log(self.mass2obs('zeta', M_obsArr, self.catalog['redshift'][dataID]))\n xi_arr = self.zeta2xi(np.exp(lnzeta_arr))\n obsArr = self.mass2obs(obsname, M_obsArr, self.catalog['redshift'][dataID])\n\n ##### Add radial dependence for X-ray observables\n if obsname in ('Mgas','Yx'):\n # Angular diameter distances in current and reference cosmology [Mpc]\n dA = cosmo.dA(self.catalog['redshift'][dataID], self.cosmology)/self.cosmology['h']\n dAref = cosmo.dA(self.catalog['redshift'][dataID], cosmologyRef)/cosmologyRef['h']\n # R500 [kpc]\n rho_c_z = cosmo.RHOCRIT * cosmo.Ez(self.catalog['redshift'][dataID], self.cosmology)**2\n r500 = 1000 * (3*M_obsArr/(4*np.pi*500*rho_c_z))**(1/3) / self.cosmology['h']\n # r500 in reference cosmology [kpc]\n r500ref = r500 * dAref/dA\n # Xray observable at fiducial r500...\n obsArr*= (self.catalog['r500'][dataID]/r500ref)**self.scaling['dlnMg_dlnr']\n # ... 
corrected to reference cosmology\n obsArr*= (dAref/dA)**2.5\n\n lnobsArr = np.log(obsArr)\n\n ##### HMF array for convolution\n M_HMF_arr = M_obsArr\n\n ##### Convert self.HMF to dN/(dlnzeta dlnobs) = dN/dlnM * dlnM/dlnzeta * dlnM/dlnobs\n # This only matter if dlnM/dlnobs is mass-dependent, as for dispersions\n dN_dlnzeta_dlnobs = np.exp(self.HMF_interp(np.log(self.catalog['redshift'][dataID]), np.log(M_HMF_arr)))[0]\n\n ##### HMF on 2D observable grid\n HMF_2d_in = np.zeros((lenObs, lenObs))\n np.fill_diagonal(HMF_2d_in, dN_dlnzeta_dlnobs)\n\n ##### 2D convolution with correlated scatter [lnobs,lnzeta]\n pos = np.empty((lenObs,lenObs,2))\n pos[:,:,0], pos[:,:,1] = np.meshgrid(lnobsArr, lnzeta_arr, indexing='ij')\n kernel = multivariate_normal.pdf(pos, mean=(lnobsArr[27], lnzeta_arr[27]), cov=covmat)\n HMF_2d = signal.fftconvolve(HMF_2d_in, kernel, mode='same')\n\n # set to 0 if zeta<2\n HMF_2d[:,np.where(lnzeta_arr<np.log(2.))] = 0.\n\n # Set small negative values to zero (FFT noise)\n if np.any(HMF_2d<-1e-7):\n if np.abs(np.amin(HMF_2d))/np.amax(HMF_2d)>1e-6:\n print \"HMF_2d has negative entries:\",np.amin(HMF_2d), np.amax(HMF_2d)\n HMF_2d[np.where(HMF_2d<0)] = 0.\n\n # Safety check\n if np.all(HMF_2d==0.):\n print self.catalog['SPT_ID'][dataID],'HMF_2d is zero, det',np.linalg.det(covmat),self.scaling['Dsz'],obsintr,self.scaling['rhoSZX']\n return 0.\n\n ##### dN/(dxi dlnobs) = dN/(dlnzeta dlnobs) * dlnzeta/dxi [lnobs,xi]\n HMF_2d*= self.dlnzeta_dxi(xi_arr)[None,:]\n\n #### Convolve with xi measurement error [lnobs]\n dP_dlnobs = np.trapz(HMF_2d * norm.pdf(self.catalog['xi'][dataID], xi_arr[None,:], 1.), xi_arr, axis=1)\n\n\n ##### Evaluate likelihood\n #dP/dobs = dP/dlnobs * dlnobs/dobs = dP/dlnobs /obs\n dP_dobs = dP_dlnobs/obsArr\n # normalize\n dP_dobs/= np.trapz(dP_dobs, obsArr)\n\n ##### WL\n if obsname in ('WLHST', 'WLMegacam'):\n # Concolve with Gaussian LSS scatter\n if LSSnoise>0.:\n integrand = dP_dobs[None,:] * norm.pdf(obsArr[:,None], obsArr[None,:], LSSnoise)\n dP_dobs = np.trapz(integrand, obsArr, axis=1)\n dP_dobs/= np.trapz(dP_dobs, obsArr)\n # P(Mwl) from data\n Pwl = self.WL.like(self.catalog, dataID, obsArr, self.cosmology, self.MCrel, self.lnM500_to_lnM200)\n # Get likelihood\n likeli = np.trapz(Pwl*dP_dobs, obsArr)\n\n\n ##### X-ray\n else:\n # Get likelihood\n likeli = np.trapz(dP_dobs*norm.pdf(obsmeas, obsArr, obserr), obsArr)\n\n if getpull:\n integrand = dP_dobs[None,:] * norm.pdf(obsArr[:,None], obsArr[None,:], obserr)\n dP_dobs_obs = np.trapz(integrand, obsArr, axis=1)\n dP_dobs_obs/= np.trapz(dP_dobs_obs,obsArr)\n cumtrapz = integrate.cumtrapz(dP_dobs_obs,obsArr)\n perc = np.interp(obsmeas, obsArr[1:], cumtrapz)\n print self.catalog['SPT_ID'][dataID], '%.4f %.4f %.4f %.4e'%(self.catalog['xi'][dataID], self.catalog['redshift'][dataID], obsmeas, 2**.5 * ss.erfinv(2*perc-1))\n\n if ((likeli<0)|(np.isnan(likeli))):\n print self.catalog['SPT_ID'][dataID], obsname, likeli\n #np.savetxt(self.catalog['SPT_ID'][dataID],np.transpose((obsArr, dP_dobs)))\n return 0.\n\n\n return likeli", "def gibbs_ask_traffic(self, X, e, Z, bn, N):\n\n #makes copies\n X = e\n e = e\n\n #probability\n probability = [0,0]\n numerator = 0\n\n\n #True, False\n\n for x in range(N):\n # second joint\n if Z == True: # if non evidence variable\n random_choice = np.random.choice([0,1], 1, True, [0.5, 0.5])[0] #Rain or No Rain\n X[1] = bn[1][random_choice][0]\n else:\n random_choice = np.random.choice([0, 1], 1, True, [0.5, 0.5])[0] #Rain or No Rain\n X[1] = bn[1][random_choice][1]\n\n # first 
joint\n if X[1] == 0.8 or X[1] == 0.2: # Rain is true\n X[0] = bn[0][0]\n else: # Rain is False\n X[0] = bn[0][1]\n\n # third joint\n if X[1] == 0.8 or X[1] == 0.1: # traffic\n random_late = np.random.choice([0,1], 1, True, [0.5,0.5])[0]\n X[2] = bn[2][0][random_late]\n else: # no traffic\n random_late = np.random.choice([0, 1], 1, True, [0.5, 0.5])[0]\n X[2] = bn[2][1][random_late]\n\n # print(X)\n if X[0] == 0.1:\n probability[0] += 1\n else:\n probability[1] += 1\n\n\n probability[0] = probability[0] / N\n probability[1] = probability[1] / N\n # print(probability)\n return probability", "def test_COPYxi():\n\tk, outputs = 2, [0,0,1,1]\n\t# Prime Implicants\n\ttrue_pi0s = set(['02'])\n\ttrue_pi1s = set(['12'])\n\n\ttdt0, tdt1 = make_transition_density_tables(k=k, outputs=outputs)\n\tpi0s, pi1s = find_implicants_qm(tdt0) , find_implicants_qm(tdt1)\n\n\tassert (pi0s == true_pi0s) , ('Prime Implicants for 0 does not match. %s != %s' % (pi0s,true_pi0s))\n\tassert (pi1s == true_pi1s) , ('Prime Implicants for 1 does not match. %s != %s' % (pi1s,true_pi1s))\n\t# Two Symbols\n\ttrue_ts0s = [('02',[],[])]\n\ttrue_ts1s = [('12',[],[])]\n\n\tts0s,ts1s = find_two_symbols_v2(k=k, prime_implicants=pi0s) , find_two_symbols_v2(k=k, prime_implicants=pi1s)\n\n\tassert (ts0s == true_ts0s) , ('Two Symbol for 0 does not match. %s != %s' % (ts0s,true_ts0s))\n\tassert (ts1s == true_ts1s) , ('Two Symbol for 1 does not match. %s != %s' % (ts1s,true_ts1s))", "def agnesi(x):\n return 1/(1+x**2)", "def calculate_parameter_covariance_ij(self, i, j):\n\n # Initialise\n\n # 1. Input data\n dataset_paths = self.dataset_paths\n sensor_data_path = self.sensor_data_path\n\n # 2. Output Data\n temp_directory = self.temp_directory\n\n ################################################################################################################\n # 1.\tRead Matchup Data and harmonisation output data\n ################################################################################################################\n\n # Input data\n HData = self.data_reader(dataset_paths, sensor_data_path,\n open_uncertainty=True, open_additional_values=False)\n\n # Re-open final solver\n GNOp = GNAlgo(HData)\n GNOp.open(temp_directory)\n\n ################################################################################################################\n # 2.\tPerform harmonisation parameter covariance matrix element\n ################################################################################################################\n\n parameter_covariance_ij = GNOp.calculate_parameter_covariance_matrix_ij([i, j])\n\n return parameter_covariance_ij", "def neighbors(self, x):\n pass", "def estimate_nb(x,y,smoothing):\n\n raise NotImplementedError", "def _lambda(xi):#xi=xis; xi=Phi.dot(w); xi = 0\n div0 = np.divide(1, (2*xi),where=xi!=0)\n return (div0 * (sigmoid(xi)-(1/2)))", "def get_tag_info(xint,conn):\n\n get_tags = ('SELECT DISTINCT fip2.value '\n 'FROM interaction i, feature_interaction fi, feature_interactionprop fip, '\n 'feature f, cvterm cvt, feature_interactionprop fip2, cvterm cvt2 '\n 'WHERE f.feature_id = fi.feature_id AND fi.interaction_id = i.interaction_id '\n 'AND fi.feature_interaction_id = fip.feature_interaction_id '\n 'AND fip.type_id = cvt.cvterm_id AND cvt.name = \\'participating feature\\' '\n 'AND fi.feature_interaction_id = fip2.feature_interaction_id AND fip2.type_id = cvt2.cvterm_id '\n 'AND cvt2.name = \\'comment\\' AND f.uniquename = %s AND i.uniquename = %s')\n tags = connect(get_tags,xint,conn)\n return(tags)", 
"def compute_information_gain(Y, xi):\r\n H_Y = H(Y)\r\n\r\n TrainSet = np.delete(AllSets[2], -1, axis=1)\r\n ColumnInd = AllSets[3].index(xi) # extract from dictionary\r\n\r\n NumHeadlines = AllSets[2].shape[0]\r\n AllOccurences, Count = np.unique(AllSets[2][:, ColumnInd], return_counts=True)\r\n\r\n TotalH_YGivenX = 0\r\n for i, count in zip(AllOccurences, Count):\r\n NewY = Y[TrainSet[:, ColumnInd] == i]\r\n\r\n TotalH_YGivenX += H(NewY) * float(count) / NumHeadlines\r\n\r\n return H_Y - TotalH_YGivenX", "def df(x_i):\n return [2 * x_ij for x_ij in x_i]", "def df(x_i):\n return [2 * x_ij for x_ij in x_i]", "def get_bias(self):", "def derivitive(x):\n return x * 1", "def icc(x,y=None,verbose=0):\r\n TINY = 1.0e-20\r\n if y:\r\n all = N.concatenate([x,y],0)\r\n else:\r\n all = x+0\r\n x = all[:,0]\r\n y = all[:,1]\r\n totalss = ass(all-mean(all))\r\n pairmeans = (x+y)/2.\r\n withinss = ass(x-pairmeans) + ass(y-pairmeans)\r\n withindf = float(len(x))\r\n betwdf = float(len(x)-1)\r\n withinms = withinss / withindf\r\n betweenms = (totalss-withinss) / betwdf\r\n rho = (betweenms-withinms)/(withinms+betweenms)\r\n t = rho*math.sqrt(betwdf/((1.0-rho+TINY)*(1.0+rho+TINY)))\r\n prob = abetai(0.5*betwdf,0.5,betwdf/(betwdf+t*t),verbose)\r\n return rho, prob", "def calc_new_sig_poi(new_poi, params, x, above_idx):\n A1_a = 0\n A2_a = 1\n x0_a = 2\n dx_a = 3\n A1_b = 4\n A2_b = 5\n x0_b = 6\n dx_b = 7\n \n row, col = cuda.grid(2)\n if row < x.shape[0] and col < x.shape[1]:\n if above_idx[row,col] == True: \n new_poi[row, col] = params[A2_a] + (params[A1_a] - params[A2_a]) \\\n / (1.+ math.exp((x[row, col] - params[x0_a])/params[dx_a]))\n else:\n new_poi[row, col] = params[A2_b] + (params[A1_b] - params[A2_b]) \\\n / (1.+ math.exp((x[row, col] - params[x0_b])/params[dx_b]))", "def get_frame_x(self, i: int) -> Tuple[int]:\n return (0, self.pendulum1.x[i], self.pendulum2.x[i])", "def Getdxdparam(Mda,Mdb,Xa):\n\n Xb = Xa.copy()\n #Xb[iulag] = Xa[iulag] + (1-Xa[iq]*Xa[iM])*(Mdb.ubar-Mda.ubar)\n Xb[Mdb.nX:Mdb.nXY] = Mdb.F(Xb[Mdb.interpstates])\n Xb[Mdb.nXY:] = Mdb.Static(Xb)\n\n if CLArgs.param == \"b\":\n D = Mdb.b() - Mda.b()\n else:\n D = Mdb.tau - Mda.tau\n\n return (Xb[iM] - Xa[iM])/D", "def test_I_fraction(self):\n nu = np.array([0, 1, 10, 101, 450, 1001])+0.5\n x = np.array([1e-4, 1, 1e2, 1e3])\n result = bessel_sk.i_fraction(x, nu)\n expected = np.zeros((len(nu), len(x)))\n for i in range(len(nu)):\n for j in range(len(x)):\n X = x[j]\n NU = nu[i]\n expected[i,j] = mpmath.besseli(NU, X)/mpmath.besseli(NU+1, X)\n assert_almost_equal(result/expected, 1)", "def awilcoxont(x,y):\r\n if len(x) <> len(y):\r\n raise ValueError, 'Unequal N in awilcoxont. 
Aborting.'\r\n d = x-y\r\n d = N.compress(N.not_equal(d,0),d) # Keep all non-zero differences\r\n count = len(d)\r\n absd = abs(d)\r\n absranked = arankdata(absd)\r\n r_plus = 0.0\r\n r_minus = 0.0\r\n for i in range(len(absd)):\r\n if d[i] < 0:\r\n r_minus = r_minus + absranked[i]\r\n else:\r\n r_plus = r_plus + absranked[i]\r\n wt = min(r_plus, r_minus)\r\n mn = count * (count+1) * 0.25\r\n se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)\r\n z = math.fabs(wt-mn) / se\r\n z = math.fabs(wt-mn) / se\r\n prob = 2*(1.0 -zprob(abs(z)))\r\n return wt, prob", "def compute_gradients(self, x_i, y_i):\n dw = 0\n db = 0\n if y_i * (np.dot(x_i, self.w) - self.b) >= 1: # if correct prediction, only margin updated\n dw = 2 * self.lam * self.w\n db = 0\n else:\n dw = 2 * self.lam * self.w - np.dot(x_i, y_i) # if wrong prediction, margin and bias updated\n db = y_i\n\n return dw, db", "def GetBondMasks(i, N, Bonds23, Bonds4):\n j = i + 1\n MinID = i*N - (i+1)*(i+2)/2 + j\n MaxID = MinID + N - j - 1\n b23 = Bonds23[logical_and(Bonds23 >= MinID, Bonds23 <= MaxID)] - MinID + j\n b4 = Bonds4[logical_and(Bonds4 >= MinID, Bonds4 <= MaxID)] - MinID + j\n MaskNot23 = ones(N, bool)\n MaskNot23[b23] = False\n Mask4 = zeros(N, bool)\n Mask4[b4] = True\n Mask4[b23] = False\n return MaskNot23, Mask4", "def getCrossingAngleIC (self):\n \n Axes = []\n \n Axes = [[tmhelix.ICAxis_X,tmhelix.ICAxis_Y,tmhelix.ICAxis_Z] for tmhelix in self.tmhelixmodel_set]\n \n CrossingAngleIC = SetOfVectors([Axes[0], Axes[1] ]) .AngleDEG ()\n \n return", "def get_lambda_without_minus(xi):\n output = np.multiply(1/(2*(xi + TOLERANCE)), (get_sigmoid(xi) - 0.5))\n return output", "def test_pid_ig1():\n d = bivariates['and']\n pid = PID_IG(d, ((0,), (1,)), (2,))\n assert pid[((0,), (1,))] == pytest.approx(0.08283, abs=1e-4)\n assert pid[((0,),)] == pytest.approx(0.22845, abs=1e-4)\n assert pid[((1,),)] == pytest.approx(0.22845, abs=1e-4)\n assert pid[((0, 1),)] == pytest.approx(0.27155, abs=1e-4)", "def get_iperparams(self):\n\t\treturn (self.D, self.K)", "def get_result(self, x):\n return self.i*x", "def calculate_ic(self):\n # dt:\n dt = self.E\n\n # dr:\n dr = np.sqrt(self.E ** 2 - (self.Q + self.L ** 2) / self.r ** 2)\n #print(dr)\n if np.isnan(dr):\n dr = 0\n #dr = self._check_dr_sign(self.alpha)\n\n # dtheta:\n omega = self.Q - self.L ** 2 * (np.cos(self.theta) / np.sin(self.theta)) ** 2\n if omega < 0:\n omega = np.abs(omega)\n dtheta = np.sqrt(omega) / self.r**2\n if self.eta < np.pi / 2:\n dtheta *= -1\n\n # dphi:\n dphi = self.L / (self.r * np.sin(self.theta)) ** 2\n\n return dt, dr, dtheta, dphi", "def compute_bic(kmeans,X):\n # assign centers and labels\n centers = [kmeans.cluster_centers_]\n labels = kmeans.labels_\n #number of clusters\n m = kmeans.n_clusters\n # size of the clusters\n n = np.bincount(labels)\n #size of data set\n N, d = X.shape\n\n #compute variance for all clusters beforehand\n cl_var = (1.0 / (N - m) / d) * sum([sum(distance.cdist(X[np.where(labels == i)], [centers[0][i]], \n 'euclidean')**2) for i in range(m)])\n\n const_term = 0.5 * m * np.log(N) * (d+1)\n\n BIC = np.sum([n[i] * np.log(n[i]) -\n n[i] * np.log(N) -\n ((n[i] * d) / 2) * np.log(2*np.pi*cl_var) -\n ((n[i] - 1) * d/ 2) for i in range(m)]) - const_term\n BIC_clusters =[n[i] * np.log(n[i]) -\n n[i] * np.log(N) -\n ((n[i] * d) / 2) * np.log(2*np.pi*cl_var) -\n ((n[i] - 1) * d/ 2) for i in range(m)]\n return(BIC , BIC_clusters)", "def getIR1() -> int:\n pass", "def ndvi(self,\n img):\n return img.normalizedDifference(['NIR', 'RED']).select([0], 
['NDVI']).multiply(self.scale_factor)", "def sigmoid_derivative(x):\r\n\r\n ### START CODE HERE ### (≈ 2 lines of code)\r\n s = 1.0 /(1 + 1/np.exp(x))\r\n ds = s*(1-s)\r\n ### END CODE HERE ###\r\n\r\n return ds", "def interdependence_xy(Sw):\r\n\r\n Cw = coherence_from_spectral(Sw)\r\n return -np.log(1 - Cw)", "def bond_order(bondxi,\n threshold_single_meso=0.0847,\n # ================================================================\n # threshold_meso_double=0.184,\n #================================================================\n threshold_meso_double=0.0847,\n threshold_double_triple=0.27):\n if bondxi < threshold_single_meso:\n order = '1'\n elif bondxi < threshold_meso_double:\n order = '1.5'\n elif bondxi < threshold_double_triple:\n order = '2'\n else:\n order = '3'\n return order", "def motor_inferencia(x):\n\n # Defino mis operaciones borrosas\n AND = min # Tambien se llama conjuncion o interseccion\n OR = max # Tambien se llama disyuncion o union\n # FUERZA = min # Elijo la conjuncion. Tambien se pueden usar la disyuncion\n\n # --------------------------------------------------------\n # - CALCULO DEL VALOR DE PERTENENCIA DE LOS ANTECEDENTES -\n # --------------------------------------------------------\n\n # Guardo los antecedentes en las variables\n A_MN = []\n A_N = []\n A_Z = []\n A_P = []\n A_MP = []\n\n # Fila 0: P is MN and\n A_MP.append(AND(x[0], x[5])) # V is MN # then F is MP\n A_MP.append(AND(x[0], x[6])) # V is N # then F is MP\n A_MP.append(AND(x[0], x[7])) # V is Z # then F is MP\n A_MP.append(AND(x[0], x[8])) # V is P # then F is MP\n A_MP.append(AND(x[0], x[9])) # V is MP # then F is MP\n\n # Fila 1: P is N and\n A_MN.append(AND(x[1], x[5])) # V is MN # then F is MN\n A_MN.append(AND(x[1], x[6])) # V is N # then F is MN\n A_N.append(AND(x[1], x[7])) # V is Z # then F is N\n A_N.append(AND(x[1], x[8])) # V is P # then F is N\n A_N.append(AND(x[1], x[9])) # V is MP # then F is N\n\n # Fila 2: P is Z and\n A_MN.append(AND(x[2], x[5])) # V is MN # then F is MN\n A_N.append(AND(x[2], x[6])) # V is N # then F is N\n A_Z.append(AND(x[2], x[7])) # V is Z # then F is Z\n A_P.append(AND(x[2], x[8])) # V is P # then F is P\n A_MP.append(AND(x[2], x[9])) # V is MP # then F is MP\n\n # Fila 3: P is P and\n A_P.append(AND(x[3], x[5])) # V is MN # then F is P\n A_P.append(AND(x[3], x[6])) # V is N # then F is P\n A_P.append(AND(x[3], x[7])) # V is Z # then F is P\n A_MP.append(AND(x[3], x[8])) # V is P # then F is MP\n A_MP.append(AND(x[3], x[9])) # V is MP # then F is MP\n\n # Fila 4: P is MP and\n A_MN.append(AND(x[4], x[5])) # V is MN # then F is MN\n A_MN.append(AND(x[4], x[6])) # V is N # then F is MN\n A_MN.append(AND(x[4], x[7])) # V is Z # then F is MN\n A_MN.append(AND(x[4], x[8])) # V is P # then F is MN\n A_MN.append(AND(x[4], x[9])) # V is MP # then F is MN\n\n # ------------------------------------------------------------------------------------------\n # - COMBINACION DE LOS ANTECEDENTES Y RESOLUCION DE LA IMPLICACION -\n # ------------------------------------------------------------------------------------------\n\n # [ F_MN, F_N, F_Z, F_P, F_MP ]\n F = [OR(A_MN), OR(A_N), OR(A_Z), OR(A_P), OR(A_MP)]\n\n return F", "def i1(x):\n return tt.switch(tt.lt(x, 5), x / 2 + x**3 / 16 + x**5 / 384 + x**7 / 18432 +\n x**9 / 1474560 + x**11 / 176947200 + x**13 / 29727129600,\n np.e**x / (2 * np.pi * x)**0.5 * (1 - 3 / (8 * x) + 15 / (128 * x**2) + 315 / (3072 * x**3)\n + 14175 / (98304 * x**4)))", "def indicator_kernel(h: np.ndarray, Xi: np.ndarray, x: np.ndarray) -> 
np.ndarray:\n return (Xi - x) == 0", "def identify_bonds(chosen_atom, atom_list):\n list_of_hydrogens = ['H15', 'H14', 'H13', 'H12', 'H11', 'H10', 'H9', 'H8', 'H7', 'H6', 'H5', 'H4', 'H3', 'H2', 'H1'] \n if ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name != \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 2)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n elif ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name == \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.8)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n else:\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 1.6) and (abs(chosen_atom.y - atom.y) <= 1.6) and (abs(chosen_atom.z - atom.z) <= 1.6))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.6)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n for elements in nearby_atoms:\n if (check_if_no_bond(chosen_atom, elements, bond_list, bond_list_3) == True):\n nearby_atoms.remove(elements)\n if (len(nearby_atoms) == len(identified_bonds)):\n return identified_bonds\n else:\n return []", "def get_gating_probs(self,X):\n\t\tpi = self.gating.predict(X) #p(z_i = k|x_i) (N,K)\n\t\t#pi = np.divide(pi.T, np.sum(pi, axis = 1)).T\n\t\treturn pi", "def sigmoid_derivative(x):\n\n s = sigmoid(x)\n ds = s * (1 - s)\n\n return ds", "def LD_Vx_I(self, x):\n\t\tfor i in range(0, x + 1):\n\t\t\tself.V[i] = self.ram[self.I + i]", "def independent(self):\n return self.x", "def ins_ii(self):\n return self._ins_ii", "def sigmoid_derivative(x):\n return x * (1-x)" ]
[ "0.5771887", "0.5497939", "0.5485339", "0.54537404", "0.5412243", "0.52982736", "0.5246974", "0.5218475", "0.5195119", "0.51869047", "0.5173168", "0.5159012", "0.5122866", "0.51090294", "0.5108821", "0.509033", "0.5054854", "0.5019761", "0.50018966", "0.4988922", "0.49803066", "0.4956813", "0.4956813", "0.49525738", "0.4950849", "0.49423924", "0.49323764", "0.4923232", "0.4895072", "0.48909026", "0.48778164", "0.48776868", "0.48729086", "0.48516488", "0.4848608", "0.48366666", "0.48348626", "0.48237482", "0.48054132", "0.47954518", "0.47865233", "0.4783884", "0.47635752", "0.47593033", "0.47421846", "0.47278947", "0.47266018", "0.47198075", "0.47194892", "0.47140768", "0.4712639", "0.4711068", "0.4706214", "0.4701166", "0.4700246", "0.46970516", "0.46738818", "0.466898", "0.46646968", "0.46619833", "0.4656538", "0.4655175", "0.4654144", "0.46530026", "0.46526814", "0.4652355", "0.4636327", "0.4636327", "0.4632682", "0.46324393", "0.46323863", "0.4629383", "0.4628463", "0.4624273", "0.46218845", "0.46164477", "0.46161175", "0.4610917", "0.46108598", "0.4610225", "0.46093845", "0.46065208", "0.4602791", "0.4597272", "0.45961845", "0.4592216", "0.4588148", "0.4585347", "0.45765698", "0.45693952", "0.4562768", "0.45596942", "0.4559078", "0.45519724", "0.45486388", "0.45466313", "0.45459452", "0.4539436", "0.45358393", "0.4530244" ]
0.50392056
17
Function to identify atoms belonging to a previously defined rigid group.
def framework_crawler(atom, direction, rigid_group_old=None):
    if not rigid_group_old:
        rigid_group = [atom, direction]
    else:
        rigid_group = rigid_group_old
    for atom in get_framework_neighbours(direction):
        if not atom in rigid_group and not atom.element == 'H':
            rigid_group.append(atom)
            framework_crawler(rigid_group[0], atom, rigid_group)
    if not rigid_group_old:
        #=======================================================================
        # print ' Determined rigid group:', [i.name for i in rigid_group]
        #=======================================================================
        return rigid_group
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_group(group):\n # Get the true classification from the longest reads\n true_species = group[group['file'].eq(f'OG_reads_{sample_letter}')]['classification'].iloc[0]\n print(true_species)\n # return a 1 if it's true across the group and 0 if not\n group['positives']= np.where(group['classification']==true_species, 1,0)\n # add our calcualtions to the results dictionary\n for row in group[['positives', 'file']].to_dict(orient=\"records\"):\n positive = row[\"positives\"]\n if positive:\n results[row[\"file\"]][0] += 1\n else:\n results[row[\"file\"]][1] += 1", "def get_free_standins(group):", "def framework_crawler(atom, direction, rigid_group_old=None):\n if not rigid_group_old:\n rigid_group = [atom, direction]\n else:\n rigid_group = rigid_group_old\n for atom in get_framework_neighbors(direction):\n if not atom in rigid_group and not atom.element == 'H':\n rigid_group.append(atom)\n framework_crawler(rigid_group[0], atom, rigid_group)\n if not rigid_group_old:\n #=======================================================================\n # print ' Determined rigid group:', [i.name for i in rigid_group]\n #=======================================================================\n return rigid_group", "def braid_group_action(self):\n G = []\n for c in self:\n c = c.relabel()\n if any(c in g for g in G):\n continue\n G.append(c.braid_group_orbit())\n return G", "def find_calib_group(self, grp):\n if 'calibbit' not in self.keys():\n msgs.error('Calibration groups are not set. First run set_calibration_groups.')\n return self.calib_bitmask.flagged(self['calibbit'].data, grp)", "def detect_conflict(candi_group, prob_group, cls_group,\n roi_feature_group, roi_elmo_feature_group, \n roi_label_group, roi_len_group, roi_char_ids_group, \n roi_word_lengths_group, sen_last_hidden_group,\n left_context_word_group, left_context_len_group, \n right_context_word_group, right_context_len_group):\n roi_feature_nonconf, roi_elmo_feature_nonconf, roi_label_nonconf, roi_len_nonconf = [], [], [], []\n roi_char_ids_nonconf, roi_word_lengths_nonconf, sen_last_hidden_nonconf = [], [], []\n left_context_word_nonconf, left_context_len_nonconf = [], []\n right_context_word_nonconf, right_context_len_nonconf = [], []\n\n keep = []\n orders = np.argsort(-np.array(prob_group))\n while orders.size > 0:\n save_item = list(range(orders.shape[0]))\n\n # Accept the anchor with hightest prob\n highest_idx = orders[0]\n keep.append(highest_idx)\n save_item.remove(0)\n\n if __DELETE_CONF__:\n # delete conflict anchors\n for k in range(1, len(orders)):\n if conflict(candi_group[highest_idx], candi_group[orders[k]]):\n save_item.remove(k)\n\n orders = orders[save_item]\n\n for idx in keep:\n # output probs and labels\n roi_feature_nonconf.append(roi_feature_group[idx])\n roi_elmo_feature_nonconf.append(roi_elmo_feature_group[idx])\n roi_label_nonconf.append(roi_label_group[idx])\n roi_len_nonconf.append(roi_len_group[idx])\n roi_char_ids_nonconf.append(roi_char_ids_group[idx])\n roi_word_lengths_nonconf.append(roi_word_lengths_group[idx])\n sen_last_hidden_nonconf.append(sen_last_hidden_group[idx])\n left_context_word_nonconf.append(left_context_word_group[idx])\n left_context_len_nonconf.append(left_context_len_group[idx])\n right_context_word_nonconf.append(right_context_word_group[idx])\n right_context_len_nonconf.append(right_context_len_group[idx])\n return roi_feature_nonconf, roi_elmo_feature_nonconf, roi_label_nonconf, roi_len_nonconf, roi_char_ids_nonconf, roi_word_lengths_nonconf, sen_last_hidden_nonconf, 
left_context_word_nonconf, left_context_len_nonconf, right_context_word_nonconf, right_context_len_nonconf", "def Group(self) -> _n_5_t_0:", "def Group(self) -> _n_5_t_0:", "def get_group_atoms(self, group_name):\r\n return self.groups[group_name].getAtoms()", "def get_grp(self):\n\n grp = -1\n\n if self.depth > 2:\n\n inp = ri.RhinoInput(self.path[2])\n\n grp = inp.get_no()\n\n return grp", "def axial_correction_group(obj,\n to_parents_origin=False,\n name_prefix=\"\",\n name_postfix=\"_ACGroup#\"):\n obj = get_valid_dag_node(obj)\n\n if name_postfix == \"\":\n name_postfix = \"_ACGroup#\"\n\n ac_group = pm.group(\n em=True,\n n=(name_prefix + obj.name() + name_postfix)\n )\n\n ac_group = pm.parent(ac_group, obj)[0]\n\n pm.setAttr(ac_group + \".t\", [0, 0, 0])\n pm.setAttr(ac_group + \".r\", [0, 0, 0])\n pm.setAttr(ac_group + \".s\", [1, 1, 1])\n\n parent = pm.listRelatives(obj, p=True)\n if len(parent) != 0:\n pm.parent(ac_group, parent[0], a=True)\n else:\n pm.parent(ac_group, w=True)\n\n if to_parents_origin:\n pm.setAttr(ac_group + \".t\", [0, 0, 0])\n pm.setAttr(ac_group + \".r\", [0, 0, 0])\n pm.setAttr(ac_group + \".s\", [1, 1, 1])\n\n pm.parent(obj, ac_group, a=True)\n\n # for joints also set the joint orient to zero\n if isinstance(obj, pm.nodetypes.Joint):\n # set the joint rotation and joint orient to zero\n obj.setAttr('r', (0, 0, 0))\n obj.setAttr('jo', (0, 0, 0))\n\n return ac_group", "def test_whole_group_iselection():\n phil_groups = ncs_group_master_phil.fetch(\n iotbx.phil.parse(phil_str)).extract()\n pdb_inp = iotbx.pdb.input(source_info=None, lines=test_pdb_str_2)\n ncs_obj = ncs.input(hierarchy=pdb_inp.construct_hierarchy(),\n ncs_phil_groups=phil_groups.ncs_group)\n nrgl = ncs_obj.get_ncs_restraints_group_list()\n assert len(nrgl) == nrgl.get_n_groups() == 2\n isel = nrgl[1].whole_group_iselection()\n expected = [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23]\n assert list(isel) == expected\n isel = nrgl[0].whole_group_iselection()\n expected = [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19]\n assert list(isel) == expected", "def test_selection():\n pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_answer_0)\n ncs_obj_phil = ncs.input(\n hierarchy=pdb_inp.construct_hierarchy())\n nrg = ncs_obj_phil.get_ncs_restraints_group_list()\n\n m1 = list(nrg[0].master_iselection)\n c1 = list(nrg[0].copies[0].iselection)\n c2 = list(nrg[0].copies[1].iselection)\n\n assert len(m1) == len(c1) # renumbering\n assert m1 == [0, 1, 2, 3, 4, 5, 6] # 0, 1, X, 3, X, 5, X | 0, 1, 3\n assert c1 == [7, 8, 9, 10, 11, 12, 13] # 7, 8, 9, X, X, 12, X | 4, 5, 7\n assert c2 == [14, 15, 16, 17, 18, 19, 20] # 14, 15, X, 17, X, 19, X | 8, 9, 11\n\n selection1 = flex.size_t([0,1,5,3,100,101])\n selection2 = flex.size_t([0,1,5,3,7,8,9,12,100,101])\n selection3 = flex.size_t([0,1,5,3,7,8,9,12,14,15,19,17,100,101])\n # gone iseqs for selection3: 2,4,6,10,11,13,16,18,20-99\n\n new_nrg = nrg.select(flex.bool(102, selection1))\n # only atoms in master are selected\n mt = list(new_nrg[0].master_iselection)\n c1t = list(new_nrg[0].copies[0].iselection)\n\n assert mt == []\n assert c1t == []\n\n # atoms selected in both master and copies\n new_nrg = nrg.select(flex.bool(102, selection2))\n # only atoms in master are selected\n mt = list(new_nrg[0].master_iselection)\n c1t = list(new_nrg[0].copies[0].iselection)\n\n assert mt == []\n assert c1t == []\n\n new_nrg = nrg.select(flex.bool(102, selection3))\n # only atoms in master are selected\n mt = list(new_nrg[0].master_iselection)\n c1t = 
list(new_nrg[0].copies[0].iselection)\n c2t = list(new_nrg[0].copies[1].iselection)\n\n assert mt == [0, 1, 3], list(mt)\n assert c1t == [4, 5, 7], list(c1t)\n assert c2t == [8, 9, 11], list(c2t)", "def is_group(id):\n return id.startswith('G')", "def pick_grom_group(group, other, selected):\n\treturn Faction(over(group, selected), over(group + other, selected))", "def __make_group_by_atom(self, group_name, name_list):\r\n pass", "def getGroup(self, resname, atomname):\n group = \"\"\n if resname in self.map:\n resid = self.map[resname]\n if resid.hasAtom(atomname):\n atom = resid.atoms[atomname]\n group = atom.group\n return group", "def __group_alt_atoms__(self, atoms):\n def ordering_key(atoms):\n return atoms[0].alt_id\n alt_ids = coll.defaultdict(list)\n for atom in atoms:\n alt_ids[atom.alt_id].append(atom)\n\n if len(alt_ids) == 1:\n return list(alt_ids.values())\n\n if None in alt_ids:\n common = alt_ids.pop(None)\n for alt_id, specific_atoms in list(alt_ids.items()):\n for common_atom in common:\n copied = copy.deepcopy(common_atom)\n copied.alt_id = alt_id\n specific_atoms.append(copied)\n\n return sorted(list(alt_ids.values()), key=ordering_key)", "def _propose_atoms_in_order(self, atom_group):\n atom_torsions= []\n logp = []\n assert len(atom_group) == len(set(atom_group)), \"There are duplicate atom indices in the list of atom proposal indices\"\n while len(atom_group) > 0:\n #initialise an eligible_torsions_list\n eligible_torsions_list = list()\n\n for atom_index in atom_group:\n\n # Find the shortest path up to length four from the atom in question:\n shortest_paths = nx.algorithms.single_source_shortest_path(self._residue_graph, atom_index, cutoff=4)\n\n # Loop through the destination and path of each path and append to eligible_torsions_list\n # if destination has a position and path[1:3] is a subset of atoms with positions\n for destination, path in shortest_paths.items():\n\n # Check if the path is length 4 (a torsion) and that the destination has a position. 
Continue if not.\n if len(path) != 4 or destination not in self._atoms_with_positions_set:\n continue\n\n # If the last atom is in atoms with positions, check to see if the others are also.\n # If they are, append the torsion to the list of possible torsions to propose\n if set(path[1:3]).issubset(self._atoms_with_positions_set):\n eligible_torsions_list.append(path)\n\n assert len(eligible_torsions_list) != 0, \"There is a connectivity issue; there are no torsions from which to choose\"\n #now we have to randomly choose a single torsion\n ntorsions = len(eligible_torsions_list)\n random_torsion_index = np.random.choice(range(ntorsions))\n random_torsion = eligible_torsions_list[random_torsion_index]\n\n #append random torsion to the atom_torsions and remove source atom from the atom_group\n chosen_atom_index = random_torsion[0]\n first_old_atom_index = random_torsion[1]\n atom_torsions.append(random_torsion)\n atom_group.remove(chosen_atom_index)\n\n #add atom to atoms with positions and corresponding set\n self._atoms_with_positions_set.add(chosen_atom_index)\n\n #add a bond from the new to the previous torsion atom in the _reference_connectivity_graph\n self._reference_connectivity_graph.add_edge(chosen_atom_index, first_old_atom_index)\n\n #add the log probability of the choice to logp\n logp.append(np.log(1./ntorsions))\n\n # Ensure that logp is not ill-defined\n assert len(logp) == len(atom_torsions), \"There is a mismatch in the size of the atom torsion proposals and the associated logps\"\n\n return atom_torsions, logp", "def groupsChanged(self):\n # Get the list of groups for the present user according to\n # the checklist.\n nglist = []\n for r in self.liststore:\n if (r[1] and (r[0] != self.gidnm)):\n nglist.append(r[0])\n if (gui.getUserGroups(gui.currentUser) != nglist):\n return nglist\n else:\n return None", "def _get_ring_nodes(m, namin=3, namax=9, remove_redudant=T):\n # first search for rings\n sets = []\n for i in range(namin, namax+1):\n #if i in [3,4,5]:\n pat_i = '*~1' + '~*'*(i-2) + '~*1'\n #else:\n # pat_i = '*:1' + ':*'*(i-2) + ':*1'\n Qi = Chem.MolFromSmarts( pat_i )\n for tsi in m.GetSubstructMatches(Qi):\n set_i = set(tsi)\n if set_i not in sets:\n sets.append( set(tsi) )\n if remove_redudant:\n # now remove those rings that are union of smaller rings\n n = len(sets)\n sets_remove = []\n ijs = itl.combinations( list(range(n)), 2 )\n sets_u = []\n for i,j in ijs:\n set_ij = sets[i].union( sets[j] )\n if (set_ij in sets) and (set_ij not in sets_remove):\n sets_remove.append( set_ij )\n sets_u = cim.get_compl(sets, sets_remove)\n else:\n sets_u = sets\n return sets_u", "def buGroup(self):\n return self.xsID[1]", "def group(seq):\n pass # replace with your solution", "def identify_expressed_gRNA_families(gRNAs, mRNAs, init_seq_len):\n gRNA_families = {'family_no':[], 'family_end':[], 'family_id':[]}\n strand_name = {'coding':'', 'template':'t'}\n index = []\n\n gRNAs['gene_mRNA_end'] = gRNAs['mRNA_end']+gRNAs['rel_pos'].apply(lambda x: 0 if x is pd.NA else x)\n gRNAs['gene_mRNA_end'] = gRNAs['gene_mRNA_end'].astype('Int32')\n gRNAs['tmp'] = gRNAs.apply(lambda x: x['cassette_label']+strand_name[x['strand']], axis=1)\n\n for mRNA_name, mRNA in sorted(mRNAs.items()):\n # get all gRNAs with an init_pos for this mRNA\n # nonexpressed gRNAs can be in an editing group if they have a init_seq. 
this is because\n # they have transcripts in the init_position but not enough to be called expressed\n # gRNAs without an init_seq have no transcripts within the initiation site\n # these are added to a group below\n mask1 = gRNAs['mRNA_name'] == mRNA_name\n mask2 = gRNAs['init_seq'].notnull()\n g = gRNAs[mask1 & mask2]\n\n # positions where the start of expressed gRNAs align to mRNA\n a = np.zeros(mRNA['length']+100)\n i = np.array(g['gene_mRNA_end']-1, dtype=int)\n for ii in range(init_seq_len):\n a[i-ii] = 1\n a = ''.join([str(int(i)) for i in a])\n g_end = 'gene_mRNA_end'\n\n tmp_g = []\n family_no = 0\n\n # find regions where groups of gRNAs anchor to mRNA starting from 3' end of edited mRNA\n for m in re.finditer('1+', a):\n s, e = m.start(0), m.end(0)\n # get all gRNAs that anchor at this region\n anchor_group = g[(g[g_end] >= s) & (g[g_end] <= e)]\n\n if len(anchor_group) == 0:\n continue\n\n # for each cassette position of these gRNAs create a dictionary of cassette position and editing position\n cas_pos = {}\n for _, gRNA in anchor_group.iterrows():\n pos = gRNA['tmp']\n if pos not in cas_pos:\n cas_pos[pos] = gRNA[g_end]\n cas_pos[pos] = max(gRNA[g_end], cas_pos[pos])\n\n # group gRNAs with the same cassette position ordered by editing position\n for pos, end in sorted(cas_pos.items(), key=lambda kv: kv[1]):\n group = anchor_group.query('tmp == @pos')\n index.extend(group.index.values)\n gRNA_families['family_no'].extend([family_no]*len(group))\n gRNA_families['family_end'].extend([end]*len(group))\n gRNA_families['family_id'].extend([f'{mRNA_name}-{pos}-{int(end)}']*len(group))\n tmp_g.append((family_no, end, f'{mRNA_name}-{pos}-{int(end)}'))\n family_no += 1\n\n # gRNAs without an init_seq\n mask2 = gRNAs['init_seq'].isnull()\n unknown = gRNAs[mask1 & mask2]\n # for each unknown gRNA\n for idx, gRNA in unknown.iterrows():\n # search for a group that ends just after mRNA_end of this unknown gRMA\n for f_no, gene_mRNA_end, family_id in sorted(tmp_g, key=itemgetter(1)):\n [g_mRNA_name, g_pos, g_end] = family_id.split('-')\n if g_mRNA_name == mRNA_name and gRNA['mRNA_end']-1 <= gene_mRNA_end and gRNA['cassette_label'] == g_pos:\n index.append(idx)\n gRNA_families['family_no'].append(f_no)\n gRNA_families['family_end'].append(gene_mRNA_end)\n gRNA_families['family_id'].append(f'{family_id}')\n break\n else:\n # no suitable gRNA found, so make a unique family for this non-expressed gRNA \n index.append(idx)\n gRNA_families['family_no'].append(family_no)\n gRNA_families['family_end'].append(gRNA['mRNA_end'])\n gRNA_families['family_id'].append(f'{mRNA_name}-{gRNA[\"cassette_label\"]}-{gRNA[\"mRNA_end\"]}')\n family_no += 1\n\n gRNAs = gRNAs.drop(['tmp'], axis=1)\n gRNAs = gRNAs.join(pd.DataFrame(gRNA_families, index=index))\n gRNAs['family_no'] = gRNAs['family_no'].astype('Int64')\n gRNAs['family_end'] = gRNAs['family_end'].astype('Int64')\n return gRNAs", "def _ExpectationPartOfNonRemovableGroup(\n current_expectation: data_types.Expectation,\n group_to_expectations: Dict[str, Set[data_types.Expectation]],\n expectation_to_group: Dict[data_types.Expectation, str],\n removable_expectations: List[data_types.Expectation]):\n # Since we'll only ever be using this to check for inclusion, use a set\n # for efficiency.\n removable_expectations = set(removable_expectations)\n\n group_name = expectation_to_group.get(current_expectation)\n if not group_name:\n return False\n\n all_expectations_in_group = group_to_expectations[group_name]\n return not (all_expectations_in_group <= 
removable_expectations)", "def groups(self):\n\n\t\tprint \"completed minimization\"\n\t\tcopy(self.rootdir+'counterions-minimized.gro',self.rootdir+'system.gro')\n\t\tcopy(self.rootdir+'counterions.top',self.rootdir+'system.top')\n\t\tif self.simscale == 'aamd': grouptype = 'standard'\n\t\tif self.simscale == 'cgmd': grouptype = 'cgmd_water'\n\t\tself.grouping(grouptype=grouptype)", "def _reset_group_ids(self, start_id: int):\n\n min_group_id = self.min_group_id()\n if min_group_id is not None:\n add_id = start_id - min_group_id\n self.group_ids = [i + add_id if i is not None else i for i in self.group_ids]\n self.groups = [Skeleton._group_modify_id(group, id_modifier=lambda x: x + add_id) for group in self.groups]", "def check_order(racers):\n racers_orderd = sorted(racers, key=lambda x: (x.group, x.time))\n a = 1\n b = 1\n for r in racers_orderd:\n if r.group == \"ALL\":\n r.rank = str(a)\n a += 1\n else:\n r.rank = str(b)\n b += 1\n return racers_orderd", "def __contains__(self, i):\n if not isinstance(i, FreeGroupElement):\n return False\n group = i.group\n return self == group", "def which_group(list_of_elements):\n if is_Matrix(list_of_elements[-1]):\n R = PolynomialRing(list_of_elements[-1].base_ring(),'z')\n z = R.gen(0)\n G=[(t[0,0]*z+t[0,1])/(t[1,0]*z+t[1,1]) for t in list_of_elements]\n else:\n G = list_of_elements\n\n n = ZZ(len(G))\n\n # invalid input\n if n == 0:\n raise ValueError(\"group must have at least one element\")\n\n # define ground field and ambient function field\n rational_function = G[-1]\n\n if rational_function.parent().is_field():\n K = rational_function.parent()\n R = K.ring()\n else:\n R = rational_function.parent()\n K = R.fraction_field()\n\n z = R.gen(0)\n p = K.characteristic()\n\n # factor n = mp^e; set e = 0 and m = n if p = 0 (Sage sets 0^0 = 1)\n if p > 0:\n m = n.prime_to_m_part(p)\n e = ZZ(n/m).exact_log(p)\n else:\n m = n\n e = 0\n\n # Determine if G is cyclic or dihedral.\n # This determines the maximal cyclic subgroup and the maximal cyclic\n # p-regular subgroup. Algorithm terminates if the order of this subgroup agrees with\n # the order of the group.\n max_reg_cyclic = [1, z, [z]] # initialize order of cyclic p-regular subgroup and generator\n discard = [] # list of elements already considered\n\n for g in G:\n if g not in discard:\n H = [g]\n for i in range(n-1):\n h = g(H[-1])\n H.append(h)\n H = list(set(H))\n if len(H) == n:\n return 'Cyclic of order {0}'.format(n)\n if len(H) > max_reg_cyclic[0] and gcd(len(H), p) != p:\n max_reg_cyclic = [len(H), g, H]\n discard = list(set(discard +H)) # adjoin all new elements to discard\n\n n_reg = max_reg_cyclic[0]\n # Test for dihedral subgroup. A subgroup of index 2 is always normal, so the\n # presence of a cyclic subgroup H of index 2 indicates the group is either\n # H x Z/2Z or dihedral. The former occurs only if H has order 1 or 2, both of\n # which are dihedral.\n if 2*n_reg == n:\n for g in G:\n if g not in max_reg_cyclic[2]:\n return 'Dihedral of order {0}'.format(n)\n # Check the p-irregular cases. There is overlap in these cases when p^e = 2,\n # which is dihedral and so already dealt with above. By the classification theorem,\n # these are either p-semi-elementary, PGL(2,q), PSL(2,q), or A_5 when p=3. 
The latter\n # case is already covered by the remaining sporadic cases below.\n if e > 0:\n if n_reg == m: # p-semi-elementary\n return '{0}-semi-elementary of order {1}'.format(p, n)\n if n_reg == m / (p**e - 1) and m == p**(2*e) - 1: # PGL(2)\n return 'PGL(2,{0})'.format(p**e)\n if n_reg == m / (p**e - 1) and m == (1/2)*(p**(2*e) - 1): # PSL(2)\n return 'PSL(2,{0})'.format(p**e)\n\n # Treat sporadic cases\n if n == 12:\n return ['A_4']\n elif n == 24:\n return ['S_4']\n else:\n return ['A_5']", "def last_group(self):\n return BaseLayer.groups-1", "def get_group_keys(self):\r\n if len(self.conflicting_exclusives) == 0:\r\n return [\"<none>\"]\r\n else:\r\n return self.key_to_targets.keys()", "def comm_group(self):\n return self._gcomm", "def slotAdd(self):\n seq = Group.Sequencer()\n groups = seq.getGlobalGroups()\n i = 1\n while i != -1:\n if not GROUP_PREFIX+str(i) in groups:\n seq.slotAddGlobalGroup(GROUP_PREFIX+str(i))\n i = -1\n else:\n i += 1", "def getRC(self, group):\n\n if self.arrangement is None:\n raise ValueError(\"specifying a group requires an arrangment dictionary\")\n # look for the group label in the arrangement dicts\n for c, colname in enumerate(self.arrangement.keys()):\n if group in self.arrangement[colname]:\n # print ('column name, column: ', colname, self.arrangement[colname])\n # print ('group: ', group)\n r = self.arrangement[colname].index(\n group\n ) # get the row position this way\n return self.axarr[r, c]\n print(\"Group {:s} not in the arrangement\".format(group))\n return None\n\n # sizer = {'A': {'pos': [0.08, 0.22, 0.50, 0.4]}, 'B1': {'pos': [0.40, 0.25, 0.60, 0.3]}, 'B2': {'pos': [0.40, 0.25, 0.5, 0.1]},\n # 'C1': {'pos': [0.72, 0.25, 0.60, 0.3]}, 'C2': {'pos': [0.72, 0.25, 0.5, 0.1]},\n # 'D': {'pos': [0.08, 0.25, 0.1, 0.3]}, 'E': {'pos': [0.40, 0.25, 0.1, 0.3]}, 'F': {'pos': [0.72, 0.25, 0.1, 0.3]},\n # }", "def what_is(self, _id):\n for g in self.groups:\n if _id in self.h_group_ids[g]:\n return g\n return None", "def get_group_label(group):\n indices = [a.index for a in group.atoms]\n names = [a.name for a in group.atoms]\n label = []\n for i in range(len(indices)):\n label.append('%d/%s' % (indices[i], names[i]))\n return(' '.join(label))", "def group_group_collide(missiles, group):\n remove1 = set()\n remove2 = set()\n collisions = 0\n for missile in missiles:\n for obj in group:\n if missile.collide(obj):\n collisions += 1\n missile.dead = True\n obj.dead = True\n obj.after_death()\n remove1.add(missile)\n remove2.add(obj)\n missiles.difference_update(remove1)\n group.difference_update(remove2)\n return (missiles, group, collisions)", "def get_umi_groups(bam_file, edit_distance):\n all_umis = set([])\n with pysam.AlignmentFile(bam_file, \"rb\", check_sq=False) as bam_iter:\n for rec in bam_iter:\n all_umis.add(rec.get_tag(\"RX\"))\n print(len(all_umis))\n grs = []\n for i, cur_umi in enumerate(sorted(all_umis)):\n if i % 1000 == 0:\n print(i, len(grs))\n if edit_distance == 0:\n grs.append([cur_umi])\n else:\n for g in grs:\n if any(Levenshtein.distance(cur_umi, w) <= edit_distance for w in g):\n g.append(cur_umi)\n break\n else:\n grs.append([cur_umi])\n out = {}\n for cur_gr in grs:\n base = Levenshtein.median(cur_gr)\n for gr in cur_gr:\n out[gr] = base\n return out", "def select_point_from_group(group, pts):\n\n p_in_group = []\n\n for i in range(len(group)):\n p_in_group.append(pts[group[i]])\n \n return p_in_group", "def select_point_from_group(group, pts):\n\n p_in_group = []\n\n for i in range(len(group)):\n 
p_in_group.append(pts[group[i]])\n \n return p_in_group", "def test_get_ancestors_for_device_groups(self):\n pass", "def find_best_group(mdl_array, lpx, tcost_bar, contract_size, max_group_pos=20, exclude_gp=None, clip_maxpos=True, best_pos=None, best_shp=0):\n ndays, n = lpx.shape\n mcnt = len(mdl_array)\n best_gp = None\n if best_pos is None:\n best_pos = np.zeros((ndays,n))\n\n best_shp0 = best_shp\n for m, mdl_dict in enumerate(mdl_array):\n pick_array = mdl_dict['pick']\n for p, pick in enumerate(pick_array):\n if exclude_gp is not None and (m,p) in exclude_gp:\n continue\n (k0,k1) = pick[0]\n pos0 = best_pos.copy()\n pos0[:,k0:k1+1]+=mdl_dict['pos_scale'][:,k0:k1+1]\n pos00, pnl0, shp0 = pnl_from_pos_tcost(lpx, np.r_[pos0.flatten(),0], tcost_bar, max_group_pos, contract_size, clip_maxpos=clip_maxpos)\n\n # recover the original k0k1 position. i.e. from model_dict['pos_scale']\n pos0-=best_pos\n if shp0>best_shp0:\n print(' best shp update - ', m, p, '(%d,%d) shp:%f pnl:%f'%(k0,k1,shp0,np.sum(pnl0)))\n best_shp0 = shp0\n best_gp0 = (m,p)\n best_pos0 = pos0.copy()\n best_pnl = pnl0.copy()\n dshp = best_shp0-best_shp\n if dshp == 0:\n print('Nothing beats the given, fold')\n return None, None, None, None\n\n print('PICK: ', best_gp0, best_shp0, np.sum(best_pnl))\n return best_gp0, best_pos0, best_pnl, best_shp0", "def make_set(g, nodes):\n s = Set()\n names = nodes['names']\n for ii,name in enumerate(names):\n \"\"\" \n We will assume node is entirely contained\n in group if they have one atom in common\n \"\"\" \n atoms = mdn.dic2list(nodes[name]['atoms'])\n atom0 = atoms[0]\n if (atom0 in mdn.dic2list(g['atoms'])):\n s.add(ii)\n return s", "def _keys_in_groupby(move):\n return (move.picking_id, move.product_id.responsible_id)", "def has_rank_improved(self, previous_matchday_group_standing):\n return \\\n self.rank < previous_matchday_group_standing.rank and \\\n self.played_games > previous_matchday_group_standing.played_games", "def find_frame_calib_groups(self, row):\n return self.calib_bitmask.flagged_bits(self['calibbit'][row])", "def group_is_surrounded(group, board):\n if group_adjacents(group, board, filter_by=\"None\"):\n return False\n else:\n return True", "def _confirm_group(cls):\n if cls.GROUP_NAME in bpy.data.objects:\n return\n #Backup current selection\n selection = ObjectSelection()\n #Create empty object\n bpy.ops.object.empty_add()\n new_group = bpy.context.selected_objects[0]\n new_group.name = cls.GROUP_NAME\n new_group.hide = True\n #Restore selection\n selection.restore()", "def test_get_ancestors_for_device_group(self):\n pass", "def update_from(self, grp_names):\n import GEOM, SMESH\n mesh_types = {\n GEOM.VERTEX : SMESH.NODE,\n GEOM.EDGE : SMESH.EDGE,\n GEOM.WIRE : SMESH.EDGE,\n GEOM.FACE : SMESH.FACE,\n GEOM.SHELL : SMESH.FACE,\n GEOM.SOLID : SMESH.VOLUME,\n GEOM.COMPSOLID : SMESH.VOLUME,\n }\n smesh = self.get_smesh()\n\n\n smesh_grps_MA = []\n smesh_grps_NO = []\n for grp in smesh.GetGroups() :\n if str(grp.GetType()) == 'NODE' :\n smesh_grps_NO.append(grp.GetName())\n else :\n smesh_grps_MA.append(grp.GetName())\n\n print smesh_grps_MA,smesh_grps_NO\n done = False\n for geom in self.give_geom().get_children():\n grp_name = geom.read_name()\n #if grp_name in smesh_grps:\n # continue\n #Modif Fournier\n print grp_name\n if grp_name in grp_names[0]:\n if grp_name in smesh_grps_MA:\n pass\n else :\n mesh_type = mesh_types.get(geom.get_shape_type())\n if mesh_type:\n #smesh.CreateGroup(mesh_type, grp_name)\n 
smesh.CreateGroupFromGEOM(mesh_type,grp_name,geom.get_sgeom())\n done = True\n if grp_name in grp_names[1]:\n if grp_name in smesh_grps_NO:\n continue\n #smesh.CreateGroup(SMESH.NODE,grp_name)\n smesh.CreateGroupFromGEOM(SMESH.NODE,grp_name,geom.get_sgeom())\n done = True\n return done", "def pid_gid(p):\n return (p['iOrder'], p['iGroup'])", "def group_top(group: pygame.sprite.Group, rect: pygame.Rect):\r\n same_centerx_lower_centery = filter(lambda sprt: sprt.rect.centerx == rect.centerx\r\n and sprt.rect.centery >= rect.centery, group)\r\n return min(map(lambda sprt: sprt.rect.top, same_centerx_lower_centery), default=RESOLUTION[1])", "def space_group_irreps(self, *k: Array) -> Array:\n k = _ensure_iterable(k)\n # Wave vectors\n big_star_Cart = np.tensordot(self.point_group_.matrices(), k, axes=1)\n big_star = self.lattice.to_reciprocal_lattice(big_star_Cart) * (\n 2 * pi / self.lattice.extent\n )\n # Little-group-irrep factors\n # Conjugacy_table[g,p] lists p^{-1}gp, so point_group_factors[i,:,p]\n # of irrep #i for the little group of p(k) is the equivalent\n # Phase factor for non-symmorphic symmetries is exp(-i w_g . p(k))\n point_group_factors = self._little_group_irreps(k, divide=True)[\n :, self.point_group_.conjugacy_table\n ] * np.exp(\n -1j\n * np.tensordot(\n self.point_group_.translations(), big_star_Cart, axes=(-1, -1)\n )\n )\n # Translational factors\n trans_factors = []\n for axis in range(self.lattice.ndim):\n n_trans = self.lattice.extent[axis] if self.lattice.pbc[axis] else 1\n factors = np.exp(-1j * np.outer(np.arange(n_trans), big_star[:, axis]))\n shape = (\n [1] * axis\n + [n_trans]\n + [1] * (self.lattice.ndim - 1 - axis)\n + [len(self.point_group_)]\n )\n trans_factors.append(factors.reshape(shape))\n trans_factors = reduce(np.multiply, trans_factors).reshape(\n -1, len(self.point_group_)\n )\n\n # Multiply the factors together and sum over the \"p\" PGSymmetry axis\n # Translations are more major than point group operations\n result = np.einsum(\n \"igp, tp -> itg\", point_group_factors, trans_factors\n ).reshape(point_group_factors.shape[0], -1)\n return prune_zeros(result)", "def get_conflicting_element(self, new_element):\n\n for element in self._list:\n \n if (element.no == new_element.no) and (element.grp == new_element.grp):\n \n return element\n \n return None", "def sort_groups(x, group_order):\n for i, g in enumerate(group_order):\n if g == x:\n return i + 1", "def get_group_formula(composition):\n new_comp_dict = defaultdict(float)\n\n for e in composition.elements:\n new_comp_dict[e.group] += composition[e]\n\n new_comp = Composition(new_comp_dict).reduced_composition\n\n form = \"\"\n sorted_elements = sorted(new_comp.elements, key=lambda x: x.Z)\n\n for x in sorted_elements:\n amt = new_comp[x] if new_comp[x] != int(new_comp[x]) else int(new_comp[x])\n l = x.Z\n form += \"({}){}-\".format(l, amt)\n\n return form[0:-1]", "def get_nested_groups_names(group):\n return (\n criterion.findtext(\"value\")\n for criterion in group.findall(\"criteria/criterion\") if\n criterion.findtext(\"name\") in (\"Computer Group\", \"Mobile Device Group\")\n and criterion.findtext(\"search_type\") == \"member of\")", "def has_rank_changed(self, previous_matchday_group_standing):\n return \\\n self.rank != previous_matchday_group_standing.rank and \\\n self.played_games > previous_matchday_group_standing.played_games", "def moc_group(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"moc_group\")", "def moc_group(self) -> Optional[pulumi.Input[str]]:\n 
return pulumi.get(self, \"moc_group\")", "def group_serial(self):\n return self.structure.group_serial[self.mask]", "def same_group(self,i,j):\n if self.group_number(i) == self.group_number(j):\n return True\n else:\n return False", "def give_group_key(self, mesh, grp_name):\n grps = self._grps\n for key in grps:\n if grp_name in grps[key].find_groups(mesh):\n return key\n else:\n mess = \"Group '%s' not found on the mesh '%s'\"\n raise ValueError(mess % (grp_name, mesh.read_name()))", "def build_group(similarities,group1,group2,Paire,NBmatch,count):\n groupBuilt=[]\n event=[]\n #on ajoute la liste des evenements\n for x in count :\n event.append(x)\n groupBuilt.append(event)\n groupBuilt.append(NBmatch)\n #on ajoute toutes les paires de la premiere chronique\n for p in group1[2:] :\n groupBuilt.append(p)\n #on enleve les paires communes aux deux de la deuxieme chronique \n for p in similarities:\n group2.pop(group2.index(p))\n #on ajoute les restantes : celles uniques a la deuxieme chronique\n for p in group2[2:] :\n groupBuilt.append(p)\n #on ajoute la paire qui les relie\n groupBuilt.append(Paire)\n return groupBuilt", "def groupTrajectories(self, dt = 100)->None:#4 * 30)->None:\r\n for i, p1 in enumerate(self._analyzer.activePeople):\r\n for j, p2 in enumerate(self._analyzer.activePeople):\r\n if (i > j) and (p1 not in p2.inGroupWith):\r\n if ((len(p1.coordinates) >= dt) and (len(p2.coordinates) >= dt)):\r\n in_group = True\r\n for k in range(dt):\r\n if ((p1.coordinates[-k] != None) and (p2.coordinates[-k] != None) and (p1.coordinates[-k].DistanceFrom(p2.coordinates[-k]) > self._minDist)):\r\n in_group = False\r\n if in_group:\r\n p1.inGroupWith.append(p2)\r\n p2.inGroupWith.append(p1)", "def at_least_a_group(exp, mesh, mod):\n is_valid = True\n if not exp.find_groups(mesh):\n mess = \"At least a group needs to be defined on the selected object\"\n mod.launch(GC.ERROR, mess)\n is_valid = False\n return is_valid", "def participants_group_name(self):\n return self.short_name+\"_participants\"", "def test_group(self):\n # leave out particle 0\n group = hoomd.group.tags(1,2)\n\n # compute forces\n f = azplugins.restrain.plane(group=group, point=(0,0,0), normal=(1,0,0), k=2.0)\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, ( 0.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 6.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 0.)\n self.assertAlmostEqual(f.forces[1].energy, 1.)\n self.assertAlmostEqual(f.forces[2].energy, 9.)\n np.testing.assert_array_almost_equal(f.forces[0].virial, (0,0,0,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].virial, (-2.,0,4.,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].virial, (12.,0,0,0,0,0))", "def is_potential_group(self, player: int, row: int, col: int, row_diff: int, col_diff: int):\n opponent = 1 - player\n for _ in range(4):\n square = Square(row, col)\n if not self.is_valid(square):\n return False\n if self.state[opponent][row][col]:\n # If there is a token that belongs to the opponent in this group,\n # then this group is not a potential group that belongs to the given player.\n return False\n row, col = row + row_diff, col + col_diff\n return True", "def test_3():\n h = iotbx.pdb.input(source_info=None, lines=test_pdb_3).construct_hierarchy()\n asc = h.atom_selection_cache()\n ncs_inp = iotbx.ncs.input(\n hierarchy=h,\n params=ncs_pars.ncs_search)\n ncs_groups = ncs_inp.get_ncs_restraints_group_list()\n assert 
len(ncs_groups) == 1\n # group 1\n assert ncs_groups[0].master_iselection.all_eq(\n asc.selection(string = \"chain A\").iselection())\n g1_c = ncs_groups[0].copies\n assert len(g1_c)==1\n assert g1_c[0].iselection.all_eq(\n asc.selection(string = \"chain B\").iselection())", "def get_parent_assigned_group(context):\n # Are we in add form ?\n if not context.REQUEST.get('PATH_INFO', '/').split('/')[-1].startswith('++add++'):\n return None\n if base_hasattr(context, 'assigned_group') and context.assigned_group:\n return context.assigned_group\n return None", "def slotGroupEdit(self):\n dialog = GroupDialog(self)\n if dialog.exec_loop() == QDialog.Accepted:\n if dialog.group_id != None:\n # set group\n self.sampleGroup.globalGroupId = dialog.group_id\n self.groupLabel.setText(dialog.group_id)\n else:\n # ungroup\n self.sampleGroup.globalGroupId = None\n self.groupLabel.setText('Not\\nGrouped')\n self.emit(PYSIGNAL('groupChanged'), (self,))", "def test_4():\n h = iotbx.pdb.input(source_info=None, lines=test_pdb_4).construct_hierarchy()\n asc = h.atom_selection_cache()\n ncs_inp = iotbx.ncs.input(\n hierarchy=h,\n params=ncs_pars.ncs_search)\n ncs_groups = ncs_inp.get_ncs_restraints_group_list()\n assert len(ncs_groups) == 1\n # group 1\n assert ncs_groups[0].master_iselection.all_eq(\n asc.selection(string = \"chain A\").iselection())\n g1_c = ncs_groups[0].copies\n assert len(g1_c)==1\n assert g1_c[0].iselection.all_eq(\n asc.selection(string = \"chain B\").iselection())", "def get_group(self):\n\t\treturn self.variables.get('group')", "def GetAncestorGroups(self):\n return [node for node in self.GetAncestors() if node.IsGroup()]", "def restricted_automorphism_group(self):\n if '_restricted_automorphism_group' in self.__dict__:\n return self._restricted_automorphism_group\n\n from sage.groups.perm_gps.permgroup import PermutationGroup\n\n if self.field() is QQ:\n def rational_approximation(c):\n return c\n\n else: # self.field() is RDF\n c_list = []\n def rational_approximation(c):\n # Implementation detail: Return unique integer if two\n # c-values are the same up to machine precision. But\n # you can think of it as a uniquely-chosen rational\n # approximation.\n for i,x in enumerate(c_list):\n if self._is_zero(x-c):\n return i\n c_list.append(c)\n return len(c_list)-1\n \n # The algorithm identifies the restricted automorphism group\n # with the automorphism group of a edge-colored graph. The\n # nodes of the graph are the V-representation objects. If all\n # V-representation objects are vertices, the edges are\n # labelled by numbers (to be computed below). Roughly\n # speaking, the edge label is the inner product of the\n # coordinate vectors with some orthogonalization thrown in\n # [BSS].\n def edge_label_compact(i,j,c_ij):\n return c_ij\n\n # In the non-compact case we also label the edges by the type\n # of the V-representation object. 
This ensures that vertices,\n # rays, and lines are only permuted amongst themselves.\n def edge_label_noncompact(i,j,c_ij):\n return (self.Vrepresentation(i).type(), c_ij, self.Vrepresentation(j).type())\n\n if self.is_compact():\n edge_label = edge_label_compact\n else:\n edge_label = edge_label_noncompact\n\n # good coordinates for the V-representation objects\n v_list = []\n for v in self.Vrepresentation():\n v_coords = list(self._affine_coordinates(v))\n if v.is_vertex():\n v_coords = [1]+v_coords\n else:\n v_coords = [0]+v_coords\n v_list.append(vector(v_coords))\n\n # Finally, construct the graph\n Qinv = sum( v.column() * v.row() for v in v_list ).inverse()\n\n # Was set to sparse = False, but there is a problem with Graph\n # backends. It should probably be set back to sparse = False as soon as\n # the backends are fixed.\n G = Graph(sparse=True)\n for i in range(0,len(v_list)):\n for j in range(i+1,len(v_list)):\n v_i = v_list[i]\n v_j = v_list[j]\n c_ij = rational_approximation( v_i * Qinv * v_j )\n G.add_edge(i,j, edge_label(i,j,c_ij))\n\n group, node_dict = G.automorphism_group(edge_labels=True, translation=True)\n\n # Relabel the permutation group\n perm_to_vertex = dict( (i,v+1) for v,i in node_dict.items() )\n group = PermutationGroup([ [ tuple([ perm_to_vertex[i] for i in cycle ])\n for cycle in generator.cycle_tuples() ]\n for generator in group.gens() ])\n\n self._restricted_automorphism_group = group\n return group", "def _local_search(self):\n\n # Set occupancies of rigid cluster and its direct neighboring atoms to\n # 1 for clash detection and MIQP\n selection = self.ligand._selection\n self.ligand._active[selection] = True\n center = self.ligand.coor[self._cluster].mean(axis=0)\n new_coor_set = []\n new_bs = []\n for coor, b in zip(self._coor_set, self._bs):\n self.ligand._coor[selection] = coor\n self.ligand._b[selection] = b\n rotator = GlobalRotator(self.ligand, center=center)\n for rotmat in RotationSets.get_local_set():\n rotator(rotmat)\n translator = Translator(self.ligand)\n iterator = itertools.product(\n *[np.arange(*trans) for trans in self._trans_box]\n )\n for translation in iterator:\n translator(translation)\n new_coor = self.ligand.coor\n if self.options.remove_conformers_below_cutoff:\n values = self.xmap.interpolate(new_coor)\n mask = self.ligand.e != \"H\"\n if np.min(values[mask]) < self.options.density_cutoff:\n continue\n if self.options.external_clash:\n if not self._cd() and not self.ligand.clashes():\n if new_coor_set:\n delta = np.array(new_coor_set) - np.array(new_coor)\n if (\n np.sqrt(\n min(np.square((delta)).sum(axis=2).sum(axis=1))\n )\n >= self.options.rmsd_cutoff\n ):\n new_coor_set.append(new_coor)\n new_bs.append(b)\n else:\n new_coor_set.append(new_coor)\n new_bs.append(b)\n elif not self.ligand.clashes():\n if new_coor_set:\n delta = np.array(new_coor_set) - np.array(new_coor)\n if (\n np.sqrt(min(np.square((delta)).sum(axis=2).sum(axis=1)))\n >= self.options.rmsd_cutoff\n ):\n new_coor_set.append(new_coor)\n new_bs.append(b)\n else:\n new_coor_set.append(new_coor)\n new_bs.append(b)\n self.ligand._active[self.ligand._selection] = False\n selection = self.ligand._selection[self._cluster]\n self.ligand._active[selection] = True\n for atom in self._cluster:\n atom_sel = self.ligand._selection[self.ligand.connectivity[atom]]\n self.ligand._active[atom_sel] = True\n self.conformer = self.ligand\n self._coor_set = new_coor_set\n self._bs = new_bs\n if len(self._coor_set) < 1:\n logger.warning(\n f\"{self.ligand.resn[0]}: \"\n f\"Local 
search {self._cluster_index}: {len(self._coor_set)} conformers\"\n )\n return\n\n # QP score conformer occupancy\n logger.debug(\"Converting densities.\")\n self._convert()\n self._solve_qp()\n logger.debug(\"Updating conformers\")\n self._update_conformers()\n if self.options.write_intermediate_conformers:\n self._write_intermediate_conformers(prefix=\"localsearch_ligand_qp\")\n if len(self._coor_set) < 1:\n logger.warning(\n f\"{self.ligand.resn[0]}: \"\n f\"Local search QP {self._cluster_index}: {len(self._coor_set)} conformers\"\n )\n return\n\n # MIQP score conformer occupancy\n self._convert()\n self._solve_miqp(\n threshold=self.options.threshold, cardinality=self.options.cardinality\n )\n self._update_conformers()\n if self.options.write_intermediate_conformers:\n self._write_intermediate_conformers(prefix=\"localsearch_ligand_miqp\")", "def identify_coref_chains(dialog):\n\n for r_id, datum in enumerate(dialog['dialog']):\n label = datum['template']\n if label in gvars.METAINFO['independent_questions']:\n dialog['graph']['history'][r_id + 1]['dependence'] = None\n continue\n\n if (label == 'exist-attribute-group' or label == 'count-attribute-group' or\n label == 'count-all-group'):\n dialog['graph']['history'][r_id + 1]['dependence'] = r_id - 1\n continue\n\n if 'imm' in label:\n dialog['graph']['history'][r_id + 1]['dependence'] = r_id - 1\n continue\n\n if 'early' in label:\n # Go over previous history.\n cur_history = dialog['graph']['history'][r_id + 1]\n assert 'focus_id' in cur_history and 'focus_desc' in cur_history,\\\n 'More focus objects than one, no focus objects!'\n focus_id = cur_history['focus_id']\n for attr in gvars.METAINFO['attributes']:\n if attr in cur_history['focus_desc']: break\n\n history = dialog['graph']['history'][:r_id + 1]\n for hist_id, hist_datum in enumerate(history):\n for obj in hist_datum['objects']:\n if obj['id'] == focus_id and attr in obj:\n dialog['graph']['history'][r_id + 1]['dependence'] = hist_id - 1\n break\n return dialog", "def searchOrigin(concForm, preSet, formulSet):\r\n lis = []\r\n for i in range(len(preSet)):\r\n if preSet[i] == concForm:\r\n lis.append(i + 1)\r\n liss = preSet[i].split()\r\n liss.append(\"Assumed\")\r\n lis.append(liss)\r\n return [1, lis]\r\n for subset in formulSet:\r\n if concForm == subset[1][0]:\r\n lis.append(subset[0])\r\n lis.append(subset[1])\r\n return [0, lis]\r\n return [-1]", "def find_pacgums(self):\n for row in range(len(self.structure)):\n for col in range(len(self.structure[row])):\n if self.structure[row][col] == 'n': \n self.pacgums.append((col, row))", "def get_group_label(i):\n if i//4 == 0:\n return \"buildUpPlay\"\n elif i//4 == 1:\n return \"chanceCreation\"\n elif i//4 == 2:\n return \"defence\"", "def maak_group_set(df_in, name_group_column):\n group_nummers = set(df_in[name_group_column])\n return group_nummers", "def set_dihedral_force_group(system, g=2):\n print('Scanning forces:')\n for f in system.getForces():\n if isinstance(f, simtk.openmm.openmm.PeriodicTorsionForce):\n print('Found the torsions - setting group to 2')\n f.setForceGroup(2)\n print(f.getForceGroup(), f.__class__)", "def set_dihedral_force_group(system, g=2):\n print('Scanning forces:')\n for f in system.getForces():\n if isinstance(f, simtk.openmm.openmm.PeriodicTorsionForce):\n print('Found the torsions - setting group to 2')\n f.setForceGroup(2)\n print(f.getForceGroup(), f.__class__)", "def remove_from_group(self, org, contact, group):\n pass", "def group_diff(options, db):\n nested_rvals = []\n for ip in 
options.gmp:\n nested_rvals.append(get_ip_parents(ip, db))\n # get just the list of groups, stripping out the networks.\n group1 = [x[0] for x in nested_rvals[0]]\n group2 = [x[0] for x in nested_rvals[1]]\n common = sorted(list(set(group1) & set(group2)))\n diff1 = sorted(list(set(group1) - set(group2)))\n diff2 = sorted(list(set(group2) - set(group1)))\n return common, diff1, diff2", "def enter_group():\n logline(\"\\\\\", indent=False)\n global group_length\n group_length = group_length + 1", "def find_groups_from_ctypes(self, mesh, ctypes):\n raise NotImplementedError", "def proximity_placement_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"proximity_placement_group_id\")", "def find_image(grouped):\n for _i in grouped:\n _i[0] = _i[0] * 10 #increases value of red components\n if _i[0] > 225:\n _i[0] = 225\n _i[1] = _i[0] #sets green components equal to red\n _i[2] = _i[0] #sets blue components equal to red\n return grouped", "def finalize_groups(self):\n merged_rdd = self.merged_rdd\n group_merge_map = self.group_merge_map\n sc = self.sc\n sqc = pyspark.sql.SQLContext(sc)\n\n nPartitions = sc.defaultParallelism*5\n\n nMinMembers = self.nMinMembers\n\n # we need to use the group merge map used in a previous step to see which \n # groups are actually spread across domain boundaries\n group_merge_map = self.group_merge_map\n \n\n def count_groups_local(i, iterator, nMinMembers):\n # the first element is the group mapping dictionary\n dist_groups = set(iterator.next().values())\n print len(dist_groups)\n print 'sizeof set in ', i, ' ', asizeof.asizeof(dist_groups)\n p_arrs = np.concatenate([p_arr for p_arr in iterator])\n gids, counts = np.unique(p_arrs['iGroup'], return_counts=True)\n print 'number of groups in partition ', i, ' = ', len(gids)\n return ((g,cnt) for (g,cnt) in zip(gids, counts) if (g in dist_groups) or (cnt >= nMinMembers))\n \n\n def filter_groups_by_map(rdd, not_in_map=False):\n def perform_filter(iterator, exclusive):\n # the first element after the union is the group mapping\n # here we have already remapped the groups so we need to just take the final group IDs\n dist_groups = set(iterator.next().values())\n return ((gid, count) for (gid,count) in iterator if (gid in dist_groups)^exclusive)\n return rdd.mapPartitions(lambda i: perform_filter(i,not_in_map), preservesPartitioning=True)\n\n def get_local_groups(rdd, map_rdd): \n return filter_groups_by_map(map_rdd + rdd, not_in_map=True)\n\n def get_distributed_groups(rdd, map_rdd):\n return filter_groups_by_map(map_rdd + rdd, not_in_map=False)\n\n # first, get rid of ghost particles\n no_ghosts_rdd = self.filter_ghosts(merged_rdd)\n\n # count up the number of particles in each group in each partition\n group_counts = (group_merge_map + no_ghosts_rdd).mapPartitionsWithIndex(lambda index,i: count_groups_local(index, i, nMinMembers), True).cache()\n\n # merge the groups that reside in multiple domains\n distributed_groups = get_distributed_groups(group_counts, group_merge_map)\n\n merge_group_counts = (distributed_groups.reduceByKey(lambda a,b: a+b, nPartitions)\n .filter(lambda (g,cnt): cnt>=nMinMembers)).cache()\n\n if self.DEBUG:\n print 'spark_fof DEBUG: non-merge groups = %d merge groups = %d'%(group_counts.count(), merge_group_counts.count()) \n\n # combine the group counts\n groups_rdd = (get_local_groups(group_counts, group_merge_map) + merge_group_counts).setName('groups_rdd')\n total_group_counts = groups_rdd.cache().count()\n \n print 'Total number of groups: ', 
total_group_counts\n\n self.total_group_counts = total_group_counts\n\n return groups_rdd", "def _split_groups(self, ge_vec, mask):\n functional_grp = ge_vec[mask]\n diff = set(ge_vec) - set(functional_grp)\n control_grp = np.array(list(diff))\n return control_grp, functional_grp", "def id(self):\n return self._group", "def score_group_conflicts(self):\n group_conflict_score = 0\n multiplier = 4\n \n for day_num in range(self.num_days):\n \n current_day = self.days[ day_num ]\n num_conflicts = 0\n \n for groups in current_day.values():\n for group in groups:\n if not group.available( day_num ):\n num_conflicts += 1\n \n group_conflict_score += multiplier * ( num_conflicts ** 2 )\n \n self.group_conflict_score = group_conflict_score\n return self.group_conflict_score", "def find_bond_groups(mol):\n rot_atom_pairs = mol.GetSubstructMatches(RotatableBondSmarts)\n rot_bond_set = set([mol.GetBondBetweenAtoms(*ap).GetIdx() for ap in rot_atom_pairs])\n rot_bond_groups = []\n while (rot_bond_set):\n i = rot_bond_set.pop()\n connected_bond_set = set([i])\n stack = [i]\n while (stack):\n i = stack.pop()\n b = mol.GetBondWithIdx(i)\n bonds = []\n for a in (b.GetBeginAtom(), b.GetEndAtom()):\n bonds.extend([b.GetIdx() for b in a.GetBonds() if (\n (b.GetIdx() in rot_bond_set) and (not (b.GetIdx() in connected_bond_set)))])\n connected_bond_set.update(bonds)\n stack.extend(bonds)\n rot_bond_set.difference_update(connected_bond_set)\n rot_bond_groups.append(tuple(connected_bond_set))\n return tuple(sorted(rot_bond_groups, reverse = True, key = lambda x: len(x)))", "def find_nb(self, ox1, atoms, r1, r2):\n nb_check = [{}, \"\"]\n for k in atoms:\n dox = Vector.length(ox1[1][1] - atoms[k][1])\n if (k != ox1[0] and ox1[1][2] != atoms[k][2] and\n dox <= (r1 + r2)):\n nb_check[0][k] = atoms[k]\n if dox <= r2:\n nb_check[1] = ''.join([nb_check[1], atoms[k][0]])\n return nb_check", "def db(r):\n ey = gQ(r)\n gR = b.tcl(\n 'global no_gizmo; set no_gizmo 1; in %s {%s -New} ; return [value [stack 0].name]' % (ey.fullName(), r.Class()))\n group = b.toNode('.'.join((ey.fullName(), gR)))\n group.setSelected(False)\n if ew(r):\n for node, gS in ew(r).iteritems():\n for c in gS:\n node.setInput(c, group)\n\n for c in range(r.inputs()):\n group.setInput(c, r.input(c))\n\n group.setXYpos(r.xpos(), r.ypos())\n group.readKnobs(r.writeKnobs(b.TO_SCRIPT))\n b.delete(r)\n return group", "def get_entry(self, name):\n for grp_name in self.h5:\n if grp_name == name:\n grp = self.h5[grp_name]\n if isinstance(grp, h5py.Group) and \\\n (\"start_time\" in grp) and \\\n self.get_attr(grp, \"NX_class\") == \"NXentry\":\n return grp", "def test_Mesh_ElementMat_2elem_2tgl_add_tgl_group(self):\n\n self.mesh.add_element(np.array([0, 1]), \"Segment2\")\n self.mesh.add_element(np.array([1, 2]), \"Segment2\")\n self.mesh.add_element(np.array([1, 2, 3]), \"Triangle3\")\n self.mesh.add_element(np.array([2, 3, 0]), \"Triangle3\") # already exist\n self.mesh.add_element(np.array([2, 0, 1]), \"Triangle3\", group=3)\n\n solution = np.array([-1, -1, 3], dtype=int)\n result = self.mesh.element[\"Triangle3\"].group\n testA = (result == solution).all()\n msg = \"Wrong result: returned \" + str(result) + \", expected: \" + str(solution)\n self.assertTrue(testA, msg=msg)" ]
[ "0.53478336", "0.526648", "0.52470756", "0.5181944", "0.51471263", "0.51189107", "0.50924", "0.50924", "0.5078987", "0.5077038", "0.505254", "0.4997841", "0.4995758", "0.49540886", "0.49455371", "0.49252045", "0.49165574", "0.48953816", "0.48634696", "0.48486274", "0.48388016", "0.4794436", "0.47861168", "0.47718892", "0.47687843", "0.47655007", "0.47651014", "0.47638258", "0.47628638", "0.47531822", "0.4748769", "0.4746012", "0.4734793", "0.47267872", "0.47224554", "0.47141826", "0.47089377", "0.46795392", "0.46757415", "0.46685183", "0.46685183", "0.46504658", "0.46499202", "0.46397948", "0.46264285", "0.46259174", "0.4622436", "0.46164498", "0.46075305", "0.46064425", "0.46027002", "0.46004403", "0.45982167", "0.45800835", "0.4578387", "0.45755225", "0.45692766", "0.45676863", "0.45644173", "0.45445898", "0.45445898", "0.45418614", "0.45374092", "0.4519436", "0.45176354", "0.4514693", "0.45123816", "0.4507186", "0.45058596", "0.45055503", "0.44974375", "0.44962022", "0.44916958", "0.44896263", "0.4487464", "0.44800237", "0.44773316", "0.44728133", "0.44643533", "0.4457409", "0.44541144", "0.445254", "0.44461724", "0.44439995", "0.44439995", "0.44417906", "0.44375426", "0.44218686", "0.44191763", "0.44187006", "0.44163203", "0.441395", "0.4406957", "0.44052917", "0.44034645", "0.44010645", "0.43982634", "0.4398037", "0.43929905", "0.43928584" ]
0.52450806
3
Returns the atom with the shortest distance to the given atom.
def get_closest_atom_of_element(element, atom, exclude=None):
    for atom2 in atom.partner:
        if (element == atom2.element or not element) and not atom2 == exclude:
            return atom2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shortest_distance_to(self, pt):\n return self._nearest_to_point(pt)[0]", "def _minimum_distance(self,arg):\n return min([abs(arg-e) for e in self if not e is arg])", "def get_min_distance(self, node):\r\n if self.have_min_distance(node):\r\n return self.table[node][\"dist\"]\r\n return None", "def nearest_neigh(self, atom):\n atoms = self.hutch.get_atoms_in_same_hutch(atom)[:]\n if atom in atoms: atoms.remove(atom)\n\n # This generation of nearby hutches isn't perfect but it will work\n rots = [(1,0,0),(0,1,0),(0,0,1)]\n i = 0\n while len(atoms) == 0:\n hutch = ((hutch[0]+rots[i][0])%self.hutch.nhutchs,(hutch[1]+rots[i][1])%self.hutch.nhutchs,(hutch[2]+rots[i][2])%self.hutch.nhutchs)\n i = (i+1) % 3\n atoms = self.hutch.hutchs[hutch]\n if atom in atoms: atoms.remove(atom)\n start = atoms[0]\n\n atoms = self.get_atoms_in_cutoff(atom,self.dist(atom,start))\n #if atom in atoms: atoms.remove(atom)\n d = float(\"inf\")\n for atomi in atoms:\n dt = self.dist(atom,atomi)\n if dt < d:\n d = dt\n a = atomi\n return a", "def ensure_atom(self, atom):\n return self.ensure_atoms([atom])[0]", "def smallest (self):\n return self.pointers[0].smallest()", "def get_min_distance(self):\n return round(min(self.combined_euclidian_distance))", "def nearest_neigh_of_same_type(self, atom, cutoff=3.5):\n atoms = []\n while(len(atoms) == 0):\n atoms = self.get_atoms_in_cutoff(atom, cutoff)\n atoms = [x for x in atoms if x.z == atom.z]\n #if atom in atoms: atoms.remove(atom)\n cutoff *= 2\n cutoff /= 2 # set back to the value used in case I want it later\n d = float(\"inf\")\n for atomi in atoms:\n dt = self.dist(atom, atomi)\n if dt < d:\n d = dt\n a = atomi\n if(a.z != atom.z): raise Exception(\"Error! Function 'nearest_neigh_of_same_type' didn't work!\")\n return a", "def find_min_distance():\n return np.argmin(d)", "def _findMinNode(self, s):\n\n minNode = None\n minVal = self.inf\n for vertex in s:\n if self.dist[vertex] < minVal:\n minVal = self.dist[vertex]\n minNode = vertex\n return minNode", "def find_smallest(self):\n return self._find_smallest(self.root)", "def nn(x, S, dist):\n\n # note that there might be more than on minimal item. 
min will return the\n # first one ecountered\n return min(S, key=lambda y: dist(x, y[:-1]))", "def nodeAtMinimumDistance(self, notFoundYet, distances):\n # found minimal\n minimal = None\n for node in notFoundYet:\n if (distances[node] >= 0): \n if minimal == None or (distances[minimal] > distances[node]):\n minimal = node\n\n # return\n if minimal == -1: return None\n else: return minimal", "def min_tss_dist(dist_seq):\n amended_dist = [abs(i + 0.1) for i in dist_seq]\n # `index` only returns index of the first instance even if there are multiple min values\n min_index = amended_dist.index(min(amended_dist))\n\n return dist_seq[min_index]", "def get_equivalent_atom(self, atom):\n try:\n return self.atom_dict[atom.name]\n except KeyError:\n return None", "def get_nearest_atom_inds(self):\n # Create empty data structure\n self.closest_ats = np.zeros((self.natom, self.natom-1), dtype=int)\n\n # Get and sort distances\n all_at_inds = np.arange(self.natom)\n for iat in range(self.natom):\n at_inds = all_at_inds[all_at_inds != iat]\n dist = self.all_dist[iat, at_inds]\n\n at_inds = [i[1] for i in sorted(zip(dist, at_inds))]\n self.closest_ats[iat] = at_inds", "def min_distance(distance, spt_set, self_nodes):\n minimum = sys.maxsize\n minimum_node = None\n for curr_node in self_nodes.values():\n if distance[curr_node.id] < minimum and not spt_set[curr_node.id]:\n minimum = distance[curr_node.id]\n minimum_node = curr_node\n return minimum_node", "def closest_station(lat, lon, cache_dir):\n if lat is None or lon is None or not os.path.isdir(cache_dir):\n return\n stations = zamg_stations(cache_dir)\n\n def comparable_dist(zamg_id):\n \"\"\"Calculate the pseudo-distance from lat/lon.\"\"\"\n station_lat, station_lon = stations[zamg_id]\n return (lat - station_lat) ** 2 + (lon - station_lon) ** 2\n\n return min(stations, key=comparable_dist)", "def _find_smallest(node):\n if node.left:\n return BinarySearchTree._find_smallest(node.left)\n else:\n return node", "def find_min(self):\n return min(self.nodes, key=int)", "def __find_closest_symbol(symbol):\n closest_symbol = []\n min_dist = float(\"inf\")\n\n for symbol_2 in symbols_info:\n if symbol != symbol_2:\n dist = math.sqrt(\n math.pow(symbol[4][0] - symbol_2[4][0], 2) +\n math.pow(symbol[4][1] - symbol_2[4][1], 2)\n )\n if dist < min_dist:\n min_dist = dist\n closest_symbol = symbol_2\n\n return closest_symbol", "def find_smallest(node):\n smallest = node.value\n\n while node.left is not None:\n node = node.left\n smallest = node.value\n\n return smallest", "def take_min(self):\n return self.get_first()", "def shortest_distance(self, bint reverse=False):\n cdef vector[openfst.TropicalWeight] distances\n openfst.ShortestDistance(self.fst[0], &distances, reverse)\n cdef unsigned i\n dist = [TropicalWeight(distances[i].Value()) for i in range(distances.size())]\n return dist", "def extractMinimum(self):\n\n return self.heap[1]", "def find_min(self):\n return self.min", "def find_min(self):\n return self.min", "def min_dist_to_spray(lat, lon, spray):\n\n # Should really be array-like??\n if isinstance(lat, float) or isinstance(lon, float):\n N = spray.shape[0]\n lata = np.empty(N)\n lata.fill(lat)\n lat = lata\n lona = np.empty(N)\n lona.fill(lon)\n lon = lona\n dist = haversinea(lat, lon, spray.Latitude.values, spray.Longitude.values)\n return dist.min()", "def get_equivalent_atom(self, atom):\n try:\n return self.chain_dict[atom.chain_id].fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None", "def min(self):\n 
return self.get_first()", "def findmin(self):\n return self.heap[0] if len(self.heap) > 0 else None", "def get_min_distance(distances, unvisited_nodes):\n min_value = None\n node = None\n for city, distance in distances.items():\n if city not in unvisited_nodes:\n continue\n if min_value is None:\n node = city\n min_value = distance\n elif distance < min_value:\n node = city\n min_value = distance\n return node", "def min_neighbor_node(g):\r\n return min(g.degree_iter(),key = lambda item:item[1])[0]", "def get_equivalent_atom(self, atom):\n try:\n return self.fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None", "def nearest_neighbor(self, xRand):\n # TODO: Make this more efficient?\n #within a neighborhood of XRand, determine the lowest cost to go\n minCost = np.inf\n minNode = None\n\n for node in self.Tree:\n\n cost = self.compute_dist(node.state_time[0:6], xRand)\n\n if cost < minCost:\n minNode = node\n minCost = cost\n\n return minNode", "def nearest_neighbor(self, xRand):\n # TODO: Make this more efficient?\n #within a neighborhood of XRand, determine the lowest cost to go\n minCost = np.inf\n minNode = None\n\n for node in self.Tree:\n\n cost = self.compute_dist(node.state_time[0:6], xRand)\n\n if cost < minCost:\n minNode = node\n minCost = cost\n\n return minNode", "def getAtom(self, atomname):\n if self.hasAtom(atomname):\n return self.atoms[atomname]\n else:\n return None", "def get_cost_min(self) -> float:\n\n return min(cost for position, cost in self.memory)", "def shortest_distance(puzzle_input: List[str], satellite_name_a: str, satellite_name_b: str) -> Tuple[int, str]:\n orbit_tree = make_tree(puzzle_input)\n\n distances_satellite_a = distance_to_objects(orbit_tree, satellite_name_a)\n\n distances_satellite_b = distance_to_objects(orbit_tree, satellite_name_b)\n\n # & gives the intersection between the sets of keys, leaving only the objects they both orbit directly/indirectly\n objects_in_common = set(distances_satellite_a.keys()) & set(distances_satellite_b.keys())\n distances = [\n # Sum of distance from satellite a, b to each object, object name\n (distances_satellite_a[obj] + distances_satellite_b[obj], obj)\n for obj in objects_in_common\n ]\n\n min_distance, satellite_name = min(distances)\n return min_distance, satellite_name", "def shortest_tour(tours):\n return min(tours, key=tour_length)", "def get_min_path(self, paths):\n shortest_path = paths[0]\n shortest_distance = self.get_path_distance(paths[0])\n\n for path in paths[1:]:\n distance = self.get_path_distance(path)\n\n if distance < shortest_distance:\n shortest_path = path\n shortest_distance = distance\n\n return shortest_path", "def get_equivalent_atom(self, atom):\n try:\n return self.model_dict[atom.model_id].chain_dict[atom.chain_id].fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None", "def shortest(self):\n shortest = None\n if self._vectors:\n shortest = self._vectors[0]\n min_len = shortest.length2\n for vector in self._vectors:\n len = vector.length2\n if len < min_len:\n shortest = vector\n min_len = len\n return shortest", "def findSmallest(distancesWithNames):\n smallest = distancesWithNames[0][2]\n smallestIndex = -1\n for i in range(len(distancesWithNames)):\n if smallest >= distancesWithNames[i][2]:\n smallest = distancesWithNames[i][2]\n smallestIndex = i\n return smallestIndex", "def get_nearest_atom_inds_per_mol(self):\n self.closest_at_per_mol = np.zeros((self.nmol,\n self.at_per_mol,\n self.at_per_mol-1), dtype=int)\n\n 
# Get and sort distances\n all_at_inds = np.arange(self.at_per_mol)\n for imol in range(self.nmol):\n for iat in range(self.at_per_mol):\n at_inds = all_at_inds[all_at_inds != iat]\n dist = self.all_dist_per_mol[imol, iat, at_inds]\n\n at_inds = [i[1] for i in sorted(zip(dist, at_inds))]\n self.closest_at_per_mol[imol, iat] = at_inds", "def shortest_path_to_root(self):\n paths = self.hypernym_paths()\n shortest = paths.index(min([len(path) for path in paths]))\n return paths[shortest]", "def find_min_node(self):\n min_energy = 10 ** 10\n min_id = -1\n for node in self.node:\n if node.energy < min_energy:\n min_energy = node.energy\n min_id = node.id\n return min_id", "def minimal_distance(me):\n smallest_d = 101 # given length of edge <= 100\n ismallest = -1 # index of the edge in the list, me\n for i, e in enumerate(me):\n if e[0] < smallest_d:\n smallest_d = e[0]\n ismallest = i\n\n d = me[ismallest][0]\n v1 = me[ismallest][1]\n v2 = me[ismallest][2]\n me.pop(ismallest)\n\n smallest_d = 101\n for i, e in enumerate(me):\n if (e[1] == v1 or e[2] == v1 or e[1] == v2 or e[2] == v2) and e[0] < smallest_d:\n smallest_d = e[0]\n\n d += smallest_d\n return d", "def closest_distance(node_a, node_b):\n min_distance = 999999\n for loc_a in node_a.locations:\n for loc_b in node_b.locations:\n distance = abs(loc_a - loc_b)\n if distance < min_distance:\n min_distance = distance\n return min_distance", "def _minPermanenceSynapse(self):\n minSynapse = None\n minPermanence = float(\"inf\")\n\n for synapse in sorted(self.__synapses,\n key=lambda s: s._ordinal):\n if synapse.permanence < minPermanence - EPSILON:\n minSynapse = synapse\n minPermanence = synapse.permanence\n\n assert minSynapse is not None\n\n return minSynapse", "def find_min(self):\n\n if self.left:\n return self.left.find_min()\n\n return self.data", "def shortest_manhattan_distance(coordinates):\n current_minimum = sys.maxsize\n\n for x, y in coordinates:\n if abs(x) + abs(y) < current_minimum:\n current_minimum = abs(x) + abs(y)\n\n return current_minimum", "def min(self):\n return self._reduce_for_stat_function(F.min, only_numeric=False)", "def shortest_tour(all_tours):\n shortest = all_tours[0]\n \n for tour in all_tours:\n if tour_distance(shortest) > tour_distance(tour):\n shortest = tour\n return shortest", "def find_min(self) -> TreeNode:\n node = self.root\n while True:\n if not node.left:\n return node\n node = node.left", "def nearest(source):\n def mycmp(a,b):\n return -cmp(a[1],b[1])\n dmin = 999.999\n smin = 'Unknown'\n if len(stars_) == 0:\n print \"No stars have been selected, go use 'stars()'\"\n return\n sdlist=[]\n for s in stars_:\n d = distance(s[0],source)\n sdlist.append((s[0],d))\n if d < dmin:\n dmin = d\n smin = s[0]\n sdlist.sort(mycmp)\n for sd in sdlist:\n print \"%s at %g\" % (sd[0],sd[1])\n print \"Nearest object from stars() to %s is %s at %g deg\" % (source,smin,dmin)", "def _find_min(self):\n if self.is_empty(): # is_empty inherited from base class\n raise Empty('Priority queue is empty')\n small = self._data.first()\n walk = self._data.after(small)\n while walk is not None:\n if walk.element() < small.element():\n small = walk\n walk = self._data.after(walk)\n return small", "def heuristic_1(node):\n x_node, y_node = node.state.location()\n goals = node.state.grid.components.white_walkers\n goals.append(node.state.grid.components.dragon_stone)\n distance = [np.sqrt((x_node - x)**2 + (y_node - y)**2) for x, y in goals]\n return distance[np.argmin(distance)]", "def find_closest_atom(coords1, coords2):\n\n 
coords1 = np.array(coords1)\n coords2 = np.array(coords2)\n diff = coords2[:, np.newaxis] - coords1[np.newaxis, :]\n dist = np.einsum('ijk->ij', diff**2)**0.5\n index = np.argmin(dist)\n return index", "def _find_lowest_cost_node(self) -> str:\n lowest_cost = float(\"inf\")\n lowest_cost_node = None\n for node in self.costs:\n cost = self.costs[node]\n if cost < lowest_cost and node not in self.closed_nodes:\n lowest_cost = cost\n lowest_cost_node = node\n return lowest_cost_node", "def nearest_point(pt):\n nearest_point = None\n min_dist = float(\"inf\")\n for p in cur_points:\n dist = euclidean_dist(pt, p.to_tuple())\n if dist < min_dist:\n min_dist, nearest_point = dist, p\n\n return nearest_point.to_tuple()", "def get_min_rm(distance_file, rm_range, step_size, potential_function):\n return min((i[1],i[0]) for i in vary_across_rm(distance_file, rm_range, step_size, potential_function))[1]", "def nearestStar(self, loc):\n if loc in self.stardistances:\n return self.stardistances[loc].keys()[0]\n return False", "def min(self, column):\n return self.aggregate('min', *[column])", "def minimum_distance(self, state, *args, **kwargs):\n raise NotImplementedError", "def __search_left_closest_symbol(symbol, index):\n closest_left_index = -1\n min_dist = float(\"inf\")\n\n for index_2 in range(len(symbols_info)):\n # check if the two symbols are different\n if index_2 != index:\n # take the rect info\n rect_info = symbols_info[index_2]\n # checks if they are on the same line\n same_line = __check_if_rects_are_on_same_height(symbol, rect_info)\n\n # if are on the same line and the rect are on the left\n rect_center_x_coord = rect_info.center[0]\n symbol_center_x_coord = symbol.center[0]\n\n if same_line and rect_center_x_coord < symbol_center_x_coord:\n rightmost_corner_center_x = rect_info.bottom_right_corner[0]\n rightmost_corner_center_y = rect_info.bottom_right_corner[1] - rect_info.top_right_corner[1]\n dist = math.hypot(symbol.center[0] - rightmost_corner_center_x,\n symbol.center[1] - rightmost_corner_center_y)\n\n if dist < min_dist:\n min_dist = dist\n closest_left_index = index_2\n\n return closest_left_index", "def LineMinDistanceTo(line, point_or_line):\n line = rhutil.coerceline(line, True)\n test = rhutil.coerceline(point_or_line)\n if test is None: test = rhutil.coerce3dpoint(point_or_line, True)\n return line.MinimumDistanceTo(test)", "def nearest_neighbor(A, cities):\n return min(cities, key = lambda c: distance(c,A))", "def min(self, fn=lambda x: x):\n return _(min(*self._, key=fn))", "def closest_node(node, nodes):\n nodes = np.asarray(nodes)\n deltas = nodes - node\n dist_2 = np.einsum(\"ij,ij->i\", deltas, deltas)\n return np.argmin(dist_2), np.min(dist_2)", "def getMinNode(self):\n currentNode = self.openList[0]\n for node in self.openList:\n if node.g + node.h < currentNode.g + currentNode.h:\n currentNode = node\n return currentNode", "def shortest_flight(g):\n min_distance = sys.maxsize\n min_destination = None\n min_key = None\n \n for key in g.city_dict:\n for flight in g.city_dict[key].get_flights_out():\n if(flight[1] < min_distance):\n min_key = key\n min_destination = flight[0]\n min_distance = flight[1]\n return g.city_dict[min_key].get_name(), min_destination, min_distance", "def node_with_min_fscore(open_set, f_cost): # open_set is a set (of cell) and f_cost is a dict (with cells as keys)\n f_cost_open = dict([a for a in f_cost.items() if a[0] in open_set])\n return min(f_cost_open, key=f_cost_open.get)", "def node_with_min_fscore(open_set, f_cost): # open_set is 
a set (of cell) and f_cost is a dict (with cells as keys)\n f_cost_open = dict([a for a in f_cost.items() if a[0] in open_set])\n return min(f_cost_open, key=f_cost_open.get)", "def best_cell(self, coord):\n if coord[0] == self.pos[0] and coord[1] == self.pos[1]:\n return self.pos\n\n # Get all available cells\n free_cells = self.get_moves()\n smal_dist = float(\"Inf\")\n\n for cell in free_cells:\n d_x = abs(coord[0] - cell[0])\n d_y = abs(coord[1] - cell[1])\n dist = (d_x**2 + d_y**2)**0.5\n if dist < smal_dist:\n smal_dist = dist\n new_cell = cell\n\n return new_cell", "def extract_min(self):\n if self.is_empty():\n raise ValueError(\"Priority queue is empty\")\n\n edge_tuple = heapq.heappop(self.__heap)\n ew = edge_tuple[1]\n return ew.edge()", "def nearest_min(dist_matrix):\n # much faster than np.where\n i, j = np.unravel_index(\n np.argmin(dist_matrix), \n dims=dist_matrix.shape\n )\n return i, j", "def extractMin(self):\n if not self.heap:\n raise IndexError(\"there is no root\")\n elif len(self.heap) < 2:\n return self.heap.pop()\n else:\n self.heap[0], oldMin = self.heap.pop(), self.heap[0]\n self._shiftDown()\n return oldMin", "def _FindNearestAnat(self, acqtime):\n tdiff_min = 1e6\n for anat in self.entry_map['anat']:\n if self.info[anat]['type'] == 'T1High' and \\\n self.info[anat]['InversionTime'] > 0.:\n tdiff = abs(acqtime - self.info[anat]['acqtime'])\n if tdiff < tdiff_min:\n tdiff_min = tdiff\n anat_min = anat\n return anat_min", "def nearest(node):\n count = 0\n distance = 100000\n while count != node_count[0]:\n city = d_list[node.value - 1]\n if city != []:\n if city[0][1] < distance:\n distance = city[0][1]\n new_city = city[0][0]\n closest_city = node.value\n node = node.left\n count = count + 1\n return (closest_city, new_city, distance)", "def peek_min(self):\n if self.root:\n return self.root.min().value\n raise ValueError(\"cannot perform peek_min on an empty tree\")", "def find_min(self):\n current = self\n while current.left is not None:\n current = current.left\n return current", "def min(self):\n return self._min(self.root)", "def shortest_route(self, with_return=False):\n return min(self.__routes(with_return))", "def _min_node(node):\n if not node:\n return None\n i = node\n while i.left:\n i = i.left\n return i", "def find_nearest(a, a0):\n idx = numpy.abs(a - a0).argmin()\n return a.flat[idx]", "def _get_closest_station_by_zcta_ranked(zcta):\n\n zcta = zcta.zfill(5) # Ensure that we have 5 characters, and if not left-pad it with zeroes.\n lat, lon = zcta_to_lat_long(zcta)\n finding_station = True\n rank = 0\n while finding_station:\n rank = rank + 1\n station_ranking = _rank_stations_by_distance_and_quality(lat, lon)\n station, warnings = select_station(station_ranking, rank=rank)\n\n # Ignore stations that begin with A\n if str(station)[0] != 'A':\n finding_station = False\n\n return station, warnings, lat, lon", "def get_closest_node(data, loc):\n min_dist = None\n closest = None\n for i in data:\n # Standard min-value search loop\n dist = great_circle_distance(get_coords(data, i), loc)\n if closest is None or dist < min_dist:\n closest = i\n min_dist = dist\n return closest", "def get_min(self):\n\t\tif self.left:\n\t\t\treturn self.left.get_min()\n\t\treturn self.value", "def min(self):\n return min(self)", "def min_distance_vertex(distance, visited):\n vertices = len(visited)\n min_distance = INF\n min_index = None\n for v in range(vertices):\n if not visited[v] and distance[v] <= min_distance:\n min_distance = distance[v]\n min_index = v\n return 
min_index", "def get_min(self):\n if not self:\n return None\n return self.left.get_min() if self.left else self.value #Ternarary Operator", "def get_min(self):\n return self.serie.min()", "def find_min(list):\n return find_value_at(list, -1)", "def closest_point(point, points):\n return points[cdist([point], points).argmin()]", "def closest_fruit(maze, currX, currY, fruit_list):\n curr_min = sys.maxsize\n for position in fruit_list:\n distance = Astar(maze, currX, currY, position[0], position[1])\n if distance < curr_min:\n curr_min = distance\n return curr_min", "def get_min(paths_list):\n return min(paths_list, key=len)", "def get_min(self) -> object:\n if self.is_empty()==True:\n return None\n return self.heap.get_at_index(0)", "def getAtom(self, atomid):\n\n\t\tfor chain in self.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\tfor atm in res.atom:\n\t\t\t\t\tif atomid == int(atm.file_id):\n\t\t\t\t\t\treturn atm\n\n\t\treturn None", "def min(self):\n least = self.data[0]\n \n for i in range(len(self.data)):\n if self.data[i] < least:\n least = self.data[i]\n return least" ]
[ "0.6927831", "0.637707", "0.6341551", "0.6167055", "0.61642516", "0.60590255", "0.59727126", "0.595667", "0.59553444", "0.5929634", "0.5880962", "0.5837155", "0.58368987", "0.5720252", "0.5698188", "0.56682664", "0.5647115", "0.56391704", "0.56333494", "0.5632058", "0.5616186", "0.55863744", "0.55403155", "0.55300313", "0.54500914", "0.54347634", "0.54347634", "0.54151016", "0.5409012", "0.540596", "0.5373988", "0.53632355", "0.5361277", "0.5361011", "0.53566486", "0.53566486", "0.53357595", "0.5321885", "0.53175527", "0.53136545", "0.53092134", "0.53062165", "0.5302674", "0.5262644", "0.5257739", "0.5247423", "0.5236585", "0.52310514", "0.5213317", "0.5199085", "0.51981884", "0.5188443", "0.5185071", "0.517998", "0.51790357", "0.5174035", "0.5171047", "0.5166029", "0.5164892", "0.5161995", "0.5157562", "0.51567525", "0.51495343", "0.51406586", "0.5127563", "0.5123641", "0.5122734", "0.5107155", "0.5103296", "0.5096362", "0.50949657", "0.508679", "0.50823075", "0.50823075", "0.50778306", "0.5068336", "0.5067786", "0.50633514", "0.50607437", "0.50554085", "0.5054533", "0.5052902", "0.5033156", "0.5029803", "0.50260365", "0.5025008", "0.502304", "0.50185937", "0.501795", "0.50173134", "0.5013334", "0.50114274", "0.5005571", "0.50043726", "0.49996856", "0.49988082", "0.49987674", "0.49986446", "0.49913254", "0.49900383" ]
0.5314012
39
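A minimal usage sketch for the record above. It is hedged: `FakeAtom` is a hypothetical stand-in for the real ATOM.atom class (which this record does not show), and `partner` is assumed to be pre-sorted by distance, so the first element match is taken to be the closest atom.

# Hypothetical stand-in exposing only the attributes the function reads.
class FakeAtom:
    def __init__(self, name, element, partner=None):
        self.name = name
        self.element = element
        self.partner = partner or []   # assumed: already ordered by distance to this atom

    def __repr__(self):
        return self.name

def get_closest_atom_of_element(element, atom, exclude=None):
    # copied from the document field of this record
    for atom2 in atom.partner:
        if (element == atom2.element or not element) and not atom2 == exclude:
            return atom2

c1 = FakeAtom('C1', 'C')
h1 = FakeAtom('H1', 'H')
o1 = FakeAtom('O1', 'O')
c1.partner = [h1, o1]                                     # nearest partner first

print(get_closest_atom_of_element('O', c1))               # O1 (skips the closer H1)
print(get_closest_atom_of_element('', c1))                # H1 (falsy element matches any)
print(get_closest_atom_of_element('H', c1, exclude=h1))   # None (the only H partner is excluded)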
Needs an ATOM.atom instance as argument. Returns the names of the framework atoms bound to that atom.
def get_framework_neighbours(atom, useH=True):
    neighbourlist = []
    for atom2 in atom.partner[:5]:
        #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6:
        if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(
                covalence_radius[atom2.element]) + .1:
            if not 'H' == atom2.element or useH:
                neighbourlist.append(atom2)
    return neighbourlist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_atom_labels(self, full=False):\n import numpy\n\n labels = self.get_attr(\"atom_labels\")\n if full:\n return labels\n return numpy.array(labels)[self._get_equivalent_atom_list()].tolist()", "def getAtomNames(self):\n return self._raw_data['ATOM_NAME']", "def bond_atoms(atom_list):\n pass", "def all_registered_appnames():\n yield from sorted(Registry.monomers.keys())", "def get_pyxb_namespaces():\n return pyxb.namespace.utility.AvailableNamespaces()", "def atomList(self):\n\n\t\tal = []\t\n\t\tfor chain in self.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\tfor atom in res.atom:\n\t\t\t\t\tal.append(atom)\n\n\t\treturn al", "def defined_names(self, tree=False):\n if not tree:\n return list(self.bindings.keys())\n else:\n return list(self.bindings.keys()) + (list(self.parent.defined_names(tree=True)) if self.parent else [])", "def xontrib_installed(ns=None):\n installed_xontribs = set()\n xontrib_locations = importlib.util.find_spec(\"xontrib2\").submodule_search_locations\n names = None if not ns or len(ns.names) == 0 else set(ns.names)\n if xontrib_locations:\n for xl in xontrib_locations:\n for x in Path(xl).glob(\"*\"):\n name = x.name.split(\".\")[0]\n if name[0] == \"_\" or (names and name not in names):\n continue\n installed_xontribs.add(name)\n return installed_xontribs", "def getNames(self, resname, atomname):\n rname = None\n aname = None\n if resname in self.map:\n res = self.map[resname]\n if res.hasAtom(atomname):\n atom = res.atoms[atomname]\n aname = atom.name\n rname = atom.resname\n return rname, aname", "def installed_appnames():\n appnames = set()\n for finder in sys.meta_path:\n if hasattr(finder, 'appname'):\n appnames.add(finder.appname)\n return appnames", "def find_ntyp(atoms):\n symbs = atoms.get_chemical_symbols()\n unique_symbols = []\n for s in symbs:\n if not (s in unique_symbols):\n unique_symbols.append(s)\n #\n return len(unique_symbols)", "def atoms(self):\n return self._atoms", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.QualExtension_getSBMLExtensionNamespaces(self, *args)", "def get_bonded_atom(self, name_list):\n current_atom = self\n for name in name_list:\n moveto_atom = None\n for atom in current_atom.iter_bonded_atoms():\n if atom.name == name:\n moveto_atom = atom\n break\n if moveto_atom is None:\n return None\n current_atom = moveto_atom\n return current_atom", "def names(self):\n if not self.extensions:\n self.discover()\n\n names = list(self.builtins.keys())\n names += self.extensions.keys()\n\n return sorted(names)", "def fqns(self):\n return [fqn for fqn in self.runinfos]", "def getAtomIndices( structure, resname ):\n atom_indices_ligand = []\n topology = structure.topology\n for atom in topology.atoms():\n if str(resname) in atom.residue.name:\n atom_indices_ligand.append(atom.index)\n\n return atom_indices_ligand", "def get_input_names(self):\n inputNames = []\n for inVar in self.inputs:\n # inVar is of type InOutVar and the object that it contains is a PyFMI variable\n inputNames.append(inVar.get_object().name)\n return inputNames", "def _get_bindings_list_yang_name(self, bindings_list=None):\n\n yang_name_list = []\n\n for bindings_tuple in bindings_list:\n if self._module_name == bindings_tuple[2]:\n yang_name_list.append(bindings_tuple[0].split('.')[-1].replace('_', '-'))\n \n return yang_name_list", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.FbcExtension_getSBMLExtensionNamespaces(self, *args)", "def getAtoms(self):\n return self.atoms", "def getSBMLExtensionNamespaces(self, 
*args):\n return _libsbml.CompExtension_getSBMLExtensionNamespaces(self, *args)", "def _get_atoms(self):\n atoms = []\n invarioms = []\n\n for molecule in self.values():\n atoms += [atom for atom in molecule.atoms]\n invarioms += [atom for atom in molecule.atoms if atom.invariom_name is not None]\n self.atoms = atoms\n self.invarioms = invarioms", "def names() -> Tuple[str, ...]:\n return plugins.list_all(package_name=__name__)", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.LayoutExtension_getSBMLExtensionNamespaces(self, *args)", "def getBindedNames(self):\n names = []\n for function in self.functions:\n names.append(function.__name__)\n return \", \".join(names)", "def getModuleNames():\n import setup\n names = [e.name[1:] for e in setup.wxpExtensions]\n return names", "def lookup(obj):\n return list(dir(obj))", "def lookup(obj):\n return list(dir(obj))", "def getAtomTypes(self):\n return self._raw_data['AMBER_ATOM_TYPE']", "def get_allref(self):\n return self.__applicationList.keys()", "def get_info(atom):\n return [atom.GetIdx(), atom.GetNeighbors()[0].GetIdx()]", "def get_all():\n temp = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n return [i[1] for i in temp if i[0] != \"Aggregator\"]", "def filter_carbon_atoms(atom_list, rings):\n list_3 = []\n list_2 = []\n list_2n = []\n for atom in atom_list:\n if (check_connected(atom, identify_bonds(atom, atom_list)) == False):\n if (len(identify_bonds(atom, atom_list)) == 3):\n list_3.append(atom)\n elif (len(identify_bonds(atom, atom_list)) == 2):\n list_2.append(atom)\n for neighbour in identify_bonds(atom, atom_list):\n if (len(identify_bonds(neighbour[0], atom_list)) == 2):\n for ring in rings:\n if( (atom in ring) and (neighbour[0] in ring)):\n list_2n.append(atom) \n return list_3, list_2, list_2n", "def list_processor_names():\n return [ep.name for ep in pkg_resources.iter_entry_points(ENTRY_POINT_NAME)]", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def models_with_all_atoms(formula, atoms):\n if formula == True:\n return [model for model in tablize(atoms)]\n\n original_models = [model for model in satisfiable(formula, all_models=True)]\n extra_atoms = atoms - formula.atoms()\n\n if not extra_atoms:\n return original_models\n else:\n models_all_atoms = []\n for model in original_models:\n models_all_atoms += [updated_model for updated_model in tablize(extra_atoms, model)]\n return models_all_atoms", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def iter_atoms(self):\n return iter(self.atom_list)", "def get_all_target_namespaces():\n setup_roots = get_all_setups_roots()\n techanim_ns = [x.split(\":\")[0] for x in setup_roots]\n namespaces = get_all_namespaces()\n filtered_ns = []\n for ns in namespaces:\n if ns in [\"UI\", \"ui\", \"shared\", \"Shared\"] + techanim_ns:\n continue\n filtered_ns.append(ns)\n return filtered_ns", "def getRegisterNames(self):\n pass", "def names(self) -> List:\n ...", "def get_atypes(self):\n self.atypes = []\n self.hybs = []\n #self.zs = []\n for ai in self.m.GetAtoms():\n hybi = str( ai.GetHybridization() )\n self.hybs.append( hybi )\n zi = ai.GetAtomicNum()\n #self.zs.append( zi )\n si = ai.GetSymbol()\n if hybi == 'SP2':\n ar = ai.GetIsAromatic()\n ar_suffix = '_R' if ar else '_2'\n ap = si + ar_suffix # atomic_pattern\n elif hybi == 'SP3':\n if zi == 16 and ai.GetExplicitValence() == 6:\n ap = si + 'o3'\n elif zi in [9, 17, 35, 53]:\n 
ap = si\n else:\n ap = si + '_3'\n elif hybi == 'SP':\n ap = si + '_1'\n elif hybi in ['S', ]: #'UNSPECIFIED']:\n ap = si\n else:\n print((' unknown atom type: `%s`'%hybi))\n raise\n self.atypes.append( ap )", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.SBMLExtension_getSBMLExtensionNamespaces(self, *args)", "def _resolve_atomtypes(topology, typemap):\n atoms = list(topology.atoms())\n for atom_id, atom in typemap.items():\n atomtype = [rule_name for rule_name in \n atom['whitelist'] - atom['blacklist']]\n if len(atomtype) == 1:\n atom['atomtype'] = atomtype[0]\n elif len(atomtype) > 1:\n raise FoyerError(\"Found multiple types for atom {} ({}): {}.\".format(\n atom_id, atoms[atom_id].element.name, atomtype))\n else:\n raise FoyerError(\"Found no types for atom {} ({}).\".format(\n atom_id, atoms[atom_id].element.name))", "def getSBMLExtensionNamespaces(self, *args):\n return _libsbml.MultiExtension_getSBMLExtensionNamespaces(self, *args)", "def names(self):\n\t\treturn", "def get_input_node_names(self, node_name):\n # (str) -> list\n node = self.get_node(node_name)\n return node.bottoms", "def _get_buffer_names(self, root_module: nn.Module) -> Set[str]:\n\n def module_fn(module: nn.Module, prefix: str, buffer_names: Set[str]):\n # For FSDP modules, only add the entry when considering the\n # contained `FlattenParamsWrapper` to avoid duplication\n if not isinstance(module, FullyShardedDataParallel):\n for buffer_name, _ in module.named_buffers(recurse=False):\n # Clean module wrapper prefixes in case of nested wrapping\n prefixed_buffer_name = clean_tensor_name(prefix + buffer_name)\n buffer_names.add(prefixed_buffer_name)\n\n def return_fn(buffer_names: Set[str], *args):\n return buffer_names\n\n buffer_names: Set[str] = set()\n return _apply_to_modules(\n root_module,\n module_fn,\n return_fn,\n buffer_names,\n )", "def names(self) -> list[str]:", "def names(self):\n return list(item.name for item in self.mechanisms)", "def lookup(obj):\n a = list(dir(obj))\n return a", "def find_atomtypes(topology, forcefield, max_iter=10):\n typemap = {atom.index: {'whitelist': set(), 'blacklist': set(), \n 'atomtype': None} for atom in topology.atoms()}\n\n rules = _load_rules(forcefield, typemap)\n\n # Only consider rules for elements found in topology\n subrules = dict()\n system_elements = {a.element.symbol for a in topology.atoms()}\n for key,val in rules.items():\n atom = val.nodes[0]['atom']\n if len(list(atom.find_data('atom_symbol'))) == 1 and \\\n not list(atom.find_data('not_expression')):\n try:\n element = next(atom.find_data('atom_symbol')).children[0]\n except IndexError:\n try:\n atomic_num = next(atom.find_data('atomic_num')).children[0]\n element = pt.Element[int(atomic_num)]\n except IndexError:\n element = None\n else:\n element = None\n if element is None or element in system_elements:\n subrules[key] = val\n rules = subrules\n\n _iterate_rules(rules, topology, typemap, max_iter=max_iter)\n _resolve_atomtypes(topology, typemap)\n\n return typemap", "def getBuilderNames():", "def listBuilderNames():", "def listBuilderNames():", "def xontrib_context(name):\n spec = find_xontrib(name)\n if spec is None:\n return None\n m = 
importlib.import_module(spec.name)\n pubnames = getattr(m, \"__all__\", None)\n if pubnames is not None:\n ctx = {k: getattr(m, k) for k in pubnames}\n else:\n ctx = {k: getattr(m, k) for k in dir(m) if not k.startswith(\"_\")}\n return ctx", "def find_xontrib(name):\n if name.startswith(\".\"):\n spec = importlib.util.find_spec(name, package=\"xontrib2\")\n else:\n spec = importlib.util.find_spec(\".\" + name, package=\"xontrib2\")\n return spec or importlib.util.find_spec(name)", "def get_all_windows(self):\n success, result = self.manager.c.eval(\n textwrap.dedent(\n \"\"\"\n [win.wid for win in self.core.mapped_windows]\n \"\"\"\n )\n )\n assert success\n return eval(result)", "def __getitem__(self, atom_name):\n return self.atoms_by_name[atom_name]", "def get_names(dep):\n res = [dep.name]\n return res", "def get_all_setups_roots():\n ta_roots = cmds.ls(\"*.{}\".format(CONFIG[\"config_attr\"]), r=True, o=True)\n return ta_roots", "def get_interactions(list_atoms1, list_atoms2, dist):\n beta_carbons1 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms1))\n beta_carbons2 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms2))\n ns = NeighborSearch(beta_carbons1)\n interactions = []\n\n for atom in beta_carbons2:\n interact = ns.search(atom.get_coord(), dist)\n interactions.extend(\n [tuple(sorted([str(atom.get_parent().resname), str(x.get_parent().resname)])) for x in interact])\n return interactions", "def get_atom_info(self):\n return", "def getNumAtoms(self):\n return int(self._getPointerValue('NATOM'))", "def get_afferents_names(self):\n\t\treturn self._afferentsNames", "def find_app(app, symbol_by_name=..., imp=...):\n ...", "def atoms(self):\n return set(self.array_form)", "def app_names(self):\n return self.get_app_names()", "def get_framework_neighbors(atom, useH=True):\n neighborlist = []\n for atom2 in atom.partner[:5]:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(\n covalence_radius[atom2.element]) + .1:\n if not 'H' == atom2.element or useH:\n neighborlist.append(atom2)\n return neighborlist", "def atomList(joints):\n assert len(joints) > 0\n first = joints[0]\n functorList = first[1][:-2] # Second element of row, last two elements of that are joint prob and log prob\n atomList = []\n for (node,_) in functorList:\n atomList.append(node.functor+\"(\"+\",\".join(node.varList)+\")\")\n return atomList", "def getCmdList():\n return [obj for name, obj in inspect.getmembers(sys.modules[__name__]) \n if inspect.isclass(obj) and issubclass(obj, Cmd)][1:]", "def getExtnNodes(self):\n for name in self._names:\n try:\n mod = __import__(name, fromlist=['open'])\n except ImportError:\n raise ImportError(\"import %s error\" % name)\n self._AllExtnNodes = mod.AllXtens", "def getAtomResonances(atom):\n\n resonances = set()\n atomSet = atom.atomSet\n if atomSet:\n for resonanceSet in atomSet.resonanceSets:\n resonances.update(resonanceSet.resonances)\n return resonances", "def imports():\n for name, val in globals().items():\n if isinstance(val, getattr(types, \"ModuleType\")):\n yield val.__name__", "def get_chebi_synonyms(chebi_ent):\n if hasattr(chebi_ent, 'Synonyms'):\n return [entry.data for entry in chebi_ent.Synonyms]\n else:\n return []", "def get_map_anywhere(atom_list):\n anywhere_map = [atom for atom in atom_list if (check_connected(atom, identify_bonds(atom, atom_list)) == False)]\n return anywhere_map", "def _list_all(root_pkg, prog):\n res = \"\\n\".join(\n sorted(\n pkinspect.package_module_names(_import(root_pkg)),\n 
key=str.lower,\n ),\n )\n sys.stderr.write(f\"usage: {prog} module command [args...]\\nModules:\\n{res}\\n\")\n return 1", "def names():\n pass", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def get_atom_features(self, atom):\n period = [0,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3\n ,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4\n ,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5\n ,6,6,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6\n ,7,7,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]\n\n group = [0,1,18,1,2,13,14,15,16,17,18,1,2,13,14,15,16,17,18\n ,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18\n ,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18\n ,1,2,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18\n ,1,2,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18]\n atomic_num = atom.number\n return np.array([period[atomic_num],group[atomic_num]])", "def list_manifest_labels(tag_manifest, prefix_filter=None):\n query = (\n Label.select(Label, MediaType)\n .join(MediaType)\n .switch(Label)\n .join(LabelSourceType)\n .switch(Label)\n .join(TagManifestLabel)\n .where(TagManifestLabel.annotated == tag_manifest)\n )\n\n if prefix_filter is not None:\n query = query.where(prefix_search(Label.key, prefix_filter))\n\n return query", "def getChemCompSysNames(self):\n dataDict = self.__dict__\n result = frozenset(y for x in self.chemComp.namingSystems for y in x.chemCompSysNames if not y.specificChemCompVars).union(self.specificSysNames)\n return result", "def atoms(self, resnum, chain_id, icode=' ', alt=' ', model_num = 0):\n return [atm for atm in self.residue(resnum, chain_id, icode, alt, model_num)]", "def list_class_names(clz, package):\n\n def isclz(obj):\n if inspect.isclass(obj):\n return issubclass(obj, clz) and not obj == clz\n return False\n\n module = importlib.import_module(package)\n\n return [name for name, _ in inspect.getmembers(module, isclz)]", "def activemodes(self):\n\t\tret_active = []\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tret_active.extend( val.active() )\n\t\treturn ret_active", "def get_all_namespaces():\n cmds.namespace(setNamespace=':')\n return cmds.namespaceInfo(listOnlyNamespaces=True, recurse=True)", "def items(self):\n return self.namespace_to_alias.items()" ]
[ "0.5786468", "0.5760434", "0.5465059", "0.54395527", "0.5285232", "0.52612984", "0.52286106", "0.5170458", "0.5095022", "0.5048037", "0.5035679", "0.5034012", "0.49822837", "0.49687123", "0.4942803", "0.49194297", "0.49104977", "0.49047342", "0.4869437", "0.48587328", "0.4852082", "0.4840675", "0.4829039", "0.4822362", "0.4811568", "0.4810619", "0.4808622", "0.4799978", "0.4799978", "0.4795981", "0.4736011", "0.47321567", "0.47297364", "0.47159797", "0.47068104", "0.47023106", "0.47023106", "0.47001618", "0.4699204", "0.4699204", "0.46985132", "0.46883008", "0.4683279", "0.46767035", "0.46759668", "0.4660091", "0.4660091", "0.4660091", "0.4660091", "0.4660091", "0.4660091", "0.4660091", "0.4660091", "0.4660091", "0.46587837", "0.46443975", "0.46411455", "0.46392143", "0.4631139", "0.46290624", "0.4622316", "0.46176785", "0.46157855", "0.4614827", "0.46086472", "0.46072483", "0.46072483", "0.4603554", "0.4601164", "0.45996723", "0.45984766", "0.45894808", "0.45860296", "0.45855227", "0.4581334", "0.45790514", "0.45771754", "0.4575187", "0.45743796", "0.45725185", "0.4569602", "0.45647654", "0.45549145", "0.45537135", "0.4548317", "0.45416355", "0.45386574", "0.45328185", "0.45292175", "0.452912", "0.452684", "0.452684", "0.45188373", "0.45180884", "0.45157114", "0.45119992", "0.45082745", "0.45044893", "0.44977933", "0.44952616" ]
0.45080042
97
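A minimal usage sketch for the record above. It is hedged: `MockAtom` and the `covalence_radius` table are illustrative assumptions; the real ATOM.atom class and the module-level radius lookup used by the original code are not part of this record.

import numpy as np

covalence_radius = {'C': 0.77, 'H': 0.37, 'O': 0.73}   # assumed covalent radii in Angstrom

class MockAtom:
    # Hypothetical stand-in exposing only the attributes the function reads.
    def __init__(self, name, element, cart):
        self.name = name
        self.element = element
        self.cart = np.asarray(cart, dtype=float)
        self.partner = []

def get_framework_neighbours(atom, useH=True):
    # copied from the document field of this record
    neighbourlist = []
    for atom2 in atom.partner[:5]:
        if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(
                covalence_radius[atom2.element]) + .1:
            if not 'H' == atom2.element or useH:
                neighbourlist.append(atom2)
    return neighbourlist

c1 = MockAtom('C1', 'C', [0.0, 0.0, 0.0])
h1 = MockAtom('H1', 'H', [0.0, 0.0, 1.09])   # within the C-H cutoff (0.77 + 0.37 + 0.1 = 1.24)
o1 = MockAtom('O1', 'O', [0.0, 0.0, 3.00])   # beyond the C-O cutoff (0.77 + 0.73 + 0.1 = 1.60)
c1.partner = [h1, o1]

print([a.name for a in get_framework_neighbours(c1)])               # ['H1']
print([a.name for a in get_framework_neighbours(c1, useH=False)])   # []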
Reads the measured ADP from the xd.res file. The parameters are stored in atom.adp['frac_meas'] and atom.adp['cart_meas']
def read_meas_adp(data, path='xd.res', use='meas'):
    use2 = 'frac_' + use
    switch = False
    filepointer = open(path, 'r')
    atomname = None
    for line in filepointer:
        if switch:
            split = [i for i in line.split(' ') if len(i) > 0]
            if not len(split) == 6:
                print('WARNING!!! Inconsistend number of floats while\
 reading measured ADP.')
            data['exp'][atomname].adp[use2] = split
            switch = False
        if '(' in line:
            split = [i for i in line.split(' ') if len(i) > 0]
            if split[0][-1] == ')':
                switch = True
                atomname = split[0]
    use = 'cart_' + use
    for atom in data['exp'].atoms:
        # if use == 'cart_neut': print(atom)
        atom.adp[use] = rotate_adp2(atom.adp[use2], atom.molecule.frac2cartmatrix, atom.molecule.cell)
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def R_adp(data):\n printer('S_adp = ?')\n printer('R_adp = | (U_iso_xxx - U_iso_obs) / U_iso_obs |')\n printer('mean = sum((U_iso_xxx - U_iso_obs) / U_iso_obs) / n')\n printer('abs = sum(R_adp) / n\\n')\n printer('(geometric mean is used)\\n')\n\n printer(' | ADP_calc / ADP_obs | APD_tls / ADP_obs')\n printer(' |--------------------|-------------------')\n printer(' Atom | S_adp | R_adp | S_adp | R_adp')\n printer(' ===============================================')\n S_sum = []\n R_sum = []\n S_sum_tls = []\n R_sum_tls = []\n for atom in data['exp'].atoms:\n if not atom.element == 'H':\n U_rel_calc = cg.Uiso(atom.adp['cart_sum'])\n U_rel_obs = cg.Uiso(atom.adp['cart_meas'])\n R_adp = (U_rel_calc - U_rel_obs) / U_rel_obs\n R_sum.append(R_adp)\n S_adp = ws06(atom.adp['cart_sum'], atom.adp['cart_meas'])\n S_sum.append(S_adp)\n\n U_rel_tls = cg.Uiso(atom.adp['cart_ext'])\n R_tls = (U_rel_tls - U_rel_obs) / U_rel_obs\n R_sum_tls.append(R_tls)\n\n S_tls = ws06(atom.adp['cart_ext'], atom.adp['cart_meas'])\n S_sum_tls.append(S_tls)\n\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format(atom.name,\n S_adp,\n abs(R_adp),\n S_tls,\n abs(R_tls)))\n\n printer(' ------|----------|---------|----------|--------')\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format('mean',\n np.mean(S_sum),\n np.mean(R_sum),\n np.mean(S_sum_tls),\n np.mean(R_sum_tls)))\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format('abs',\n np.mean(S_sum),\n np.mean([abs(i) for i in R_sum]),\n np.mean(S_sum_tls),\n np.mean(\n [abs(i) for i in R_sum_tls])))\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format('SD',\n np.std(S_sum),\n np.std(R_sum),\n np.std(S_sum_tls),\n np.std(R_sum_tls)))\n if config.arg('correlate'):\n printer('\\n\\'mean R_adp (ADP_calc / ADP_obs)\\' can be\\ninterpreted as the ratio 1 - (ADP_int / ADP_obs).')\n else:\n printer('\\n\\'mean R_adp (ADP_tls / ADP_obs)\\' can be\\ninterpreted as the ratio 1 - (ADP_obs / ADP_int).')", "def readAD(self):\n\n fname = self.ad_file\n print \"reading ad file \", fname, \" curdir = \", os.getcwd()\n try:\n fh = open(fname,'r')\n self.lines_ad = fh.readlines()\n fh.close()\n except:\n sys.stdout.write (\"Error opening {:}\\n\".format(fname))\n return 0\n\n for i in range(len(self.lines_ad)):\n ln = self.lines_ad[i].split() \n if (len(ln) >1):\n if (ln[1] == \"NumFoil\"):\n self.nSeg = int(ln[0])\n break\n if (ln[1] == \"WindFile\" and self.wind_file == None):\n self.wind_file = ln[0][1:-1]\n self.af_dict = {}\n self.af_dict['polar_idx'] = [0]*self.nSeg\n self.af_dict['polar_files'] = [0]*self.nSeg\n print \"ln, nSeg, i\", ln, self.nSeg, i\n for j in range(self.nSeg):\n lnidx = i+1+j\n ln = self.lines_ad[lnidx].split()\n afpath = fix_path(ln[0].strip().strip(\"\\\"\").strip(\"\\'\"))\n ln[0] = \"\\\"%s\\\"\" % afpath\n self.lines_ad[lnidx] = unsplit(ln)\n self.af_dict['polar_idx'][j] = j+1\n self.af_dict['polar_files'][j] = afpath", "def ADF(self, dP, ax):\n from scipy.special import sph_harm\n ang = self._ang_part(dP)\n #scipy defines their harmonics to have `theta` be azimuthal, which is\n #opposite from physics.\n #we set $m = 0$ so that the azimuthal part doesn't contribute at all.\n result = np.zeros(len(ax))\n for l, p in ang.items():\n Ylm = sph_harm(0, l, 0, ax)*np.sqrt(2*l+1)\n #We are interested in the c* c of this value, which is multiplied\n #together to get pissnnl.\n result += p*np.sqrt(np.absolute(Ylm*Ylm.conjugate()))\n return result", "def _update_adp_calculation(self, Temp):\n from sys 
import stdout\n\n self.printer('\\n ...calculating ADPs...\\n')\n\n import time\n\n start = time.time()\n\n daba_counter = 0.\n max_counter = float(len(self.keys()))\n for molecule in self.keys():\n daba_counter += 1.\n\n pstate = daba_counter / max_counter\n pstate = int(58 * pstate)\n bar = '[' + pstate * '#' + (58 - pstate) * '-' + ']'\n print ' | {}\\r'.format(bar),\n stdout.flush()\n\n try:\n self[molecule].get_adp(Temp)\n\n except KeyError:\n self.errorlog.write('Error: No ADP calculated by atom.get_adp() for {}.'.format(molecule))\n end = time.time()\n self.printer('\\n\\n Time used for ADP calculation: {:5.3f} sec on {} CPUs'.format(end - start, 1))", "def ADP (self):", "def read_raw_data(self, meas_name=''):\n if meas_name:\n self.selected_measure = meas_name\n else:\n meas_name = self.selected_measure\n\n is_big_endian = self._pna.data_endianess == 'big'\n data_request = 'CALCulate{}:DATA? SDATA'.format(self._channel)\n if self._pna.data_format == 'REAL,+32':\n data = self._pna.query_binary_values(data_request, datatype='f',\n is_big_endian=is_big_endian,\n container=np.ndarray)\n\n elif self._pna.data_format == 'REAL,+64':\n data = self._pna.query_binary_values(data_request, datatype='d',\n is_big_endian=is_big_endian,\n container=np.ndarray)\n\n elif self._pna.data_format == 'ASC,+0':\n data = self._pna.query_ascii_values(data_request, converter='f',\n container=np.ndarray)\n\n else:\n raise InstrIOError(cleandoc('''Agilent PNA did not return the\n channel {} formatted data for meas {}'''.format(\n self._channel, meas_name)))\n\n return data[::2] + 1j*data[1::2]", "def getMeasurement(self):\n if(ADConverterSettings.useRealAD):\n print \"Real AD not activated\"\n #return self.adcdac.read_adc_voltage(1)\n else:\n return self.__readMeasurementFromFile()", "def calc_dda(self, feedrate, spm):\n\n second_const = 60\n micro_second_const = 1000000\n #dda = micro_second_const / (feedrate * spm)\n dda = second_const * micro_second_const / (feedrate * spm) #Assuming feedrate in mm/min\n return dda", "def format_gcc_dsp(self):\n result_x, doa_from_file = self.load_audio()\n\n return result_x, doa_from_file", "def _transfer_adp(self):\n toleratedAtoms = []\n for atom in self['exp'].atoms:\n tolerated = atom.transfer_adp()\n if tolerated:\n toleratedAtoms.append(tolerated)\n for atom in toleratedAtoms:\n atom.averageADP()", "def process_point_measurement(procstatus, dscfg, radar_list=None):\n if procstatus != 1:\n return None, None\n\n for datatypedescr in dscfg['datatype']:\n radarnr, datagroup, datatype, dataset, product = get_datatype_fields(\n datatypedescr)\n break\n field_name = get_fieldname_pyart(datatype)\n ind_rad = int(radarnr[5:8])-1\n if ((radar_list is None) or (radar_list[ind_rad] is None)):\n warn('ERROR: No valid radar')\n return None, None\n radar = radar_list[ind_rad]\n\n if field_name not in radar.fields:\n warn('Unable to extract point measurement information. ' +\n 'Field not available')\n return None, None\n\n projparams = dict()\n projparams.update({'proj': 'pyart_aeqd'})\n projparams.update({'lon_0': radar.longitude['data']})\n projparams.update({'lat_0': radar.latitude['data']})\n\n if dscfg['latlon']:\n lon = dscfg['lon']\n lat = dscfg['lat']\n alt = dscfg['alt']\n x, y = pyart.core.geographic_to_cartesian(lon, lat, projparams)\n\n if not dscfg['truealt']:\n ke = 4./3. # constant for effective radius\n a = 6378100. # earth radius\n re = a * ke # effective radius\n\n elrad = dscfg['ele'] * np.pi / 180.\n r_ground = np.sqrt(x ** 2. 
+ y ** 2.)\n r = r_ground / np.cos(elrad)\n alt_radar = radar.altitude['data']+np.sqrt(\n r ** 2. + re ** 2. + 2. * r * re * np.sin(elrad)) - re\n alt_radar = alt_radar[0]\n else:\n alt_radar = dscfg['alt']\n\n r, az, el = pyart.core.cartesian_to_antenna(\n x, y, alt_radar-radar.altitude['data'])\n r = r[0]\n az = az[0]\n el = el[0]\n else:\n r = dscfg['rng']\n az = dscfg['azi']\n el = dscfg['ele']\n\n x, y, alt = pyart.core.antenna_to_cartesian(r, az, el)\n lon, lat = pyart.core.cartesian_to_geographic(x, y, projparams)\n\n d_az = np.min(np.abs(radar.azimuth['data'] - az))\n if d_az > dscfg['AziTol']:\n warn(' No radar bin found for point (az, el, r):(' +\n str(az)+', '+str(el)+', '+str(r) +\n '). Minimum distance to radar azimuth '+str(d_az) +\n ' larger than tolerance')\n return None, None\n\n d_el = np.min(np.abs(radar.elevation['data'] - el))\n if d_el > dscfg['EleTol']:\n warn(' No radar bin found for point (az, el, r):(' +\n str(az)+', '+str(el)+', '+str(r) +\n '). Minimum distance to radar elevation '+str(d_el) +\n ' larger than tolerance')\n return None, None\n\n d_r = np.min(np.abs(radar.range['data'] - r))\n if d_r > dscfg['RngTol']:\n warn(' No radar bin found for point (az, el, r):(' +\n str(az)+', '+str(el)+', '+str(r) +\n '). Minimum distance to radar range bin '+str(d_r) +\n ' larger than tolerance')\n return None, None\n\n ind_ray = np.argmin(np.abs(radar.azimuth['data'] - az) +\n np.abs(radar.elevation['data'] - el))\n ind_r = np.argmin(np.abs(radar.range['data'] - r))\n\n val = radar.fields[field_name]['data'].data[ind_ray, ind_r]\n time = num2date(radar.time['data'][ind_ray], radar.time['units'],\n radar.time['calendar'])\n\n # prepare for exit\n new_dataset = dict()\n new_dataset.update({'value': val})\n new_dataset.update({'datatype': datatype})\n new_dataset.update({'time': time})\n new_dataset.update(\n {'point_coordinates_WGS84_lon_lat_alt': [lon, lat, alt]})\n new_dataset.update({'antenna_coordinates_az_el_r': [az, el, r]})\n new_dataset.update(\n {'used_antenna_coordinates_az_el_r': [radar.azimuth['data'][ind_ray],\n radar.elevation['data'][ind_ray],\n radar.range['data'][ind_r]]})\n\n return new_dataset, ind_rad", "def read_dip(fname, verbose=None):\n dipole = read_dipole(fname)\n return (dipole.times * 1000., dipole.pos, dipole.amplitude,\n 1e9 * dipole.ori * dipole.amplitude[:, np.newaxis], dipole.gof)", "def AD(self, using, dx=0.0001, vmin=0.005, vmax=0.995):\n pits = np.array(self.PIT(using=using,dx=dx))\n mask = (pits>vmin) & (pits<vmax)\n ad_result = skgof.ad_test(pits[mask], stats.uniform())\n return ad_result.statistic, ad_result.pvalue", "def read_formatted_data(self, meas_name=''):\n if meas_name:\n self.selected_measure = meas_name\n else:\n meas_name = self.selected_measure\n\n is_big_endian = self._pna.data_endianess == 'big'\n data_request = 'CALCulate{}:DATA? 
FDATA'.format(self._channel)\n if self._pna.data_format == 'REAL,+32':\n data = self._pna.query_binary_values(data_request, datatype='f',\n is_big_endian=is_big_endian,\n container=np.ndarray)\n\n elif self._pna.data_format == 'REAL,+64':\n data = self._pna.query_binary_values(data_request, datatype='d',\n is_big_endian=is_big_endian,\n container=np.ndarray)\n\n elif self._pna.data_format == 'ASC,+0':\n data = self._pna.query_ascii_values(data_request, converter='f',\n container=np.ndarray)\n\n else:\n raise InstrIOError(cleandoc('''Agilent PNA did not return the\n channel {} formatted data for meas {}'''.format(\n self._channel, meas_name)))\n\n return data", "def ADFs(self, resolution=100, catom=False):\n if resolution not in self._adfs:\n self._adfs[resolution] = ADFCollection.from_soap(self, resolution, catom)\n return self._adfs[resolution]", "def get_adx(data):\n if data is None:\n raise EmptyDataError('[!] Invalid data value')\n\n result = TA.ADX(data)\n if result is None:\n raise IndicatorException\n return result", "def read_apr(self, lexclude=[], discontinuity=None, rename=None, verbose=False):\n###############################################################################\n \n import pyacs.lib.astrotime\n from pyacs.sol.gpoint import Gpoint\n\n # DEAL WITH RENAME IF PROVIDED\n \n if rename is not None:\n \n if verbose:print(\"-- Rename info provided for apr file: \", self.name)\n\n H_rename = {}\n\n # Case for a CODE rename applying for all SINEX files\n if 'all' in rename:\n \n for (code, new_code) in rename['all']:\n H_rename[code] = new_code\n \n # Case for a CODE rename applying for the current SINEX\n \n if self.name in list(rename.keys()):\n\n for (code, new_code) in rename[self.name]:\n H_rename[code] = new_code\n \n # READING APR FILE\n \n if verbose:\n print('-- Reading Globk apr file ', self.name)\n\n try:\n APR_VALUE = np.genfromtxt(self.name, comments='#', usecols=(1,2,3,4,5,6,7,8,9,10,11,12,12))\n APR_NAME = np.genfromtxt(self.name, comments='#', usecols=(0), dtype=str)\n except:\n print('!!!ERROR: could not read Globk format apr file:' , self.name)\n import sys\n sys.exit()\n \n for i in np.arange( APR_VALUE.shape[0]) :\n print('-- processing ', APR_NAME[i][:4])\n [x,y,z,sx,sy,sz,epoch, vx,vy,vz,svx,svy,svz]= APR_VALUE[i,:]\n M=Gpoint(X=x,Y=y,Z=z,\\\n SX=sx,SY=sy,SZ=sz,\\\n VX=vx,VY=vy,VZ=vz,SVX=svx,SVY=svy,SVZ=svz, \\\n epoch=epoch,code=APR_NAME[i][:4],pt='A',soln=1)\n \n self.estimates[ APR_NAME[i][:4], 1 ] = M", "def _read_arf(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n\n return data['energ_lo'], data['energ_hi'], data['specresp']", "def read_vmdas(self,):\n fd = self.f\n # The raw files produced by VMDAS contain a binary navigation data\n # block.\n self.cfg['sourceprog'] = 'VMDAS'\n ens = self.ensemble\n k = ens.k\n if self._source != 1 and self._debug_level >= 1:\n print(' \\n***** Apparently a VMDAS file \\n\\n')\n self._source = 1\n self.vars_read += ['time_gps',\n 'latitude_gps',\n 'longitude_gps',\n 'etime_gps',\n 'elatitude_gps',\n 'elongitude_gps',\n 'flags',\n 'ntime', ]\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[2] + utim[3] * 256, utim[1], utim[0])\n # This byte is in hundredths of seconds (10s of milliseconds):\n time = tmlib.timedelta(milliseconds=(int(fd.read_ui32(1) / 10)))\n fd.seek(4, 1) # \"PC clock offset from UTC\" - clock drift in ms?\n ens.time_gps[k] = tmlib.date2epoch(date + time)[0]\n ens.latitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.longitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.etime_gps[k] = 
tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) * 10)))[0]\n ens.elatitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.elongitude_gps[k] = fd.read_i32(1) * self._cfac\n fd.seek(12, 1)\n ens.flags[k] = fd.read_ui16(1)\n fd.seek(6, 1)\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[0] + utim[1] * 256, utim[3], utim[2])\n ens.ntime[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) / 10)))[0]\n fd.seek(16, 1)\n self._nbyte = 2 + 76", "def get_all_DLP_measurements(self):\n pass", "def Getdxdparam(Mda,Mdb,Xa):\n\n Xb = Xa.copy()\n #Xb[iulag] = Xa[iulag] + (1-Xa[iq]*Xa[iM])*(Mdb.ubar-Mda.ubar)\n Xb[Mdb.nX:Mdb.nXY] = Mdb.F(Xb[Mdb.interpstates])\n Xb[Mdb.nXY:] = Mdb.Static(Xb)\n\n if CLArgs.param == \"b\":\n D = Mdb.b() - Mda.b()\n else:\n D = Mdb.tau - Mda.tau\n\n return (Xb[iM] - Xa[iM])/D", "def explore_FAAM_aerosol_data():\n # -- PCASP\n dsPCASP = get_FAAM_mineral_dust_calibration(instrument='PCASP',\n rtn_values=False)\n # -- CDP\n dsCDP = get_FAAM_mineral_dust_calibration(instrument='CDP',\n rtn_values=False)\n # only consider \"potential dust\" above a certain size?\n # Use 100 um for now", "def read_adas(self):\n for name in self.files_atte:\n self.beam_atte.append(adas.ADAS21(name))\n for name in self.files_emis:\n self.beam_emis.append(adas.ADAS22(name))", "def ADF(self, ax, catom=False):\n if catom:\n return self._get_DF(ax, \"cADF\", \"ax\", catom=True)\n else:\n return self._get_DF(ax, \"nADF\", \"ax\", catom=False)", "def atmparamread(filename):\n f = open(filename, 'r')\n f.readline()\n line = f.readline()\n #Td = float(line.split()[0])\n #Pd = float(line.split()[1])\n #Mc = float(line.split()[2])\n #rc = float(line.split()[3])\n n = int(line.split()[0])\n f.readline()\n atm = 0*numpy.ndarray(shape=(n, ncol), dtype=float)\n S = 0*numpy.ndarray(shape=(n), dtype=float)\n for i in range(n):\n line = f.readline()\n S[i] = float(line.split()[0])\n for j in range(ncol ):\n atm[i, j] = float(line.split()[j+1])\n f.close()\n return atm, S", "def get_admittance(self, param_name: list = ['Y11', 'Y21']):\n # TODO: move the plot in this analysis module. 
Renderer should recover the entire data\n return self.renderer.plot_params(param_name)", "def RDF(self, dP, rx, fast=True):\n parts = np.zeros((len(dP), len(rx)))\n for i, dPi in enumerate(dP):\n w = np.sign(dPi[1])*np.sqrt(np.sqrt(np.abs(dPi[1])))\n parts[i,:] = w*self.apnl(dPi, rx, fast=fast)\n return np.sum(parts, axis=0)", "def XPLMGetDatad(inDataRef):\n return float", "def read_results():\r\n with open(\"packing.nfo\", \"r\") as fin:\r\n fin.readline()\r\n fin.readline()\r\n por_theory = float(fin.readline().split()[2])\r\n por_final = float(fin.readline().split()[2])\r\n print('Theoretical porosity:', por_theory)\r\n print('Final porosity:', por_final)\r\n with open(\"packing.xyzd\", \"rb\") as fin:\r\n btxt = fin.read()\r\n txt = list(struct.unpack(\"<\" + \"d\" * (len(btxt) // 8), btxt))\r\n data = array(zip(*[iter(txt)] * 4))\r\n data[:, 3] = data[:, 3] * \\\r\n ((1 - por_final) / (1 - por_theory))**(1 / 3)\r\n return data", "def handle_dpad(self):\n # pylint: disable=no-member\n x_raw = self.microbit.accelerometer.get_x()\n y_raw = self.microbit.accelerometer.get_y()\n minus_sens = self.sensitivity * -1\n if x_raw < minus_sens:\n x_state = ('Absolute', 0x10, -1)\n elif x_raw > self.sensitivity:\n x_state = ('Absolute', 0x10, 1)\n else:\n x_state = ('Absolute', 0x10, 0)\n\n if y_raw < minus_sens:\n y_state = ('Absolute', 0x11, -1)\n elif y_raw > self.sensitivity:\n y_state = ('Absolute', 0x11, 1)\n else:\n y_state = ('Absolute', 0x11, 1)\n\n return x_state, y_state", "def get_adcs(self):\n self.clear_in_serial_buffer()\n s = \"/ADC/run\\n\"\n self.serial.write(s)\n resp = self.serial.readline()\n regex = re.compile(\"(?:[ADC])+ \"\n \"(?P<adc0>[0-9A-Fa-f]*), \"\n \"(?P<adc1>[0-9A-Fa-f]*)\")\n m = regex.match(resp)\n adcval0 = int(m.group('adc0'),16) * self.conf['ADCCONST0']\n adcval1 = int(m.group('adc1'),16) * self.conf['ADCCONST1']\n return adcval0, adcval1", "def ADP_trace(adp):\n return sum(adp[:3])", "def parse_file(axmlfile, **kwargs):\n adm = ADM()\n from .common_definitions import load_common_definitions\n load_common_definitions(adm)\n load_axml_file(adm, axmlfile, **kwargs)\n return adm", "def rdspecdat(self):\n # TODO : ugh. this is crude. 
Should have some checks for file format\n # and probably better to use the astropy.io functions now.\n try:\n w, f, e = np.loadtxt(self.filename, unpack=True)\n except:\n w, f = np.loadtxt(self.filename, unpack=True)\n e = []", "def read_rdi(fname, userdata=None, nens=None, debug=0):\n # Reads into a dictionary of dictionaries using netcdf naming conventions\n # Should be easier to debug\n with _RdiReader(fname, debug_level=debug) as ldr:\n dat = ldr.load_data(nens=nens)\n\n # Read in userdata\n userdata = _find_userdata(fname, userdata)\n for nm in userdata:\n dat['attrs'][nm] = userdata[nm]\n\n if 'time_gps' in dat['coords']:\n # GPS data not necessarily sampling at the same rate as ADCP DAQ.\n dat = _remove_gps_duplicates(dat)\n\n # Create xarray dataset from upper level dictionary\n ds = _create_dataset(dat)\n ds = _set_coords(ds, ref_frame=ds.coord_sys)\n\n # Create orientation matrices\n if 'beam2inst_orientmat' not in ds:\n ds['beam2inst_orientmat'] = xr.DataArray(_calc_beam_orientmat(\n ds.beam_angle,\n ds.beam_pattern == 'convex'),\n coords={'x': [1, 2, 3, 4],\n 'x*': [1, 2, 3, 4]},\n dims=['x', 'x*'])\n\n if 'orientmat' not in ds:\n ds['orientmat'] = xr.DataArray(_calc_orientmat(ds),\n coords={'earth': ['E', 'N', 'U'],\n 'inst': ['X', 'Y', 'Z'],\n 'time': ds['time']},\n dims=['earth', 'inst', 'time'])\n\n # Check magnetic declination if provided via software and/or userdata\n _set_rdi_declination(ds, fname, inplace=True)\n\n # VMDAS applies gps correction on velocity in .ENX files only\n if fname.rsplit('.')[-1] == 'ENX':\n ds.attrs['vel_gps_corrected'] = 1\n else: # (not ENR or ENS) or WinRiver files\n ds.attrs['vel_gps_corrected'] = 0\n\n # Convert time coords to dt64\n t_coords = [t for t in ds.coords if 'time' in t]\n for ky in t_coords:\n dt = tmlib.epoch2dt64(ds[ky])\n ds = ds.assign_coords({ky: dt})\n\n # Convert time vars to dt64\n t_data = [t for t in ds.data_vars if 'time' in t]\n for ky in t_data:\n dt = tmlib.epoch2dt64(ds[ky])\n ds[ky].data = dt\n\n return ds", "def plot_pade_figure(self):\n data_analysis = DatabaseData(dataframe=self.plot_data)\n print (data_analysis.dataframe.columns)\n data_analysis.run_pade_through_R(rscript='birch',get_inits_ev=True)\n data_analysis.create_precisions()\n data_analysis.extract_pade_curve()\n x_eos_kpts, y_eos, xs_err, ys_err, x_pade_kpts, y_pade = \\\n data_analysis.create_pade_bokeh_compat(properties=self.properties)\n print (type(self.properties), self.properties)\n if self.properties == 'B':\n ext = data_analysis.Bp\n print ('HERE AT PROPERTIES', ext, type(ext))\n elif self.properties == 'BP':\n ext = data_analysis.BPp\n elif self.properties == 'E0':\n ext = data_analysis.E0p\n elif self.properties == 'V0':\n ext = data_analysis.V0p\n p = figure(plot_height=400, plot_width=400,tools=\"pan,wheel_zoom,box_zoom,reset,previewsave\",\\\n x_axis_type=\"log\", x_axis_label='K-points per atom', title='Pade Extrapolate of {0} is {1}'.format(self.properties, str(ext)) )\n p.xaxis.axis_label = 'K-points per atom'\n p.line(x_pade_kpts, y_pade, color='red')\n p.circle(x_eos_kpts, y_eos,color='blue',size=5, line_alpha=0)\n p.multi_line(xs_err, ys_err, color='black')\n if self.properties == 'B':\n p.yaxis.axis_label = 'Bulk Modulus B (GPa)'\n elif self.properties == 'dB':\n p.yaxis.axis_label = 'Bulk Modulus Pressure Derivative'\n elif self.properties == 'E0':\n p.yaxis.axis_label = 'DFT Energy (eV/atom)'\n elif self.properties == 'V0':\n p.yaxis.axis_label = 'Volume (A^3/atom)'\n\n return p", "def _get_A_rdm(self, p):\n\n return 
rdm_graph(self.G, nodelist=self.nodelist, percent=p)", "def read_log_attitude(ac_id, filename):\n f = open(filename, 'r')\n pattern = re.compile(\"(\\S+) \"+ac_id+\" ATTITUDE (\\S+) (\\S+) (\\S+)\")\n list_meas = []\n while True:\n line = f.readline().strip()\n if line == '':\n break\n m = re.match(pattern, line)\n if m:\n list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4))])\n return np.array(list_meas)", "def ReadMeasurements(test_result):\n try:\n artifact = test_result['outputArtifacts']['measurements.json']\n except KeyError:\n return {}\n with open(artifact['filePath']) as f:\n return json.load(f)['measurements']", "def read_log_airdata(ac_id, filename):\n f = open(filename, 'r')\n pattern = re.compile(\"(\\S+) \"+ac_id+\" AIR_DATA (\\S+) (\\S+) (\\S+) (\\S+) (\\S+) (\\S+) (\\S+)\")\n list_meas = []\n while True:\n line = f.readline().strip()\n if line == '':\n break\n m = re.match(pattern, line)\n if m:\n list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5)), float(m.group(6)), \n float(m.group(7)), float(m.group(8))])\n return np.array(list_meas)", "def ad(path_to_inputs, request):\n filename = request.param\n path = os.path.join(path_to_inputs, filename)\n\n if os.path.exists(path):\n ad = astrodata.open(path)\n else:\n raise FileNotFoundError(path)\n\n return ad", "def get_dacs(self):\n self.clear_in_serial_buffer()\n s = \"/DAC/run\\n\"\n self.serial.write(s)\n resp = self.serial.readline()\n regex = re.compile(\"(?:[DAC])+ \"\n \"(?P<dac0>[0-9A-Fa-f]*), \"\n \"(?P<dac1>[0-9A-Fa-f]*)\")\n m = regex.match(resp)\n dacval0 = int(m.group('dac0'),16) * self.conf['DACCONST0']\n dacval1 = int(m.group('dac1'),16) * self.conf['DACCONST1']\n return dacval0, dacval1", "def read_raw_data(self, measname):\n #stop continuous measure mode\n self.write('INIT:CONT OFF')\n #start new measurement and wait before continuing with the commands\n self.write('INIT;*WAI')\n #get sweep data\n trace = measname[1]\n data_request = 'TRAC? TRACE{}'.format(trace)\n data = np.array(self.ask(data_request).split(','),dtype = float)\n \n if list(data):\n return data\n else:\n raise InstrIOError(cleandoc('''Rhode&Schwarz PSA did not return the\n data for trace {}'''.format(trace)))", "def Get_Meas_Res_Average(self, mode, ch=1):\n rdStr = self.query(f':MEAS{ch}:RES:AVG? 
{mode}')\n return rdStr", "def _update_H_ADP(self):\n # return\n for atom in self.invarioms:\n if (not 'cart_meas' in atom.adp.keys() or len(atom.adp['cart_meas']) == 1) and atom.molecule.name == 'exp':\n atom.update_H_ADP()", "def test_acdi(self):\n q = qufilab.acdi(self.close, self.high, self.low, self.volume)\n t = talib.AD(self.high, self.low, self.close, self.volume)\n np.testing.assert_allclose(q, t, rtol = self.tolerance)", "def read_log_adc_generic(ac_id, filename):\n f = open(filename, 'r')\n pattern = re.compile(\"(\\S+) \"+ac_id+\" ADC_GENERIC (\\S+) (\\S+)\")\n list_meas = []\n while True:\n line = f.readline().strip()\n if line == '':\n break\n m = re.match(pattern, line)\n if m:\n list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3))])\n return np.array(list_meas)", "def computeLstFromDada(filename):\n d = dada.DadaReader(filename, n_int=0)\n\n telescope = d.header[\"TELESCOPE\"]\n if telescope in ('LEDA', 'LWAOVRO', 'LWA-OVRO', 'LEDAOVRO', 'LEDA512', 'LEDA-OVRO'):\n h3(\"Data appears to be from LWAOVRO\")\n site = ledafits_config.ovro\n elif telescope in ('LWA1', 'LWA-1', 'LWA-NM', 'LWANM', 'LEDA64', 'LEDA64-NM'):\n h3(\"Data appears to be from LWA1\")\n site = ledafits_config.lwa1\n\n dt_obj = datetime.strptime(d.header[\"UTC_START\"], \"%Y-%m-%d-%H:%M:%S\") \n tsamp = float(d.header[\"TSAMP\"]) * 1e-6 # Sampling time per channel, in microseconds \n navg = int(d.header[\"NAVG\"]) # Number of averages per integration \n int_tim = tsamp * navg # Integration time is tsamp * navg \n \n byte_offset = int(d.header[\"OBS_OFFSET\"]) \n bytes_per_avg = int(d.header[\"BYTES_PER_AVG\"]) \n num_int = byte_offset / bytes_per_avg \n time_offset = num_int * int_tim \n \n pat = '(\\d+)-(\\d+)-(\\d+)[-_](\\d\\d)[:h](\\d\\d)[:m](\\d\\d)$'\n\n match = re.search(pat, d.header[\"UTC_START\"])\n if match:\n # Convert re match to integers, apart from file extension\n #(y, m, d, hh, mm, ss) = [int(m) for m in match.groups()[:-1]]\n dt = dt_obj + timedelta(seconds=time_offset)\n site.date = dt\n lst = site.sidereal_time()\n date_str = \"%04d%02d%02d\"%(dt.year,dt.month,dt.day)\n time_str = \"%02d%02d%02d\"%(dt.hour,dt.minute,dt.second)\n lst_str = str(float(lst) / 2 / np.pi * 24)\n #print lst\n #print lst_str \n #lst = str(lst).split(\":\")\n #lst_str = \"%s%s%s\"%(lst[0], lst[1], lst[2].split(\".\")[0])\n \n printRed( \"UTC START: %s\"%d.header[\"UTC_START\"] )\n printRed( \"TIME OFFSET: %s\"%timedelta(seconds=time_offset))\n printRed( \"NEW START: (%s, %s)\"%(date_str, time_str) )\n \n return date_str, time_str, lst_str\n else:\n print filename\n raise Exception(\"DadaToSiderealError\")", "def _loadData(self):\n self.d = read_ac_data.read_ac_data_wrapper(self.sc_id, self.date,\n dType='10Hz')\n return", "def EstimatePeriodAndPlot(ID,\n f_in=\"../data/mira_asas/\",\n f_out=\"diag_figs/mira_plots/\"):\n print ID\n star = np.loadtxt(f_in + ID + \".dat\",\n usecols=(0,1,2),skiprows=0)\n ctimes = star[star[:,1] > 29.5,0]\n star = star[star[:,1] < 29.5,:]\n cvals = np.array(np.max(star[:,1])) * np.ones(ctimes.shape[0])\n ## estimate period\n freqs = lomb.get_freqs2(star[:,0])\n rss = lomb.lomb(star[:,0],star[:,1],star[:,2],freqs)\n period = 1. 
/ freqs[np.argmin(rss)]\n ## make figure\n fig = plt.figure()\n ax = fig.add_subplot(211)\n ax.plot(star[:,0],-star[:,1],'o',color=\"gray\",alpha=.5)\n ax.plot(ctimes,-cvals,'ro',alpha=.5)\n ax.set_yticklabels(np.abs(ax.get_yticks()))\n ax.set_xlabel('Time')\n ax.set_ylabel('Magnitude')\n ax2 = fig.add_subplot(212)\n ax2.plot(star[:,0] % period,-star[:,1],'o',color=\"gray\",alpha=.5)\n ax2.plot(ctimes % period,-cvals,'ro',alpha=.5)\n ax2.set_yticklabels(np.abs(ax2.get_yticks()))\n ax2.set_xlabel('Phase')\n ax2.set_ylabel('Magnitude')\n plt.savefig(f_out + ID + \".pdf\")\n plt.close()\n return period", "def XPLMGetDatad_f(inRefcon):\n pass", "def read_assignResourcesMeasuredDuration(self):\n # PROTECTED REGION ID(CspSubElementSubarray.assignResourcesMeasuredDuration_read) ENABLED START #\n return self._cmd_measured_duration[\"assignresources\"]\n # PROTECTED REGION END # // CspSubElementSubarray.assignResourcesMeasuredDuration_read", "def read_conversions(db):\n mpart,Lbox,rsdfac,acheck = None,None,None,None\n with open(db+\"Header/attr-v2\",\"r\") as ff:\n for line in ff.readlines():\n mm = re.search(\"MassTable.*\\#HUMANE\\s+\\[\\s*0\\s+(\\d*\\.\\d*)\\s*0+\\s+0\\s+0\\s+0\\s+\\]\",line)\n if mm != None:\n mpart = float(mm.group(1)) * 1e10\n mm = re.search(\"BoxSize.*\\#HUMANE\\s+\\[\\s*(\\d+)\\s*\\]\",line)\n if mm != None:\n Lbox = float(mm.group(1))\n mm = re.search(\"RSDFactor.*\\#HUMANE\\s+\\[\\s*(\\d*\\.\\d*)\\s*\\]\",line)\n if mm != None:\n rsdfac = float(mm.group(1))\n mm = re.search(\"ScalingFactor.*\\#HUMANE\\s+\\[\\s*(\\d*\\.\\d*)\\s*\\]\",line)\n if mm != None:\n acheck = float(mm.group(1))\n if (mpart is None)|(Lbox is None)|(rsdfac is None)|(acheck is None):\n print(mpart,Lbox,rsdfac,acheck)\n raise RuntimeError(\"Unable to get conversions from attr-v2.\")\n if np.abs(acheck-aa)>1e-4:\n raise RuntimeError(\"Read a={:f}, expecting {:f}.\".format(acheck,aa))\n return(rsdfac)\n #", "def atmprofileread(filename):\n f = open(filename, 'r')\n line1 = f.readline()\n Nst = int(line1.split()[-1])\n line = f.readline()\n Np = int(line.split()[1])\n atm = 0*numpy.ndarray(shape=(Nst, Np, 5), dtype=float)\n S = 0*numpy.ndarray(shape=(Nst), dtype=float)\n f = open(filename, 'r')\n f.readline()\n for i in range(Nst):\n line = f.readline()\n S[i] = float(line.split()[0])\n for j in range(Np):\n line = f.readline()\n for k in range(numpy.shape(atm)[-1]):\n atm[i, j, k] = float(line.split()[k])\n f.close()\n return atm, S", "def test_detrend_gradient(self):\r\n results = detrend_pcoa(input_fp=self.tmp_pc_fp,\r\n map_fp=self.tmp_map_fp, gradient_variable='Gradient',\r\n suppress_prerotate=False, output_dir=self.output_dir,\r\n HALT_EXEC=False)\r\n\r\n # check formatting of summary file\r\n lines = results['summary'].readlines()\r\n self.assertEqual(len(lines), 4)\r\n\r\n # check formatting of coords file\r\n lines = results['coords'].readlines()\r\n # ensure one line per sample in detrended pcoa\r\n self.assertEqual(len(lines), len(test_pc.split('\\n')) - 4)\r\n # ensure three columns tab delimited\r\n self.assertEqual(len(lines[0].split('\\t')), 3)\r\n\r\n # ensure that plot pdf is at least present\r\n self.assertEqual(str(type(results['plot'])), \"<type 'file'>\")", "def getMeasures():", "def get_adl(data):\n if data is None:\n raise EmptyDataError('[!] 
Invalid data value')\n\n result = TA.ADL(data)\n if result is None:\n raise IndicatorException\n return result", "def readExperiAll(varid,timeperiod,level):\n print('\\n>>>>>>>>>> Using readExperiAll function!')\n \n ### Import modules\n import numpy as np\n from netCDF4 import Dataset\n\n ###########################################################################\n ###########################################################################\n ###########################################################################\n ### Directories for Antarctic experiments (1-100 members)\n if any([timeperiod=='ANT_Fu',timeperiod=='ANT_Cu',timeperiod=='ANT_Pi']):\n if timeperiod == 'ANT_Fu':\n experi = 'PAMIP-1.8'\n directorydata = '/seley/ypeings/simu/'\n totaldirectory = directorydata + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n print('Reading in Antarctic Future Sea Ice!')\n elif timeperiod == 'ANT_Cu':\n experi = 'PAMIP-1.1-QBO'\n directorydata = '/seley/ypeings/simu/'\n totaldirectory = directorydata + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n if varid == 'SIC':\n experi = 'PAMIP_Cu' # missing SIC data in 1.1-QBO\n directorydata = '/seley/zlabe/simu/'\n totaldirectory = directorydata + experi + '/monthly/'\n filename = totaldirectory + varid + '_1701-2000.nc'\n print('Reading in Antarctic Present-Day Sea Ice!')\n elif timeperiod == 'ANT_Pi':\n experi = 'PAMIP-1.7'\n directorydata = '/seley/ypeings/simu/'\n totaldirectory = directorydata + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n print('Reading in Antarctic Pre-Industrial Sea Ice!')\n else:\n print(ValueError('Selected wrong time period!')) \n else:\n print(ValueError('Selected wrong experiment name!'))\n \n if varid == 'EGR' and level == 'surface': # integrated from 500-850 hPa\n filename = totaldirectory + varid + '_500_850.nc'\n\n ### Read in Data\n if level == 'surface': # 3d variables\n data = Dataset(filename,'r')\n lev = 'surface'\n lat = data.variables['latitude'][:]\n lon = data.variables['longitude'][:]\n varq = data.variables['%s' % varid][:]\n data.close()\n elif level == 'profile': # 4d variables\n data = Dataset(filename,'r')\n lev = data.variables['level'][:]\n lat = data.variables['latitude'][:]\n lon = data.variables['longitude'][:]\n varq = data.variables['%s' % varid][:]\n data.close()\n elif level == 'zonmean': # 3d variables (zonal mean!)\n varidz = varid + '_' + level\n filename = totaldirectory + varidz + '_1900-2000.nc'\n data = Dataset(filename,'r')\n lev = data.variables['level'][:]\n lat = data.variables['lat'][:]\n lon = data.variables['lon'][:]\n varq = data.variables['%s' % varid][:].squeeze()\n data.close()\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Read data for *%s* : %s!' % (experi[:],varid))\n\n ### Reshape to split years and months\n months = 12\n if level == 'surface': # 3d variables\n var = np.reshape(varq,(varq.shape[0]//months,months,\n int(lat.shape[0]),int(lon.shape[0])))\n elif level == 'profile': # 4d variables\n var = np.reshape(varq,(varq.shape[0]//months,months,int(lev.shape[0]),\n int(lat.shape[0]),int(lon.shape[0])))\n elif level == 'zonmean': # 3d variables (zonal mean!)\n var = np.reshape(varq,(varq.shape[0]//months,months,int(lev.shape[0]),\n int(lat.shape[0])))\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Reshaped %s array!' 
% (varid))\n \n ### Convert units\n if varid in ('TEMP','T2M'):\n var = var - 273.15 # Kelvin to degrees Celsius \n print('Completed: Changed units (K to C)!')\n elif varid == 'SWE':\n var = var*1000. # Meters to Millimeters \n print('Completed: Changed units (m to mm)!')\n \n print('Completed: Read members 1-100!')\n\n print('>>>>>>>>>> Completed: Finished readExperiAll function!')\n return lat,lon,lev,var", "def load_dwarf_info(file, titles=None):\n dwarf_specs = apascii.read(file, format='ecsv')\n\n\n if titles is None:\n titles = [f'RA: {np.round(ra, 2)}, DEC: {np.round(dec, 2)}' for (ra, dec) in zip(dwarf_specs['RA'], dwarf_specs['DEC'])]\n dwarf_pmra, dwarf_pmdec = [None]*len(titles), [None]*len(titles)\n else:\n titles = dwarf_specs['MAIN_ID']\n dwarf_pmra = dwarf_specs['PMRA']\n dwarf_pmdec = dwarf_specs['PMDEC']\n\n return np.array(titles), dwarf_pmra, dwarf_pmdec", "def read(self, command):\n return self.meas.read(command)", "def read_log_payload(ac_id, filename):\n f = open(filename, 'r')\n pattern = re.compile(\"(\\S+) \"+ac_id+\" PAYLOAD_FLOAT (\\S+),(\\S+),(\\S+),(\\S+)\")\n list_meas = []\n while True:\n line = f.readline().strip()\n if line == '':\n break\n m = re.match(pattern, line)\n if m:\n list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5))])\n return np.array(list_meas)", "def load_annual_accumulation(reanalysis):\n ds = xr.open_dataset(accumulation_period_filepath[reanalysis])\n\n # Modify coordinate names to match other files\n # This will be fixed in a later version\n if reanalysis == 'CFSR':\n print (ds)\n #ds.rename({'row': 'x', 'col': 'y'}, inplace=True)\n\n return ds", "def plot_apertures(image, ext=1):\n\n hdu = fits.open(image)\n apfile = './database/ape' + image.strip('.fits') + '_{:d}'.format(ext)\n\n b = np.array(\n [i.split()[3:] for i in open(apfile).readlines() if 'begin' in i])\n\n apid = b[:, 0]\n x = np.array([float(i) for i in b[:, 2]])\n\n sci_exts = np.array([i for i in range(len(hdu)) if hdu[i].name == 'SCI'])\n data = hdu[sci_exts[len(sci_exts)/2]].data\n\n profile = np.average(data, 1)\n\n fig = plt.figure(1)\n ax = fig.add_subplot(111)\n\n pmax = profile.max()\n\n ax.plot(np.arange(len(profile))+1, profile/pmax)\n ax.set_ylim(0, 1.1)\n\n for i, j in enumerate(apid):\n ax.annotate(j, xy=(x[i], 1), ha='center')\n ax.axvline(x[i], alpha=.3)\n\n plt.show()", "def get_movement_dda(self, cpoint, point, f, mf, spm):\n # Displacement Vector (Line 297)\n dv = self.calculate_vector_difference(point, cpoint)\n # Correct for safe feedrate\n af = self.get_safe_feedrate(dv, mf, f) #Assuming return of mm/min\n dvSteps = self.multiply_vector(dv, spm)\n la = self.find_longest_axis(dvSteps)\n vm = self.calculate_vector_magnitude(dv)\n if vm != 0:\n ff = af * (float(abs(dv[la])) / vm) #Assuming af in mm/min\n dda = self.calc_dda(ff, abs(spm[la])) #Assuming ff in mm/min\n else:\n dda = 0\n return dda", "def import_dnxhd_essence(self, path, edit_rate, tape=None, length=None, offline=False):\n\n essencedata, slot = self.create_essence(edit_rate, 'picture', offline=offline)\n\n if tape:\n slot.segment = tape\n\n # create essence descriptor\n descriptor = self.root.create.CDCIDescriptor()\n self.descriptor = descriptor\n\n # set minimal properties\n descriptor['SampleRate'].value = edit_rate\n descriptor['VideoLineMap'].value = [42, 0] # Not exactly sure what linemap is\n descriptor['ContainerFormat'].value = self.root.dictionary.lookup_containerdef(\"AAF\")\n dnxhd_codec_auid = 
AUID(\"8ef593f6-9521-4344-9ede-b84e8cfdc7da\")\n descriptor['CodecDefinition'].value = self.root.dictionary.lookup_codecdef(dnxhd_codec_auid)\n\n stream = None\n if essencedata is not None:\n # open essence stream\n stream = essencedata.open('w')\n\n # open input file\n with io.open(path, 'rb', buffering=io.DEFAULT_BUFFER_SIZE) as f:\n\n cid = None\n for i, packet in enumerate(video.iter_dnx_stream(f), 1):\n if cid is None:\n (cid, width, height, bitdepth, interlaced) = video.read_dnx_frame_header(packet)\n descriptor['StoredWidth'].value = width\n descriptor['StoredHeight'].value = height\n descriptor['ComponentWidth'].value = bitdepth\n descriptor['FrameLayout'].value = 'SeparateFields' if interlaced else 'FullFrame'\n descriptor['ImageAspectRatio'].value = \"%d/%d\" % (width, height)\n descriptor['FrameSampleSize'].value = len(packet)\n descriptor['Compression'].value = video.dnx_compression_auids[cid]\n descriptor['HorizontalSubsampling'].value = 2\n\n if stream is not None:\n stream.write(packet)\n # set descriptor and component lengths\n slot.segment.length = length or i\n descriptor.length = i\n\n return slot", "def read_SimCenter_EDP_input(input_path, EDP_kinds=('PID','PFA'), \n units = dict(PID=1., PFA=1.),\n verbose=False):\n \n # initialize the data container\n data = {}\n\n # read the collection of EDP inputs...\n # the read_csv method in pandas is sufficiently versatile to handle the\n # tabular format of dakota\n EDP_raw = pd.read_csv(input_path, sep='\\s+', header=0,\n index_col='%eval_id')\n # set the index to be zero-based\n EDP_raw.index = EDP_raw.index - 1\n\n # search the header for EDP information\n for column in EDP_raw.columns:\n for kind in EDP_kinds:\n if kind in column:\n\n if kind not in data.keys():\n data.update({kind: []})\n\n # extract info about the location, direction, and scenario\n info = column.split('-')\n \n # get the scale factor to perform unit conversion\n f_unit = units[kind]\n \n # store the data\n data[kind].append(dict(\n raw_data=(EDP_raw[column].values * f_unit).tolist(),\n location=info[2],\n direction=info[3],\n scenario_id=info[0]\n ))\n\n if verbose: pp.pprint(data)\n\n return data", "def read(self, command):\n return self.meas.read(command)", "def read(self, command):\n return self.meas.read(command)", "def calc_psd(self):\n psd2d = np.array(self.calc_psd2d())\n\n print(\"Azimuthally averaging 2D power spectral density ... \",\n end=\"\", flush=True)\n dim = self.shape[0]\n dim_half = (dim+1) // 2\n # NOTE:\n # The zero-frequency component is shifted to position of index\n # (0-based): (ceil((n-1) / 2), ceil((m-1) / 2))\n px = np.arange(dim_half-dim, dim_half)\n x, y = np.meshgrid(px, px)\n rho = np.sqrt(x**2 + y**2)\n\n radii = self.radii\n nr = len(radii)\n if nr > 100:\n print(\"\\n ... %d data points, may take a while ... \" % nr,\n end=\"\", flush=True)\n else:\n print(\" %d data points ... \" % nr, end=\"\", flush=True)\n psd1d = np.zeros(shape=(nr, 4))\n psd1d[:, 0] = self.frequencies\n\n for i, r in enumerate(radii):\n if (i+1) % 100 == 0:\n percent = 100 * (i+1) / nr\n print(\"%.1f%% ... 
\" % percent, end=\"\", flush=True)\n ii, jj = (rho <= r).nonzero()\n rho[ii, jj] = np.inf\n cells = psd2d[ii, jj]\n psd1d[i, 3] = len(cells)\n if self.meanstd:\n psd1d[i, 1] = np.mean(cells)\n psd1d[i, 2] = np.std(cells)\n else:\n median = np.median(cells)\n mad = np.median(np.abs(cells - median))\n psd1d[i, 1] = median\n psd1d[i, 2] = mad * 1.4826\n print(\"DONE\", flush=True)\n\n self.psd1d = psd1d\n return psd1d", "def read_conversions(db):\n mpart,Lbox,rsdfac,acheck = None,None,None,None\n with open(db+\"Header/attr-v2\",\"r\") as ff:\n for line in ff.readlines():\n mm = re.search(\"MassTable.*\\#HUMANE\\s+\\[\\s*0\\s+(\\d*\\.\\d*)\\s*0+\\s+0\\s+0\\s+0\\s+\\]\",line)\n if mm != None:\n mpart = float(mm.group(1)) * 1e10\n mm = re.search(\"BoxSize.*\\#HUMANE\\s+\\[\\s*(\\d+)\\s*\\]\",line)\n if mm != None:\n Lbox = float(mm.group(1))\n mm = re.search(\"RSDFactor.*\\#HUMANE\\s+\\[\\s*(\\d*\\.\\d*)\\s*\\]\",line)\n if mm != None:\n rsdfac = float(mm.group(1))\n mm = re.search(\"ScalingFactor.*\\#HUMANE\\s+\\[\\s*(\\d*\\.\\d*)\\s*\\]\",line)\n if mm != None:\n acheck = float(mm.group(1))\n if (mpart is None)|(Lbox is None)|(rsdfac is None)|(acheck is None):\n print(mpart,Lbox,rsdfac,acheck)\n raise RuntimeError(\"Unable to get conversions from attr-v2.\")\n return mpart, Lbox, rsdfac, acheck\n #", "def read_log_esc(ac_id, filename):\n f = open(filename, 'r')\n pattern = re.compile(\"(\\S+) \"+ac_id+\" ESC (\\S+) (\\S+) (\\S+) (\\S+) (\\S+) (\\S+) (\\S+)\")\n list_meas = []\n while True:\n line = f.readline().strip()\n if line == '':\n break\n m = re.match(pattern, line)\n if m:\n list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5)), float(m.group(6)), \n float(m.group(7)), float(m.group(8))])\n return np.array(list_meas)", "def extract_data( file_name):\n\n main_file = fits.open( file_name)\n\n ssp = main_file[1].data\n flux_elines = main_file[3].data\n org_hdr = main_file[0].header\n\n main_file.close()\n\n ###########################################################################\n # NOTE: The visual band fluxes are multiplied by 10^-16 as stated in the\n # units of the MaNGA Data Model.\n #\n # <https://data.sdss.org/datamodel/files/MANGA_PIPE3D/MANGADRP_VER\n # /PIPE3D_VER/PLATE/manga.Pipe3D.cube.html#hdu1>\n ###########################################################################\n v_band = ssp[0] # in units of erg / s / cm^2\n v_band_err = ssp[4] # in units of erg / s / cm^2\n sMass_density = ssp[19] * u.dex( u.M_sun) # in units of log10( Msun / spaxel**2)\n\n Ha_vel = flux_elines[102] # in units of km/s\n Ha_vel_err = flux_elines[330] # in units of km/s\n\n gal_ra = org_hdr['OBJRA']\n gal_dec = org_hdr['OBJDEC']\n\n target_galaxy = True\n MaNGA_galaxy_target = org_hdr['MNGTARG1']\n if MaNGA_galaxy_target == 0:\n target_galaxy = False\n\n data_quality = True\n DRP_3D_quality = org_hdr['DRP3QUAL']\n if DRP_3D_quality > 10000:\n data_quality = False\n\n return target_galaxy, data_quality, Ha_vel, Ha_vel_err, v_band, v_band_err, \\\n sMass_density, gal_ra, gal_dec", "def plot_DA(filename):\n\n # Set up an array of redshift values.\n dz = 0.1\n z = numpy.arange(0., 10. + dz, dz)\n\n # Set up a cosmology dictionary, with an array of matter density values.\n cosmo = {}\n dom = 0.01\n om = numpy.atleast_2d(numpy.linspace(0.1, 1.0, (1.-0.1)/dom)).transpose()\n cosmo['omega_M_0'] = om\n cosmo['omega_lambda_0'] = 1. 
- cosmo['omega_M_0']\n cosmo['h'] = 0.701\n cosmo['omega_k_0'] = 0.0\n\n # Calculate the hubble distance.\n dh = cd.hubble_distance_z(0, **cosmo)\n # Calculate the angular diameter distance.\n da = cd.angular_diameter_distance(z, **cosmo)\n\n # Make plots.\n plot_dist(z, dz, om, dom, da, dh, 'angular diameter distance', r'D_A',\n filename)\n plot_dist_ony(z, dz, om, dom, da, dh, 'angular diameter distance', r'D_A',\n filename)", "def read_from_pln(self, path):\n\n # Read the .pln file contents to a dictionary.\n pln_dict = read_pln_file(path)\n\n # Look for each attribute listed in self.attributes in the results\n # dictionary.\n for attr in self.attributes:\n\n # Get the corresponding ExoParameter object.\n current = getattr(self, attr)\n\n # Look for this attribute in the results dictionary and set\n # ExoParameter.value.\n key_str = attr\n try:\n current.value = pln_dict[key_str]\n del pln_dict[key_str]\n except KeyError:\n current.value = current.default\n\n # Look for reference and URL information in the results dictionary,\n # and use this to set ExoParameter.reference and ExoParameter.url.\n # Skip 'transit' since 'transitref' and 'transiturl', are separate\n # fields in the references section.\n if not attr == \"transit\":\n\n key_str = \"\".join([attr, \"ref\"])\n try:\n current.reference = pln_dict[key_str]\n del pln_dict[key_str]\n except KeyError:\n current.reference = None\n\n key_str = \"\".join([attr, \"url\"])\n try:\n current.url = pln_dict[key_str]\n del pln_dict[key_str]\n except KeyError:\n current.url = None\n\n # If this attribute can take uncertainty values, look for these in\n # the results dictionary, then set ExoParameter.uncertainty and\n # ExoParameter.uncertainty_upper.\n if current.uncertain_flag:\n\n key_str = \"\".join([\"u\", attr])\n try:\n current.uncertainty = pln_dict[key_str]\n del pln_dict[key_str]\n except KeyError:\n current.uncertainty = None\n\n key_str = \"\".join([\"u\", attr, \"d\"])\n try:\n current.uncertainty_upper = pln_dict[key_str]\n del pln_dict[key_str]\n except KeyError:\n current.uncertainty_upper = None\n\n # If there are still keyword / value pairs in pln_dict, these fields\n # are not in the self.attributes list, which is built from\n # self.template_file.\n \"\"\"\n if len(pln_dict.keys()) > 0:\n print(\"{0} contains unknown .pln fields: {1}\".format(\n path, pln_dict.keys()))\n print(\"Add fields to {0} to include.\".format(self.template_file))\n \"\"\"\n\n # Trigger uncertainty calculations.\n self._populate_uncertainties()", "def overviewCommand(self):\n plt.figure(11)\n plt.clf()\n ax = plt.subplot(211)\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*self.raw['OPDC'].data.field('FUOFFSET'),\n color='r', label='FUOFFSET',\n linewidth=1, alpha=1) \n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*(self.raw['OPDC'].data.field(self.DLtrack)-\n self.raw['OPDC'].data.field('PSP')),\n color='r', linewidth=3, alpha=0.5,\n label=self.DLtrack+'-PSP')\n plt.legend()\n plt.subplot(212, sharex=ax)\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*self.raw['OPDC'].data.field('FUOFFSET')-\n 1e6*(self.raw['OPDC'].data.field(self.DLtrack)-\n self.raw['OPDC'].data.field('PSP')),\n color='k', label='$\\Delta$',\n linewidth=1, alpha=1) \n \n signal = self.raw['OPDC'].data.field('FUOFFSET')\n plt.figure(12)\n plt.clf()\n ax2 = plt.subplot(111)\n Fs = 1e6/np.diff(self.raw['OPDC'].data.field('TIME')).mean()\n print Fs\n ax2.psd(signal[:50000], NFFT=5000, Fs=Fs, label='FUOFFSET',scale_by_freq=0)\n plt.legend()", "def measure_adc(self, 
rng = 'DEF', res = 'DEF', unit = 'A', samp = 1):\n self.configure_adc(rng, res, unit)\n self.write_to_serial(':samp:coun ' + str(samp))\n if samp == 1:\n return float(self.query_serial('read?'))\n else:\n samples = self.query_serial('read?').split(',')\n sampleresult = []\n for sample in samples:\n sampleresult.append(float(sample))\n return sampleresult", "def test_get_many(self):\r\n\r\n with open(os.path.join(RESOURCE_PATH, 'ND072023.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n\r\n particles = parser.get_records(54)\r\n log.debug('got back %d records', len(particles))\r\n\r\n self.assert_particles(particles, 'ND072023_recov.yml', RESOURCE_PATH)", "def load_gldas_elevation_dataset(gldas_elevation_file): \n d1 = xr.open_dataset(gldas_elevation_file).load()\n return d1", "def getAV(ra,dec):\n coords = FK5Coordinates(ra,dec)\n rah,ram,ras = coords.ra.hms\n decd,decm,decs = coords.dec.dms\n url = 'http://ned.ipac.caltech.edu/cgi-bin/nph-calc?in_csys=Equatorial&in_equinox=J2000.0&obs_epoch=2010&lon='+'%i' % rah + \\\n '%3A'+'%i' % ram + '%3A' + '%05.2f' % ras + '&lat=%2B' + '%i' % decd + '%3A' + '%i' % decm + '%3A' + '%05.2f' % decs + \\\n '&pa=0.0&out_csys=Equatorial&out_equinox=J2000.0'\n tmpfile = '/tmp/nedsearch.html'\n cmd = 'wget \\'%s\\' -O %s -q' % (url,tmpfile)\n sp.Popen(cmd,shell=True).wait()\n for line in open(tmpfile,'r'):\n m = re.search('V \\(0.54\\)\\s+(\\S+)',line)\n if m:\n AV = float(m.group(1))\n os.remove(tmpfile)\n return AV", "def readExperi(directory,varid,experi,level):\n print('\\n>>> Using readExperi function! \\n')\n \n ### Import modules\n import numpy as np\n from netCDF4 import Dataset\n \n ### Call files\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n \n if any([experi == 'FPOL',experi == 'FSUB']):\n directory = '/home/zlabe/green/simu/'\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n \n ### Read in Data\n if level == 'surface': # 3d variables\n data = Dataset(filename,'r')\n varq = data.variables['%s' % varid][:,:,:,0]\n data.close()\n \n dataq = Dataset(totaldirectory + 'T2M_1900-2000.nc')\n time = dataq.variables['time'][:]\n lev = 'surface'\n lat = dataq.variables['latitude'][:]\n lon = dataq.variables['longitude'][:]\n dataq.close()\n elif level == 'profile': # 4d variables\n data = Dataset(filename,'r')\n varq = data.variables['%s' % varid][:,:,:,0]\n data.close()\n \n dataq = Dataset(totaldirectory + 'TEMP_1900-2000.nc')\n time = dataq.variables['time'][:]\n lev = dataq.variables['level'][:]\n lat = dataq.variables['latitude'][:]\n lon = dataq.variables['longitude'][:]\n dataq.close()\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Read data for *%s* : %s!' % (experi[:4],varid))\n \n ### Reshape to split years and months\n months = 12\n if level == 'surface': # 3d variables\n var = np.reshape(varq,(int(varq.shape[0]/12),months,\n int(lat.shape[0])))\n elif level == 'profile': # 4d variables\n var = np.reshape(varq,(int(varq.shape[0]/12),months,int(lev.shape[0]),\n int(lat.shape[0])))\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Reshaped %s array!' 
% (varid))\n \n ### Convert units\n if varid in ('TEMP','T2M'):\n var = var - 273.15 # Kelvin to degrees Celsius \n print('Completed: Changed units (K to C)!')\n\n print('\\n*Completed: Finished readExperi function!')\n return lat,lon,time,lev,var", "def parse_file(self):\n file_time = ''\n num_dir = 0\n num_freq = 0\n freq_w_band = 0.0\n freq_0 = 0.0\n start_dir = 0.0\n\n dspec_matrix = []\n\n # Extract the file time from the file name\n input_file_name = self._stream_handle.name\n\n match = FILE_NAME_MATCHER.match(input_file_name)\n\n if match:\n file_time = match.group(1)\n else:\n error_message = 'Unable to extract file time from DSpec input file name: %s '\\\n % input_file_name\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n\n # read the first line in the file\n line = self._stream_handle.readline()\n\n # loop over all lines in the data file\n while line:\n\n if EMPTY_LINE_MATCHER.match(line):\n # ignore blank lines, do nothing\n pass\n\n elif HEADER_MATCHER.match(line):\n\n # we need header records to extract useful information\n for matcher in HEADER_MATCHER_LIST:\n header_match = matcher.match(line)\n\n if header_match is not None:\n\n # Look for specific header lines and extract header fields\n if matcher is DIR_FREQ_MATCHER:\n num_dir = int(header_match.group(1))\n num_freq = int(header_match.group(2))\n\n elif matcher is FREQ_BAND_MATCHER:\n freq_w_band = header_match.group(1)\n freq_0 = header_match.group(2)\n\n elif matcher is START_DIR_MATCHER:\n start_dir = header_match.group(1)\n\n else:\n #ignore\n pass\n\n elif DSPEC_DATA_MATCHER.match(line):\n\n # Extract a row of the Directional Surface Spectrum matrix\n sensor_match = DSPEC_DATA_MATCHER.match(line)\n data = sensor_match.group(1)\n values = [int(x) for x in data.split()]\n\n num_values = len(values)\n\n # If the number of values in a line of data doesn't match num_dir,\n # Drop the record, throw a recoverable exception and continue parsing\n if num_values != num_dir:\n error_message = 'Unexpected Number of directions in line: expected %s, got %s'\\\n % (num_dir, num_values)\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n else:\n # Add the row to the dspec matrix\n dspec_matrix.append(values)\n\n else:\n # Generate a warning for unknown data\n error_message = 'Unexpected data found in line %s' % line\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n\n # read the next line in the file\n line = self._stream_handle.readline()\n\n # Check to see if the specified number of frequencies were retrieved from the data\n dspec_matrix_length = len(dspec_matrix)\n if dspec_matrix_length != num_freq:\n error_message = 'Unexpected Number of frequencies in DSpec Matrix: expected %s, got %s'\\\n % (num_freq, dspec_matrix_length)\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n\n # Construct the parsed data list to hand over to the Data Particle class for particle creation\n parsed_data = [\n file_time, # ('file_time', 0, str),\n num_dir, # ('num_dir', 1, int),\n num_freq, # ('num_freq', 2, int),\n freq_w_band, # ('freq_w_band', 3, float),\n freq_0, # ('freq_0', 4, float),\n start_dir, # ('start_dir', 5, float),\n dspec_matrix # ('directional_surface_spectrum', 6, list)]\n ]\n\n # Extract a particle and append it to the record buffer\n particle = self._extract_sample(AdcptMDspecInstrumentDataParticle, None, parsed_data)\n 
self._record_buffer.append(particle)", "def get_DA(self, rest_range=None, exclude_type=None, mask_metals=False):\n\n # [\"waves\", \"flux\",\"error\", \"abs\",\"cont\"]\n\n\n\n sp = self.metal_sp if exclude_type == \"HI\" else self.HI_sp\n waves, flux, err, ab, cont = DumpData.normalize(sp, mask_metals=mask_metals)\n\n indices = None\n if rest_range:\n rest_range = ((1. + self.zem) * rest_range[0], (1. + self.zem) * rest_range[1])\n indices = Spectrum.get_indices(spec.waves, rest_range)\n\n flux, ab, err = tuple(ma.masked_invalid(it[indices])\n for it in (flux, ab, err))\n else:\n flux, ab, err = tuple(ma.masked_invalid(it)\n for it in (flux, ab, err))\n\n if exclude_type:\n da = (1.0 - ab)\n\n else:\n da = (1.0 - flux)\n\n dda = err\n\n da = ma.masked_invalid(da)\n da = ma.masked_where(np.fabs(da) > 10., da)\n\n dda = ma.masked_invalid(dda)\n dda = ma.masked_where(np.fabs(dda) > 10., da)\n mse = np.sqrt(np.sum(dda ** 2.)) / float(dda.shape[0])\n return np.mean(da), mse", "def asd(x, t, windowname=\"none\", ave=bool(True)):\n f, Pxx = crsd(x, x, t, windowname=windowname, ave=ave)\n Pxx = Pxx.real\n return f, Pxx", "def datapackager(dfiles):\n return core.annual_resource_datapackager(eia923_raw, dfiles)", "def addDEX(self, filename, data, dx=None):\n digest = hashlib.sha256(data).hexdigest()\n log.debug(\"add DEX:%s\" % digest)\n\n log.debug(\"Parsing format ...\")\n d = DalvikVMFormat(data)\n log.debug(\"added DEX:%s\" % digest)\n\n if filename not in self.analyzed_files:\n self.analyzed_files[filename] = []\n\n self.analyzed_files[filename].append(digest)\n self.analyzed_digest[digest] = filename\n\n if dx is None:\n dx = Analysis()\n\n dx.add(d)\n dx.create_xref()\n\n for d in dx.vms:\n d.set_decompiler(DecompilerDAD(d, dx))\n d.set_vmanalysis(dx)\n self.analyzed_dex[digest] = dx\n\n if self.export_ipython:\n log.debug(\"Exporting in ipython\")\n d.create_python_export()\n\n return digest, d, dx", "def getdns(self):\r\n filename = r\"dns_profiles.txt\"\r\n fp = open(filename)\r\n data = []\r\n for lines in fp.readlines():\r\n data.append(list(map(float, lines.split())))\r\n #use the fundamental string function 'append','split' to extract floating point number\r\n fp.close()\r\n dns_data = np.array(data) #transfer list to array\r\n self.dns_z = dns_data[:, 0] / 1000 #z-plus -> z/h\r\n self.dns_u = dns_data[:, 1] # u-plus\r\n self.dns_uw = dns_data[:, 2]\r\n self.dns_uu = dns_data[:, 3]\r\n self.dns_ww = dns_data[:, 4]\r\n self.dns_vv = dns_data[:, 5]\r\n self.dns_tau = dns_data[:, 7]\r\n self.dns_tot = dns_data[:, 8]", "def test_with_status_data(self):\r\n\r\n with open(os.path.join(RESOURCE_PATH, 'ND161646.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n\r\n particles = parser.get_records(250)\r\n log.debug('got back %d records', len(particles))\r\n\r\n self.assert_particles(particles, 'ND161646.yml', RESOURCE_PATH)", "def Get_Meas_Res_Actual(self, mode, ch=1):\n rdStr = self.query(f':MEAS{ch}:RES:ACT? 
{mode}')\n return rdStr", "def log_AD_results(xp_path, learner):\n\n log_file = \"{}/log.txt\".format(xp_path)\n log = open(log_file, \"a\")\n\n log.write(\"Results\\n\\n\")\n\n log.write(\"Train AUC: {} %\\n\".format(round(learner.diag['train']['auc'][-1]*100, 4)))\n log.write(\"Train accuracy: {} %\\n\".format(round(learner.diag['train']['acc'][-1], 4)))\n log.write(\"Train time: {}\\n\\n\".format(round(learner.train_time, 4)))\n\n log.write(\"Val AUC: {} %\\n\".format(round(learner.diag['val']['auc'][-1] * 100, 4)))\n log.write(\"Val accuracy: {} %\\n\\n\".format(round(learner.diag['val']['acc'][-1], 4)))\n\n log.write(\"Test AUC: {} %\\n\".format(round(learner.diag['test']['auc'][-1]*100, 4)))\n log.write(\"Test accuracy: {} %\\n\".format(round(learner.diag['test']['acc'][-1], 4)))\n log.write(\"Test time: {}\\n\".format(round(learner.test_time, 4)))\n\n log.write(\"\\n\\n\")\n log.close()", "def test_eam_density():\n cfg_string = u\"\"\"[EAM-Density]\nA : density1 1000.0 0.1 1.0\nB : density2 2000.0 0.2 2.0\n\"\"\"\n parsed = ConfigParser(io.StringIO(cfg_string))\n\n expect = [\n EAMDensityTuple(u\"A\", PFI(u\"density1\", [1000.0, 0.1, 1.0], MRD(u\">\",0.0), None)),\n EAMDensityTuple(u\"B\", PFI(u\"density2\", [2000.0, 0.2, 2.0], MRD(u\">\",0.0), None))]\n\n actual = parsed.eam_density\n assert DeepDiff(expect, actual) == {}", "def scn2ard(self, unq_id):\n if not os.path.exists(self.ardFinalPath):\n raise EODataDownException(\"The ARD final path does not exist, please create and run again.\")\n\n if not os.path.exists(self.ardProdWorkPath):\n raise EODataDownException(\"The ARD working path does not exist, please create and run again.\")\n\n if not os.path.exists(self.ardProdTmpPath):\n raise EODataDownException(\"The ARD tmp path does not exist, please create and run again.\")\n\n eodd_utils = eodatadown.eodatadownutils.EODataDownUtils()\n\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n logger.debug(\"Perform query to find scenes which need converting to ARD.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == unq_id,\n EDDSentinel1ASF.Downloaded == True,\n EDDSentinel1ASF.ARDProduct == False).one_or_none()\n\n proj_epsg = None\n if self.ardProjDefined:\n proj_epsg = self.projEPSG\n\n if query_result is not None:\n start_date = datetime.datetime.now()\n logger.debug(\"Create the specific output directories for the ARD processing.\")\n dt_obj = datetime.datetime.now()\n\n tmp_ard_path = os.path.join(self.ardProdTmpPath, dt_obj.strftime(\"%Y-%m-%d\"))\n if not os.path.exists(tmp_ard_path):\n os.mkdir(tmp_ard_path)\n\n wrk_ard_path = os.path.join(self.ardProdWorkPath, dt_obj.strftime(\"%Y-%m-%d\"))\n if not os.path.exists(wrk_ard_path):\n os.mkdir(wrk_ard_path)\n\n logger.debug(\"Create info for running ARD analysis for scene: {}\".format(query_result.Product_File_ID))\n final_ard_scn_path = os.path.join(self.ardFinalPath,\n \"{}_{}\".format(query_result.Product_File_ID, query_result.PID))\n if not os.path.exists(final_ard_scn_path):\n os.mkdir(final_ard_scn_path)\n\n tmp_ard_scn_path = os.path.join(tmp_ard_path,\n \"{}_{}\".format(query_result.Product_File_ID, query_result.PID))\n if not os.path.exists(tmp_ard_scn_path):\n os.mkdir(tmp_ard_scn_path)\n\n wrk_ard_scn_path = os.path.join(wrk_ard_path,\n \"{}_{}\".format(query_result.Product_File_ID, query_result.PID))\n if not 
os.path.exists(wrk_ard_scn_path):\n os.mkdir(wrk_ard_scn_path)\n\n pols = list()\n if 'VV' in query_result.Polarization:\n pols.append('VV')\n if 'VH' in query_result.Polarization:\n pols.append('VH')\n zip_file = eodd_utils.findFilesRecurse(query_result.Download_Path, '.zip')\n if len(zip_file) == 1:\n zip_file = zip_file[0]\n else:\n logger.error(\"Could not find unique zip file for Sentinel-1 zip: PID = {}\".format(query_result.PID))\n raise EODataDownException(\n \"Could not find unique zip file for Sentinel-1 zip: PID = {}\".format(query_result.PID))\n success_process_ard = self.convertSen1ARD(zip_file, final_ard_scn_path, wrk_ard_scn_path, tmp_ard_scn_path,\n self.demFile, self.outImgRes, pols, proj_epsg, self.projabbv,\n self.out_proj_img_res, self.out_proj_interp, self.use_roi,\n self.intersect_vec_file, self.intersect_vec_lyr,\n self.subset_vec_file, self.subset_vec_lyr, self.mask_outputs,\n self.mask_vec_file, self.mask_vec_lyr)\n end_date = datetime.datetime.now()\n if success_process_ard:\n query_result.ARDProduct = True\n query_result.ARDProduct_Start_Date = start_date\n query_result.ARDProduct_End_Date = end_date\n query_result.ARDProduct_Path = final_ard_scn_path\n ses.commit()\n else:\n query_result.ARDProduct = False\n query_result.ARDProduct_Start_Date = start_date\n query_result.ARDProduct_End_Date = end_date\n query_result.Invalid = True\n ses.commit()\n\n if os.path.exists(tmp_ard_scn_path):\n shutil.rmtree(tmp_ard_scn_path)\n if os.path.exists(wrk_ard_scn_path):\n shutil.rmtree(wrk_ard_scn_path)\n\n ses.close()", "def readRatio(self):\n\t\tf=open(Options.ratioFilename)\n\t\tself.xRatio = float(f.readline())\n\t\tself.yRatio = float(f.readline())\n\t\tf.close()", "def get_dssp_from_file(in_file):\n with open(in_file) as file:\n return file.readlines()[1].strip()", "def readpsrarch(fname, dedisperse=True, verbose=True):\n import psrchive\n \n arch = psrchive.Archive_load(fname)\n source = arch.get_source()\n tel = arch.get_telescope()\n if verbose:\n print(\"Read archive of {0} from {1}\".format(source, fname))\n\n if dedisperse:\n if verbose:\n print(\"Dedispersing...\")\n arch.dedisperse()\n data = arch.get_data()\n midf = arch.get_centre_frequency()\n bw = arch.get_bandwidth()\n F = np.linspace(midf-bw/2., midf+bw/2., data.shape[2], endpoint=False)\n #F = arch.get_frequencies()\n\n a = arch.start_time()\n t0 = a.strtempo()\n t0 = Time(float(t0), format='mjd', precision=0)\n\n # Get frequency and time info for plot axes\n nt = data.shape[0]\n Tobs = arch.integration_length()\n dt = (Tobs / nt)*u.s\n T = t0 + np.arange(nt)*dt\n T = T.mjd\n \n return data, F, T, source, tel", "def cal_beam_MADMFD(infile):\n\n data = np.loadtxt(infile)\n maxfdensity = data[:,8]\n mad_maxfdensity = round(median_absolute_deviation(maxfdensity), 3)\n \n return mad_maxfdensity", "def postpro(file,show=True):\n #folder = get_folder(atom,xyz,dn)\n p = None\n volume = None\n if is_complete(file,show):\n with open(file) as f:\n lines = f.readlines()\n for line in lines:\n if line.rfind(\"| Cartesian Polarization \") != -1:\n p = float64(split_line(line)[-3:]) #\n if line.rfind(\"| Unit cell volume \") != -1:\n volume = float(split_line(line)[-2])\n return p, volume\n else :\n return None,None", "async def set_offsets_radec(self, dra: float, ddec: float, **kwargs: Any) -> None:\n\n log.info(\"Moving offset dra=%.5f, ddec=%.5f\", dra, ddec)\n await self.comm.send_event(OffsetsRaDecEvent(ra=dra, dec=ddec))\n self._telescope.set_offsets(dra, ddec)", "def read(self):\n # open the .SPE file\n with 
open(self._input_file_path, 'rb') as f:\n lines = f.readlines()\n # Create an empty dictionary for the metadata\n metadata_dictionary = {}\n\n # Search through the file for the needed metadata\n metadata_dictionary['date_acquired'] = re.search(b'date=\"(.*?)\"', lines[1])[1].decode('ANSI') \n metadata_dictionary['width'] = int(re.search(b'width=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['height'] = int(re.search(b'height=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['size'] = metadata_dictionary['width']*metadata_dictionary['height']\n metadata_dictionary['exposure_time'] = int(re.search(b'<ExposureTime type=\"Double\">(.*?)</ExposureTime>', lines[1])[1])\n metadata_dictionary['excitation_wavelength'] = float(re.search(b'laserLine=\"(.*?)\"',lines[1])[1])\n metadata_dictionary['center_wavelength'] = float(re.search(b'<CenterWavelength type=\"Double\">(.*?)</CenterWavelength>',lines[1])[1])\n metadata_dictionary['orientation'] = re.search(b'orientation=\"(.*?)\"',lines[1])[1].decode('ANSI')\n\n # Get the wavelength and intensity\n wavelength_string = re.search(b'<Wavelength xml:space=\"preserve\">(.*?)</Wavelength>',lines[1])[1].decode('utf-8')\n wavelength = np.array(wavelength_string.split(','), dtype=np.float64)\n\n f.seek(4100)\n intensity = np.fromfile(f,dtype=np.float32,count=metadata_dictionary['size'])\n\n raman_shift_wavenumbers = 1e7*(1/metadata_dictionary['excitation_wavelength'] - 1/wavelength)\n\n f.close()\n \n # create the sidpy dataset\n data_set = Dataset.from_array(intensity, name='Raman Spectra')\n\n data_set.data_type = 'spectrum'\n data_set.units = 'counts'\n data_set.quantity = 'Intensity'\n\n # set dimensions\n data_set.set_dimension(0, Dimension(raman_shift_wavenumbers, name='Raman Shift',\n units = 'cm-1',\n quantity='Raman shift',\n dimension_type='spectral'))\n data_set.set_dimension(1, Dimension(intensity, name='Intensity',\n units = 'counts',\n quantity='intensity',\n dimension_type='spectral')) \n\n data_set.metadata = metadata_dictionary\n\n return data_set", "def to_amdl(self):\n from .adipls import ADIPLSStellarModel\n\n ioff = (0 if self.r[0] < 1e6 else 1) # mimic ADIPLS's FGONG to AMDL script\n A = np.zeros((len(self.data) + ioff, 6))\n\n # we can safely ignore division by 0 here\n with np.errstate(divide='ignore', invalid='ignore'):\n A[ioff:,0] = self.x\n A[ioff:,1] = self.q/self.x**3\n A[ioff:,2] = self.Vg\n A[ioff:,3] = self.Gamma_1\n A[ioff:,4] = self.AA\n A[ioff:,5] = self.U\n\n A[0,0] = 0.\n A[0,1] = 4.*np.pi/3.*self.rho[0]*self.R**3/self.M\n A[0,2] = 0.\n A[0,3] = self.Gamma_1[0]\n A[0,4] = 0.\n A[0,5] = 3.\n\n D = np.zeros(8)\n D[0] = self.M\n D[1] = self.R\n D[2] = self.P[0]\n D[3] = self.rho[0]\n D[4] = 4.*np.pi/3.*self.G*(self.rho[0]*self.R)**2/(self.P[0]*self.Gamma_1[0])\n D[5] = D[4]\n D[6] = -1.0\n D[7] = 0.0\n\n return ADIPLSStellarModel(D, A, G=self.G)", "def cal_read(self):\n self.write(\":CAL:DATA?\")\n return self.adapter.read_raw()" ]
[ "0.58027154", "0.546091", "0.5428187", "0.5426959", "0.5236421", "0.5206721", "0.5204981", "0.5189248", "0.5178479", "0.51778173", "0.51361907", "0.5132914", "0.51140034", "0.51137125", "0.5094768", "0.505379", "0.5024168", "0.49520048", "0.49162722", "0.4861708", "0.48393014", "0.4827799", "0.48126784", "0.4803462", "0.47973534", "0.47912204", "0.475231", "0.47431862", "0.4728082", "0.4724658", "0.47174037", "0.47130632", "0.47075886", "0.46983913", "0.46979263", "0.46974263", "0.4673576", "0.46527153", "0.46505845", "0.46402383", "0.46364337", "0.4632383", "0.4631751", "0.46176708", "0.46165797", "0.46128163", "0.45972726", "0.45921332", "0.45903507", "0.45874006", "0.45863008", "0.4582455", "0.4574014", "0.45703277", "0.45686254", "0.45662814", "0.45628688", "0.45621294", "0.45382375", "0.45167622", "0.44921535", "0.44900241", "0.4489911", "0.4489588", "0.44794863", "0.44688293", "0.44616517", "0.44616517", "0.44559655", "0.44523516", "0.44511762", "0.44507077", "0.44467887", "0.44463968", "0.44446623", "0.44442073", "0.44436336", "0.4439015", "0.442898", "0.44266313", "0.4412006", "0.44101843", "0.4408984", "0.44037396", "0.4402577", "0.43953204", "0.43950483", "0.439315", "0.4390575", "0.43842554", "0.43708947", "0.43577003", "0.4357088", "0.43521896", "0.4351134", "0.43506977", "0.43500215", "0.43496084", "0.43472221", "0.43463954" ]
0.78116286
0
Returns the ADP after reflection on the plane defined by its normal vector 'planev'.
def reflect_adp(adp, planev):
    M = np.identity(4)
    M[:3, :3] -= 2.0 * np.outer(planev, planev)
    M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev
    return rotate_adp(adp, M[:3, :3])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plane(self):\n return Plane(Point(0, self.evaluations.exposedWing.edges[2].point1.y, 0), Vector(0, 1, 0),\n hidden=True)", "def GetPlane(plane):\r\n pass", "def get_adp_from_calc(vx, vy, vz):\n ## lx=np.linalg.norm(vx)\n ## ly=np.linalg.norm(vy)\n ## lz=np.linalg.norm(vz)\n lx = vx\n ly = vy\n lz = vz\n L = np.matrix([[lx, 0, 0],\n [0, ly, 0],\n [0, 0, lz]])\n\n\n ## Vx=vx/lx\n ## Vy=vy/ly\n ## Vz=vz/lz\n Vx = np.array([1, 0, 0])\n Vy = np.array([0, 1, 0])\n Vz = np.array([0, 0, 1])\n V = np.matrix([[Vx[0], Vy[0], Vz[0]],\n [Vx[1], Vy[1], Vz[1]],\n [Vx[2], Vy[2], Vz[2]]])\n Vinv = np.linalg.inv(V)\n #print V,Vinv\n M = np.dot(np.dot(Vinv, L), V)\n #print M\n return M", "def ADP (self):", "def getPlane(entry):\n\n \n \n a,b,c = getNewLattice(entry,2)\n a_vector = np.linalg.solve(np.array(entry[0].lattice.as_dict()['matrix']).T,a)\n b_vector = np.linalg.solve(np.array(entry[0].lattice.as_dict()['matrix']).T,b)\n fracs = np.cross(a_vector,b_vector)\n fracs /= min([x for x in fracs if abs(x)>1E-4])\n \n return(fracs)", "def plane(self):\r\n from lsst.analysis import utils\r\n return utils.fitplane(self.points, self.z)", "def test_antinormal_reflection(self):\n n1 = 1.0\n n2 = 1.5\n normal = (0.0, 0.0, -1.0)\n angle = 0.0\n ray = Ray(position=(0.0, 0.0, 0.0), direction=(0.0, 0.0, 1.0), wavelength=None)\n fresnel = FresnelReflection()\n assert np.isclose(fresnel.reflectivity(angle, n1, n2), 0.04)\n new_ray = fresnel.transform(ray, {\"normal\": normal})\n assert np.allclose(flip(ray.direction), new_ray.direction)", "def pr(self, vertex):\n log_pr = self.log_pr(vertex)\n return np.exp(log_pr - self.logZ)", "def plot_plane(unit_normal, x_array, y_array, fore):\n # print'unit normal = ', unit_normal\n z = (((unit_normal[0] * (fore[0] - x_array)) + (unit_normal[1] * (fore[1] - y_array))) / unit_normal[2]) + fore[2]\n # print 'plane numbers\\n', z\n return z", "def p(self):\n return 'Plane'", "def project_onto_plane(self,z):\n U=self.U\n Q=self.Q_p\n #print(((z-Q[-2,:,[2]])/P[-2,:,[2]]).T)\n #print(P[-2])\n return ((z-Q[-2,:,[2]])/U[-2,:,[2]]).T*U[-2]+Q[-2]", "def plane(self):\n return plane(self.N, self.o)", "def plane_equation(point_a, point_b, point_c):\n v1 = np.subtract(point_a, point_c)\n v2 = np.subtract(point_a, point_b)\n normal = np.cross(v1, v2)\n # print 'b4 norm', normal\n unit_normal = norm_vect(normal)\n # print 'unityyy', unit_normal\n return unit_normal", "def get_real_pwv(pwv, altitude):\n zenith_angle = 90-altitude\n airmass = 1/np.cos(zenith_angle*np.pi/180)\n return pwv*airmass", "def project_point_plane(point, plane):\n base, normal = plane\n normal = normalize_vector(normal)\n vector = subtract_vectors(point, base)\n snormal = scale_vector(normal, dot_vectors(vector, normal))\n return subtract_vectors(point, snormal)", "def reflect_line_plane(line, plane, epsilon=1e-6):\n intx_pt = intersection_line_plane(line, plane, epsilon)\n if not intx_pt:\n return None\n vec_line = subtract_vectors(line[1], line[0])\n vec_reflect = mirror_vector_vector(vec_line, plane[1])\n if angle_smallest_vectors(plane[1], vec_reflect) > 0.5 * pi:\n return None\n return [intx_pt, add_vectors(intx_pt, vec_reflect)]", "def plane_point_side_v3(p: np.ndarray, v: np.ndarray) -> Any:\n return p[:3].dot(v) + p[3]", "def plane_distance(p, plane):\n x, y, z = p\n A, B, C, D = plane\n return A*x + B*y + C*z + D", "def planarize(self):\r\n from lsst.analysis import utils\r\n assert numpy.isfinite(self.z).all()\r\n self.z -= utils.evalplane(self.plane(), self.points)", "def OAVolterra_direct(p0,wD,dt,Nt):\n 
# INITIALIZATION ------------------------------------------------------\n pz = np.zeros(Nt) # oa signal at detection point\n K0 = wD # oa propagator: K(0,0) \n K1 = wD*np.exp(-wD*dt) # oa propagator: K(1,0) \n K1_K0 = np.exp(-wD*dt) # quotient: K(i+1)/K(i)\n\n # SOLVE FORWARD PROBLEM VIA RECURRENCE RELATION -----------------------\n I = 0 \n pz[0] = p0[0] \n for i in range(1,Nt):\n I = I*K1_K0 + 0.5*dt*(K1*p0[i-1] + K0*p0[i])\n pz[i] = p0[i] - I\n return pz", "def distance_from_plane(n,p,r,nnorm=None):\n #return np.abs(np.dot(n,(p-r)))/np.linalg.norm(n)\n #return np.abs(np.dot(n,(p-r)))/nnorm\n # the normal vector is already a unit vector!\n return np.abs(np.dot(n,(p-r)))", "def extract_phase(eigvector, point_arr=[]):\n pa = point_arr\n if np.size(pa) == 0:\n pa = np.arange(len(evY))\n\n evX = eigvector[2 * pa]\n evY = eigvector[2 * pa + 1]\n phase = np.arctan2(evY.real, evX.real)\n # print 'evY[0] =', evY[0]\n # print 'evX[0] =', evX[0]\n # print 'phase[0] = ', phase[0]\n return phase", "def VaporPressure(dwpt):\n\n return 611.2*exp(17.67*dwpt/(243.5+dwpt))", "def test_point_on_plane(self, point, plane):\n _dist = point.dot(plane[:3]) + plane[3]\n if _dist <= epsilon:\n print('OK => point on plane')\n else:\n print('NO => point not on plane')", "def plane_desc(self) -> str:\n return self.planes[0].join(' ') + self.planes[1].join(' ') + self.planes[2].join(' ')", "def test_reflection_vector(self):\n\n # A ray approaching at 45 degrees\n v = vectors.Vector(1, -1, 0)\n n = vectors.Vector(0, 1, 0)\n r = v.reflect(n)\n self.assertEqual(r, vectors.Vector(1, 1, 0))\n\n # Ray along an axis hits a surface at an angle\n v = vectors.Vector(0, -1, 0)\n n = vectors.Vector(math.sqrt(2)/2, math.sqrt(2)/2, 0)\n r = v.reflect(n)\n self.assertEqual(r, vectors.Vector(1, 0, 0))", "def Drepp(self):\n sinE = np.sin(self.E())\n cosE = np.cos(self.E())\n return -self.alpha()*cosE-(self.beta()+self.GAMMA)*sinE", "def distance_point_plane(point, plane):\n base, normal = plane\n vector = subtract_vectors(point, base)\n return fabs(dot_vectors(vector, normal))", "def ransac_plane_estimation (numpy_cloud, threshold, fixed_point=None, w = .9, z = 0.95 ):\r\n\r\n # variables\r\n current_consensus = 0 # keeps track of how many points match the current plane\r\n best_consensus = 0 # shows how many points matched the best plane yet\r\n consensus_points = np.array([]) # np.ndarray of points matching the cloud\r\n best_normal_vector = np.array ([]) # current best normal vector\r\n\r\n # determine probabilities and number of draws\r\n b = np.float_power(w, 3 ) # probability that all three observations belong to the model\r\n k = ceil(np.log(1-z ) / np.log(1-b )) # estimated number of draws\r\n\r\n # copy cloud\r\n numpy_cloud = numpy_cloud[:, 0:3].copy ()\r\n\r\n # estimate k * 3 random planes, defined through one normal vector and one plane parameter d, respectively\r\n normal_vectors, plane_parameters_d = random_plane_estimation (numpy_cloud, k * 3, fixed_point )\r\n\r\n # iterate through all planes found to see which one performs best\r\n for (normal_vector, d) in zip (normal_vectors, plane_parameters_d ):\r\n\r\n # count all points that consent with the plane\r\n current_consensus, current_consensus_points = plane_consensus (numpy_cloud, normal_vector, d, threshold )\r\n\r\n # is the current consensus match higher than the previous ones?\r\n if (current_consensus > best_consensus ):\r\n\r\n # keep best consensus set\r\n consensus_points = current_consensus_points\r\n best_normal_vector = normal_vector\r\n best_consensus 
= current_consensus\r\n\r\n return best_normal_vector, consensus_points", "def reflect_ghost(self, p0):\n # Instead of self.p1, one could take any point on the line p1--p2.\n dist = self.p1 - p0\n alpha = numpy.einsum(\"ij, ij->i\", dist, self.mirror_edge)\n # q is sits at the perpendicular intersection of the reflection\n q = dist - (alpha / self.beta)[:, None] * self.mirror_edge\n return p0 + 2 * q", "def np_vplane_2_vparam(vplane):\n vparam = np.cross(\n vplane,\n np.stack([-vplane[...,1], vplane[...,0], np.zeros_like(vplane[...,0])], axis=-1),\n axis=-1)\n return vparam[..., :2] / vparam[..., [2]]", "def plane_equation(p1, p2, p3):\n a1 = p2[0] - p1[0]\n b1 = p2[1] - p1[1]\n c1 = p2[2] - p1[2]\n a2 = p3[0] - p1[0]\n b2 = p3[1] - p1[1]\n c2 = p3[2] - p1[2]\n a = b1 * c2 - b2 * c1\n b = a2 * c1 - a1 * c2\n c = a1 * b2 - b1 * a2\n # Points are collinear\n if (abs(a) < 1e-6) and (abs(b) < 1e-6) and (abs(c) < 1e-6):\n return None\n # All clear\n d = (- a * p1[0] - b * p1[1] - c * p1[2])\n return a, b, c, d", "def ppd(self):\n return math.sqrt(np.dot(self.v, self.v) / np.dot(self.w, self.w) )", "def APV_equation(self, uct):\n q_v_ = uct.total_reward\n nsa = uct.visit_time\n sigma_nsb = uct.my_parent.visit_time - 1\n psa = uct.psa\n if nsa == 0:\n return float('inf')\n equation = q_v_ / nsa + 2 * psa * math.sqrt(sigma_nsb) / nsa\n return equation", "def project_onto_plane(vect):\n x, y, z = vect\n \n return (x, y, 0.)", "def invert_point_on_plane(point, plane):\n _, _, proj = project_point_to_plane(point, plane)\n\n u, v = proj[0][1]\n return u, v", "def get_sag_plane_dist(self, P):\n if self.sp is None:\n print('ERROR: sagittal plane not setted yet!')\n return 0\n else:\n sp = self.sp\n #return abs(P[0]*sp[0] + P[1]*sp[1] + P[2]*sp[2] + sp[3]) / math.sqrt(sp[0]**2 + sp[1]**2 + sp[2]**2)\n return abs(P[0]*sp[0] + P[1]*sp[1] + P[2]*sp[2] + sp[3]) / math.sqrt(sp.dot(sp))", "def ADP_trace(adp):\n return sum(adp[:3])", "def get_plane_of_points(\n self,\n normal_vector=\"z\",\n planar_coordinate=None,\n ):\n # Get results vectors\n if (normal_vector == \"z\"):\n x_flat = self.floris.grid.x_sorted_inertial_frame[0, 0].flatten()\n y_flat = self.floris.grid.y_sorted_inertial_frame[0, 0].flatten()\n z_flat = self.floris.grid.z_sorted_inertial_frame[0, 0].flatten()\n else:\n x_flat = self.floris.grid.x_sorted[0, 0].flatten()\n y_flat = self.floris.grid.y_sorted[0, 0].flatten()\n z_flat = self.floris.grid.z_sorted[0, 0].flatten()\n u_flat = self.floris.flow_field.u_sorted[0, 0].flatten()\n v_flat = self.floris.flow_field.v_sorted[0, 0].flatten()\n w_flat = self.floris.flow_field.w_sorted[0, 0].flatten()\n\n # Create a df of these\n if normal_vector == \"z\":\n df = pd.DataFrame(\n {\n \"x1\": x_flat,\n \"x2\": y_flat,\n \"x3\": z_flat,\n \"u\": u_flat,\n \"v\": v_flat,\n \"w\": w_flat,\n }\n )\n if normal_vector == \"x\":\n df = pd.DataFrame(\n {\n \"x1\": y_flat,\n \"x2\": z_flat,\n \"x3\": x_flat,\n \"u\": u_flat,\n \"v\": v_flat,\n \"w\": w_flat,\n }\n )\n if normal_vector == \"y\":\n df = pd.DataFrame(\n {\n \"x1\": x_flat,\n \"x2\": z_flat,\n \"x3\": y_flat,\n \"u\": u_flat,\n \"v\": v_flat,\n \"w\": w_flat,\n }\n )\n\n # Subset to plane\n # TODO: Seems sloppy as need more than one plane in the z-direction for GCH\n if planar_coordinate is not None:\n df = df[np.isclose(df.x3, planar_coordinate)] # , atol=0.1, rtol=0.0)]\n\n # Drop duplicates\n # TODO is this still needed now that we setup a grid for just this plane?\n df = df.drop_duplicates()\n\n # Sort values of df to make sure plotting is 
acceptable\n df = df.sort_values([\"x2\", \"x1\"]).reset_index(drop=True)\n\n return df", "def pinfPerspective( fov, aspect, near, far=None ):\n result = zeros( (4,4),'d')\n # need the cotangent of the field-of-view\n cotFOV = 1/tan(fov)\n result[0,0] = cotFOV/aspect\n result[1,1] = cotFOV\n result[2,2:4] = -1\n result[3,2] = -2*near\n return result", "def reflectivity(self, point):\n return self._r", "def get_ab_tpdm(self) -> 'Nparray':\n dveca, dvecb = self.calculate_dvec_spin()\n tpdm_ab = numpy.transpose(numpy.tensordot(dveca.conj(),\n dvecb,\n axes=((2, 3), (2, 3))),\n axes=(1, 2, 3, 0))\n return tpdm_ab", "def OAVolterra_FrauenhoferZone_direct(p0,wD,dt,Nt):\n pFZ = np.zeros(Nt)\n for i in range(1,Nt-1):\n pFZ[i]=(p0[i+1]-p0[i-1])\n return pFZ/(2.*dt*wD)", "def planeIndex(plane, center = False):\n return (planeBase(center = center) * np.array(plane)).sum()", "def get_distance_to_plane(planepoints, otherpoint):\n # from\n # https://en.wikipedia.org/wiki/Plane_(geometry)#Describing_a_plane_through_three_points\n p0, p1, p2 = planepoints\n x1, y1, z1 = p0.getArray()\n x2, y2, z2 = p1.getArray()\n x3, y3, z3 = p2.getArray()\n D = np.linalg.det(np.array([[x1, y1, z1], [x2, y2, z2], [x3, y3, z3]]))\n if D != 0:\n d = -1\n at = np.linalg.det(np.array([[1, y1, z1], [1, y2, z2], [1, y3, z3]]))\n bt = np.linalg.det(np.array([[x1, 1, z1], [x2, 1, z2], [x3, 1, z3]]))\n ct = np.linalg.det(np.array([[x1, y1, 1], [x2, y2, 1], [x3, y3, 1]]))\n a = (-d / D) * at\n b = (-d / D) * bt\n c = (-d / D) * ct\n\n numer = np.abs(a * otherpoint.x +\n b * otherpoint.y +\n c * otherpoint.z + d)\n denom = np.sqrt(a**2 + b**2 + c**2)\n dist = numer / denom\n else:\n dist = 0\n return dist", "def ADF(self, dP, ax):\n from scipy.special import sph_harm\n ang = self._ang_part(dP)\n #scipy defines their harmonics to have `theta` be azimuthal, which is\n #opposite from physics.\n #we set $m = 0$ so that the azimuthal part doesn't contribute at all.\n result = np.zeros(len(ax))\n for l, p in ang.items():\n Ylm = sph_harm(0, l, 0, ax)*np.sqrt(2*l+1)\n #We are interested in the c* c of this value, which is multiplied\n #together to get pissnnl.\n result += p*np.sqrt(np.absolute(Ylm*Ylm.conjugate()))\n return result", "def mirror_point_to_plane(point, plane):\n assert isinstance(plane, cg3d_plane.CGPlane)\n pn, norm = plane.get_point_and_normal()\n norm.normalize()\n return point - 2.0 * ((point - pn) * norm) * norm", "def __getPlaneName(self):\n item = self._item()\n planeNormal = item.getNormal() if item is not None else None\n\n for name, normal in self._PLANES.items():\n if numpy.array_equal(planeNormal, normal):\n return name\n return '-'", "def maape(self) -> float:\n return float(np.mean(np.arctan(np.abs((self.true - self.predicted) / (self.true + EPS)))))", "def postepy(self,przedmiot:str)-> float:\n return self.przedmioty[przedmiot].srednia()", "def calcPfePres(voltage: float):\n # V → Torr\n exponent = 1.667 * voltage - 11.46\n pres = 10**exponent\n return pres", "def _getPlaneRef(self, tracker, station, plane):\n ref = plane + (station-1 + tracker*N_Station)*N_Plane\n return ref", "def calculate_marginal(self):\n self.marginal_ray=beam_field()\n m=self.marginal_ray\n m.U=np.array([[[0,0,1]]])\n m.Q_p=np.array([[[0,self.entrance_pupil,0]]])\n m.propagate(self.surfaces)", "def test_compute_alphas(self):\n\t\tdetails = self.watcher.analyze(layers=[self.second_layer], pool=False, randomize=False, plot=False, mp_fit=False, pl_package=WW_POWERLAW)\n\t\t#d = self.watcher.get_details(results=results)\n\t\ta = 
details.alpha.to_numpy()\n\t\tself.assertAlmostEqual(a[0],1.74859, places=3)\n\t\tself.assertAlmostEqual(a[1],1.66595, places=3)\n\t\tself.assertAlmostEqual(a[3],1.43459, places=3)", "def dv(self):\n return self.dvdlogdp.mul(self.dlogdp)", "def maape(actual: np.ndarray, predicted: np.ndarray):\n return np.mean(np.arctan(np.abs((actual - predicted) / (actual + EPSILON))))", "def distance_to_plane(plane, pt):\n if plane is None:\n return None\n d = abs((plane[0] * pt[0] + plane[1] * pt[1] + plane[2] * pt[2] + plane[3]))\n e = (math.sqrt(plane[0] * plane[0] + plane[1] * plane[1] + plane[2] * plane[2]))\n # Not the best assumption, but will work for the task.\n if abs(e) < 1e-10:\n return 1e10\n return d / e", "def Dre(self):\n er = self.er()\n sinE = np.sin(self.E())\n cosE = np.cos(self.E())\n return self.alpha()*(cosE- er)+ \\\n (self.beta()+self.GAMMA)*sinE", "def mTV(self):\n distance = abs(self.vertPosT - self.vertPosW) # distance between htp and vortex shred plane,\n # approximated with the wing root chordplane\n return distance / (self.spanW / 2)", "def V_magNeptune(alpha,a_p,d):\n V = 5.*np.log10(a_p*d) - 7.00 + 7.944e-3*alpha + 9.617e-5*alpha**2.\n return V", "def get_normal_vector_of_plane(p1, p2, p3):\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n ## print 'norm: '+str(np.linalg.norm(nvec))\n return nvec / np.linalg.norm(nvec)", "def calc_a(dert__):\n return dert__[[2, 3]] / dert__[1] # np.array([dy, dx]) / g", "def get_projection_point(self, point, plane, test=False):\n return point_on_plane_projection(point, plane, test=test)", "def get_normal_fluctuation(hover,target,normal,vec):\n\tvector = hover - target\n\tvector = vector - vec*(vector>(vec/2.)) + vec*(vector<(-1*vec/2.))\n\tprojected = planeproject(vector,normal)\n\t#---get the sign of the projection\n\tplane_point = vector+projected\n\tsign = 1.0-2.0*(np.arccos(np.dot(vecnorm(normal),vecnorm(vector)))>np.pi/2.)\n\treturn sign*np.linalg.norm(plane_point)", "def verify_plane_endpoints(self):\n return [self.x0 + self.nx * self.dx, self.y0 + self.ny * self.dy, self.z0 + self.nz * self.dz]", "def reflected(self, normal):\n return self - (2 * normal * self) * normal", "def np_vparam_2_vplane(vparam):\n d = np.linalg.norm(vparam, ord=2, axis=-1, keepdims=True)\n a = vparam[..., [0]] / d\n b = vparam[..., [1]] / d\n neg_sign = (a < 0)\n a[neg_sign] = -a[neg_sign]\n b[neg_sign] = -b[neg_sign]\n c = -(a * vparam[..., [0]] + b * vparam[..., [1]])\n vplane = np.concatenate([a, b, c], axis=-1)\n vplane[np.isnan(vplane)] = 0\n return vplane", "def determinant(v,w):\n return v[0] * w[1] - v[1] * w[0]", "def plans():", "def mad(v):\n return np.mean(np.abs(v - np.mean(v)))", "def getVref(alpha,initialWalkers, Walkers):\n ### Here we want to start by calculating Vbar, the average potential of all walkers.\n ### We need to correct by a term in order to keep our number of walkers relatively consistant.\n ### this term takes the form alpha/initialWalkers*(currentWalkers-initialWalkers). Why are we using number of walkers\n ### here? They're a convineient stand-in, but for what? 
This will matter when you implement continuous weighting.", "def getAerTransmittance(self, run):\n if not self.modtran_wl:\n self.initModtranWavelengths()\n\n # Compute Vandermonde matrix\n # degree is hardcoded but only 2nd degree polynomial is used\n vdm_wl = numpy.vander(self.modtran_wl, 3)\n # Get polynomial roots as well as zenith angle\n p0, p1, p2, z_ang = self.aerosol_visits[run]\n pfit = numpy.array([p0, p1, p2])\n # Reconstitute polynom\n polynom = numpy.dot(vdm_wl, pfit)\n # Retrieve airmass from zenith angle\n airmass = modtranTools.zenith2airmass(z_ang, site='lsst', unit='rad')\n return numpy.exp(-1.0 * airmass * polynom)", "def Drep(self):\n sinE = np.sin(self.E())\n cosE = np.cos(self.E())\n return -self.alpha()*sinE+(self.beta()+self.GAMMA)*cosE", "def _transfer_adp(self):\n toleratedAtoms = []\n for atom in self['exp'].atoms:\n tolerated = atom.transfer_adp()\n if tolerated:\n toleratedAtoms.append(tolerated)\n for atom in toleratedAtoms:\n atom.averageADP()", "def mbplane(A, c=sqrt(8 / 3)):\n # for Sun Mg Potential: c=1.6281689374348\n if A[0] == 0 and A[1] == 0 and A[2] == 0:\n B = np.array([0, 0, 1])\n\n else:\n if A[0] == 0:\n p1 = np.array([-0.5 / A[1], 0.5 * sqrt(3) / A[1], 0])\n p2 = np.array([-0.5 / A[2], -0.5 * sqrt(3) / A[2], 0])\n elif A[1] == 0:\n p1 = np.array([-0.5 / A[2], -0.5 * sqrt(3) / A[2], 0])\n p2 = np.array([1 / A[0], 0, 0])\n else:\n p1 = np.array([1 / A[0], 0, 0])\n p2 = np.array([-0.5 / A[1], 0.5 * sqrt(3) / A[1], 0])\n if A[3] == 0:\n z = p1 + np.array([0, 0, 1])\n else:\n z = np.array([0, 0, c / A[3]])\n\n P1 = p1 - z\n P2 = p2 - z\n B = np.cross(P1, P2)\n return B", "def trail_length_from_plane_intersection_numpy(point, vector, plane, tol=1e-6):\n origin, normal = plane\n cos_nv = np.dot(normal, normalize_vector_numpy(vector))\n\n if np.abs(cos_nv) < tol:\n return\n\n oa = origin - point\n cos_noa = np.dot(normal, oa)\n\n return cos_noa / cos_nv", "def mae(actual, predicted):\n rms = np.abs(actual-predicted)\n\n # Returning the sqaure root of the root mean square\n return float(rms.mean())", "def fit_plane(xyz,z_pos=None):\n mean = np.mean(xyz,axis=0)\n xyz_c = xyz - mean[None,:]\n l,v = np.linalg.eig(xyz_c.T.dot(xyz_c))\n abc = v[:,np.argmin(l)]\n d = -np.sum(abc*mean)\n # unit-norm the plane-normal:\n abcd = np.r_[abc,d]/np.linalg.norm(abc)\n # flip the normal direction:\n if z_pos is not None:\n if np.sum(abcd[:3]*z_pos) < 0.0:\n abcd *= -1\n return abcd", "def _epsilon_eval(z, A, ord=2):\n z=np.array(z)\n A=np.array(A)\n zc = complex(z[0], z[1])\n try :\n ep = 1/spl.norm(spl.inv(zc*np.eye(*A.shape)-A),ord=ord)\n # ep = spl.norm(zc*np.eye(*A.shape)-A,ord=ord)\n except TypeError:\n if ord==\"svd\":\n ep = np.min(spl.svdvals(zc*np.eye(*A.shape)-A))\n else: raise Exception(\"invalid method\")\n return ep", "def _calpara(self):\n self.up = math.exp(self.sigma*math.sqrt(self.deltatime))\n self.down = math.exp(-self.sigma*math.sqrt(self.deltatime))\n self.upprob = (math.exp((self.r-self.d)*self.deltatime)-self.down)/(self.up-self.down)", "def test_vic_abspearson_linear(self):\n z_matrix = np.array(\n [[0.0, 0.0, 1.0],\n [0.1, 0.2, 0.8],\n [0.2, 0.4, 0.6],\n [0.3, 0.7, 0.3],\n [0.6, 0.8, 0.2],\n [0.8, 0.9, 0.1],\n [1.0, 1.0, 0.0]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"VIC\", \"AbsPearson\")\n expected_w_vector = np.array(\n [0.33861310, 0.33069345, 0.33069345],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)", "def 
evaporatePheromones(self):\n for u in self.graph.nodes:\n for v in self.graph.neighbors(u):\n if self.graph_data[u]['pheromones'][v] > 0 :\n self.graph_data[u]['pheromones'][v] -= self.parameters['evaporation_rate'] * self.graph_data[u]['pheromones'][v]\n self.graph_data[v]['pheromones'][u] -= self.parameters['evaporation_rate'] * self.graph_data[v]['pheromones'][u]", "def get_delta_v_tot(f, e, a, P):\n\n coeff = (2.0*np.pi/P) * a / np.sqrt(1.0 - e*e)\n delta_v_tot = coeff * (1.0 + 2.0*e*np.cos(f) + e*e) / 1.0e5\n\n return delta_v_tot", "def V_magEarth(alpha,a_p,d):\n V = 5.*np.log10(a_p*d) - 3.99 - 1.060e-3*alpha + 2.054e-4*alpha**2.\n return V", "def line_plane(l, p):\n d = dot((p.o - l.o), p.n) / dot(l.d, p.n)\n return l(d)", "def get_phase_adv(self):\n if self._phase_advance is None:\n self._phase_advance = get_phase_advances(self.twiss_df)\n return self._phase_advance", "def vdot(self):\n return _cantera.wall_vdot(self.__wall_id)", "def compute_ddw_active_zpr(self):\n self.zero_point_renormalization = (\n self.sum_qpt_function('get_zpr_ddw_active'))\n self.renormalization_is_dynamical = False", "def _get_alpha(self, m_t, v_t):\n return max(0, ((-m_t * self._psi \n + math.sqrt((m_t ** 2 * self._phi ** 4) \n / 4 + v_t * self._phi ** 2 * self._xi)) \n / (v_t * self._xi)))", "def dft(im: np.array, uv: np.array, vis: np.array):\n m, n = im.shape\n size = im.size\n xy = np.mgrid[0:m, 0:n].reshape(2, size)\n for i in range(uv.shape[1]):\n vis[i] = np.sum(\n im.reshape(size) * np.exp(\n -2j * np.pi * (uv[0, i] * xy[0, :] / m + uv[1, i] * xy[1, :] / n)))\n\n return vis", "def ev2ve(eV): \n return cv*np.sqrt( eV*(eV+2.e0*mec2))/(eV+mec2)", "def depolarizer(dp):\n return np.array([[1,0,0,0],[0,dp,0,0],[0,0,dp,0],[0,0,0,dp]])", "def _prepare_plane(self):\n verticies = [\n # main plane - note that the mainplane is scaled so the mat_plane\n # matrix will it transform to the correct coordinates\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n\n # coord plane\n 0, 0,\n 0, -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], 0,\n 0, 0,\n\n # axes\n 0, -self.o_wh[1], self.o_wh[0], -self.o_wh[1], #x\n 0, 0, 0, -self.o_wh[1], #y\n ]\n\n colors = [\n 1.0, 1.0, 1.0, 1.0, # outer box XXX Remove outer box...\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n .9, .9, .9, 9.0, # plot box\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n 0.0, 0.0, 0.0, 1.0, #lines\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n ]\n\n self._fonts = []\n for u in range(1, self._unit_count[0]+1):\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]+0.02)\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]-0.02)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(u*(self.i_axis[0]/self._unit_count[0])-self.i_origin[0]),\n (self._unit_w[0]*u+self.i_border[0]-0.05)*self._scaling[0],\n 
(-self.o_wh[1]+(self.i_border[3])*0.5)\n ])\n for u in range(0, self._unit_count[1]):\n verticies.append(0.02)\n verticies.append(-self._unit_w[1]*u)\n verticies.append(-0.02)\n verticies.append(-self._unit_w[1]*u)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(self.i_axis[1]-u*self.i_axis[1]/self._unit_count[1]-self.i_origin[1]),\n (0.025)*self._scaling[0],\n (-(self._unit_w[1])*u-self.i_border[1]+0.01)*self._scaling[1]\n ])\n\n self._draw_plane_indicies = (0, 12)\n self._draw_line_indicies = (12, 4+self._unit_count[0]*2+self._unit_count[1]*2)\n\n # convert data into valid data format\n verticies = numpy.array(verticies, dtype=numpy.float32)\n colors = numpy.array(colors, dtype=numpy.float32)\n\n self._plane_vao = util.VAO()\n self._plane_vbo = util.VBO(2)\n\n with self._plane_vao:\n # plane verticies\n with self._plane_vbo.get(0):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(verticies), verticies, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_position'), 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(0)\n\n # place vertex colors\n with self._plane_vbo.get(1):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(colors), colors, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_color'), 4, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(1)", "def calc_separation_alt_az(self, body):\n self.body.compute(self.site)\n body.body.compute(self.site)\n\n delta_az = float(self.body.az) - float(target.az)\n delta_alt = float(self.body.alt) - float(target.alt)\n return (delta_alt, delta_az)", "def fD(self, vpd):\n\t if vpd < 0.1:\n\t return 1.\n\t else:\n\t return 3/13./sqrt(vpd/1000.)", "def ds_AET(pet, A, paw_now, paw_max):\n\n aet = pet * ((1 - np.exp(-A*paw_now/paw_max))/(1 - 2*np.exp(-A) + np.exp(-A*paw_now/paw_max)))\n return aet", "def reflect(self, ray):\n normal = self.normal(ray.position)\n if normal.dot(ray.direction) > 0:\n normal = -normal\n return Ray(\n ray.direction - 2 * dot(ray.direction, normal) * normal, ray.position)", "def filter_plane(img_plane):\n img_plane = despeckle_by_opening(img_plane)\n img_plane = pseudo_flatfield(img_plane)\n return img_plane", "def adjust_lines_to_planes(lines, planes):\n \n lines = at.LineSet(lines)\n planes = at.PlaneSet(planes)\n \n angles = np.zeros(len(lines))\n adjusted_lines = np.zeros_like(lines)\n for i, (line, plane) in enumerate(zip(lines, planes)):\n cos_theta = np.dot(line, plane)\n angles[i] = pi/2. - acos(cos_theta)\n adjusted_line = line - line*cos_theta\n adjusted_lines[i] = adjusted_line/sqrt(np.dot(adjusted_line,\n adjusted_line))\n return adjusted_lines, angles", "def compute(dm,do):\n mae = MV.average(MV.absolute(MV.subtract(dm,do)))\n return float(mae)" ]
[ "0.6131728", "0.57741225", "0.56889206", "0.56324", "0.55201805", "0.55160964", "0.5514133", "0.5479282", "0.5462305", "0.5436542", "0.52982074", "0.52627504", "0.52521896", "0.5238149", "0.52197284", "0.52163255", "0.51973253", "0.5194445", "0.5152425", "0.5139117", "0.5137654", "0.5130722", "0.51272553", "0.51077765", "0.5082127", "0.5051763", "0.50444925", "0.50361395", "0.50334024", "0.50302327", "0.50262105", "0.5017403", "0.49880305", "0.49442607", "0.4929934", "0.49217436", "0.4913808", "0.49099064", "0.48973528", "0.48881763", "0.48839703", "0.48785144", "0.48784274", "0.48762313", "0.4865428", "0.48654148", "0.48617414", "0.4861713", "0.4859344", "0.48442197", "0.48320156", "0.48295805", "0.48234493", "0.4821654", "0.48201898", "0.48107266", "0.48055655", "0.48024723", "0.47868276", "0.47839192", "0.47815475", "0.4770977", "0.47666746", "0.4760592", "0.475218", "0.47496104", "0.47483495", "0.47396973", "0.4739518", "0.4726191", "0.4717635", "0.47106823", "0.4705273", "0.4702773", "0.4691605", "0.4686359", "0.46830392", "0.46799988", "0.4678616", "0.46720853", "0.46628472", "0.46577054", "0.4656556", "0.46533734", "0.46432337", "0.46336251", "0.46289477", "0.4626931", "0.4618902", "0.46173006", "0.4616113", "0.46066344", "0.46062112", "0.45994362", "0.45968583", "0.45967287", "0.45961767", "0.4592158", "0.4590827", "0.45905066" ]
0.6869817
0
Calculates the tensor representation of ADP from its principle axis.
def eigenv2tensor(axis): vec = np.ones((3, 3)) vecval = np.ones((3, 3)) for i in xrange(len(axis)): vmag = np.linalg.norm(axis[i]) v = axis[i] / vmag #print v vec[:, i] = v vecval[:, i] = axis[i] adp = np.linalg.solve(vec, vecval) return adp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ADP (self):", "def get_axis(adp):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n w, v = np.linalg.eig(adp)\n return [np.array((w[j] * v[:, j]).flatten().tolist()[0]) for j \\\n in xrange(3)]", "def get_aa_tpdm(self) -> Tuple['Nparray', 'Nparray']:\n dveca, _ = self.calculate_dvec_spin()\n alpha_opdm = numpy.tensordot(dveca, self.coeff.conj(), axes=2)\n nik_njl_aa = numpy.transpose(numpy.tensordot(dveca.conj(),\n dveca,\n axes=((2, 3), (2, 3))),\n axes=(1, 2, 0, 3))\n for ii in range(nik_njl_aa.shape[1]):\n nik_njl_aa[:, ii, ii, :] -= alpha_opdm\n return alpha_opdm, -nik_njl_aa", "def ptp(a, axis=None):\r\n\r\n a = as_tensor_variable(a)\r\n\r\n out = max(a, axis) - min(a, axis)\r\n\r\n return out", "def reflect_adp(adp, planev):\n M = np.identity(4)\n M[:3, :3] -= 2.0 * np.outer(planev, planev)\n M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev\n\n return rotate_adp(adp, M[:3, :3])", "def get_ab_tpdm(self) -> 'Nparray':\n dveca, dvecb = self.calculate_dvec_spin()\n tpdm_ab = numpy.transpose(numpy.tensordot(dveca.conj(),\n dvecb,\n axes=((2, 3), (2, 3))),\n axes=(1, 2, 3, 0))\n return tpdm_ab", "def ap(self, P):\n if P.divides(self.conductor()):\n if (P*P).divides(self.conductor()):\n # It is 0, because the reduction is additive.\n return ZZ(0)\n else:\n # TODO: It is +1 or -1, but I do not yet know how to\n # compute which without using the L-function.\n return '?'\n else:\n return self._S.hecke_matrix(P)[0,0]", "def ADP_trace(adp):\n return sum(adp[:3])", "def adj_ptycho(self, data, prb, scan, igpu):\n res = cp.zeros([self.ptheta, self.nz, self.n],\n dtype='complex64')\n data = data.copy() # avoid this todo\n res = cp.ascontiguousarray(res)\n data = cp.ascontiguousarray(data)\n prb = cp.ascontiguousarray(prb)\n scan = cp.ascontiguousarray(scan)\n \n self.adj(res.data.ptr, data.data.ptr,\n prb.data.ptr, scan.data.ptr, igpu)\n #res.real = ndimage.rotate(res.real,-72.035, axes=(2,1), reshape=False, order=1)\n #res.imag = ndimage.rotate(res.imag,-72.035, axes=(2,1), reshape=False, order=1)\n return res", "def depolarizer(dp):\n return np.array([[1,0,0,0],[0,dp,0,0],[0,0,dp,0],[0,0,0,dp]])", "def pia_from_kdp(kdp, dr, gamma=0.08):\n alpha = gamma * kdp\n return 2 * np.cumsum(alpha, axis=-1) * dr", "def compute_ap(ranks, nres):\n\n # number of images ranked by the system\n nimgranks = len(ranks)\n\n # accumulate trapezoids in PR-plot\n ap = 0\n\n recall_step = 1. 
/ nres\n\n for j in np.arange(nimgranks):\n rank = ranks[j]\n\n if rank == 0:\n precision_0 = 1.\n else:\n precision_0 = float(j) / rank\n\n precision_1 = float(j + 1) / (rank + 1)\n\n ap += (precision_0 + precision_1) * recall_step / 2.\n\n return ap", "def prod(tensor, axis=None):\n raise NotImplementedError", "def axis_representation(self, zero=1e-8):\n it = self.inertia_tensor(zero=zero)\n Iidx = np.argsort(np.diagonal(it))\n if np.array_equal(Iidx, np.asarray([1, 2, 0])):\n ar = 'IR'\n elif np.array_equal(Iidx, np.asarray([2, 0, 1])):\n ar = 'IIR'\n elif np.array_equal(Iidx, np.asarray([0, 1, 2])):\n ar = 'IIIR'\n elif np.array_equal(Iidx, np.asarray([2, 1, 0])):\n ar = 'IL'\n elif np.array_equal(Iidx, np.asarray([0, 2, 1])):\n ar = 'IIL'\n elif np.array_equal(Iidx, np.asarray([1, 0, 2])):\n ar = 'IIIL'\n\n # if inertial tensor has non-zero off-diagonals, this whole classification is iffy\n if np.count_nonzero(it - np.diag(np.diagonal(it))):\n ar = '~' + ar\n\n return ar", "def rotate_ADP_about_axis(ADP, angle, axisDirection):\n adp = get_adp_as_matrix(ADP)\n u, v = np.linalg.eig(adp)\n startPoints = [v[:, i].flatten().tolist()[0] for i in xrange(3)]\n endPoints = [rotate_point_about_axis(point, angle, axisDirection, (0, 0, 0)) for point in startPoints]\n rotMat = get_transform(startPoints, endPoints, matrix=True).transpose()\n newadp = np.dot(rotMat.transpose(), np.dot(adp, rotMat))\n return newadp[0, 0], newadp[1, 1], newadp[2, 2], newadp[0, 1], newadp[0, 2], newadp[1, 2]", "def to_axang(self) -> Tuple[np.ndarray, float]:\n denom = np.linalg.norm(self.v)\n angle = 2.0*np.arctan2(denom, self.w)\n axis = np.zeros(3) if angle==0.0 else self.v/denom\n return axis, angle", "def get_A_gcn(self, p):\n\n if p == 0:\n return preprocess_adj(self.A)\n else:\n return preprocess_adj(self._get_A_rdm(p))\n\n return rdm_feature(self.X, percent=p)", "def ADF(self, dP, ax):\n from scipy.special import sph_harm\n ang = self._ang_part(dP)\n #scipy defines their harmonics to have `theta` be azimuthal, which is\n #opposite from physics.\n #we set $m = 0$ so that the azimuthal part doesn't contribute at all.\n result = np.zeros(len(ax))\n for l, p in ang.items():\n Ylm = sph_harm(0, l, 0, ax)*np.sqrt(2*l+1)\n #We are interested in the c* c of this value, which is multiplied\n #together to get pissnnl.\n result += p*np.sqrt(np.absolute(Ylm*Ylm.conjugate()))\n return result", "def axialt(a):\n return np.array([[0, -a[2], a[1]], [a[2], 0, -a[0]], [-a[1], a[0], 0]])", "def _propagate_A(self):\n A_roots = np.roots(self.A)\n A_roots_norm = [r if np.abs(r) < 1 else 1/np.conj(r) for r in A_roots]\n A_poly = np.poly(A_roots_norm)\n self.alpha_g = -A_poly[1:]\n self.A = np.concatenate([[1], -self.alpha_g])\n\n self.rev_A = self.A[::-1]\n\n self.pie = np.dot(self.M_mu, self.rev_A)\n self.pi = self.pie*self.e\n self.p = self.pi*self.d\n\n\n M_R = np.lib.stride_tricks.as_strided(self.R_pad,\n shape=[self.L_h, self.L_h, self.P+1],\n strides=[self.R_pad.strides[0], self.R_pad.strides[1], self.R_pad.strides[0]])\n self.half_pie_var = np.dot(M_R, self.rev_A)\n self.half_pie_var_pad = np.pad(self.half_pie_var, [(0, 0), (self.P, 0)], 'constant')\n self.M_half_pie_var_pad = np.lib.stride_tricks.as_strided(self.half_pie_var_pad,\n shape=[self.L_h, self.P+1],\n strides=[self.half_pie_var_pad.strides[0]+self.half_pie_var_pad.strides[1], self.half_pie_var_pad.strides[1]])\n\n self.pie_var = np.dot(self.M_half_pie_var_pad, self.rev_A)", "def adj(self):\n\n d = self.rank\n permutation = [0] * d\n permutation[::2] = range(1, d, 
2)\n permutation[1::2] = range(0, d, 2)\n t = np.conj(self._t).transpose(permutation)\n return self.__class__(t)", "def project_perp(A):\n return np.eye(A.shape[1]) - project(A)", "def R_adp(data):\n printer('S_adp = ?')\n printer('R_adp = | (U_iso_xxx - U_iso_obs) / U_iso_obs |')\n printer('mean = sum((U_iso_xxx - U_iso_obs) / U_iso_obs) / n')\n printer('abs = sum(R_adp) / n\\n')\n printer('(geometric mean is used)\\n')\n\n printer(' | ADP_calc / ADP_obs | APD_tls / ADP_obs')\n printer(' |--------------------|-------------------')\n printer(' Atom | S_adp | R_adp | S_adp | R_adp')\n printer(' ===============================================')\n S_sum = []\n R_sum = []\n S_sum_tls = []\n R_sum_tls = []\n for atom in data['exp'].atoms:\n if not atom.element == 'H':\n U_rel_calc = cg.Uiso(atom.adp['cart_sum'])\n U_rel_obs = cg.Uiso(atom.adp['cart_meas'])\n R_adp = (U_rel_calc - U_rel_obs) / U_rel_obs\n R_sum.append(R_adp)\n S_adp = ws06(atom.adp['cart_sum'], atom.adp['cart_meas'])\n S_sum.append(S_adp)\n\n U_rel_tls = cg.Uiso(atom.adp['cart_ext'])\n R_tls = (U_rel_tls - U_rel_obs) / U_rel_obs\n R_sum_tls.append(R_tls)\n\n S_tls = ws06(atom.adp['cart_ext'], atom.adp['cart_meas'])\n S_sum_tls.append(S_tls)\n\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format(atom.name,\n S_adp,\n abs(R_adp),\n S_tls,\n abs(R_tls)))\n\n printer(' ------|----------|---------|----------|--------')\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format('mean',\n np.mean(S_sum),\n np.mean(R_sum),\n np.mean(S_sum_tls),\n np.mean(R_sum_tls)))\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format('abs',\n np.mean(S_sum),\n np.mean([abs(i) for i in R_sum]),\n np.mean(S_sum_tls),\n np.mean(\n [abs(i) for i in R_sum_tls])))\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format('SD',\n np.std(S_sum),\n np.std(R_sum),\n np.std(S_sum_tls),\n np.std(R_sum_tls)))\n if config.arg('correlate'):\n printer('\\n\\'mean R_adp (ADP_calc / ADP_obs)\\' can be\\ninterpreted as the ratio 1 - (ADP_int / ADP_obs).')\n else:\n printer('\\n\\'mean R_adp (ADP_tls / ADP_obs)\\' can be\\ninterpreted as the ratio 1 - (ADP_obs / ADP_int).')", "def pad(self, nxp, nyp):\n assert (nxp > self.nx)\n assert (nyp > self.ny)\n assert (np.mod(nxp - self.nx, 2) == 0)\n assert (np.mod(nyp - self.ny, 2) == 0)\n\n ret = tqumap(nx=nxp, dx=self.dx, ny=nyp, dy=self.dy)\n for this, that in [[self.tmap, ret.tmap], [self.qmap, ret.qmap],\n [self.umap, ret.umap]]:\n that[(nyp - self.ny) / 2:(nyp + self.ny) / 2, (nxp - self.nx) / 2:(\n nxp + self.nx) / 2] = this\n return ret", "def alpha(self, Ppump):\n\n EsatL, TR, tauL = self.EsatL, self.TR, self.tauL\n Pst, gst = self.steadystate(Ppump)\n a = (1. / tauL + Pst * (self.dqP_dEP(Pst * TR) + 1. / EsatL))\n return(a)", "def to_axisangle(self) -> Tuple[np.ndarray, float]:\n angle = np.arccos((self.A.trace()-1)/2)\n axis = np.zeros(3)\n if angle!=0:\n axis = np.array([self.A[2, 1]-self.A[1, 2], self.A[0, 2]-self.A[2, 0], self.A[1, 0]-self.A[0, 1]])/(2*np.sin(angle))\n return axis, angle", "def project(self, alpha):\n ax = alpha[0]\n ay = alpha[1]\n az = alpha[2]\n anorm = ax ** 2.0 + ay ** 2.0 + az ** 2.0\n i = anorm > 1.0\n\n anorm_i = anorm[i] ** 0.5 # Square root is taken here. 
Faster.\n ax[i] = np.divide(ax[i], anorm_i)\n ay[i] = np.divide(ay[i], anorm_i)\n az[i] = np.divide(az[i], anorm_i)\n\n return [ax, ay, az]", "def p(self):\n if self._p is None:\n self._p = np.eye(4)\n\n return self._p", "def P_post(self):\n return dot(self.U_post, dot(diag(self.D_post), self.U_post.T))", "def calculate_alphabeta(self, p, prec):\n var('x')\n sqrtdelta = None\n try:\n M = Qp(p, prec).extension(x ** 2 - self.constants.delta, names=\"padicroot\")\n sqrtdelta = M.gen(0)\n except NotImplementedError:\n try:\n M = Qp(p, prec)\n sqrtdelta = M(self.constants.delta).sqrt()\n except NotImplementedError:\n # Exceptional case.\n M = Qp(p, prec).extension(x ** 2 - self.constants.A * x - self.constants.B, names=\"padicroot\")\n alpha = M.gen(0)\n beta = self.constants.A - alpha\n return (alpha, beta)\n\n alpha = (self.constants.A + sqrtdelta) / 2\n beta = self.constants.A - alpha\n return (alpha, beta)", "def dd_axis(axis, ambient_dim, operand):\n d = Derivative()\n\n unit_vector = np.zeros(ambient_dim)\n unit_vector[axis] = 1\n\n unit_mvector = MultiVector(unit_vector)\n\n return d.resolve(\n (unit_mvector.scalar_product(d.dnabla(ambient_dim)))\n * d(operand))", "def c1(adp1, adp2):\n\n def get_axis(adp):\n \"\"\"\n Returns ADP as its three principle axis representation.\n :param adp: List/Array type of length 6.\n :returns: List of three arrays of length 3.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n w, v = np.linalg.eig(adp)\n return [np.array((w[j] * v[:, j]).flatten().tolist()[0]) for j \\\n in xrange(3)]\n\n adp1_axis = get_axis(adp1)\n adp2_axis = get_axis(adp2)\n\n val = 0\n for i in xrange(3):\n addval = abs(norm(adp1_axis[i] - adp2_axis[i]))\n addval = addval * abs((1 - abs(np.dot(adp1_axis[i], adp2_axis[i]))))\n val += addval\n return val", "def adj_op(self, x):\n self.plan.f = x\n return np.copy(self.plan.adjoint()) / np.sqrt(self.plan.M)", "def alpha_pp(self, x):\n y = (2.*x)**3.\n return 0.10 - ( (self.alpha_p + 0.10) * y / (1. 
+ y) )", "def _etap(self,x):\n return self._eta_sfr_scaling(x,'p_cool') + self._eta_sfr_scaling(x,'p_hot')", "def OAVolterra_direct(p0,wD,dt,Nt):\n # INITIALIZATION ------------------------------------------------------\n pz = np.zeros(Nt) # oa signal at detection point\n K0 = wD # oa propagator: K(0,0) \n K1 = wD*np.exp(-wD*dt) # oa propagator: K(1,0) \n K1_K0 = np.exp(-wD*dt) # quotient: K(i+1)/K(i)\n\n # SOLVE FORWARD PROBLEM VIA RECURRENCE RELATION -----------------------\n I = 0 \n pz[0] = p0[0] \n for i in range(1,Nt):\n I = I*K1_K0 + 0.5*dt*(K1*p0[i-1] + K0*p0[i])\n pz[i] = p0[i] - I\n return pz", "def get_px(self, x):\r\n \r\n T = len(x)\r\n \r\n alphas = np.zeros((T, self.M))\r\n \r\n # Get the first alpha\r\n alphas[0] = self.pi * self.B[:, x[0]]\r\n \r\n # Get the rest of the alphas\r\n for t in range(1, T):\r\n \r\n alpha = alphas[t-1].dot(self.A)\r\n \r\n alphas[t] = alpha * self.B[:, x[t]]\r\n \r\n if t == T-1: print(\"Alphas:\", alphas)\r\n \r\n return alphas[-1].sum()", "def angle(pred):\n pred_vec = pred.unsqueeze(0) - pred.unsqueeze(1) # (N, N, C)\n norm_pred_vec = F.normalize(pred_vec, p=2, dim=2)\n angle = torch.bmm(norm_pred_vec,\n norm_pred_vec.transpose(1, 2)).view(-1) # (N*N*N, )\n return angle", "def fitfunc_AP(x, *p):\n val = p[0]\n for n in range(0, len(p) - 1, 2):\n ind = n + 1\n mode = (n / 2) + 1\n val = val + p[ind] * np.cos(2 * np.pi * mode * (x - p[ind + 1]) / 360.0)\n return val", "def _build_attention_equation(qkv_rank, attn_axes):\n import string\n\n _CHR_IDX = string.ascii_lowercase\n target_notation = _CHR_IDX[:qkv_rank]\n # `batch_dims` includes the head dim.\n batch_dims = tuple(np.delete(range(qkv_rank), attn_axes + (qkv_rank - 1,)))\n letter_offset = qkv_rank\n source_notation = \"\"\n for i in range(qkv_rank):\n if i in batch_dims or i == qkv_rank - 1:\n source_notation += target_notation[i]\n else:\n source_notation += _CHR_IDX[letter_offset]\n letter_offset += 1\n\n product_notation = \"\".join(\n [target_notation[i] for i in batch_dims]\n + [target_notation[i] for i in attn_axes]\n + [source_notation[i] for i in attn_axes]\n )\n dot_product_equation = \"%s,%s->%s\" % (\n source_notation,\n target_notation,\n product_notation,\n )\n attn_scores_rank = len(product_notation)\n combine_equation = \"%s,%s->%s\" % (\n product_notation,\n source_notation,\n target_notation,\n )\n return dot_product_equation, combine_equation, attn_scores_rank", "def preprocess_adj(adj):\n adj_normalized = normalize_adj(adj + np.eye(adj.shape[0]) + adj.T)\n return adj_normalized", "def matSys(A_r, Tx, x):\n B = np.linalg.pinv(A_r).T\n Xi = np.diagonal(B.T @ Tx @ B)\n return Xi / np.tensordot(A_r, x, axes= [0,0])\n # Original\n # B = tf.transpose(tf.linalg.pinv(A_r))\n # xi = tf.linalg.diag_part(tf.linalg.matmul(tf.linalg.matmul(tf.transpose(B), Tx), B))\n # Xi = tf.divide(xi, tf.tensordot(tf.transpose(A_r), x, axes= [[1],[0]]))\n # return Xi\n\n # TODO: debug this method\n # With Khatri-Rao and without pinv\n # _, r = tf.shape(A_r)\n # A = tf.reshape(tf.expand_dims(A_r,1) * tf.expand_dims(A_r,0), [-1, r])\n # Xi = tf.linalg.lstsq(A, tf.reshape(Tx,[-1,1]))\n # return Xi/tf.tensordot(A_r, x, axes = [0,0])\n\n # # With Khatri-Rao\n # _, r = tf.shape(A_r)\n # A = tf.reshape(tf.expand_dims(A_r,1) * tf.expand_dims(A_r,0), [-1, r])\n # Xi = tf.matmul(tf.linalg.pinv(A), tf.reshape(Tx,[-1,1]))\n # return Xi/tf.tensordot(A_r, x, axes = [0,0])", "def pp(self):\n \n return np.cross(self.v, self.w) / np.dot(self.w, self.w)", "def apool1(x, p):\n if p > 1:\n x = tf.expand_dims(x, 3) 
# N x M x F x 1\n x = tf.nn.avg_pool(x, ksize=[1, p, 1, 1], strides=[1, p, 1, 1], padding='SAME')\n return tf.squeeze(x, [3]) # N x M/p x F\n else:\n return x", "def orientation_ras_lps(affine: NdarrayTensor) -> NdarrayTensor:\n sr = max(affine.shape[0] - 1, 1) # spatial rank is at least 1\n flip_d = [[-1, 1], [-1, -1, 1], [-1, -1, 1, 1]]\n flip_diag = flip_d[min(sr - 1, 2)] + [1] * (sr - 3)\n if isinstance(affine, torch.Tensor):\n return torch.diag(torch.as_tensor(flip_diag).to(affine)) @ affine # type: ignore\n return np.diag(flip_diag).astype(affine.dtype) @ affine # type: ignore", "def d_dp(self, points):\n d_dp = self.model.components.reshape(self.model.n_active_components,\n -1, self.n_dims)\n return d_dp.swapaxes(0, 1)", "def RDF(self, dP, rx, fast=True):\n parts = np.zeros((len(dP), len(rx)))\n for i, dPi in enumerate(dP):\n w = np.sign(dPi[1])*np.sqrt(np.sqrt(np.abs(dPi[1])))\n parts[i,:] = w*self.apnl(dPi, rx, fast=fast)\n return np.sum(parts, axis=0)", "def adj_ptycho_prb(self, data, psi, scan, igpu):\n res = cp.zeros([self.ptheta, self.nprb, self.nprb],\n dtype='complex64')\n data = data.copy() # avoid this todo\n res = cp.ascontiguousarray(res)\n data = cp.ascontiguousarray(data)\n psi = cp.ascontiguousarray(psi)\n scan = cp.ascontiguousarray(scan)\n self.adjprb(res.data.ptr, data.data.ptr,\n psi.data.ptr, scan.data.ptr, igpu)\n return res", "def generate_A(self, X, psd=True):\n n = len(X)\n A = None\n for i in range(n):\n R = None\n for j in range(n):\n if self.pred_comp[i][j] == 0:\n if R is None:\n R = np.zeros((X[i].shape[1], X[j].shape[1]))\n else:\n R = np.hstack((R, np.zeros((X[i].shape[1],\n X[j].shape[1]))))\n else:\n if R is None:\n R = np.dot(X[i].T, X[j])\n else:\n R = np.hstack((R, np.dot(X[i].T, X[j])))\n if A is None:\n A = R\n else:\n A = np.vstack((A, R))\n\n if psd:\n A = (A + A.T) / 2.0 # Make it symmetric\n [D, _] = np.linalg.eig(A)\n sigma = np.min(np.real(D))\n if sigma < 0.0: # It is negative definite\n # Make it positive semi-definite:\n A = A + np.eye(*A.shape) * sigma * -n\n\n return A", "def _paa(self):\n self.paa = np.array([self.series[i * self.points_per_symbol : (i + 1) * self.points_per_symbol].mean() for i in range(len(self.series) / self.points_per_symbol)])", "def propabilityLVQ(self):\n self.labels = self.labelingLVQ()\n for i in range(self.labels.shape[0]):\n for j in range(self.labels.shape[1]):\n for k in range(self.labels.shape[2]):\n total = sum(self.labels[i, j, k] for i in range(self.labels.shape[0]))\n if total == 0. :\n continue\n else:\n self.propa[i, j, k] = self.labels[i, j, k] / total\n self.propa[i, j, k] = round(self.propa[i, j, k], 2)\n return self.propa", "def to_tensor(self): \n raise NotImplementedError", "def ptp(self, axis=0, **kwargs) -> \"Dataset\":\n return self.aggregate(axis=axis, func=np.ptp, **kwargs)", "def T_from_approach_axis_center(approach, axis, center):\n T = np.eye(4)\n T[0:3, 0] = approach\n T[0:3, 1] = np.cross(approach, axis)\n T[0:3, 2] = axis\n T[0:3, 3] = center\n return T", "def grads(Y, P):\n n, ndim = Y.shape\n Q, num = Q_affinities(Y)\n equation1 = ((P - Q) * num)\n dY = np.zeros((n, ndim))\n for i in range(n):\n aux = np.tile(equation1[:, i].reshape(-1, 1), ndim)\n dY[i] = (aux * (Y[i] - Y)).sum(axis=0)\n return (dY, Q)", "def preprocess_adj(adj):\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0])) # A_~\n return sparse_to_tensor(adj_normalized)", "def open_toda(x):\n q, p = extract_q_p(x)\n # q2, q3, ... , qN, q1\n qshift = tf.manip.roll(q, shift=-1, axis=2)\n # q1-q2, q2-q3, ... 
, q{N-1}-qN -> omit qN-q1, so qdiff shape (N,1,n-1,1)\n qdiff = q[:,:,:-1,:] - qshift[:,:,:-1,:]\n V = tf.reduce_sum(tf.exp(qdiff), axis=2)\n K = 0.5 * tf.reduce_sum(tf.square(p), axis=2)\n return K + V", "def preprocess_adj(adj):\r\n adj_add_diag=adj + sp.eye(adj.shape[0])\r\n adj_normalized = normalize_adj(adj_add_diag)\r\n return adj_normalized.astype(np.float32) #sp.coo_matrix(adj_unnorm)\r", "def get_adp_from_calc(vx, vy, vz):\n ## lx=np.linalg.norm(vx)\n ## ly=np.linalg.norm(vy)\n ## lz=np.linalg.norm(vz)\n lx = vx\n ly = vy\n lz = vz\n L = np.matrix([[lx, 0, 0],\n [0, ly, 0],\n [0, 0, lz]])\n\n\n ## Vx=vx/lx\n ## Vy=vy/ly\n ## Vz=vz/lz\n Vx = np.array([1, 0, 0])\n Vy = np.array([0, 1, 0])\n Vz = np.array([0, 0, 1])\n V = np.matrix([[Vx[0], Vy[0], Vz[0]],\n [Vx[1], Vy[1], Vz[1]],\n [Vx[2], Vy[2], Vz[2]]])\n Vinv = np.linalg.inv(V)\n #print V,Vinv\n M = np.dot(np.dot(Vinv, L), V)\n #print M\n return M", "def axis_asymmetry_graph(self,label,masked,pa,Ax_center):\n from algorithms import bend180iraf\n x = Ax_center[1] ; y = Ax_center[0]\n image1, image2 = bend180iraf(masked.copy(),x,y,pa)\n AxImg = num.abs(image1 - image2)\n \n id = self._getGraphId()\n root = 'Ax_%s' % (label,id)\n pngname = root + '.png' ; epsname = root + '.eps'\n jpgname = root + '.jpg'\n doStamp(AxImg,pngname,format='PNG')\n Convert(pngname,jpgname)\n \n Painted = Paint(jpgname)\n Painted.load()\n text = 'AxsAs%s=%5.2f' % (label,self['M_AXS%s'%label])\n #Painted.Graffiti(text,commtextpos)\n Painted.save(jpgname) \n Painted.release()\n \n Convert(jpgname,epsname)\n os.system('rm %s' % (pngname,jpgname))\n self['figures']['Ax%s'%label] = epsname\n self['figcomms']['Ax%s'%label] = text", "def ppd(self):\n return math.sqrt(np.dot(self.v, self.v) / np.dot(self.w, self.w) )", "def _e_2d_(p, a):\r\n diff = a - p[np.newaxis, :]\r\n return np.einsum('ij,ij->i', diff, diff)", "def _create_transpose(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('perm', op.perm),\n ])\n return node", "def arctan(self):\t\t\n\t\tval = np.arctan(self.val)\n\t\tif len(self.der.shape):\n\t\t\tto_multiply = 1 / (1 + (self.val) ** 2)\n\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\tder = to_multiply * self.der\n\t\telse:\n\t\t\tder = None\n\t\treturn Var(val, der)", "def _get_A_rdm(self, p):\n\n return rdm_graph(self.G, nodelist=self.nodelist, percent=p)", "def prediction2d(self, X, t):\n self.A = self.createA(t)\n X = self.A.dot(X)\n return X", "def _mps_decompose_AC(self, A):\n Dl, d, Dr = A.shape\n Q, C = qr(np.reshape(A, [Dl * d, Dr]))\n nC = nfactor(C)\n # nC = max(abs(C.min()), abs(C.max()))\n if C.shape == (1, 1): # if number then makes C = 1\n Q *= np.sign(C.flat[0])\n C = np.ones((1, 1))\n else:\n C = C / nC\n Dr = C.shape[0]\n Q = np.reshape(Q, [Dl, d, Dr])\n return Q, C, nC, Dr", "def P(self):\n self.eigenmatrix()", "def get_ade(forecasted_trajectory, gt_trajectory) -> float:\n pred_len = forecasted_trajectory.shape[1]\n bs = forecasted_trajectory.shape[0]\n \n\n ade=torch.sqrt(\n (forecasted_trajectory[:,:, 0] - gt_trajectory[:,:, 0]) ** 2\n + (forecasted_trajectory[:,:, 1] - gt_trajectory[:,:, 1]) ** 2\n )\n \n \n return ade.mean()", "def call(self, x):\n reval = tf.matmul(x, self.components_) + self.mean_\n if self.output_reshape:\n reval = tf.reshape(reval, (3, self.vertice_size, -1))\n reval = tf.transpose(reval, perm=[2, 1, 0])\n return reval", "def calc_x(x, ALD,PL): ## jit 
works\n\n row, col = cuda.grid(2)\n if row < ALD.shape[0] and col < ALD.shape[1]:\n if PL[row,col] != 0 :\n x[row,col] = (ALD[row,col] / PL[row,col]) - 1", "def _propagate_R(self):\n self.R_pad = np.pad(self.R, [(self.P, 0), (0, 0)], 'constant')\n M_R = np.lib.stride_tricks.as_strided(self.R_pad,\n shape=[self.L_h, self.L_h, self.P+1],\n strides=[self.R_pad.strides[0], self.R_pad.strides[1], self.R_pad.strides[0]])\n\n self.half_pie_var = np.dot(M_R, self.rev_A)\n self.half_pie_var_pad = np.pad(self.half_pie_var, [(0, 0), (self.P, 0)], 'constant')\n self.M_half_pie_var_pad = np.lib.stride_tricks.as_strided(self.half_pie_var_pad,\n shape=[self.L_h, self.P+1],\n strides=[self.half_pie_var_pad.strides[0]+self.half_pie_var_pad.strides[1], self.half_pie_var_pad.strides[1]])\n\n self.pie_var = np.dot(self.M_half_pie_var_pad, self.rev_A)", "def addem(inarr):\n return np.expand_dims(inarr, axis=1)", "def get_p_y(self, x):\n xdim = x.ndim\n if xdim == 1:\n x = np.expand_dims(x, axis=0)\n x = stdify(x, self.xScale[0], self.xScale[1])\n inx = Variable(torch.from_numpy(x).float(), volatile=True)\n outp = f.softmax(self.gate(inx)) # get b by p\n p = outp.data.numpy()\n outy = self.expert(inx) # get p by b by N\n if self.argmax:\n max_idx = np.argmax(p, axis=1)\n y_three = outy.data.numpy()\n y = np.array([y_three[max_idx[i], i] for i in range(x.shape[0])])\n else:\n Og_before = outy * outp.t().unsqueeze(2)\n Og = torch.sum(Og_before, dim=0)\n y = Og.data.numpy()\n y = destdify(y, self.yScale[0], self.yScale[1])\n if xdim == 1:\n p = np.squeeze(p, axis=0)\n y = np.squeeze(y, axis=0)\n return p, y", "def getAxisTuple(axis):", "def score_ap_from_ranks_1(ranks, nres):\n\n # accumulate trapezoids in PR-plot\n ap = 0.0\n\n # All have an x-size of:\n recall_step = 1.0 / nres\n\n for ntp, rank in enumerate(ranks):\n\n # y-size on left side of trapezoid:\n # ntp = nb of true positives so far\n # rank = nb of retrieved items so far\n if rank == 0:\n precision_0 = 1.0\n else:\n precision_0 = ntp / float(rank)\n\n # y-size on right side of trapezoid:\n # ntp and rank are increased by one\n precision_1 = (ntp + 1) / float(rank + 1)\n\n ap += (precision_1 + precision_0) * recall_step / 2.0\n\n return ap", "def _cal_uct(node):\n uct = C_PUCT * node.p * math.sqrt(node.parent.N / (1 + node.N))\n uct += node.Q\n\n return uct", "def _version_a(in_tensor, numerator_weights, denominator_weights, training):\n\n xps = _get_xps(in_tensor, numerator_weights, denominator_weights)\n\n numerator = 0\n for i in range(numerator_weights.shape[0]):\n w_n = numerator_weights[i]\n numerator = numerator + w_n * xps[i]\n\n denominator = 1.0\n for j in range(denominator_weights.shape[0]):\n w_d = denominator_weights[j]\n denominator = denominator + tf.abs(w_d * xps[j + 1])\n\n return numerator / denominator", "def return_policy_evaluation(self, p, u, r, T, gamma):\n for s in range(0, self.env.observation_space.n):\n if not np.isnan(p[s]):\n v = np.zeros((1, self.env.observation_space.n), dtype=float)\n v[0, s] = 1.0\n action = int(p[s])\n u[s] = r[s] + gamma * np.sum(np.multiply(u, np.dot(v, T[:, :, action])))\n return u", "def score_ap_from_ranks_1(ranks, nres):\n if nres == 0:\n raise ValueError(\"This query does not have a matching db img.\"\n \"Remove it from the dataset and re-run.\")\n\n ap=0.0 # accumulate trapezoids in PR-plot\n recall_step=1.0/nres # All have an x-size of:\n absc = np.linspace(0, 1, nres)\n for ntp, rank in enumerate(ranks):\n # y-size on left side of trapezoid:\n # ntp = nb of true positives so far\n if rank == 
0: # rank = nb of retrieved items so far\n precision_0 = 1.0\n else:\n precision_0 = ntp/float(rank)\n\n # y-size on right side of trapezoid\n # ntp and rank are increased by one\n precision_1=(ntp+1)/float(rank+1)\n ap+=(precision_1+precision_0)*recall_step/2.0\n return ap", "def to_axang(self) -> Tuple[np.ndarray, float]:\n return self.to_axisangle()", "def get_abscissa(self, p):\n return np.dot(p - self.zero, self.direction)", "def theta_topic(self):\n f1 = self.ndk+self.alpha\n f2 = np.sum(self.ndk, axis=1, keepdims=True)+self.nTopics*self.alpha\n return f1/f2", "def GraphFn(self, inp):\n tensor = inp * 2.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[1])\n tensor = tensor + 3.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[2])\n tensor = tensor * 4.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[3])\n tensor += tensor + 5.0\n return array_ops.identity(tensor, name='output_0')", "def preprocess_adj(adj):\n adj = adj + sp.eye(adj.shape[0])\n adj = sp.coo_matrix(adj)\n row_sum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(row_sum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return d_mat_inv_sqrt.dot(adj).dot(d_mat_inv_sqrt)", "def getQ(cls, alpha, axis):\n\n if isinstance(axis, basestring):\n\n # rotation about one of the main axes\n if axis == 'x':\n q = numpy.array(\n [[1, 0, 0],\n [0, numpy.cos(alpha), -numpy.sin(alpha)],\n [0, numpy.sin(alpha), numpy.cos(alpha)]])\n elif axis == 'y':\n q = numpy.array(\n [[numpy.cos(alpha), 0, numpy.sin(alpha)],\n [0, 1, 0],\n [-numpy.sin(alpha), 0, numpy.cos(alpha)]])\n elif axis == 'z':\n q = numpy.array(\n [[numpy.cos(alpha), -numpy.sin(alpha), 0],\n [numpy.sin(alpha), numpy.cos(alpha), 0],\n [0, 0, 1]])\n\n elif isinstance(axis, (list, tuple, numpy.ndarray)):\n\n # get phi and theta for the axis vector\n axis_vector = Vector(axis)\n phi = axis_vector.phi\n theta = axis_vector.theta\n\n q_tilt = numpy.dot(cls.getQ(phi, 'z'), cls.getQ(theta, 'y'))\n q_back = numpy.dot(cls.getQ(-theta, 'y'), cls.getQ(-phi, 'z'))\n q = numpy.dot(cls.getQ(alpha, 'z'), q_back)\n q = numpy.dot(q_tilt, q)\n \n else:\n raise ValueError(\n \"Axis can be one of the majot axes ('x', 'y', 'z') or a \"\n + \"vector.\")\n\n return q", "def _ang_part(self, dP):\n import pandas as pd\n dsP = pd.DataFrame(dP, columns=[\"i\", \"Pij\", \"nisi\", \"njsj\", \"l\"])\n dsP[\"Pij\"] = dsP[\"Pij\"].apply(self._renorm_p)\n return dsP.groupby(\"l\").sum()[\"Pij\"].to_dict()", "def affineTransform(x,output_dim):\n w=tf.get_variable(\"w\", [x.get_shape()[1], output_dim])\n b=tf.get_variable(\"b\", [output_dim], initializer=tf.constant_initializer(0.0))\n return tf.matmul(x,w)+b", "def to_pda(self) -> \"pda.PDA\":\n state = pda.State(\"q\")\n pda_object_creator = PDAObjectCreator(self._terminals, self._variables)\n input_symbols = {pda_object_creator.get_symbol_from(x)\n for x in self._terminals}\n stack_alphabet = {pda_object_creator.get_stack_symbol_from(x)\n for x in self._terminals.union(self._variables)}\n start_stack_symbol = pda_object_creator.get_stack_symbol_from(\n self._start_symbol)\n new_pda = pda.PDA(states={state},\n input_symbols=input_symbols,\n stack_alphabet=stack_alphabet,\n start_state=state,\n start_stack_symbol=start_stack_symbol)\n for production in self._productions:\n new_pda.add_transition(state, pda.Epsilon(),\n pda_object_creator.get_stack_symbol_from(\n production.head),\n state,\n [pda_object_creator.get_stack_symbol_from(x)\n for x in production.body])\n for terminal in 
self._terminals:\n new_pda.add_transition(state,\n pda_object_creator.get_symbol_from(\n terminal),\n pda_object_creator.get_stack_symbol_from(\n terminal),\n state, [])\n return new_pda", "def test_issue43():\n a = np.array([-1., 1., 1., np.pi - 5e-8])\n a[:3] = a[:3] / np.linalg.norm(a[:3])\n R = pr.matrix_from_axis_angle(a)\n a2 = pr.axis_angle_from_matrix(R)\n pr.assert_axis_angle_equal(a, a2)\n\n a = np.array([-1., 1., 1., 5e-8])\n a[:3] = a[:3] / np.linalg.norm(a[:3])\n R = pr.matrix_from_axis_angle(a)\n a2 = pr.axis_angle_from_matrix(R)\n pr.assert_axis_angle_equal(a, a2)\n\n a = np.array([-1., 1., 1., np.pi + 5e-8])\n a[:3] = a[:3] / np.linalg.norm(a[:3])\n R = pr.matrix_from_axis_angle(a)\n a2 = pr.axis_angle_from_matrix(R)\n pr.assert_axis_angle_equal(a, a2)\n\n a = np.array([-1., 1., 1., -5e-8])\n a[:3] = a[:3] / np.linalg.norm(a[:3])\n R = pr.matrix_from_axis_angle(a)\n a2 = pr.axis_angle_from_matrix(R)\n pr.assert_axis_angle_equal(a, a2)", "def _normalize(self, x, axis, eps=1e-5):\n return x / (\n tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True)) + 1e-5)", "def _mps_AA(self, A1, A2):\n Dl, d1, _ = A1.shape\n _, d2, Dr = A2.shape\n return np.reshape(np.tensordot(A1, A2, axes=(2, 0)), [Dl, d1 * d2, Dr])", "def APV_equation(self, uct):\n q_v_ = uct.total_reward\n nsa = uct.visit_time\n sigma_nsb = uct.my_parent.visit_time - 1\n psa = uct.psa\n if nsa == 0:\n return float('inf')\n equation = q_v_ / nsa + 2 * psa * math.sqrt(sigma_nsb) / nsa\n return equation", "def forward(x, pi, A, B):\n # TODO: Write this function.\n #x = x[1]\n B_col = B[:, x[0]] # [N_z, 1]\n alpha = np.multiply(pi, B_col)\n ret = np.zeros((x.shape[0], pi.shape[0]))\n ret[0] = alpha\n for i in range(1, x.shape[0]):\n B_col = B[:, x[i]]\n sum_term = np.dot(A, alpha) #before: alpha, A\n alpha = np.multiply(B_col, sum_term) #before: sum_term before\n ret[i] = alpha\n return ret", "def strange_attractor(pos):\n\n P = 10\n R = 350\n B = 8/3\n #P = 10\n #R = 35\n #B = 5\n\n def x_dot(x, y, z):\n return P * (y - x)\n\n def y_dot(x, y, z):\n return R*x - y - x*z\n\n def z_dot(x, y, z):\n return x*y - B*z\n\n return np.array([x_dot(pos[0], pos[1], pos[2]),\n y_dot(pos[0], pos[1], pos[2]),\n z_dot(pos[0], pos[1], pos[2])])", "def PPV(self):\n return _div(self.TP, self.TP + self.FP)", "def _update_adp_calculation(self, Temp):\n from sys import stdout\n\n self.printer('\\n ...calculating ADPs...\\n')\n\n import time\n\n start = time.time()\n\n daba_counter = 0.\n max_counter = float(len(self.keys()))\n for molecule in self.keys():\n daba_counter += 1.\n\n pstate = daba_counter / max_counter\n pstate = int(58 * pstate)\n bar = '[' + pstate * '#' + (58 - pstate) * '-' + ']'\n print ' | {}\\r'.format(bar),\n stdout.flush()\n\n try:\n self[molecule].get_adp(Temp)\n\n except KeyError:\n self.errorlog.write('Error: No ADP calculated by atom.get_adp() for {}.'.format(molecule))\n end = time.time()\n self.printer('\\n\\n Time used for ADP calculation: {:5.3f} sec on {} CPUs'.format(end - start, 1))", "def convert_p_norm(g, op, blcok):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n p = op.attr(\"porder\")\n keepdim = op.attr(\"keepdim\")\n p_node = _expr.const(p, dtype=\"float32\")\n abs_node = _op.abs(x)\n pow_node = _op.power(abs_node, p_node)\n reduce_sum = _op.sum(pow_node, axis=[axis], keepdims=keepdim)\n p_node1 = _expr.const(1.0 / p, dtype=\"float32\")\n out = _op.power(reduce_sum, p_node1)\n g.add_node(op.output(\"Out\")[0], out)", "def project_onto_plane(self,z):\n U=self.U\n Q=self.Q_p\n 
#print(((z-Q[-2,:,[2]])/P[-2,:,[2]]).T)\n #print(P[-2])\n return ((z-Q[-2,:,[2]])/U[-2,:,[2]]).T*U[-2]+Q[-2]", "def T(self):\n return Op('transpose', self)" ]
[ "0.6216488", "0.60510916", "0.60209346", "0.58952725", "0.5770266", "0.5735157", "0.55482674", "0.5510566", "0.53965545", "0.5327899", "0.5300603", "0.5298402", "0.52585346", "0.5229013", "0.520525", "0.5193558", "0.51586944", "0.51218873", "0.50987124", "0.5092296", "0.5079355", "0.50303376", "0.50270736", "0.5021495", "0.5019086", "0.49858806", "0.49683368", "0.49643627", "0.49593967", "0.49542466", "0.49523112", "0.49221817", "0.4918897", "0.4914708", "0.48970798", "0.48800895", "0.4878804", "0.48646358", "0.48517773", "0.48488638", "0.48475826", "0.48369175", "0.48327294", "0.48295024", "0.4823482", "0.48216268", "0.48195222", "0.48184243", "0.47880042", "0.4781849", "0.47807947", "0.47792938", "0.4778679", "0.47723222", "0.47665903", "0.4753747", "0.47451368", "0.4743086", "0.4741933", "0.4741174", "0.47304326", "0.47241542", "0.47220004", "0.47151643", "0.47125065", "0.4699868", "0.46926957", "0.46899003", "0.46890587", "0.46807936", "0.46566057", "0.46559313", "0.46539876", "0.46454987", "0.46370378", "0.4632516", "0.46317488", "0.46286833", "0.46264166", "0.46227485", "0.46125913", "0.46078843", "0.46027076", "0.46010834", "0.45974603", "0.4596754", "0.4588767", "0.45883057", "0.45845312", "0.45835775", "0.4583068", "0.45792493", "0.45783707", "0.45767477", "0.4573718", "0.45734465", "0.45709392", "0.45536056", "0.45478576", "0.45459726" ]
0.56617206
6
Calculates an ADP in its matrix representation from the three principal axes representing the displacement ellipsoid. The three principal axes of the ellipsoid are needed as arguments. A matrix representation of the ADP is returned.
def get_adp_from_calc(vx, vy, vz):
    ## lx=np.linalg.norm(vx)
    ## ly=np.linalg.norm(vy)
    ## lz=np.linalg.norm(vz)
    lx = vx
    ly = vy
    lz = vz
    L = np.matrix([[lx, 0, 0],
                   [0, ly, 0],
                   [0, 0, lz]])
    ## Vx=vx/lx
    ## Vy=vy/ly
    ## Vz=vz/lz
    Vx = np.array([1, 0, 0])
    Vy = np.array([0, 1, 0])
    Vz = np.array([0, 0, 1])
    V = np.matrix([[Vx[0], Vy[0], Vz[0]],
                   [Vx[1], Vy[1], Vz[1]],
                   [Vx[2], Vy[2], Vz[2]]])
    Vinv = np.linalg.inv(V)
    #print V,Vinv
    M = np.dot(np.dot(Vinv, L), V)
    #print M
    return M
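A minimal usage sketch of the function above, assuming numpy is imported as np and that the three arguments are the principal-axis lengths of the displacement ellipsoid (the example values are hypothetical):

import numpy as np

# Hypothetical principal-axis lengths
U = get_adp_from_calc(0.02, 0.03, 0.04)
# Because V is fixed to the identity basis inside the function,
# the returned matrix is simply diag(0.02, 0.03, 0.04).
print(U)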
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def A_coefficients_ellipsoid(v, DD, bDDisDelta=False):\n #v can be given as an array with X/Y/Z cartesian dimensions being the last.\n #\"\"\"\n if bDDisDelta:\n delta=DD\n else:\n delta=Ddelta_ellipsoid(dd)\n #v=_sanitise_v(v)\n #v2=np.square(v)\n #v4=np.square(v2)\n #fact2=np.multiply(0.75,np.sum(v4))-0.25\n v2 = [ v[i]*v[i] for i in range(3) ]\n v4 = [ v2[i]*v2[i] for i in range(3) ]\n fact2 = 0.25*( 3.0*(v4[0]+v4[1]+v4[2])-1.0)\n fact3 = 1.0/12.0*(delta[0]*(3*v4[0]+6*v2[1]*v2[2]-1) + delta[1]*(3*v4[1]+6*v2[0]*v2[2]-1) + delta[2]*(3*v4[2]+6*v2[0]*v2[1]-1))\n A=np.zeros(5)\n A[0]= 3*v2[1]*v2[2]\n A[1]= 3*v2[0]*v2[2]\n A[2]= 3*v2[0]*v2[1]\n A[3]= fact2-fact3\n A[4]= fact2+fact3\n return A", "def c1(adp1, adp2):\n\n def get_axis(adp):\n \"\"\"\n Returns ADP as its three principle axis representation.\n :param adp: List/Array type of length 6.\n :returns: List of three arrays of length 3.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n w, v = np.linalg.eig(adp)\n return [np.array((w[j] * v[:, j]).flatten().tolist()[0]) for j \\\n in xrange(3)]\n\n adp1_axis = get_axis(adp1)\n adp2_axis = get_axis(adp2)\n\n val = 0\n for i in xrange(3):\n addval = abs(norm(adp1_axis[i] - adp2_axis[i]))\n addval = addval * abs((1 - abs(np.dot(adp1_axis[i], adp2_axis[i]))))\n val += addval\n return val", "def __getAVEA(a, b, c, d):\n\tassert(a>-1)\n\tassert(b>-1)\n\tassert(c>-1)\n\tassert(d>-1)\n\tm1i = a+b\n\tm0i = c+d\n\tn1i = a+c\n\tn0i = b+d\n\tni = a+b+c+d\n\tv = 0.0\n\tea = 0.0\n\tif ni<=1:\n\t\t# Avoid divide-by-zero\n\t\tea = n1i*m1i\n\telse:\n\t\tv = n1i*n0i*m1i*m0i/float((ni-1)*ni*ni)\n\t\tea = n1i*m1i/float(ni)\n\treturn (a,v,ea)", "def calc_main_axis(self):\n #Clarify why the above step has been done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs", "def reflect_adp(adp, planev):\n M = np.identity(4)\n M[:3, :3] -= 2.0 * np.outer(planev, planev)\n M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev\n\n return rotate_adp(adp, M[:3, :3])", "def get_A3():\n\n return array([[0.68557183+0.46550108j, 0.12934765-0.1622676j,\n 0.24409518+0.25335939j],\n [0.1531015 + 0.66678983j, 0.45112492+0.18206976j,\n -0.02633966+0.43477693j],\n [-0.10817164-1.16879196j, -0.18446849+0.03755672j,\n 0.06430325-0.44757084j]])", "def eigenv2tensor(axis):\n vec = np.ones((3, 3))\n vecval = np.ones((3, 3))\n for i in xrange(len(axis)):\n vmag = np.linalg.norm(axis[i])\n v = axis[i] / vmag\n #print v\n vec[:, i] = v\n vecval[:, i] = axis[i]\n adp = np.linalg.solve(vec, vecval)\n return adp", "def get_axis(adp):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n w, v = np.linalg.eig(adp)\n return [np.array((w[j] * v[:, j]).flatten().tolist()[0]) for j \\\n in 
xrange(3)]", "def estimate_affine_matrix_3d_to_2d(X, x):\n assert x.shape[0] == X.shape[0]\n assert x.shape[0] >= 4\n X = X.T # (3, n)\n x = x.T # (2, n)\n n = x.shape[1]\n\n ###---- 1. normalization\n ## 2d points\n mean = np.mean(x, 1) # (2, )\n x = x - np.tile(mean[:, np.newaxis], [1, n]) # (2, n)\n average_norm = np.mean(np.sqrt(np.sum(x ** 2, 0)))\n scale = np.sqrt(2) / average_norm\n x = scale * x\n\n # T = [[scale, 0, -mean * scale], \n # [ 0, scale, -mean * scale], \n # [ 0, 0, 1 ]]\n T = np.zeros((3, 3), dtype=np.float32)\n T[0, 0] = T[1, 1] = scale\n T[:2, 2] = -mean * scale\n T[2, 2] = 1\n\n ## 3d points\n X_homo = np.vstack((X, np.ones((1, n)))) # (4, n)\n mean = np.mean(X, 1) # (3, )\n X = X - np.tile(mean[:, np.newaxis], [1, n]) # (3, n)\n m = X_homo[: 3, :] - X\n average_norm = np.mean(np.sqrt(np.sum(X ** 2, 0)))\n scale = np.sqrt(3) / average_norm\n X = scale * X\n\n U = np.zeros((4, 4), dtype=np.float32)\n U[0, 0] = U[1, 1] = U[2, 2] = scale\n U[: 3, 3] = -mean * scale\n U[3, 3] = 1\n\n ###---- 2. equations\n A = np.zeros((n * 2, 8), dtype=np.float32)\n X_homo = np.vstack((X, np.ones((1, n)))).T\n A[: n, : 4] = X_homo\n A[n: , 4: ] = X_homo\n b = np.reshape(x, [-1, 1]) # (2n, 1)\n\n ###---- 3.solution\n p_8 = np.linalg.pinv(A).dot(b) # (8, 2n) x (2n, 1) -> (8, 1)\n p = np.zeros((3, 4), dtype=np.float32)\n p[0, :] = p_8[:4, 0]\n p[1, :] = p_8[4:, 0]\n p[-1, -1] = 1\n\n ###---- 4. denormalization\n P_Affine = np.linalg.inv(T).dot(p.dot(U))\n return P_Affine", "def ADF(self, dP, ax):\n from scipy.special import sph_harm\n ang = self._ang_part(dP)\n #scipy defines their harmonics to have `theta` be azimuthal, which is\n #opposite from physics.\n #we set $m = 0$ so that the azimuthal part doesn't contribute at all.\n result = np.zeros(len(ax))\n for l, p in ang.items():\n Ylm = sph_harm(0, l, 0, ax)*np.sqrt(2*l+1)\n #We are interested in the c* c of this value, which is multiplied\n #together to get pissnnl.\n result += p*np.sqrt(np.absolute(Ylm*Ylm.conjugate()))\n return result", "def matrix_exp_pade3(matrix, multiplication_rule=None):\n b = [120.0, 60.0, 12.0]\n b = [tf.constant(x, matrix.dtype) for x in b]\n ident = tf.linalg.eye(\n tf.shape(matrix)[-2],\n batch_shape=tf.shape(matrix)[:-2],\n dtype=matrix.dtype)\n matrix_2 = tf.linalg.matmul(matrix, matrix)\n tmp = matrix_2 + b[1] * ident\n matrix_u = tf.linalg.matmul(matrix, tmp)\n matrix_v = b[2] * matrix_2 + b[0] * ident\n return matrix_u, matrix_v", "def get_aa_tpdm(self) -> Tuple['Nparray', 'Nparray']:\n dveca, _ = self.calculate_dvec_spin()\n alpha_opdm = numpy.tensordot(dveca, self.coeff.conj(), axes=2)\n nik_njl_aa = numpy.transpose(numpy.tensordot(dveca.conj(),\n dveca,\n axes=((2, 3), (2, 3))),\n axes=(1, 2, 0, 3))\n for ii in range(nik_njl_aa.shape[1]):\n nik_njl_aa[:, ii, ii, :] -= alpha_opdm\n return alpha_opdm, -nik_njl_aa", "def to_amdl(self):\n from .adipls import ADIPLSStellarModel\n\n ioff = (0 if self.r[0] < 1e6 else 1) # mimic ADIPLS's FGONG to AMDL script\n A = np.zeros((len(self.data) + ioff, 6))\n\n # we can safely ignore division by 0 here\n with np.errstate(divide='ignore', invalid='ignore'):\n A[ioff:,0] = self.x\n A[ioff:,1] = self.q/self.x**3\n A[ioff:,2] = self.Vg\n A[ioff:,3] = self.Gamma_1\n A[ioff:,4] = self.AA\n A[ioff:,5] = self.U\n\n A[0,0] = 0.\n A[0,1] = 4.*np.pi/3.*self.rho[0]*self.R**3/self.M\n A[0,2] = 0.\n A[0,3] = self.Gamma_1[0]\n A[0,4] = 0.\n A[0,5] = 3.\n\n D = np.zeros(8)\n D[0] = self.M\n D[1] = self.R\n D[2] = self.P[0]\n D[3] = self.rho[0]\n D[4] = 
4.*np.pi/3.*self.G*(self.rho[0]*self.R)**2/(self.P[0]*self.Gamma_1[0])\n D[5] = D[4]\n D[6] = -1.0\n D[7] = 0.0\n\n return ADIPLSStellarModel(D, A, G=self.G)", "def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aqin) / self.aqin.Tcol - \\\n e.potinflayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aqout) / self.aqout.Tcol\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aqin) / self.aqin.T - \\\n e.potentiallayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aqout) / self.aqout.T\n return mat, rhs", "def __convert(args):\n a, b, zone, ellipsoid, datum, inverse = args\n projection = Proj(\"+proj=utm +zone={}, +ellps={} +datum={} +units=m +no_defs\".format(zone, ellipsoid, datum))\n c, d = projection(a, b, inverse=inverse)\n\n return c, d", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def e(self):\n return np.matrix([self.y - self.arg[0,0]*self.x**3 - self.arg[1,0]*self.x**2 - self.arg[2,0]*self.x**1 - self.arg[3,0]])", "def _propagate_A(self):\n A_roots = np.roots(self.A)\n A_roots_norm = [r if np.abs(r) < 1 else 1/np.conj(r) for r in A_roots]\n A_poly = np.poly(A_roots_norm)\n self.alpha_g = -A_poly[1:]\n self.A = np.concatenate([[1], -self.alpha_g])\n\n self.rev_A = self.A[::-1]\n\n self.pie = np.dot(self.M_mu, self.rev_A)\n self.pi = self.pie*self.e\n self.p = self.pi*self.d\n\n\n M_R = np.lib.stride_tricks.as_strided(self.R_pad,\n shape=[self.L_h, self.L_h, self.P+1],\n strides=[self.R_pad.strides[0], self.R_pad.strides[1], self.R_pad.strides[0]])\n self.half_pie_var = np.dot(M_R, self.rev_A)\n self.half_pie_var_pad = np.pad(self.half_pie_var, [(0, 0), (self.P, 0)], 'constant')\n self.M_half_pie_var_pad = np.lib.stride_tricks.as_strided(self.half_pie_var_pad,\n shape=[self.L_h, self.P+1],\n strides=[self.half_pie_var_pad.strides[0]+self.half_pie_var_pad.strides[1], self.half_pie_var_pad.strides[1]])\n\n self.pie_var = np.dot(self.M_half_pie_var_pad, self.rev_A)", "def get_matrix(adp):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), 
float(adp[2])]])\n return adp", "def project(self, a):\n for g in xrange(0, len(a), 3):\n\n ax = a[g + 0]\n ay = a[g + 1]\n az = a[g + 2]\n anorm = ax ** 2.0 + ay ** 2.0 + az ** 2.0\n i = anorm > 1.0\n\n anorm_i = anorm[i] ** 0.5 # Square root is taken here. Faster.\n ax[i] = np.divide(ax[i], anorm_i)\n ay[i] = np.divide(ay[i], anorm_i)\n az[i] = np.divide(az[i], anorm_i)\n\n a[g + 0] = ax\n a[g + 1] = ay\n a[g + 2] = az\n\n return a", "def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n # rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs = self.pc.copy()\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n # rhs[istart:istart+self.nlayers] = self.pc[]\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xc[icp], self.yc[icp], self.layers)\n if e == self:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] -= self.resfac[icp]\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xc[icp], self.yc[icp],\n self.layers) # Pretty cool that this works, really\n return mat, rhs", "def principal_axis(alpha_carbons):\n # alpha carbons coordinates as a numpy array\n coord = numpy.array(alpha_carbons, float)\n\n # get geometrical center\n center = numpy.mean(coord, 0)\n coord = coord - center\n\n # create inertia matrix and extract eigenvectors and values\n inertia = numpy.dot(coord.transpose(), coord)\n e_values, e_vectors = numpy.linalg.eig(inertia)\n\n # sort eigenvalues\n order = numpy.argsort(e_values)\n\n # axis1 is the principal axis with the greatest eigenvalue\n _, _, axis1 = e_vectors[:, order].transpose()\n\n axis_direction = axis1 / numpy.linalg.norm(axis1)\n\n return center, axis_direction", "def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n # rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs = self.hc.copy()\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n # rhs[istart:istart+self.nlayers] = self.pc[]\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xc[icp], self.yc[icp], self.layers) / self.aq.Tcol[self.layers]\n if e == self:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] -= self.resfac[icp]\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xc[icp], self.yc[icp], self.layers) / self.aq.T[\n self.layers] # Pretty cool that this works, really\n return mat, rhs", "def Compute3d(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_Compute3d(self, *args)", "def ap(self, P):\n if P.divides(self.conductor()):\n if (P*P).divides(self.conductor()):\n # It is 0, because the reduction is additive.\n return ZZ(0)\n else:\n # TODO: It is +1 or -1, but I do not yet know how to\n # compute which without using the L-function.\n return '?'\n else:\n return self._S.hecke_matrix(P)[0,0]", "def adjugate_matrix(self, determinant, transposed_cofactor):\n if transposed_cofactor.__class__.__name__ != \"Matrix3\":\n raise TypeError(self._ERRORS[0])\n\n r1 = transposed_cofactor.row_1\n r2 = transposed_cofactor.row_2\n r3 = transposed_cofactor.row_3\n\n r1[0] /= determinant\n r1[1] /= determinant\n r1[2] /= determinant\n\n r2[0] /= determinant\n r2[1] /= determinant\n r2[2] /= determinant\n\n r3[0] /= determinant\n r3[1] /= determinant\n r3[2] /= determinant\n\n return Matrix3(r1, r2, r3)", 
"def anl_solution(self):\r\n\r\n m = float(self.mass) / self.nu_m\r\n qe = 1 / self.nu_m * (self.nu_t * self.nu_t / self.nu_x) * 1.0 \\\r\n / float(self.size_tick * self.size_tick)\r\n print 'qE=', qe\r\n c = self.light_vel\r\n for i in range(0, len(self.obs.obt_g)):\r\n ddt = float(self.obs.obt[i] - self.obs.obt[i - 1])\r\n x = m * c ** 2 / qe * (math.sqrt(1.0 + (qe * self.t[i] / (m\r\n * c)) ** 2) - 1.0)\r\n self.xa_track.append(x)\r\n p = qe * self.t[i]\r\n self.pa.append(p)\r\n v = p / math.sqrt(m ** 2 + (p / c) ** 2)\r\n jv = self.t[i] * qe / (m * c)\r\n v = math.sqrt(jv * jv / (1 + jv * jv)) * c\r\n self.va.append(v)\r\n print 'Analytical solution of the differential equation of motion'", "def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n rhs[istart:istart + self.nlayers] = self.pc\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xc[icp], self.yc[icp], self.layers)\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xc[icp], self.yc[icp],\n self.layers) # Pretty cool that this works, really\n return mat, rhs", "def equation(self):\n mat = np.zeros((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs[0:self.nlayers - 1] = 0.0\n rhs[self.nlayers - 1] = self.Qc\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n head = e.potinflayers(self.xc, self.yc, self.layers) / self.aq.Tcol[self.layers, :]\n mat[0:self.nlayers - 1, ieq:ieq + e.nunknowns] = head[:-1] - head[1:]\n if e == self:\n for i in range(self.nlayers - 1):\n mat[i, ieq + i] -= self.resfac[i]\n mat[i, ieq + i + 1] += self.resfac[i + 1]\n mat[self.nlayers - 1, ieq:ieq + self.nlayers] = 1.0\n ieq += e.nunknowns\n else:\n head = e.potentiallayers(self.xc, self.yc, self.layers) / self.aq.T[self.layers]\n rhs[0:self.nlayers - 1] -= head[:-1] - head[1:]\n return mat, rhs", "def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n qx, qy = e.disvecinflayers(self.xc[icp], self.yc[icp], self.layers)\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n qx * self.cosnorm[icp] + qy * self.sinnorm[icp] - self.resfac[:, np.newaxis] * \\\n (e.potinflayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aq) / self.aq.Tcol[\n self.layers] - \\\n e.potinflayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aq) / self.aq.Tcol[\n self.layers])\n ieq += e.nunknowns\n else:\n qx, qy = e.disveclayers(self.xc[icp], self.yc[icp], self.layers)\n rhs[istart:istart + self.nlayers] -= qx * self.cosnorm[icp] + qy * self.sinnorm[icp] + self.resfac * \\\n (e.potentiallayers(self.xcin[icp], self.ycin[icp], self.layers,\n aq=self.aq) / self.aq.T[self.layers] -\n e.potentiallayers(self.xcout[icp], self.ycout[icp],\n self.layers, aq=self.aq) / self.aq.T[\n self.layers])\n return mat, rhs", "def A_calc(self, x, y, theta, v, omega, dt):\n # Initialize 5x5 A matrix\n A = np.zeros((5,5))\n A[0,0] = 1\n A[1,1] = 1\n A[2,2] = 1\n A[3,3] = 1\n A[4,4] = 1\n \n A[0,2] = -1 * v * np.sin(theta) * dt\n A[0,3] = np.cos(theta) * dt\n A[1,2] = v * np.cos(theta) * dt\n A[1,3] = 
np.sin(theta) * dt\n A[2,4] = dt\n \n return(A)", "def immoment3D(X, Y, Z, p, q, r):\n assert len(X) == len(Y)\n assert len(Y) == len(Z)\n return (X ** p * Y ** q * Z ** r).sum()", "def project(self, alpha):\n ax = alpha[0]\n ay = alpha[1]\n az = alpha[2]\n anorm = ax ** 2.0 + ay ** 2.0 + az ** 2.0\n i = anorm > 1.0\n\n anorm_i = anorm[i] ** 0.5 # Square root is taken here. Faster.\n ax[i] = np.divide(ax[i], anorm_i)\n ay[i] = np.divide(ay[i], anorm_i)\n az[i] = np.divide(az[i], anorm_i)\n\n return [ax, ay, az]", "def ap2ep(uamp,uphs,vamp,vphs):\r\n # Make complex amplitudes for u and v\r\n u = uamp*np.exp(-1j*uphs)\r\n v = vamp*np.exp(-1j*vphs)\r\n\r\n #Calculate complex radius of anticlockwise and clockwise circles:\r\n wp = (u+1j*v)/2.0 # for anticlockwise circles\r\n wm = np.conj(u-1j*v)/2.0 # for clockwise circles\r\n # and their amplitudes and angles\r\n Wp = np.abs(wp)\r\n Wm = np.abs(wm)\r\n THETAp = np.angle(wp)\r\n THETAm = np.angle(wm)\r\n \r\n # calculate ep-parameters (ellipse parameters)\r\n SEMA = Wp+Wm # Semi Major Axis, or maximum speed\r\n SEMI = Wp-Wm # Semin Minor Axis, or minimum speed\r\n ECC = SEMI/SEMA # Eccentricity\r\n\r\n PHA = (THETAm-THETAp)/2.0 # Phase angle, the time (in angle) when \r\n # the velocity reaches the maximum\r\n INC = (THETAm+THETAp)/2.0 # Inclination, the angle between the \r\n # semi major axis and x-axis (or u-axis).\r\n \r\n return SEMA, SEMI, INC, PHA, ECC", "def __get_A(N, c, dx, order, type):\n\n coeff = None\n stencil = None\n zero_pos = None\n\n if type == 'center':\n\n if order == 2:\n stencil = [-1.0, 0.0, 1.0]\n zero_pos = 2\n coeff = 1.0 / 2.0\n else:\n raise ProblemError(\"Order \" + str(order) + \" not implemented.\")\n\n else:\n\n if order == 1:\n stencil = [-1.0, 1.0]\n coeff = 1.0\n zero_pos = 2\n else:\n raise ProblemError(\"Order \" + str(order) + \" not implemented.\")\n\n offsets = [pos - zero_pos + 1 for pos in range(len(stencil))]\n\n A = sp.diags(stencil, offsets, shape=(N, N), format='csc')\n A *= c * coeff * (1.0 / dx)\n\n return A", "def mxz(mXZ,P_dot_Dj,P_times_Dj):\n return np.divide(np.multiply(P_times_Dj, mXZ), P_dot_Dj)", "def __det3x3__(a):\r\n # val = +a[0,0] * ( a[1,1] * a[2,2] - a[2,1] * a[1,2] )\r\n # val += -a[0,1] * ( a[1,0] * a[2,2] - a[2,0] * a[1,2] )\r\n # val += +a[0,2] * ( a[1,0] * a[2,1] - a[2,0] * a[1,1] )\r\n val = +a[0] * (a[4] * a[8] - a[7] * a[5])\r\n val += -a[1] * (a[3] * a[8] - a[6] * a[5])\r\n val += +a[2] * (a[3] * a[7] - a[6] * a[4])\r\n return val", "def problem3():\n t = np.array([-27.1, -2.9, -3.2])\n principal_point = np.array([8, -10])\n focal_length = 8\n\n # model transformations\n T = gettranslation(t)\n Ry = getyrotation(135)\n Rx = getxrotation(-30)\n Rz = getzrotation(90)\n print(T)\n print(Ry)\n print(Rx)\n print(Rz)\n\n K = getcentralprojection(principal_point, focal_length)\n\n P,M = getfullprojection(T, Rx, Ry, Rz, K)\n print(P)\n print(M)\n\n points = loadpoints()\n displaypoints2d(points)\n\n z = loadz()\n Xt = invertprojection(K, points, z)\n\n Xh = inverttransformation(M, Xt)\n\n worldpoints = hom2cart(Xh)\n displaypoints3d(worldpoints)\n\n points2 = projectpoints(P, worldpoints)\n displaypoints2d(points2)\n\n plt.show()", "def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n headin = self.intpot(e.potinflayers, self.xcin[icp], self.ycin[icp],\n self.xcin[icp + 
1], self.ycin[icp + 1], self.layers,\n aq=self.aqin) / self.aqin.Tcol[self.layers]\n headout = self.intpot(e.potinflayers, self.xcout[icp], self.ycout[icp],\n self.xcout[icp + 1], self.ycout[icp + 1], self.layers,\n aq=self.aqout) / self.aqout.Tcol[self.layers]\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = headin - headout\n ieq += e.nunknowns\n else:\n headin = self.intpot(e.potentiallayers, self.xcin[icp], self.ycin[icp],\n self.xcin[icp + 1], self.ycin[icp + 1], self.layers,\n aq=self.aqin) / self.aqin.T[self.layers]\n headout = self.intpot(e.potentiallayers, self.xcout[icp], self.ycout[icp],\n self.xcout[icp + 1], self.ycout[icp + 1], self.layers,\n aq=self.aqout) / self.aqout.T[self.layers]\n rhs[istart:istart + self.nlayers] -= headin - headout\n return mat, rhs", "def dRdE_anapole(E, m_x, c_A, target, vlag=232.0, sigmav=156.0, vesc=544.0):\n \n #See https://arxiv.org/pdf/1401.4508.pdf\n \n alpha = 0.007297\n e = np.sqrt(4*np.pi*alpha)\n \n cn = np.zeros(11)\n cp = np.zeros(11)\n \n #Operator 8\n cp[7] = -2.0*e*c_A\n \n #Operator 9\n cp[8] = -2.0*e*c_A\n \n return dRdE_NREFT(E, m_x, cp, cn, target, vlag, sigmav, vesc)", "def inverse_3by3_double(M):\n if len(M.shape) > 1:\n M = M.flatten()\n\n M = np.array(M, 'float')\n\n determinant = 0.\n adj_M = np.zeros((9,), 'float')\n\n # First row of adjugate matrix\n adj_M[0] = (M[4] * M[8] - M[7] * M[5]) # Det #0\n adj_M[1] = -(M[1] * M[8] - M[7] * M[2])\n adj_M[2] = (M[1] * M[5] - M[4] * M[2])\n\n # Second row of adjugate matrix\n adj_M[3] = -(M[3] * M[8] - M[6] * M[5]) # Det #1\n adj_M[4] = (M[0] * M[8] - M[6] * M[2])\n adj_M[5] = -(M[0] * M[5] - M[3] * M[2])\n\n # Third row of adjugate matrix\n adj_M[6] = (M[3] * M[7] - M[6] * M[4]) # Det #2\n adj_M[7] = -(M[0] * M[7] - M[6] * M[1])\n adj_M[8] = (M[0] * M[4] - M[3] * M[1])\n\n determinant += M[0] * adj_M[0]\n determinant += M[1] * adj_M[3] # Using addition since minus is integrated in adjugate matrix.\n determinant += M[2] * adj_M[6]\n\n return (adj_M / determinant)", "def makeenv(self):\n eps=np.ones((self.nx,self.ny))*const.epsilon_0\n mu=np.ones((self.nx,self.ny))*const.mu_0\n\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n #mu[:20,:] /= self.q #adself.ds a space of higher permittivity \n #mu[-20:,:] /= self.q #adself.ds a space of higher permittivity \n #mu[:,:20] /= self.q #adself.ds a space of higher permittivity \n #mu[:,-20:] /= self.q #adself.ds a space of higher permittivity \n\n return eps, mu", "def rotate_ADP_about_axis(ADP, angle, axisDirection):\n adp = get_adp_as_matrix(ADP)\n u, v = np.linalg.eig(adp)\n startPoints = [v[:, i].flatten().tolist()[0] for i in xrange(3)]\n endPoints = [rotate_point_about_axis(point, angle, axisDirection, (0, 0, 0)) for point in startPoints]\n rotMat = get_transform(startPoints, endPoints, matrix=True).transpose()\n newadp = np.dot(rotMat.transpose(), np.dot(adp, rotMat))\n return newadp[0, 0], newadp[1, 1], newadp[2, 2], newadp[0, 1], newadp[0, 2], newadp[1, 2]", "def euler_from_matrix(matrix, axes='sxyz'):\n try:\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]\n except (AttributeError, KeyError):\n _ = _TUPLE2AXES[axes]\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis\n j = _NEXT_AXIS[i+parity]\n k = _NEXT_AXIS[i-parity+1]\n\n M = np.array(matrix, dtype=np.float64, 
copy=False)[:3, :3]\n if repetition:\n sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])\n if sy > _EPS:\n ax = math.atan2( M[i, j], M[i, k])\n ay = math.atan2( sy, M[i, i])\n az = math.atan2( M[j, i], -M[k, i])\n else:\n ax = math.atan2(-M[j, k], M[j, j])\n ay = math.atan2( sy, M[i, i])\n az = 0.0\n else:\n cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])\n if cy > _EPS:\n ax = math.atan2( M[k, j], M[k, k])\n ay = math.atan2(-M[k, i], cy)\n az = math.atan2( M[j, i], M[i, i])\n else:\n ax = math.atan2(-M[j, k], M[j, j])\n ay = math.atan2(-M[k, i], cy)\n az = 0.0\n\n if parity:\n ax, ay, az = -ax, -ay, -az\n if frame:\n ax, az = az, ax\n return ax, ay, az", "def plane_equation(p1, p2, p3):\n a1 = p2[0] - p1[0]\n b1 = p2[1] - p1[1]\n c1 = p2[2] - p1[2]\n a2 = p3[0] - p1[0]\n b2 = p3[1] - p1[1]\n c2 = p3[2] - p1[2]\n a = b1 * c2 - b2 * c1\n b = a2 * c1 - a1 * c2\n c = a1 * b2 - b1 * a2\n # Points are collinear\n if (abs(a) < 1e-6) and (abs(b) < 1e-6) and (abs(c) < 1e-6):\n return None\n # All clear\n d = (- a * p1[0] - b * p1[1] - c * p1[2])\n return a, b, c, d", "def affine_matrix(mpa, mpb):\n if len(mpa) != len(mpb):\n raise GeometryError(\"Input geometries must have identical length\")\n vecp = np.asarray(mpb.get_vertices(mpa.crs)).ravel()\n A = np.empty([2*len(mpa), 6], dtype=np.float64)\n for i, (x, y) in enumerate(mpa.get_vertices()):\n A[2*i:2*i+2,:] = np.kron(np.eye(2), [x, y, 1])\n M, res, rank, singvals = np.linalg.lstsq(A, vecp)\n return np.vstack([np.reshape(M, [2, 3]), np.atleast_2d([0, 0, 1])])", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()\n ymin, ymax = self.get_ylim3d()\n zmin, zmax = self.get_zlim3d()\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0\n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates\n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down\n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def get_Amn(self):\n for ik in range(self.nkpt):\n self.Amn[ik, :, :] = np.array(self.get_Amn_one_k(ik),\n dtype=complex)\n return self.Amn", "def ancmig_adj_3(params, ns):\n #8 parameters \n nu1, nuA, nu2, nu3, m1_1, T1a, T1b, T2 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1a\n nu_T1 = [nu1, nuA]\n mig1 = numpy.array([[0, m1_1],[m1_1, 0]])\n fs.integrate(nu_T1, T1a, m=mig1)\n fs.integrate(nu_T1, T1b) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu_T2 = [nu1, nu2, nu3]\n fs.integrate(nu_T2, T2)\n return fs", "def method3(self):\n cres=0.\n Ux_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.ALDM[ix ,iy, : , : ]\n 
mat2=self.ALDM[(ix%self.kS.Nx)+1, iy, : , : ]\n mat3=self.ALDM[ix ,(iy%self.kS.Ny)+1, : , : ]\n \n Ux_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[self.NL-1:,self.NL-1:])\n Uy_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[self.NL-1:,self.NL-1:])\n\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_aloc[ix,iy]*Uy_aloc[ix+1,iy]/Ux_aloc[ix,iy+1]/Uy_aloc[ix,iy])\n cres+=ftemp/2./pi/1j\n \n return cres.real\n #End of method3", "def laplace2dq3(get_A3, get_rho, N=Mynum, Te=2):\n # Reduce the row and column of Laplacian matrix by 2 \n # Reduced row and column will be replace with embed in future\n # n = N - 2\n n = N\n\n # Solving for the PDE(1)\n h = 1.0/(n-1)\n A = get_A3(n) * (1/(12*(h**2)))\n b = get_rho(n, Te)\n U = sp.linalg.solve(A, b)\n # Reshape the u vector into nxn matrix for heat map plotting\n T = U.reshape((n, n))\n print T\n # Embed the surrounding of U matrix into zeros\n Tfull = embed(T, Te)\n\n # Verify that dot function of A matrix and U vector\n # return the same rho value at midpoint\n CheckU = np.dot(A,U)\n\n # Filter very small value into zeros\n for i in range(0,len(CheckU)):\n if (abs(CheckU[i]) < 1e-12):\n CheckU[i] = 0\n\n # Validate that product of A and U matrix is the same as rho vector\n # Will give warning if it is not the same\n # assert np.all(CheckU == b) # work for Mynum = 7 and 9\n\n # Print value of the products at midpoint.\n mid = (n**2-1)/2\n print \"Q3: Value of the dot product A.u3 is %5.3f at (0.5,0.5).\" % (CheckU[mid])\n return Tfull", "def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n fluxin = self.intflux(e.disvecinflayers, self.xcin[icp], self.ycin[icp],\n self.xcin[icp + 1], self.ycin[icp + 1], self.layers, aq=self.aqin)\n fluxout = self.intflux(e.disvecinflayers, self.xcout[icp], self.ycout[icp],\n self.xcout[icp + 1], self.ycout[icp + 1], self.layers, aq=self.aqout)\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = fluxin - fluxout\n ieq += e.nunknowns\n else:\n fluxin = self.intflux(e.disveclayers, self.xcin[icp], self.ycin[icp],\n self.xcin[icp + 1], self.ycin[icp + 1], self.layers, aq=self.aqin)\n fluxout = self.intflux(e.disveclayers, self.xcout[icp], self.ycout[icp],\n self.xcout[icp + 1], self.ycout[icp + 1], self.layers, aq=self.aqout)\n rhs[istart:istart + self.nlayers] -= fluxin - fluxout\n return mat, rhs", "def calc_vad_3d(az, elev, vel):\n elev = np.deg2rad(elev)\n az = np.deg2rad(az)\n\n if vel.size > 1: # If there could be sufficient data points...\n A = sum(vel * np.sin(az))\n B = sum(np.sin(az) ** 2 * np.cos(elev))\n C = sum(np.cos(az) * np.sin(az) * np.cos(elev))\n G = sum(np.sin(az) * np.sin(elev))\n\n D = sum(vel * np.cos(az))\n E = sum(np.sin(az) * np.cos(az) * np.cos(elev))\n F = sum(np.cos(az) ** 2 * np.cos(elev))\n H = sum(np.cos(az) * np.sin(elev))\n\n W = sum(vel)\n X = sum(np.sin(az) * np.cos(elev))\n Y = sum(np.cos(az) * np.cos(elev))\n Z = sum(az * np.sin(elev))\n\n # solve A = uB + vC + wG , D = uE + vF + wH and W = uX + vY+ wZ\n y = np.array([[B, E, X], [C, F, Y], [G, H, Z]])\n z = np.array([A, D, W])\n # print y\n # print z\n try:\n sol = np.linalg.solve(y, z)\n # print sol\n u = sol[0]\n v = sol[1]\n w = sol[2]\n return u, v, w\n except np.linalg.linalg.LinAlgError:\n return FILL_VALUE, FILL_VALUE, FILL_VALUE\n else:\n return FILL_VALUE, 
FILL_VALUE, FILL_VALUE", "def _mps_decompose_AC(self, A):\n Dl, d, Dr = A.shape\n Q, C = qr(np.reshape(A, [Dl * d, Dr]))\n nC = nfactor(C)\n # nC = max(abs(C.min()), abs(C.max()))\n if C.shape == (1, 1): # if number then makes C = 1\n Q *= np.sign(C.flat[0])\n C = np.ones((1, 1))\n else:\n C = C / nC\n Dr = C.shape[0]\n Q = np.reshape(Q, [Dl, d, Dr])\n return Q, C, nC, Dr", "def project_perp(A):\n return np.eye(A.shape[1]) - project(A)", "def xyz_to_acescg(xyz: Vector) -> Vector:\n\n return alg.dot(XYZ_TO_AP1, xyz, dims=alg.D2_D1)", "def P_to_a(M1, M2, P):\n mu = c.GGG * (M1 + M2) * c.Msun_to_g\n n = 2.0*np.pi / P / c.day_to_sec\n return np.power(mu/(n*n), 1.0/3.0) / c.Rsun_to_cm", "def altitude(p):\r\n \r\n R = 290 #specific gas constant \r\n T = 93.65 #surface temperature K from A.Coustenis book\r\n g = 1.354 #surface gravity from A.Coustenis book\r\n p0 = 1467 #surface pressure in hPa 6.1 for mars\r\n \r\n z = np.empty_like(p)\r\n \r\n for i in range(p.shape[0]):\r\n z[i] = (-1)*(R*T/g)*np.log((p[i])/p0)/(10**3)\r\n \r\n # Make into an xarray DataArray\r\n z_xr = xr.DataArray(z, coords=[z], dims=['pfull'])\r\n z_xr.attrs['units'] = 'km'\r\n \r\n #below is the inverse of the calculation\r\n #p[i] = p0*np.exp((-1)*z[i]*(10**3)/((R*T/g)))\r\n \r\n return z_xr", "def ADP (self):", "def adjoint(self) -> 'MultiVector':\n # The multivector created by reversing all multiplications\n return self._newMV(self.layout.adjoint_func(self.value))", "def get_ab_tpdm(self) -> 'Nparray':\n dveca, dvecb = self.calculate_dvec_spin()\n tpdm_ab = numpy.transpose(numpy.tensordot(dveca.conj(),\n dvecb,\n axes=((2, 3), (2, 3))),\n axes=(1, 2, 3, 0))\n return tpdm_ab", "def get_E_matrix(dR, dt):\n\n E = np.matmul(\n np.reshape(np_skew_symmetric(dt), (-1, 3, 3)),\n dR\n ).reshape(3, 3)\n return E", "def det3(m):\n\ta, b, c = m[0]\n\tda = det2([ m[1][1:] , m[2][1:]])\n\tdb = det2([[m[1][0],m[1][2]],[m[2][0],m[2][2]]])\n\tdc = det2([ m[1][:2] , m[2][:2]])\n\treturn a*da - b*db + c*dc", "def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n qx, qy = e.disvecinflayers(self.xc[icp], self.yc[icp], self.layers)\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n qx * self.cosnorm[icp] + qy * self.sinnorm[icp]\n ieq += e.nunknowns\n else:\n qx, qy = e.disveclayers(self.xc[icp], self.yc[icp], self.layers)\n rhs[istart:istart + self.nlayers] -= qx * self.cosnorm[icp] + qy * self.sinnorm[icp]\n return mat, rhs", "def get_projection_matrix(self, aspect):\n return self.ptr.get_projection_matrix(aspect)", "def calc_a(dert__):\n return dert__[[2, 3]] / dert__[1] # np.array([dy, dx]) / g", "def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n qx, qy = e.disvecinflayers(self.xcout[icp], self.ycout[icp], self.layers)\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n qx * self.cosnorm[icp] + qy * self.sinnorm[icp]\n ieq += e.nunknowns\n else:\n qx, qy = e.disveclayers(self.xcout[icp], self.ycout[icp], self.layers)\n rhs[istart:istart + self.nlayers] -= qx * self.cosnorm[icp] + qy * self.sinnorm[icp]\n return mat, rhs", "def _adi_pca(cube, angle_list, ncomp, source_xy, delta_rot, fwhm, 
scaling,\n mask_center_px, debug, svd_mode, imlib, interpolation, collapse,\n verbose, start_time, full_output):\n n, y, x = cube.shape\n\n if not n == angle_list.shape[0]:\n msg = \"Angle list vector has wrong length. It must equal the \"\n msg += \"number of frames in the cube\"\n raise ValueError(msg)\n\n if ncomp > n:\n ncomp = min(ncomp, n)\n msg = 'Number of PCs too high (max PCs={}), using {} PCs instead.'\n print(msg.format(n, ncomp))\n\n if source_xy is None:\n residuals_result = _subtr_proj_fullfr(cube, None, ncomp, scaling,\n mask_center_px, debug,\n svd_mode, verbose, full_output)\n if verbose:\n timing(start_time)\n if full_output:\n residuals_cube = residuals_result[0]\n reconstructed = residuals_result[1]\n V = residuals_result[2]\n pcs = reshape_matrix(V, y, x)\n recon = reshape_matrix(reconstructed, y, x)\n else:\n residuals_cube = residuals_result\n else:\n if delta_rot is None or fwhm is None:\n msg = 'Delta_rot or fwhm parameters missing. Needed for the'\n msg += 'PA-based rejection of frames from the library'\n raise TypeError(msg)\n nfrslib = []\n residuals_cube = np.zeros_like(cube)\n recon_cube = np.zeros_like(cube)\n yc, xc = frame_center(cube[0], False)\n x1, y1 = source_xy\n ann_center = dist(yc, xc, y1, x1)\n pa_thr = _compute_pa_thresh(ann_center, fwhm, delta_rot)\n mid_range = np.abs(np.amax(angle_list) - np.amin(angle_list)) / 2\n if pa_thr >= mid_range - mid_range * 0.1:\n new_pa_th = float(mid_range - mid_range * 0.1)\n if verbose:\n msg = 'PA threshold {:.2f} is too big, will be set to '\n msg += '{:.2f}'\n print(msg.format(pa_thr, new_pa_th))\n pa_thr = new_pa_th\n\n for frame in range(n):\n if ann_center > fwhm * 3: # TODO: 3 optimal value? new par?\n ind = _find_indices(angle_list, frame, pa_thr,\n truncate=True)\n else:\n ind = _find_indices(angle_list, frame, pa_thr)\n\n res_result = _subtr_proj_fullfr(cube, None, ncomp, scaling,\n mask_center_px, debug, svd_mode,\n verbose, full_output, ind,\n frame)\n if full_output:\n nfrslib.append(res_result[0])\n residual_frame = res_result[1]\n recon_frame = res_result[2]\n residuals_cube[frame] = residual_frame.reshape((y, x))\n recon_cube[frame] = recon_frame.reshape((y, x))\n else:\n nfrslib.append(res_result[0])\n residual_frame = res_result[1]\n residuals_cube[frame] = residual_frame.reshape((y, x))\n\n # number of frames in library printed for each annular quadrant\n if verbose:\n descriptive_stats(nfrslib, verbose=verbose, label='Size LIB: ')\n\n residuals_cube_ = cube_derotate(residuals_cube, angle_list, imlib=imlib,\n interpolation=interpolation)\n frame = cube_collapse(residuals_cube_, mode=collapse)\n if verbose:\n print('Done de-rotating and combining')\n timing(start_time)\n if source_xy is not None:\n return recon_cube, residuals_cube, residuals_cube_, frame\n else:\n return pcs, recon, residuals_cube, residuals_cube_, frame", "def three_point_method(d, point_a, point_b, point_c, point_a0, point_b0, point_c0, point_d0):\n dx, dy, dz = d\n\n mag_rda = magnitude(point_a0, point_d0) # length of vector RDA\n mag_rdb = magnitude(point_b0, point_d0) # length of vector RDB\n mag_rdc = magnitude(point_c0, point_d0) # length of vector RDC\n\n # set up equation for scipy.fsolve by adding all 3 together and moving all over to right side\n eq1 = (dx - point_a[0])**2 + (dy - point_a[1])**2 + (dz - point_a[2])**2 - mag_rda**2\n eq2 = (dx - point_b[0])**2 + (dy - point_b[1])**2 + (dz - point_b[2])**2 - mag_rdb**2\n eq3 = (dx - point_c[0])**2 + (dy - point_c[1])**2 + (dz - point_c[2])**2 - mag_rdc**2\n\n return 
eq1, eq2, eq3", "def dd_axis(axis, ambient_dim, operand):\n d = Derivative()\n\n unit_vector = np.zeros(ambient_dim)\n unit_vector[axis] = 1\n\n unit_mvector = MultiVector(unit_vector)\n\n return d.resolve(\n (unit_mvector.scalar_product(d.dnabla(ambient_dim)))\n * d(operand))", "def ws06(adp1, adp2):\n # print sum(adp1[:3])/3. - sum(adp2[:3])/3.\n adp1 = get_matrix(adp1)\n adp2 = get_matrix(adp2)\n adp1i = np.linalg.inv(adp1)\n adp2i = np.linalg.inv(adp2)\n a = 2 ** 1.5\n b = np.dot(adp1i, adp2i)\n c = np.linalg.det(b)\n\n # if c <= 0:\n # c *= -1\n d = c ** 0.25\n up = a * d\n\n x = adp1i + adp2i\n y = np.linalg.det(x)\n # if y <= 0:\n # y *= -1\n z = y ** 0.5\n R = up / z\n return 100 * (1 - R)", "def cartesian_angular_momenta(self, x='x', y='y', z='z',\n vx='vx', vy='vy', vz='vz',\n Lx='Lx', Ly='Ly', Lz='Lz',\n propagate_uncertainties=False,\n inplace=False):\n df = self.df if inplace else self.df.copy()\n x, y, z, vx, vy, vz = df._expr(x, y, z, vx, vy, vz)\n df.add_virtual_column(Lx, y * vz - z * vy)\n df.add_virtual_column(Ly, z * vx - x * vz)\n df.add_virtual_column(Lz, x * vy - y * vx)\n if propagate_uncertainties:\n df.propagate_uncertainties([df[Lx], df[Ly], df[Lz]])\n return df", "def acescg_to_xyz(acescg: Vector) -> Vector:\n\n return alg.dot(AP1_TO_XYZ, acescg, dims=alg.D2_D1)", "def parmdbToAxes(solEntry):\n pol = None; pol1 = None; pol2 = None;\n dir = None; ant = None; parm = None\n\n thisSolType = solEntry.split(':')[0]\n\n # For CommonRotationAngle assuming [CommonRotationAngle:ant]\n if thisSolType == 'CommonRotationAngle':\n thisSolType, ant = solEntry.split(':')\n dir = 'pointing'\n\n # For RotationAngle assuming [RotationAngle:ant:sou]\n elif thisSolType == 'RotationAngle':\n thisSolType, ant, dir = solEntry.split(':')\n\n # For CommonScalarPhase assuming [CommonScalarPhase:ant]\n elif thisSolType == 'CommonScalarPhase':\n thisSolType, ant = solEntry.split(':')\n\n # For ScalarPhase assuming [ScalarPhase:ant:sou]\n elif thisSolType == 'ScalarPhase':\n thisSolType, ant, dir = solEntry.split(':')\n\n # For Gain assuming [Gain:pol1:pol2:parm:ant]\n elif thisSolType == 'Gain':\n thisSolType, pol1, pol2, parm, ant = solEntry.split(':')\n dir = 'pointing'\n\n # For DirectionalGain assuming [DirecitonalGain:pol1:pol2:parm:ant:sou]\n elif thisSolType == 'DirectionalGain':\n thisSolType, pol1, pol2, parm, ant, dir = solEntry.split(':')\n\n else:\n logging.error('Unknown solution type \"'+thisSolType+'\". 
Ignored.')\n\n if pol1 != None and pol2 != None:\n if pol1 == '0' and pol2 == '0': pol = 'XX'\n if pol1 == '1' and pol2 == '0': pol = 'YX'\n if pol1 == '0' and pol2 == '1': pol = 'XY'\n if pol1 == '1' and pol2 == '1': pol = 'YY'\n\n if pol != None:\n pol = re.escape(pol)\n if dir != None:\n dir = re.escape(dir)\n if ant != None:\n ant = re.escape(ant)\n return pol, dir, ant, parm", "def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n qxin, qyin = e.disvecinflayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aqin)\n qxout, qyout = e.disvecinflayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aqout)\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n (qxin - qxout) * self.cosnorm[icp] + (qyin - qyout) * self.sinnorm[icp]\n ieq += e.nunknowns\n else:\n qxin, qyin = e.disveclayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aqin)\n qxout, qyout = e.disveclayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aqout)\n rhs[istart:istart + self.nlayers] -= (qxin - qxout) * self.cosnorm[icp] + (qyin - qyout) * \\\n self.sinnorm[icp]\n return mat, rhs", "def line_equation_ap(angle, (x1, y1)):\n \n # get second point on the line\n x2 = float(x1) + cos(angle)\n y2 = float(y1) + sin(angle)\n \n # return A, B and C coefficients\n return (y1 - y2, x2 - x1, x1*y2 - x2*y1)", "def _e_2d_(p, a):\r\n diff = a - p[np.newaxis, :]\r\n return np.einsum('ij,ij->i', diff, diff)", "def construct_by_ellipse(a_xx, h_xy, b_yy, g_x, f_y, d, focal_length):\n gamma = - focal_length\n a = gamma**2 * a_xx\n b = gamma**2 * b_yy\n c = d\n d = gamma**2 * d\n f = -gamma*(f_y)\n g = -gamma*(g_x)\n h = gamma**2 * h_xy\n #Not needed\n u = gamma**2 * g_x\n v = gamma**2 * f_y\n w = -gamma*(d)\n return ConeCamera(a, b, c, f, g, h)", "def getAcc(pos: dc.float64[N, 3], mass: dc.float64[N], G: dc.float64,\n softening: dc.float64):\n # positions r = [x,y,z] for all particles\n x = pos[:, 0:1]\n y = pos[:, 1:2]\n z = pos[:, 2:3]\n\n # matrix that stores all pairwise particle separations: r_j - r_i\n # dx = x.T - x\n # dy = y.T - y\n # dz = z.T - z\n # dx = np.transpose(x) - x\n # dy = np.transpose(y) - y\n # dz = np.transpose(z) - z\n dx = np.add.outer(-x, x)\n dy = np.add.outer(-y, y)\n dz = np.add.outer(-z, z)\n\n # matrix that stores 1/r^3 for all particle pairwise particle separations\n inv_r3 = (dx**2 + dy**2 + dz**2 + softening**2)\n # inv_r3[inv_r3>0] = inv_r3[inv_r3>0]**(-1.5)\n I = inv_r3 > 0\n np.power(inv_r3, -1.5, out=inv_r3, where=I)\n\n ax = G * (dx * inv_r3) @ mass\n ay = G * (dy * inv_r3) @ mass\n az = G * (dz * inv_r3) @ mass\n\n # pack together the acceleration components\n # a = np.hstack((ax,ay,az))\n a = np.ndarray((N, 3), dtype=np.float64)\n # hstack(a, ax, ay, az)\n a[:, 0] = ax\n a[:, 1] = ay\n a[:, 2] = az\n\n return a", "def projection_matrix(self):\n scene = self.figure.scene\n scene_size = tuple(scene.get_size())\n aspect_ratio = float(scene_size[0]) / float(scene_size[1])\n p = scene.camera.get_perspective_transform_matrix(\n aspect_ratio, -1, 1).to_array().astype(np.float32)\n return p", "def _adi_rdi_pca(cube, cube_ref, angle_list, ncomp, scaling, mask_center_px,\n debug, svd_mode, imlib, interpolation, collapse, verbose,\n full_output, start_time):\n n, y, x = cube.shape\n if not cube_ref.ndim == 3:\n msg = 'Input reference array is not a cube or 
3d array'\n raise ValueError(msg)\n if not cube_ref.shape[1] == y:\n msg = 'Reference and target frames have different shape'\n raise TypeError(msg)\n\n if ncomp > n:\n ncomp = min(ncomp, n)\n msg = 'Number of PCs too high (max PCs={}), using {} PCs instead.'\n print(msg.format(n, ncomp))\n residuals_result = _subtr_proj_fullfr(cube, cube_ref, ncomp, scaling,\n mask_center_px, debug, svd_mode,\n verbose, full_output)\n if full_output:\n residuals_cube = residuals_result[0]\n reconstructed = residuals_result[1]\n V = residuals_result[2]\n pcs = reshape_matrix(V, y, x)\n recon = reshape_matrix(reconstructed, y, x)\n else:\n residuals_cube = residuals_result\n residuals_cube_ = cube_derotate(residuals_cube, angle_list, imlib=imlib,\n interpolation=interpolation)\n frame = cube_collapse(residuals_cube_, mode=collapse)\n\n if verbose:\n print('Done de-rotating and combining')\n timing(start_time)\n return pcs, recon, residuals_cube, residuals_cube_, frame", "def Euler2Mat(e):\n x=e[0]\n y=e[1]\n z=e[2]\n s1=np.sin(x)\n s2=np.sin(y)\n s3=np.sin(z)\n c1=np.cos(x)\n c2=np.cos(y)\n c3=np.cos(z)\n m=np.array([[c1*c2*c3-s1*s3,-c3*s1-c1*c2*s3,c1*s2],\n [c1*s3+c2*c3*s1,c1*c3-c2*s1*s3,s1*s2],\n [-c3*s2,s2*s3,c2]])\n return m", "def get_analytical_damping_matrix(self):\n if not self.bearing_type == \"short_bearing\":\n warnings.warn(\n \"Function get_analytical_damping_matrix suitable only for short bearings. \"\n \"The ratio between the bearing length and its radius should be less or \"\n \"equal to 0.25. Currently we have \"\n + str(self.length / self.radius_stator)\n + \".\"\n )\n # fmt: off\n f = self.get_rotor_load()\n h0 = 1.0 / (((np.pi ** 2) * (1 - self.eccentricity_ratio ** 2) + 16 * self.eccentricity_ratio ** 2) ** 1.5)\n a = f / (self.radial_clearance * self.omega)\n cxx = (a * h0 * 2 * np.pi * np.sqrt(1 - self.eccentricity_ratio ** 2) *\n ((np.pi ** 2) * (\n 1 + 2 * self.eccentricity_ratio ** 2) - 16 * self.eccentricity_ratio ** 2) / self.eccentricity_ratio)\n cxy = (-a * h0 * 8 * (\n (np.pi ** 2) * (1 + 2 * self.eccentricity_ratio ** 2) - 16 * self.eccentricity_ratio ** 2))\n cyx = cxy\n cyy = (a * h0 * (2 * np.pi * (\n (np.pi ** 2) * (1 - self.eccentricity_ratio ** 2) ** 2 + 48 * self.eccentricity_ratio ** 2)) /\n (self.eccentricity_ratio * np.sqrt(1 - self.eccentricity_ratio ** 2)))\n # fmt: on\n return [cxx, cxy, cyx, cyy]", "def _pmatrix(kn_u, kn_d, thickness):\n p = np.zeros((kn_u.size, 4, 4), np.complex128)\n\n p0 = np.exp(complex(0, 1) * kn_u * thickness)\n p1 = np.exp(complex(0, 1) * kn_d * thickness)\n\n p[:, 0, 0] = 1 / p0\n p[:, 1, 1] = p0\n p[:, 2, 2] = 1 / p1\n p[:, 3, 3] = p1\n\n return p", "def reaction_forces(Ca, la, x1, x2, x3, xa, h, d1, d3, theta, P, q, E, I):\r\n \r\n equation_matrix = np.array([[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \r\n [1, 0, 0, 1, 0, 1, 0, np.sin(theta), 0, 0, 0, 0, (P*np.sin(theta)+q*la*np.cos(theta))], \r\n [0, 1, 0, 0, 1, 0, 1, np.cos(theta), 0, 0, 0, 0, (P*np.cos(theta)-q*la*np.sin(theta))],\r\n \r\n [-(Ca/4-h/2), 0, 0, -(Ca/4-h/2) ,0 , -(Ca/4-h/2), 0, (np.cos(theta)*h/2-np.sin(theta)*Ca/4), 0, 0, 0, 0, (P*np.cos(theta)*h/2*-P*np.sin(theta)*Ca/4)], \r\n [0, (x2-x1), 0, 0, 0, 0, -(x3-x2), (np.cos(theta)*xa/2), 0, 0, 0, 0, (-P*np.cos(theta)*xa/2+q*la*np.sin(theta)*(la/2-x2))], \r\n [-(x2-x1), 0, 0, 0, 0, (x3-x2), 0, -np.sin(theta)*xa/2, 0, 0, 0, 0, (P*np.sin(theta)*xa/2+q*la*np.cos(theta)*(la/2-x2))], \r\n \r\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, x1, 1, -q*np.sin(theta)*((x1**4)/24)], \r\n [0, ((x2-x1)**3)/6, 0, 0, 0, 0, 0, 
((np.cos(theta))*((xa/2)**3)/6), 0, 0, x2, 1, (-q*np.sin(theta)*((x2**4)/24))], \r\n [0, ((x3-x1)**3)/6, 0, 0, ((x3-x2)**3)/6, 0, 0, ((np.cos(theta))*((x3-x2+xa/2)**3)/6), 0, 0, x3, 1, (-q*np.sin(theta)*((x3**4)/24)+P*(np.cos(theta))*(x3-x2-xa/2)**3/6)], \r\n [0, 0, 0, 0, 0, 0, 0, 0, x1, 1, 0, 0, (-E*I*d1*+q*np.cos(theta)*(x1**4)/24)], \r\n [(((x2-x1)**3)/6), 0, 0, 0, 0, 0, 0, ((-np.sin(theta))*((xa/2)**3)/6), x2, 1, 0, 0, (q*np.cos(theta)*(x2**4)/24)], \r\n [(((x3-x1)**3)/6),0,0,(((x3-x2)**3)/6),0,0,0,((-np.sin(theta))*((x3-x2+xa/2)**3)/6),x3,1,0,0,(-E*I*d3*+q*np.cos(theta)*((x3**4)/24)+P/6*np.sin(theta)*(x3-x2-xa/2)**3)]])\r\n \r\n \r\n unknown_matrix = equation_matrix[:,:-1]\r\n constant_matrix = equation_matrix[:,-1]\r\n \r\n \r\n solution_matrix = np.linalg.solve(unknown_matrix,constant_matrix)\r\n \r\n solution_matrix = solution_matrix/1000\r\n \r\n (R1y, R1z, R2x, R2y, R2z, R3y, R3z, RI, c1, c2, c3, c4) = tuple(solution_matrix)\r\n \r\n print((R1y, R1z, R2x, R2y, R2z, R3y, R3z, RI, c1, c2, c3, c4))", "def solve_elas(self,x,E_p=None):\n \n if x['Crystal_Structure'] == \"Cubic\":\n self.estf = self.Ccubic( x['Stiffness'][0], x['Stiffness'][1], x['Stiffness'][2] )\n\n elif x['Crystal_Structure'] == \"HCP\":\n self.estf = self.Chcp( x['Stiffness'][0], x['Stiffness'][1], x['Stiffness'][2], x['Stiffness'][3], x['Stiffness'][4] )\n\n # Update orientation\n for n in range(9):\n cell_num_list = list((9*self.cell_num)+n)\n self.orient.vector()[cell_num_list] = self.rots[self.subdomain_num,n]\n \n self.a = inner(self.sigs3x3(self.u), sym(grad(self.v)))*dx\n \n if E_p:\n # Note use of sym(), assuming E_p to be the \\chi field\n L_elas_rhs = self.L_elas + inner(self.sigs_e(sym(E_p)), sym(grad(self.v)))*dx\n else:\n L_elas_rhs = self.L_elas \n\n self.A_elas, self.b_elas = assemble_system(self.a, L_elas_rhs, self.bc_elas) \n \n # Attach near nullspace to matrix\n as_backend_type(self.A_elas).set_near_nullspace(self.null_space)\n\n # Set matrix operator\n self.elasticity_solver.set_operator(self.A_elas);\n\n # Compute solution\n self.elasticity_solver.solve(self.ue.vector(), self.b_elas);\n \n if E_p:\n self.Ue_sym = project( sym(grad(self.ue) - E_p), self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")\n else:\n self.Ue_sym = project( sym(grad(self.ue)), self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")\n \n self.sim_strn = np.reshape(self.Ue_sym.vector().get_local(),(len(self.grains.array()),9))\n\n for grain_no in range(self.grains.array().max()):\n # Grain numbering is 1 index origin\n cell_subset = self.grains.array()==(grain_no+1)\n if np.any(cell_subset):\n self.sim_avg[grain_no,:] = np.average(self.sim_strn[cell_subset,:],\n axis=0,weights=self.dVol[cell_subset]) \n \n deps = self.exp_strn - self.sim_avg\n resid = np.linalg.norm(deps.ravel())\n print(resid) #,self.its)\n return resid", "def __ComputeDesignMatrix(self, groundPoints):\n # initialization for readability\n omega = self.exteriorOrientationParameters[3]\n phi = self.exteriorOrientationParameters[4]\n kappa = self.exteriorOrientationParameters[5]\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n\n rotationMatrixT = self.rotationMatrix.T\n rotatedG = rotationMatrixT.dot(dXYZ)\n rT1g = rotatedG[0, :]\n rT2g = rotatedG[1, :]\n rT3g = rotatedG[2, :]\n\n focalBySqauredRT3g = self.camera.focalLength / rT3g ** 
2\n\n dxdg = rotationMatrixT[0, :][None, :] * rT3g[:, None] - rT1g[:, None] * rotationMatrixT[2, :][None, :]\n dydg = rotationMatrixT[1, :][None, :] * rT3g[:, None] - rT2g[:, None] * rotationMatrixT[2, :][None, :]\n\n dgdX0 = np.array([-1, 0, 0], 'f')\n dgdY0 = np.array([0, -1, 0], 'f')\n dgdZ0 = np.array([0, 0, -1], 'f')\n\n # Derivatives with respect to X0\n dxdX0 = -focalBySqauredRT3g * np.dot(dxdg, dgdX0)\n dydX0 = -focalBySqauredRT3g * np.dot(dydg, dgdX0)\n\n # Derivatives with respect to Y0\n dxdY0 = -focalBySqauredRT3g * np.dot(dxdg, dgdY0)\n dydY0 = -focalBySqauredRT3g * np.dot(dydg, dgdY0)\n\n # Derivatives with respect to Z0\n dxdZ0 = -focalBySqauredRT3g * np.dot(dxdg, dgdZ0)\n dydZ0 = -focalBySqauredRT3g * np.dot(dydg, dgdZ0)\n\n dRTdOmega = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'omega').T\n dRTdPhi = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'phi').T\n dRTdKappa = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'kappa').T\n\n gRT3g = dXYZ * rT3g\n\n # Derivatives with respect to Omega\n dxdOmega = -focalBySqauredRT3g * (dRTdOmega[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]\n\n dydOmega = -focalBySqauredRT3g * (dRTdOmega[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]\n\n # Derivatives with respect to Phi\n dxdPhi = -focalBySqauredRT3g * (dRTdPhi[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]\n\n dydPhi = -focalBySqauredRT3g * (dRTdPhi[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]\n\n # Derivatives with respect to Kappa\n dxdKappa = -focalBySqauredRT3g * (dRTdKappa[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]\n\n dydKappa = -focalBySqauredRT3g * (dRTdKappa[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]\n\n # all derivatives of x and y\n dd = np.array([np.vstack([dxdX0, dxdY0, dxdZ0, dxdOmega, dxdPhi, dxdKappa]).T,\n np.vstack([dydX0, dydY0, dydZ0, dydOmega, dydPhi, dydKappa]).T])\n\n a = np.zeros((2 * dd[0].shape[0], 6))\n a[0::2] = dd[0]\n a[1::2] = dd[1]\n\n return a", "def advection_1D(q_l,q_r,aux_l,aux_r,problem_data):\n \n # Number of Riemann problems we are solving\n num_rp = q_l.shape[1]\n\n # Return values\n wave = np.empty( (num_eqn, num_waves, num_rp) )\n s = np.empty( (num_waves, num_rp) )\n amdq = np.zeros( (num_eqn, num_rp) )\n apdq = np.zeros( (num_eqn, num_rp) )\n \n wave[0,0,:] = q_r[0,:] - q_l[0,:]\n s[0,:] = problem_data['u']\n if problem_data['u'] > 0:\n apdq[0,:] = s[0,:] * wave[0,0,:]\n else:\n amdq[0,:] = s[0,:] * wave[0,0,:]\n\n return wave, s, amdq, apdq", "def pm3d_formula(x,formula):\n \n if(formula<0):\t\t\n\tx=1.0-x\n\tformula=-formula\n\n if(formula==0): return 0\n elif(formula==1): return 0.5\n elif(formula==2): return 1\n elif(formula==3): return x\n elif(formula==4): return(x * x)\n elif(formula==5): return(x * x * x)\n elif(formula==6): return(x * x * x * x)\n elif(formula==7): return(Numeric.sqrt(x))\n elif(formula==8): return(x**0.25)\n elif(formula==9): return(Numeric.sin(90.0 * x * DEG2RAD))\n elif(formula==10): return(Numeric.cos(90 * x * DEG2RAD))\n elif(formula==11): return(Numeric.fabs(x - 0.5))\n elif(formula==12): return((2 * x - 1) * (2.0 * x - 1))\n elif(formula==13): return(Numeric.sin(180 * x * DEG2RAD))\n elif(formula==14): return(Numeric.fabs(cos(180 * x * DEG2RAD)))\n elif(formula==15): return(Numeric.sin(360 * x * DEG2RAD))\n elif(formula==16): return(Numeric.cos(360 * x * DEG2RAD))\n elif(formula==17): 
return(Numeric.fabs(Numeric.sin(360 * x * DEG2RAD)))\n elif(formula==18): return(Numeric.fabs(Numeric.cos(360 * x * DEG2RAD)))\n elif(formula==19): return(Numeric.fabs(Numeric.sin(720 * x * DEG2RAD)))\n elif(formula==20): return(Numeric.fabs(Numeric.cos(720 * x * DEG2RAD)))\n elif(formula==21): return(3 * x) # ???????\n elif(formula==22): return(3 * x - 1)\n elif(formula==23): return(3 * x - 2)\n elif(formula==24): return(Numeric.fabs(3 * x - 1))\n elif(formula==25): return(Numeric.fabs(3 * x - 2))\n elif(formula==26): return((1.5 * x - 0.5))\n elif(formula==27): return((1.5 * x - 1))\n elif(formula==28): return(Numeric.fabs(1.5 * x - 0.5))\n elif(formula==29): return(Numeric.fabs(1.5 * x - 1))\n elif(formula==30):\n if (x <= 0.25): return 0.0\n if (x >= 0.57): return 1.0\n\treturn(x / 0.32 - 0.78125)\n elif(formula==31):\n if (x <= 0.42): return 0.0\n if (x >= 0.92): return 1.0\n\treturn(2 * x - 0.84)\n elif(formula==32):\n if (x <= 0.42): return(4*x)\n if (x <= 0.92): return(-2 * x + 1.84)\n return(x / 0.08 - 11.5)\n elif(formula==33): return(Numeric.fabs(2 * x - 0.5))\n elif(formula==34): return(2 * x)\n elif(formula==35): return(2 * x - 0.5)\n elif(formula==36): return(2 * x - 1)\n return(0)", "def m1(el1, daz1, el2, daz2):\n x1=math.cos(el1 * math.pi/180.0)\n x2=math.cos(el2 * math.pi/180.0)\n b = (daz2-daz1)/(x2-x1)\n a = b*x1-daz1\n return (a,b)", "def process_adj(adj, model_name):\n if model_name == 'Cheby':\n laplacian = sp.eye(adj.shape[0]) - normalize_adj(adj - sp.eye(adj.shape[0]))\n # TODO(chamii): compare with\n # adj)\n largest_eigval, _ = eigsh(laplacian, 1, which='LM')\n laplacian_norm = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])\n return laplacian_norm\n else:\n return normalize_adj(adj)", "def ADT_QCD_LEPTON():\n\n # As input for the quark-mass ratios, we use the quark masses at MZ and the lepton masses\n ip = Num_input()\n\n mu = ip.mu_at_MZ\n md = ip.md_at_MZ\n ms = ip.ms_at_MZ\n me = ip.me\n mmu = ip.mmu\n mtau = ip.mtau\n\n # Create the ADT:\n\n gamma_hat_P63eu_Q81u = np.hstack((-16 * me**2/mu**2, np.zeros(5)))\n gamma_hat_P63muu_Q81u = np.hstack((np.zeros(1), -16 * mmu**2/mu**2, np.zeros(4)))\n gamma_hat_P63tauu_Q81u = np.hstack((np.zeros(2), -16 * mtau**2/mu**2, np.zeros(3)))\n\n gamma_hat_P63ed_Q81d = np.hstack((-16 * me**2/md**2, np.zeros(5)))\n gamma_hat_P63mud_Q81d = np.hstack((np.zeros(1), -16 * mmu**2/md**2, np.zeros(4)))\n gamma_hat_P63taud_Q81d = np.hstack((np.zeros(2), -16 * mtau**2/md**2, np.zeros(3)))\n\n gamma_hat_P63es_Q81s = np.hstack((-16 * me**2/ms**2, np.zeros(5)))\n gamma_hat_P63mus_Q81s = np.hstack((np.zeros(1), -16 * mmu**2/ms**2, np.zeros(4)))\n gamma_hat_P63taus_Q81s = np.hstack((np.zeros(2), -16 * mtau**2/ms**2, np.zeros(3)))\n\n\n\n gamma_hat_P63eu_Q82u = np.hstack((np.zeros(3), -16 * me**2/mu**2, np.zeros(2)))\n gamma_hat_P63muu_Q82u = np.hstack((np.zeros(4), -16 * mmu**2/mu**2, np.zeros(1)))\n gamma_hat_P63tauu_Q82u = np.hstack((np.zeros(5), -16 * mtau**2/mu**2))\n\n gamma_hat_P63ed_Q82d = np.hstack((np.zeros(3), -16 * me**2/md**2, np.zeros(2)))\n gamma_hat_P63mud_Q82d = np.hstack((np.zeros(4), -16 * mmu**2/md**2, np.zeros(1)))\n gamma_hat_P63taud_Q82d = np.hstack((np.zeros(5), -16 * mtau**2/md**2))\n\n gamma_hat_P63es_Q82s = np.hstack((np.zeros(3), -16 * me**2/ms**2, np.zeros(2)))\n gamma_hat_P63mus_Q82s = np.hstack((np.zeros(4), -16 * mmu**2/ms**2, np.zeros(1)))\n gamma_hat_P63taus_Q82s = np.hstack((np.zeros(5), -16 * mtau**2/ms**2))\n\n\n\n gamma_hat_P62ue_Q83u = np.hstack((-16 * me**2/mu**2, np.zeros(5)))\n 
gamma_hat_P62umu_Q83u = np.hstack((np.zeros(1), -16 * mmu**2/mu**2, np.zeros(4)))\n gamma_hat_P62utau_Q83u = np.hstack((np.zeros(2), -16 * mtau**2/mu**2, np.zeros(3)))\n\n gamma_hat_P62de_Q83d = np.hstack((-16 * me**2/md**2, np.zeros(5)))\n gamma_hat_P62dmu_Q83d = np.hstack((np.zeros(1), -16 * mmu**2/md**2, np.zeros(4)))\n gamma_hat_P62dtau_Q83d = np.hstack((np.zeros(2), -16 * mtau**2/md**2, np.zeros(3)))\n\n gamma_hat_P62se_Q83s = np.hstack((-16 * me**2/ms**2, np.zeros(5)))\n gamma_hat_P62smu_Q83s = np.hstack((np.zeros(1), -16 * mmu**2/ms**2, np.zeros(4)))\n gamma_hat_P62stau_Q83s = np.hstack((np.zeros(2), -16 * mtau**2/ms**2, np.zeros(3)))\n\n\n\n gamma_hat_P62ue_Q84u = np.hstack((np.zeros(3), -16 * me**2/mu**2, np.zeros(2)))\n gamma_hat_P62umu_Q84u = np.hstack((np.zeros(4), -16 * mmu**2/mu**2, np.zeros(1)))\n gamma_hat_P62utau_Q84u = np.hstack((np.zeros(5), -16 * mtau**2/mu**2))\n\n gamma_hat_P62de_Q84d = np.hstack((np.zeros(3), -16 * me**2/md**2, np.zeros(2)))\n gamma_hat_P62dmu_Q84d = np.hstack((np.zeros(4), -16 * mmu**2/md**2, np.zeros(1)))\n gamma_hat_P62dtau_Q84d = np.hstack((np.zeros(5), -16 * mtau**2/md**2))\n\n gamma_hat_P62se_Q84s = np.hstack((np.zeros(3), -16 * me**2/ms**2, np.zeros(2)))\n gamma_hat_P62smu_Q84s = np.hstack((np.zeros(4), -16 * mmu**2/ms**2, np.zeros(1)))\n gamma_hat_P62stau_Q84s = np.hstack((np.zeros(5), -16 * mtau**2/ms**2))\n\n\n\n gamma_hat_Q81u = np.vstack((gamma_hat_P63eu_Q81u, gamma_hat_P63muu_Q81u, gamma_hat_P63tauu_Q81u, np.zeros((15,6))))\n gamma_hat_Q81d = np.vstack((np.zeros((3,6)), gamma_hat_P63ed_Q81d, gamma_hat_P63mud_Q81d, gamma_hat_P63taud_Q81d, np.zeros((12,6))))\n gamma_hat_Q81s = np.vstack((np.zeros((6,6)), gamma_hat_P63es_Q81s, gamma_hat_P63mus_Q81s, gamma_hat_P63taus_Q81s, np.zeros((9,6))))\n\n gamma_hat_Q82u = np.vstack((gamma_hat_P63eu_Q82u, gamma_hat_P63muu_Q82u, gamma_hat_P63tauu_Q82u, np.zeros((15,6))))\n gamma_hat_Q82d = np.vstack((np.zeros((3,6)), gamma_hat_P63ed_Q82d, gamma_hat_P63mud_Q82d, gamma_hat_P63taud_Q82d, np.zeros((12,6))))\n gamma_hat_Q82s = np.vstack((np.zeros((6,6)), gamma_hat_P63es_Q82s, gamma_hat_P63mus_Q82s, gamma_hat_P63taus_Q82s, np.zeros((9,6))))\n\n gamma_hat_Q83u = np.vstack((np.zeros((9,6)), gamma_hat_P62ue_Q83u, gamma_hat_P62umu_Q83u, gamma_hat_P62utau_Q83u, np.zeros((6,6))))\n gamma_hat_Q83d = np.vstack((np.zeros((12,6)), gamma_hat_P62de_Q83d, gamma_hat_P62dmu_Q83d, gamma_hat_P62dtau_Q83d, np.zeros((3,6))))\n gamma_hat_Q83s = np.vstack((np.zeros((15,6)), gamma_hat_P62se_Q83s, gamma_hat_P62smu_Q83s, gamma_hat_P62stau_Q83s))\n\n gamma_hat_Q84u = np.vstack((np.zeros((9,6)), gamma_hat_P62ue_Q84u, gamma_hat_P62umu_Q84u, gamma_hat_P62utau_Q84u, np.zeros((6,6))))\n gamma_hat_Q84d = np.vstack((np.zeros((12,6)), gamma_hat_P62de_Q84d, gamma_hat_P62dmu_Q84d, gamma_hat_P62dtau_Q84d, np.zeros((3,6))))\n gamma_hat_Q84s = np.vstack((np.zeros((15,6)), gamma_hat_P62se_Q84s, gamma_hat_P62smu_Q84s, gamma_hat_P62stau_Q84s))\n\n\n\n\n gamma_hat = np.array([gamma_hat_Q81u, gamma_hat_Q81d, gamma_hat_Q81s, gamma_hat_Q82u, gamma_hat_Q82d, gamma_hat_Q82s,\n gamma_hat_Q83u, gamma_hat_Q83d, gamma_hat_Q83s, gamma_hat_Q84u, gamma_hat_Q84d, gamma_hat_Q84s])\n\n\n # Return the tensor\n\n # tensor, zeile, spalte\n\n return gamma_hat", "def get_projection(attrs):\n df = load_df()\n\n X = get_all_vectors(df, attrs)\n logger.info('- Data shape original: {}'.format(X.shape))\n\n X = X if isinstance(X, np.ndarray) else X.toarray()\n X = dimension_reduction(X, attrs['decomposition'], attrs['distanceMetric'])\n return X, df", "def abc_matrix(a, b, 
c):\n ax = np.linalg.norm(a)\n a_hat = a/ax\n bx = np.dot(b, a_hat)\n by = np.linalg.norm(np.cross(a_hat, b))\n cx = np.dot(c, a_hat)\n axb = np.cross(a,b)\n axb_hat = axb / np.linalg.norm(axb)\n cy = np.dot(c, np.cross(axb_hat, a_hat))\n cz = np.dot(c, axb_hat)\n return np.array([[ax, bx, cx],[0, by, cy],[0 , 0, cz]])", "def P(self):\n self.eigenmatrix()", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def test_issue43():\n a = np.array([-1., 1., 1., np.pi - 5e-8])\n a[:3] = a[:3] / np.linalg.norm(a[:3])\n R = pr.matrix_from_axis_angle(a)\n a2 = pr.axis_angle_from_matrix(R)\n pr.assert_axis_angle_equal(a, a2)\n\n a = np.array([-1., 1., 1., 5e-8])\n a[:3] = a[:3] / np.linalg.norm(a[:3])\n R = pr.matrix_from_axis_angle(a)\n a2 = pr.axis_angle_from_matrix(R)\n pr.assert_axis_angle_equal(a, a2)\n\n a = np.array([-1., 1., 1., np.pi + 5e-8])\n a[:3] = a[:3] / np.linalg.norm(a[:3])\n R = pr.matrix_from_axis_angle(a)\n a2 = pr.axis_angle_from_matrix(R)\n pr.assert_axis_angle_equal(a, a2)\n\n a = np.array([-1., 1., 1., -5e-8])\n a[:3] = a[:3] / 
np.linalg.norm(a[:3])\n R = pr.matrix_from_axis_angle(a)\n a2 = pr.axis_angle_from_matrix(R)\n pr.assert_axis_angle_equal(a, a2)", "def _xyz_matrix():\n fx = 583.0\n fy = 583.0\n cx = 321\n cy = 249\n a = -0.0028300396\n b = 3.1006268\n mat = np.array([[1/fx, 0, 0, -cx/fx],\n [0, -1/fy, 0, cy/fy],\n [0, 0, 0, -1],\n [0, 0, a, b]])\n return mat", "def _get_proj_mat(self): \n if self._proj_mat is None:\n if self.symmetric:\n IP_mat = self.vec_space.compute_symmetric_inner_product_mat(\n self.basis_vecs)\n else:\n IP_mat = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vecs, self.basis_vecs)\n self._proj_mat = np.linalg.inv(IP_mat)\n return self._proj_mat", "def get_affine(x, m, c):\n x = m*x + c\n return x" ]
[ "0.6180545", "0.5795742", "0.5749824", "0.56150705", "0.55909604", "0.55889744", "0.5540044", "0.5520576", "0.5389443", "0.53017646", "0.52529544", "0.5236133", "0.5234051", "0.52170014", "0.5192699", "0.5171544", "0.51684576", "0.5142525", "0.51380336", "0.5134151", "0.5129932", "0.51243186", "0.5115185", "0.5096115", "0.50739235", "0.50687164", "0.50564426", "0.5033324", "0.5014548", "0.4993605", "0.49929714", "0.49895224", "0.49884525", "0.49829054", "0.497914", "0.49789762", "0.496627", "0.49645418", "0.4961342", "0.49499413", "0.4948478", "0.49440417", "0.49419618", "0.4924274", "0.49236414", "0.4911912", "0.49058163", "0.48958358", "0.48856094", "0.4883935", "0.48822132", "0.48764133", "0.48731747", "0.48657423", "0.4865701", "0.48531005", "0.48522133", "0.48475942", "0.48457408", "0.48451644", "0.48348466", "0.4834037", "0.4823537", "0.4815119", "0.4814508", "0.48055065", "0.47994608", "0.4786871", "0.47701707", "0.47607782", "0.475555", "0.47540885", "0.47532862", "0.47374448", "0.47371343", "0.47264957", "0.47140956", "0.4710219", "0.4710028", "0.47084466", "0.47074157", "0.4703723", "0.4700989", "0.46998486", "0.46979523", "0.4696775", "0.46884617", "0.46879575", "0.46864027", "0.46813262", "0.46812", "0.4680963", "0.4679754", "0.467461", "0.4672657", "0.46679616", "0.46669084", "0.46546724", "0.46539485", "0.46538344" ]
0.59788334
1
Determines the quaternion representing the best possible transformation of two coordinate systems into each other using a least-squares approach. This function is used by the get_refined_rotation() function.
def get_best_quaternion(coordlist1, coordlist2):
    M = np.matrix([[0, 0, 0],
                   [0, 0, 0],
                   [0, 0, 0]])
    if len(coordlist1) <= len(coordlist2):
        number = len(coordlist1)
    else:
        number = len(coordlist2)
    for i in xrange(number):
        aaa = np.matrix(np.outer(coordlist1[i], coordlist2[i]))
        M = M + aaa
    N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2])
    N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2])
    N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2])
    N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2])
    N12 = float(M[1][:, 2] - M[2][:, 1])
    N13 = float(M[2][:, 0] - M[0][:, 2])
    N14 = float(M[0][:, 1] - M[1][:, 0])
    N21 = float(N12)
    N23 = float(M[0][:, 1] + M[1][:, 0])
    N24 = float(M[2][:, 0] + M[0][:, 2])
    N31 = float(N13)
    N32 = float(N23)
    N34 = float(M[1][:, 2] + M[2][:, 1])
    N41 = float(N14)
    N42 = float(N24)
    N43 = float(N34)
    N = np.matrix([[N11, N12, N13, N14],
                   [N21, N22, N23, N24],
                   [N31, N32, N33, N34],
                   [N41, N42, N43, N44]])
    values, vectors = np.linalg.eig(N)
    w = list(values)
    quat = vectors[:, w.index(max(w))]
    quat = np.array(quat).reshape(-1, ).tolist()
    return quat, max(w)
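Note (not part of the dataset record): a minimal usage sketch of the document function above, to show how the query description maps onto the code. It assumes numpy is imported as np and that get_best_quaternion is available as defined in the record; the two point lists are made-up example inputs (the second set is the first rotated 90 degrees about z). The record's function uses Python 2's xrange, so under Python 3 that call would have to be changed to range for this sketch to run.

import numpy as np

# Hypothetical input: three reference axes, and the same axes rotated by
# 90 degrees about z (x -> y, y -> -x, z -> z).
coords_a = [np.array([1.0, 0.0, 0.0]),
            np.array([0.0, 1.0, 0.0]),
            np.array([0.0, 0.0, 1.0])]
coords_b = [np.array([0.0, 1.0, 0.0]),
            np.array([-1.0, 0.0, 0.0]),
            np.array([0.0, 0.0, 1.0])]

quat, largest_eigenvalue = get_best_quaternion(coords_a, coords_b)
# quat is the eigenvector of the 4x4 N matrix belonging to its largest
# eigenvalue, i.e. the unit quaternion of the best-fit rotation relating
# the two coordinate systems in a least-squares sense.
print(quat)
print(largest_eigenvalue)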
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_best_rotation(q1, q2, allow_reflection = False, only_xy = False):\n if q1.ndim != 2 or q2.ndim != 2:\n raise Exception(\"This only supports curves of shape (N,M) for N dimensions and M samples\")\n\n n = q1.shape[0]\n\n # if only_xy, strip everything but the x and y coordinates of q1 and q2\n if only_xy:\n _q1 = q1[0:2, :]\n _q2 = q2[0:2, :]\n else:\n _q1 = q1\n _q2 = q2\n\n _n = _q1.shape[0]\n A = _q1@_q2.T\n U, s, Vh = svd(A)\n S = eye(_n)\n\n # if reflections are not allowed and the determinant of A is negative,\n # then the entry corresponding to the smallest singular value is negated\n # as in the Kabsch algorithm\n if det(A) < 0 and not allow_reflection:\n S[-1, -1] = -1 # the last entry of the matrix becomes -1\n\n _R = U@S@Vh # optimal\n \n # if only_xy, the top left block of the matrix is _R and the rest is identity matrix\n if only_xy:\n R = eye(n)\n R[0:2, 0:2] = _R\n else:\n R = _R\n \n q2new = R@q2\n\n return (q2new, R)", "def rotation_only(q_1: Q, h: Q) -> Q:\n h_4_rotation = vector_q(h)\n return rotation_and_or_boost(q_1, h_4_rotation)", "def next_rotation(q_1: Q, q_2: Q) -> Q:\n q_1.check_representations(q_2)\n\n if not math.isclose(q_1.t, q_2.t):\n raise ValueError(f\"Oops, to be a rotation, the first values must be the same: {q_1.t} != {q_2.t}\")\n\n if not math.isclose(norm_squared(q_1).t, norm_squared(q_2).t):\n raise ValueError(f\"Oops, the norm squared of these two are not equal: {norm_squared(q_1).t} != {norm_squared(q_2).t}\")\n\n next_rot = product(q_1, q_2)\n v_abs_q_1 = abs_of_vector(q_1).t\n next_vector_normalized = normalize(vector_q(next_rot), v_abs_q_1)\n next_vector_normalized.t = q_1.t\n\n return next_vector_normalized", "def setup_s_matrix(dq_1, dq_2):\n scalar_parts_1 = dq_1.scalar()\n scalar_parts_2 = dq_2.scalar()\n\n assert np.allclose(\n scalar_parts_1.dq, scalar_parts_2.dq,\n atol=5e-2), (\n \"\\ndq1:\\n{},\\nscalar_parts_1:\\n{},\\ndq2:\\n{},\\nscalar_parts_2:\\n{}\\n\"\n \"Scalar parts should always be equal.\".format(dq_1, scalar_parts_1, dq_2,\n scalar_parts_2))\n\n s_matrix = np.zeros([6, 8])\n s_matrix[0:3, 0:3] = skew_from_vector(dq_1.q_rot.q[0:-1] + dq_2.q_rot.q[0:-1])\n s_matrix[0:3, 3] = dq_1.q_rot.q[0:-1] - dq_2.q_rot.q[0:-1]\n s_matrix[3:6, 0:3] = skew_from_vector(dq_1.q_dual.q[0:-1] +\n dq_2.q_dual.q[0:-1])\n s_matrix[3:6, 3] = dq_1.q_dual.q[0:-1] - dq_2.q_dual.q[0:-1]\n s_matrix[3:6, 4:7] = skew_from_vector(dq_1.q_rot.q[0:-1] + dq_2.q_rot.q[0:-1])\n s_matrix[3:6, 7] = dq_1.q_rot.q[0:-1] - dq_2.q_rot.q[0:-1]\n # print(\"S: \\n{}\".format(s_matrix))\n\n rank_s_matrix = np.linalg.matrix_rank(s_matrix)\n assert rank_s_matrix <= 6, s_matrix\n return s_matrix.copy()", "def next_rotation_randomized(q_1: Q, q_2: Q) -> Q:\n q_1.check_representations(q_2)\n\n if not math.isclose(q_1.t, q_2.t):\n raise ValueError(f\"Oops, to be a rotation, the first values must be the same: {q_1.t} != {q_2.t}\")\n\n if not math.isclose(norm_squared(q_1).t, norm_squared(q_2).t):\n raise ValueError(f\"Oops, the norm squared of these two are not equal: {norm_squared(q_1).t} != {norm_squared(q_2).t}\")\n\n next_rot = product(product(q_1, q_2), qrandom())\n v_abs_q_1 = abs_of_vector(q_1).t\n next_vector_normalized = normalize(vector_q(next_rot), v_abs_q_1)\n next_vector_normalized.t = q_1.t\n\n return next_vector_normalized", "def quat_diff(q1, q2):\n q1 = np.asarray(q1)\n if np.dot(q1, q2) < 0:\n # Quaternions have opposite handedness, flip q1 since it's already an ndarray\n q1 = -1 * q1\n q_inv = q1 * np.array([1.0, -1.0, -1.0, -1.0])\n q_inv = q_inv / 
np.dot(q_inv, q_inv)\n\n # We only coare about the scalar component, compose only that\n z0 = q_inv[0] * q2[0] - q_inv[1] * q2[1] - q_inv[2] * q2[2] - q_inv[3] * q2[3]\n return 2 * float(np.arccos(min(1, max(-1, z0))))", "def slerp(cls, q0, q1, amount=0.5):\n # Ensure quaternion inputs are unit quaternions and 0 <= amount <=1\n q0._fast_normalise()\n q1._fast_normalise()\n amount = np.clip(amount, 0, 1)\n\n dot = np.dot(q0.q, q1.q)\n\n # If the dot product is negative, slerp won't take the shorter path.\n # Note that v1 and -v1 are equivalent when the negation is applied to all four components.\n # Fix by reversing one quaternion\n if (dot < 0.0):\n q0.q = -q0.q\n dot = -dot\n\n # sin_theta_0 can not be zero\n if (dot > 0.9995):\n qr = Quaternion(q0.q + amount*(q1.q - q0.q))\n qr._fast_normalise()\n return qr\n\n theta_0 = np.arccos(dot) # Since dot is in range [0, 0.9995], np.arccos() is safe\n sin_theta_0 = np.sin(theta_0)\n\n theta = theta_0*amount\n sin_theta = np.sin(theta)\n\n s0 = np.cos(theta) - dot * sin_theta / sin_theta_0\n s1 = sin_theta / sin_theta_0\n qr = Quaternion((s0 * q0.q) + (s1 * q1.q))\n qr._fast_normalise()\n return qr", "def rotation_and_or_boost(q_1: Q, h: Q, verbose=False) -> Q:\n q_1.check_representations(h)\n end_q_type = f\"{q_1.q_type}rotation/boost\"\n\n if not h.is_symbolic():\n\n if (not math.isclose(h.t, 0) and not equal(q0(), vector_q(h))) or equal(h, q0()):\n\n if not math.isclose(square(h).t, 1):\n # The scalar part of h will be used to calculate cosh(h.t) and sinh(h.t)\n # The normalized vector part will point sinh(t) in the direction of vector_q(h)\n h_scalar = scalar_q(h)\n h_nomralized_vector = normalize(vector_q(h))\n\n if np.abs(h_scalar.t) > 1:\n h_scalar = inverse(h_scalar)\n\n h_cosh = product(add(exp(h_scalar), exp(flip_sign(h_scalar))), q1(1.0 / 2.0))\n h_sinh = product(dif(exp(h_scalar), exp(flip_sign(h_scalar))), q1(1.0 / 2.0))\n\n h = add(h_cosh, product(h_nomralized_vector, h_sinh))\n\n if verbose:\n h.print_state(\"To do a Lorentz boost, adjusted value of h so scalar_q(h²) = 1\")\n\n else:\n if not math.isclose(norm_squared(h).t, 1):\n h = normalize(h)\n if verbose:\n h.print_state(\"To do a 3D rotation, adjusted value of h so scalar_q(h h^*) = 1\")\n\n triple_1 = triple_product(h, q_1, conj(h))\n triple_2 = conj(triple_product(h, h, q_1))\n triple_3 = conj(triple_product(conj(h), conj(h), q_1))\n\n triple_23 = dif(triple_2, triple_3)\n half_23 = product(triple_23, Q([0.5, 0, 0, 0], representation=q_1.representation))\n triple_123 = add(triple_1, half_23)\n triple_123.q_type = end_q_type\n triple_123.representation = q_1.representation\n\n return triple_123", "def compute_error_minimizing_rotation(Points1, Points2):\r\n #TODO: implement me\r\n\r\n H_1_1 = 0\r\n H_1_2 = 0\r\n H_2_1 = 0\r\n H_2_2 = 0\r\n\r\n for t in range(1, len(Points1)):\r\n H_1_1 = H_1_1 + (Points1[t][0] * Points2[t][0])\r\n H_1_2 = H_1_2 + (Points1[t][1] * Points2[t][0])\r\n H_2_1 = H_2_1 + (Points1[t][0] * Points2[t][1])\r\n H_2_2 = H_2_2 + (Points1[t][1] * Points2[t][1])\r\n\r\n H = [[H_1_1,H_1_2],[H_2_1,H_2_2]]\r\n\r\n U, S, V = numpy.linalg.svd(H)\r\n\r\n V = numpy.transpose(V)\r\n\r\n R_1_1 = (U[0][0] * V[0][0]) +((U[0][1] * V[1][0]))\r\n R_1_2 = (U[0][0] * V[0][1]) +((U[0][1] * V[1][1]))\r\n R_2_1 = (U[1][0] * V[0][0]) +((U[1][1] * V[1][0]))\r\n R_2_2 = (U[1][0] * V[0][1]) +((U[1][1] * V[1][1]))\r\n\r\n R = [[R_1_1,R_1_2],[R_2_1,R_2_2]]\r\n\r\n return R", "def find_rotation_and_seed_q(q1, q2, closed=0, rotation=True, method=\"DP\"):\n\n n, T = q1.shape\n scl = 
4.\n minE = 4000\n if closed == 1:\n end_idx = int(floor(T/scl))\n scl = 4\n else:\n end_idx = 0\n \n for ctr in range(0, end_idx+1):\n if closed == 1:\n q2n = shift_f(q2, scl*ctr)\n else:\n q2n = q2\n \n if rotation:\n q2new, R = find_best_rotation(q1, q2n)\n else:\n q2new = q2n.copy()\n R = eye(n)\n\n # Reparam\n if norm(q1-q2new,'fro') > 0.0001:\n gam = optimum_reparam_curve(q2new, q1, 0.0, method)\n gamI = uf.invertGamma(gam)\n q2new = group_action_by_gamma(q2new,gamI)\n if closed == 1:\n q2new = project_curve(q2new)\n else:\n gamI = linspace(0,1,T)\n \n tmp = innerprod_q2(q1,q2new)\n if tmp > 1:\n tmp = 1\n if tmp < -1:\n tmp = -1\n Ec = arccos(tmp)\n if Ec < minE:\n Rbest = R\n q2best = q2new\n gamIbest = gamI\n minE = Ec\n\n return (q2best, Rbest, gamIbest)", "def is_same_quaternion(q0, q1):\r\n q0 = numpy.array(q0)\r\n q1 = numpy.array(q1)\r\n return numpy.allclose(q0, q1) or numpy.allclose(q0, -q1)", "def quaternion_product(q1, q2):\r\n Wa = q1[0]\r\n Wb = q2[0]\r\n Xa = q1[1]\r\n Xb = q2[1]\r\n Ya = q1[2]\r\n Yb = q2[2]\r\n Za = q1[3]\r\n Zb = q2[3]\r\n x = Xa * Wb + Ya * Zb - Za * Yb + Wa * Xb\r\n y = -Xa * Zb + Ya * Wb + Za * Xb + Wa * Yb\r\n z = Xa * Yb - Ya * Xb + Za * Wb + Wa * Zb\r\n w = -Xa * Xb - Ya * Yb - Za * Zb + Wa * Wb\r\n return [w, x, y, z]", "def find_rotation_and_seed_unique(q1, q2, closed=0, lam=0.0, rotation=True, method=\"DP\"):\n\n n, T = q1.shape\n\n scl = 4.\n minE = 1000\n if closed == 1:\n end_idx = int(floor(T/scl))\n scl = 4\n else:\n end_idx = 0\n \n for ctr in range(0, end_idx+1):\n if closed == 1:\n q2n = shift_f(q2, scl*ctr)\n else:\n q2n = q2.copy()\n \n if rotation:\n q2new, R = find_best_rotation(q1, q2n)\n else:\n q2new = q2n\n R = eye(n)\n\n # Reparam\n if norm(q1-q2new,'fro') > 0.0001:\n gam = optimum_reparam_curve(q2new, q1, lam, method)\n gamI = uf.invertGamma(gam)\n p2n = q_to_curve(q2n)\n p2n = group_action_by_gamma_coord(p2n,gamI)\n q2new = curve_to_q(p2n)[0]\n if closed == 1:\n q2new = project_curve(q2new)\n else:\n gamI = linspace(0,1,T)\n \n tmp = innerprod_q2(q1,q2new)\n if tmp > 1:\n tmp = 1\n if tmp < -1:\n tmp = -1\n Ec = arccos(tmp)\n if Ec < minE:\n Rbest = R\n q2best = q2new\n gamIbest = gamI\n minE = Ec\n\n return (q2best, Rbest, gamIbest)", "def qdist(q1: np.ndarray, q2: np.ndarray) -> float:\n _quaternions_guard_clauses(q1, q2)\n q1, q2 = np.copy(q1), np.copy(q2)\n if q1.ndim == 1:\n q1 /= np.linalg.norm(q1)\n q2 /= np.linalg.norm(q2)\n if np.allclose(q1, q2) or np.allclose(-q1, q2):\n return 0.0\n return min(np.linalg.norm(q1-q2), np.linalg.norm(q1+q2))\n q1 /= np.linalg.norm(q1, axis=1)[:, None]\n q2 /= np.linalg.norm(q2, axis=1)[:, None]\n return np.r_[[np.linalg.norm(q1-q2, axis=1)], [np.linalg.norm(q1+q2, axis=1)]].min(axis=0)", "def quaternion_multiplication(self, q1, q2):\n\n # Unpack these quaternions\n a_scalar, a_vecx, a_vecy, a_vecz = torch.unbind(q1,\n dim=-1)\n b_scalar, b_vecx, b_vecy, b_vecz = torch.unbind(q2,\n dim=-1)\n\n r_scalar = a_scalar * b_scalar - a_vecx * b_vecx - a_vecy * b_vecy - a_vecz * b_vecz\n r_vecx = a_scalar * b_vecx + a_vecx * b_scalar + a_vecy * b_vecz - a_vecz * b_vecy\n r_vecy = a_scalar * b_vecy + a_vecy * b_scalar + a_vecz * b_vecx - a_vecx * b_vecz\n r_vecz = a_scalar * b_vecz + a_vecz * b_scalar + a_vecx * b_vecy - a_vecy * b_vecx\n\n \"\"\"\n a = torch.randn([2, 3, 4])\n b = torch.randn([2, 3, 4])\n print(a) # 2 matrices of size 3 x 4\n print(b) # 2 matrices of size 3 x 4\n print(torch.stack([a, b])) # 4 matrices of size 3 x 4, first a, then b\n \"\"\"\n return torch.stack(\n 
[r_scalar, r_vecx, r_vecy, r_vecz],\n dim=-1\n )", "def compute_subspace_angles(S1, S2):\n # Check the if the input arrays are 1D or 2D\n if S1.ndim == 1:\n # mat1 = np.reshape(S1, (1,S1.size))\n mat1 = np.reshape(S1, (S1.size, 1))\n elif S1.ndim == 2:\n mat1 = S1\n else:\n raise ValueError('The function is intended only to handle 1D and 2D numpy arrays')\n if S2.ndim == 1:\n # mat2 = np.reshape(S2, (1,S2.size))\n mat2 = np.reshape(S2, (S2.size, 1))\n elif S2.ndim == 2:\n mat2 = S2\n else:\n raise ValueError('The function is intended only to handle 1D and 2D numpy arrays')\n\n\n # Do a QR Factorization of S1 and S2\n Q1, R1 = np.linalg.qr(mat1)\n # print('S1 = \\n', S1)\n # print('Q1 = \\n', Q1)\n Q2, R2 = np.linalg.qr(mat2)\n # print('S1 = \\n', S2)\n # print('Q2 = \\n', Q2)\n intmat = np.matmul(Q1.T, Q2)\n # print('intmat = \\n', intmat)\n Y, s, Z = np.linalg.svd(intmat)\n # print('Y = \\n', Y)\n # print('U = \\n', np.matmul(Q1, Y))\n # print('V = \\n', np.matmul(Q2, Y))\n # print('s = \\n', s)\n\n # NaN prevention check\n indices = np.where(s > 1) # Get the indices where the violation exisits\n for entry in indices: # Loop over these indices to fix the violation\n for i in entry:\n if s[i] - 1 < 1.e-13: # This violation limit is pulled out of thin air!\n s[i] = 1.0\n\n s_radians = np.arccos(s)\n\n return s_radians", "def compare_quaternion_lists(new_quats, ref_quats, tol=0.05):\n nquats = len(ref_quats) # 3 for multiruby case\n\n # FIRST CHECK THAT NUMBER OF ORIENTATIONS MATCHES\n if len(new_quats) != nquats:\n raise RuntimeError(\n \"Incorrect number of orientations found; should be %d\" % nquats\n + \", currently found %d\" % len(new_quats)\n )\n\n # NEXT CHECK THE ACTUAL MISORIENTATIONS\n # !!! order may be different\n for i, nq in enumerate(new_quats):\n ang, mis = misorientation(nq.reshape(4, 1), ref_quats.T)\n if np.min(ang) > np.radians(tol):\n raise RuntimeError(\n \"Misorientation for test orientation %d \" % i\n + \"is greater than threshold\"\n )", "def qslerp(q, x1, x2, geometric=False, eq_tolerance=1e-12):\n qi = deepcopy(q)\n x2i = deepcopy(x2)\n x1i = deepcopy(x1)\n\n # check that quaternions are consistent with generic quaternion invariants\n qi = qvalidate(qi,'qi','qslerp')\n\n if isinstance(qi, int):\n return qi\n\n # check that input quaternions are unit length\n qn = qnorm(qi)\n\n idx = np.argwhere(np.abs(qn - 1.0) > eq_tolerance).flatten()\n if len(idx) > 0:\n logging.error('At least one input quaternion is not unit length')\n return\n\n if qi.shape[0] != len(x1i):\n logging.error('Number of input abscissa values does not match the number of input quaternions')\n return\n\n # check that input abscissa values are monotonic\n if len(x1i) > 1:\n idx = np.argwhere((x1i[1:len(x1i)]-x1i[0:len(x1i)-1]) < 0)\n if len(idx) > 0:\n logging.error('input abscissa values not monotonic')\n return\n\n # check that output abscissa values are strictly monotonic\n if len(x2i) > 1:\n idx = np.argwhere((x2i[1:len(x2i)]-x2i[0:len(x2i)-1]) < 0)\n if len(idx) > 0:\n logging.error('output abscissa values not monotonic')\n return\n\n # construct the output array\n q_out = np.zeros((len(x2i), 4))\n\n # if output abscissa values are outside of the range of input abscissa\n # values constant extrapolation is used\n idx = np.argwhere(x2i < x1i[0]).flatten()\n\n if len(idx) > 0:\n q_out[idx, :] = np.array(idx.size*[qi[0, :]])\n\n idx = np.argwhere(x2i > x1i[-1]).flatten()\n\n if len(idx) > 0:\n q_out[idx, :] = np.array(idx.size*[qi[-1, :]])\n\n out_idx = np.argwhere((x2i >= x1i[0]) & (x2i <= 
x1i[-1])).flatten()\n\n if len(out_idx) == 0:\n return q_out.reshape((-1, 4))\n\n x2i = x2i[out_idx]\n\n # construct arguments to the slerp function, this includes the source\n # quaternion list, the target quaternions list, and the proportion of\n # interpolation list for each quaternion pair. They should all have\n # the same number of elements as the output abscissa value list\n\n t_temp = interpol(np.arange(qi.shape[0], dtype='float64'), x1i, x2i)\n\n t_list = t_temp % 1.0\n\n q_idx = np.int64(np.floor(t_temp))\n\n # if the last abscissa values are identical,the indexing scheme to\n # generate the q_list could generate an overflow, the two conditionals\n # below prevent this\n idx = np.argwhere(np.abs(t_list) <= eq_tolerance).flatten() # where t_list =~ 0.0\n if len(idx) > 0:\n q_out[out_idx[idx], :] = qi[q_idx[idx], :]\n\n slerp_idx = np.argwhere(np.abs(t_list) > eq_tolerance).flatten() # where t_list !=~ 0.0\n\n # if there is nothing left, then we're done\n if slerp_idx.size == 0:\n return q_out.reshape((-1, 4))\n\n q_idx = q_idx[slerp_idx]\n out_idx = out_idx[slerp_idx]\n t_list = t_list[slerp_idx]\n\n q1_list = qi[q_idx, :]\n\n q2_list = qi[q_idx + 1, :]\n\n # calculate the dot product which is needed to to flip the\n # appropriate quaternions to guarantee interpolation is done along the\n # shortest path\n dotp = qdotp(q1_list, q2_list)\n\n if dotp.ndim == 0 and dotp == -1:\n return -1\n\n # the following code flips quaternions in q2_list to ensure the\n # shortest path is followed\n idx = np.argwhere(dotp < 0.0).flatten()\n\n if idx.size != 0:\n q2_list[idx, :] = -q2_list[idx, :]\n\n # interpolation cannot be performed on colinear quaternions\n # it is assumed that colinear quaternions will be returned unchanged\n # since dotp(q1,q2) = cos(angle between q1,q2) if dotp = 1.0 the\n # quaternions are colinear\n idx = np.argwhere(np.abs(dotp - 1.0) <= eq_tolerance).flatten() # where dotp = 1.0\n\n # store colinear quaternions into output array\n if idx.size != 0:\n q_out[out_idx[idx], :] = q1_list[idx, :]\n\n # copy non-colinear quaternions for processing\n idx = np.argwhere(np.abs(dotp - 1.0) > eq_tolerance).flatten()\n\n if idx.size == 0:\n return q_out.reshape((-1, 4)) # if no non-colinear quaternions are left, we are done\n\n dotp = dotp[idx]\n t_list = t_list[idx]\n q1_list = q1_list[idx, :]\n q2_list = q2_list[idx, :]\n out_idx = out_idx[idx]\n\n # now the actual processing begins\n\n # testing both methods to verify results\n if geometric:\n theta = np.arccos(dotp)\n\n sin_theta = np.sin(theta)\n\n theta_t = theta * t_list\n\n co1 = np.sin(theta - theta_t) / sin_theta\n co2 = np.sin(theta_t) / sin_theta\n\n q_out[out_idx, 0] = co1 * q1_list[:, 0] + co2 * q2_list[:, 0]\n q_out[out_idx, 1] = co1 * q1_list[:, 1] + co2 * q2_list[:, 1]\n q_out[out_idx, 2] = co1 * q1_list[:, 2] + co2 * q2_list[:, 2]\n q_out[out_idx, 3] = co1 * q1_list[:, 3] + co2 * q2_list[:, 3]\n else:\n # slerp will be performed by calculating:\n # ((q2*(q1^-1))^t)*q1\n # since the quaternions are unit q1^-1 = conjugate(q1)\n # exponentiation can be calculated by transforming to\n # polar form cos(theta*t)+v*sin(theta*t)\n # theta = acos(q[0])\n # NOTE: this potentially more numerically stable implementation needs\n # to be verified by comparison to the geometric slerp\n q1_conj = qconj(q1_list)\n\n q2_q1_prod = qdecompose(qmult(q2_list, q1_conj))\n\n if isinstance(q2_q1_prod, int):\n return -1\n\n # sometimes a dimension disappears.\n if q2_q1_prod.ndim == 1 and q2_q1_prod.size == 4:\n q2_q1_prod = 
q2_q1_prod.reshape((1, 4))\n\n theta_scale = q2_q1_prod[:, 0] * t_list\n\n q_total = qmult(qcompose(q2_q1_prod[:, 1:4], theta_scale), q1_list)\n\n if isinstance(q_total, int):\n return -1\n\n q_out[out_idx, :] = q_total\n\n return qnormalize(q_out)", "def make_q(v0, v2):\n return (v0.y - v2.y)/(v0.x - v2.x)", "def test_quaternion_dist_for_almost_identical_rotations():\n random_state = np.random.RandomState(0)\n\n for _ in range(5):\n a = pr.random_axis_angle(random_state)\n q1 = pr.quaternion_from_axis_angle(a)\n r = 1e-4 * random_state.randn(4)\n q2 = -pr.quaternion_from_axis_angle(a + r)\n assert_almost_equal(pr.quaternion_dist(q1, q2), 0.0, places=3)", "def test_quaternion_dist():\n random_state = np.random.RandomState(0)\n\n for _ in range(5):\n q1 = pr.quaternion_from_axis_angle(pr.random_axis_angle(random_state))\n q2 = pr.quaternion_from_axis_angle(pr.random_axis_angle(random_state))\n q1_to_q1 = pr.quaternion_dist(q1, q1)\n assert_almost_equal(q1_to_q1, 0.0)\n q2_to_q2 = pr.quaternion_dist(q2, q2)\n assert_almost_equal(q2_to_q2, 0.0)\n q1_to_q2 = pr.quaternion_dist(q1, q2)\n q2_to_q1 = pr.quaternion_dist(q2, q1)\n assert_almost_equal(q1_to_q2, q2_to_q1)\n assert_greater(2.0 * np.pi, q1_to_q2)", "def quaternion_difference(q1, q2):\n q1_abs = np.ndarray(4)\n q1_con = np.ndarray(4)\n q1_inv = np.ndarray(4)\n\n q1_con[0] = q1[0]\n q1_con[1] = -q1[1]\n q1_con[2] = -q1[2]\n q1_con[3] = -q1[3]\n\n functions.mju_mulQuat(q1_abs, q1, q1_con)\n q1_abs[0] += q1_abs[1] + q1_abs[2] + q1_abs[3]\n q1_inv = q1_con / q1_abs[0]\n\n q_diff = np.ndarray(4)\n functions.mju_mulQuat(q_diff, q2, q1_inv)\n\n return q_diff", "def test_quaternion_dist_for_identical_rotations():\n random_state = np.random.RandomState(0)\n\n for _ in range(5):\n q = pr.quaternion_from_axis_angle(pr.random_axis_angle(random_state))\n assert_array_almost_equal(pr.matrix_from_quaternion(q),\n pr.matrix_from_quaternion(-q))\n assert_equal(pr.quaternion_dist(q, -q), 0.0)", "def quaternion_subtraction(self, q1, q2):\n\n # Unpack these quaternions\n a_scalar, a_vecx, a_vecy, a_vecz = torch.unbind(q1,\n dim=-1)\n b_scalar, b_vecx, b_vecy, b_vecz = torch.unbind(q2,\n dim=-1)\n\n r_scalar = a_scalar - b_scalar\n r_vecx = a_vecx - b_vecx\n r_vecy = a_vecy - b_vecy\n r_vecz = a_vecz - b_vecz\n\n return torch.stack(\n [r_scalar, r_vecx, r_vecy, r_vecz],\n dim=-1\n )", "def test_align_sanity(self):\n # QWERTY resemblance matrix:\n R = qwerty_distance()\n diff, u, r = min_difference_align(\"polynomial\", \"exponential\", R)\n # Warning: we may (read: 'will') use another matrix!\n self.assertEqual(diff, 15)\n # Warning: there may be other optimal matchings!\n self.assertEqual(u, '--polyn-om-ial')\n self.assertEqual(r, 'exp-o-ne-ntial')", "def rotation_matrix_to_quaternion(rotation_matrix):\n trace = np.trace(rotation_matrix)\n\n if trace > 0:\n S = np.sqrt(trace + 1) * 2\n q_w = 0.25 * S\n q_x = (rotation_matrix[2, 1] - rotation_matrix[1, 2]) / S\n q_y = (rotation_matrix[0, 2] - rotation_matrix[2, 0]) / S\n q_z = (rotation_matrix[1, 0] - rotation_matrix[0, 1]) / S\n return np.asarray([q_w, q_x, q_y, q_z])\n\n elif ((rotation_matrix[0, 0] > rotation_matrix[1, 1]) and\n (rotation_matrix[0, 0] > rotation_matrix[2, 2])):\n\n S = np.sqrt(1.0 + rotation_matrix[0, 0] - rotation_matrix[1, 1] -\n rotation_matrix[2, 2]) * 2\n q_w = (rotation_matrix[2, 1] - rotation_matrix[1, 2]) / S\n q_x = 0.25 * S\n q_y = (rotation_matrix[0, 1] + rotation_matrix[1, 0]) / S\n q_z = (rotation_matrix[0, 2] + rotation_matrix[2, 0]) / S\n\n elif rotation_matrix[1, 1] > 
rotation_matrix[2, 2]:\n\n S = np.sqrt(1.0 + rotation_matrix[1, 1] - rotation_matrix[0, 0] -\n rotation_matrix[2, 2]) * 2\n q_w = (rotation_matrix[0, 2] - rotation_matrix[2, 0]) / S\n q_x = (rotation_matrix[0, 1] + rotation_matrix[1, 0]) / S\n q_y = 0.25 * S\n q_z = (rotation_matrix[1, 2] + rotation_matrix[2, 1]) / S\n\n else:\n S = np.sqrt(1.0 + rotation_matrix[2, 2] - rotation_matrix[0, 0] -\n rotation_matrix[1, 1]) * 2\n q_w = (rotation_matrix[1, 0] - rotation_matrix[0, 1]) / S\n q_x = (rotation_matrix[0, 2] + rotation_matrix[2, 0]) / S\n q_y = (rotation_matrix[1, 2] + rotation_matrix[2, 1]) / S\n q_z = 0.25 * S\n\n if q_w >= 0:\n return np.asarray([q_w, q_x, q_y, q_z])\n else:\n return -1 * np.asarray([q_w, q_x, q_y, q_z])", "def test_quaternion_rotation_consistent_with_multiplication():\n random_state = np.random.RandomState(1)\n for _ in range(5):\n v = pr.random_vector(random_state)\n q = pr.random_quaternion(random_state)\n v_im = np.hstack(((0.0,), v))\n qv_mult = pr.concatenate_quaternions(\n q, pr.concatenate_quaternions(v_im, pr.q_conj(q)))[1:]\n qv_rot = pr.q_prod_vector(q, v)\n assert_array_almost_equal(qv_mult, qv_rot)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def test_interpolate_quaternion():\n n_steps = 10\n random_state = np.random.RandomState(0)\n a1 = pr.random_axis_angle(random_state)\n a2 = pr.random_axis_angle(random_state)\n q1 = pr.quaternion_from_axis_angle(a1)\n q2 = pr.quaternion_from_axis_angle(a2)\n\n traj_q = [pr.quaternion_slerp(q1, q2, t)\n for t in np.linspace(0, 1, n_steps)]\n traj_R = [pr.matrix_from_quaternion(q) for q in traj_q]\n R_diff = np.diff(traj_R, axis=0)\n R_diff_norms = [np.linalg.norm(Rd) for Rd in R_diff]\n assert_array_almost_equal(R_diff_norms,\n R_diff_norms[0] * np.ones(n_steps - 1))", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = angles[0:3]\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def test_quaternion_diff():\n random_state = np.random.RandomState(0)\n\n for _ in range(5):\n q1 = pr.random_quaternion(random_state)\n q2 = pr.random_quaternion(random_state)\n a_diff = pr.quaternion_diff(q1, q2) # q1 - q2\n q_diff = pr.quaternion_from_axis_angle(a_diff)\n q3 = pr.concatenate_quaternions(q_diff, q2) # q1 - q2 + q2\n pr.assert_quaternion_equal(q1, 
q3)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi) = angles[0:2]\n omega = np.deg2rad(self.omega)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def max_min_distance(self, T0: SE3, T1: SE3, T2: SE3) -> (float, float, str):\n tol = 10e-10\n # T_rel_01 = T0.inv().dot(T1)\n T_rel_12 = T1.inv().dot(T2)\n\n p0 = T0.as_matrix()[0:3, 3]\n z1 = T1.as_matrix()[0:3, 2]\n x1 = T1.as_matrix()[0:3, 0]\n p1 = T1.as_matrix()[0:3, 3]\n p2 = T2.as_matrix()[0:3, 3]\n\n p0_proj = p0 - (z1.dot(p0 - p1)) * z1 # p0 projected onto T1 plane\n p2_proj = p2 - (z1.dot(p2 - p1)) * z1 # p2 projected onto T1 plane\n\n if norm(p1 - p0_proj) < tol or norm(p2_proj - p1) < tol:\n d = norm(T2.trans - T0.trans)\n return d, d, False\n\n r = norm(p2_proj - p1) # radius of circle p2_proj is on\n delta_th = arctan2(cross(x1, p2_proj - p1).dot(z1), np.dot(x1, p2_proj - p1))\n\n # closest and farthest point from p0_proj\n sol_1 = r * (p0_proj - p1) / norm(p0_proj - p1) + p1\n sol_2 = -r * (p0_proj - p1) / norm(p0_proj - p1) + p1\n sol_min = min(sol_1 - p0_proj, sol_2 - p0_proj, key=norm) + p0_proj\n sol_max = max(sol_1 - p0_proj, sol_2 - p0_proj, key=norm) + p0_proj\n\n th_max = arctan2(cross(x1, sol_max - p1).dot(z1), np.dot(x1, sol_max - p1))\n th_min = arctan2(cross(x1, sol_min - p1).dot(z1), np.dot(x1, sol_min - p1))\n\n rot_min = rot_axis(th_min - delta_th, \"z\")\n d_min = norm(T1.dot(rot_min).dot(T_rel_12).trans - T0.trans)\n\n rot_max = rot_axis(th_max - delta_th, \"z\")\n d_max = norm(T1.dot(rot_max).dot(T_rel_12).trans - T0.trans)\n\n if abs(th_max - delta_th) < tol and d_max > d_min:\n return d_max, d_min, \"below\"\n elif abs(th_min - delta_th) < tol and d_max > d_min:\n return d_max, d_min, \"above\"\n else:\n return 
d_max, d_min, False", "def Rot_to_quaternion(r: array):\n\n # Compute the trace of the rotation matrix\n tr = r[0, 0] + r[1, 1] + r[2, 2]\n\n if tr > 0:\n S = sqrt(tr + 1.0) * 2\n qw = 0.25 * S\n qx = (r[2, 1] - r[1, 2]) / S\n qy = (r[0, 2] - r[2, 0]) / S\n qz = (r[1, 0] - r[0, 1]) / S\n elif (r[0, 0] > r[1, 1]) and (r[0, 0] > r[2, 2]):\n S = sqrt(1.0 + r[0, 0] - r[1, 1] - r[2, 2]) * 2\n qw = (r[2, 1] - r[1, 2]) / S\n qx = 0.25 * S\n qy = (r[0, 1] + r[1, 0]) / S\n qz = (r[0, 2] + r[2, 0]) / S\n elif r[1, 1] > r[2, 2]:\n S = sqrt(1.0 + r[1, 1] - r[0, 0] - r[2, 2]) * 2\n qw = (r[0, 2] - r[2, 0]) / S\n qx = (r[0, 1] + r[1, 0]) / S\n qy = 0.25 * S\n qz = (r[1, 2] + r[2, 1]) / S\n else:\n S = sqrt(1.0 + r[2, 2] - r[0, 0] - r[1, 1]) * 2\n qw = (r[1, 0] - r[0, 1]) / S\n qx = (r[0, 2] + r[2, 0]) / S\n qy = (r[1, 2] + r[2, 1]) / S\n qz = 0.25 * S\n\n q = array([qw, qx, qy, qz])\n q = q * sign(qw)\n\n return q", "def quatMultiply(q1, q2):\n\tq1 = q1.flatten()\n\tq2 = q2.flatten()\n\tq3 = np.zeros(4)\n\tq3[0] = q1[0] * q2[0] - np.dot(q1[1:], q2[1:])\n\tq3[1:] = (q1[0] * q2[1:] + q2[0] * q1[1:] + np.cross(q1[1:], q2[1:]))\n\treturn (q3 / np.linalg.norm(q3)).reshape(-1, 1)", "def test_quaternion_conventions():\n q_wxyz = np.array([1.0, 0.0, 0.0, 0.0])\n q_xyzw = pr.quaternion_xyzw_from_wxyz(q_wxyz)\n assert_array_equal(q_xyzw, np.array([0.0, 0.0, 0.0, 1.0]))\n q_wxyz2 = pr.quaternion_wxyz_from_xyzw(q_xyzw)\n assert_array_equal(q_wxyz, q_wxyz2)\n\n random_state = np.random.RandomState(42)\n q_wxyz_random = pr.random_quaternion(random_state)\n q_xyzw_random = pr.quaternion_xyzw_from_wxyz(q_wxyz_random)\n assert_array_equal(q_xyzw_random[:3], q_wxyz_random[1:])\n assert_equal(q_xyzw_random[3], q_wxyz_random[0])\n q_wxyz_random2 = pr.quaternion_wxyz_from_xyzw(q_xyzw_random)\n assert_array_equal(q_wxyz_random, q_wxyz_random2)", "def quatLeftMat(q):\n\ts = q[0]\n\tv = q[1:].reshape(-1,)\n\tL = np.zeros((4, 4))\n\tL[0, 0] = s\n\tL[0, 1:] = -v\n\tL[1:, 0] = v\n\tL[1:, 1:] = s*np.eye(3) + skewMat(v)\n\treturn L", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, kappa, omega) = self.get_phi_kappa_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.kappa_opposite_rotation_matrix(phi, np.deg2rad(self.alpha), kappa, omega)", "def lsqfit(A, B):\n U = np.dot(B.T, A)\n d = det(U)\n\n # either of atom sets lies in one plane or one line\n if (d==0.0):\n return None,None;\n\n Omega = np.zeros((6, 6))\n Omega[0:3, 3:6] = U\n Omega[3:6, 0:3] = U.T\n\n w,v = eig(Omega)\n perm = np.argsort(w) # to sort in descending order, use argsort(-w)\n v = v[:, perm]\n\n if d > 0.0:\n rot = v[3:6, 5].reshape(-1,1) * v[0:3, 5] + \\\n v[3:6, 4].reshape(-1,1) * v[0:3, 4] + \\\n v[3:6, 3].reshape(-1,1) * v[0:3, 3]\n else:\n rot = v[3:6, 5].reshape(-1,1) * v[0:3, 5] + \\\n v[3:6, 4].reshape(-1,1) * v[0:3, 4] - \\\n v[3:6, 3].reshape(-1,1) * v[0:3, 3]\n\n rot *= 2.0\n A = np.dot(A,rot) - B\n rmsd = np.sqrt(np.mean(np.sum(A**2, axis=1)))\n\n return rot,rmsd", "def strassenMatx(a, b):\r\n if dimensions(a) != dimensions(b):\r\n raise Exception(f'Both matrices are not the same dimension! 
\\nMatrix A:{matrix_a} \\nMatrix B:{matrix_b}')\r\n if dimensions(a) == (2, 2):\r\n return StrassenMatrixM(a, b)\r\n\r\n A, B, C, D = split(a)\r\n E, F, G, H = split(b)\r\n\r\n num1 = strassen(A, subtract(F, H))\r\n num2 = strassen(add(A, B), H)\r\n num3 = strassen(add(C, D), E)\r\n num4 = strassen(D, subtract(G, E))\r\n num5 = strassen(add(A, D), add(E, H))\r\n num6 = strassen(subtract(B, D), add(G, H))\r\n num7 = strassen(subtract(A, C),add(E, F))\r\n\r\n tLeft = add(subtract(add(num5, num4), num2), num6)\r\n tRight = add(num1,num2)\r\n bLeft = add(num3, num4)\r\n bRight = subtract(subtract(add(num1, num5), num3), num7)\r\n\r\n # construct the new matrix from our 4 quadrants\r\n new_matrix = []\r\n for i in range(len(tRight)):\r\n new_matrix.append(tLeft[i] + tRight[i])\r\n for i in range(len(bottom_right)):\r\n new_matrix.append(bottom_left[i] + bottom_right[i])\r\n return new_matrix", "def quatmul_torch(q1, q2):\n # RoI dimension. Unsqueeze if not fitting.\n a = q1.unsqueeze(0) if q1.dim() == 1 else q1\n b = q2.unsqueeze(0) if q2.dim() == 1 else q2\n\n # Corner dimension. Unsequeeze if not fitting.\n a = a.unsqueeze(1) if a.dim() == 2 else a\n b = b.unsqueeze(1) if b.dim() == 2 else b\n\n # Quaternion product\n x = a[:, :, 1] * b[:, :, 0] + a[:, :, 2] * b[:, :, 3] - a[:, :, 3] * b[:, :, 2] + a[:, :, 0] * b[:, :, 1]\n y = -a[:, :, 1] * b[:, :, 3] + a[:, :, 2] * b[:, :, 0] + a[:, :, 3] * b[:, :, 1] + a[:, :, 0] * b[:, :, 2]\n z = a[:, :, 1] * b[:, :, 2] - a[:, :, 2] * b[:, :, 1] + a[:, :, 3] * b[:, :, 0] + a[:, :, 0] * b[:, :, 3]\n w = -a[:, :, 1] * b[:, :, 1] - a[:, :, 2] * b[:, :, 2] - a[:, :, 3] * b[:, :, 3] + a[:, :, 0] * b[:, :, 0]\n\n return torch.stack((w, x, y, z), dim=2)", "def rotmat_to_quaternion(rotmat):\n r00 = rotmat[0,0]\n r01 = rotmat[0,1]\n r02 = rotmat[0,2]\n r10 = rotmat[1,0]\n r11 = rotmat[1,1]\n r12 = rotmat[1,2]\n r20 = rotmat[2,0]\n r21 = rotmat[2,1]\n r22 = rotmat[2,2]\n\n tr = r00 + r11 + r22\n quat = np.zeros(4)\n if tr > 0:\n S = np.sqrt(tr+1.0) * 2. # S=4*qw\n quat[0] = 0.25 * S\n quat[1] = (r21 - r12) / S\n quat[2] = (r02 - r20) / S\n quat[3] = (r10 - r01) / S\n elif (r00 > r11) and (r00 > r22):\n S = np.sqrt(1.0 + r00 - r11 - r22) * 2. # S=4*qx\n quat[0] = (r21 - r12) / S\n quat[1] = 0.25 * S\n quat[2] = (r01 + r10) / S\n quat[3] = (r02 + r20) / S\n elif r11 > r22:\n S = np.sqrt(1.0 + r11 - r00 - r22) * 2. # S=4*qy\n quat[0] = (r02 - r20) / S\n quat[1] = (r01 + r10) / S\n quat[2] = 0.25 * S\n quat[3] = (r12 + r21) / S\n else:\n S = np.sqrt(1.0 + r22 - r00 - r11) * 2. 
# S=4*qz\n quat[0] = (r10 - r01) / S\n quat[1] = (r02 + r20) / S\n quat[2] = (r12 + r21) / S\n quat[3] = 0.25 * S\n\n return quat", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def test_quaternion_hamilton():\n q_ij = pr.concatenate_quaternions(pr.q_i, pr.q_j)\n assert_array_equal(pr.q_k, q_ij)\n q_ijk = pr.concatenate_quaternions(q_ij, pr.q_k)\n assert_array_equal(-pr.q_id, q_ijk)", "def align_procrustes_rt(t_a : np.ndarray, q_a : np.ndarray,\n t_ref : np.ndarray,\n use_first_k : int = 1000000,\n want_transform : bool = False):\n assert t_ref.shape[0] == t_a.shape[0]\n s, R, t = align_umeyama(t_ref[:use_first_k], t_a[:use_first_k])\n\n # # Advanced alignment\n # n_points = t_a.shape[0]\n # z = np.zeros((n_points, 3))\n # z[:, -1] = 0.05\n # t_a_aug = t_a + quaternion_rotate_vector_np(q_a, z) / s\n # t_ref_aug = t_ref + quaternion_rotate_vector_np(q_ref, z)\n #\n # _, R, t = align_umeyama(np.concatenate([t_ref, t_ref_aug], axis=0), np.concatenate([t_a * s, t_a_aug * s], axis=0), known_scale=True)\n\n def transform(t_b : np.ndarray, q_b : 
np.ndarray):\n t_align = s * t_b @ R.T + t\n Ra = Rotation.from_matrix(R)\n q_align = (Ra * Rotation.from_matrix(q_b)).as_matrix()\n return t_align, q_align\n return transform if want_transform else transform(t_a, q_a)", "def test_quaternion_invert():\n q = np.array([0.58183503, -0.75119889, -0.24622332, 0.19116072])\n q_inv = pr.q_conj(q)\n q_q_inv = pr.concatenate_quaternions(q, q_inv)\n assert_array_almost_equal(pr.q_id, q_q_inv)", "def qmult(q1, q2):\n q1i = np.array(q1)\n q2i = np.array(q2)\n\n if q1i.ndim != q2i.ndim:\n logging.error('Number of dimensions in quaternion q1 and quaternion q2 do not match')\n return -1\n\n # check to make sure input has the correct dimensions\n q1i = qvalidate(q1i, 'q1', 'qmult')\n q2i = qvalidate(q2i, 'q2', 'qmult')\n\n if isinstance(q1i, int):\n return q1i\n\n if isinstance(q2i, int):\n return q2i\n\n # make sure elements match\n if q1i.size != q2i.size:\n logging.error('Number of elements in quaternion q1 and quaternion q2 do not match')\n return -1\n\n # now the actual dirty work\n qtmp0 = q1i[:, 0] * q2i[:, 0] - q1i[:, 1] * q2i[:, 1] - q1i[:, 2] * q2i[:, 2] - q1i[:, 3] * q2i[:, 3]\n qtmp1 = q1i[:, 1] * q2i[:, 0] + q1i[:, 0] * q2i[:, 1] - q1i[:, 3] * q2i[:, 2] + q1i[:, 2] * q2i[:, 3]\n qtmp2 = q1i[:, 2] * q2i[:, 0] + q1i[:, 3] * q2i[:, 1] + q1i[:, 0] * q2i[:, 2] - q1i[:, 1] * q2i[:, 3]\n qtmp3 = q1i[:, 3] * q2i[:, 0] - q1i[:, 2] * q2i[:, 1] + q1i[:, 1] * q2i[:, 2] + q1i[:, 0] * q2i[:, 3]\n\n qout = np.array([qtmp0, qtmp1, qtmp2, qtmp3]).T\n\n return qout", "def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def test_interpolate_same_quaternion():\n n_steps = 3\n random_state = np.random.RandomState(42)\n a = pr.random_axis_angle(random_state)\n q = pr.quaternion_from_axis_angle(a)\n traj = [pr.quaternion_slerp(q, q, t) for t in np.linspace(0, 1, n_steps)]\n assert_equal(len(traj), n_steps)\n assert_array_almost_equal(traj[0], q)\n assert_array_almost_equal(traj[1], q)\n assert_array_almost_equal(traj[2], q)", "def parallel_translate(w, q1, q2, basis, mode=0):\n modes = [0, 1]\n mode = [i for i, x in enumerate(modes) if x == mode]\n if len(mode) == 0:\n mode = 0\n else:\n mode = mode[0]\n\n wtilde = w - 2 * innerprod_q2(w, q2) / innerprod_q2(q1 + q2, q1 + q2) * (q1 + q2)\n l = sqrt(innerprod_q2(wtilde, wtilde))\n\n if mode == 1:\n wbar = project_tangent(wtilde, q2, basis)\n normwbar = sqrt(innerprod_q2(wbar, wbar))\n if normwbar > 10 ** (-4):\n wbar = wbar * l / normwbar\n else:\n wbar = wtilde\n\n return (wbar)", "def quaternion_addition(self, q1, q2):\n\n \"\"\"\n Unpack these quaternions.\n \n tensor = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n print(torch.unbind(tensor)) => (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))\n \"\"\"\n a_scalar, a_vecx, a_vecy, a_vecz = torch.unbind(q1,\n dim=-1)\n b_scalar, b_vecx, b_vecy, b_vecz = torch.unbind(q2,\n dim=-1)\n\n 
r_scalar = a_scalar + b_scalar\n r_vecx = a_vecx + b_vecx\n r_vecy = a_vecy + b_vecy\n r_vecz = a_vecz + b_vecz\n\n return torch.stack(\n [r_scalar, r_vecx, r_vecy, r_vecz],\n dim=-1\n )", "def test_to_quaternion(self):\r\n R = np.identity(3)\r\n q = to_quaternion(R)\r\n zero_vec = q - np.array([0., 0., 0., 1.])\r\n self.assertAlmostEqual(np.linalg.norm(zero_vec), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n\r\n R = to_rotation(q)\r\n R2 = to_rotation(to_quaternion(R))\r\n zero_matrix = R - R2\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def _quatm(q1, q0):\n w0, x0, y0, z0 = q0\n w1, x1, y1, z1 = q1\n\n return torch.cuda.FloatTensor([\n -x1*x0 - y1*y0 - z1*z0 + w1*w0,\n x1*w0 + y1*z0 - z1*y0 + w1*x0,\n -x1*z0 + y1*w0 + z1*x0 + w1*y0,\n x1*y0 - y1*x0 + z1*w0 + w1*z0,\n ])", "def _quaternions_guard_clauses(q1: Union[list, np.ndarray], q2: Union[list, np.ndarray]) -> None:\n for label, quaternion in zip(['q1', 'q2'], [q1, q2]):\n if not isinstance(quaternion, (list, np.ndarray)):\n raise TypeError(f\"{label} must be an array. Got {type(quaternion)}\")\n q1, q2 = np.copy(q1), np.copy(q2)\n for quaternion in [q1, q2]:\n if quaternion.shape[-1] != 4:\n raise ValueError(f\"Quaternions must be of shape (N, 4) or (4,). Got {quaternion.shape}.\")\n if q1.shape != q2.shape:\n raise ValueError(f\"Cannot compare q1 of shape {q1.shape} and q2 of shape {q2.shape}\")", "def distance(cls, q0, q1):\n q = Quaternion.log_map(q0, q1)\n return q.norm", "def solve_rigid_transform(X, Y, debug=True):\n assert X.shape[0] == Y.shape[0] >= 3\n assert X.shape[1] == Y.shape[1] == 3\n A = X.T # (3,N)\n B = Y.T # (3,N)\n\n # Look for Inge Soderkvist's solution online if confused.\n meanA = np.mean(A, axis=1, keepdims=True)\n meanB = np.mean(B, axis=1, keepdims=True)\n A = A - meanA\n B = B - meanB\n covariance = B.dot(A.T)\n U, sigma, VH = np.linalg.svd(covariance) # VH = V.T, i.e. numpy transposes it for us.\n\n V = VH.T\n D = np.eye(3)\n D[2,2] = np.linalg.det( U.dot(V.T) )\n R = U.dot(D).dot(V.T)\n t = meanB - R.dot(meanA)\n RB_matrix = np.concatenate((R, t), axis=1)\n\n #################\n # SANITY CHECKS #\n #################\n\n print(\"\\nBegin debug prints for rigid transformation from A to B:\")\n print(\"meanA:\\n{}\\nmeanB:\\n{}\".format(meanA, meanB))\n print(\"Rotation R:\\n{}\\nand R^TR (should be identity):\\n{}\".format(R, (R.T).dot(R)))\n print(\"translation t:\\n{}\".format(t))\n print(\"RB_matrix:\\n{}\".format(RB_matrix))\n\n # Get residual to inspect quality of solution. Use homogeneous coordinates for A.\n # Also, recall that we're dealing with (3,N) matrices, not (N,3).\n # In addition, we don't want to zero-mean for real applications.\n A = X.T # (3,N)\n B = Y.T # (3,N)\n\n ones_vec = np.ones((1, A.shape[1]))\n A_h = np.concatenate((A, ones_vec), axis=0)\n B_pred = RB_matrix.dot(A_h)\n assert B_pred.shape == B.shape\n\n # Careful! 
Use raw_errors for the RF, but it will depend on pred-targ or targ-pred.\n raw_errors = B_pred - B # Use pred-targ, of shape (3,N)\n l2_per_example = np.sum((B-B_pred)*(B-B_pred), axis=0)\n frobenius_loss = np.mean(l2_per_example)\n\n if debug:\n print(\"\\nInput, A.T:\\n{}\".format(A.T))\n print(\"Target, B.T:\\n{}\".format(B.T))\n print(\"Predicted points:\\n{}\".format(B_pred.T))\n print(\"Raw errors, B-B_pred:\\n{}\".format((B-B_pred).T))\n print(\"Mean abs error per dim: {}\".format( (np.mean(np.abs(B-B_pred), axis=1))) )\n print(\"Residual (L2) for each:\\n{}\".format(l2_per_example.T))\n print(\"loss on data: {}\".format(frobenius_loss))\n print(\"End of debug prints for rigid transformation.\\n\")\n\n assert RB_matrix.shape == (3,4)\n return RB_matrix", "def test_small_x_rot(self):\n\n # Create a Matrix representing 90 deg x rot.\n mat = Matrix44.from_rot_x(0.001)\n # Use from_matrix44()\n quat = Quat.from_matrix44(mat)\n\n # Ensure the quat matches the small degree x rotation.\n expected = Quat.from_axis_angle_deg(Vec3(1, 0, 0), 0.001)\n self.assertAlmostEqual(quat.x, expected.x)\n self.assertAlmostEqual(quat.y, expected.y)\n self.assertAlmostEqual(quat.z, expected.z)\n self.assertAlmostEqual(quat.w, expected.w)", "def rotation_to_align_a_with_b(a, b):\n norm_a = np.linalg.norm(a)\n norm_b = np.linalg.norm(b)\n if not np.allclose(a, a/norm_a):\n print('Input a vector not unit normal - normalising')\n a = a / norm_a\n print(a)\n if not np.allclose(b, b/norm_b):\n print('Input b vector not unit normal - normalising')\n b = b / norm_b\n print(b)\n\n v = np.cross(a,b)\n #s = np.linalg.norm(v)\n c = np.dot(a,b)\n f = 1./(1. + c)\n vmat = np.array([[ 0, -v[2], v[1]],\n [ v[2], 0, -v[0]],\n [-v[1], v[0], 0]])\n return np.eye(3,3) + vmat + f *(np.matmul(vmat,vmat))", "def get_mgc_rotation(side_a, side_b):\n # Can be reused when building the MST\n k_rotations_a = 0\n k_rotations_b = 0\n mgc_specific_relation = None\n piece_swap = False\n\n # No rotation required as MGC works with Right -> Left and Bottom -> Top relations correctly\n if side_a == RIGHT:\n k_rotations_a = 0\n mgc_specific_relation = RIGHT_LEFT\n k_rotations_b = k_rotational[side_a][side_b]\n if side_a == BOTTOM:\n k_rotations_a = 0\n mgc_specific_relation = BOTTOM_TOP\n k_rotations_b = k_rotational[side_a][side_b]\n\n if side_a == LEFT:\n if side_b == RIGHT:\n # Pretty much switch positions and that will be all\n piece_swap = True\n k_rotations_a = 0\n k_rotations_b = 0\n else:\n # Make the LEFT to be RIGHT\n # Adjust side_b to become LEFT\n k_rotations_a = 2\n k_rotations_b = k_rotational[side_a][side_b]\n mgc_specific_relation = RIGHT_LEFT\n if side_a == TOP:\n if side_b == BOTTOM:\n # Pretty much switch positions and that will be all\n piece_swap = True\n k_rotations_a = 0\n k_rotations_b = 0\n else:\n # Make the TOP side to be BOTTOM\n # Adjust side_b to become TOP\n k_rotations_a = 2\n k_rotations_b = k_rotational[side_a][side_b]\n mgc_specific_relation = BOTTOM_TOP\n return k_rotations_a, k_rotations_b, mgc_specific_relation, piece_swap", "def matrix_discrepancy(centers1, rotations1, centers2, rotations2,\n angle_weight=None, center_weight=None):\n\n n = len(centers1)\n\n assert len(centers2) == n\n assert len(rotations1) == n\n assert len(rotations2) == n\n assert n >= 2\n\n if not angle_weight:\n angle_weight = 1.0\n\n if not center_weight:\n center_weight = [1.0] * n\n\n if n > 2:\n rotation_matrix, new1, mean1, RMSD, sse = \\\n besttransformation_weighted(centers1, centers2, center_weight)\n\n orientation_error = 
0\n angles = []\n for r1, r2 in zip(rotations1, rotations2):\n if r1.shape[0] > 0 and r2.shape[0] > 0:\n angle = angle_of_rotation(np.dot(np.dot(rotation_matrix, r2),\n np.transpose(r1)))\n orientation_error += np.square(angle)\n discrepancy = np.sqrt(sse + angle_weight * orientation_error) / n\n\n else:\n\n R1 = np.dot(np.transpose(rotations1[1]),rotations1[0]) # rotation from nt 0 to nt1 of 1st motif\n R2 = np.dot(np.transpose(rotations2[0]),rotations2[1]) # rotation from nt 0 to nt1 of 2nd motif\n\n rot1 = np.dot(R1,R2)\n ang1 = angle_of_rotation(rot1)\n\n rot2 = np.dot(np.transpose(R1),np.transpose(R2))\n ang2 = angle_of_rotation(rot2)\n\n T1 = np.dot(centers1[1] - centers1[0],rotations1[0])\n T2 = np.dot(centers1[0] - centers1[1],rotations1[1])\n\n S1 = np.dot(centers2[1] - centers2[0],rotations2[0])\n S2 = np.dot(centers2[0] - centers2[1],rotations2[1])\n\n D1 = T1-S1\n D2 = T2-S2\n\n discrepancy = np.sqrt(D1[0]**2 + D1[1]**2 + D1[2]**2 + (angle_weight*ang1)**2)\n discrepancy += np.sqrt(D2[0]**2 + D2[1]**2 + D2[2]**2 + (angle_weight*ang2)**2)\n\n# factor = 1/(4*np.sqrt(2)) # factor to multiply by discrepancy; faster to precompute?\n\n discrepancy = discrepancy * 0.17677669529663687\n\n return discrepancy", "def as_homogenous_transformation(self):\n r3 = self.orientation.normalize().unit_quaternion_as_r3_rotation_matrix()\n return matrix.sqr((r3[0],r3[1],r3[2],self.translation[0],\n r3[3],r3[4],r3[5],self.translation[1],\n r3[6],r3[7],r3[8],self.translation[2],\n 0,0,0,1))", "def __mul__(self, other):\n # combined rotation is the product of the two rotations (Rself*Rother):\n v1 = self.pure\n v2 = other.pure\n real = self.real * other.real - \\\n numpy.inner(v1, v2)\n cofactor1 = v1[1] * v2[2] - v1[2] * v2[1]\n cofactor2 = v1[2] * v2[0] - v1[0] * v2[2]\n cofactor3 = v1[0] * v2[1] - v1[1] * v2[0]\n pure = numpy.array([cofactor1, cofactor2, cofactor3]) \\\n + self.real * other.pure \\\n + other.real * self.pure\n # combined translation\n trans = self.getQuaternion().apply(other.trans) + self.trans\n trans[3] = 1.\n return Transformation(trans=trans, quaternion=(real, pure))", "def test_quaternion_from_matrix_180():\n a = np.array([1.0, 0.0, 0.0, np.pi])\n q = pr.quaternion_from_axis_angle(a)\n R = pr.matrix_from_axis_angle(a)\n q_from_R = pr.quaternion_from_matrix(R)\n assert_array_almost_equal(q, q_from_R)\n\n a = np.array([0.0, 1.0, 0.0, np.pi])\n q = pr.quaternion_from_axis_angle(a)\n R = pr.matrix_from_axis_angle(a)\n q_from_R = pr.quaternion_from_matrix(R)\n assert_array_almost_equal(q, q_from_R)\n\n a = np.array([0.0, 0.0, 1.0, np.pi])\n q = pr.quaternion_from_axis_angle(a)\n R = pr.matrix_from_axis_angle(a)\n q_from_R = pr.quaternion_from_matrix(R)\n assert_array_almost_equal(q, q_from_R)\n\n R = np.array(\n [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, -1.0]])\n assert_raises_regexp(\n ValueError, \"Expected rotation matrix\", pr.quaternion_from_matrix, R)\n\n R = np.array(\n [[-1.0, 0.0, 0.0],\n [0.0, 0.00000001, 1.0],\n [0.0, 1.0, -0.00000001]])\n q_from_R = pr.quaternion_from_matrix(R)", "def tf_hard_intersection(\n left: TFTBoxTensor, right: TFTBoxTensor\n) -> TFTBoxTensor:\n t1 = left\n t2 = right\n z = tf.math.maximum(t1.z, t2.z)\n Z = tf.math.minimum(t1.Z, t2.Z)\n\n return left.from_zZ(z, Z)", "def CombineRotation(a, b):\n # Use matrix multiplication: c = b*a.\n # We put 'b' on the left and 'a' on the right because,\n # just like when you use a matrix M to rotate a vector V,\n # you put the M on the left in the product M*V.\n # We can think of this as 'b' rotating all 
the 3 column vectors in 'a'.\n\n return RotationMatrix([\n [\n b.rot[0][0]*a.rot[0][0] + b.rot[1][0]*a.rot[0][1] + b.rot[2][0]*a.rot[0][2],\n b.rot[0][1]*a.rot[0][0] + b.rot[1][1]*a.rot[0][1] + b.rot[2][1]*a.rot[0][2],\n b.rot[0][2]*a.rot[0][0] + b.rot[1][2]*a.rot[0][1] + b.rot[2][2]*a.rot[0][2]\n ],\n [\n b.rot[0][0]*a.rot[1][0] + b.rot[1][0]*a.rot[1][1] + b.rot[2][0]*a.rot[1][2],\n b.rot[0][1]*a.rot[1][0] + b.rot[1][1]*a.rot[1][1] + b.rot[2][1]*a.rot[1][2],\n b.rot[0][2]*a.rot[1][0] + b.rot[1][2]*a.rot[1][1] + b.rot[2][2]*a.rot[1][2]\n ],\n [\n b.rot[0][0]*a.rot[2][0] + b.rot[1][0]*a.rot[2][1] + b.rot[2][0]*a.rot[2][2],\n b.rot[0][1]*a.rot[2][0] + b.rot[1][1]*a.rot[2][1] + b.rot[2][1]*a.rot[2][2],\n b.rot[0][2]*a.rot[2][0] + b.rot[1][2]*a.rot[2][1] + b.rot[2][2]*a.rot[2][2]\n ]\n ])", "def qmult(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor:\n w1, x1, y1, z1 = q1[:, 0], q1[:, 1], q1[:, 2], q1[:, 3]\n w2, x2, y2, z2 = q2[:, 0], q2[:, 1], q2[:, 2], q2[:, 3]\n w = w1*w2 - x1*x2 - y1*y2 - z1*z2\n x = w1*x2 + x1*w2 + y1*z2 - z1*y2\n y = w1*y2 + y1*w2 + z1*x2 - x1*z2\n z = w1*z2 + z1*w2 + x1*y2 - y1*x2\n return torch.stack((w, x, y, z), dim=1)", "def sfm(points):\n # Construct the required W/Rh/Sh matrices.\n\t\n # Get ih/jh from Rh and use them to find Q.\n\n # Use Q, Rh, and Sh to get R and S.\n\n # Extract the F 2x3 rotation matrices from R and form an (F,2,3) array of\n # rotation matrices.\n\n # Build an orthonormal matrix that rotates the first R matrix into an\n # identity matrix.\n\n # Apply the computed rotation matrix to the rotation matrices and the\n # points in S.\n\n # Return the R matrices and an ** Nx3 ** matrix containing the\n # reconstructed 3D points (note that S is 3xN).\n return None", "def compute_geometric_transform(p1,p2,best_matches):\n # How many good matches are there?\n num_bad_matches = sum([x == None for x in best_matches])\n num_good_matches = p1.shape[0]-num_bad_matches\n\n # Prepare data for fitting\n A = np.ones((3, num_good_matches))\n B = np.ones((3, num_good_matches))\n count = 0\n for i in range(p1.shape[0]):\n if best_matches[i] != None:\n A[0,count] = p1[i,0]\n A[1,count] = p1[i,1]\n A[2,count] = p1[i,2]\n B[0,count] = p2[best_matches[i],0]\n B[1,count] = p2[best_matches[i],1]\n B[2,count] = p2[best_matches[i],2]\n count += 1\n A = A.T\n B = B.T\n\n model = GeometricTransform(bScale=False)\n data = np.hstack((A,B))\n\n # Need at least seven points for a good transform fit...\n if (num_good_matches < 7):\n print 'WARNING: not enough matches to compute a geometric transform.'\n return 1, np.identity(3), np.array([0,0,0])\n elif (num_good_matches < 20):\n print 'WARNING: not enough matches to compute a robust fit.'\n return model.fit(data)\n else:\n import lflib.calibration.ransac as ransac\n try:\n bestdata = ransac.ransac(data,model,\n 10, #rand samp size (num required to fit)\n 30, #num iterations\n 4.0, #transformed dist required to be considered inlier,\n 15, #min inliers to be considered \n debug=False,return_all=False)\n return model.fit(bestdata)\n except ValueError:\n return model.fit(data)", "def test_from_two_vectors(self):\r\n for _ in range(20):\r\n v0 = np.random.randn(3)\r\n v1 = np.random.randn(3)\r\n v0 /= np.linalg.norm(v0)\r\n v1 /= np.linalg.norm(v1)\r\n\r\n q = from_two_vectors(v0, v1)\r\n R = to_rotation(q)\r\n\r\n zero_vec = R @ v0 - v1\r\n self.assertAlmostEqual(np.linalg.norm(zero_vec), 0.0)\r\n\r\n q_inv = from_two_vectors(v1, v0)\r\n R_inv = to_rotation(q_inv)\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n 
self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def square(q_1: Q) -> Q:\n\n end_q_type = f\"{q_1.q_type}²\"\n\n qxq = _commuting_products(q_1, q_1)\n\n sq_q = Q(q_type=end_q_type, representation=q_1.representation)\n sq_q.t = qxq[\"tt\"] - qxq[\"xx+yy+zz\"]\n sq_q.x = qxq[\"tx+xt\"]\n sq_q.y = qxq[\"ty+yt\"]\n sq_q.z = qxq[\"tz+zt\"]\n\n return sq_q", "def alignment(gram1, gram2):\n # BUG: this loss function causes abnormal optimization behaviors, see\n # comments in past commits\n\n alignment = frobenius_inner_prod(gram1, gram2) /\\\n m.sqrt(frobenius_inner_prod(gram1, gram1) *\n frobenius_inner_prod(gram2, gram2))\n return alignment", "def quat_multiply(q1, q2):\n q = np.array([0.0, 0.0, 0.0, 0.0])\n q[0] = q1[0]*q2[0] - q1[1]*q2[1] - q1[2]*q2[2] - q1[3]*q2[3]\n q[1] = q1[0]*q2[1] + q1[1]*q2[0] + q1[2]*q2[3] - q1[3]*q2[2]\n q[2] = q1[0]*q2[2] - q1[1]*q2[3] + q1[2]*q2[0] + q1[3]*q2[1]\n q[3] = q1[0]*q2[3] + q1[1]*q2[2] - q1[2]*q2[1] + q1[3]*q2[0]\n return q", "def correct_rsqs(b, neg_only=True):\n\t\t## B . x = partition areas, where x is vector of model R^2 for models (A, B, C, AB, AC, BC, ABC)\n\tB = np.array([[0, 0, 0, 0, 0, -1, 1], # Abc: envelope \n\t\t\t\t [0, 0, 0, 0, -1, 0, 1], # aBc: phonological feature \n\t\t\t\t [0, 0, 0, -1, 0, 0, 1], # abC: pitch\n\t\t\t\t [0, 0, -1, 0, 1, 1, -1], # ABc: envelope U phonological feature \n\t\t\t\t [0, -1, 0, 1, 0, 1, -1], # AbC: envelope U pitch\n\t\t\t\t [-1, 0, 0, 1, 1, 0, -1], # aBC: phonological feature U pitch \n\t\t\t\t [1, 1, 1, -1, -1, -1, 1], # ABC: envelope U phonological feature U pitch \n\t\t\t\t ])\n\t#maxs = A.dot(np.nan_to_num(b))\n\tmaxs = B.dot(np.nan_to_num(b))\n\tminfun = lambda x: (x ** 2).sum()\n\t#minfun = lambda x: np.abs(x).sum()\n\n\tbiases = np.zeros((maxs.shape[1], 7)) + np.nan\n\tM = b.shape[1]\n\tfor vi in range(M):\n\t\tif not (vi % 1000):\n\t\t\tprint (\"%d / %d\" % (vi, M))\n\t\t\n\t\tif neg_only:\n\t\t\tbnds = [(None, 0)] * 7\n\t\telse:\n\t\t\tbnds = [(None, None)] * 7\n\t\tres = scipy.optimize.fmin_slsqp(minfun, np.zeros(7),\n\t\t\t\t\t\t\t\t\t\t#f_ieqcons=lambda x: maxs[:,vi] - A.dot(x),\n\t\t\t\t\t\t\t\t\t\tf_ieqcons=lambda x: maxs[:,vi] - B.dot(x),\n\t\t\t\t\t\t\t\t\t\tbounds=bnds, iprint=0)\n\t\tbiases[vi] = res\n\t\n\t# compute fixed (legal) variance explained values for each model\n\tfixed_b = np.array(b) - np.array(biases).T\n\n\torig_parts = B.dot(b)\n\tfixed_parts = B.dot(fixed_b)\n\t\n\treturn biases, orig_parts, fixed_parts", "def rotation_between_anglesets(agls1, agls2):\n\tfrom math import sin, cos, pi, sqrt, atan2, acos, atan\n\tfrom numpy import array, linalg, matrix\n\timport types\n\n\tdeg2rad = pi/180.0\n\n\tdef ori2xyz(ori):\n\t\tif(type(ori) == types.ListType):\n\t\t\tphi, theta, psi = ori[:3]\n\t\telse:\n\t\t\t# it has to be Transformation object\n\t\t\td = ori.get_params(\"spider\")\n\t\t\tphi = d[\"phi\"]\n\t\t\ttheta = d[\"theta\"]\n\t\t\tpsi = d[\"psi\"]\n\t\t\"\"\"\n\t\t# This makes no sense here! 
PAP 09/2011\n\t\tif theta > 90.0:\n\t\t\tphi += 180.0\n\t\t\ttheta = 180.0-theta\n\t\t\"\"\"\n\t\tphi *= deg2rad\n\t\ttheta *= deg2rad\n\t\tx = sin(theta) * sin(phi)\n\t\ty = sin(theta) * cos(phi)\n\t\tz = cos(theta)\n\n\t\treturn [x, y, z]\n\n\tN = len(agls1)\n\tif N != len(agls2):\n\t\tprint 'Both lists must have the same length'\n\t\treturn -1\n\tif N < 2:\n\t\tprint 'At least two orientations are required in each list'\n\t\treturn -1\n\tU1, U2 = [], []\n\tfor n in xrange(N):\n\t\tp1 = ori2xyz(agls1[n])\n\t\tp2 = ori2xyz(agls2[n])\n\t\tU1.append(p1)\n\t\tU2.append(p2)\n\n\t# compute all Suv with uv = {xx, xy, xz, yx, ..., zz}\n\tSuv = [0] * 9\n\tc = 0\n\tnbori = len(U1)\n\tfor i in xrange(3):\n\t\tfor j in xrange(3):\n\t\t\tfor s in xrange(nbori):\n\t\t\t\tSuv[c] += (U2[s][i] * U1[s][j])\n\t\t\tc += 1\n\n # create matrix N\n\tN = array([[Suv[0]+Suv[4]+Suv[8], Suv[5]-Suv[7], Suv[6]-Suv[2], Suv[1]-Suv[3]], \n\t\t [Suv[5]-Suv[7], Suv[0]-Suv[4]-Suv[8], Suv[1]+Suv[3], Suv[6]+Suv[2]], \n\t\t [Suv[6]-Suv[2], Suv[1]+Suv[3], -Suv[0]+Suv[4]-Suv[8], Suv[5]+Suv[7]],\n\t\t [Suv[1]-Suv[3], Suv[6]+Suv[2], Suv[5]+Suv[7], -Suv[0]-Suv[4]+Suv[8]]])\n\n # eigenvector corresponding to the most positive eigenvalue\n\tval, vec = linalg.eig(N)\n\tq0, qx, qy, qz = vec[:, val.argmax()]\n\n # create quaternion Rot matrix \n\tr = [q0*q0-qx*qx+qy*qy-qz*qz, 2*(qy*qx+q0*qz), 2*(qy*qz-q0*qx), 0.0,\n\t 2*(qx*qy-q0*qz), q0*q0+qx*qx-qy*qy-qz*qz, 2*(qx*qz+q0*qy), 0.0,\n\t 2*(qz*qy+q0*qx), 2*(qz*qx-q0*qy), q0*q0-qx*qx-qy*qy+qz*qz, 0.0]\n\t\n\tR = Transform(r)\n\tdictR = R.get_rotation('SPIDER')\n\n\treturn dictR['phi'], dictR['theta'], dictR['psi']", "def qdist(q1: np.ndarray, q2: np.ndarray) -> float:\n q1 = np.copy(q1)\n q2 = np.copy(q2)\n if q1.shape!=q2.shape:\n raise ValueError(\"Cannot compare q1 of shape {} and q2 of shape {}\".format(q1.shape, q2.shape))\n if q1.ndim==1:\n q1 /= np.linalg.norm(q1)\n q2 /= np.linalg.norm(q2)\n if np.allclose(q1, q2) or np.allclose(-q1, q2):\n return 0.0\n return min(np.linalg.norm(q1-q2), np.linalg.norm(q1+q2))\n q1 /= np.linalg.norm(q1, axis=1)[:, None]\n q2 /= np.linalg.norm(q2, axis=1)[:, None]\n return np.r_[[np.linalg.norm(q1-q2, axis=1)], [np.linalg.norm(q1+q2, axis=1)]].min(axis=0)", "def _calculate_gravity(self, object_2, object_1):\n\n def _calculate_angle(x0, y0, x1, y1):\n \"\"\"Counts angle in radians between vector (x0, y0)(x1, y1) and horizontal axis (CW) in canvas\n coordinate system\n :returns 0 if x0 == y0 == x1 == y1 == 0\n [0.. +3.14] if vector points down\n (-3.14.. 
0] if vector points up\n \"\"\"\n if x0 == y0 == x1 == y1 == 0:\n return 0\n\n if x1 - x0 > 0: # pointing to the right semi-plane\n angle = atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 >= 0: # adding pi if pointing to the left-bottom quart\n angle = pi + atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 < 0: # subtract pi if pointing to the left-upper quart\n angle = -pi + atan((y1 - y0) / (x1 - x0))\n else: # zerodevision handle\n if y1 - y0 > 0: # pointing down\n angle = pi / 2\n else: # pointing up\n angle = -pi / 2\n\n return angle\n\n m1, x1, y1 = self._get_object_params(object_1)\n m2, x2, y2 = self._get_object_params(object_2)\n R = ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5\n F = G * m1 * m2 / R ** 2\n angle = _calculate_angle(x1, y1, x2, y2)\n Fx1 = F * cos(angle)\n Fy1 = F * sin(angle)\n Fy2, Fx2 = -Fy1, -Fx1 # vectors are exactly opposite\n return Fx2, Fy2, Fx1, Fy1", "def fidelity(A: numpy.ndarray, B: numpy.ndarray) -> float:\n Asqrtm = scipy.linalg.sqrtm(A)\n return (numpy.trace(scipy.linalg.sqrtm(Asqrtm@B@Asqrtm)).real)**2", "def optimal_string_alignment_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(optimal_string_alignment_distance(s1, s2)) / max_cost", "def find_rotation(a, b):\n a.shape = (3,)\n b.shape = (3,)\n\n a /= np.linalg.norm(a)\n b /= np.linalg.norm(b)\n \n v = np.cross(a, b)\n \n angle_AB = -1*vector_angle(a, b) \n \n print(angle_AB)\n s = np.linalg.norm(v) * np.sin(angle_AB)\n \n c = np.dot(a, b) * np.cos(angle_AB)\n \n # Rotation matrix, R = I + Vx + Vx^2 * (1-c)/s^2\n I = np.identity(3)\n Vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n \n R = I + Vx + np.linalg.matrix_power(Vx, 2) / (1+c)\n return R", "def estimate_rigid_transform(points1, points2, translation_only=False):\n centroid1 = points1.mean(axis=0)\n centroid2 = points2.mean(axis=0)\n\n if translation_only:\n rotation = np.eye(2)\n translation = centroid2 - centroid1\n\n else:\n centered_points1 = points1 - centroid1\n centered_points2 = points2 - centroid2\n\n sigma = centered_points2.T @ centered_points1\n U, _, Vt = np.linalg.svd(sigma)\n\n rotation = U @ Vt\n translation = -rotation @ centroid1 + centroid2\n\n H = np.eye(3)\n H[:2,:2] = rotation\n H[:2, 2] = translation\n return H", "def quatRightMat(q):\n\ts = q[0]\n\tv = q[1:].reshape(-1,)\n\tR = np.zeros((4, 4))\n\tR[0, 0] = s\n\tR[0, 1:] = -v\n\tR[1:, 0] = v\n\tR[1:, 1:] = s*np.eye(3) - skewMat(v)\n\treturn R", "def kspace_cholesky_solve_(self, other):\n n_points = np.max(np.array([n_lattice(self), n_lattice(other)]), axis = 0)\n self_k = transform(self, np.fft.fftn, n_points = n_points)\n other_k = transform(other, np.fft.fftn, n_points = n_points)\n\n ret = tmat()\n ret.load_nparray(np.ones((self_k.coords.shape[0],self_k.blockshape[0], other_k.blockshape[1]), dtype = np.complex), self_k.coords, safemode = False)\n #ret = self_k*1.0\n ret.blocks*=0.0\n\n #ret.blocks[:-1] = np.einsum(\"ijk,ikl->ijl\", self_k.blocks[:-1], other_k.blocks[:-1], optimize = True)\n\n for i in np.arange(len(self_k.blocks)-1):\n \n #print(np.max(np.abs(self_k.blocks[i].T-self_k.blocks[i])))\n #assert(np.max(np.abs(self_k.blocks[i].T-self_k.blocks[i]))<1e-10), \"not symmetric\"\n #assert(np.linalg.norm(self_k.blocks[i].T-self_k.blocks[i])<1e-10), \"not symmetric\"\n Mk = np.linalg.cholesky(self_k.blocks[i])\n yk = np.linalg.solve(Mk, other_k.blocks[i])\n\n\n ret.blocks[i] = np.linalg.solve(Mk.conj().T, yk)\n\n ret = transform(ret, np.fft.ifftn, n_points = 
n_points, complx = False)\n return ret", "def dist_sph(w1, w2):\n r = w1.norm(2, -1)\n theta = torch.sum((w1*w2), -1)/r**2\n return torch.acos(theta)", "def multiply_quaternions( qa, qb ):\n combined = Quaternion()\n\n combined.w = (qa.w * qb.w - qa.x * qb.x - qa.y * qb.y - qa.z * qb.z)\n combined.x = (qa.x * qb.w + qa.w * qb.x + qa.y * qb.z - qa.z * qb.y)\n combined.y = (qa.w * qb.y - qa.x * qb.z + qa.y * qb.w + qa.z * qb.x)\n combined.z = (qa.w * qb.z + qa.x * qb.y - qa.y * qb.x + qa.z * qb.w)\n return combined", "def sym_distance(cls, q0, q1):\n q = Quaternion.sym_log_map(q0, q1)\n return q.norm", "def quatreal(q):\n a = q[0,0]\n b = q[0,1]\n c = q[0,2]\n d = q[0,3]\n amat = a*np.identity(4)\n bmat = b*np.array([[0,1,0,0],[-1,0,0,0],[0,0,0,-1],[0,0,1,0]])\n cmat = c*np.array([[0,0,1,0],[0,0,0,1],[-1,0,0,0],[0,-1,0,0]])\n dmat = d*np.array([[0,0,0,1],[0,0,-1,0],[0,1,0,0],[-1,0,0,0]])\n return amat+bmat+cmat+dmat", "def get_best_transform(x,y):\n # test a simple translation\n if False:\n x = np.array([[0,0],[0,1],[1,0],[1,1]])\n y = np.array([[1,1],[1,2],[2,1],[2,2]]) \n M_correct = np.array([[1,0,1],[0,1,1],[0,0,1]])\n \n x = np.float32(x)\n y = np.float32(y)\n all_idx = [i for i in range(0,len(x))]\n combos = tuple(combinations(all_idx,4))\n min_err = np.inf\n bestM = 0\n for comb in combos:\n M = cv2.getPerspectiveTransform(x[comb,:],y[comb,:])\n xtf = transform_pt_array(x,M)\n err = avg_transform_error(xtf,y)\n if err < min_err:\n min_err = err\n bestM = M\n bestComb = comb\n return bestM", "def leastsquares(A,b,qr=qrfact.qri_mgs_piv,alpha=0.5):\n \n\n A = numpy.array(A, dtype=float)\n m,n = A.shape\n z = numpy.zeros( n )\n a = numpy.zeros( n )\n x = numpy.zeros( n )\n b = numpy.transpose(b)[0]\n\n # do the QR factorization\n try:\n Q,R = qr(A)[:2] # Some QR routines return a third permutation P solving AP=QR.\n PA = A\n except TypeError:\n Q,R,P = qr(A,alpha)[:3] # Some QR routines return a third permutation P solving AP=QR.\n AP = numpy.dot( A, P )\n\n # Step 1'': orthogonalization of b against Q\n u = b\n for j in range( 0, n ) :\n # print \"Qj = \", Q[:,j]\n # print \"u = \", u\n # print \"dot = \", numpy.dot( Q[:,j], u )\n z[j] = numpy.dot( Q[:,j], u )\n u = u - z[j] * Q[:,j]\n\n # Step 2'': iterative orthogonalization of u\n ul2norm = numpy.linalg.norm( u )\n ii = 0\n while True : # iterate\n for j in range( 0, n ) :\n a[j] = numpy.dot( Q[:,j], u )\n z[j] = z[j] + a[j]\n u = u - a[j] * Q[:,j]\n\n ii = ii + 1\n ulnorm = ul2norm\n ul2norm = numpy.linalg.norm( u )\n\n #print ul2norm, ulnorm\n \n if (ul2norm > alpha * ulnorm) or ul2norm == 0 :\n # print \"used\", ii, \"orthogonalizations\"\n break\n\n #print z\n #print R\n\n # Step 3'': use back substitution to solve Rx = z\n for i in range( n-1, -1, -1 ) :\n x[i] = z[i]\n for j in range( i+1, n ) :\n x[i] = x[i] - R[i,j] * x[j]\n x[i] = x[i] / R[i,i]\n #print x\n\n #need to permute x according to permutation matrix P\n \n return numpy.dot( P, x )", "def checkMin(oldskew,oldkurt,newskew,newkurt,oldtransform,newtransform):\n if (newskew < oldskew and newkurt < oldkurt) or (newkurt < oldkurt and (newskew-oldskew) < 2.0*(oldkurt-newkurt) ):\n return (newskew,newkurt,newtransform)\n elif newskew < oldskew:\n return (oldskew,oldkurt,oldtransform)\n elif newkurt < oldkurt:\n return (oldskew,oldkurt,oldtransform)\n else:\n return (oldskew,oldkurt,oldtransform)", "def _compute_Q(self, X_far, X_near, inner_product_far, A):\n\n if inner_product_far is None:\n inner_product_far = X_far @ X_far.T\n\n inner_product_far_near = X_far @ X_near.T\n 
inner_product_near = X_near @ X_near.T\n\n Q_block = []\n for i in range(X_near.shape[0]):\n Q_block.append([])\n for j in range(X_near.shape[0]):\n if i > j:\n Q_block[i].append(\n Q_block[j][i].T\n )\n else:\n Q_block[i].append(\n inner_product_far\n - inner_product_far_near[:, i][np.newaxis, :]\n - inner_product_far_near[:, j][:, np.newaxis]\n + inner_product_near[i, j]\n )\n return np.block(Q_block)", "def compute_hand_eye_calibration(dq_B_H_vec_inliers, dq_W_E_vec_inliers,\n scalar_part_tolerance=1e-2,\n enforce_same_non_dual_scalar_sign=True):\n n_quaternions = len(dq_B_H_vec_inliers)\n\n # Verify that the first pose is at the origin.\n assert np.allclose(dq_B_H_vec_inliers[0].dq,\n [0., 0., 0., 1.0, 0., 0., 0., 0.],\n atol=1.e-8), dq_B_H_vec_inliers[0]\n assert np.allclose(dq_W_E_vec_inliers[0].dq,\n [0., 0., 0., 1.0, 0., 0., 0., 0.],\n atol=1.e-8), dq_W_E_vec_inliers[0]\n\n if enforce_same_non_dual_scalar_sign:\n for i in range(n_quaternions):\n dq_W_E = dq_W_E_vec_inliers[i]\n dq_B_H = dq_B_H_vec_inliers[i]\n if ((dq_W_E.q_rot.w < 0. and dq_B_H.q_rot.w > 0.) or\n (dq_W_E.q_rot.w > 0. and dq_B_H.q_rot.w < 0.)):\n dq_W_E_vec_inliers[i].dq = -dq_W_E_vec_inliers[i].dq.copy()\n\n # 0. Stop alignment if there are still pairs that do not have matching\n # scalar parts.\n for j in range(n_quaternions):\n dq_B_H = dq_W_E_vec_inliers[j]\n dq_W_E = dq_B_H_vec_inliers[j]\n\n scalar_parts_B_H = dq_B_H.scalar()\n scalar_parts_W_E = dq_W_E.scalar()\n\n assert np.allclose(scalar_parts_B_H.dq, scalar_parts_W_E.dq,\n atol=scalar_part_tolerance), (\n \"Mismatch of scalar parts of dual quaternion at idx {}:\"\n \" dq_B_H: {} dq_W_E: {}\".format(j, dq_B_H, dq_W_E))\n\n # 1.\n # Construct 6n x 8 matrix T\n t_matrix = setup_t_matrix(dq_B_H_vec_inliers, dq_W_E_vec_inliers)\n\n # 2.\n # Compute SVD of T and check if only two singular values are almost equal to\n # zero. 
Take the corresponding right-singular vectors (v_7 and v_8)\n U, s, V = np.linalg.svd(t_matrix)\n\n # Check if only the last two singular values are almost zero.\n bad_singular_values = False\n for i, singular_value in enumerate(s):\n if i < 6:\n if singular_value < 5e-1:\n bad_singular_values = True\n else:\n if singular_value > 5e-1:\n bad_singular_values = True\n v_7 = V[6, :].copy()\n v_8 = V[7, :].copy()\n # print(\"v_7: {}\".format(v_7))\n # print(\"v_8: {}\".format(v_8))\n\n # 3.\n # Compute the coefficients of (35) and solve it, finding two solutions for s.\n u_1 = v_7[0:4].copy()\n u_2 = v_8[0:4].copy()\n v_1 = v_7[4:8].copy()\n v_2 = v_8[4:8].copy()\n # print(\"u_1: {}, \\nu_2: {}, \\nv_1: {}, \\nv_2: {}\".format(u_1, u_2, v_1, v_2))\n\n a = np.dot(u_1.T, v_1)\n assert a != 0.0, \"This would involve division by zero.\"\n b = np.dot(u_1.T, v_2) + np.dot(u_2.T, v_1)\n c = np.dot(u_2.T, v_2)\n # print(\"a: {}, b: {}, c: {}\".format(a, b, c))\n square_root_term = b * b - 4.0 * a * c\n\n if square_root_term < -1e-2:\n assert False, \"square_root_term is too negative: {}\".format(\n square_root_term)\n if square_root_term < 0.0:\n square_root_term = 0.0\n s_1 = (-b + np.sqrt(square_root_term)) / (2.0 * a)\n s_2 = (-b - np.sqrt(square_root_term)) / (2.0 * a)\n # print(\"s_1: {}, s_2: {}\".format(s_1, s_2))\n\n # 4.\n # For these two s values, compute s^2*u_1^T*u_1 + 2*s*u_1^T*u_2 + u_2^T*u_2\n # From these choose the largest to compute lambda_2 and then lambda_1\n solution_1 = s_1 * s_1 * np.dot(u_1.T, u_1) + 2.0 * \\\n s_1 * np.dot(u_1.T, u_2) + np.dot(u_2.T, u_2)\n solution_2 = s_2 * s_2 * np.dot(u_1.T, u_1) + 2.0 * \\\n s_2 * np.dot(u_1.T, u_2) + np.dot(u_2.T, u_2)\n\n if solution_1 > solution_2:\n assert solution_1 > 0.0, solution_1\n lambda_2 = np.sqrt(1.0 / solution_1)\n lambda_1 = s_1 * lambda_2\n else:\n assert solution_2 > 0.0, solution_2\n lambda_2 = np.sqrt(1.0 / solution_2)\n lambda_1 = s_2 * lambda_2\n # print(\"lambda_1: {}, lambda_2: {}\".format(lambda_1, lambda_2))\n\n # 5.\n # The result is lambda_1*v_7 + lambda_2*v_8\n dq_H_E = DualQuaternion.from_vector(lambda_1 * v_7 + lambda_2 * v_8)\n # Normalize the output, to get rid of numerical errors.\n dq_H_E.normalize()\n\n if (dq_H_E.q_rot.w < 0.):\n dq_H_E.dq = -dq_H_E.dq.copy()\n return (dq_H_E, s, bad_singular_values)", "def products(q_1: Qs, q_2: Qs, kind: str = \"\", reverse: bool = False) -> Qs:\n\n q_1_copy = deepcopy(q_1)\n q_2_copy = deepcopy(q_2)\n qs_left, qs_right = Qs(), Qs()\n\n # Diagonalize if need be.\n if ((q_1.rows == q_2.rows) and (q_1.columns == q_2.columns)) or (\n \"scalar_q\" in [q_1.qs_type, q_2.qs_type]\n ):\n\n if q_1.columns == 1:\n qs_right = q_2_copy\n qs_left = diagonal(q_1_copy, qs_right.rows)\n\n elif q_2.rows == 1:\n qs_left = q_1_copy\n qs_right = diagonal(q_2_copy, qs_left.columns)\n\n else:\n qs_left = q_1_copy\n qs_right = q_2_copy\n\n # Typical matrix multiplication criteria.\n elif q_1.columns == q_2.rows:\n qs_left = q_1_copy\n qs_right = q_2_copy\n\n else:\n print(\n \"Oops, cannot multiply series with row/column dimensions of {}/{} to {}/{}\".format(\n q_1.rows, q_1.columns, q_2.rows, q_2.columns\n )\n )\n\n # Operator products need to be transposed.\n operator_flag = False\n if qs_left in [\"op\", \"operator\"] and qs_right in [\"op\", \"operator\"]:\n operator_flag = True\n\n outer_row_max = qs_left.rows\n outer_column_max = qs_right.columns\n shared_inner_max = qs_left.columns\n projector_flag = (\n (shared_inner_max == 1) and (outer_row_max > 1) and (outer_column_max > 1)\n 
)\n\n result = [\n [q0(q_type=\"\") for _i in range(outer_column_max)]\n for _j in range(outer_row_max)\n ]\n\n for outer_row in range(outer_row_max):\n for outer_column in range(outer_column_max):\n for shared_inner in range(shared_inner_max):\n\n # For projection operators.\n left_index = outer_row\n right_index = outer_column\n\n if outer_row_max >= 1 and shared_inner_max > 1:\n left_index = outer_row + shared_inner * outer_row_max\n\n if outer_column_max >= 1 and shared_inner_max > 1:\n right_index = shared_inner + outer_column * shared_inner_max\n\n result[outer_row][outer_column] = add(result[outer_row][outer_column],\n product(qs_left.qs[left_index],\n qs_right.qs[right_index], kind=kind, reverse=reverse\n )\n )\n\n # Flatten the list.\n new_qs = [item for sublist in result for item in sublist]\n new_states = Qs(new_qs, rows=outer_row_max, columns=outer_column_max)\n\n if projector_flag or operator_flag:\n return transpose(new_states)\n\n else:\n return new_states", "def _findrotationmatrix(ccdata1, ccdata2):\n natoms = ccdata1.natom\n J = np.zeros((3, 3), dtype=np.float)\n\n for i in range(natoms):\n J += np.outer(ccdata1.atomcoords[0][i], ccdata2.atomcoords[0][i])\n\n U, s, V = np.linalg.svd(J)\n\n R = np.transpose(np.dot(V, np.transpose(U)))\n\n return R", "def test_rotation_isometry(self):\n import numpy\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 11, -2):\n \n s = space(curvature=k)\n\n # use a small enough magnitude to not break math for very negative K\n magic = 0.33377777373737737777\n # 1/sqrt(2)\n s2_ref = 0.707106781186547524400844362104785\n\n o = s.make_origin(2)\n p = s.make_point((1, 0), magic)\n q = s.make_point((s2_ref, s2_ref), magic)\n\n rot = space_point_transform(\n numpy.array([[1,0,0],[0,s2_ref,-s2_ref],[0,s2_ref,s2_ref]]),\n curvature=k,\n math = common_math\n )\n\n f, g, i = map(space_point_transform, (p, q, o))\n\n def check_transform_eq(t1, t2, invert=False):\n for ref in (\n s.make_point((5/13, 12/13), magic),\n s.make_point((-3/5, 4/5), magic)\n ):\n self.assertTrue(invert ^ point_isclose(\n t1(ref),\n t2(ref),\n abs_tol = 1e-12\n ))\n\n # 1/8 turn, times 8\n check_transform_eq(rot*8, i)\n\n # rotate, shift, rotate\n check_transform_eq(g, rot + f + rot * -1)\n\n # the other way\n check_transform_eq(f, rot * -1 + g + rot)", "def minimalDistance(a1, a2, b1, b2):\n adir = a2 - a1\n bdir = b2 - b1\n amid = a1 + 0.5 * adir\n s = b1 - amid\n A = np.dot(bdir, bdir)\n B_2 = np.dot(bdir, s)\n lambda_beta = - B_2 / A\n bOpt = lambda_beta * bdir + b1\n s = a1 - bOpt\n A = np.dot(adir, adir)\n B_2 = np.dot(adir, s)\n lambda_alpha = - B_2 / A\n aOpt = lambda_alpha * adir + a1\n Delta = bOpt - aOpt\n return np.sqrt(np.dot(Delta, Delta))", "def getMinimum(self):\n v1 = Vector(*self.p1)\n v2 = Vector(*self.p2)\n if v1.angle < v2.angle:\n return self.p1\n else:\n return self.p2" ]
[ "0.71130097", "0.6103423", "0.60989857", "0.6034236", "0.5984659", "0.5808696", "0.5793859", "0.5789661", "0.57519877", "0.57339966", "0.5732701", "0.56862444", "0.56858575", "0.56503344", "0.563665", "0.5622966", "0.554343", "0.5540692", "0.5537474", "0.55108356", "0.55057293", "0.5496861", "0.54470605", "0.54389596", "0.5432244", "0.54125637", "0.53605336", "0.5350723", "0.5350723", "0.53500175", "0.534825", "0.53336966", "0.5331383", "0.5318162", "0.5318162", "0.53164655", "0.5304472", "0.52903515", "0.5284076", "0.52803427", "0.5273106", "0.5253921", "0.52423686", "0.5240425", "0.52383", "0.52268887", "0.5217376", "0.52165306", "0.5211772", "0.5180225", "0.517425", "0.5166313", "0.5136972", "0.5130271", "0.51280147", "0.5127503", "0.5117387", "0.5112234", "0.510555", "0.5100915", "0.5094342", "0.50886726", "0.5085591", "0.50832915", "0.50830704", "0.50760615", "0.5053225", "0.5052123", "0.50482076", "0.5047661", "0.5047597", "0.504173", "0.5041601", "0.50409514", "0.50389934", "0.5034616", "0.50346047", "0.50226516", "0.5020877", "0.5019535", "0.50180084", "0.50154686", "0.5011058", "0.5007852", "0.49982426", "0.49891788", "0.498812", "0.49861807", "0.4981976", "0.4975043", "0.49716324", "0.49679652", "0.4966271", "0.49655667", "0.49578902", "0.49545583", "0.49506983", "0.49429744", "0.49353796", "0.4926023" ]
0.6384881
1
Returns the rotation matrix equivalent of the given quaternion. This function is used by the get_refined_rotation() function.
def get_rotation_matrix_from_quaternion(q): R = np.matrix([[q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3], 2 * (q[1] * q[2] - q[0] * q[3]), 2 * (q[1] * q[3] + q[0] * q[2])], [2 * (q[2] * q[1] + q[0] * q[3]), q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3], 2 * (q[2] * q[3] - q[0] * q[1])], [2 * (q[3] * q[1] - q[0] * q[2]), 2 * (q[3] * q[2] + q[0] * q[1]), q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3]]]) return R
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def quaternion_to_rotation_matrix(quaternion):\n\n q_w, q_x, q_y, q_z = quaternion\n sqw, sqx, sqy, sqz = np.square(quaternion)\n norm = (sqx + sqy + sqz + sqw)\n rotation_matrix = np.zeros((3, 3))\n\n # division of square length if quaternion is not already normalized\n rotation_matrix[0, 0] = (+sqx - sqy - sqz + sqw) / norm\n rotation_matrix[1, 1] = (-sqx + sqy - sqz + sqw) / norm\n rotation_matrix[2, 2] = (-sqx - sqy + sqz + sqw) / norm\n\n tmp1 = q_x * q_y\n tmp2 = q_z * q_w\n rotation_matrix[1, 0] = 2.0 * (tmp1 + tmp2) / norm\n rotation_matrix[0, 1] = 2.0 * (tmp1 - tmp2) / norm\n\n tmp1 = q_x * q_z\n tmp2 = q_y * q_w\n rotation_matrix[2, 0] = 2.0 * (tmp1 - tmp2) / norm\n rotation_matrix[0, 2] = 2.0 * (tmp1 + tmp2) / norm\n tmp1 = q_y * q_z\n tmp2 = q_x * q_w\n rotation_matrix[2, 1] = 2.0 * (tmp1 + tmp2) / norm\n rotation_matrix[1, 2] = 2.0 * (tmp1 - tmp2) / norm\n return rotation_matrix", "def quaternion_matrix(quaternion):\r\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\r\n n = numpy.dot(q, q)\r\n if n < _EPS:\r\n return numpy.identity(4)\r\n q *= math.sqrt(2.0 / n)\r\n q = numpy.outer(q, q)\r\n return numpy.array([\r\n [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],\r\n [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],\r\n [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],\r\n [ 0.0, 0.0, 0.0, 1.0]])", "def quat_to_rotmat(quat):\n norm_quat = quat\n norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]\n B = quat.size(0)\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w * x, w * y, w * z\n xy, xz, yz = x * y, x * z, y * z\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat", "def quat_to_rotmat(quat): \n norm_quat = quat\n norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w*x, w*y, w*z\n xy, xz, yz = x*y, x*z, y*z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,\n 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,\n 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat", "def quaternion_matrix(quaternion):\n q = np.array(quaternion, dtype=np.float64, copy=True)\n n = np.dot(q, q)\n if n < _EPS:\n return np.identity(4)\n q *= math.sqrt(2.0 / n)\n q = np.outer(q, q)\n return np.array([\n [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],\n [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],\n [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],\n [ 0.0, 0.0, 0.0, 1.0]])", "def quat_to_rotmat(quat):\n norm_quat = quat\n norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w * x, w * y, w * z\n xy, xz, yz = x * y, x * z, y * z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,\n 2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,\n 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat", "def quaternion_to_Rot(q: array):\n\n # Create a 
vector from the quaternion parameters (and check dimensions)\n q = array(q).reshape(4)\n\n # Normalize the quaternion\n q = divide(q, sqrt(sum(power(q, 2))))\n\n # Auxiliary matrix\n q_hat = zeros((3, 3))\n q_hat[0, 1] = -q[3]\n q_hat[0, 2] = q[2]\n q_hat[1, 2] = -q[1]\n q_hat[1, 0] = q[3]\n q_hat[2, 0] = -q[2]\n q_hat[2, 1] = q[1]\n\n # Return the rotation matrix\n return eye(3) + 2 * dot(q_hat, q_hat) + 2 * dot(q[0], q_hat)", "def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]", "def quat2mat(quat):\n norm_quat = torch.cat([quat[:, :1].detach() * 0 + 1, quat], dim=1)\n norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w * x, w * y, w * z\n xy, xz, yz = x * y, x * z, y * z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,\n 2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,\n 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat", "def quat2mat(quat):\r\n norm_quat = torch.cat([quat[:,:1].detach()*0 + 1, quat], dim=1)\r\n norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)\r\n w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]\r\n\r\n B = quat.size(0)\r\n\r\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\r\n wx, wy, wz = w*x, w*y, w*z\r\n xy, xz, yz = x*y, x*z, y*z\r\n\r\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,\r\n 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,\r\n 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).reshape(B, 3, 3)\r\n return rotMat", "def quat2mat(quat):\n norm_quat = torch.cat([quat[:,:1].detach()*0 + 1, quat], dim=1)\n norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w*x, w*y, w*z\n xy, xz, yz = x*y, x*z, y*z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,\n 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,\n 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).reshape(B, 3, 3)\n return rotMat", "def quaternion_to_matrix(quaternions):\n r, i, j, k = torch.unbind(quaternions, -1)\n two_s = 2.0 / (quaternions * quaternions).sum(-1)\n\n o = torch.stack(\n (\n 1 - two_s * (j * j + k * k),\n two_s * (i * j - k * r),\n two_s * (i * k + j * r),\n two_s * (i * j + k * r),\n 1 - two_s * (i * i + k * k),\n two_s * (j * k - i * r),\n two_s * (i * k - j * r),\n two_s * (j * k + i * r),\n 1 - two_s * (i * i + j * j),\n ),\n -1,\n )\n return o.reshape(quaternions.shape[:-1] + (3, 3))", "def _rotate_quaternion(self, q):\n self._normalise()\n return self * q * self.conjugate", "def quaternion_to_rotation_matrix(q0, q1, q2, q3) -> np:\n\n # First row of the rotation matrix\n r00 = 2 * (q0 * q0 + q1 * q1) - 1\n r01 = 2 * (q1 * q2 - q0 * q3)\n r02 = 2 * (q1 * q3 + q0 * q2)\n\n # Second row of the rotation matrix\n r10 = 2 * (q1 * q2 + q0 * q3)\n r11 = 2 * (q0 * q0 + q2 * q2) - 1\n r12 = 2 * (q2 * q3 - q0 * q1)\n\n # Third row of the rotation matrix\n r20 = 2 * (q1 * q3 - q0 * q2)\n r21 = 2 * (q2 * q3 + q0 * q1)\n r22 = 2 * (q0 * q0 + q3 * q3) - 1\n\n # 3x3 rotation matrix\n rot_matrix = np.array([[r00, r01, r02],\n [r10, r11, r12],\n [r20, r21, 
r22]])\n\n return rot_matrix", "def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot", "def rotation_mat_to_quat(R, q):\n q[0] = np.sqrt(R[0] + R[4] + R[8]) / 2\n q[1] = (R[7] - R[5]) / (4. * q[0])\n q[2] = (R[2] - R[6]) / (4. * q[0])\n q[3] = (R[3] - R[1]) / (4. * q[0])", "def rotation_matrix_to_quaternion(rotation_matrix):\n trace = np.trace(rotation_matrix)\n\n if trace > 0:\n S = np.sqrt(trace + 1) * 2\n q_w = 0.25 * S\n q_x = (rotation_matrix[2, 1] - rotation_matrix[1, 2]) / S\n q_y = (rotation_matrix[0, 2] - rotation_matrix[2, 0]) / S\n q_z = (rotation_matrix[1, 0] - rotation_matrix[0, 1]) / S\n return np.asarray([q_w, q_x, q_y, q_z])\n\n elif ((rotation_matrix[0, 0] > rotation_matrix[1, 1]) and\n (rotation_matrix[0, 0] > rotation_matrix[2, 2])):\n\n S = np.sqrt(1.0 + rotation_matrix[0, 0] - rotation_matrix[1, 1] -\n rotation_matrix[2, 2]) * 2\n q_w = (rotation_matrix[2, 1] - rotation_matrix[1, 2]) / S\n q_x = 0.25 * S\n q_y = (rotation_matrix[0, 1] + rotation_matrix[1, 0]) / S\n q_z = (rotation_matrix[0, 2] + rotation_matrix[2, 0]) / S\n\n elif rotation_matrix[1, 1] > rotation_matrix[2, 2]:\n\n S = np.sqrt(1.0 + rotation_matrix[1, 1] - rotation_matrix[0, 0] -\n rotation_matrix[2, 2]) * 2\n q_w = (rotation_matrix[0, 2] - rotation_matrix[2, 0]) / S\n q_x = (rotation_matrix[0, 1] + rotation_matrix[1, 0]) / S\n q_y = 0.25 * S\n q_z = (rotation_matrix[1, 2] + rotation_matrix[2, 1]) / S\n\n else:\n S = np.sqrt(1.0 + rotation_matrix[2, 2] - rotation_matrix[0, 0] -\n rotation_matrix[1, 1]) * 2\n q_w = (rotation_matrix[1, 0] - rotation_matrix[0, 1]) / S\n q_x = (rotation_matrix[0, 2] + rotation_matrix[2, 0]) / S\n q_y = (rotation_matrix[1, 2] + rotation_matrix[2, 1]) / S\n q_z = 0.25 * S\n\n if q_w >= 0:\n return np.asarray([q_w, q_x, q_y, q_z])\n else:\n return -1 * np.asarray([q_w, q_x, q_y, q_z])", "def _cubelet_rotation_matrix(self, cubelet_meta_info, qpos_array):\n euler_angles = qpos_array[cubelet_meta_info[\"euler_qpos\"]]\n return rotation.euler2mat(euler_angles)", "def rotation_matrix_to_quaternion(rotation_matrix: np) -> object:\n\n cosine_for_pitch = math.sqrt(rotation_matrix[0][0] ** 2 + rotation_matrix[1][0] ** 2)\n is_singular = cosine_for_pitch < 10 ** -6\n if not is_singular:\n yaw = math.atan2(rotation_matrix[1][0], rotation_matrix[0][0])\n pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch)\n roll = math.atan2(rotation_matrix[2][1], rotation_matrix[2][2])\n else:\n yaw = math.atan2(-rotation_matrix[1][2], rotation_matrix[1][1])\n pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch)\n roll = 0\n\n e = (yaw, pitch, roll)\n\n return euler_to_quaternion(e)", "def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = angles[0:3]\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def _get_rotation_matrix(transform):\n # caution: UE4 is using left-hand ortation order\n roll = np.deg2rad(-transform.rotation.roll)\n pitch = np.deg2rad(-transform.rotation.pitch)\n yaw = np.deg2rad(transform.rotation.yaw)\n 
sr, cr = np.sin(roll), np.cos(roll)\n sp, cp = np.sin(pitch), np.cos(pitch)\n sy, cy = np.sin(yaw), np.cos(yaw)\n rotation_matrix = np.array([[cy * cp, -sy * sr + cy * sp * sr, cy * sp * cr + sy * sr],\n [sy * cp, cy * sp * sr + cy * sr, -cy * sr + sy * sp * cr],\n [-sp, cp * sr, cp * cr]])\n return rotation_matrix", "def quaternion_to_angle(self, q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw", "def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def quat2mat(self,quat):\n\t quat = np.asarray(quat, dtype=np.float64)\n\t assert quat.shape[-1] == 4, \"Invalid shape quat {}\".format(quat)\n\n\t w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]\n\t Nq = np.sum(quat * quat, axis=-1)\n\t s = 2.0 / Nq\n\t X, Y, Z = x * s, y * s, z * s\n\t wX, wY, wZ = w * X, w * Y, w * Z\n\t xX, xY, xZ = x * X, x * Y, x * Z\n\t yY, yZ, zZ = y * Y, y * Z, z * Z\n\n\t mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)\n\t mat[..., 0, 0] = 1.0 - (yY + zZ)\n\t mat[..., 0, 1] = xY - wZ\n\t mat[..., 0, 2] = xZ + wY\n\t mat[..., 1, 0] = xY + wZ\n\t mat[..., 1, 1] = 1.0 - (xX + zZ)\n\t mat[..., 1, 2] = yZ - wX\n\t mat[..., 2, 0] = xZ - wY\n\t mat[..., 2, 1] = yZ + wX\n\t mat[..., 2, 2] = 1.0 - (xX + yY)\n\t return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))", "def quat2mat(self,quat):\n quat = np.asarray(quat, dtype=np.float64)\n assert quat.shape[-1] == 4, \"Invalid shape quat {}\".format(quat)\n\n w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]\n Nq = np.sum(quat * quat, axis=-1)\n s = 2.0 / Nq\n X, Y, Z = x * s, y * s, z * s\n wX, wY, wZ = w * X, w * Y, w * Z\n xX, xY, xZ = x * X, x * Y, x * Z\n yY, yZ, zZ = y * Y, y * Z, z * Z\n\n mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)\n mat[..., 0, 0] = 1.0 - (yY + zZ)\n mat[..., 0, 1] = xY - wZ\n mat[..., 0, 2] = xZ + wY\n mat[..., 1, 0] = xY + wZ\n mat[..., 1, 1] = 1.0 - (xX + zZ)\n mat[..., 1, 2] = yZ - wX\n mat[..., 2, 0] = xZ - wY\n mat[..., 2, 1] = yZ + wX\n mat[..., 2, 2] = 1.0 - (xX + yY)\n return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi) = angles[0:2]\n omega = np.deg2rad(self.omega)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector 
coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def py_rotation_from_matrix(matrix):\n return np.float32(quat2angle_axis(mat2quat(matrix)))", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)", "def quaternion_to_angle(q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw", "def quat2mat(q):\n #leila: https://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToMatrix/index.htm\n\n sz = quat.get_size(q)\n q0 = quat.getq0(q)\n q1 = quat.getq1(q)\n q2 = quat.getq2(q)\n q3 = quat.getq3(q)\n qt = quat.get_type(q)\n\n g = np.zeros((sz, 3, 3))\n g[:, 0, 0] = np.square(q0) + np.square(q1) - np.square(q2) - np.square(q3)\n g[:, 0, 1] = 2*(q1*q2 - q0*q3)\n g[:, 0, 2] = 2*(q3*q1 + q0*q2)\n g[:, 1, 0] = 2*(q1*q2 + q0*q3)\n g[:, 1, 1] = np.square(q0) - np.square(q1) + np.square(q2) - np.square(q3)\n g[:, 1, 2] = 2*(q2*q3 - q0*q1)\n g[:, 2, 0] = 2*(q3*q1 - q0*q2)\n g[:, 2, 1] = 2*(q2*q3 + q0*q1)\n g[:, 2, 2] = np.square(q0) - np.square(q1) - np.square(q2) + np.square(q3)\n\n if sz == 1:\n g = g.reshape((3, 3))\n if qt == -1:\n g = -g\n else:\n inds1 = np.where(qt == -1)\n g[inds1, :, :] = -g[inds1, :, :]\n\n return g", "def from_quaternion(self, q: np.ndarray) -> np.ndarray:\n if q is None:\n return np.identity(3)\n if q.shape[-1]!=4 or q.ndim>2:\n raise 
ValueError(\"Quaternion must be of the form (4,) or (N, 4)\")\n if q.ndim>1:\n q /= np.linalg.norm(q, axis=1)[:, None] # Normalize\n R = np.zeros((q.shape[0], 3, 3))\n R[:, 0, 0] = 1.0 - 2.0*(q[:, 2]**2 + q[:, 3]**2)\n R[:, 1, 0] = 2.0*(q[:, 1]*q[:, 2]+q[:, 0]*q[:, 3])\n R[:, 2, 0] = 2.0*(q[:, 1]*q[:, 3]-q[:, 0]*q[:, 2])\n R[:, 0, 1] = 2.0*(q[:, 1]*q[:, 2]-q[:, 0]*q[:, 3])\n R[:, 1, 1] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 3]**2)\n R[:, 2, 1] = 2.0*(q[:, 0]*q[:, 1]+q[:, 2]*q[:, 3])\n R[:, 0, 2] = 2.0*(q[:, 1]*q[:, 3]+q[:, 0]*q[:, 2])\n R[:, 1, 2] = 2.0*(q[:, 2]*q[:, 3]-q[:, 0]*q[:, 1])\n R[:, 2, 2] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 2]**2)\n return R\n q /= np.linalg.norm(q)\n return np.array([\n [1.0-2.0*(q[2]**2+q[3]**2), 2.0*(q[1]*q[2]-q[0]*q[3]), 2.0*(q[1]*q[3]+q[0]*q[2])],\n [2.0*(q[1]*q[2]+q[0]*q[3]), 1.0-2.0*(q[1]**2+q[3]**2), 2.0*(q[2]*q[3]-q[0]*q[1])],\n [2.0*(q[1]*q[3]-q[0]*q[2]), 2.0*(q[0]*q[1]+q[2]*q[3]), 1.0-2.0*(q[1]**2+q[2]**2)]])", "def quaternion2rot3d(quat):\n q01 = quat[0] * quat[1]\n q02 = quat[0] * quat[2]\n q03 = quat[0] * quat[3]\n q11 = quat[1] * quat[1]\n q12 = quat[1] * quat[2]\n q13 = quat[1] * quat[3]\n q22 = quat[2] * quat[2]\n q23 = quat[2] * quat[3]\n q33 = quat[3] * quat[3]\n\n # Obtain the rotation matrix\n rotation = np.zeros((3, 3))\n rotation[0, 0] = (1. - 2. * (q22 + q33))\n rotation[0, 1] = 2. * (q12 - q03)\n rotation[0, 2] = 2. * (q13 + q02)\n rotation[1, 0] = 2. * (q12 + q03)\n rotation[1, 1] = (1. - 2. * (q11 + q33))\n rotation[1, 2] = 2. * (q23 - q01)\n rotation[2, 0] = 2. * (q13 - q02)\n rotation[2, 1] = 2. * (q23 + q01)\n rotation[2, 2] = (1. - 2. * (q11 + q22))\n\n return rotation", "def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, kappa, omega) = self.get_phi_kappa_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.kappa_opposite_rotation_matrix(phi, np.deg2rad(self.alpha), kappa, omega)", "def quaternion_to_angle(q):\n x, y, z, w = q.x, q.y, q.z, q.w\n roll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n return yaw", "def get_rot_as_quat(m_obj):\n mfn_obj = oMa.MFnTransform(m_obj)\n\n rot = mfn_obj.rotation(asQuaternion=True)\n\n return rot", "def _quaternions(self, R):\n # Simple Wikipedia version\n # en.wikipedia.org/wiki/Rotation_matrix#Quaternion\n # For other options see math.stackexchange.com/questions/2074316/calculating-rotation-axis-from-rotation-matrix\n diag = torch.diagonal(R, dim1=-2, dim2=-1)\n Rxx, Ryy, Rzz = diag.unbind(-1)\n magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([\n Rxx - Ryy - Rzz, \n - Rxx + Ryy - Rzz, \n - Rxx - Ryy + Rzz\n ], -1)))\n _R = lambda i,j: R[:,:,:,i,j]\n signs = torch.sign(torch.stack([\n _R(2,1) - _R(1,2),\n _R(0,2) - _R(2,0),\n _R(1,0) - _R(0,1)\n ], -1))\n xyz = signs * magnitudes\n # The relu enforces a non-negative trace\n w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.\n Q = torch.cat((xyz, w), -1)\n Q = F.normalize(Q, dim=-1)\n return Q", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def toRot(q):\n R = SX.zeros(3, 3)\n qi = q[0]; qj = q[1]; qk = q[2]; qr = q[3]\n R[0, 0] = 1. - 2. * (qj * qj + qk * qk);\n R[0, 1] = 2. * (qi * qj - qk * qr);\n R[0, 2] = 2. * (qi * qk + qj * qr)\n R[1, 0] = 2. * (qi * qj + qk * qr);\n R[1, 1] = 1. 
- 2. * (qi * qi + qk * qk);\n R[1, 2] = 2. * (qj * qk - qi * qr)\n R[2, 0] = 2. * (qi * qk - qj * qr);\n R[2, 1] = 2. * (qj * qk + qi * qr);\n R[2, 2] = 1. - 2. * (qi * qi + qj * qj)\n\n return R", "def axis2rotmat(axis):\n return quat2rotmat(axis2quat(axis))", "def angle_to_quaternion(angle):\n\treturn Quaternion(*tf.transformations.quaternion_from_euler(0, 0, angle))", "def rotmat_to_quaternion(rotmat):\n r00 = rotmat[0,0]\n r01 = rotmat[0,1]\n r02 = rotmat[0,2]\n r10 = rotmat[1,0]\n r11 = rotmat[1,1]\n r12 = rotmat[1,2]\n r20 = rotmat[2,0]\n r21 = rotmat[2,1]\n r22 = rotmat[2,2]\n\n tr = r00 + r11 + r22\n quat = np.zeros(4)\n if tr > 0:\n S = np.sqrt(tr+1.0) * 2. # S=4*qw\n quat[0] = 0.25 * S\n quat[1] = (r21 - r12) / S\n quat[2] = (r02 - r20) / S\n quat[3] = (r10 - r01) / S\n elif (r00 > r11) and (r00 > r22):\n S = np.sqrt(1.0 + r00 - r11 - r22) * 2. # S=4*qx\n quat[0] = (r21 - r12) / S\n quat[1] = 0.25 * S\n quat[2] = (r01 + r10) / S\n quat[3] = (r02 + r20) / S\n elif r11 > r22:\n S = np.sqrt(1.0 + r11 - r00 - r22) * 2. # S=4*qy\n quat[0] = (r02 - r20) / S\n quat[1] = (r01 + r10) / S\n quat[2] = 0.25 * S\n quat[3] = (r12 + r21) / S\n else:\n S = np.sqrt(1.0 + r22 - r00 - r11) * 2. # S=4*qz\n quat[0] = (r10 - r01) / S\n quat[1] = (r02 + r20) / S\n quat[2] = (r12 + r21) / S\n quat[3] = 0.25 * S\n\n return quat", "def quaternion_inv(quaternion):\r\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\r\n numpy.negative(q[1:], q[1:])\r\n return q / numpy.dot(q, q)", "def Rot_to_quaternion(r: array):\n\n # Compute the trace of the rotation matrix\n tr = r[0, 0] + r[1, 1] + r[2, 2]\n\n if tr > 0:\n S = sqrt(tr + 1.0) * 2\n qw = 0.25 * S\n qx = (r[2, 1] - r[1, 2]) / S\n qy = (r[0, 2] - r[2, 0]) / S\n qz = (r[1, 0] - r[0, 1]) / S\n elif (r[0, 0] > r[1, 1]) and (r[0, 0] > r[2, 2]):\n S = sqrt(1.0 + r[0, 0] - r[1, 1] - r[2, 2]) * 2\n qw = (r[2, 1] - r[1, 2]) / S\n qx = 0.25 * S\n qy = (r[0, 1] + r[1, 0]) / S\n qz = (r[0, 2] + r[2, 0]) / S\n elif r[1, 1] > r[2, 2]:\n S = sqrt(1.0 + r[1, 1] - r[0, 0] - r[2, 2]) * 2\n qw = (r[0, 2] - r[2, 0]) / S\n qx = (r[0, 1] + r[1, 0]) / S\n qy = 0.25 * S\n qz = (r[1, 2] + r[2, 1]) / S\n else:\n S = sqrt(1.0 + r[2, 2] - r[0, 0] - r[1, 1]) * 2\n qw = (r[1, 0] - r[0, 1]) / S\n qx = (r[0, 2] + r[2, 0]) / S\n qy = (r[1, 2] + r[2, 1]) / S\n qz = 0.25 * S\n\n q = array([qw, qx, qy, qz])\n q = q * sign(qw)\n\n return q", "def quatRightMat(q):\n\ts = q[0]\n\tv = q[1:].reshape(-1,)\n\tR = np.zeros((4, 4))\n\tR[0, 0] = s\n\tR[0, 1:] = -v\n\tR[1:, 0] = v\n\tR[1:, 1:] = s*np.eye(3) - skewMat(v)\n\treturn R", "def rotation_matrix(yaw, pitch) -> TransformationMatrixType:\n return rotation_matrix_yx(math.radians(yaw + 180), math.radians(pitch))", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def rotmat2quat(R):\n rotdiff = R - R.T\n\n r = np.zeros(3)\n r[0] = -rotdiff[1, 2]\n r[1] = rotdiff[0, 2]\n r[2] = -rotdiff[0, 1]\n sintheta = np.linalg.norm(r) / 2\n r0 = np.divide(r, np.linalg.norm(r) + np.finfo(np.float32).eps)\n\n costheta = (np.trace(R) - 1) / 2\n\n theta = np.arctan2(sintheta, costheta)\n\n q = np.zeros(4)\n q[0] = np.cos(theta / 2)\n q[1:] = r0 * np.sin(theta / 2)\n return q", "def angle_to_quaternion(angle):\n return Quaternion(*tf.transformations.quaternion_from_euler(0, 0, angle))", "def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):\n if not torch.is_tensor(rotation_matrix):\n raise 
TypeError(\"Input type is not a torch.Tensor. Got {}\".format(\n type(rotation_matrix)))\n\n if len(rotation_matrix.shape) > 3:\n raise ValueError(\n \"Input size must be a three dimensional tensor. Got {}\".format(\n rotation_matrix.shape))\n if not rotation_matrix.shape[-2:] == (3, 4):\n raise ValueError(\n \"Input size must be a N x 3 x 4 tensor. Got {}\".format(\n rotation_matrix.shape))\n\n rmat_t = torch.transpose(rotation_matrix, 1, 2)\n\n mask_d2 = (rmat_t[:, 2, 2] < eps).float()\n\n mask_d0_d1 = (rmat_t[:, 0, 0] > rmat_t[:, 1, 1]).float()\n mask_d0_nd1 = (rmat_t[:, 0, 0] < -rmat_t[:, 1, 1]).float()\n\n t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2]\n q0 = torch.stack([rmat_t[:, 1, 2] - rmat_t[:, 2, 1],\n t0, rmat_t[:, 0, 1] + rmat_t[:, 1, 0],\n rmat_t[:, 2, 0] + rmat_t[:, 0, 2]], -1)\n t0_rep = t0.repeat(4, 1).t()\n\n t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2]\n q1 = torch.stack([rmat_t[:, 2, 0] - rmat_t[:, 0, 2],\n rmat_t[:, 0, 1] + rmat_t[:, 1, 0],\n t1, rmat_t[:, 1, 2] + rmat_t[:, 2, 1]], -1)\n t1_rep = t1.repeat(4, 1).t()\n\n t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2]\n q2 = torch.stack([rmat_t[:, 0, 1] - rmat_t[:, 1, 0],\n rmat_t[:, 2, 0] + rmat_t[:, 0, 2],\n rmat_t[:, 1, 2] + rmat_t[:, 2, 1], t2], -1)\n t2_rep = t2.repeat(4, 1).t()\n\n t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2]\n q3 = torch.stack([t3, rmat_t[:, 1, 2] - rmat_t[:, 2, 1],\n rmat_t[:, 2, 0] - rmat_t[:, 0, 2],\n rmat_t[:, 0, 1] - rmat_t[:, 1, 0]], -1)\n t3_rep = t3.repeat(4, 1).t()\n\n mask_c0 = mask_d2 * mask_d0_d1\n mask_c1 = mask_d2 * (1 - mask_d0_d1)\n mask_c2 = (1 - mask_d2) * mask_d0_nd1\n mask_c3 = (1 - mask_d2) * (1 - mask_d0_nd1)\n mask_c0 = mask_c0.view(-1, 1).type_as(q0)\n mask_c1 = mask_c1.view(-1, 1).type_as(q1)\n mask_c2 = mask_c2.view(-1, 1).type_as(q2)\n mask_c3 = mask_c3.view(-1, 1).type_as(q3)\n\n q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3\n q /= torch.sqrt(t0_rep * mask_c0 + t1_rep * mask_c1 + # noqa\n t2_rep * mask_c2 + t3_rep * mask_c3) # noqa\n q *= 0.5\n return q", "def matrix(self):\n return self._rotation", "def quaternion2rot3D(quaternion):\n theta, axis = quaternion2AngleAxis(quaternion)\n return angleAxis2rot3D(axis, theta)", "def quat_to_angle(self, quat):\n\t\trot = PyKDL.Rotation.Quaternion(quat.x, quat.y, quat.z, quat.w)\n\t\treturn rot.GetRPY()[2]", "def point_rotation_by_quaternion(v, q):\r\n r = [0] + v\r\n q_conj = [q[0], -q[1], -q[2], -q[3]]\r\n return quaternion_product(quaternion_product(q, r), q_conj)[1:]", "def test_to_quaternion(self):\r\n R = np.identity(3)\r\n q = to_quaternion(R)\r\n zero_vec = q - np.array([0., 0., 0., 1.])\r\n self.assertAlmostEqual(np.linalg.norm(zero_vec), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n\r\n R = to_rotation(q)\r\n R2 = to_rotation(to_quaternion(R))\r\n zero_matrix = R - R2\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def convert_rotmat2quat(request):\n\n # TODO complete the function to transform a rotation matrix to quaternion\n\n m = np.array(request.R.data).reshape(3,3)\n\n tr = np.trace(m)\n\n theta = np.arccos((tr-1)/2)\n\n response = rotmat2quatResponse()\n\n if theta==0:\n response.q.x = 0\n response.q.y = 0\n response.q.z = 0\n response.q.w = 1\n elif theta == np.pi or theta == -np.pi:\n K = 0.5 *(m +np.eye(3))\n\n sth2=np.sin(theta/2)\n\n response.q.x = np.sqrt(K[0,0])*sth2\n response.q.y = np.sqrt(K[1,1])*sth2\n response.q.z = np.sqrt(K[2,2])*sth2\n response.q.w = 0\n else:\n 
den = 2*np.sin(theta)\n\n r_x = (1/den) * (m[2,1]-m[1,2])\n r_y = (1/den) * (m[0,2]-m[2,0])\n r_z = (1/den) * (m[1,0]-m[0,1])\n\n sth2=np.sin(theta/2)\n cth2=np.cos(theta/2)\n\n response.q.x = r_x*sth2\n response.q.y = r_y*sth2\n response.q.z = r_z*sth2\n response.q.z = cth2\n\n return response", "def invert_quaternion(quaternion):\n norm = np.linalg.norm(quaternion)\n quaternion[1:] = -1.0 * quaternion[1:]\n return quaternion / norm", "def as_homogenous_transformation(self):\n r3 = self.orientation.normalize().unit_quaternion_as_r3_rotation_matrix()\n return matrix.sqr((r3[0],r3[1],r3[2],self.translation[0],\n r3[3],r3[4],r3[5],self.translation[1],\n r3[6],r3[7],r3[8],self.translation[2],\n 0,0,0,1))", "def quatActiveRot(q, v):\n\tv_q = np.zeros((4, 1))\n\tv_q[1:] = v\n\tv_qnew = quatLeftMat(q) @ quatRightMat(q).T @ v_q\n\treturn v_qnew[1:]", "def test_conversions_matrix_quaternion():\n R = np.eye(3)\n a = pr.axis_angle_from_matrix(R)\n assert_array_almost_equal(a, np.array([1, 0, 0, 0]))\n\n random_state = np.random.RandomState(0)\n for _ in range(5):\n q = pr.random_quaternion(random_state)\n R = pr.matrix_from_quaternion(q)\n pr.assert_rotation_matrix(R)\n\n q2 = pr.quaternion_from_matrix(R)\n pr.assert_quaternion_equal(q, q2)\n\n R2 = pr.matrix_from_quaternion(q2)\n assert_array_almost_equal(R, R2)\n pr.assert_rotation_matrix(R2)", "def rpy_from_quaternion(quaternion):\n (yaw, pitch, roll) = quaternion.yaw_pitch_roll\n return (roll, pitch, yaw)", "def quat_rotate(X, q):\n # repeat q along 2nd dim\n ones_x = X[[0], :, :][:, :, [0]] * 0 + 1\n q = torch.unsqueeze(q, 1) * ones_x\n\n q_conj = torch.cat([q[:, :, [0]], -1 * q[:, :, 1:4]], dim=-1)\n X = torch.cat([X[:, :, [0]] * 0, X], dim=-1)\n\n X_rot = hamilton_product(q, hamilton_product(X, q_conj))\n return X_rot[:, :, 1:4]", "def quat_rotate(X, q):\n # repeat q along 2nd dim\n ones_x = X[[0], :, :][:, :, [0]] * 0 + 1\n q = torch.unsqueeze(q, 1) * ones_x\n\n q_conj = torch.cat([q[:, :, [0]], -1 * q[:, :, 1:4]], dim=-1)\n X = torch.cat([X[:, :, [0]] * 0, X], dim=-1)\n\n X_rot = hamilton_product(q, hamilton_product(X, q_conj))\n return X_rot[:, :, 1:4]", "def quat2transform(q):\n x, y, z, w = q\n xx2 = 2 * x * x\n yy2 = 2 * y * y\n zz2 = 2 * z * z\n xy2 = 2 * x * y\n wz2 = 2 * w * z\n zx2 = 2 * z * x\n wy2 = 2 * w * y\n yz2 = 2 * y * z\n wx2 = 2 * w * x\n\n rmat = np.empty((3, 3), float)\n rmat[0,0] = 1. - yy2 - zz2\n rmat[0,1] = xy2 - wz2\n rmat[0,2] = zx2 + wy2\n rmat[1,0] = xy2 + wz2\n rmat[1,1] = 1. - xx2 - zz2\n rmat[1,2] = yz2 - wx2\n rmat[2,0] = zx2 - wy2\n rmat[2,1] = yz2 + wx2\n rmat[2,2] = 1. 
- xx2 - yy2\n\n return rmat", "def quaternion(self, name, q):\n R = self.R(name=name, q=q)\n quat = transformations.unit_vector(\n transformations.quaternion_from_matrix(matrix=R))\n return quat", "def test_matrix_from_quaternion_hamilton():\n q = np.sqrt(0.5) * np.array([1, 0, 0, 1])\n R = pr.matrix_from_quaternion(q)\n assert_array_almost_equal(\n np.array([[0, -1, 0],\n [1, 0, 0],\n [0, 0, 1]]),\n R\n )", "def get_world_rot_as_quat(m_obj):\n plug = get_world_matrix_plug(m_obj, 0)\n matrix_obj = plug.asMObject()\n matrix_data = oMa.MFnMatrixData(matrix_obj)\n matrix = matrix_data.matrix()\n\n trans_matrix = oMa.MTransformationMatrix(matrix)\n rot = trans_matrix.rotation(asQuaternion=True)\n\n return rot", "def quatLeftMat(q):\n\ts = q[0]\n\tv = q[1:].reshape(-1,)\n\tL = np.zeros((4, 4))\n\tL[0, 0] = s\n\tL[0, 1:] = -v\n\tL[1:, 0] = v\n\tL[1:, 1:] = s*np.eye(3) + skewMat(v)\n\treturn L", "def random_rotate():\n u = np.random.uniform(size=3)\n\n # Random quaternion\n q = np.array([np.sqrt(1-u[0])*np.sin(2*np.pi*u[1]),\n np.sqrt(1-u[0])*np.cos(2*np.pi*u[1]),\n np.sqrt(u[0])*np.sin(2*np.pi*u[2]),\n np.sqrt(u[0])*np.cos(2*np.pi*u[2])])\n \n # Convert the quaternion into a rotation matrix \n rotMat = np.array([[q[0]*q[0] + q[1]*q[1] - q[2]*q[2] - q[3]*q[3],\n 2*q[1]*q[2] - 2*q[0]*q[3],\n 2*q[1]*q[3] + 2*q[0]*q[2]],\n [2*q[1]*q[2] + 2*q[0]*q[3],\n q[0]*q[0] - q[1]*q[1] + q[2]*q[2] - q[3]*q[3],\n 2*q[2]*q[3] - 2*q[0]*q[1]],\n [2*q[1]*q[3] - 2*q[0]*q[2],\n 2*q[2]*q[3] + 2*q[0]*q[1],\n q[0]*q[0] - q[1]*q[1] - q[2]*q[2] + q[3]*q[3]]])\n return rotMat", "def quaternion_from_axis_angle(x, y, z, theta):\n if x == y == z == 0:\n return np.array([1, 0, 0, 0])\n axis = np.array([x, y, z])\n axis /= np.linalg.norm(axis)\n return rowan.from_axis_angle(axis, theta)", "def test_quaternion_from_matrix_180():\n a = np.array([1.0, 0.0, 0.0, np.pi])\n q = pr.quaternion_from_axis_angle(a)\n R = pr.matrix_from_axis_angle(a)\n q_from_R = pr.quaternion_from_matrix(R)\n assert_array_almost_equal(q, q_from_R)\n\n a = np.array([0.0, 1.0, 0.0, np.pi])\n q = pr.quaternion_from_axis_angle(a)\n R = pr.matrix_from_axis_angle(a)\n q_from_R = pr.quaternion_from_matrix(R)\n assert_array_almost_equal(q, q_from_R)\n\n a = np.array([0.0, 0.0, 1.0, np.pi])\n q = pr.quaternion_from_axis_angle(a)\n R = pr.matrix_from_axis_angle(a)\n q_from_R = pr.quaternion_from_matrix(R)\n assert_array_almost_equal(q, q_from_R)\n\n R = np.array(\n [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, -1.0]])\n assert_raises_regexp(\n ValueError, \"Expected rotation matrix\", pr.quaternion_from_matrix, R)\n\n R = np.array(\n [[-1.0, 0.0, 0.0],\n [0.0, 0.00000001, 1.0],\n [0.0, 1.0, -0.00000001]])\n q_from_R = pr.quaternion_from_matrix(R)", "def from_rotation_mat(rot: np.ndarray) -> Quaternion:\n if rot.shape != (3, 3):\n raise TypeError('input rot should be a 3x3 matrix')\n\n t = rot.trace()\n if t > 0:\n t = np.sqrt(t + 1.0)\n w = 0.5 * t\n t = 0.5 / t\n x = (rot[2, 1] - rot[1, 2]) * t\n y = (rot[0, 2] - rot[2, 0]) * t\n z = (rot[1, 0] - rot[0, 1]) * t\n return Quaternion(w, np.array([x, y, z]))\n else:\n i = 0\n if rot[1, 1] > rot[0, 0]:\n i = 1\n if rot[2, 2] > rot[i, i]:\n i = 2\n j = (i + 1) % 3\n k = (j + 1) % 3\n\n data = np.zeros(4) # quaternion item [x, y, z, w]\n t = np.sqrt(rot[i, i] - rot[j, j] - rot[k, k] + 1.0)\n data[i] = 0.5 * t\n t = 0.5 / t\n data[-1] = (rot[k, j] - rot[j, k]) * t # w\n data[j] = (rot[j, i] + rot[i, j]) * t\n data[k] = (rot[k, i] + rot[i, k]) * t\n return Quaternion(data[-1], data[:3])", "def 
transformation_matrix(self):\n t = np.array([[0.0], [0.0], [0.0]])\n Rt = np.hstack([self.rotation_matrix, t])\n return np.vstack([Rt, np.array([0.0, 0.0, 0.0, 1.0])])", "def rotation_matrix(theta=0, phi=0, psi=0, units='deg'):\n\n rpy = Rpy(theta,units)\n rmx = Rmx(phi, units)\n rpz = Rpz(psi, units)\n\n return np.matmul(rpy, np.matmul(rmx, rpz))", "def get_random_quaternion(self):\n random_angles = self.get_random_vector([0,0,0], [2*np.pi, 2*np.pi, 1])\n return tf.transformations.quaternion_from_euler(random_angles[0],\n random_angles[1],\n 0)", "def angle_to_rotation_matrix(angle: torch.Tensor) -> torch.Tensor:\n ang_rad = deg2rad(angle)\n cos_a: torch.Tensor = torch.cos(ang_rad)\n sin_a: torch.Tensor = torch.sin(ang_rad)\n return torch.stack([cos_a, sin_a, -sin_a, cos_a], dim=-1).view(*angle.shape, 2, 2)", "def quatPassiveRot(q, v):\n\tv_q = np.zeros((4, 1))\n\tv_q[1:] = v\n\tv_qnew = quatLeftMat(q).T @ quatRightMat(q) @ v_q\n\treturn v_qnew[1:]", "def mat2quat(mat, rot_type='proper'):\n #leila: read this: https://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions/index.htm\n ax_ang = vrrotmat2vec(mat, rot_type)\n q0 = np.cos(ax_ang[3, :]/2)\n q1 = ax_ang[0, :]*np.sin(ax_ang[3, :]/2)\n q2 = ax_ang[1, :]*np.sin(ax_ang[3, :]/2)\n q3 = ax_ang[2, :]*np.sin(ax_ang[3, :]/2)\n qtype = ax_ang[4, :]\n\n return quat.Quaternion(q0, q1, q2, q3, qtype)", "def get_rotation(self) -> np.array:\n axis = self.get_arms()[1]\n force = [self.d_x, self.d_y] # \"Force applied on the arm\"\n o_m = [self.target.x_obj - axis.x_obj, self.target.y_obj - axis.y_obj]\n torque = o_m[0]*force[1] - o_m[1] * force[0] # OM vectorial F\n if torque == 1: # Anti clockwise rotation\n rotation = np.array([[0, -1], [1, 0]])\n if torque == -1: # Clockwise rotation\n rotation = np.array([[0, 1], [-1, 0]])\n if torque == 0: # No rotation\n rotation = np.array([[0, 0], [0, 0]])\n return rotation", "def quaternion_to_RPY(q: array):\n\n roll: float = arctan2(2 * (q[0] * q[1] + q[2] * q[3]), 1 - (2 * (power(q[1], 2) + power(q[2], 2))))\n pitch: float = arcsin(2 * (q[0] * q[2] - q[3] * q[1]))\n yaw: float = arctan2(2 * (q[0] * q[3] + q[1] * q[2]), 1 - (2 * (power(q[2], 2) + power(q[3], 2))))\n\n return roll, pitch, yaw", "def base_orientation_quaternion(self):\n raise NotImplementedError('Not yet implemented!')", "def quaternion_from_rpy(roll, pitch, yaw):\n quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)\n return pyquaternion.Quaternion(numpy.roll(quaternion, 1))", "def getRotationMatrix(x, y, z, angle):\n # impossible to have a rotational matrix around (0, 0 ,0)\n if x == 0 and y == 0 and z == 0:\n raise Exception(\"Cannot have a rotation matrix around (0, 0, 0)\")\n\n # normalize vector\n vec = MatrixExtended([x, y, z])\n length = np.linalg.norm(vec)\n x /= length\n y /= length\n z /= length\n\n # some shortcuts for readability\n xx = x * x\n yy = y * y\n zz = z * z\n C = math.cos\n S = math.sin\n\n # calculate matrix elements\n e11 = xx + (1 - xx) * C(angle)\n e12 = x * y * (1 - C(angle)) - z * S(angle)\n e13 = x * z * (1 - C(angle)) + y * S(angle)\n e21 = x * y * (1 - C(angle)) + z * S(angle)\n e22 = yy + (1 - yy) * C(angle)\n e23 = y * z * (1 - C(angle)) - x * S(angle)\n e31 = x * z * (1 - C(angle)) - y * S(angle)\n e32 = y * z * (1 - C(angle)) + x * S(angle)\n e33 = zz + (1 - zz) * C(angle)\n\n return MatrixExtended([\n [e11, e12, e13, 0],\n [e21, e22, e23, 0],\n [e31, e32, e33, 0],\n [0, 0, 0, 1]])", "def euler_to_quaternion(euler: tuple) -> object:\n\n (yaw, pitch, roll) = (euler[0], 
euler[1], euler[2])\n qy = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)\n qx = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2)\n qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2)\n qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)\n return qx, qy, qz, qw", "def getDejaVuMatrix(self):\n mtx = self.getRotMatrix((4, 4), transpose=None) # from Quaternion\n mtx[3] = self.getTranslation()\n mtx[:3, 3] = mtx[3, :3]\n mtx[3, :3] = [0, 0, 0]\n return mtx", "def _quatm(q1, q0):\n w0, x0, y0, z0 = q0\n w1, x1, y1, z1 = q1\n\n return torch.cuda.FloatTensor([\n -x1*x0 - y1*y0 - z1*z0 + w1*w0,\n x1*w0 + y1*z0 - z1*y0 + w1*x0,\n -x1*z0 + y1*w0 + z1*x0 + w1*y0,\n x1*y0 - y1*x0 + z1*w0 + w1*z0,\n ])", "def RotationMatrix(theta, x, y, z, point=None):\n\treturn mach.rotation_matrix(theta, [x, y, z])", "def quatreal(q):\n a = q[0,0]\n b = q[0,1]\n c = q[0,2]\n d = q[0,3]\n amat = a*np.identity(4)\n bmat = b*np.array([[0,1,0,0],[-1,0,0,0],[0,0,0,-1],[0,0,1,0]])\n cmat = c*np.array([[0,0,1,0],[0,0,0,1],[-1,0,0,0],[0,-1,0,0]])\n dmat = d*np.array([[0,0,0,1],[0,0,-1,0],[0,1,0,0],[-1,0,0,0]])\n return amat+bmat+cmat+dmat", "def get_rot(self) -> WAQuaternion:\n pass", "def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:\n if not torch.is_tensor(quaternion):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(\n type(quaternion)))\n\n if not quaternion.shape[-1] == 4:\n raise ValueError(\"Input must be a tensor of shape Nx4 or 4. Got {}\"\n .format(quaternion.shape))\n # unpack input and compute conversion\n q1: torch.Tensor = quaternion[..., 1]\n q2: torch.Tensor = quaternion[..., 2]\n q3: torch.Tensor = quaternion[..., 3]\n sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3\n\n sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)\n cos_theta: torch.Tensor = quaternion[..., 0]\n two_theta: torch.Tensor = 2.0 * torch.where(\n cos_theta < 0.0,\n torch.atan2(-sin_theta, -cos_theta),\n torch.atan2(sin_theta, cos_theta))\n\n k_pos: torch.Tensor = two_theta / sin_theta\n k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)\n k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)\n\n angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]\n angle_axis[..., 0] += q1 * k\n angle_axis[..., 1] += q2 * k\n angle_axis[..., 2] += q3 * k\n return angle_axis", "def getRotMatrix(self, shape=(4, 4), transpose=None):\n try:\n assert (shape in [(3, 3), (4, 4), (9,), (16,)])\n except:\n raise ValueError('shape must be (3,3), (4,4), (9,) or (16,)')\n\n # get the inverse 4x4 from rotax\n mtx = rotax.rotax(numpy.array([0., 0., 0.], 'f'), self.pure, 2 * numpy.arccos(self.real))\n\n # strip if necessary\n if shape in ((3, 3), (9,)):\n mtx = [x[:3] for x in mtx]\n mtx = mtx[:3]\n\n if not transpose:\n return numpy.reshape(numpy.transpose(mtx), shape)\n else:\n return numpy.reshape(mtx, shape)", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. 
- tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. - txx - tyy\n\n return rot", "def to_quaternion(self, roll=0.0, pitch=0.0, yaw=0.0):\n t0 = math.cos(math.radians(yaw * 0.5))\n t1 = math.sin(math.radians(yaw * 0.5))\n t2 = math.cos(math.radians(roll * 0.5))\n t3 = math.sin(math.radians(roll * 0.5))\n t4 = math.cos(math.radians(pitch * 0.5))\n t5 = math.sin(math.radians(pitch * 0.5))\n\n w = t0 * t2 * t4 + t1 * t3 * t5\n x = t0 * t3 * t4 - t1 * t2 * t5\n y = t0 * t2 * t5 + t1 * t3 * t4\n z = t1 * t2 * t4 - t0 * t3 * t5\n\n return [w, x, y, z]", "def _rotation_matrix(theta):\n c, s = np.cos(theta), np.sin(theta)\n return np.array(((c, -s), (s, c)))", "def rotation_matrix(phi):\n return np.asmatrix([\n [np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]\n ])", "def rot2mat(rotation: np.ndarray) -> np.ndarray:\n rotation_radians = ndarray_to_rotation(rotation)\n pitch = np.deg2rad(rotation_radians.pitch)\n roll = np.deg2rad(rotation_radians.roll)\n yaw = np.deg2rad(rotation_radians.yaw)\n return transforms3d.euler.euler2mat(roll, pitch, yaw).T" ]
[ "0.8077829", "0.79751414", "0.7973847", "0.797261", "0.79455817", "0.79306656", "0.79097867", "0.780534", "0.7727341", "0.77201724", "0.77022475", "0.7419111", "0.74067664", "0.7311962", "0.7208562", "0.7142397", "0.71323454", "0.71113116", "0.70910096", "0.7048041", "0.69940317", "0.6971428", "0.6941373", "0.69386965", "0.69329625", "0.6930702", "0.69224066", "0.6919321", "0.6919321", "0.69137985", "0.6898834", "0.6868026", "0.6868026", "0.68509674", "0.6848649", "0.68468344", "0.6846473", "0.68445957", "0.68262446", "0.6817498", "0.6812973", "0.68067306", "0.68035173", "0.6796243", "0.6761111", "0.67498", "0.67055744", "0.6701874", "0.6696724", "0.66649985", "0.66607684", "0.6660748", "0.6653667", "0.66239536", "0.66127306", "0.6604881", "0.6587833", "0.6576355", "0.6575108", "0.6562569", "0.6549369", "0.6541474", "0.652368", "0.6518132", "0.6503336", "0.6499726", "0.6499726", "0.64790064", "0.64687574", "0.6462367", "0.64555436", "0.64419985", "0.6441698", "0.644071", "0.6418779", "0.64143133", "0.64021724", "0.6380904", "0.6353233", "0.635217", "0.6339214", "0.6329648", "0.6315079", "0.6311279", "0.6291337", "0.6285608", "0.6262913", "0.62621385", "0.6261672", "0.62490964", "0.6243126", "0.62248117", "0.6222157", "0.62094337", "0.6192065", "0.6181783", "0.6180523", "0.6165885", "0.61611146", "0.6158212" ]
0.8253612
0
Calculates the geometrical center of a set of points.
def get_geom_center(coordlist): return sum(coordlist) / len(coordlist)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def centre_of_points(list_of_points):\n\n cp = np.average(list_of_points, axis=0)\n return cp", "def pointcenter(x):\n return point(x)", "def center(self):\n points = set()\n for face in self._points:\n points.update(face)\n x_points = [point[0] for point in points]\n y_points = [point[1] for point in points]\n z_points = [point[2] for point in points]\n return \\\n (np.average(x_points), np.average(y_points), np.average(z_points))", "def calculate_center(self):\n return [(self.startX + self.endX) / 2., (self.startY + self.endY) / 2.]", "def centre(self):\n n = len(self.point)\n return Point(\n sum(map(lambda p: p.x, self.point)) / n,\n sum(map(lambda p: p.y, self.point)) / n\n )", "def calculateCenter(self):\n y_avg = int(sum(self.points[:,0])/float(len(self.points)))\n x_avg = int(sum(self.points[:,1])/float(len(self.points)))\n self.center = (x_avg, y_avg)\n return(x_avg,y_avg)", "def center(self):\n if not hasattr(self, '_center'):\n self._center = np.unique(self.points, axis=0).mean(axis=0)\n return self._center", "def center_point(self) -> tuple:\n return (self.min_lat + self.max_lat) / 2, (self.min_lon + self.max_lon) / 2", "def get_centroid(points):\n\n xs, ys = points[:, 0], points[:, 1]\n\n a = xs[:-1] * ys[1:]\n b = ys[:-1] * xs[1:]\n\n A = numpy.sum(a - b) / 2.\n\n cx = xs[:-1] + xs[1:]\n cy = ys[:-1] + ys[1:]\n\n Cx = numpy.sum(cx * (a - b)) / (6. * A)\n Cy = numpy.sum(cy * (a - b)) / (6. * A)\n\n return Cx, Cy", "def get_centre(self):\n # just get the centroid\n # perhaps try something like:\n # https://github.com/mapbox/polylabel/blob/master/polylabel.js\n # in the future\n coords = np.array([(n.x, n.y) for n in self.nodes])\n centre_x = coords[:, 0].mean()\n centre_y = coords[:, 1].mean()\n return centre_x, centre_y", "def getCenter(self):\n return Point.average(self.points)", "def calc_centroid(self, points):\n\t\tself.canvas.create_polygon(points)\n\t\tx = [i[0] for i in points] # all the math is wrong :(\n\t\ty = [j[1] for j in points]\n\n\t\tarea = x[0] * (y[0] - y[-1])\n\t\tx_hat = (x[0] ** 2) * (y[0] - y[-1]) / (2) \n\t\ty_hat = -(y[0] ** 2) * (x[0] - x[-1]) / (2)\n\n\t\tfor i in range(1, len(points) - 1):\n\t\t\tdt = length(x[i], y[i], x[i - 1], y[i - 1])\n\t\t\tdy = y[i] - y[i - 1]\n\t\t\tdx = x[i] - x[i - 1]\n\t\t\tarea += 2 * x[i] * dy\n\t\t\tx_hat += (x[i] ** 2) * dy\n\t\t\ty_hat -= (y[i] ** 2) * dx\n\n\t\tarea += x[-1] * (y[-1] - y[-2])\n\t\tx_hat += (x[-1] ** 2) * (y[-1] - y[-2]) / 2\n\t\ty_hat -= (y[-1] ** 2) * (x[-1] - x[-2]) / 2\n\t\tarea /= 2\n\t\tx_hat /=2\n\t\ty_hat /= 2\n\t\tprint(\"Area: %s\\nX: %s\\nY: %s\" % (area, x_hat/area, y_hat/area))\n\t\treturn x_hat/area, y_hat/area", "def center(x):\n if ispoint(x):\n # return pointcenter(x)\n return point(x)\n elif isline(x):\n return linecenter(x)\n elif isarc(x):\n return arccenter(x)\n elif ispoly(x):\n return polycenter(x)\n elif isgeomlist(x):\n pl = []\n for g in x:\n pl.append(center(g))\n return polycenter(pl)\n else:\n raise ValueError(\"inappropriate type for center(): \",format(x))", "def getcenter(self):\n return self.centro.cartesianas()", "def _get_center_pos(self):\n if not hasattr(self, 'lon_center'):\n raise ValueError('ERROR: You need to specify first the center position!')\n d = np.abs((self.x.lon - self.lon_center) ** 2. + (self.x.lat - self.lat_center) ** 2.)\n dmin = d.min()\n m = d == dmin\n\n idx = np.indices(d.shape)\n i = idx[0][m][0]\n j = idx[1][m][0]\n\n if (np.abs(1. - self.x.lon[i, j] / self.lon_center) > 0.05) or (np.abs(1. 
- self.x.lat[i, j] / self.lat_center) > 0.05): # at least 5% acc.\n print 'lon: ', self.x.lon[i, j], self.lon_center\n print 'lat: ', self.x.lat[i, j], self.lat_center\n i = None\n j = None\n return i, j", "def CenterOfMass(points):\n A = AreaOfPolygon(points)\n N = len(points)\n cx = 0\n cy = 0\n for i in xrange(0, N):\n x_i = points[i][0]\n y_i = points[i][1]\n x_ip1 = points[(i+1) % N][0]\n y_ip1 = points[(i+1) % N][1]\n part = (x_i * y_ip1 - x_ip1 * y_i)\n cx += ((x_i + x_ip1) * part)\n cy += ((y_i + y_ip1) * part)\n return (cx/(6*A), cy/(6*A), abs(A))", "def get_center(self):\n lon, lat = self.coordinates\n\n dimx = lon.shape[0]\n dimy = lon.shape[1]\n \n return (lon[dimx/2][dimy/2],lat[dimx/2][dimy/2])", "def center_coords(self):\n coords = set()\n for x in range(self.radius, self.container.width - self.radius):\n for y in range(self.radius, self.container.height - self.radius):\n coords.add((x, y))\n\n return coords", "def centroid_of_points(pts):\n xs, ys, zs = 0, 0, 0\n for pt in pts:\n xs += pt[0]\n ys += pt[1]\n if len(pt) > 2:\n zs += pt[2]\n if len(pts) > 0:\n xs /= len(pts)\n ys /= len(pts)\n if len(pts[0]) > 2:\n zs /= len(pts)\n return xs, ys, zs\n return xs, ys", "def get_center_location(self):\n latitude = 0\n longitude = 0\n for centroid in self.centroids:\n latitude += centroid[0]\n longitude += centroid[1]\n return [latitude / len(self.centroids), longitude / len(self.centroids)]", "def centroid_points(points):\n p = float(len(points))\n x, y, z = zip(*points)\n return sum(x) / p, sum(y) / p, sum(z) / p", "def centroid(self):\n x, y = self.coordinates\n A = 0.5 * sum(x[i]*y[i+1] - x[i+1]*y[i] for i in range(-1, len(self)-1))\n cx = sum((x[i] + x[i+1]) * (x[i]*y[i+1] - x[i+1]*y[i])\n for i in range(-1, len(self)-1)) / (6*A)\n cy = sum((y[i] + y[i+1]) * (x[i]*y[i+1] - x[i+1]*y[i])\n for i in range(-1, len(self)-1)) / (6*A)\n return Point((cx, cy), properties=self.properties, crs=self.crs)", "def get_center_point(self):\n raise NotImplementedError()", "def get_center(self):\n\n x = np.array(self.x)\n y = np.array(self.y)\n return np.mean(x), np.mean(y)", "def get_center_coordinates(self):\n totalX = 0\n totalY = 0\n totalZ = 0\n for atom in self.get_atoms():\n totalX += atom.get_x()\n totalY += atom.get_y()\n totalZ += atom.get_z()\n \n xCenter = totalX / len(self.get_atoms())\n yCenter = totalY / len(self.get_atoms())\n zCenter = totalZ / len(self.get_atoms())\n \n return xCenter, yCenter, zCenter", "def compute_center(self, mole_object):\r\n if mole_object.plugin_type == \"PyMOL\":\r\n sel = PymolPlugin.PymolPlugin().get_model('all')\r\n cnt = len(sel.atom)\r\n\r\n else:\r\n sel = ChimeraPlugin.ChimeraPlugin().select()\r\n cnt = len(ChimeraPlugin.ChimeraPlugin().current_atoms())\r\n\r\n cent_x = 0\r\n cent_y = 0\r\n cent_z = 0\r\n\r\n if cnt == 0:\r\n return 0, 0, 0\r\n\r\n if mole_object.plugin_type == \"PyMOL\":\r\n\r\n for a in sel.atom:\r\n cent_x += a.coord[0]\r\n cent_y += a.coord[1]\r\n cent_z += a.coord[2]\r\n\r\n else:\r\n\r\n for a in ChimeraPlugin.ChimeraPlugin().current_atoms():\r\n cent_x += a.coord()[0]\r\n cent_y += a.coord()[1]\r\n cent_z += a.coord()[2]\r\n\r\n cent_x /= cnt\r\n cent_y /= cnt\r\n cent_z /= cnt\r\n\r\n self.point_x.component('entryfield').setentry(cent_x)\r\n self.point_y.component('entryfield').setentry(cent_y)\r\n self.point_z.component('entryfield').setentry(cent_z)\r\n\r\n self.show_crisscross(mole_object)", "def ComputeCentroid(self, vtkPoints, int_tuple, p_float=..., p_float=..., p_float=...):\n ...", "def center(self, obj):\n mn0 
= self.master.xy >= obj.center\n mn1 = self.master.xy <= obj.center\n\n point_list = [self.master.xy[mn0], self.master.xy[mn1], self.master.xy[mn0[0], mn1[1]], self.master.xy[mn1[0], mn0[1]]] # 4 physical points near the center coordinate.\n dist_list = []\n idx = 0\n for point in point_list:\n dist_list.append([idx, np.linalg.norm(point - obj.center)]) # Calculate Euclidean distances.\n idx += 1\n dist_sorted = sorted(dist_list, key=lambda distance : distance[1]) # Sort distances in ascending order.\n return self.master.mn(point_list[dist_sorted[0][0]]) # Convert the closest point to abstract coordinate and then return.", "def compute_platform_center(self):\n base = self.platform_vertices[1] - self.platform_vertices[0] # base of triangle, vector\n x = np.linalg.norm(base) # base length, scalar\n m = self.platform_vertices[0] + base/2 # midpoint on the base, vector\n cm = x/(2*np.sqrt(3)) # length from m to center c, scalar\n cm_dir = self.platform_vertices[2] - m # direction to center from midpoint, vector\n cm_vec = cm_dir*cm/np.linalg.norm(cm_dir) # make cm_dir a unit vector and multiply by the length, vector\n c = m + cm_vec # center position, vector\n return c", "def find_center(self):\n x = np.int(np.rint((len(self.grid[0][0]))/2))\n center = np.array([x, x, x])\n self.grid[center[0]][center[1]][center[2]] = 1\n return self.grid, center", "def center(self):\n xc = (self.x.max() + self.x.min())/2.\n yc = (self.y.max() + self.y.min())/2.\n return (xc, yc)", "def GetCentroid(self, p_float=..., p_float=..., p_float=...):\n ...", "def center_data(x: npt.NDArray, y: npt.NDArray) -> Tuple[npt.NDArray, ...]:\n centroid = np.array([x.mean(), y.mean()])\n xc = x - centroid[0]\n yc = y - centroid[1]\n return xc, yc, centroid", "def center_of_coor(coordinates):\n return (np.sum(coordinates, axis=0) / coordinates.shape[0])", "def get_center(self,lonlat=False):\n lon, lat = np.asarray(self.rotator.rots[0][0:2])*180/pi\n if lonlat: return lon,lat\n else: return pi/2.-lat*dtor, lon*dtor", "def centroid(self) -> PointValue:\n return ops.GeoCentroid(self).to_expr()", "def _get_x_center_pts(halfway_x, halfway_y):\n return reduce(iconcat, _get_pt_tuple(range(1, halfway_x),\n range(1, halfway_y)))", "def find_center(self) -> tuple:\r\n \r\n # Add up all the x values of pixels in the plant\r\n # Then divide by total pixels in the plant\r\n avg_x = sum([i[0] for i in self.cluster]) / len(self.cluster)\r\n\r\n # Add up all the y values of pixels in the plant\r\n # Then divide by total pixels in the plant\r\n avg_y = sum([i[1] for i in self.cluster]) / len(self.cluster)\r\n\r\n self.center = (int(round(avg_x)), int(round(avg_y)))\r\n \r\n # return the results in a tuple of integers\r\n return self.center", "def center(self) -> Point:\n return Point(*np.sum(self.normalized_array[:, :-1], axis=0))", "def find_center_points(df, lat1, long1, lat2, long2):\n df['center_latitude'] = (df[lat1].values + df[long2].values) / 2\n df['center_longitude'] = (df[long1].values + df[lat2].values) / 2\n\n return df", "def center(self):\n bounds = self.bounds\n x = (bounds[1] + bounds[0]) / 2\n y = (bounds[3] + bounds[2]) / 2\n z = (bounds[5] + bounds[4]) / 2\n return [x, y, z]", "def center_point(polyline):\n\tpts = unique(polyline.points)\n\treturn sum(pts) / len(pts)", "def get_center_coords(antipodes):\n lat = antipodes[\"minx\"] + (antipodes[\"maxx\"] - antipodes[\"minx\"]) / 2\n lon = antipodes[\"miny\"] + (antipodes[\"maxy\"] - antipodes[\"miny\"]) / 2\n return [lon, lat]", "def center(self):\n return self.center_x, 
self.center_y", "def centroid(self): # -> BaseGeometry:\n ...", "def centers(self):\n return self.xc, self.yc", "def calc_centroid(self):\n sumX = 0.0\n sumY = 0.0\n dis = 0.0\n for p in self.points:\n sumX += p.x\n sumY += p.y\n d = p.distance(self.centroid)\n if dis < d: dis = d\n # radius is the longest distance within points\n self.radius = dis + 0.1\n size = len(self.points)\n if size:\n return Point(x=float(sumX)/size, y=float(sumY)/size)\n else:\n return self.centroid", "def _find_barycenter(_in_points: List[Point]) -> Barycenter:\n return Barycenter(Point(sum([p.x for p in _in_points]) / len(_in_points), sum([p.y for p in _in_points]) / len(_in_points)))", "def find_incentre(point_1, point_2, point_3):\r\n _a = np.linalg.norm([point_1.x - point_2.x, point_1.y - point_2.y])\r\n _b = np.linalg.norm([point_2.x - point_3.x, point_2.y - point_3.y])\r\n _c = np.linalg.norm([point_1.x - point_3.x, point_1.y - point_3.y])\r\n _p = _a + _b + _c\r\n\r\n centre_x = (_a * point_3.x + _b * point_1.x + _c * point_2.x)/ _p\r\n centre_y = (_a * point_3.y + _b * point_1.y + _c * point_2.y)/ _p\r\n\r\n return Point(centre_x, centre_y)", "def find_center(self):\n return(Point(self.corner.x + self.width/2.0, self.corner.y + self.height/2.0))", "def getCentroid(self):\n centroid = 0.0\n sumMagnitude = 0.0\n\n for i in range(0,self.nUniquePoints):\n freq,magnitude = self.fDomain[i]\n\n centroid += freq*magnitude\n sumMagnitude += magnitude\n \n centroid /= sumMagnitude\n return centroid", "def center(self):\n\n ca_atoms = self.ca_atoms\n ca_atom_vectors = ca_atoms[\"ca.atom\"].to_list()\n ca_atom_vectors = [i for i in ca_atom_vectors if i is not None]\n centroid = self.center_of_mass(ca_atom_vectors, geometric=False)\n centroid = Vector(centroid)\n\n return centroid", "def calc_centroid(self):\n num = 0\n centroid = numpy.zeros(3, float)\n for atm in self:\n if atm.position is not None:\n centroid += atm.position\n num += 1\n return centroid / num", "def midpoint_of_points(pnts: Iterable[Point]) -> Point:\n num = len(pnts)\n x = sum(pnt.x for pnt in pnts)/num\n y = sum(pnt.y for pnt in pnts)/num\n z = sum(pnt.z for pnt in pnts)/num\n return Point(x, y, z)", "def get_center(self):\n x = round(self.x_pos)\n y = round(self.y_pos)\n return [int(x),int(y)]", "def center(x):\n return x - x.mean()", "def center(self):\n return (self.centerx, self.centery)", "def get_cell_center_coordinates(self):\n import numpy as np\n x1, x2, x3 = np.ix_(*self.cell_center_coordinates)\n if self.geometry == 'cartesian':\n x, y, z = x1, x2, x3\n elif self.geometry == 'spherical':\n x = x1 * np.sin(x2) * np.cos(x3)\n y = x1 * np.sin(x2) * np.sin(x3)\n z = x1 * np.cos(x2)\n return x, y, z", "def points_to_bbox_center(p):\n bbox = points_to_bbox(p)\n return (bbox[0] + bbox[2]) / 2.0, (bbox[1] + bbox[3]) / 2.0", "def center_of_mass(points):\n # break into many triangles\n # each point is part of two triangles\n cor = [sum(points) / len(points)]\n mass_points = []\n area = 0\n for i in range(len(points) - 1):\n triangle = cor + points[i:i + 2]\n # print(triangle)\n mass_points.append(build_triangle_point_mass(triangle))\n area += shoelace_area(triangle)\n # print(triangle, area)\n mass_points.append(build_triangle_point_mass(cor + [points[-1], points[0]]))\n area += shoelace_area(cor + [points[-1], points[0]])\n return Vector2D(*find_com(*zip(*mass_points))), area", "def getCentroid(self):\n if len(self.points) == 0:\n # None\n return None\n elif len(self.points) == 1:\n # Same point\n return self.points[0]\n elif len(self.points) 
== 2:\n # Middle of a segment\n return Segment(*self.points).middle\n elif len(self.points) == 3:\n # Intersection point of 2 medians\n return Point.average(self.points)\n else:\n # Geometric decomposition to compute centroids (wikipedia)\n n = len(self.points) # n is the number of points\n # There are n-2 forms\n forms = [Form([self.points[0]] + self.points[i:i + 2]) for i in range(1, n - 1)]\n # So n-2 centroids and areas, except if some of the points are one upon another, no area is null\n centroids = [form.center for form in forms]\n areas = [form.area for form in forms]\n # we compute the average centroid weighted by the areas\n weighted_centroid = Point.sum([a * c for (c, a) in zip(centroids, areas)])\n centroid = weighted_centroid / sum(areas)\n return centroid", "def _center_position(ephemerides: List[Ephemeris]) -> Tuple[Quantity, Quantity]:\n # find the RA, dec center\n center_ra, center_dec = EphemerisService.center_position(ephemerides)\n\n return center_ra, center_dec", "def get_center(self):\n return center_points(np.expand_dims(self.best_box, axis=0))[0]", "def _get_centre(self, gdf):\n bounds = gdf[\"geometry\"].bounds\n centre_x = (bounds[\"maxx\"].max() + bounds[\"minx\"].min()) / 2\n centre_y = (bounds[\"maxy\"].max() + bounds[\"miny\"].min()) / 2\n return centre_x, centre_y", "def center(coords):\n for c in coords:\n if 'avg' not in locals():\n avg = c\n else:\n avg += c\n return avg / len(coords)", "def compute_centers(points, labels):\n df = pd.DataFrame(points)\n return df.groupby(labels).mean().values", "def centroid(self):\n return _property_geo(arctern.ST_Centroid, self)", "def calc_centroid(x1, y1, x2, y2):\n x = x1 + ((x2 - x1) / 2.0)\n y = y1 + ((y2 - y1) / 2.0)\n return [x, y]", "def xcenters(self):\n return self.centers[0]", "def center(self):\n try: \n return self._center\n except AttributeError:\n self._center = vector(ZZ, [0]*self.ambient_dim())\n for v in self.vertex_generator(): self._center += v.vector()\n self._center /= self.n_vertices()\n return self._center", "def find_center(self, drone_coords):\n lat_sum = 0\n lon_sum = 0\n\n for coord in drone_coords.values():\n lat_sum += coord.lat\n lon_sum += coord.lon\n\n center_lat = lat_sum / len(drone_coords)\n center_lon = lon_sum / len(drone_coords)\n\n center_coord = Coordinate(center_lat, center_lon)\n\n self.log.info(\n 'Found cluster center: {coord}'.format(\n coord=center_coord,\n )\n )\n\n return center_coord", "def center_of_geometry(self, selectedAtoms:list=None) -> list:\n if (\"POSITION\" in dir(self)):\n cogx = 0.0\n cogy = 0.0\n cogz = 0.0\n if selectedAtoms is None:\n iterator = self.POSITION.content\n else:\n iterator = []\n for i in selectedAtoms:\n iterator.append(self.POSITION.content[i])\n for i in iterator:\n cogx += i.xp\n cogy += i.yp\n cogz += i.zp\n n = len(self.POSITION.content)\n return [cogx/n, cogy/n, cogz/n]\n else:\n raise ValueError(\"NO POSITION block in cnf-Object: \" + self.path)", "def circle_center(top_aerofoil_points, bottom_aerofoil_points):\n q = np.array(top_aerofoil_points[0].coordinates) - np.array(top_aerofoil_points[1].coordinates)\n r = np.array(bottom_aerofoil_points[-1].coordinates) - np.array(bottom_aerofoil_points[-2].coordinates)\n c = np.cross(q, [0, 0, -1]) / np.linalg.norm(q)\n d = np.cross(r, [0, 0, 1]) / np.linalg.norm(r)\n radius = (q[1] - r[1]) / (d[1] - c[1])\n s = q + radius * c\n return Point(tuple(-s))", "def centerPoint(featureCollection):\n features = featureCollection[\"features\"]\n center = [0, 0]\n for feature in features:\n geometry = 
feature[\"geometry\"]\n if geometry[\"type\"] == \"Point\":\n point = feature[\"geometry\"][\"coordinates\"]\n center[0] += point[0]\n center[1] += point[1]\n else:\n raise ValueError(\"expected a point but got a {}\".format(geometry[\"type\"]))\n center[0] /= len(features) \n center[1] /= len(features) \n\n return geojson.Point(coordinates=center)", "def center(self) -> Tuple[int, int]:\n center_x = int((self.x1 + self.x2) // 2)\n center_y = int((self.y1 + self.y2) // 2)\n return (center_x, center_y)", "def find_center_of_coordinates(selection='(all)', state=-1):\n # find middle x, y, z coordinate of the selection\n state = utils.int_to_state(state)\n minc, maxc = cmd.get_extent(selection, state=state)\n coc = [float(l + (u - l) / 2.0) for l, u in zip(minc, maxc)]\n return coc", "def center(self):\n return np.array([0,0,1/self.C+self.pos()])", "def center(self) -> Tuple[float, float]:\n return self.x + self.width / 2, self.y + self.height / 2", "def getBoundingBoxCenter(self, shell=False, *args, **kwargs):\n if shell:\n self.grabShell()\n uvBB = pm.polyEvaluate(boundingBoxComponent2d=True)\n uvCenter = [((uvBB[0][1] + uvBB[0][0]) / 2), ((uvBB[1][1] + uvBB[1][0]) / 2)]\n return uvCenter", "def test_get_center():\n center = Coordinates(7, 3)\n radius = 12\n\n returned_center = get_center(center, radius, 25)\n\n assert returned_center.get_x() == center.get_x()\n assert returned_center.get_y() == center.get_y()", "def centroid(coords,masses,divider):\n\treturn np.array([np.dot(masses[r].T,coords[r])/np.sum(masses[r]) for r in divider])", "def get_face_center_coordinates(root_dir, images_dir, face_center_cordinates_filename):\n face_center = pd.read_csv(\n root_dir + \"/\" + images_dir + \"/\" + face_center_cordinates_filename,\n sep=\" \",\n header=None,\n dtype=\"float64\",\n names=[\"x\", \"y\", \"z\", \"none\"],\n ).iloc[3, :3]\n\n face_center_rgb = pd.read_csv(\n root_dir + \"/\" + images_dir + \"/rgb.cal\",\n sep=\" \",\n header=None,\n error_bad_lines=False,\n warn_bad_lines=False,\n dtype=\"float64\",\n )\n\n moved_x = face_center[\"x\"] + face_center_rgb.iloc[6, 0]\n moved_y = face_center[\"y\"] + face_center_rgb.iloc[6, 1]\n\n x_result = (\n moved_x * face_center_rgb.iloc[0, 0] / face_center[\"z\"]\n + face_center_rgb.iloc[0, 2]\n )\n y_result = (\n moved_y * face_center_rgb.iloc[1, 1] / face_center[\"z\"]\n + face_center_rgb.iloc[1, 2]\n )\n\n return x_result, y_result", "def _center_distance(self):\n # Split positions in segments of two points :\n cut = np.vsplit(self.a_position, int(self.a_position.shape[0]/2))\n # Get center position and starting line position :\n center = np.mean(cut, axis=1)\n\n # ============ EUCLIDIAN DISTANCE ============\n diff = np.sqrt(np.square(center[:, np.newaxis, :] - center).sum(2))\n diff[np.tril_indices_from(diff)] = np.inf\n\n return center, diff", "def _find_coord_centre(self,\n shape_coords) -> np.array:\n return shape_coords.mean(axis=0)", "def centroid(self) -> Point:\n points = self.normalized_array\n centroids = [np.average(points[[0, i, i + 1], :-1], axis=0) for i in range(1, points.shape[0] - 1)]\n weights = [det(self._normalized_projection()[[0, i, i + 1]]) / 2 for i in range(1, points.shape[0] - 1)]\n return Point(*np.average(centroids, weights=weights, axis=0))", "def find_centroid_for_each(self):", "def get_circles_centers(triangles):\n points1, points2, points3 = (triangles[:, 0],\n triangles[:, 1],\n triangles[:, 2])\n # Vectors\n sides1 = points2 - points1\n sides2 = points3 - points1\n # Length of vector of cross product * 2\n area 
= 2 * (sides1[:, 0] * sides2[:, 1] - sides1[:, 1] * sides2[:, 0])\n\n # (y_2(x_1^2 + y_1^2) - y_1(x_2^2 + y_2^2)) / area + x\n centers_x = ((sides2[:, 1] *\n (np.square(sides1[:, 0]) + np.square(sides1[:, 1])) -\n sides1[:, 1] *\n (np.square(sides2[:, 0]) + np.square(sides2[:, 1]))) /\n area + points1[:, 0])\n centers_y = ((sides1[:, 0] *\n (np.square(sides2[:, 0]) + np.square(sides2[:, 1])) -\n sides2[:, 0] *\n (np.square(sides1[:, 0]) + np.square(sides1[:, 1]))) /\n area + points1[:, 1])\n\n # Transportated.\n return np.array((centers_x, centers_y)).T", "def center(self):\n return np.sum(self.bbox, 0) / 2", "def getpolycenter(poly):\n polylength = len(poly)\n\n return (\n round(sum(x for x, y in poly) / polylength, 2),\n round(sum(y for x, y in poly) / polylength, 2)\n )", "def center(self):\n return Point(self.width/2, self.height/2)", "def getCenter(self):\n return [self.tx/self.tw, self.ty/self.tw]", "def _get_pose_center(self, landmarks):\n left_hip = landmarks[self._landmark_names.index('left_hip')]\n right_hip = landmarks[self._landmark_names.index('right_hip')]\n center = (left_hip + right_hip) * 0.5\n return center", "def center(self):\n return self.map_.geom.center_skydir", "def find_center(\n x,\n centers,\n distance,\n):\n return np.argmin(\n [distance(x, centers[i, :]) for i in range(centers.shape[0])])", "def centroid(func, step=0.1):\n points = func.points(step)\n num, den = 0, 0\n\n for x, y in points:\n num += x * y\n den += y\n\n return num / den", "def centroid(self) -> Point[Scalar]:\n return self._context.multipoint_centroid(self)", "def test_set_center_coord():\r\n da = xr.DataArray(np.arange(2), dims=\"lead\", coords={\"lead\": [\"1-3\", \"2-4\"]})\r\n actual = _set_center_coord(da).lead_center.values\r\n expected = [2.0, 3.0]\r\n assert (actual == expected).all()", "def _get_centeroids(self, pts, max_dist=None):\n\n if max_dist == None:\n max_dist=self.maximum_edge_point_distance\n dist=0\n center_pts=[]\n if len(pts)>0:\n pts = sorted(pts)\n i = 0\n j=1\n center_pts.append(pts[0])\n num_pts=1.0 # to avoid integer division\n num_clusters = 1\n while i<len(pts) and j<len(pts):\n if pts[j]-pts[i]<max_dist:\n if len(center_pts)<num_clusters:\n center_pts.append(0)\n center_pts[num_clusters-1] = center_pts[num_clusters-1]+pts[j]\n num_pts+=1.0\n j=j+1\n i+=1\n else:\n if len(center_pts)<=num_clusters:\n center_pts.append(0)\n center_pts[num_clusters-1]= int(center_pts[num_clusters-1]/num_pts)\n num_pts = 1\n num_clusters += 1\n i=j\n j=i+1\n if len(center_pts)<=num_clusters:\n center_pts[num_clusters-1] = int(center_pts[num_clusters-1]/num_pts)\n else:\n print \"error! center_pts!\", len(center_pts), num_clusters, self.key\n return center_pts", "def centroid2d(points):\n number_of_points = 0\n centroid = [0, 0]\n\n for p in points:\n centroid = (centroid[0] + p[0], centroid[1] + p[1])\n number_of_points += 1\n\n centroid = [centroid[0] / number_of_points, centroid[1] /\n number_of_points] if number_of_points else None\n\n return centroid", "def _get_pose_center(self, landmarks):\n left_hip = landmarks[self._landmark_names.index(\"left_hip\")]\n right_hip = landmarks[self._landmark_names.index(\"right_hip\")]\n center = (left_hip + right_hip) * 0.5\n return center" ]
[ "0.7816311", "0.7496035", "0.74138576", "0.7411869", "0.735026", "0.73026866", "0.72750986", "0.7227958", "0.7207048", "0.7157561", "0.71556854", "0.7141065", "0.71169", "0.70823437", "0.7073579", "0.70721674", "0.7064834", "0.7059459", "0.70201606", "0.7017879", "0.7004704", "0.69872516", "0.6964822", "0.6964499", "0.6910163", "0.6906823", "0.687036", "0.6852701", "0.68488294", "0.68258137", "0.6814327", "0.6812786", "0.6807781", "0.67929834", "0.67873687", "0.6784845", "0.67741024", "0.6764316", "0.6764256", "0.6762018", "0.67599344", "0.67552567", "0.67417383", "0.6736132", "0.6728087", "0.6724906", "0.6711394", "0.67097163", "0.66986936", "0.66943336", "0.6693893", "0.6667933", "0.6667436", "0.6664131", "0.66595316", "0.66516155", "0.66426355", "0.66397333", "0.66301435", "0.66084534", "0.6602535", "0.65907884", "0.6590185", "0.6580819", "0.6560211", "0.6544872", "0.65389293", "0.6535507", "0.6534603", "0.65256476", "0.6521169", "0.65171933", "0.65155035", "0.6510189", "0.64886063", "0.6481622", "0.64789695", "0.647237", "0.64716905", "0.6468295", "0.64668083", "0.6462072", "0.6460383", "0.6459572", "0.64447135", "0.6437604", "0.6437237", "0.6413201", "0.63801247", "0.6378391", "0.6372563", "0.63686025", "0.6368164", "0.6367026", "0.63668454", "0.6363807", "0.6363064", "0.63578665", "0.63558227", "0.6351921" ]
0.75747705
1
Moves the geometrical center of the atoms in atomlist to the given point.
def move_center_to_point(atomlist, point):
    for atom in range(len(atomlist)):
        atomlist[atom] = atomlist[atom] - point
    return atomlist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recenter(self, point=(0, 0)):\n self.center = Point(*point)", "def centerOn(self, point):\n rect = self.rect()\n x = point.x() - rect.width() / 2.0\n y = point.y() - rect.height() / 2.0\n \n self.setPos(x, y)", "def centerOnPoint(self, point):\n\n inClass = point.__class__.__name__.lower()\n # check if we've been passed an OpenCV Point2f object\n if inClass == 'point2f':\n # convert the Point2f object to a simple list\n point = QPointF(point.x, point.y)\n\n # check if we've been passed a list\n elif inClass == 'list':\n # convert the Point2f object to a simple list\n point = QPointF(point[0], point[1])\n\n self.isZooming = True\n self.centerPoint = point\n self.centerOn(self.centerPoint)\n self.isZooming = False", "def move_to(self, mobject_or_point):\n layer_center = self.surrounding_rectangle.get_center()\n if isinstance(mobject_or_point, Mobject):\n target_center = mobject_or_point.get_center() \n else:\n target_center = mobject_or_point\n\n self.shift(target_center - layer_center)", "def setCenter(self, center):\n p = center - self.center\n for i in range(len(self.points)):\n self.points[i] += p", "def __moveCenterTo(self, x, y):\n x0, y0, w, h = self.currentBox\n x2, y2 = x - (w/2), y - (h/2)\n self.__moveTo(x2, y2)", "def center(self, destination):\n self.move(destination=destination, origin=self.center)", "def pointcenter(x):\n return point(x)", "def center(self, obj):\n mn0 = self.master.xy >= obj.center\n mn1 = self.master.xy <= obj.center\n\n point_list = [self.master.xy[mn0], self.master.xy[mn1], self.master.xy[mn0[0], mn1[1]], self.master.xy[mn1[0], mn0[1]]] # 4 physical points near the center coordinate.\n dist_list = []\n idx = 0\n for point in point_list:\n dist_list.append([idx, np.linalg.norm(point - obj.center)]) # Calculate Euclidean distances.\n idx += 1\n dist_sorted = sorted(dist_list, key=lambda distance : distance[1]) # Sort distances in ascending order.\n return self.master.mn(point_list[dist_sorted[0][0]]) # Convert the closest point to abstract coordinate and then return.", "def set_center(self,structure):\n for i,b in enumerate(self.bfs):\n b.set_center( structure[ self.LIST1[i] ] ) \n return", "def center_from_tuple(self, center):\n self.center = Point(*center)", "def position_center(self, x, y):\n self.x = x\n self.y = y\n self.pos[0] = x - self.pos[2]/2\n self.pos[1] = y - self.pos[3]/2", "def move_center(obj):\n desktop = QApplication.desktop()\n dw = desktop.width()\n dh = desktop.height()\n size = obj.size()\n mw = size.width()\n mh = size.height()\n obj.move(dw/2-mw/2, dh/2-mh/2)", "def setCenter(self, np):\n p = self.getCenter()\n v = Vector.createFromTwoPoints(p, np)\n for i in range(len(self.points)):\n self.points[i] = v(self.points[i])", "def setCentroid(self, center):\n p = center - self.centroid\n for i in range(len(self.points)):\n self.points[i] += p", "def move_to_origin(x):\n # Correct x so it is centered at (0,0)\n tx = np.mean(x[:no_points, :])\n ty = np.mean(x[no_points:, :])\n x[:no_points, :] = (x[:no_points, :] - tx)\n x[no_points:, :] = (x[no_points:, :] - ty)\n return x, tx, ty", "def center(self, x):\n\n shape = x.shape\n nx = shape[1]\n ny = shape[0]\n hnx = nx // 2\n hny = ny // 2\n\n temp = x[0:hny, 0:hnx].copy()\n x[0:hny, 0:hnx] = x[hny:ny, hnx:nx].copy()\n x[hny:ny, hnx:nx] = temp\n\n temp = x[0:hny, hnx:nx].copy()\n x[0:hny, hnx:nx] = x[hny:ny, 0:hnx].copy()\n x[hny:ny, 0:hnx] = temp", "def update_center(self): \r\n \r\n self.grfx[0].center = self.center\r\n\r\n self.update_bbox()", "def 
centre_of_points(list_of_points):\n\n cp = np.average(list_of_points, axis=0)\n return cp", "def center(self):\n cp = self.dat.flowsheet.getCenter()\n self.centerOn(cp[0], cp[1])", "def center_on_spawn(self):\n self.center_on(*self.world.metadata['playerStart'])", "def set_atomic_position(molecule, atom_index, x_coord, y_coord, z_coord):\n molecule.SetAtomPosition(atom_index, x_coord, y_coord, z_coord)", "def move_point(start, end, bbox):\n vector = end - start\n shift = calculate_shift(start, vector, bbox)\n if shift is not None and 0 < shift < 1:\n start = start + shift * vector\n return start", "def centerOnMark(self, mark):\n\n # get the center of the mark\n point = mark.mapToScene(mark.pos())\n\n # and center the view on it\n self.centerOnPoint(point)", "def setCenter(self, p):\n self.__center = p", "def _move_actor(self, actor):\n\n actor.center_x = actor.center_x + actor.change_x\n actor.center_y = actor.center_y + actor.change_y", "def centre(self):\n n = len(self.point)\n return Point(\n sum(map(lambda p: p.x, self.point)) / n,\n sum(map(lambda p: p.y, self.point)) / n\n )", "def _setCenter(self, value, index):\n item = self.item()\n if item is not None:\n if value == 'Origin':\n value = 0.\n elif value not in self._ROTATION_CENTER_OPTIONS:\n value = float(value)\n else:\n value = value.lower()\n\n center = list(item.getRotationCenter())\n center[index] = value\n item.setRotationCenter(*center)", "def update_to_coord(self, point):\r\n if self._index_of_sel_point != -1 and self._index_of_sel_point <= len(self.points)-1:\r\n self._command_stack.do(model.structure.UpdatePoint(\r\n self._structure, self._index_of_sel_point, round(point[0]), round(point[1])))\r\n elif self._index_of_sel_point == len(self.points) or not self.points:\r\n self._command_stack.do(model.structure.AddPoint(\r\n self._structure, self._index_of_sel_point+1, round(point[0]), round(point[1])))\r\n if self._index_of_sel_point+1 >= len(self.points):\r\n self.winfo_toplevel().update()\r\n self._index_of_sel_point = len(self.points)\r\n else:\r\n self._set_selection(self._index_of_sel_point+1)\r\n self.winfo_toplevel().update()", "def center(self, center):\n if not isinstance(center, Point):\n raise TypeError(\"The center must be a Point!\")\n self._center = center", "def center_ship(self):\r\n self.center = self.screen_rect.centerx", "def center_ship(self):\r\n self.center = self.screen_rect.centerx", "def move_center_by_vector(self, direction_vector):\n assert isinstance(direction_vector, tuple)\n self.smd3.move_center(direction_vector)\n min_vector, max_vector = self.smd3.get_min_max_vector()\n self.logic.move_center(direction_vector, self.header.type)\n self.header.set_box(min_vector, max_vector)\n self.header.update(self.smd3)\n self.meta.move_center_by_vector(direction_vector)", "def center_ship(self):\n self.center = self.screen_rect.centerx", "def center_ship(self):\n self.center = self.screen_rect.centerx", "def center_ship(self):\n self.center = self.screen_rect.centerx", "def _moveTo(self, pt):\n self._handleAnchor()\n t = \"M%s\" % (pointToString(pt))\n self._commands.append(t)\n self._lastCommand = \"M\"\n self._lastX, self._lastY = pt", "def set_center_position(self, lon, lat):\n self.lon_center = lon\n self.lat_center = lat", "def action_to_coords(self, x, y):\n self.scene.center_on(x, y)", "def moveBasedOnCurrentMomentum(self):\n self.xPos-=self.xMomentum\n self.yPos-=self.yMomentum\n self.syncSpriteCoordinates()", "def get_geom_center(coordlist):\n return sum(coordlist) / len(coordlist)", "def 
compute_center(self, mole_object):\r\n if mole_object.plugin_type == \"PyMOL\":\r\n sel = PymolPlugin.PymolPlugin().get_model('all')\r\n cnt = len(sel.atom)\r\n\r\n else:\r\n sel = ChimeraPlugin.ChimeraPlugin().select()\r\n cnt = len(ChimeraPlugin.ChimeraPlugin().current_atoms())\r\n\r\n cent_x = 0\r\n cent_y = 0\r\n cent_z = 0\r\n\r\n if cnt == 0:\r\n return 0, 0, 0\r\n\r\n if mole_object.plugin_type == \"PyMOL\":\r\n\r\n for a in sel.atom:\r\n cent_x += a.coord[0]\r\n cent_y += a.coord[1]\r\n cent_z += a.coord[2]\r\n\r\n else:\r\n\r\n for a in ChimeraPlugin.ChimeraPlugin().current_atoms():\r\n cent_x += a.coord()[0]\r\n cent_y += a.coord()[1]\r\n cent_z += a.coord()[2]\r\n\r\n cent_x /= cnt\r\n cent_y /= cnt\r\n cent_z /= cnt\r\n\r\n self.point_x.component('entryfield').setentry(cent_x)\r\n self.point_y.component('entryfield').setentry(cent_y)\r\n self.point_z.component('entryfield').setentry(cent_z)\r\n\r\n self.show_crisscross(mole_object)", "def _move_receptor_to_grid_center(self):\n lower_receptor_corner = np.array([self._crd[:,i].min() for i in range(3)], dtype=float)\n upper_receptor_corner = np.array([self._crd[:,i].max() for i in range(3)], dtype=float)\n \n receptor_box_center = (upper_receptor_corner + lower_receptor_corner) / 2.\n grid_center = (self._origin_crd + self._uper_most_corner_crd) / 2.\n displacement = grid_center - receptor_box_center\n\n print(\"Receptor is translated by \", displacement)\n\n for atom_ind in range(len(self._crd)):\n self._crd[atom_ind] += displacement\n return None", "def set_center(self, center):\n self._center = center\n self._reset_slot_bounds()", "def SetCurrentPosition(self, point):\n\t\t# So that we can correctly determine the position on a path\n\t\tself.previousPositions.append(self.currentPosition)\n\n\t\tself.currentPosition = point\n\n\t\t# self.Calculate()", "def move_point_circ(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n circle = mutated_genome[index][2]\n newcircle = (Xval,Yval,circle[2])\n mutated_genome[index][2] = newcircle", "def center_of_geometry(self, selectedAtoms:list=None) -> list:\n if (\"POSITION\" in dir(self)):\n cogx = 0.0\n cogy = 0.0\n cogz = 0.0\n if selectedAtoms is None:\n iterator = self.POSITION.content\n else:\n iterator = []\n for i in selectedAtoms:\n iterator.append(self.POSITION.content[i])\n for i in iterator:\n cogx += i.xp\n cogy += i.yp\n cogz += i.zp\n n = len(self.POSITION.content)\n return [cogx/n, cogy/n, cogz/n]\n else:\n raise ValueError(\"NO POSITION block in cnf-Object: \" + self.path)", "def center(window):\n window.update_idletasks()\n\n # Find the screen resolution\n screen_width = window.winfo_screenwidth()\n screen_height = window.winfo_screenheight()\n\n # Find new (x, y) coordinates\n size = tuple(int(_) for _ in window.geometry().split('+')[0].split('x'))\n x = screen_width/2 - 7 * size[0] / 13\n y = screen_height/2 - 6 * size[1] / 11\n\n # Apply new coordinates\n window.geometry(\"+%d+%d\" % (x, y))", "def center(self):\r\n frameGm = self.frameGeometry()\r\n screen = QtGui.QApplication.desktop().screenNumber(QtGui.QApplication.desktop().cursor().pos())\r\n centerPoint = QtGui.QApplication.desktop().screenGeometry(screen).center()\r\n frameGm.moveCenter(centerPoint)\r\n self.move(frameGm.topLeft())", "def set_center(self, center=0.0):\n\n self.centerFromImage = center\n\n tempCenter = [0] * 2\n\n tempCenter[0] = center[0] - 305 # Xcoord offset\n tempCenter[1] = 313 - 
center[1] # Ycoord offset\n\n self.centerFromRobot = tempCenter", "def move_point(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n point_index = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n mutated_genome[index][2][point_index] = point", "def test_set_center_coord():\r\n da = xr.DataArray(np.arange(2), dims=\"lead\", coords={\"lead\": [\"1-3\", \"2-4\"]})\r\n actual = _set_center_coord(da).lead_center.values\r\n expected = [2.0, 3.0]\r\n assert (actual == expected).all()", "def center(self):\n if self.pos != 0.0:\n self.pos = 0.0", "def MoveToPoint(*args):\n return _gdi_.GraphicsPath_MoveToPoint(*args)", "def moveTo(self, pt: Tuple[float, float]) -> None:\n raise NotImplementedError", "def transformPos(self, point):\n return point / self.scale - self.offsetToCenter()", "def move_center_by_block_id(self, block_id):\n assert isinstance(block_id, int)\n position = self.smd3.search(block_id)\n assert position is not None, \"Block id not found: {}\".format(block_id)\n distance = Vector.distance(position, (16, 16, 16))\n if distance == 0:\n return\n direction_vector = Vector.get_direction_vector_to_center(position)\n self.move_center_by_vector(direction_vector)", "def setPoint(self, point):\n self.position = point.position", "def setCentreCoordinates(self,xcenter,ycenter):\n self.x = xcenter\n self.y = ycenter", "def move(self, center):\n\t\t#print \"made it\"\n\t\tself.rect = self.rect.move(center)", "def set_center(self, x, y, z):\n def _set_center(result):\n self.x, self.y, self.z = result\n\n return self._remote.set_center(x, y, z)\\\n .then(_set_center, self.rpc_error).catch(self.callback_error)", "def updatePos(self):\n self.setPos(self.centerX-self.boundingRect().width()/2.0,\n self.centerY-self.boundingRect().height()/2.0)", "def linecenter(l):\n return scale3(add(l[0],l[1]),0.5)", "def center(self,c, ADDR):\r\n #FIGURE OUT HOW TO DO THIS\r\n #Actually pretty sure this is impossible to do from software\r\n returnValue('Success!')", "def center_coords_cleanup(self, coords, point):\n px, py = point\n for x in range(px - (self.radius * 2), px + (self.radius * 2) + 1):\n for y in range(py - (self.radius * 2), py + (self.radius * 2) + 1):\n if (x, y) in coords:\n coords.remove((x, y))\n\n return coords", "def objectCenter(*args, gl: bool=True, local: bool=True, x: bool=True, y: bool=True, z:\n bool=True, **kwargs)->List[float]:\n pass", "def setCurrentListPosition(*args):", "def setCurrentListPosition(*args):", "def center(self, center):\n\n self._center = center", "def predict_center(self, point):\n point_cluster_num = self.predict_cluster(point)\n center = self.centers[point_cluster_num]\n return center", "def setPoint(self, point):\n self._point = point\n self._point = self.projectPoint(Point.origin(point.dimension))", "def _set_center(self):\n sl_center = np.array(\n [self.sl_list[k].mean_pos for k in range(self.nb_sl)]\n )\n sl_nb_pts = np.array(\n [self.sl_list[k].nb_points for k in range(self.nb_sl)]\n )\n sl_wcenter = [sl_center[k] * sl_nb_pts[k] for k in range(self.nb_sl)]\n self.center = np.sum(sl_wcenter, axis=0) / np.sum(sl_nb_pts)", "def _move_molecule_to(self, location):\n assert len(location) == 3, \"location must have len 3\"\n displacement = np.array(location, dtype=float) - self._get_molecule_center_of_mass()\n for atom_ind in range(len(self._crd)):\n self._crd[atom_ind] += displacement\n return None", "def center(self):\n return 
[self.position[i]+self.radius for i in range(2)]", "def x(self, destination):\n destination = (destination, self.center[1])\n self.move(destination=destination, origin=self.center, axis=\"x\")", "def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n #dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter", "def center(self, center_mass=False):\n if center_mass:\n com = self.center_of_mass\n self.xyz -= com\n else:\n self.xyz -= self.xyz.mean(0)", "def new_global_center_of_mass_set(self, global_center_of_mass):\n if global_center_of_mass.channel.size != 1:\n raise IndexError(\"Global center of mass should be from one frame only\")\n self.global_center_of_mass = global_center_of_mass\n\n # Remove previous actors from the scene\n for actor in self.global_center_of_mass_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.global_center_of_mass_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtk.vtkPoints()\n for i in range(global_center_of_mass.channel.size):\n # Create a mapper\n mapper = vtkPolyDataMapper()\n\n # Create an actor\n self.global_center_of_mass_actors.append(vtkActor())\n self.global_center_of_mass_actors[i].SetMapper(mapper)\n\n self.parent_window.ren.AddActor(self.global_center_of_mass_actors[i])\n\n # Update marker position\n self.update_global_center_of_mass(self.global_center_of_mass)", "def move_start_node(self, x, y):", "def centerPoint(featureCollection):\n features = featureCollection[\"features\"]\n center = [0, 0]\n for feature in features:\n geometry = feature[\"geometry\"]\n if geometry[\"type\"] == \"Point\":\n point = feature[\"geometry\"][\"coordinates\"]\n center[0] += point[0]\n center[1] += point[1]\n else:\n raise ValueError(\"expected a point but got a {}\".format(geometry[\"type\"]))\n center[0] /= len(features) \n center[1] /= len(features) \n\n return geojson.Point(coordinates=center)", "def center(self):\n\n ca_atoms = self.ca_atoms\n ca_atom_vectors = ca_atoms[\"ca.atom\"].to_list()\n ca_atom_vectors = [i for i in ca_atom_vectors if i is not None]\n centroid = self.center_of_mass(ca_atom_vectors, geometric=False)\n centroid = Vector(centroid)\n\n return centroid", "def update(self, foodList):\n self.positions = list(map(lambda point: [point['x'], point['y']], foodList['data']))", "def center_of_mass(self, entity, geometric=False):\n\n # Structure, Model, Chain, Residue\n if isinstance(entity, Entity.Entity):\n atom_list = entity.get_atoms()\n # List of Atoms\n elif hasattr(entity, \"__iter__\") and [x for x in entity if x.level == \"A\"]:\n atom_list = entity\n # Some other weirdo object\n else:\n raise ValueError(\n f\"Center of Mass can only be calculated from the following objects:\\n\"\n f\"Structure, Model, Chain, Residue, list of Atoms.\"\n )\n\n masses = []\n positions = [[], [], []] # [ [X1, X2, ..] , [Y1, Y2, ...] , [Z1, Z2, ...] 
]\n\n for atom in atom_list:\n masses.append(atom.mass)\n\n for i, coord in enumerate(atom.coord.tolist()):\n positions[i].append(coord)\n\n # If there is a single atom with undefined mass complain loudly.\n if \"ukn\" in set(masses) and not geometric:\n raise ValueError(\n f\"Some atoms don't have an element assigned.\\n\"\n f\"Try adding them manually or calculate the geometrical center of mass instead.\"\n )\n\n if geometric:\n return [sum(coord_list) / len(masses) for coord_list in positions]\n else:\n w_pos = [[], [], []]\n for atom_index, atom_mass in enumerate(masses):\n w_pos[0].append(positions[0][atom_index] * atom_mass)\n w_pos[1].append(positions[1][atom_index] * atom_mass)\n w_pos[2].append(positions[2][atom_index] * atom_mass)\n\n return [sum(coord_list) / sum(masses) for coord_list in w_pos]", "def center(x):\n return x - x.mean()", "def move(self):\n self.center_x += self._vx\n self.center_y += self._vy", "def predict_center(point):\n point_cluster_num = predict_cluster(point)\n center = centers[point_cluster_num]\n return center", "def action_to_spawn(self):\n self.scene.center_on_spawn()", "def translate(self, displacement):\n\n self.center = (self.center[0] + displacement[0],\n self.center[1] + displacement[1])", "def move_point_wline(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n point_index = random.randint(1,max(1,len(mutated_genome[index][2])-1))\n mutated_genome[index][2][point_index] = point", "def center(self) -> Point:\n return Point(*np.sum(self.normalized_array[:, :-1], axis=0))", "def return_to_center(): #ignore this for now, use move_to_position_(0,0)\n current_pos = '\\xAA\\xBB\\xCC\\xDD'\n #run command until back to center (0,0)\n while True: #change the byte locations\n current_pos = to_center()\n print(current_pos)\n time.sleep(0.2) #check timing\n if((current_pos[1] == 0) and (current_pos[1] == 0)):\n break\n print('At center')", "def move_to_pos(self, x, y, z):\n try:\n angles = self.ik_to(x, y, z)\n self.move_to_angle(*angles)\n\n self.footPosition = np.array([x, y, z])\n self.angles = angles\n\n except Exception as exc:\n print (exc)", "def dist_from_center_to(x):\n pass", "def center_on(self, x, y):\n\n # Mark that we can start actually drawing now\n self.given_center = True\n\n # Center the view\n (ctr_x, ctr_y) = self.ingame_to_scene(x, y)\n self.parent.centerOn(ctr_x, ctr_y)\n\n # Draw what needs drawing\n self.draw_visible_area()", "def center(x):\n if ispoint(x):\n # return pointcenter(x)\n return point(x)\n elif isline(x):\n return linecenter(x)\n elif isarc(x):\n return arccenter(x)\n elif ispoly(x):\n return polycenter(x)\n elif isgeomlist(x):\n pl = []\n for g in x:\n pl.append(center(g))\n return polycenter(pl)\n else:\n raise ValueError(\"inappropriate type for center(): \",format(x))", "def set_position(self, point, reset=False, render=True):\n if isinstance(point, np.ndarray):\n if point.ndim != 1:\n point = point.ravel()\n self.camera.position = scale_point(self.camera, point, invert=False)\n if reset:\n self.reset_camera(render=render)\n self.camera_set = True\n self.Modified()", "def transform_matrix_offset_center(matrix, y, x):\n o_x = (x - 1) / 2.0\n o_y = (y - 1) / 2.0\n offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])\n reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])\n transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)\n return transform_matrix", 
"def initial_point(self, initial_point: Sequence[float] | None) -> None:\n self._initial_point = initial_point", "def set_drone_position(self, new_point):\n self.drone.set_drone_position(new_point)", "def center_of_mass(self, matrix):\n # Changing the positions of all objects relative to center of mass, in origo.\n x, y, z = np.sum(matrix[:, 0].reshape(self.numbodies, 1)*matrix[:, 1:4], axis=0)/(np.sum(matrix[:, 0], axis=0))\n print('Center of mass located at (%.4g, %.4g, %.4g)' %(x, y, z))\n # x-direction\n matrix[:, 1] = matrix[:, 1]-x\n # y-direction\n matrix[:, 2] = matrix[:, 2]-y\n # z-direction\n matrix[:, 3] = matrix[:, 3]-z\n # The Suns initial velocity which makes the total momentum of the system zero\n # velcity_sun = sum(mass_planet_i*veocity_planet_i)/(mass_sun)\n u, v, w = np.sum(matrix[:, 0].reshape(self.numbodies, 1)*matrix[:, 4:7], axis=0)/(matrix[0, 0])\n print('The initial velocity of the Sun (%.4g, %.4g, %.4g)' %(u, v, w))\n matrix[0, 4:7] = u, v, w\n # Returning the modified matrix\n return matrix" ]
[ "0.70135695", "0.6748774", "0.6072873", "0.60708416", "0.60040206", "0.59520334", "0.5908273", "0.5892884", "0.57873654", "0.5775457", "0.57008934", "0.56635845", "0.5663209", "0.5656083", "0.5635438", "0.5621651", "0.5585128", "0.5584832", "0.5545488", "0.55017734", "0.5447869", "0.544242", "0.5438934", "0.54364634", "0.5425978", "0.5415568", "0.53619677", "0.5353338", "0.53324926", "0.5298903", "0.52661145", "0.52661145", "0.5251765", "0.52061605", "0.52061605", "0.52061605", "0.5200921", "0.5196826", "0.5185981", "0.5181207", "0.517331", "0.51432693", "0.51278746", "0.5126887", "0.51234293", "0.5117772", "0.5109465", "0.50989705", "0.5098307", "0.5096157", "0.5085699", "0.50856656", "0.50820124", "0.5080176", "0.5079072", "0.5074", "0.50718445", "0.50645417", "0.5056698", "0.504793", "0.5044189", "0.5040157", "0.5037648", "0.503712", "0.5026483", "0.5026019", "0.5020616", "0.5020616", "0.50012416", "0.49993953", "0.49900335", "0.49884847", "0.49809325", "0.4979697", "0.49646887", "0.49445352", "0.49431708", "0.49351546", "0.49331728", "0.4931537", "0.49304727", "0.49087292", "0.49016008", "0.48928633", "0.48859084", "0.48857895", "0.48806778", "0.48792025", "0.48753527", "0.48681307", "0.48642325", "0.4857578", "0.4855039", "0.48514244", "0.48507866", "0.4850684", "0.48478347", "0.48447716", "0.48441795", "0.48414364" ]
0.8722849
0
Rotates the adp with its corresponding rotation matrix.
def rotate_adp_reverse(adp, rotmat): adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5]), float(adp[2])]]) rotmatT = np.transpose(rotmat) adp = np.dot(rotmat, adp) adp = np.dot(adp, rotmatT) adp = np.array(adp).flatten().tolist() return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotate_adp(adp, rotmat):\n\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmatT, adp)\n adp = np.dot(adp, rotmat)\n # print '=\\n',adp,'\\n-------------------------------------------------\\n\\n\\n\\n\\n\\n'\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]", "def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))", "def rotate_adp2(adp, rotmat, cell):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmat = np.linalg.inv(rotmat)\n rotmatT = np.transpose(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0],\n [0, 1 / cell[1], 0],\n [0, 0, 1 / cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]", "def rotate_adp3(adp, rotmat, cell):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmati = np.matrix(rotmat)\n rotmatiT = np.transpose(rotmati)\n rotmat = np.linalg.inv(rotmat)\n\n Nmat = np.matrix([[1 / cell[0], 0, 0],\n [0, 1 / cell[1], 0],\n [0, 0, 1 / cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n adp = np.dot(rotmati, adp)\n adp = np.dot(adp, rotmatiT)\n\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]", "def rotate(mat,angle):\n return np.dot(Jones.rotator(angle), np.dot(mat, Jones.rotator(-angle)))", "def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot", "def _rotate(self, affine):\n dims = affine.shape[0]\n if not np.isscalar(self.rotation):\n raise Exception('this class requires exactly one entry for rotation!')\n theta = (self.deformrandomstate.rand() - 0.5) * 2 * self.rotation\n if dims == 4:\n\n # sample unit vector:\n u = np.random.random(3)\n u /= np.sqrt(np.sum([uu ** 2 for uu in u]) + 1e-8)\n ct = np.cos(theta)\n st = np.sin(theta)\n rot = np.eye(4)\n rot[:3, :3] = [\n [ct + u[0] ** 2 * (1 - ct), u[0] * u[1] * (1 - ct) - u[2] * st, u[0] * u[2] * (1 - ct) + u[2] * st],\n [u[1] * u[0] * (1 - ct) + u[2] * st, ct + u[1] ** 2 * (1 - ct), u[1] * u[2] * (1 - ct) - u[0] * st],\n [u[2] * u[0] * (1 - ct) - u[1] * st, u[2] * u[1] * (1 - ct) + u[0] * st, ct + u[2] ** 2 * (1 - ct)]]\n\n elif dims == 3:\n rot = np.eye(3)\n rot[:2, :2] = np.asarray([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]])\n else:\n raise Exception(\n 'implement this for each dimension, since not yet implemented for dimension {}'.format(dims))\n\n return np.matmul(rot, affine)", "def rotate_z(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [math.cos(a),math.sin(a),0,0],\n [-math.sin(a),math.cos(a),0,0],\n [0,0,0,0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)", "def rotate(self, a):\n ca = cos(a)\n sa = sin(a)\n rM = Matrix([\n [ca, -sa],\n 
[sa, ca]\n ])\n p0 = self.p0\n self.c = p0 + rM @ (self.c - p0)\n dp = p0 - self.c\n self.a0 = atan2(dp.y, dp.x)\n return self", "def rotate(self, angle):\n\t\tif not isinstance(angle, Angle):\n\t\t\tangle = Angle(angle)\n\t\treturn angle.matrix() * self", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n beta = self.beta_sample()\n R = Rotator.rotation_matrix(alpha,beta,0.0)\n X = np.dot(R, X)\n if self.random_flip and (random.rand() > 0.5):\n X[2,:] = -X[2,:]\n X[1,:] = -X[1,:]\n return X", "def apply_rotation(self, eta=0.0, phi=0.0, theta=0.0):\n \n new_rotation_matrix = self.rotation_elements( eta, phi, theta )\n \n #self.rotation_matrix_exp = np.dot( self.rotation_matrix_exp , new_rotation_matrix )\n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def rotate(p,q,A,V): \n n = A.shape[0]\n App, Aqq, Apq = A[p,p], A[q,q], A[p,q] #Initial values\n phi = 0.5*math.atan2(2*Apq, Aqq-App) #Find the rotation value\n c, s = math.cos(phi), math.sin(phi) #Calculate sin and cos\n\n #Update the matrix diagonal elements\n A[p,p] = c*c*App + s*s*Aqq - 2*s*c*Apq \n A[q,q] = s*s*App + c*c*Aqq + 2*s*c*Apq\n A[p,q] = 0 #This is zero by construction\n \n \n #Iterate over and update remaining off-diagonal elements\n for i in range(p):\n Aip, Aiq = A[i,p], A[i,q]\n A[i,p] = c*Aip - s*Aiq\n A[i,q] = c*Aiq + s*Aip\n \n for i in range(p+1,q):\n Api, Aiq = A[p,i], A[i,q]\n A[p,i] = c*Api - s*Aiq\n A[i,q] = c*Aiq + s*Api\n \n for i in range(q+1,n):\n Api, Aqi = A[p,i], A[q,i]\n A[p,i] = c*Api - s*Aqi\n A[q,i] = c*Aqi + s*Api\n \n #Update eigenvectors in matrix V\n for i in range(n):\n Vip, Viq = V[i,p], V[i,q]\n V[i,p] = c*Vip - s*Viq\n V[i,q] = s*Vip + c*Viq\n \n return A, V", "def rotate_x(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [1,0,0,0],\n [0,math.cos(a),math.sin(a),0],\n [0,-math.sin(a),math.cos(a),0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def rotate(self, angle, reshape=False):\n return IntensityMap.rotate(self, angle, reshape=reshape)", "def rotate(self, angle):\n perp = TwoDV(-self[1], self[0])\n angle = angle * math.pi / 180.0\n c, s = math.cos(angle), math.sin(angle)\n return TwoDV(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s)", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def rotate_3D(atom, source_atom):\n from lauescript.cryst.match import get_transform\n\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n\n matrix = get_transform(lst1, lst2, matrix=True)\n\n adp = source_atom.adp['cart_int']\n\n atom.adp['cart_int'] = rotate_adp(adp, matrix)", "def _rotate(self, angle):\n if self.undobuffer:\n self.undobuffer.push((\"rot\", angle, self._degreesPerAU))\n angle *= self._degreesPerAU\n neworient = self._orient.rotate(angle)\n tracing = self.screen._tracing\n if tracing == 1 and self._speed > 0:\n anglevel = 3.0 * self._speed\n steps = 1 + int(abs(angle)/anglevel)\n delta = 1.0*angle/steps\n for _ in range(steps):\n self._orient = self._orient.rotate(delta)\n self._update()\n self._orient = neworient\n self._update()", "def rotate_ADP_about_axis(ADP, angle, axisDirection):\n adp = get_adp_as_matrix(ADP)\n u, v = np.linalg.eig(adp)\n startPoints = [v[:, i].flatten().tolist()[0] for i in xrange(3)]\n endPoints = [rotate_point_about_axis(point, angle, axisDirection, (0, 0, 0)) for point in startPoints]\n rotMat = get_transform(startPoints, endPoints, 
matrix=True).transpose()\n newadp = np.dot(rotMat.transpose(), np.dot(adp, rotMat))\n return newadp[0, 0], newadp[1, 1], newadp[2, 2], newadp[0, 1], newadp[0, 2], newadp[1, 2]", "def rotation(self, p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])", "def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)", "def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)", "def rotate(self):\n pass", "def _rotate(self, arr, theta):\n # Rotation Matrix R\n R = [[np.cos(theta), -np.sin(theta)], \n [np.sin(theta), np.cos(theta)]]\n\n return np.matmul(R, arr.T).T", "def rotate(self, a):\n ca = cos(a)\n sa = sin(a)\n self.v = Matrix([\n [ca, -sa],\n [sa, ca]\n ]) @ self.v\n return self", "def rotate(self,r):\n return r.hprod( self.hprod( r.inv() ) )", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n R = Rotator.rotation_matrix(alpha,0.0,0.0)\n return np.dot(R,X)", "def _rot(axis, angle):\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])", "def rotate_y(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [math.cos(a),0,-math.sin(a),0],\n [0,1,0,0],\n [math.sin(a),0,math.cos(a),0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def rotate(self, angle=0.0):\n # TODO: Implement the rotate function. Remember to record the value of\n # rotation degree.\n self.rotDegree = angle\n self.x = rotate(self.x, angle = angle, axes=(0, 1), reshape=False, \n output=None, order=3, mode='constant', cval=0.0, prefilter=True)\n # This rotation isn't working correctly. 
Get shit for non right anlge rotatations\n # raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n\n beta = np.arccos(1.0-2*random.rand())\n psi = random.rand() * 2*pi\n\n R = Rotator.rotation_matrix(alpha,beta,psi)\n return np.dot(R,X)", "def Rotate(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Rotate(*args, **kwargs)", "def mrotate(self):\n result_matrix = [[0 for col in range(len(self.matrix[0]))] for row in range(len(self.matrix))]\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[0])):\n result_matrix[i][j] = self.matrix[i][len(self.matrix[0]) - 1 - j]\n # left turn -> result_matrix[i][j] = self.matrix[len(self.matrix) - 1 - i][j]\n self.matrix = result_matrix\n pass", "def rotate(self, angle):\n perp = Vec2D(-self[1], self[0])\n angle = angle * math.pi / 180.0\n c, s = math.cos(angle), math.sin(angle)\n return Vec2D(self[0] * c + perp[0] * s, self[1] * c + perp[1] * s)", "def rotate(self, angle):\n rotmat = rotation_matrix_2d(angle)\n rotated = np.dot(rotmat.T, [self.pix_x.value, self.pix_y.value])\n self.pix_x = rotated[0] * self.pix_x.unit\n self.pix_y = rotated[1] * self.pix_x.unit\n self.pix_rotation -= angle", "def rot(self, t=0., transposed=False):\n rotmat = np.array(\n [[np.cos(self._pa+self._omegab*t),np.sin(self._pa+self._omegab*t)],\n [-np.sin(self._pa+self._omegab*t),np.cos(self._pa+self._omegab*t)]])\n if transposed:\n return rotmat.T\n else:\n return rotmat", "def rotate(self, matrix: list[list[int]]) -> None:", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def angle_to_rotation_matrix(angle: torch.Tensor) -> torch.Tensor:\n ang_rad = deg2rad(angle)\n cos_a: torch.Tensor = torch.cos(ang_rad)\n sin_a: torch.Tensor = torch.sin(ang_rad)\n return torch.stack([cos_a, sin_a, -sin_a, cos_a], dim=-1).view(*angle.shape, 2, 2)", "def rotation(self, *args, **kwargs) -> Any:\n pass", "def _rotate_(self, x: np.array, m: np.array) -> (np.array, np.array):\n # get a random angle\n angle = np.random.randint(0, self.rotate)\n # get a random sign for the angle\n sign = np.random.randint(0, 2)\n x = rotate(x, -sign * angle, reshape=False)\n m = rotate(m, -sign * angle, axes=(0, 1),\n mode='nearest',\n reshape=False)\n return x, m", "def test_rotation(self):\n quat_rotated = rowan.rotate(input1, vector_inputs)\n\n matrices = rowan.to_matrix(input1)\n matrix_rotated = np.einsum(\"ijk,ki->ij\", matrices, vector_inputs.T)\n self.assertTrue(np.allclose(matrix_rotated, quat_rotated))", "def RotationMatrix(theta, x, y, z, point=None):\n\treturn mach.rotation_matrix(theta, [x, y, z])", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 
],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def rotate(self, vect, angle):\n self.pl.Rotation = Rotation(vect, angle)\n\n self.comp.Placement = self.pl\n self.box.Placement = self.pl", "def rotate(self, angle):\n old_angle, tilt = self.rotation\n new_angle = old_angle + angle\n while new_angle > 90:\n new_angle = new_angle - 90\n while angle < -90:\n new_angle = new_angle + 90\n self.rotation = (new_angle, tilt)", "def rotate_matrix(angle):\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, s],\n [-s, c]])", "def rotate(img, angle):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n aug = iaa.Affine(rotate=angle)\n return aug.augment_image(img)", "def rotMatrix( source = None ):\n if source is None:\n return None,None\n else:\n (x,y,z, a) = source\n if a % TWOPI:\n return tmatrixaccel.rotMatrix( x,y,z,a ),tmatrixaccel.rotMatrix( x,y,z,-a )\n return None,None", "def _altaz_rotation(self, jd):\n R_lon = rot_z(- self.longitude.radians - jd.gast * TAU / 24.0)\n return einsum('ij...,jk...,kl...->il...', self.R_lat, R_lon, jd.M)", "def rotate(self, theta, legs):\n U, onew = rotationTensor(theta, self.symmetries, legs)\n B = U @ self\n new = list(onew)\n old = list(legs)\n if B.internallegs != self.internallegs:\n old.append(self.internallegs[0])\n new.append(B.internallegs[0])\n B.swaplegs({n: o for o, n in zip(old, new)})\n return B.couplingAddapt(self.coupling)", "def rotate(self, angle, point=None):\n if not point:\n point = self.middle\n self.p1.rotate(angle, point)\n self.p2.rotate(angle, point)", "def rotate_z(self, angle):\n angle *= np.pi / 180\n return self.transform(np.matrix([[np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]]))", "def rotate(self, angle, axis):\r\n R=self.rotation(angle, axis)\r\n self.mlist = (self*R).mlist\r\n return self", "def rotate(X):\n return X", "def rotate_about(self, p, theta):\n result = self.clone()\n result.slide(-p.x, -p.y)\n result.rotate(theta)\n result.slide(p.x, p.y)\n return result", "def rotate(a, ps, axe=0):\r\n\r\n sin = np.sin(a)\r\n cos = np.cos(a)\r\n rm = np.array([[[1, 0, 0], [0, cos, -sin], [0, sin, cos]],\r\n [[cos, 0, sin], [0, 1, 0], [-sin, 0, cos]],\r\n [[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]]])\r\n m = np.full((len(ps), 3, 3), rm[axe])\r\n ps = map(lambda x, y: np.dot(x, y), m, ps)\r\n return ps", "def rotate(self, rotation):\n\t\tif not isinstance(rotation,Rotation):\n\t\t\trotation = Rotation(*rotation)\n\t\treturn rotation.matrix() * self", "def rotate(self, *args, **kwargs): # real signature unknown\n pass", "def rotate(self):\n\n last_center = self.rect.center\n self.image = pg.transform.rotate(self.image_copy,self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = last_center\n self.angle +=self.rotate_by", "def rotation(self, angle, axis):\r\n\r\n sqr_a = axis.x*axis.x\r\n sqr_b = axis.y*axis.y\r\n sqr_c = axis.z*axis.z\r\n len2 = sqr_a+sqr_b+sqr_c\r\n\r\n k2 = math.cos(angle)\r\n k1 = (1.0-k2)/len2\r\n k3 = math.sin(angle)/math.sqrt(len2)\r\n k1ab = k1*axis.x*axis.y\r\n k1ac = k1*axis.x*axis.z\r\n k1bc = k1*axis.y*axis.z\r\n k3a = k3*axis.x\r\n k3b = k3*axis.y\r\n k3c = k3*axis.z\r\n\r\n return mat4( k1*sqr_a+k2, k1ab-k3c, k1ac+k3b, 0.0,\r\n k1ab+k3c, k1*sqr_b+k2, k1bc-k3a, 0.0,\r\n k1ac-k3b, k1bc+k3a, k1*sqr_c+k2, 0.0,\r\n 0.0, 0.0, 0.0, 1.0)", "def intermediateJacPol2Rot(self,x):\n allS = np.sin(x[0,:])\n allC = np.cos(x[0,:])\n allR = x[1,:]\n \n Jac = 
Idn(x.shape[1],self._dim)\n Jac[:,0,0] = -allS*allR\n Jac[:,0,1] = allC\n Jac[:,1,0] = allC*allR\n Jac[:,1,1] = allS\n return Jac", "def rotate(self, angle=pi, point=None):\n if not point: point = Point.origin(d=self.dimension)\n v = Vector.createFromTwoPoints(point, self)\n v.rotate(angle)\n self.components = v(point).components", "def rotate(self, angle):\n self.call('rotate', angle)", "def reflect_adp(adp, planev):\n M = np.identity(4)\n M[:3, :3] -= 2.0 * np.outer(planev, planev)\n M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev\n\n return rotate_adp(adp, M[:3, :3])", "def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def rotate_about(self, p, theta):\n result = self.clone()\n result.translate(-p.x, -p.y)\n result.rotate(theta)\n result.translate(p.x, p.y)\n return result", "def rotateZ(self, angle):\r\n if angle:\r\n c = cos(radians(angle))\r\n s = sin(radians(angle))\r\n self.mtrx = dot([[c, s, 0, 0],\r\n [-s, c, 0, 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 0, 1]],\r\n self.mtrx)\r\n self.rtn[2] = angle\r\n self.was_moved = True", "def orientation_ras_lps(affine: NdarrayTensor) -> NdarrayTensor:\n sr = max(affine.shape[0] - 1, 1) # spatial rank is at least 1\n flip_d = [[-1, 1], [-1, -1, 1], [-1, -1, 1, 1]]\n flip_diag = flip_d[min(sr - 1, 2)] + [1] * (sr - 3)\n if isinstance(affine, torch.Tensor):\n return torch.diag(torch.as_tensor(flip_diag).to(affine)) @ affine # type: ignore\n return np.diag(flip_diag).astype(affine.dtype) @ affine # type: ignore", "def rotate(self, rotation):\n self.coords = dot(rotation, self.coords)\n return self", "def rotate(matrix: List[List[int]]) -> None:\n if matrix is None:\n return\n\n # transpose\n for i in range(0, len(matrix)):\n for j in range(i, len(matrix[0])):\n temp = matrix[i][j]\n matrix[i][j] = matrix[j][i]\n matrix[j][i] = temp\n # reflect\n for i in range(0, len(matrix)):\n for j in range(0, len(matrix[0]) // 2):\n reflection = len(matrix[0]) - j - 1\n temp = matrix[i][j]\n matrix[i][j] = matrix[i][reflection]\n matrix[i][reflection] = temp", "def rotate(x: torch.Tensor, angle: int) -> torch.Tensor:\n # B C H W\n h_dim = 2\n w_dim = 3\n\n if angle == 0:\n return x\n elif angle == 90:\n return x.flip(w_dim).transpose(h_dim, w_dim)\n elif angle == 180:\n return x.flip(w_dim).flip(h_dim)\n elif angle == 270:\n return x.flip(h_dim).transpose(h_dim, w_dim)\n else:\n raise NotImplementedError(\"Must be rotation divisible by 90 degrees\")", "def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def apply_rotation_z(self, theta=0.0 ):\n \n theta = 
radians(theta)\n new_rotation_matrix = [[ +cos(theta) , -sin(theta) , 0 ],\n [ +sin(theta) , +cos(theta) , 0 ],\n [ 0 , 0 , 1 ]] \n \n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def qrotate(points, axis, theta):\n q = Quaternion.rotator(axis, theta)\n return q.rotate(points)", "def rotatePivot(rotation):\r\n # Rotate in object mode X\r\n bpy.ops.object.mode_set(mode='OBJECT')\r\n bpy.ops.transform.rotate(value=rotation.x, axis=(1,0,0), constraint_orientation='GLOBAL')\r\n # rotate in edit mode X\r\n bpy.ops.object.mode_set(mode='EDIT')\r\n bpy.ops.mesh.select_all(action='SELECT')\r\n bpy.ops.transform.rotate(value=-rotation.x, axis=(1,0,0), constraint_orientation='GLOBAL')\r\n # Rotate in object mode Y\r\n bpy.ops.object.mode_set(mode='OBJECT')\r\n bpy.ops.transform.rotate(value=rotation.y, axis=(0,1,0), constraint_orientation='GLOBAL')\r\n # rotate in edit mode Y\r\n bpy.ops.object.mode_set(mode='EDIT')\r\n bpy.ops.transform.rotate(value=-rotation.y, axis=(0,1,0), constraint_orientation='GLOBAL')\r\n # Rotate in object mode Z\r\n bpy.ops.object.mode_set(mode='OBJECT')\r\n bpy.ops.transform.rotate(value=rotation.z, axis=(0,0,1), constraint_orientation='GLOBAL')\r\n # rotate in edit mode Z\r\n bpy.ops.object.mode_set(mode='EDIT')\r\n bpy.ops.transform.rotate(value=-rotation.z, axis=(0,0,1), constraint_orientation='GLOBAL')\r\n # return to object mode\r\n bpy.ops.object.mode_set(mode='OBJECT')", "def srotate(self, angle):\n\n self.angle = self.angle + angle", "def _rotation_from_gradient(self,m):\n\t\ttheta = -np.arctan(m)\n\t\tself.current_theta = theta\n\t\treturn self._rotation_from_angle(theta)", "def rotate(self, matrix):\n n = len(matrix)\n \n for circle in range(n//2):\n r_circle = n - circle - 1\n for i in range(circle, n - circle - 1):\n a = matrix[circle][i]\n b, matrix[i][r_circle] = matrix[i][r_circle], a\n c, matrix[r_circle][n - i - 1] = matrix[r_circle][n - i - 1], b\n d, matrix[n - i - 1][circle] = matrix[n - i - 1][circle], c\n matrix[circle][i] = d", "def align(image):\n angle = find_angle(image)\n image = rotate(image, angle)\n return image", "def rotate(self, *args, **kwargs):\n return _image.image_rotate(self, *args, **kwargs)", "def _rotationMatrix(self, n_dim, theta):\n i = np.identity(n_dim)\n c, s = np.cos(theta)*i, np.sin(theta)*i\n rotation = np.bmat([[c, s], [-s, c]])\n return rotation", "def rotate(self, angle):\n image_center = np.array(self.img.shape[1::-1]) / 2\n rot_mat = cv2.getRotationMatrix2D(tuple(image_center), angle, 1.0)\n\n self.img = cv2.warpAffine(\n self.img, rot_mat, self.img.shape[1::-1], flags=cv2.INTER_LINEAR\n )\n\n self.edits.append(f\"rotate:{angle}\")\n return self", "def rotate(self, yaw):\n rotation_matrix = tfs.rotation_matrix(yaw, (0, 0, 1))[:2, :2]\n return np.matmul(rotation_matrix, self).view(Vector)", "def rotation(self, phi, mode):\n self.circuit.phase_shift(phi, self._remap_modes(mode))", "def rotation_matrix(rotate):\n tx, ty, tz = rotate\n Rx = np.array([[1, 0, 0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])\n Ry = np.array([[np.cos(ty), 0, -np.sin(ty)], [0, 1, 0], [np.sin(ty), 0, np.cos(ty)]])\n Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0, 0, 1]])\n return np.dot(Rx, np.dot(Ry, Rz))", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * 
self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. - txx - tyy\n\n return rot", "def keypoint_rotate(keypoint, angle, rows, cols, **params):\n center = (cols - 1) * 0.5, (rows - 1) * 0.5\n matrix = cv2.getRotationMatrix2D(center, angle, 1.0)\n x, y, a, s = keypoint[:4]\n x, y = cv2.transform(np.array([[[x, y]]]), matrix).squeeze()\n return x, y, a + math.radians(angle), s", "def rotate(img, angle, resample=False, expand=False, center=None):\r\n \r\n return img.rotate(angle, resample, expand, center)", "def _rotate(polyreg, i=None, j=None, u=None, v=None, theta=None, R=None):\n # determine the rotation matrix based on inputs\n if R is not None:\n logger.debug(\"rotate: R=\\n{}\".format(R))\n if i is not None:\n raise ValueError(i)\n if j is not None:\n raise ValueError(j)\n if theta is not None:\n raise ValueError(theta)\n if u is not None:\n raise ValueError(u)\n if v is not None:\n raise ValueError(v)\n elif i is not None and j is not None and theta is not None:\n logger.info(\"rotate via indices and angle.\")\n if R is not None:\n raise ValueError(R)\n if u is not None:\n raise ValueError(u)\n if v is not None:\n raise ValueError(v)\n if i == j:\n raise ValueError(\"Must provide two unique basis vectors.\")\n R = givens_rotation_matrix(i, j, theta, polyreg.dim)\n elif u is not None and v is not None:\n logger.info(\"rotate via 2 vectors.\")\n if R is not None:\n raise ValueError(R)\n if i is not None:\n raise ValueError(i)\n if j is not None:\n raise ValueError(j)\n if theta is not None:\n raise ValueError(theta)\n R = solve_rotation_ap(u, v)\n else:\n raise ValueError(\"R or (i and j and theta) or (u and v) \"\n \"must be defined.\")\n if isinstance(polyreg, Polytope):\n # Ensure that half space is normalized before rotation\n n, p = _hessian_normal(polyreg.A, polyreg.b)\n # Rotate the hyperplane normals\n polyreg.A = np.inner(n, R)\n polyreg.b = p\n else:\n # Rotate subregions\n for poly in polyreg.list_poly:\n _rotate(poly, None, None, R=R)\n # transform bbox and cheby\n if polyreg.bbox is not None:\n polyreg.bbox = (np.inner(polyreg.bbox[0].T, R).T,\n np.inner(polyreg.bbox[1].T, R).T)\n if polyreg._chebXc is not None:\n polyreg._chebXc = np.inner(polyreg._chebXc, R)\n return R", "def rotator(angle):\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c,-s],[s,c]])", "def fix_rotation(self):\n self.rotate(self.rotation)\n self.annotations.rotate(self.rotation)\n self.rotation = 0", "def rotate(self,center, angle):\n \n self.coord = [x-np.repeat([[center[0],center[1]]],[x.shape[0]],axis = 0) for x in self.coord]\n\n alpha = angle\n R = np.array([[np.cos(alpha),-np.sin(alpha)],[np.sin(alpha),np.cos(alpha)]])\n \n for i in range(len(self.coord)):\n self.coord[i] = np.squeeze([np.dot([x],R) for x in self.coord[i]])\n\n self.coord = [x+np.repeat([[center[0],center[1]]],[x.shape[0]],axis = 0) for x in self.coord]\n\n return self", "def rotation(self, phi, mode):\n self.circuit.phase_shift(phi, mode)", "def __rotate_model(self):\n self.__model_matrix = self.__get_rotation_matrix(\n self.__face.position_cartesian,\n (1 + self.__face.position[2]) * 0.5)", "def apply_rotation_x(self, eta=0.0 ):\n \n eta = radians(eta)\n new_rotation_matrix = [[ 1 , 0 , 0 ],\n [ 0 , +cos(eta) , -sin(eta) ],\n [ 0 , +sin(eta) , +cos(eta) ]] \n \n self.rotation_matrix_exp = 
np.dot( new_rotation_matrix, self.rotation_matrix_exp )" ]
[ "0.79376036", "0.73167443", "0.73074406", "0.7229753", "0.7113275", "0.7051355", "0.6992302", "0.6686556", "0.66458803", "0.66007537", "0.657009", "0.6538239", "0.6492567", "0.64686793", "0.6467943", "0.6442462", "0.64174455", "0.63961196", "0.6388264", "0.63866466", "0.63760054", "0.6368504", "0.63672537", "0.63672537", "0.6365199", "0.63406956", "0.63355124", "0.6325487", "0.63123417", "0.6312132", "0.6274328", "0.62583435", "0.62266237", "0.62117803", "0.6209411", "0.61521596", "0.6146725", "0.61100966", "0.61060613", "0.61038494", "0.6097206", "0.60961854", "0.6092604", "0.6088147", "0.60809153", "0.6078204", "0.6069734", "0.6041528", "0.6030295", "0.60239816", "0.601961", "0.60133547", "0.5996416", "0.5995912", "0.5989014", "0.59862995", "0.5986245", "0.59836257", "0.59782624", "0.597726", "0.59758997", "0.59740967", "0.5970379", "0.59592766", "0.5957963", "0.5955848", "0.59473217", "0.5944798", "0.5944029", "0.59329593", "0.5908772", "0.5894153", "0.5889308", "0.5888876", "0.58877623", "0.5887166", "0.5879811", "0.58780724", "0.5876205", "0.58728635", "0.5872826", "0.5852149", "0.58506614", "0.58456385", "0.5845091", "0.58430165", "0.5828852", "0.5827741", "0.5826474", "0.5825847", "0.5824189", "0.5821436", "0.5821264", "0.5806832", "0.58050036", "0.58040315", "0.58011824", "0.57980055", "0.57854044", "0.57780397" ]
0.7127177
4
Rotates the adp with its corresponding rotation matrix.
def rotate_adp(adp, rotmat): adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5]), float(adp[2])]]) rotmatT = np.transpose(rotmat) adp = np.dot(rotmatT, adp) adp = np.dot(adp, rotmat) # print '=\n',adp,'\n-------------------------------------------------\n\n\n\n\n\n' adp = np.array(adp).flatten().tolist() return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))", "def rotate_adp2(adp, rotmat, cell):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmat = np.linalg.inv(rotmat)\n rotmatT = np.transpose(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0],\n [0, 1 / cell[1], 0],\n [0, 0, 1 / cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]", "def rotate_adp3(adp, rotmat, cell):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmati = np.matrix(rotmat)\n rotmatiT = np.transpose(rotmati)\n rotmat = np.linalg.inv(rotmat)\n\n Nmat = np.matrix([[1 / cell[0], 0, 0],\n [0, 1 / cell[1], 0],\n [0, 0, 1 / cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n adp = np.dot(rotmati, adp)\n adp = np.dot(adp, rotmatiT)\n\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]", "def rotate_adp_reverse(adp, rotmat):\n\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]", "def rotate(mat,angle):\n return np.dot(Jones.rotator(angle), np.dot(mat, Jones.rotator(-angle)))", "def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot", "def _rotate(self, affine):\n dims = affine.shape[0]\n if not np.isscalar(self.rotation):\n raise Exception('this class requires exactly one entry for rotation!')\n theta = (self.deformrandomstate.rand() - 0.5) * 2 * self.rotation\n if dims == 4:\n\n # sample unit vector:\n u = np.random.random(3)\n u /= np.sqrt(np.sum([uu ** 2 for uu in u]) + 1e-8)\n ct = np.cos(theta)\n st = np.sin(theta)\n rot = np.eye(4)\n rot[:3, :3] = [\n [ct + u[0] ** 2 * (1 - ct), u[0] * u[1] * (1 - ct) - u[2] * st, u[0] * u[2] * (1 - ct) + u[2] * st],\n [u[1] * u[0] * (1 - ct) + u[2] * st, ct + u[1] ** 2 * (1 - ct), u[1] * u[2] * (1 - ct) - u[0] * st],\n [u[2] * u[0] * (1 - ct) - u[1] * st, u[2] * u[1] * (1 - ct) + u[0] * st, ct + u[2] ** 2 * (1 - ct)]]\n\n elif dims == 3:\n rot = np.eye(3)\n rot[:2, :2] = np.asarray([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]])\n else:\n raise Exception(\n 'implement this for each dimension, since not yet implemented for dimension {}'.format(dims))\n\n return np.matmul(rot, affine)", "def rotate_z(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [math.cos(a),math.sin(a),0,0],\n [-math.sin(a),math.cos(a),0,0],\n [0,0,0,0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)", "def rotate(self, a):\n ca = cos(a)\n sa = sin(a)\n rM = Matrix([\n [ca, -sa],\n [sa, ca]\n ])\n p0 = self.p0\n self.c = p0 + rM @ (self.c - p0)\n dp = p0 - self.c\n 
self.a0 = atan2(dp.y, dp.x)\n return self", "def rotate(self, angle):\n\t\tif not isinstance(angle, Angle):\n\t\t\tangle = Angle(angle)\n\t\treturn angle.matrix() * self", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n beta = self.beta_sample()\n R = Rotator.rotation_matrix(alpha,beta,0.0)\n X = np.dot(R, X)\n if self.random_flip and (random.rand() > 0.5):\n X[2,:] = -X[2,:]\n X[1,:] = -X[1,:]\n return X", "def apply_rotation(self, eta=0.0, phi=0.0, theta=0.0):\n \n new_rotation_matrix = self.rotation_elements( eta, phi, theta )\n \n #self.rotation_matrix_exp = np.dot( self.rotation_matrix_exp , new_rotation_matrix )\n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def rotate(p,q,A,V): \n n = A.shape[0]\n App, Aqq, Apq = A[p,p], A[q,q], A[p,q] #Initial values\n phi = 0.5*math.atan2(2*Apq, Aqq-App) #Find the rotation value\n c, s = math.cos(phi), math.sin(phi) #Calculate sin and cos\n\n #Update the matrix diagonal elements\n A[p,p] = c*c*App + s*s*Aqq - 2*s*c*Apq \n A[q,q] = s*s*App + c*c*Aqq + 2*s*c*Apq\n A[p,q] = 0 #This is zero by construction\n \n \n #Iterate over and update remaining off-diagonal elements\n for i in range(p):\n Aip, Aiq = A[i,p], A[i,q]\n A[i,p] = c*Aip - s*Aiq\n A[i,q] = c*Aiq + s*Aip\n \n for i in range(p+1,q):\n Api, Aiq = A[p,i], A[i,q]\n A[p,i] = c*Api - s*Aiq\n A[i,q] = c*Aiq + s*Api\n \n for i in range(q+1,n):\n Api, Aqi = A[p,i], A[q,i]\n A[p,i] = c*Api - s*Aqi\n A[q,i] = c*Aqi + s*Api\n \n #Update eigenvectors in matrix V\n for i in range(n):\n Vip, Viq = V[i,p], V[i,q]\n V[i,p] = c*Vip - s*Viq\n V[i,q] = s*Vip + c*Viq\n \n return A, V", "def rotate_x(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [1,0,0,0],\n [0,math.cos(a),math.sin(a),0],\n [0,-math.sin(a),math.cos(a),0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def rotate(self, angle, reshape=False):\n return IntensityMap.rotate(self, angle, reshape=reshape)", "def rotate(self, angle):\n perp = TwoDV(-self[1], self[0])\n angle = angle * math.pi / 180.0\n c, s = math.cos(angle), math.sin(angle)\n return TwoDV(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s)", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def rotate_3D(atom, source_atom):\n from lauescript.cryst.match import get_transform\n\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n\n matrix = get_transform(lst1, lst2, matrix=True)\n\n adp = source_atom.adp['cart_int']\n\n atom.adp['cart_int'] = rotate_adp(adp, matrix)", "def _rotate(self, angle):\n if self.undobuffer:\n self.undobuffer.push((\"rot\", angle, self._degreesPerAU))\n angle *= self._degreesPerAU\n neworient = self._orient.rotate(angle)\n tracing = self.screen._tracing\n if tracing == 1 and self._speed > 0:\n anglevel = 3.0 * self._speed\n steps = 1 + int(abs(angle)/anglevel)\n delta = 1.0*angle/steps\n for _ in range(steps):\n self._orient = self._orient.rotate(delta)\n self._update()\n self._orient = neworient\n self._update()", "def rotate_ADP_about_axis(ADP, angle, axisDirection):\n adp = get_adp_as_matrix(ADP)\n u, v = np.linalg.eig(adp)\n startPoints = [v[:, i].flatten().tolist()[0] for i in xrange(3)]\n endPoints = [rotate_point_about_axis(point, angle, axisDirection, (0, 0, 0)) for point in startPoints]\n rotMat = get_transform(startPoints, endPoints, matrix=True).transpose()\n newadp = np.dot(rotMat.transpose(), np.dot(adp, 
rotMat))\n return newadp[0, 0], newadp[1, 1], newadp[2, 2], newadp[0, 1], newadp[0, 2], newadp[1, 2]", "def rotation(self, p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])", "def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)", "def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)", "def rotate(self):\n pass", "def _rotate(self, arr, theta):\n # Rotation Matrix R\n R = [[np.cos(theta), -np.sin(theta)], \n [np.sin(theta), np.cos(theta)]]\n\n return np.matmul(R, arr.T).T", "def rotate(self, a):\n ca = cos(a)\n sa = sin(a)\n self.v = Matrix([\n [ca, -sa],\n [sa, ca]\n ]) @ self.v\n return self", "def rotate(self,r):\n return r.hprod( self.hprod( r.inv() ) )", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n R = Rotator.rotation_matrix(alpha,0.0,0.0)\n return np.dot(R,X)", "def _rot(axis, angle):\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])", "def rotate_y(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [math.cos(a),0,-math.sin(a),0],\n [0,1,0,0],\n [math.sin(a),0,math.cos(a),0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def rotate(self, angle=0.0):\n # TODO: Implement the rotate function. Remember to record the value of\n # rotation degree.\n self.rotDegree = angle\n self.x = rotate(self.x, angle = angle, axes=(0, 1), reshape=False, \n output=None, order=3, mode='constant', cval=0.0, prefilter=True)\n # This rotation isn't working correctly. 
Get shit for non right anlge rotatations\n # raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n\n beta = np.arccos(1.0-2*random.rand())\n psi = random.rand() * 2*pi\n\n R = Rotator.rotation_matrix(alpha,beta,psi)\n return np.dot(R,X)", "def Rotate(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Rotate(*args, **kwargs)", "def mrotate(self):\n result_matrix = [[0 for col in range(len(self.matrix[0]))] for row in range(len(self.matrix))]\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[0])):\n result_matrix[i][j] = self.matrix[i][len(self.matrix[0]) - 1 - j]\n # left turn -> result_matrix[i][j] = self.matrix[len(self.matrix) - 1 - i][j]\n self.matrix = result_matrix\n pass", "def rotate(self, angle):\n perp = Vec2D(-self[1], self[0])\n angle = angle * math.pi / 180.0\n c, s = math.cos(angle), math.sin(angle)\n return Vec2D(self[0] * c + perp[0] * s, self[1] * c + perp[1] * s)", "def rotate(self, angle):\n rotmat = rotation_matrix_2d(angle)\n rotated = np.dot(rotmat.T, [self.pix_x.value, self.pix_y.value])\n self.pix_x = rotated[0] * self.pix_x.unit\n self.pix_y = rotated[1] * self.pix_x.unit\n self.pix_rotation -= angle", "def rot(self, t=0., transposed=False):\n rotmat = np.array(\n [[np.cos(self._pa+self._omegab*t),np.sin(self._pa+self._omegab*t)],\n [-np.sin(self._pa+self._omegab*t),np.cos(self._pa+self._omegab*t)]])\n if transposed:\n return rotmat.T\n else:\n return rotmat", "def rotate(self, matrix: list[list[int]]) -> None:", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def angle_to_rotation_matrix(angle: torch.Tensor) -> torch.Tensor:\n ang_rad = deg2rad(angle)\n cos_a: torch.Tensor = torch.cos(ang_rad)\n sin_a: torch.Tensor = torch.sin(ang_rad)\n return torch.stack([cos_a, sin_a, -sin_a, cos_a], dim=-1).view(*angle.shape, 2, 2)", "def rotation(self, *args, **kwargs) -> Any:\n pass", "def _rotate_(self, x: np.array, m: np.array) -> (np.array, np.array):\n # get a random angle\n angle = np.random.randint(0, self.rotate)\n # get a random sign for the angle\n sign = np.random.randint(0, 2)\n x = rotate(x, -sign * angle, reshape=False)\n m = rotate(m, -sign * angle, axes=(0, 1),\n mode='nearest',\n reshape=False)\n return x, m", "def test_rotation(self):\n quat_rotated = rowan.rotate(input1, vector_inputs)\n\n matrices = rowan.to_matrix(input1)\n matrix_rotated = np.einsum(\"ijk,ki->ij\", matrices, vector_inputs.T)\n self.assertTrue(np.allclose(matrix_rotated, quat_rotated))", "def RotationMatrix(theta, x, y, z, point=None):\n\treturn mach.rotation_matrix(theta, [x, y, z])", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 
],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def rotate(self, vect, angle):\n self.pl.Rotation = Rotation(vect, angle)\n\n self.comp.Placement = self.pl\n self.box.Placement = self.pl", "def rotate(self, angle):\n old_angle, tilt = self.rotation\n new_angle = old_angle + angle\n while new_angle > 90:\n new_angle = new_angle - 90\n while angle < -90:\n new_angle = new_angle + 90\n self.rotation = (new_angle, tilt)", "def rotate_matrix(angle):\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, s],\n [-s, c]])", "def rotate(img, angle):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n aug = iaa.Affine(rotate=angle)\n return aug.augment_image(img)", "def rotMatrix( source = None ):\n if source is None:\n return None,None\n else:\n (x,y,z, a) = source\n if a % TWOPI:\n return tmatrixaccel.rotMatrix( x,y,z,a ),tmatrixaccel.rotMatrix( x,y,z,-a )\n return None,None", "def _altaz_rotation(self, jd):\n R_lon = rot_z(- self.longitude.radians - jd.gast * TAU / 24.0)\n return einsum('ij...,jk...,kl...->il...', self.R_lat, R_lon, jd.M)", "def rotate(self, theta, legs):\n U, onew = rotationTensor(theta, self.symmetries, legs)\n B = U @ self\n new = list(onew)\n old = list(legs)\n if B.internallegs != self.internallegs:\n old.append(self.internallegs[0])\n new.append(B.internallegs[0])\n B.swaplegs({n: o for o, n in zip(old, new)})\n return B.couplingAddapt(self.coupling)", "def rotate(self, angle, point=None):\n if not point:\n point = self.middle\n self.p1.rotate(angle, point)\n self.p2.rotate(angle, point)", "def rotate_z(self, angle):\n angle *= np.pi / 180\n return self.transform(np.matrix([[np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]]))", "def rotate(self, angle, axis):\r\n R=self.rotation(angle, axis)\r\n self.mlist = (self*R).mlist\r\n return self", "def rotate(X):\n return X", "def rotate_about(self, p, theta):\n result = self.clone()\n result.slide(-p.x, -p.y)\n result.rotate(theta)\n result.slide(p.x, p.y)\n return result", "def rotate(a, ps, axe=0):\r\n\r\n sin = np.sin(a)\r\n cos = np.cos(a)\r\n rm = np.array([[[1, 0, 0], [0, cos, -sin], [0, sin, cos]],\r\n [[cos, 0, sin], [0, 1, 0], [-sin, 0, cos]],\r\n [[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]]])\r\n m = np.full((len(ps), 3, 3), rm[axe])\r\n ps = map(lambda x, y: np.dot(x, y), m, ps)\r\n return ps", "def rotate(self, rotation):\n\t\tif not isinstance(rotation,Rotation):\n\t\t\trotation = Rotation(*rotation)\n\t\treturn rotation.matrix() * self", "def rotate(self, *args, **kwargs): # real signature unknown\n pass", "def rotate(self):\n\n last_center = self.rect.center\n self.image = pg.transform.rotate(self.image_copy,self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = last_center\n self.angle +=self.rotate_by", "def rotation(self, angle, axis):\r\n\r\n sqr_a = axis.x*axis.x\r\n sqr_b = axis.y*axis.y\r\n sqr_c = axis.z*axis.z\r\n len2 = sqr_a+sqr_b+sqr_c\r\n\r\n k2 = math.cos(angle)\r\n k1 = (1.0-k2)/len2\r\n k3 = math.sin(angle)/math.sqrt(len2)\r\n k1ab = k1*axis.x*axis.y\r\n k1ac = k1*axis.x*axis.z\r\n k1bc = k1*axis.y*axis.z\r\n k3a = k3*axis.x\r\n k3b = k3*axis.y\r\n k3c = k3*axis.z\r\n\r\n return mat4( k1*sqr_a+k2, k1ab-k3c, k1ac+k3b, 0.0,\r\n k1ab+k3c, k1*sqr_b+k2, k1bc-k3a, 0.0,\r\n k1ac-k3b, k1bc+k3a, k1*sqr_c+k2, 0.0,\r\n 0.0, 0.0, 0.0, 1.0)", "def intermediateJacPol2Rot(self,x):\n allS = np.sin(x[0,:])\n allC = np.cos(x[0,:])\n allR = x[1,:]\n \n Jac = 
Idn(x.shape[1],self._dim)\n Jac[:,0,0] = -allS*allR\n Jac[:,0,1] = allC\n Jac[:,1,0] = allC*allR\n Jac[:,1,1] = allS\n return Jac", "def rotate(self, angle=pi, point=None):\n if not point: point = Point.origin(d=self.dimension)\n v = Vector.createFromTwoPoints(point, self)\n v.rotate(angle)\n self.components = v(point).components", "def rotate(self, angle):\n self.call('rotate', angle)", "def reflect_adp(adp, planev):\n M = np.identity(4)\n M[:3, :3] -= 2.0 * np.outer(planev, planev)\n M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev\n\n return rotate_adp(adp, M[:3, :3])", "def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def rotate_about(self, p, theta):\n result = self.clone()\n result.translate(-p.x, -p.y)\n result.rotate(theta)\n result.translate(p.x, p.y)\n return result", "def rotateZ(self, angle):\r\n if angle:\r\n c = cos(radians(angle))\r\n s = sin(radians(angle))\r\n self.mtrx = dot([[c, s, 0, 0],\r\n [-s, c, 0, 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 0, 1]],\r\n self.mtrx)\r\n self.rtn[2] = angle\r\n self.was_moved = True", "def orientation_ras_lps(affine: NdarrayTensor) -> NdarrayTensor:\n sr = max(affine.shape[0] - 1, 1) # spatial rank is at least 1\n flip_d = [[-1, 1], [-1, -1, 1], [-1, -1, 1, 1]]\n flip_diag = flip_d[min(sr - 1, 2)] + [1] * (sr - 3)\n if isinstance(affine, torch.Tensor):\n return torch.diag(torch.as_tensor(flip_diag).to(affine)) @ affine # type: ignore\n return np.diag(flip_diag).astype(affine.dtype) @ affine # type: ignore", "def rotate(self, rotation):\n self.coords = dot(rotation, self.coords)\n return self", "def rotate(matrix: List[List[int]]) -> None:\n if matrix is None:\n return\n\n # transpose\n for i in range(0, len(matrix)):\n for j in range(i, len(matrix[0])):\n temp = matrix[i][j]\n matrix[i][j] = matrix[j][i]\n matrix[j][i] = temp\n # reflect\n for i in range(0, len(matrix)):\n for j in range(0, len(matrix[0]) // 2):\n reflection = len(matrix[0]) - j - 1\n temp = matrix[i][j]\n matrix[i][j] = matrix[i][reflection]\n matrix[i][reflection] = temp", "def rotate(x: torch.Tensor, angle: int) -> torch.Tensor:\n # B C H W\n h_dim = 2\n w_dim = 3\n\n if angle == 0:\n return x\n elif angle == 90:\n return x.flip(w_dim).transpose(h_dim, w_dim)\n elif angle == 180:\n return x.flip(w_dim).flip(h_dim)\n elif angle == 270:\n return x.flip(h_dim).transpose(h_dim, w_dim)\n else:\n raise NotImplementedError(\"Must be rotation divisible by 90 degrees\")", "def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def apply_rotation_z(self, theta=0.0 ):\n \n theta = 
radians(theta)\n new_rotation_matrix = [[ +cos(theta) , -sin(theta) , 0 ],\n [ +sin(theta) , +cos(theta) , 0 ],\n [ 0 , 0 , 1 ]] \n \n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def qrotate(points, axis, theta):\n q = Quaternion.rotator(axis, theta)\n return q.rotate(points)", "def rotatePivot(rotation):\r\n # Rotate in object mode X\r\n bpy.ops.object.mode_set(mode='OBJECT')\r\n bpy.ops.transform.rotate(value=rotation.x, axis=(1,0,0), constraint_orientation='GLOBAL')\r\n # rotate in edit mode X\r\n bpy.ops.object.mode_set(mode='EDIT')\r\n bpy.ops.mesh.select_all(action='SELECT')\r\n bpy.ops.transform.rotate(value=-rotation.x, axis=(1,0,0), constraint_orientation='GLOBAL')\r\n # Rotate in object mode Y\r\n bpy.ops.object.mode_set(mode='OBJECT')\r\n bpy.ops.transform.rotate(value=rotation.y, axis=(0,1,0), constraint_orientation='GLOBAL')\r\n # rotate in edit mode Y\r\n bpy.ops.object.mode_set(mode='EDIT')\r\n bpy.ops.transform.rotate(value=-rotation.y, axis=(0,1,0), constraint_orientation='GLOBAL')\r\n # Rotate in object mode Z\r\n bpy.ops.object.mode_set(mode='OBJECT')\r\n bpy.ops.transform.rotate(value=rotation.z, axis=(0,0,1), constraint_orientation='GLOBAL')\r\n # rotate in edit mode Z\r\n bpy.ops.object.mode_set(mode='EDIT')\r\n bpy.ops.transform.rotate(value=-rotation.z, axis=(0,0,1), constraint_orientation='GLOBAL')\r\n # return to object mode\r\n bpy.ops.object.mode_set(mode='OBJECT')", "def srotate(self, angle):\n\n self.angle = self.angle + angle", "def _rotation_from_gradient(self,m):\n\t\ttheta = -np.arctan(m)\n\t\tself.current_theta = theta\n\t\treturn self._rotation_from_angle(theta)", "def rotate(self, matrix):\n n = len(matrix)\n \n for circle in range(n//2):\n r_circle = n - circle - 1\n for i in range(circle, n - circle - 1):\n a = matrix[circle][i]\n b, matrix[i][r_circle] = matrix[i][r_circle], a\n c, matrix[r_circle][n - i - 1] = matrix[r_circle][n - i - 1], b\n d, matrix[n - i - 1][circle] = matrix[n - i - 1][circle], c\n matrix[circle][i] = d", "def align(image):\n angle = find_angle(image)\n image = rotate(image, angle)\n return image", "def rotate(self, *args, **kwargs):\n return _image.image_rotate(self, *args, **kwargs)", "def _rotationMatrix(self, n_dim, theta):\n i = np.identity(n_dim)\n c, s = np.cos(theta)*i, np.sin(theta)*i\n rotation = np.bmat([[c, s], [-s, c]])\n return rotation", "def rotate(self, angle):\n image_center = np.array(self.img.shape[1::-1]) / 2\n rot_mat = cv2.getRotationMatrix2D(tuple(image_center), angle, 1.0)\n\n self.img = cv2.warpAffine(\n self.img, rot_mat, self.img.shape[1::-1], flags=cv2.INTER_LINEAR\n )\n\n self.edits.append(f\"rotate:{angle}\")\n return self", "def rotate(self, yaw):\n rotation_matrix = tfs.rotation_matrix(yaw, (0, 0, 1))[:2, :2]\n return np.matmul(rotation_matrix, self).view(Vector)", "def rotation(self, phi, mode):\n self.circuit.phase_shift(phi, self._remap_modes(mode))", "def rotation_matrix(rotate):\n tx, ty, tz = rotate\n Rx = np.array([[1, 0, 0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])\n Ry = np.array([[np.cos(ty), 0, -np.sin(ty)], [0, 1, 0], [np.sin(ty), 0, np.cos(ty)]])\n Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0, 0, 1]])\n return np.dot(Rx, np.dot(Ry, Rz))", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * 
self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. - txx - tyy\n\n return rot", "def keypoint_rotate(keypoint, angle, rows, cols, **params):\n center = (cols - 1) * 0.5, (rows - 1) * 0.5\n matrix = cv2.getRotationMatrix2D(center, angle, 1.0)\n x, y, a, s = keypoint[:4]\n x, y = cv2.transform(np.array([[[x, y]]]), matrix).squeeze()\n return x, y, a + math.radians(angle), s", "def rotate(img, angle, resample=False, expand=False, center=None):\r\n \r\n return img.rotate(angle, resample, expand, center)", "def _rotate(polyreg, i=None, j=None, u=None, v=None, theta=None, R=None):\n # determine the rotation matrix based on inputs\n if R is not None:\n logger.debug(\"rotate: R=\\n{}\".format(R))\n if i is not None:\n raise ValueError(i)\n if j is not None:\n raise ValueError(j)\n if theta is not None:\n raise ValueError(theta)\n if u is not None:\n raise ValueError(u)\n if v is not None:\n raise ValueError(v)\n elif i is not None and j is not None and theta is not None:\n logger.info(\"rotate via indices and angle.\")\n if R is not None:\n raise ValueError(R)\n if u is not None:\n raise ValueError(u)\n if v is not None:\n raise ValueError(v)\n if i == j:\n raise ValueError(\"Must provide two unique basis vectors.\")\n R = givens_rotation_matrix(i, j, theta, polyreg.dim)\n elif u is not None and v is not None:\n logger.info(\"rotate via 2 vectors.\")\n if R is not None:\n raise ValueError(R)\n if i is not None:\n raise ValueError(i)\n if j is not None:\n raise ValueError(j)\n if theta is not None:\n raise ValueError(theta)\n R = solve_rotation_ap(u, v)\n else:\n raise ValueError(\"R or (i and j and theta) or (u and v) \"\n \"must be defined.\")\n if isinstance(polyreg, Polytope):\n # Ensure that half space is normalized before rotation\n n, p = _hessian_normal(polyreg.A, polyreg.b)\n # Rotate the hyperplane normals\n polyreg.A = np.inner(n, R)\n polyreg.b = p\n else:\n # Rotate subregions\n for poly in polyreg.list_poly:\n _rotate(poly, None, None, R=R)\n # transform bbox and cheby\n if polyreg.bbox is not None:\n polyreg.bbox = (np.inner(polyreg.bbox[0].T, R).T,\n np.inner(polyreg.bbox[1].T, R).T)\n if polyreg._chebXc is not None:\n polyreg._chebXc = np.inner(polyreg._chebXc, R)\n return R", "def rotator(angle):\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c,-s],[s,c]])", "def fix_rotation(self):\n self.rotate(self.rotation)\n self.annotations.rotate(self.rotation)\n self.rotation = 0", "def rotate(self,center, angle):\n \n self.coord = [x-np.repeat([[center[0],center[1]]],[x.shape[0]],axis = 0) for x in self.coord]\n\n alpha = angle\n R = np.array([[np.cos(alpha),-np.sin(alpha)],[np.sin(alpha),np.cos(alpha)]])\n \n for i in range(len(self.coord)):\n self.coord[i] = np.squeeze([np.dot([x],R) for x in self.coord[i]])\n\n self.coord = [x+np.repeat([[center[0],center[1]]],[x.shape[0]],axis = 0) for x in self.coord]\n\n return self", "def rotation(self, phi, mode):\n self.circuit.phase_shift(phi, mode)", "def __rotate_model(self):\n self.__model_matrix = self.__get_rotation_matrix(\n self.__face.position_cartesian,\n (1 + self.__face.position[2]) * 0.5)", "def apply_rotation_x(self, eta=0.0 ):\n \n eta = radians(eta)\n new_rotation_matrix = [[ 1 , 0 , 0 ],\n [ 0 , +cos(eta) , -sin(eta) ],\n [ 0 , +sin(eta) , +cos(eta) ]] \n \n self.rotation_matrix_exp = 
np.dot( new_rotation_matrix, self.rotation_matrix_exp )" ]
[ "0.73167443", "0.73074406", "0.7229753", "0.7127177", "0.7113275", "0.7051355", "0.6992302", "0.6686556", "0.66458803", "0.66007537", "0.657009", "0.6538239", "0.6492567", "0.64686793", "0.6467943", "0.6442462", "0.64174455", "0.63961196", "0.6388264", "0.63866466", "0.63760054", "0.6368504", "0.63672537", "0.63672537", "0.6365199", "0.63406956", "0.63355124", "0.6325487", "0.63123417", "0.6312132", "0.6274328", "0.62583435", "0.62266237", "0.62117803", "0.6209411", "0.61521596", "0.6146725", "0.61100966", "0.61060613", "0.61038494", "0.6097206", "0.60961854", "0.6092604", "0.6088147", "0.60809153", "0.6078204", "0.6069734", "0.6041528", "0.6030295", "0.60239816", "0.601961", "0.60133547", "0.5996416", "0.5995912", "0.5989014", "0.59862995", "0.5986245", "0.59836257", "0.59782624", "0.597726", "0.59758997", "0.59740967", "0.5970379", "0.59592766", "0.5957963", "0.5955848", "0.59473217", "0.5944798", "0.5944029", "0.59329593", "0.5908772", "0.5894153", "0.5889308", "0.5888876", "0.58877623", "0.5887166", "0.5879811", "0.58780724", "0.5876205", "0.58728635", "0.5872826", "0.5852149", "0.58506614", "0.58456385", "0.5845091", "0.58430165", "0.5828852", "0.5827741", "0.5826474", "0.5825847", "0.5824189", "0.5821436", "0.5821264", "0.5806832", "0.58050036", "0.58040315", "0.58011824", "0.57980055", "0.57854044", "0.57780397" ]
0.79376036
0
Rotates the adp with its corresponding rotation matrix.
def rotate_adp2(adp, rotmat, cell):
    adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],
                     [float(adp[3]), float(adp[1]), float(adp[5])],
                     [float(adp[4]), float(adp[5]), float(adp[2])]])
    rotmat = np.linalg.inv(rotmat)
    rotmatT = np.transpose(rotmat)
    Nmat = np.matrix([[1 / cell[0], 0, 0],
                      [0, 1 / cell[1], 0],
                      [0, 0, 1 / cell[2]]])
    Nmat = np.linalg.inv(Nmat)
    NmatT = np.transpose(Nmat)

    adp = np.dot(rotmat, adp)
    adp = np.dot(adp, rotmatT)

    adp = np.dot(Nmat, adp)
    adp = np.dot(adp, NmatT)

    adp = np.array(adp).flatten().tolist()
    return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]
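A minimal usage sketch for the positive document above (illustrative only; it assumes numpy is imported as np and that adp[3], adp[4], adp[5] fill the (1,2), (1,3) and (2,3) off-diagonals, which is what the matrix construction in the code implies). With an identity rotation and a hypothetical unit cell of edge length 1, the normalisation matrix reduces to the identity, so the input should come back unchanged:

import numpy as np

adp = [0.02, 0.03, 0.04, 0.001, 0.002, 0.003]  # U11, U22, U33, then the three off-diagonal terms
rotmat = np.identity(3)                        # no rotation
cell = [1.0, 1.0, 1.0]                         # hypothetical cell; makes Nmat the identity

rotated = rotate_adp2(adp, rotmat, cell)       # expected to equal the input list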
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotate_adp(adp, rotmat):\n\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmatT, adp)\n adp = np.dot(adp, rotmat)\n # print '=\\n',adp,'\\n-------------------------------------------------\\n\\n\\n\\n\\n\\n'\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]", "def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))", "def rotate_adp3(adp, rotmat, cell):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmati = np.matrix(rotmat)\n rotmatiT = np.transpose(rotmati)\n rotmat = np.linalg.inv(rotmat)\n\n Nmat = np.matrix([[1 / cell[0], 0, 0],\n [0, 1 / cell[1], 0],\n [0, 0, 1 / cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n adp = np.dot(rotmati, adp)\n adp = np.dot(adp, rotmatiT)\n\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]", "def rotate_adp_reverse(adp, rotmat):\n\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]", "def rotate(mat,angle):\n return np.dot(Jones.rotator(angle), np.dot(mat, Jones.rotator(-angle)))", "def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot", "def _rotate(self, affine):\n dims = affine.shape[0]\n if not np.isscalar(self.rotation):\n raise Exception('this class requires exactly one entry for rotation!')\n theta = (self.deformrandomstate.rand() - 0.5) * 2 * self.rotation\n if dims == 4:\n\n # sample unit vector:\n u = np.random.random(3)\n u /= np.sqrt(np.sum([uu ** 2 for uu in u]) + 1e-8)\n ct = np.cos(theta)\n st = np.sin(theta)\n rot = np.eye(4)\n rot[:3, :3] = [\n [ct + u[0] ** 2 * (1 - ct), u[0] * u[1] * (1 - ct) - u[2] * st, u[0] * u[2] * (1 - ct) + u[2] * st],\n [u[1] * u[0] * (1 - ct) + u[2] * st, ct + u[1] ** 2 * (1 - ct), u[1] * u[2] * (1 - ct) - u[0] * st],\n [u[2] * u[0] * (1 - ct) - u[1] * st, u[2] * u[1] * (1 - ct) + u[0] * st, ct + u[2] ** 2 * (1 - ct)]]\n\n elif dims == 3:\n rot = np.eye(3)\n rot[:2, :2] = np.asarray([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]])\n else:\n raise Exception(\n 'implement this for each dimension, since not yet implemented for dimension {}'.format(dims))\n\n return np.matmul(rot, affine)", "def rotate_z(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [math.cos(a),math.sin(a),0,0],\n [-math.sin(a),math.cos(a),0,0],\n [0,0,0,0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)", "def rotate(self, a):\n ca = cos(a)\n sa = sin(a)\n rM = Matrix([\n [ca, -sa],\n [sa, ca]\n ])\n p0 = self.p0\n self.c = p0 + rM @ (self.c - p0)\n dp = p0 - self.c\n self.a0 = atan2(dp.y, dp.x)\n return self", "def rotate(self, angle):\n\t\tif not isinstance(angle, Angle):\n\t\t\tangle = Angle(angle)\n\t\treturn 
angle.matrix() * self", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n beta = self.beta_sample()\n R = Rotator.rotation_matrix(alpha,beta,0.0)\n X = np.dot(R, X)\n if self.random_flip and (random.rand() > 0.5):\n X[2,:] = -X[2,:]\n X[1,:] = -X[1,:]\n return X", "def apply_rotation(self, eta=0.0, phi=0.0, theta=0.0):\n \n new_rotation_matrix = self.rotation_elements( eta, phi, theta )\n \n #self.rotation_matrix_exp = np.dot( self.rotation_matrix_exp , new_rotation_matrix )\n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def rotate(p,q,A,V): \n n = A.shape[0]\n App, Aqq, Apq = A[p,p], A[q,q], A[p,q] #Initial values\n phi = 0.5*math.atan2(2*Apq, Aqq-App) #Find the rotation value\n c, s = math.cos(phi), math.sin(phi) #Calculate sin and cos\n\n #Update the matrix diagonal elements\n A[p,p] = c*c*App + s*s*Aqq - 2*s*c*Apq \n A[q,q] = s*s*App + c*c*Aqq + 2*s*c*Apq\n A[p,q] = 0 #This is zero by construction\n \n \n #Iterate over and update remaining off-diagonal elements\n for i in range(p):\n Aip, Aiq = A[i,p], A[i,q]\n A[i,p] = c*Aip - s*Aiq\n A[i,q] = c*Aiq + s*Aip\n \n for i in range(p+1,q):\n Api, Aiq = A[p,i], A[i,q]\n A[p,i] = c*Api - s*Aiq\n A[i,q] = c*Aiq + s*Api\n \n for i in range(q+1,n):\n Api, Aqi = A[p,i], A[q,i]\n A[p,i] = c*Api - s*Aqi\n A[q,i] = c*Aqi + s*Api\n \n #Update eigenvectors in matrix V\n for i in range(n):\n Vip, Viq = V[i,p], V[i,q]\n V[i,p] = c*Vip - s*Viq\n V[i,q] = s*Vip + c*Viq\n \n return A, V", "def rotate_x(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [1,0,0,0],\n [0,math.cos(a),math.sin(a),0],\n [0,-math.sin(a),math.cos(a),0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def rotate(self, angle, reshape=False):\n return IntensityMap.rotate(self, angle, reshape=reshape)", "def rotate(self, angle):\n perp = TwoDV(-self[1], self[0])\n angle = angle * math.pi / 180.0\n c, s = math.cos(angle), math.sin(angle)\n return TwoDV(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s)", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def rotate_3D(atom, source_atom):\n from lauescript.cryst.match import get_transform\n\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n\n matrix = get_transform(lst1, lst2, matrix=True)\n\n adp = source_atom.adp['cart_int']\n\n atom.adp['cart_int'] = rotate_adp(adp, matrix)", "def _rotate(self, angle):\n if self.undobuffer:\n self.undobuffer.push((\"rot\", angle, self._degreesPerAU))\n angle *= self._degreesPerAU\n neworient = self._orient.rotate(angle)\n tracing = self.screen._tracing\n if tracing == 1 and self._speed > 0:\n anglevel = 3.0 * self._speed\n steps = 1 + int(abs(angle)/anglevel)\n delta = 1.0*angle/steps\n for _ in range(steps):\n self._orient = self._orient.rotate(delta)\n self._update()\n self._orient = neworient\n self._update()", "def rotate_ADP_about_axis(ADP, angle, axisDirection):\n adp = get_adp_as_matrix(ADP)\n u, v = np.linalg.eig(adp)\n startPoints = [v[:, i].flatten().tolist()[0] for i in xrange(3)]\n endPoints = [rotate_point_about_axis(point, angle, axisDirection, (0, 0, 0)) for point in startPoints]\n rotMat = get_transform(startPoints, endPoints, matrix=True).transpose()\n newadp = np.dot(rotMat.transpose(), np.dot(adp, rotMat))\n return newadp[0, 0], newadp[1, 1], newadp[2, 2], newadp[0, 1], newadp[0, 2], newadp[1, 2]", "def rotation(self, p1, p2, p3):\n return (p2[0] - 
p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])", "def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)", "def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)", "def rotate(self):\n pass", "def _rotate(self, arr, theta):\n # Rotation Matrix R\n R = [[np.cos(theta), -np.sin(theta)], \n [np.sin(theta), np.cos(theta)]]\n\n return np.matmul(R, arr.T).T", "def rotate(self, a):\n ca = cos(a)\n sa = sin(a)\n self.v = Matrix([\n [ca, -sa],\n [sa, ca]\n ]) @ self.v\n return self", "def rotate(self,r):\n return r.hprod( self.hprod( r.inv() ) )", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n R = Rotator.rotation_matrix(alpha,0.0,0.0)\n return np.dot(R,X)", "def _rot(axis, angle):\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])", "def rotate_y(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [math.cos(a),0,-math.sin(a),0],\n [0,1,0,0],\n [math.sin(a),0,math.cos(a),0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def rotate(self, angle=0.0):\n # TODO: Implement the rotate function. Remember to record the value of\n # rotation degree.\n self.rotDegree = angle\n self.x = rotate(self.x, angle = angle, axes=(0, 1), reshape=False, \n output=None, order=3, mode='constant', cval=0.0, prefilter=True)\n # This rotation isn't working correctly. 
Get shit for non right anlge rotatations\n # raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n\n beta = np.arccos(1.0-2*random.rand())\n psi = random.rand() * 2*pi\n\n R = Rotator.rotation_matrix(alpha,beta,psi)\n return np.dot(R,X)", "def Rotate(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Rotate(*args, **kwargs)", "def mrotate(self):\n result_matrix = [[0 for col in range(len(self.matrix[0]))] for row in range(len(self.matrix))]\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[0])):\n result_matrix[i][j] = self.matrix[i][len(self.matrix[0]) - 1 - j]\n # left turn -> result_matrix[i][j] = self.matrix[len(self.matrix) - 1 - i][j]\n self.matrix = result_matrix\n pass", "def rotate(self, angle):\n perp = Vec2D(-self[1], self[0])\n angle = angle * math.pi / 180.0\n c, s = math.cos(angle), math.sin(angle)\n return Vec2D(self[0] * c + perp[0] * s, self[1] * c + perp[1] * s)", "def rotate(self, angle):\n rotmat = rotation_matrix_2d(angle)\n rotated = np.dot(rotmat.T, [self.pix_x.value, self.pix_y.value])\n self.pix_x = rotated[0] * self.pix_x.unit\n self.pix_y = rotated[1] * self.pix_x.unit\n self.pix_rotation -= angle", "def rot(self, t=0., transposed=False):\n rotmat = np.array(\n [[np.cos(self._pa+self._omegab*t),np.sin(self._pa+self._omegab*t)],\n [-np.sin(self._pa+self._omegab*t),np.cos(self._pa+self._omegab*t)]])\n if transposed:\n return rotmat.T\n else:\n return rotmat", "def rotate(self, matrix: list[list[int]]) -> None:", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def angle_to_rotation_matrix(angle: torch.Tensor) -> torch.Tensor:\n ang_rad = deg2rad(angle)\n cos_a: torch.Tensor = torch.cos(ang_rad)\n sin_a: torch.Tensor = torch.sin(ang_rad)\n return torch.stack([cos_a, sin_a, -sin_a, cos_a], dim=-1).view(*angle.shape, 2, 2)", "def rotation(self, *args, **kwargs) -> Any:\n pass", "def _rotate_(self, x: np.array, m: np.array) -> (np.array, np.array):\n # get a random angle\n angle = np.random.randint(0, self.rotate)\n # get a random sign for the angle\n sign = np.random.randint(0, 2)\n x = rotate(x, -sign * angle, reshape=False)\n m = rotate(m, -sign * angle, axes=(0, 1),\n mode='nearest',\n reshape=False)\n return x, m", "def test_rotation(self):\n quat_rotated = rowan.rotate(input1, vector_inputs)\n\n matrices = rowan.to_matrix(input1)\n matrix_rotated = np.einsum(\"ijk,ki->ij\", matrices, vector_inputs.T)\n self.assertTrue(np.allclose(matrix_rotated, quat_rotated))", "def RotationMatrix(theta, x, y, z, point=None):\n\treturn mach.rotation_matrix(theta, [x, y, z])", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 
],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def rotate(self, vect, angle):\n self.pl.Rotation = Rotation(vect, angle)\n\n self.comp.Placement = self.pl\n self.box.Placement = self.pl", "def rotate(self, angle):\n old_angle, tilt = self.rotation\n new_angle = old_angle + angle\n while new_angle > 90:\n new_angle = new_angle - 90\n while angle < -90:\n new_angle = new_angle + 90\n self.rotation = (new_angle, tilt)", "def rotate_matrix(angle):\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, s],\n [-s, c]])", "def rotate(img, angle):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n aug = iaa.Affine(rotate=angle)\n return aug.augment_image(img)", "def rotMatrix( source = None ):\n if source is None:\n return None,None\n else:\n (x,y,z, a) = source\n if a % TWOPI:\n return tmatrixaccel.rotMatrix( x,y,z,a ),tmatrixaccel.rotMatrix( x,y,z,-a )\n return None,None", "def _altaz_rotation(self, jd):\n R_lon = rot_z(- self.longitude.radians - jd.gast * TAU / 24.0)\n return einsum('ij...,jk...,kl...->il...', self.R_lat, R_lon, jd.M)", "def rotate(self, theta, legs):\n U, onew = rotationTensor(theta, self.symmetries, legs)\n B = U @ self\n new = list(onew)\n old = list(legs)\n if B.internallegs != self.internallegs:\n old.append(self.internallegs[0])\n new.append(B.internallegs[0])\n B.swaplegs({n: o for o, n in zip(old, new)})\n return B.couplingAddapt(self.coupling)", "def rotate(self, angle, point=None):\n if not point:\n point = self.middle\n self.p1.rotate(angle, point)\n self.p2.rotate(angle, point)", "def rotate_z(self, angle):\n angle *= np.pi / 180\n return self.transform(np.matrix([[np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]]))", "def rotate(self, angle, axis):\r\n R=self.rotation(angle, axis)\r\n self.mlist = (self*R).mlist\r\n return self", "def rotate(X):\n return X", "def rotate_about(self, p, theta):\n result = self.clone()\n result.slide(-p.x, -p.y)\n result.rotate(theta)\n result.slide(p.x, p.y)\n return result", "def rotate(a, ps, axe=0):\r\n\r\n sin = np.sin(a)\r\n cos = np.cos(a)\r\n rm = np.array([[[1, 0, 0], [0, cos, -sin], [0, sin, cos]],\r\n [[cos, 0, sin], [0, 1, 0], [-sin, 0, cos]],\r\n [[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]]])\r\n m = np.full((len(ps), 3, 3), rm[axe])\r\n ps = map(lambda x, y: np.dot(x, y), m, ps)\r\n return ps", "def rotate(self, rotation):\n\t\tif not isinstance(rotation,Rotation):\n\t\t\trotation = Rotation(*rotation)\n\t\treturn rotation.matrix() * self", "def rotate(self, *args, **kwargs): # real signature unknown\n pass", "def rotate(self):\n\n last_center = self.rect.center\n self.image = pg.transform.rotate(self.image_copy,self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = last_center\n self.angle +=self.rotate_by", "def rotation(self, angle, axis):\r\n\r\n sqr_a = axis.x*axis.x\r\n sqr_b = axis.y*axis.y\r\n sqr_c = axis.z*axis.z\r\n len2 = sqr_a+sqr_b+sqr_c\r\n\r\n k2 = math.cos(angle)\r\n k1 = (1.0-k2)/len2\r\n k3 = math.sin(angle)/math.sqrt(len2)\r\n k1ab = k1*axis.x*axis.y\r\n k1ac = k1*axis.x*axis.z\r\n k1bc = k1*axis.y*axis.z\r\n k3a = k3*axis.x\r\n k3b = k3*axis.y\r\n k3c = k3*axis.z\r\n\r\n return mat4( k1*sqr_a+k2, k1ab-k3c, k1ac+k3b, 0.0,\r\n k1ab+k3c, k1*sqr_b+k2, k1bc-k3a, 0.0,\r\n k1ac-k3b, k1bc+k3a, k1*sqr_c+k2, 0.0,\r\n 0.0, 0.0, 0.0, 1.0)", "def intermediateJacPol2Rot(self,x):\n allS = np.sin(x[0,:])\n allC = np.cos(x[0,:])\n allR = x[1,:]\n \n Jac = 
Idn(x.shape[1],self._dim)\n Jac[:,0,0] = -allS*allR\n Jac[:,0,1] = allC\n Jac[:,1,0] = allC*allR\n Jac[:,1,1] = allS\n return Jac", "def rotate(self, angle=pi, point=None):\n if not point: point = Point.origin(d=self.dimension)\n v = Vector.createFromTwoPoints(point, self)\n v.rotate(angle)\n self.components = v(point).components", "def rotate(self, angle):\n self.call('rotate', angle)", "def reflect_adp(adp, planev):\n M = np.identity(4)\n M[:3, :3] -= 2.0 * np.outer(planev, planev)\n M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev\n\n return rotate_adp(adp, M[:3, :3])", "def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def rotate_about(self, p, theta):\n result = self.clone()\n result.translate(-p.x, -p.y)\n result.rotate(theta)\n result.translate(p.x, p.y)\n return result", "def rotateZ(self, angle):\r\n if angle:\r\n c = cos(radians(angle))\r\n s = sin(radians(angle))\r\n self.mtrx = dot([[c, s, 0, 0],\r\n [-s, c, 0, 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 0, 1]],\r\n self.mtrx)\r\n self.rtn[2] = angle\r\n self.was_moved = True", "def orientation_ras_lps(affine: NdarrayTensor) -> NdarrayTensor:\n sr = max(affine.shape[0] - 1, 1) # spatial rank is at least 1\n flip_d = [[-1, 1], [-1, -1, 1], [-1, -1, 1, 1]]\n flip_diag = flip_d[min(sr - 1, 2)] + [1] * (sr - 3)\n if isinstance(affine, torch.Tensor):\n return torch.diag(torch.as_tensor(flip_diag).to(affine)) @ affine # type: ignore\n return np.diag(flip_diag).astype(affine.dtype) @ affine # type: ignore", "def rotate(self, rotation):\n self.coords = dot(rotation, self.coords)\n return self", "def rotate(matrix: List[List[int]]) -> None:\n if matrix is None:\n return\n\n # transpose\n for i in range(0, len(matrix)):\n for j in range(i, len(matrix[0])):\n temp = matrix[i][j]\n matrix[i][j] = matrix[j][i]\n matrix[j][i] = temp\n # reflect\n for i in range(0, len(matrix)):\n for j in range(0, len(matrix[0]) // 2):\n reflection = len(matrix[0]) - j - 1\n temp = matrix[i][j]\n matrix[i][j] = matrix[i][reflection]\n matrix[i][reflection] = temp", "def rotate(x: torch.Tensor, angle: int) -> torch.Tensor:\n # B C H W\n h_dim = 2\n w_dim = 3\n\n if angle == 0:\n return x\n elif angle == 90:\n return x.flip(w_dim).transpose(h_dim, w_dim)\n elif angle == 180:\n return x.flip(w_dim).flip(h_dim)\n elif angle == 270:\n return x.flip(h_dim).transpose(h_dim, w_dim)\n else:\n raise NotImplementedError(\"Must be rotation divisible by 90 degrees\")", "def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def apply_rotation_z(self, theta=0.0 ):\n \n theta = 
radians(theta)\n new_rotation_matrix = [[ +cos(theta) , -sin(theta) , 0 ],\n [ +sin(theta) , +cos(theta) , 0 ],\n [ 0 , 0 , 1 ]] \n \n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def qrotate(points, axis, theta):\n q = Quaternion.rotator(axis, theta)\n return q.rotate(points)", "def rotatePivot(rotation):\r\n # Rotate in object mode X\r\n bpy.ops.object.mode_set(mode='OBJECT')\r\n bpy.ops.transform.rotate(value=rotation.x, axis=(1,0,0), constraint_orientation='GLOBAL')\r\n # rotate in edit mode X\r\n bpy.ops.object.mode_set(mode='EDIT')\r\n bpy.ops.mesh.select_all(action='SELECT')\r\n bpy.ops.transform.rotate(value=-rotation.x, axis=(1,0,0), constraint_orientation='GLOBAL')\r\n # Rotate in object mode Y\r\n bpy.ops.object.mode_set(mode='OBJECT')\r\n bpy.ops.transform.rotate(value=rotation.y, axis=(0,1,0), constraint_orientation='GLOBAL')\r\n # rotate in edit mode Y\r\n bpy.ops.object.mode_set(mode='EDIT')\r\n bpy.ops.transform.rotate(value=-rotation.y, axis=(0,1,0), constraint_orientation='GLOBAL')\r\n # Rotate in object mode Z\r\n bpy.ops.object.mode_set(mode='OBJECT')\r\n bpy.ops.transform.rotate(value=rotation.z, axis=(0,0,1), constraint_orientation='GLOBAL')\r\n # rotate in edit mode Z\r\n bpy.ops.object.mode_set(mode='EDIT')\r\n bpy.ops.transform.rotate(value=-rotation.z, axis=(0,0,1), constraint_orientation='GLOBAL')\r\n # return to object mode\r\n bpy.ops.object.mode_set(mode='OBJECT')", "def srotate(self, angle):\n\n self.angle = self.angle + angle", "def _rotation_from_gradient(self,m):\n\t\ttheta = -np.arctan(m)\n\t\tself.current_theta = theta\n\t\treturn self._rotation_from_angle(theta)", "def rotate(self, matrix):\n n = len(matrix)\n \n for circle in range(n//2):\n r_circle = n - circle - 1\n for i in range(circle, n - circle - 1):\n a = matrix[circle][i]\n b, matrix[i][r_circle] = matrix[i][r_circle], a\n c, matrix[r_circle][n - i - 1] = matrix[r_circle][n - i - 1], b\n d, matrix[n - i - 1][circle] = matrix[n - i - 1][circle], c\n matrix[circle][i] = d", "def align(image):\n angle = find_angle(image)\n image = rotate(image, angle)\n return image", "def rotate(self, *args, **kwargs):\n return _image.image_rotate(self, *args, **kwargs)", "def _rotationMatrix(self, n_dim, theta):\n i = np.identity(n_dim)\n c, s = np.cos(theta)*i, np.sin(theta)*i\n rotation = np.bmat([[c, s], [-s, c]])\n return rotation", "def rotate(self, angle):\n image_center = np.array(self.img.shape[1::-1]) / 2\n rot_mat = cv2.getRotationMatrix2D(tuple(image_center), angle, 1.0)\n\n self.img = cv2.warpAffine(\n self.img, rot_mat, self.img.shape[1::-1], flags=cv2.INTER_LINEAR\n )\n\n self.edits.append(f\"rotate:{angle}\")\n return self", "def rotate(self, yaw):\n rotation_matrix = tfs.rotation_matrix(yaw, (0, 0, 1))[:2, :2]\n return np.matmul(rotation_matrix, self).view(Vector)", "def rotation(self, phi, mode):\n self.circuit.phase_shift(phi, self._remap_modes(mode))", "def rotation_matrix(rotate):\n tx, ty, tz = rotate\n Rx = np.array([[1, 0, 0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])\n Ry = np.array([[np.cos(ty), 0, -np.sin(ty)], [0, 1, 0], [np.sin(ty), 0, np.cos(ty)]])\n Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0, 0, 1]])\n return np.dot(Rx, np.dot(Ry, Rz))", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * 
self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. - txx - tyy\n\n return rot", "def keypoint_rotate(keypoint, angle, rows, cols, **params):\n center = (cols - 1) * 0.5, (rows - 1) * 0.5\n matrix = cv2.getRotationMatrix2D(center, angle, 1.0)\n x, y, a, s = keypoint[:4]\n x, y = cv2.transform(np.array([[[x, y]]]), matrix).squeeze()\n return x, y, a + math.radians(angle), s", "def rotate(img, angle, resample=False, expand=False, center=None):\r\n \r\n return img.rotate(angle, resample, expand, center)", "def _rotate(polyreg, i=None, j=None, u=None, v=None, theta=None, R=None):\n # determine the rotation matrix based on inputs\n if R is not None:\n logger.debug(\"rotate: R=\\n{}\".format(R))\n if i is not None:\n raise ValueError(i)\n if j is not None:\n raise ValueError(j)\n if theta is not None:\n raise ValueError(theta)\n if u is not None:\n raise ValueError(u)\n if v is not None:\n raise ValueError(v)\n elif i is not None and j is not None and theta is not None:\n logger.info(\"rotate via indices and angle.\")\n if R is not None:\n raise ValueError(R)\n if u is not None:\n raise ValueError(u)\n if v is not None:\n raise ValueError(v)\n if i == j:\n raise ValueError(\"Must provide two unique basis vectors.\")\n R = givens_rotation_matrix(i, j, theta, polyreg.dim)\n elif u is not None and v is not None:\n logger.info(\"rotate via 2 vectors.\")\n if R is not None:\n raise ValueError(R)\n if i is not None:\n raise ValueError(i)\n if j is not None:\n raise ValueError(j)\n if theta is not None:\n raise ValueError(theta)\n R = solve_rotation_ap(u, v)\n else:\n raise ValueError(\"R or (i and j and theta) or (u and v) \"\n \"must be defined.\")\n if isinstance(polyreg, Polytope):\n # Ensure that half space is normalized before rotation\n n, p = _hessian_normal(polyreg.A, polyreg.b)\n # Rotate the hyperplane normals\n polyreg.A = np.inner(n, R)\n polyreg.b = p\n else:\n # Rotate subregions\n for poly in polyreg.list_poly:\n _rotate(poly, None, None, R=R)\n # transform bbox and cheby\n if polyreg.bbox is not None:\n polyreg.bbox = (np.inner(polyreg.bbox[0].T, R).T,\n np.inner(polyreg.bbox[1].T, R).T)\n if polyreg._chebXc is not None:\n polyreg._chebXc = np.inner(polyreg._chebXc, R)\n return R", "def rotator(angle):\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c,-s],[s,c]])", "def fix_rotation(self):\n self.rotate(self.rotation)\n self.annotations.rotate(self.rotation)\n self.rotation = 0", "def rotate(self,center, angle):\n \n self.coord = [x-np.repeat([[center[0],center[1]]],[x.shape[0]],axis = 0) for x in self.coord]\n\n alpha = angle\n R = np.array([[np.cos(alpha),-np.sin(alpha)],[np.sin(alpha),np.cos(alpha)]])\n \n for i in range(len(self.coord)):\n self.coord[i] = np.squeeze([np.dot([x],R) for x in self.coord[i]])\n\n self.coord = [x+np.repeat([[center[0],center[1]]],[x.shape[0]],axis = 0) for x in self.coord]\n\n return self", "def rotation(self, phi, mode):\n self.circuit.phase_shift(phi, mode)", "def __rotate_model(self):\n self.__model_matrix = self.__get_rotation_matrix(\n self.__face.position_cartesian,\n (1 + self.__face.position[2]) * 0.5)", "def apply_rotation_x(self, eta=0.0 ):\n \n eta = radians(eta)\n new_rotation_matrix = [[ 1 , 0 , 0 ],\n [ 0 , +cos(eta) , -sin(eta) ],\n [ 0 , +sin(eta) , +cos(eta) ]] \n \n self.rotation_matrix_exp = 
np.dot( new_rotation_matrix, self.rotation_matrix_exp )" ]
[ "0.79376036", "0.73167443", "0.7229753", "0.7127177", "0.7113275", "0.7051355", "0.6992302", "0.6686556", "0.66458803", "0.66007537", "0.657009", "0.6538239", "0.6492567", "0.64686793", "0.6467943", "0.6442462", "0.64174455", "0.63961196", "0.6388264", "0.63866466", "0.63760054", "0.6368504", "0.63672537", "0.63672537", "0.6365199", "0.63406956", "0.63355124", "0.6325487", "0.63123417", "0.6312132", "0.6274328", "0.62583435", "0.62266237", "0.62117803", "0.6209411", "0.61521596", "0.6146725", "0.61100966", "0.61060613", "0.61038494", "0.6097206", "0.60961854", "0.6092604", "0.6088147", "0.60809153", "0.6078204", "0.6069734", "0.6041528", "0.6030295", "0.60239816", "0.601961", "0.60133547", "0.5996416", "0.5995912", "0.5989014", "0.59862995", "0.5986245", "0.59836257", "0.59782624", "0.597726", "0.59758997", "0.59740967", "0.5970379", "0.59592766", "0.5957963", "0.5955848", "0.59473217", "0.5944798", "0.5944029", "0.59329593", "0.5908772", "0.5894153", "0.5889308", "0.5888876", "0.58877623", "0.5887166", "0.5879811", "0.58780724", "0.5876205", "0.58728635", "0.5872826", "0.5852149", "0.58506614", "0.58456385", "0.5845091", "0.58430165", "0.5828852", "0.5827741", "0.5826474", "0.5825847", "0.5824189", "0.5821436", "0.5821264", "0.5806832", "0.58050036", "0.58040315", "0.58011824", "0.57980055", "0.57854044", "0.57780397" ]
0.73074406
2
Rotates the adp with its corresponding rotation matrix.
def rotate_adp3(adp, rotmat, cell):
    adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],
                     [float(adp[3]), float(adp[1]), float(adp[5])],
                     [float(adp[4]), float(adp[5]), float(adp[2])]])
    rotmati = np.matrix(rotmat)
    rotmatiT = np.transpose(rotmati)
    rotmat = np.linalg.inv(rotmat)

    Nmat = np.matrix([[1 / cell[0], 0, 0],
                      [0, 1 / cell[1], 0],
                      [0, 0, 1 / cell[2]]])
    Nmat = np.linalg.inv(Nmat)
    NmatT = np.transpose(Nmat)
    adp = np.dot(rotmati, adp)
    adp = np.dot(adp, rotmatiT)

    adp = np.dot(Nmat, adp)
    adp = np.dot(adp, NmatT)

    adp = np.array(adp).flatten().tolist()
    return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]
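Read as matrix algebra, the function above assembles the six components into a symmetric matrix U, rotates it as R U R^T (the np.linalg.inv(rotmat) computed on the way is never used afterwards), rescales with the cell matrix, and returns the six independent components; rotate_adp2 earlier in this section differs only in using R^{-1} in place of R. A compact statement of the transform, using U for the assembled ADP matrix and R for rotmat:

U' = N (R U R^T) N^T,    with N = diag(cell[0], cell[1], cell[2])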
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotate_adp(adp, rotmat):\n\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmatT, adp)\n adp = np.dot(adp, rotmat)\n # print '=\\n',adp,'\\n-------------------------------------------------\\n\\n\\n\\n\\n\\n'\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]", "def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))", "def rotate_adp2(adp, rotmat, cell):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmat = np.linalg.inv(rotmat)\n rotmatT = np.transpose(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0],\n [0, 1 / cell[1], 0],\n [0, 0, 1 / cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]", "def rotate_adp_reverse(adp, rotmat):\n\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]", "def rotate(mat,angle):\n return np.dot(Jones.rotator(angle), np.dot(mat, Jones.rotator(-angle)))", "def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot", "def _rotate(self, affine):\n dims = affine.shape[0]\n if not np.isscalar(self.rotation):\n raise Exception('this class requires exactly one entry for rotation!')\n theta = (self.deformrandomstate.rand() - 0.5) * 2 * self.rotation\n if dims == 4:\n\n # sample unit vector:\n u = np.random.random(3)\n u /= np.sqrt(np.sum([uu ** 2 for uu in u]) + 1e-8)\n ct = np.cos(theta)\n st = np.sin(theta)\n rot = np.eye(4)\n rot[:3, :3] = [\n [ct + u[0] ** 2 * (1 - ct), u[0] * u[1] * (1 - ct) - u[2] * st, u[0] * u[2] * (1 - ct) + u[2] * st],\n [u[1] * u[0] * (1 - ct) + u[2] * st, ct + u[1] ** 2 * (1 - ct), u[1] * u[2] * (1 - ct) - u[0] * st],\n [u[2] * u[0] * (1 - ct) - u[1] * st, u[2] * u[1] * (1 - ct) + u[0] * st, ct + u[2] ** 2 * (1 - ct)]]\n\n elif dims == 3:\n rot = np.eye(3)\n rot[:2, :2] = np.asarray([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]])\n else:\n raise Exception(\n 'implement this for each dimension, since not yet implemented for dimension {}'.format(dims))\n\n return np.matmul(rot, affine)", "def rotate_z(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [math.cos(a),math.sin(a),0,0],\n [-math.sin(a),math.cos(a),0,0],\n [0,0,0,0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)", "def rotate(self, a):\n ca = cos(a)\n sa = sin(a)\n rM = Matrix([\n [ca, -sa],\n [sa, ca]\n ])\n p0 = self.p0\n self.c = p0 + rM @ (self.c - p0)\n dp = p0 - self.c\n self.a0 = atan2(dp.y, dp.x)\n return self", "def rotate(self, angle):\n\t\tif not isinstance(angle, Angle):\n\t\t\tangle = Angle(angle)\n\t\treturn angle.matrix() * self", "def 
rotate(self,X):\n alpha = random.rand() * 2*pi\n beta = self.beta_sample()\n R = Rotator.rotation_matrix(alpha,beta,0.0)\n X = np.dot(R, X)\n if self.random_flip and (random.rand() > 0.5):\n X[2,:] = -X[2,:]\n X[1,:] = -X[1,:]\n return X", "def apply_rotation(self, eta=0.0, phi=0.0, theta=0.0):\n \n new_rotation_matrix = self.rotation_elements( eta, phi, theta )\n \n #self.rotation_matrix_exp = np.dot( self.rotation_matrix_exp , new_rotation_matrix )\n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def rotate(p,q,A,V): \n n = A.shape[0]\n App, Aqq, Apq = A[p,p], A[q,q], A[p,q] #Initial values\n phi = 0.5*math.atan2(2*Apq, Aqq-App) #Find the rotation value\n c, s = math.cos(phi), math.sin(phi) #Calculate sin and cos\n\n #Update the matrix diagonal elements\n A[p,p] = c*c*App + s*s*Aqq - 2*s*c*Apq \n A[q,q] = s*s*App + c*c*Aqq + 2*s*c*Apq\n A[p,q] = 0 #This is zero by construction\n \n \n #Iterate over and update remaining off-diagonal elements\n for i in range(p):\n Aip, Aiq = A[i,p], A[i,q]\n A[i,p] = c*Aip - s*Aiq\n A[i,q] = c*Aiq + s*Aip\n \n for i in range(p+1,q):\n Api, Aiq = A[p,i], A[i,q]\n A[p,i] = c*Api - s*Aiq\n A[i,q] = c*Aiq + s*Api\n \n for i in range(q+1,n):\n Api, Aqi = A[p,i], A[q,i]\n A[p,i] = c*Api - s*Aqi\n A[q,i] = c*Aqi + s*Api\n \n #Update eigenvectors in matrix V\n for i in range(n):\n Vip, Viq = V[i,p], V[i,q]\n V[i,p] = c*Vip - s*Viq\n V[i,q] = s*Vip + c*Viq\n \n return A, V", "def rotate_x(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [1,0,0,0],\n [0,math.cos(a),math.sin(a),0],\n [0,-math.sin(a),math.cos(a),0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def rotate(self, angle, reshape=False):\n return IntensityMap.rotate(self, angle, reshape=reshape)", "def rotate(self, angle):\n perp = TwoDV(-self[1], self[0])\n angle = angle * math.pi / 180.0\n c, s = math.cos(angle), math.sin(angle)\n return TwoDV(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s)", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def rotate_3D(atom, source_atom):\n from lauescript.cryst.match import get_transform\n\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n\n matrix = get_transform(lst1, lst2, matrix=True)\n\n adp = source_atom.adp['cart_int']\n\n atom.adp['cart_int'] = rotate_adp(adp, matrix)", "def _rotate(self, angle):\n if self.undobuffer:\n self.undobuffer.push((\"rot\", angle, self._degreesPerAU))\n angle *= self._degreesPerAU\n neworient = self._orient.rotate(angle)\n tracing = self.screen._tracing\n if tracing == 1 and self._speed > 0:\n anglevel = 3.0 * self._speed\n steps = 1 + int(abs(angle)/anglevel)\n delta = 1.0*angle/steps\n for _ in range(steps):\n self._orient = self._orient.rotate(delta)\n self._update()\n self._orient = neworient\n self._update()", "def rotate_ADP_about_axis(ADP, angle, axisDirection):\n adp = get_adp_as_matrix(ADP)\n u, v = np.linalg.eig(adp)\n startPoints = [v[:, i].flatten().tolist()[0] for i in xrange(3)]\n endPoints = [rotate_point_about_axis(point, angle, axisDirection, (0, 0, 0)) for point in startPoints]\n rotMat = get_transform(startPoints, endPoints, matrix=True).transpose()\n newadp = np.dot(rotMat.transpose(), np.dot(adp, rotMat))\n return newadp[0, 0], newadp[1, 1], newadp[2, 2], newadp[0, 1], newadp[0, 2], newadp[1, 2]", "def rotation(self, p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - 
(p2[1] - p1[1]) * (p3[0] - p1[0])", "def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)", "def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)", "def rotate(self):\n pass", "def _rotate(self, arr, theta):\n # Rotation Matrix R\n R = [[np.cos(theta), -np.sin(theta)], \n [np.sin(theta), np.cos(theta)]]\n\n return np.matmul(R, arr.T).T", "def rotate(self, a):\n ca = cos(a)\n sa = sin(a)\n self.v = Matrix([\n [ca, -sa],\n [sa, ca]\n ]) @ self.v\n return self", "def rotate(self,r):\n return r.hprod( self.hprod( r.inv() ) )", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n R = Rotator.rotation_matrix(alpha,0.0,0.0)\n return np.dot(R,X)", "def _rot(axis, angle):\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])", "def rotate_y(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [math.cos(a),0,-math.sin(a),0],\n [0,1,0,0],\n [math.sin(a),0,math.cos(a),0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def rotate(self, angle=0.0):\n # TODO: Implement the rotate function. Remember to record the value of\n # rotation degree.\n self.rotDegree = angle\n self.x = rotate(self.x, angle = angle, axes=(0, 1), reshape=False, \n output=None, order=3, mode='constant', cval=0.0, prefilter=True)\n # This rotation isn't working correctly. 
Get shit for non right anlge rotatations\n # raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n\n beta = np.arccos(1.0-2*random.rand())\n psi = random.rand() * 2*pi\n\n R = Rotator.rotation_matrix(alpha,beta,psi)\n return np.dot(R,X)", "def Rotate(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Rotate(*args, **kwargs)", "def mrotate(self):\n result_matrix = [[0 for col in range(len(self.matrix[0]))] for row in range(len(self.matrix))]\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[0])):\n result_matrix[i][j] = self.matrix[i][len(self.matrix[0]) - 1 - j]\n # left turn -> result_matrix[i][j] = self.matrix[len(self.matrix) - 1 - i][j]\n self.matrix = result_matrix\n pass", "def rotate(self, angle):\n perp = Vec2D(-self[1], self[0])\n angle = angle * math.pi / 180.0\n c, s = math.cos(angle), math.sin(angle)\n return Vec2D(self[0] * c + perp[0] * s, self[1] * c + perp[1] * s)", "def rotate(self, angle):\n rotmat = rotation_matrix_2d(angle)\n rotated = np.dot(rotmat.T, [self.pix_x.value, self.pix_y.value])\n self.pix_x = rotated[0] * self.pix_x.unit\n self.pix_y = rotated[1] * self.pix_x.unit\n self.pix_rotation -= angle", "def rot(self, t=0., transposed=False):\n rotmat = np.array(\n [[np.cos(self._pa+self._omegab*t),np.sin(self._pa+self._omegab*t)],\n [-np.sin(self._pa+self._omegab*t),np.cos(self._pa+self._omegab*t)]])\n if transposed:\n return rotmat.T\n else:\n return rotmat", "def rotate(self, matrix: list[list[int]]) -> None:", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def angle_to_rotation_matrix(angle: torch.Tensor) -> torch.Tensor:\n ang_rad = deg2rad(angle)\n cos_a: torch.Tensor = torch.cos(ang_rad)\n sin_a: torch.Tensor = torch.sin(ang_rad)\n return torch.stack([cos_a, sin_a, -sin_a, cos_a], dim=-1).view(*angle.shape, 2, 2)", "def rotation(self, *args, **kwargs) -> Any:\n pass", "def _rotate_(self, x: np.array, m: np.array) -> (np.array, np.array):\n # get a random angle\n angle = np.random.randint(0, self.rotate)\n # get a random sign for the angle\n sign = np.random.randint(0, 2)\n x = rotate(x, -sign * angle, reshape=False)\n m = rotate(m, -sign * angle, axes=(0, 1),\n mode='nearest',\n reshape=False)\n return x, m", "def test_rotation(self):\n quat_rotated = rowan.rotate(input1, vector_inputs)\n\n matrices = rowan.to_matrix(input1)\n matrix_rotated = np.einsum(\"ijk,ki->ij\", matrices, vector_inputs.T)\n self.assertTrue(np.allclose(matrix_rotated, quat_rotated))", "def RotationMatrix(theta, x, y, z, point=None):\n\treturn mach.rotation_matrix(theta, [x, y, z])", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 
],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def rotate(self, vect, angle):\n self.pl.Rotation = Rotation(vect, angle)\n\n self.comp.Placement = self.pl\n self.box.Placement = self.pl", "def rotate(self, angle):\n old_angle, tilt = self.rotation\n new_angle = old_angle + angle\n while new_angle > 90:\n new_angle = new_angle - 90\n while angle < -90:\n new_angle = new_angle + 90\n self.rotation = (new_angle, tilt)", "def rotate_matrix(angle):\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, s],\n [-s, c]])", "def rotate(img, angle):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n aug = iaa.Affine(rotate=angle)\n return aug.augment_image(img)", "def rotMatrix( source = None ):\n if source is None:\n return None,None\n else:\n (x,y,z, a) = source\n if a % TWOPI:\n return tmatrixaccel.rotMatrix( x,y,z,a ),tmatrixaccel.rotMatrix( x,y,z,-a )\n return None,None", "def _altaz_rotation(self, jd):\n R_lon = rot_z(- self.longitude.radians - jd.gast * TAU / 24.0)\n return einsum('ij...,jk...,kl...->il...', self.R_lat, R_lon, jd.M)", "def rotate(self, theta, legs):\n U, onew = rotationTensor(theta, self.symmetries, legs)\n B = U @ self\n new = list(onew)\n old = list(legs)\n if B.internallegs != self.internallegs:\n old.append(self.internallegs[0])\n new.append(B.internallegs[0])\n B.swaplegs({n: o for o, n in zip(old, new)})\n return B.couplingAddapt(self.coupling)", "def rotate(self, angle, point=None):\n if not point:\n point = self.middle\n self.p1.rotate(angle, point)\n self.p2.rotate(angle, point)", "def rotate_z(self, angle):\n angle *= np.pi / 180\n return self.transform(np.matrix([[np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]]))", "def rotate(self, angle, axis):\r\n R=self.rotation(angle, axis)\r\n self.mlist = (self*R).mlist\r\n return self", "def rotate(X):\n return X", "def rotate_about(self, p, theta):\n result = self.clone()\n result.slide(-p.x, -p.y)\n result.rotate(theta)\n result.slide(p.x, p.y)\n return result", "def rotate(a, ps, axe=0):\r\n\r\n sin = np.sin(a)\r\n cos = np.cos(a)\r\n rm = np.array([[[1, 0, 0], [0, cos, -sin], [0, sin, cos]],\r\n [[cos, 0, sin], [0, 1, 0], [-sin, 0, cos]],\r\n [[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]]])\r\n m = np.full((len(ps), 3, 3), rm[axe])\r\n ps = map(lambda x, y: np.dot(x, y), m, ps)\r\n return ps", "def rotate(self, rotation):\n\t\tif not isinstance(rotation,Rotation):\n\t\t\trotation = Rotation(*rotation)\n\t\treturn rotation.matrix() * self", "def rotate(self, *args, **kwargs): # real signature unknown\n pass", "def rotate(self):\n\n last_center = self.rect.center\n self.image = pg.transform.rotate(self.image_copy,self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = last_center\n self.angle +=self.rotate_by", "def rotation(self, angle, axis):\r\n\r\n sqr_a = axis.x*axis.x\r\n sqr_b = axis.y*axis.y\r\n sqr_c = axis.z*axis.z\r\n len2 = sqr_a+sqr_b+sqr_c\r\n\r\n k2 = math.cos(angle)\r\n k1 = (1.0-k2)/len2\r\n k3 = math.sin(angle)/math.sqrt(len2)\r\n k1ab = k1*axis.x*axis.y\r\n k1ac = k1*axis.x*axis.z\r\n k1bc = k1*axis.y*axis.z\r\n k3a = k3*axis.x\r\n k3b = k3*axis.y\r\n k3c = k3*axis.z\r\n\r\n return mat4( k1*sqr_a+k2, k1ab-k3c, k1ac+k3b, 0.0,\r\n k1ab+k3c, k1*sqr_b+k2, k1bc-k3a, 0.0,\r\n k1ac-k3b, k1bc+k3a, k1*sqr_c+k2, 0.0,\r\n 0.0, 0.0, 0.0, 1.0)", "def intermediateJacPol2Rot(self,x):\n allS = np.sin(x[0,:])\n allC = np.cos(x[0,:])\n allR = x[1,:]\n \n Jac = 
Idn(x.shape[1],self._dim)\n Jac[:,0,0] = -allS*allR\n Jac[:,0,1] = allC\n Jac[:,1,0] = allC*allR\n Jac[:,1,1] = allS\n return Jac", "def rotate(self, angle=pi, point=None):\n if not point: point = Point.origin(d=self.dimension)\n v = Vector.createFromTwoPoints(point, self)\n v.rotate(angle)\n self.components = v(point).components", "def rotate(self, angle):\n self.call('rotate', angle)", "def reflect_adp(adp, planev):\n M = np.identity(4)\n M[:3, :3] -= 2.0 * np.outer(planev, planev)\n M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev\n\n return rotate_adp(adp, M[:3, :3])", "def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def rotate_about(self, p, theta):\n result = self.clone()\n result.translate(-p.x, -p.y)\n result.rotate(theta)\n result.translate(p.x, p.y)\n return result", "def rotateZ(self, angle):\r\n if angle:\r\n c = cos(radians(angle))\r\n s = sin(radians(angle))\r\n self.mtrx = dot([[c, s, 0, 0],\r\n [-s, c, 0, 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 0, 1]],\r\n self.mtrx)\r\n self.rtn[2] = angle\r\n self.was_moved = True", "def orientation_ras_lps(affine: NdarrayTensor) -> NdarrayTensor:\n sr = max(affine.shape[0] - 1, 1) # spatial rank is at least 1\n flip_d = [[-1, 1], [-1, -1, 1], [-1, -1, 1, 1]]\n flip_diag = flip_d[min(sr - 1, 2)] + [1] * (sr - 3)\n if isinstance(affine, torch.Tensor):\n return torch.diag(torch.as_tensor(flip_diag).to(affine)) @ affine # type: ignore\n return np.diag(flip_diag).astype(affine.dtype) @ affine # type: ignore", "def rotate(self, rotation):\n self.coords = dot(rotation, self.coords)\n return self", "def rotate(matrix: List[List[int]]) -> None:\n if matrix is None:\n return\n\n # transpose\n for i in range(0, len(matrix)):\n for j in range(i, len(matrix[0])):\n temp = matrix[i][j]\n matrix[i][j] = matrix[j][i]\n matrix[j][i] = temp\n # reflect\n for i in range(0, len(matrix)):\n for j in range(0, len(matrix[0]) // 2):\n reflection = len(matrix[0]) - j - 1\n temp = matrix[i][j]\n matrix[i][j] = matrix[i][reflection]\n matrix[i][reflection] = temp", "def rotate(x: torch.Tensor, angle: int) -> torch.Tensor:\n # B C H W\n h_dim = 2\n w_dim = 3\n\n if angle == 0:\n return x\n elif angle == 90:\n return x.flip(w_dim).transpose(h_dim, w_dim)\n elif angle == 180:\n return x.flip(w_dim).flip(h_dim)\n elif angle == 270:\n return x.flip(h_dim).transpose(h_dim, w_dim)\n else:\n raise NotImplementedError(\"Must be rotation divisible by 90 degrees\")", "def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def apply_rotation_z(self, theta=0.0 ):\n \n theta = 
radians(theta)\n new_rotation_matrix = [[ +cos(theta) , -sin(theta) , 0 ],\n [ +sin(theta) , +cos(theta) , 0 ],\n [ 0 , 0 , 1 ]] \n \n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def qrotate(points, axis, theta):\n q = Quaternion.rotator(axis, theta)\n return q.rotate(points)", "def rotatePivot(rotation):\r\n # Rotate in object mode X\r\n bpy.ops.object.mode_set(mode='OBJECT')\r\n bpy.ops.transform.rotate(value=rotation.x, axis=(1,0,0), constraint_orientation='GLOBAL')\r\n # rotate in edit mode X\r\n bpy.ops.object.mode_set(mode='EDIT')\r\n bpy.ops.mesh.select_all(action='SELECT')\r\n bpy.ops.transform.rotate(value=-rotation.x, axis=(1,0,0), constraint_orientation='GLOBAL')\r\n # Rotate in object mode Y\r\n bpy.ops.object.mode_set(mode='OBJECT')\r\n bpy.ops.transform.rotate(value=rotation.y, axis=(0,1,0), constraint_orientation='GLOBAL')\r\n # rotate in edit mode Y\r\n bpy.ops.object.mode_set(mode='EDIT')\r\n bpy.ops.transform.rotate(value=-rotation.y, axis=(0,1,0), constraint_orientation='GLOBAL')\r\n # Rotate in object mode Z\r\n bpy.ops.object.mode_set(mode='OBJECT')\r\n bpy.ops.transform.rotate(value=rotation.z, axis=(0,0,1), constraint_orientation='GLOBAL')\r\n # rotate in edit mode Z\r\n bpy.ops.object.mode_set(mode='EDIT')\r\n bpy.ops.transform.rotate(value=-rotation.z, axis=(0,0,1), constraint_orientation='GLOBAL')\r\n # return to object mode\r\n bpy.ops.object.mode_set(mode='OBJECT')", "def srotate(self, angle):\n\n self.angle = self.angle + angle", "def _rotation_from_gradient(self,m):\n\t\ttheta = -np.arctan(m)\n\t\tself.current_theta = theta\n\t\treturn self._rotation_from_angle(theta)", "def rotate(self, matrix):\n n = len(matrix)\n \n for circle in range(n//2):\n r_circle = n - circle - 1\n for i in range(circle, n - circle - 1):\n a = matrix[circle][i]\n b, matrix[i][r_circle] = matrix[i][r_circle], a\n c, matrix[r_circle][n - i - 1] = matrix[r_circle][n - i - 1], b\n d, matrix[n - i - 1][circle] = matrix[n - i - 1][circle], c\n matrix[circle][i] = d", "def align(image):\n angle = find_angle(image)\n image = rotate(image, angle)\n return image", "def rotate(self, *args, **kwargs):\n return _image.image_rotate(self, *args, **kwargs)", "def _rotationMatrix(self, n_dim, theta):\n i = np.identity(n_dim)\n c, s = np.cos(theta)*i, np.sin(theta)*i\n rotation = np.bmat([[c, s], [-s, c]])\n return rotation", "def rotate(self, angle):\n image_center = np.array(self.img.shape[1::-1]) / 2\n rot_mat = cv2.getRotationMatrix2D(tuple(image_center), angle, 1.0)\n\n self.img = cv2.warpAffine(\n self.img, rot_mat, self.img.shape[1::-1], flags=cv2.INTER_LINEAR\n )\n\n self.edits.append(f\"rotate:{angle}\")\n return self", "def rotate(self, yaw):\n rotation_matrix = tfs.rotation_matrix(yaw, (0, 0, 1))[:2, :2]\n return np.matmul(rotation_matrix, self).view(Vector)", "def rotation(self, phi, mode):\n self.circuit.phase_shift(phi, self._remap_modes(mode))", "def rotation_matrix(rotate):\n tx, ty, tz = rotate\n Rx = np.array([[1, 0, 0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])\n Ry = np.array([[np.cos(ty), 0, -np.sin(ty)], [0, 1, 0], [np.sin(ty), 0, np.cos(ty)]])\n Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0, 0, 1]])\n return np.dot(Rx, np.dot(Ry, Rz))", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * 
self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. - txx - tyy\n\n return rot", "def keypoint_rotate(keypoint, angle, rows, cols, **params):\n center = (cols - 1) * 0.5, (rows - 1) * 0.5\n matrix = cv2.getRotationMatrix2D(center, angle, 1.0)\n x, y, a, s = keypoint[:4]\n x, y = cv2.transform(np.array([[[x, y]]]), matrix).squeeze()\n return x, y, a + math.radians(angle), s", "def rotate(img, angle, resample=False, expand=False, center=None):\r\n \r\n return img.rotate(angle, resample, expand, center)", "def _rotate(polyreg, i=None, j=None, u=None, v=None, theta=None, R=None):\n # determine the rotation matrix based on inputs\n if R is not None:\n logger.debug(\"rotate: R=\\n{}\".format(R))\n if i is not None:\n raise ValueError(i)\n if j is not None:\n raise ValueError(j)\n if theta is not None:\n raise ValueError(theta)\n if u is not None:\n raise ValueError(u)\n if v is not None:\n raise ValueError(v)\n elif i is not None and j is not None and theta is not None:\n logger.info(\"rotate via indices and angle.\")\n if R is not None:\n raise ValueError(R)\n if u is not None:\n raise ValueError(u)\n if v is not None:\n raise ValueError(v)\n if i == j:\n raise ValueError(\"Must provide two unique basis vectors.\")\n R = givens_rotation_matrix(i, j, theta, polyreg.dim)\n elif u is not None and v is not None:\n logger.info(\"rotate via 2 vectors.\")\n if R is not None:\n raise ValueError(R)\n if i is not None:\n raise ValueError(i)\n if j is not None:\n raise ValueError(j)\n if theta is not None:\n raise ValueError(theta)\n R = solve_rotation_ap(u, v)\n else:\n raise ValueError(\"R or (i and j and theta) or (u and v) \"\n \"must be defined.\")\n if isinstance(polyreg, Polytope):\n # Ensure that half space is normalized before rotation\n n, p = _hessian_normal(polyreg.A, polyreg.b)\n # Rotate the hyperplane normals\n polyreg.A = np.inner(n, R)\n polyreg.b = p\n else:\n # Rotate subregions\n for poly in polyreg.list_poly:\n _rotate(poly, None, None, R=R)\n # transform bbox and cheby\n if polyreg.bbox is not None:\n polyreg.bbox = (np.inner(polyreg.bbox[0].T, R).T,\n np.inner(polyreg.bbox[1].T, R).T)\n if polyreg._chebXc is not None:\n polyreg._chebXc = np.inner(polyreg._chebXc, R)\n return R", "def rotator(angle):\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c,-s],[s,c]])", "def fix_rotation(self):\n self.rotate(self.rotation)\n self.annotations.rotate(self.rotation)\n self.rotation = 0", "def rotate(self,center, angle):\n \n self.coord = [x-np.repeat([[center[0],center[1]]],[x.shape[0]],axis = 0) for x in self.coord]\n\n alpha = angle\n R = np.array([[np.cos(alpha),-np.sin(alpha)],[np.sin(alpha),np.cos(alpha)]])\n \n for i in range(len(self.coord)):\n self.coord[i] = np.squeeze([np.dot([x],R) for x in self.coord[i]])\n\n self.coord = [x+np.repeat([[center[0],center[1]]],[x.shape[0]],axis = 0) for x in self.coord]\n\n return self", "def rotation(self, phi, mode):\n self.circuit.phase_shift(phi, mode)", "def __rotate_model(self):\n self.__model_matrix = self.__get_rotation_matrix(\n self.__face.position_cartesian,\n (1 + self.__face.position[2]) * 0.5)", "def apply_rotation_x(self, eta=0.0 ):\n \n eta = radians(eta)\n new_rotation_matrix = [[ 1 , 0 , 0 ],\n [ 0 , +cos(eta) , -sin(eta) ],\n [ 0 , +sin(eta) , +cos(eta) ]] \n \n self.rotation_matrix_exp = 
np.dot( new_rotation_matrix, self.rotation_matrix_exp )" ]
[ "0.79376036", "0.73167443", "0.73074406", "0.7127177", "0.7113275", "0.7051355", "0.6992302", "0.6686556", "0.66458803", "0.66007537", "0.657009", "0.6538239", "0.6492567", "0.64686793", "0.6467943", "0.6442462", "0.64174455", "0.63961196", "0.6388264", "0.63866466", "0.63760054", "0.6368504", "0.63672537", "0.63672537", "0.6365199", "0.63406956", "0.63355124", "0.6325487", "0.63123417", "0.6312132", "0.6274328", "0.62583435", "0.62266237", "0.62117803", "0.6209411", "0.61521596", "0.6146725", "0.61100966", "0.61060613", "0.61038494", "0.6097206", "0.60961854", "0.6092604", "0.6088147", "0.60809153", "0.6078204", "0.6069734", "0.6041528", "0.6030295", "0.60239816", "0.601961", "0.60133547", "0.5996416", "0.5995912", "0.5989014", "0.59862995", "0.5986245", "0.59836257", "0.59782624", "0.597726", "0.59758997", "0.59740967", "0.5970379", "0.59592766", "0.5957963", "0.5955848", "0.59473217", "0.5944798", "0.5944029", "0.59329593", "0.5908772", "0.5894153", "0.5889308", "0.5888876", "0.58877623", "0.5887166", "0.5879811", "0.58780724", "0.5876205", "0.58728635", "0.5872826", "0.5852149", "0.58506614", "0.58456385", "0.5845091", "0.58430165", "0.5828852", "0.5827741", "0.5826474", "0.5825847", "0.5824189", "0.5821436", "0.5821264", "0.5806832", "0.58050036", "0.58040315", "0.58011824", "0.57980055", "0.57854044", "0.57780397" ]
0.7229753
3
Returns a list of coordinates where every position is rotated by the rotation matrix 'R'.
def rotate_list_by(coordlist, R):
    for coord in xrange(len(coordlist)):
        value = np.dot(R, coordlist[coord])
        value = np.array(value).reshape(-1, ).tolist()
        coordlist[coord] = value
    return coordlist
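A minimal Python 3 usage sketch for the routine above; the sample points and the 90-degree z-axis rotation matrix are illustrative assumptions, and range stands in for the Python 2 xrange used in the snippet.

import numpy as np

# Illustrative inputs: three unit points and a 90-degree rotation about the z-axis.
coords = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
theta = np.pi / 2
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])

# Same in-place update as the retrieved snippet: the matrix is applied to each point in turn.
for i in range(len(coords)):
    coords[i] = np.asarray(np.dot(R, coords[i])).reshape(-1).tolist()

print(coords)  # the first point maps to approximately [0.0, 1.0, 0.0]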
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _rotation_matrix_to_euler_angles(R):\n sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n\n singular = sy < 1e-6\n\n if not singular:\n x = math.atan2(R[2, 1], R[2, 2])\n y = math.atan2(-R[2, 0], sy)\n z = math.atan2(R[1, 0], R[0, 0])\n else:\n x = math.atan2(-R[1, 2], R[1, 1])\n y = math.atan2(-R[2, 0], sy)\n z = 0\n\n return np.array([x, y, z])", "def _rotation_matrix_to_euler_angles(self, R):\n assert (self._is_rotation_matrix(R))\n\n sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n singular = sy < 1e-6\n\n if not singular:\n x = math.atan2(R[2, 1], R[2, 2])\n y = math.atan2(-R[2, 0], sy)\n z = math.atan2(R[1, 0], R[0, 0])\n else:\n x = math.atan2(-R[1, 2], R[1, 1])\n y = math.atan2(-R[2, 0], sy)\n z = 0\n\n return np.array([x, y, z])", "def get_rotation_vector(R):\n v = np.array([R[1,2] - R[2,1],\n R[2,0] - R[0,1],\n R[0,1] - R[1,0]]) # eq. 3.12 in [1], pp.66\n return v", "def rotationMatrixToEulerAngles(R) :\n sy = np.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n singular = sy < 1e-6\n\n if not singular :\n x = np.arctan2(R[2,1] , R[2,2])\n y = np.arctan2(-R[2,0], sy)\n z = np.arctan2(R[1,0], R[0,0])\n else :\n x = np.arctan2(-R[1,2], R[1,1])\n y = np.arctan2(-R[2,0], sy)\n z = 0\n\n return np.array([x, y, z])", "def simple_rotate(self):\n new_positions = []\n for i, pos in enumerate(self.positions):\n new_positions.append((pos[1], pos[0]))\n return new_positions", "def rotationMatrixToEulerAngles(self, R):\n\n assert(self.isRotationMatrix(R))\n\n sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n\n singular = sy < 1e-6\n\n if not singular:\n x = math.atan2(R[2, 1], R[2, 2])\n y = math.atan2(-R[2, 0], sy)\n z = math.atan2(R[1, 0], R[0, 0])\n else:\n x = math.atan2(-R[1, 2], R[1, 1])\n y = math.atan2(-R[2, 0], sy)\n z = 0\n\n return np.array([x, y, z])", "def rotation_matrix_to_euler(R):\n sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n \n singular = sy < 1e-6\n \n if not singular :\n x = math.atan2(R[2,1] , R[2,2])\n y = math.atan2(-R[2,0], sy)\n z = math.atan2(R[1,0], R[0,0])\n else :\n x = math.atan2(-R[1,2], R[1,1])\n y = math.atan2(-R[2,0], sy)\n z = 0\n \n return np.array([x, y, z])", "def rotation_inv(R: np.array) -> np.array:\n return R.T", "def radiiPoints(self, R):\n width = 2*R+1\n xspace = np.arange(width)-R\n yspace = np.arange(width)-R\n xx, yy= np.meshgrid(xspace, yspace)\n dist = np.sqrt(xx**2+yy**2)\n xpts = np.nonzero((dist<R+0.5) & (dist>R-0.5))[0]-R\n ypts = np.nonzero((dist<R+0.5) & (dist>R-0.5))[1]-R\n order = np.argsort(np.arctan2(xpts, ypts))\n return xpts[order], ypts[order]", "def get_coordinates_rot(self):\n return self.get_coordinates()", "def positions(r):\n\n X = []\n Y = []\n\n leftx = -r*(nx - 1) / 2\n topy = -r*(ny - 1) / 2\n\n for i in range(0, nx):\n for j in range(0, ny):\n X.append(leftx + r * i)\n Y.append(topy + r * j)\n\n return (X, Y)", "def _rotate(self):\n new_points = list()\n for i in range(len(self._points)):\n x = self._points[i][0]\n y = self._points[i][1]\n new_x = self._center[0] + self._center[1] - y\n new_y = self._center[1] - self._center[0] + x\n new_points.append((new_x, new_y))\n return new_points", "def rotate(points, angle=0):\n # x = np.cos(R) * X - np.sin(R) * Y\n # y = np.sin(R) * X + np.cos(R) * Y\n X = points[0]\n Y = points[1]\n x = np.cos(angle) * X - np.sin(angle) * Y\n y = np.sin(angle) * X + np.cos(angle) * Y\n return [x, y]", "def transform(self, r):\n n = (self.__degree + 2) * (self.__degree + 1) // 2 - 1\n arr = np.empty((n, r.shape[1]))\n\n k = 0\n for i in range(1, self.__degree+1):\n for j 
in range(i + 1):\n arr[k,:] = r[0,:]**(i - j) * r[1,:]**j\n k += 1\n return arr", "def expmap2rotmat(r):\n theta = np.linalg.norm( r )\n r0 = np.divide( r, max(theta, np.finfo(np.float32).eps) )\n r0x = np.array([0, -r0[2], r0[1], 0, 0, -r0[0], 0, 0, 0]).reshape(3,3)\n r0x = r0x - r0x.T\n R = np.eye(3,3) + np.sin(theta)*r0x + (1-np.cos(theta))*(r0x).dot(r0x)\n return R", "def rotated_vertices(self):\n rotated_vertices = []\n for v in self.non_rotated_vertices():\n x, y = v[0], v[1]\n x_rotated = self.pos.x + (x-self.pos.x)*math.cos(self.rotation) - (y-self.pos.y)*math.sin(self.rotation)\n y_rotated = self.pos.y + (x-self.pos.x)*math.sin(self.rotation) + (y-self.pos.y)*math.cos(self.rotation)\n rotated_vertices.append([x_rotated, y_rotated])\n return rotated_vertices", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def rotation3D_to_rpy(R: np.array) -> Tuple[float, float, float]:\n if abs(1 - R[2, 0]) < 1e-6:\n roll = 0.0\n pitch = -np.arcsin(R[2, 0])\n if R[2, 0] < 0:\n yaw = np.arctan2(-R[0, 1], R[0, 2])\n else:\n yaw = np.arctan2(-R[0, 1], -R[0, 2])\n else:\n roll = np.arctan2(R[2, 1], R[2, 2])\n yaw = np.arctan2(R[1, 0], R[0, 0])\n pitch = -np.arctan(R[2, 0] * np.cos(roll) / R[2, 2])\n return roll, pitch, yaw", "def expmap2rotmat(r):\n theta = np.linalg.norm(r)\n r0 = np.divide(r, theta + np.finfo(np.float32).eps)\n r0x = np.array([0, -r0[2], r0[1], 0, 0, -r0[0], 0, 0, 0]).reshape(3, 3)\n r0x = r0x - r0x.T\n R = (\n np.eye(3, 3)\n + np.sin(theta) * r0x\n + (1 - np.cos(theta)) * (r0x).dot(r0x)\n )\n return R", "def _euler_angles_to_rotation_matrix(theta):\n R_x = np.array([[1, 0, 0],\n [0, math.cos(theta[0]), -math.sin(theta[0])],\n [0, math.sin(theta[0]), math.cos(theta[0])]\n ])\n\n R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],\n [0, 1, 0],\n [-math.sin(theta[1]), 0, math.cos(theta[1])]\n ])\n\n R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],\n [math.sin(theta[2]), math.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n\n R = np.dot(R_z, np.dot(R_y, R_x))\n\n return R", "def rotation_to_transformation_matrix(R):\n R = Matrix(R)\n T = R.col_insert(3, Matrix([0., 0., 0.]))\n T = T.row_insert(3, Matrix([[0., 0., 0., 1.]]))\n return T", "def vrrotvec2mat(r):\n s = np.sin(r[3])\n c = np.cos(r[3])\n t = 1 - c\n \n n = normalize(r[0:3])\n \n x = n[0]\n y = n[1]\n z = n[2]\n \n m = np.array(\n [[t*x*x + c, t*x*y - s*z, t*x*z + s*y],\n [t*x*y + s*z, t*y*y + c, t*y*z - s*x],\n [t*x*z - s*y, t*y*z + s*x, t*z*z + c]]\n )\n return m", "def RotZYX_to_RPY(r: array):\n\n # Make sure that r is an array and reshape it to be a 3x3 matrix\n r = array(r).reshape((3, 3))\n\n # Compute the corresponding quaternions\n q = Rot_to_quaternion(r)\n\n # Compute the euler angles from the quaternion\n return quaternion_to_RPY(q)", "def rotate(self, coord):\r\n [x, y] = coord\r\n xrot = x*math.cos(self.heading) - y*math.sin(self.heading)\r\n yrot = x*math.sin(self.heading) + y*math.cos(self.heading)\r\n return [xrot, yrot]", "def get_R(angles):\n cs, ss = np.cos(angles), np.sin(angles)\n zeros, ones = np.zeros(len(cs)), np.ones(len(cs))\n Rs = np.array(\n [[cs, ss, zeros], [-ss, cs, zeros], [zeros, zeros, ones]], dtype=np.float32\n ) # (3, 3, N)\n\n return Rs.transpose((2, 0, 1))", "def rotate_a(X,vector):\r\n\taxis_vector = (math.radians(-X)) * np.array([1,0,0])\r\n\tr = R.from_rotvec(axis_vector)\r\n\treturn list(r.apply(vector))", "def rotate(self,r):\n return r.hprod( self.hprod( r.inv() ) )", "def get_rot_directions(robot_name):\n target_ctrl_path = 
get_target_ctrl_path(robot_name)\n\n num_axes = 6\n rot_directions = [0 for i in range(num_axes)]\n\n for i in range(num_axes):\n axis_number = i + 1 # Axes are 1-indexed\n # We use objExists here because most robot rigs don't have\n # offsets defined for every axis\n offset_attr_name = target_ctrl_path + '.flipAxis{}direction'.format(axis_number)\n if pm.objExists(offset_attr_name):\n rot_direction = pm.getAttr(offset_attr_name)\n rot_directions[i] = rot_direction\n\n return rot_directions", "def polar_to_cartesian(self, r, theta):\n # x = rcos(theta), y = rsin(theta)\n x, y = r*math.cos(theta), r*math.sin(theta)\n x, y = self.add((x, y), self.pole)\n return x, y", "def rotation_matrix(rx, ry, rz):\n # Convert from degrees to radians.\n rx = np.pi * rx / 180\n ry = np.pi * ry / 180\n rz = np.pi * rz / 180\n\n # Pre-compute sine and cosine of angles.\n cx, cy, cz = np.cos([rx, ry, rz])\n sx, sy, sz = np.sin([rx, ry, rz])\n\n # Set up euler rotations.\n Rx = np.array([[1, 0, 0, 0],\n [0, cx, -sx, 0],\n [0, sx, cx, 0],\n [0, 0, 0, 1]])\n\n Ry = np.array([[cy, 0, sy, 0],\n [0, 1, 0, 0],\n [-sy, 0, cy, 0],\n [0, 0, 0, 1]])\n\n Rz = np.array([[cz, -sz, 0, 0],\n [sz, cz, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n return Rz.dot(Ry.dot(Rx))", "def polarToCartesian(r,theta):\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n return x,y", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n R = Rotator.rotation_matrix(alpha,0.0,0.0)\n return np.dot(R,X)", "def _rotate(self, arr, theta):\n # Rotation Matrix R\n R = [[np.cos(theta), -np.sin(theta)], \n [np.sin(theta), np.cos(theta)]]\n\n return np.matmul(R, arr.T).T", "def get_anglesXY( self ):\n [accel_xout_scaled, accel_yout_scaled, _] = self.get_accelXYZ()\n rot_x = get_x_angle( accel_xout_scaled, accel_yout_scaled, accel_zout_scaled )\n rot_y = get_y_angle( accel_xout_scaled, accel_yout_scaled, accel_zout_scaled )\n return [rot_x, rot_y]", "def rotation2D_to_angle(R: np.array) -> float:\n return np.arctan2(R[1, 0], R[0, 0])", "def rotate(self,X):\n alpha = random.rand() * 2*pi\n\n beta = np.arccos(1.0-2*random.rand())\n psi = random.rand() * 2*pi\n\n R = Rotator.rotation_matrix(alpha,beta,psi)\n return np.dot(R,X)", "def _quaternions(self, R):\n # Simple Wikipedia version\n # en.wikipedia.org/wiki/Rotation_matrix#Quaternion\n # For other options see math.stackexchange.com/questions/2074316/calculating-rotation-axis-from-rotation-matrix\n diag = torch.diagonal(R, dim1=-2, dim2=-1)\n Rxx, Ryy, Rzz = diag.unbind(-1)\n magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([\n Rxx - Ryy - Rzz, \n - Rxx + Ryy - Rzz, \n - Rxx - Ryy + Rzz\n ], -1)))\n _R = lambda i,j: R[:,:,:,i,j]\n signs = torch.sign(torch.stack([\n _R(2,1) - _R(1,2),\n _R(0,2) - _R(2,0),\n _R(1,0) - _R(0,1)\n ], -1))\n xyz = signs * magnitudes\n # The relu enforces a non-negative trace\n w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.\n Q = torch.cat((xyz, w), -1)\n Q = F.normalize(Q, dim=-1)\n return Q", "def rotation(X, Y, C, S) :\n Xrot = X*C + Y*S \n Yrot = Y*C - X*S \n return Xrot, Yrot", "def _pole_to_cart(self,angles,distances):\n cart=[]\n for i in xrange(0,len(angles)-1):\n angle = angles[i]\n distance = distances[i] \n xs, ys = distance*cos(angle), distance*sin(angle)\n cart.append(tuple((xs,ys)))\n return cart", "def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def eulerAnglesToRotationMatrix(theta):\n\n R_x = 
np.array([[1, 0, 0 ],\n [0, np.cos(theta[0]), -np.sin(theta[0]) ],\n [0, np.sin(theta[0]), np.cos(theta[0]) ]\n ])\n R_y = np.array([[np.cos(theta[1]), 0, np.sin(theta[1]) ],\n [0, 1, 0 ],\n [-np.sin(theta[1]), 0, np.cos(theta[1]) ]\n ])\n R_z = np.array([[np.cos(theta[2]), -np.sin(theta[2]), 0],\n [np.sin(theta[2]), np.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n R = np.dot(R_z, np.dot( R_y, R_x ))\n return R", "def vecRot(data, seq, euler_angles, **kwargs):\n from scipy.spatial.transform import Rotation as R\n r = R.from_euler(seq, euler_angles, **kwargs)\n return r.apply(data)", "def rotate_c(X,a_set,vector):\r\n\taxis_vector = math.radians(-X) * np.array([0,0,1])\r\n\tr = R.from_rotvec(axis_vector)\r\n\treturn list(r.apply(vector))", "def givens_rotation_matrix(i, j, theta, N):\n R = np.identity(N)\n c = np.cos(theta)\n s = np.sin(theta)\n R[i, i] = c\n R[j, j] = c\n R[i, j] = -s\n R[j, i] = s\n return R", "def rotmat2euler(R):\n if R[0, 2] == 1 or R[0, 2] == -1:\n # special case\n E3 = 0 # set arbitrarily\n dlta = np.arctan2(R[0, 1], R[0, 2])\n\n if R[0, 2] == -1:\n E2 = np.pi / 2\n E1 = E3 + dlta\n else:\n E2 = -np.pi / 2\n E1 = -E3 + dlta\n\n else:\n E2 = -np.arcsin(R[0, 2])\n E1 = np.arctan2(R[1, 2] / np.cos(E2), R[2, 2] / np.cos(E2))\n E3 = np.arctan2(R[0, 1] / np.cos(E2), R[0, 0] / np.cos(E2))\n\n eul = np.array([E1, E2, E3])\n return eul", "def transform_from_rot_trans(R, t):\n R = R.reshape(3, 3)\n t = t.reshape(3, 1)\n return np.hstack([R, t])", "def rotmat2euler(R):\n if R[0,2] == 1 or R[0,2] == -1:\n # special case\n E3 = 0 # set arbitrarily\n dlta = np.arctan2( R[0,1], R[0,2] )\n if R[0,2] == -1:\n E2 = np.pi/2\n E1 = E3 + dlta\n else:\n E2 = -np.pi/2\n E1 = -E3 + dlta\n else:\n E2 = -np.arcsin( R[0,2] )\n E1 = np.arctan2( R[1,2]/np.cos(E2), R[2,2]/np.cos(E2) )\n E3 = np.arctan2( R[0,1]/np.cos(E2), R[0,0]/np.cos(E2) )\n eul = np.array([E1, E2, E3])\n return eul", "def get_rotation_matrix(theta, rot_vector):\n\n ux = rot_vector[0]\n uy = rot_vector[1]\n uz = rot_vector[2]\n cost = np.cos(theta)\n sint = np.sin(theta)\n\n R = np.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],\n [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],\n [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])\n\n return R", "def R(theta, u):\n return [[cos(theta) + u[0]**2 * (1-cos(theta)),\n u[0] * u[1] * (1-cos(theta)) - u[2] * sin(theta),\n u[0] * u[2] * (1 - cos(theta)) + u[1] * sin(theta)],\n [u[0] * u[1] * (1-cos(theta)) + u[2] * sin(theta),\n cos(theta) + u[1]**2 * (1-cos(theta)),\n u[1] * u[2] * (1 - cos(theta)) - u[0] * sin(theta)],\n [u[0] * u[2] * (1-cos(theta)) - u[1] * sin(theta),\n u[1] * u[2] * (1-cos(theta)) + u[0] * sin(theta),\n cos(theta) + u[2]**2 * (1-cos(theta))]]", "def _rotation_matrix(theta):\n c, s = np.cos(theta), np.sin(theta)\n return np.array(((c, -s), (s, c)))", "def rotation_matrices_from_angles(angles):\n\n angles = np.atleast_1d(angles)\n npts = len(angles)\n\n sina = np.sin(angles)\n cosa = np.cos(angles)\n\n R = np.zeros((npts, 2, 2))\n R[:, 0, 0] = cosa\n R[:, 1, 1] = cosa\n\n R[:, 0, 1] = -sina\n R[:, 1, 0] = sina\n\n return R", "def rot(theta):\n cos = np.cos(theta)\n sin = np.sin(theta)\n return( np.array( [[cos, sin], [-sin, cos]] ) )", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def get_R_torch(angles):\n cs, ss = torch.cos(angles), torch.sin(angles)\n zeros = torch.zeros(len(cs), device=angles.device)\n ones = torch.ones(len(cs), device=angles.device)\n Rs = torch.empty((angles.shape[0], 
3, 3), device=angles.device).float() # (N, 3, 3)\n Rs[:, 0] = torch.stack((cs, ss, zeros), dim=1)\n Rs[:, 1] = torch.stack((-ss, cs, zeros), dim=1)\n Rs[:, 2] = torch.stack((zeros, zeros, ones), dim=1)\n\n return Rs", "def get_orbit_points(n=10, r=400):\n orbit_pos = []\n for index, j in enumerate([(2*pi*i-2*pi)/n for i in range(1, n+1)]):\n orbit_pos.append((cos(j)*r, sin(j)*r))\n return orbit_pos", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. - txx - tyy\n\n return rot", "def _rotate_coords(self, x, y, theta, ox, oy):\n s, c = self._pkgs['numpy'].sin(theta), self._pkgs['numpy'].cos(theta)\n x, y = self._pkgs['numpy'].asarray(x) - ox, self._pkgs['numpy'].asarray(y) - oy\n return x * c - y * s + ox, x * s + y * c + oy", "def r_theta_to_input_coords(r_theta):\n # Extract r and theta from input\n r, theta = r_theta[:,0], r_theta[:,1]\n\n # Theta wraps at the side of the image. That is to say that theta=1.1\n # is equivalent to theta=0.1 => just extract the fractional part of\n # theta\n theta = theta - np.floor(theta)\n\n # Calculate the maximum x- and y-co-ordinates\n max_x, max_y = input_shape[1]-1, input_shape[0]-1\n\n # Calculate x co-ordinates from theta\n xs = theta * max_x\n\n # Calculate y co-ordinates from r noting that r=0 means maximum y\n # and r=1 means minimum y\n ys = (1-r) * max_y\n\n # Return the x- and y-co-ordinates stacked into a single Nx2 array\n return np.hstack((xs, ys))", "def eulerAnglesToRotationMatrix(self, theta):\n\n R_x = np.array([[1, 0, 0],\n [0, math.cos(theta[0]), -math.sin(theta[0])],\n [0, math.sin(theta[0]), math.cos(theta[0])]\n ])\n\n R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],\n [0, 1, 0],\n [-math.sin(theta[1]), 0, math.cos(theta[1])]\n ])\n\n R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],\n [math.sin(theta[2]), math.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n\n R = np.dot(R_z, np.dot(R_y, R_x))\n\n return R", "def get_roi_coords(self):\n return [roi.get_coords() for roi in self.rois]", "def rotate(self, matrix: list[list[int]]) -> None:", "def xy_rotation(vector,theta):\r\n R = np.array([[np.cos(theta), -np.sin(theta),0],\r\n [np.sin(theta), np.cos(theta),0],\r\n [0,0,1]\r\n ])\r\n return np.dot(R,vector)", "def get_cartesian_coords(self):\n r = 1\n dec = self.dec + 90\n x = r * math.sin(np.deg2rad(dec)) * math.cos(np.deg2rad(self.ra))\n y = r * math.sin(np.deg2rad(dec)) * math.sin(np.deg2rad(self.ra))\n z = r * math.cos(np.deg2rad(dec))\n\n return [x, y, z]", "def euler_to_rodrigues(X_params):\n data_samples = X_params.shape[0]\n pose_euler = np.array([X_params[:, i:i+3] for i in range(0, 72, 3)])\n #print(pose_euler[0][0])\n #pose_euler = pose_euler.reshape((24, data_samples, 1, 3))\n #print(pose_euler[0][0])\n print(\"pose_euler shape: \" + str(pose_euler.shape))\n #R = np.array([[eulerAnglesToRotationMatrix(vector) for vector in vectors] for vectors in pose_euler])\n #print(\"R shape: \" + str(R.shape))\n #print(R[0][0])\n #R = R.reshape((data_samples, 24, 3, 3))\n\n #pose_params = np.array([[Rot.from_dcm(rot_mat).as_rotvec() for 
rot_mat in param_rot_mats] for param_rot_mats in R])\n pose_params = np.array([Rot.from_euler('xyz', vectors, degrees=False).as_rotvec() for vectors in pose_euler])\n print(\"pose_params shape: \" + str(pose_params.shape))\n pose_params = pose_params.reshape((data_samples, 72))\n print(\"pose_params shape: \" + str(pose_params.shape))\n print(\"other params shape: \" + str(X_params[:, 72:85].shape))\n X_params = np.concatenate([pose_params, X_params[:, 72:85]], axis=1)\n print(\"X_params shape: \" + str(X_params.shape))\n\n return X_params", "def RZ(rotRadian: float):\n return np.array([\n [np.cos(rotRadian), -np.sin(rotRadian), 0],\n [np.sin(rotRadian), np.cos(rotRadian), 0],\n [0, 0, 1]\n ])", "def _gen_rxx(theta):\n return np.array([[np.cos(theta / 2), 0, 0, -1j * np.sin(theta / 2)],\n [0, np.cos(theta / 2), -1j * np.sin(theta / 2), 0],\n [0, -1j * np.sin(theta / 2), np.cos(theta / 2), 0],\n [-1j * np.sin(theta / 2), 0, 0, np.cos(theta / 2)]])", "def get_coordinates(self):\r\n coordinates_list = []\r\n for i in range(self.__length):\r\n if self.__orientation == Direction.VERTICAL:\r\n temp = (self.__location[0] + i, self.__location[1])\r\n if self.__orientation == Direction.HORIZONTAL:\r\n temp = (self.__location[0], self.__location[1] + i)\r\n coordinates_list.append(temp)\r\n return coordinates_list", "def rot_x_rad(self):\n return self._rot_x_rad", "def multi_rot_Z(angle_rads: numpy.ndarray) -> numpy.ndarray:\n rz = numpy.empty((angle_rads.shape[0], 4, 4))\n rz[...] = numpy.identity(4)\n rz[:, 0, 0] = rz[:, 1, 1] = numpy.cos(angle_rads)\n rz[:, 1, 0] = numpy.sin(angle_rads)\n rz[:, 0, 1] = -rz[:, 1, 0]\n return rz", "def getRotationTrajectory(self) -> SO3Trajectory:\n return SO3Trajectory(self.times,[m[:9] for m in self.milestones])", "def extractRotate(self,groups):\n self.rotate = math.pi * float(groups[0]) / 180\n self.rX = 0\n self.rY = 0\n if len(groups) == 3:\n if groups[1]:\n \tself.rX = float(groups[1])\n if groups[2]:\n \tself.rY = float(groups[2])\n\n #alpha = float(self.rotate)\n alpha = self.rotate\n cx = self.rX\n cy = self.rY\n self.matrix = [ [\n math.cos(alpha),\n -math.sin(alpha),\n -cx * math.cos(alpha) + cy * math.sin(alpha) + cx\n ],\n [\n math.sin(alpha),\n math.cos(alpha),\n -cx * math.sin(alpha) - cy * math.cos(alpha) + cy\n ]\n ]", "def find_plane_angles(self, roof_motor_position):\n\n # Calcolo il punto mediano tra i vertici 2 e 3\n pc_x = (self.roof_vertex_x[1] + self.roof_vertex_x[2]) / 2\n pc_y = (self.roof_vertex_y[1] + self.roof_vertex_y[2]) / 2\n pc_z = (self.roof_vertex_z[1] + self.roof_vertex_z[2]) / 2\n\n # Questa non so cosa sia\n base_r = [[self.roof_vertex_x[0] - pc_x, self.roof_vertex_y[0] - pc_y, self.roof_vertex_z[0] - pc_z],\n [self.roof_vertex_x[1] - pc_x, self.roof_vertex_y[1] - pc_y, self.roof_vertex_z[1] - pc_z],\n [0.0, 0.0, 0.0]]\n\n # Questa e' la costruzione di una matrice\n mat_rot = [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]]\n\n # Non so quale operazione è implementata, ma a me servono solo tre elementi, j=2, i=0,1, j=1, i=0\n # Primo elemento, j=1, i=0\n mr = math.sqrt((base_r[0][0] ** 2) + (base_r[0][1] ** 2) + (base_r[0][2] ** 2))\n mat_rot[1][0] = base_r[0][1] / mr\n # Secondo elemento, j=2, i=0\n mat_rot[2][0] = base_r[0][2] / mr\n # Terzo elemento, j=2, i=1\n mr = math.sqrt((base_r[1][0] ** 2) + (base_r[1][1] ** 2) + (base_r[1][2] ** 2))\n mat_rot[2][1] = base_r[1][2] / mr\n\n # In alternativa posso calcolare tutti gli elementi della matrice\n # for i in range(2):\n # mr = math.sqrt((base_r[i][0] ** 2) + 
(base_r[i][1] ** 2) + (base_r[i][2] ** 2))\n # for j in range(3):\n # base_r[i][j] /= mr\n # mat_rot[j][i] = base_r[i][j]\n\n # Sono elementi della matrice non utilizzati\n # base_r[2][0] = +base_r[1][1] * base_r[0][2] - base_r[0][1] * base_r[1][2]\n # base_r[2][1] = -base_r[1][0] * base_r[0][2] + base_r[0][0] * base_r[1][2]\n # base_r[2][2] = +base_r[1][0] * base_r[0][1] - base_r[0][0] * base_r[1][1]\n # for i in range(3):\n # mat_rot[i][2] = base_r[2][i]\n\n # Qui estraggo la terna di Tait-Bryan angles usata internamente, la Z1Y2X3\n k17 = mat_rot[2][0]\n k16 = mat_rot[1][0]\n l17 = mat_rot[2][1]\n m20 = math.asin(k17)\n i23 = math.cos(m20)\n i24 = k16 / i23\n i25 = l17 / i23\n m19 = math.asin(i24)\n self.zyx1_r = m19 + roof_motor_position\n self.zyx2_r = math.asin(k17)\n self.zyx3_r = math.asin(i25)\n self.zyx3 = self.zyx3_r / Kinematic.M_TO_RAD\n self.zyx2 = self.zyx2_r / Kinematic.M_TO_RAD\n self.zyx1 = self.zyx1_r / Kinematic.M_TO_RAD\n angles = self.zyx_r_to_xyz(self.zyx3_r, self.zyx2_r, self.zyx1_r)\n self.xyz1 = angles[2]\n self.xyz2 = angles[0]\n self.xyz3 = angles[1]\n self.xyz1_r = angles[5]\n self.xyz2_r = angles[3]\n self.xyz3_r = angles[4]", "def mapr(r):\n return np.rad2deg(np.arctan(r)*2)", "def _get_rotated_coords(x, y, PA):\n x_rot = y * np.cos(np.radians(PA)) + x * np.sin(np.radians(PA))\n y_rot = x * np.cos(np.radians(PA)) - y * np.sin(np.radians(PA))\n return x_rot, y_rot", "def polar_to_cartesian(r, theta):\n\n x = r * cos(theta)\n y = r * sin(theta)\n\n return x, y", "def polarizer(px,py,angle=0):\n M = np.array([[px,0],[0,py]])\n if angle != 0:\n return Jones.rotate(M,angle)\n else:\n return M", "def pos_rot_arm(arm, nparrays=False):\n return pos_rot_cpos(arm.get_current_cartesian_position(), nparrays)", "def polar_to_xy(r, theta):\r\n x = r*np.cos(theta)\r\n y = r*np.sin(theta)\r\n return x, y", "def rotation_matrix(axis, theta):\n axis = np.asarray(axis)\n axis = axis / math.sqrt(np.dot(axis, axis))\n a = math.cos(theta / 2.0)\n b, c, d = -axis * math.sin(theta / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n\n R = np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])\n\n T = np.identity(4)\n T[:3, :3] = R\n return T", "def transform_to(self, coordinates) -> np.ndarray:\n coor = self.T @ (coordinates - self.O)\n r = (coor[0] ** 2 + coor[1] ** 2) ** 0.5\n phi = atan2(coor[1], coor[0])\n z = coor[2]\n return np.array([r, phi, z], dtype=float)", "def polar2cartesian(phi, r):\n phi_radians = radians(phi)\n x = r*cos(phi_radians)\n y = r*sin(phi_radians)\n return x, y", "def to_cartesian(r, phi):\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n return x, y", "def transform_from_rot_trans(R, t):\n R = R.reshape(3, 3)\n t = t.reshape(3, 1)\n return np.vstack((np.hstack([R, t]), [0, 0, 0, 1]))", "def transform_from_rot_trans(R, t):\n R = R.reshape(3, 3)\n t = t.reshape(3, 1)\n return np.vstack((np.hstack([R, t]), [0, 0, 0, 1]))", "def get_Grotations(self, x):\n xsh = x.get_shape().as_list()\n angles = [0.,np.pi/2.,np.pi,3.*np.pi/2.]\n rx = []\n for i in range(4):\n # Z4 rotations about the z axis\n perm = [1,0,2,3]\n y = tf.transpose(x, perm=perm)\n y = tf.contrib.image.rotate(y, angles[i])\n y = tf.transpose(y, perm=perm)\n # Rotations in the quotient space (sphere S^2)\n # i) Z4 rotations about y axis\n for j in range(4):\n perm = [2,1,0,3]\n z = tf.transpose(y, perm=perm)\n z = 
tf.contrib.image.rotate(z, angles[-j])\n z = tf.transpose(z, perm=perm)\n \n rx.append(z)\n # ii) 2 rotations to the poles about the x axis\n perm = [0,2,1,3]\n z = tf.transpose(y, perm=perm)\n z = tf.contrib.image.rotate(z, angles[3])\n z = tf.transpose(z, perm=perm)\n rx.append(z)\n\n z = tf.transpose(y, perm=perm)\n z = tf.contrib.image.rotate(z, angles[1])\n z = tf.transpose(z, perm=perm)\n rx.append(z)\n\n return rx", "def rotation_matrix(self, rotation, rotation_order=\"zyx\"):\n x = math.radians(rotation[0])\n y = math.radians(rotation[1])\n z = math.radians(rotation[2])\n\n cos = math.cos\n sin = math.sin\n if rotation_order == 'zyx':\n index_0 = cos(y) * cos(z)\n index_1 = cos(z) * sin(x) * sin(y) - cos(x) * sin(z)\n index_2 = cos(x) * cos(z) * sin(y) + sin(x) * sin(z)\n\n index_3 = cos(y) * sin(z)\n index_4 = cos(x) * cos(z) + sin(x) * sin(y) * sin(z)\n index_5 = -cos(z) * sin(x) + cos(x) * sin(y) * sin(z)\n\n index_6 = -sin(y)\n index_7 = -cos(y) * sin(x)\n index_8 = cos(x) * cos(y)\n elif rotation_order == 'xyz':\n index_0 = cos(y) * cos(z)\n index_1 = -cos(z) * sin(z)\n index_2 = sin(y)\n\n index_3 = cos(x) * sin(z) + sin(x) * sin(y) * cos(z)\n index_4 = cos(x) * cos(z) - sin(x) * sin(y) * sin(z)\n index_5 = -sin(x) * cos(y)\n\n index_6 = sin(x) * sin(z) - cos(x) * sin(y) * cos(z)\n index_7 = sin(x) * cos(z) + cos(x) * sin(y) * sin(z)\n index_8 = cos(x) * cos(y)\n\n rot_mat = ((index_0, index_1, index_2),\n (index_3, index_4, index_5),\n (index_6, index_7, index_8))\n\n return rot_mat", "def rot(wx, wy, order, dist):\n for _ in range(dist//90):\n if order == \"R\":\n wx, wy = wy, -wx\n elif order == \"L\":\n wx, wy = -wy, wx\n return wx, wy", "def get_rotation(self) -> np.array:\n axis = self.get_arms()[1]\n force = [self.d_x, self.d_y] # \"Force applied on the arm\"\n o_m = [self.target.x_obj - axis.x_obj, self.target.y_obj - axis.y_obj]\n torque = o_m[0]*force[1] - o_m[1] * force[0] # OM vectorial F\n if torque == 1: # Anti clockwise rotation\n rotation = np.array([[0, -1], [1, 0]])\n if torque == -1: # Clockwise rotation\n rotation = np.array([[0, 1], [-1, 0]])\n if torque == 0: # No rotation\n rotation = np.array([[0, 0], [0, 0]])\n return rotation", "def inv_rotation_matrix(self):\n return np.linalg.inv(self._rotation_matrix).tolist()", "def matrix(self):\n return self._rotation", "def rotator(angle):\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c,-s],[s,c]])", "def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]", "def generate_rotation_matrix(x_angle, y_angle, z_angle):\n return np.array([\n [1, 0, 0],\n [0, np.cos(x_angle), -np.sin(x_angle)],\n [0, np.sin(x_angle), np.cos(x_angle)],\n ]).dot([\n [np.cos(y_angle), 0, np.sin(y_angle)],\n [0, 1, 0],\n [-np.sin(y_angle), 0, np.cos(y_angle)],\n ]).dot([\n [np.cos(z_angle), -np.sin(z_angle), 0],\n [np.sin(z_angle), np.cos(z_angle), 0],\n [0, 0, 1],\n ]).tolist()", "def rotate(xy, theta):\n sin_theta, cos_theta = sin(theta), cos(theta)\n R = np.array([[cos_theta, -sin_theta], [sin_theta, cos_theta]])\n return np.dot(R, xy)", "def rotate(X):\n return X", "def cartesian_decoder(coord, r_E=6371):\n def _to_deg(rad):\n return rad * 180. / np.pi\n\n x, y, z = coord[:, 0], coord[:, 1], coord[:, 2]\n\n theta = np.arcsin(z / r_E)\n phi = np.arctan(y / x)\n\n # Convert to degrees. 
Longitudes, are bound between -90;90 in decode step, so correct in 3 and 4th quadrant of x-y plane (Asia)\n lat = _to_deg(theta)\n lon = _to_deg(phi) - 180 * ((x < 0) * (y < 0)) + 180 * ((x < 0) * (y > 0))\n\n return np.concatenate([lat.reshape(-1, 1), lon.reshape(-1, 1)], axis=1)", "def rotations(node):\n if 'rotate' in node:\n original_rotate = [\n float(i) for i in normalize(node['rotate']).strip().split(' ')]\n return original_rotate\n return []", "def transform_from_rot_trans(R, t):\n R = R.reshape(3, 3)\n t = t.reshape(3, 1)\n return np.vstack((np.hstack([R, t]), [0, 0, 0, 1]))", "def get_circle_coords(center, r):\n circle = [[r, 180* phi/3.14159265] for phi in range(0, 180, 5)]\n circle = [pol2cart(p[0], p[1]) + (center[0], center[1]) for p in circle]\n return circle", "def get_roi_coords(self):\n return [group.get_roi_coords() for group in self.roi_groups]" ]
[ "0.72974885", "0.7189403", "0.7180745", "0.6643963", "0.6516557", "0.65161246", "0.6515442", "0.65101385", "0.64101136", "0.6373137", "0.63658345", "0.63429064", "0.6326192", "0.6319467", "0.6302939", "0.6279795", "0.62754506", "0.6256248", "0.622159", "0.6209498", "0.6200936", "0.61915994", "0.61461234", "0.6141616", "0.6137323", "0.60741127", "0.6000117", "0.59961087", "0.5994815", "0.59808135", "0.59695095", "0.59530944", "0.5951366", "0.5947218", "0.59457433", "0.59428066", "0.593436", "0.59292436", "0.59237003", "0.590576", "0.5899659", "0.5896234", "0.5895511", "0.58865994", "0.5886266", "0.58686846", "0.58599436", "0.58513355", "0.584551", "0.5838473", "0.5825219", "0.58083695", "0.5802514", "0.5801001", "0.58008105", "0.57986045", "0.5793601", "0.5788012", "0.57861084", "0.5785941", "0.5785024", "0.57835037", "0.5776651", "0.57697034", "0.57631564", "0.5760034", "0.57574236", "0.5738439", "0.5736239", "0.5729548", "0.57278776", "0.5724266", "0.571046", "0.56964904", "0.5686578", "0.5677959", "0.567429", "0.56724244", "0.5667683", "0.5658428", "0.56454164", "0.5630602", "0.56208843", "0.56208843", "0.5614333", "0.56139386", "0.5611972", "0.56110686", "0.560803", "0.5601291", "0.5596179", "0.5595524", "0.55918723", "0.5591584", "0.5588395", "0.5578647", "0.55775666", "0.5575146", "0.55732554", "0.5566654" ]
0.7185248
2
Returns the rotation matrix that rotates a vector around the given axis by the given angle using the "Euler-Rodrigues formula".
def get_3drotation_matrix(axis, angle):
    angle = angle #*-1
    norm = np.linalg.norm(np.array(axis))
    if norm > 0:
        axis /= norm
    ax, ay, az = axis[0], axis[1], axis[2]
    cos, sin = np.cos(angle), np.sin(angle)
    rotmat = np.array([[cos + ax * ax * (1 - cos), ax * ay * (1 - cos) - az * sin, ax * az * (1 - cos) + ay * sin],
                       [ay * ax * (1 - cos) + az * sin, cos + ay * ay * (1 - cos), ay * az * (1 - cos) - ax * sin],
                       [az * ax * (1 - cos) - ay * sin, az * ay * (1 - cos) + ax * sin, cos + az * az * (1 - cos)]])
    return rotmat
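A short check of the snippet above, which builds the axis-angle form R = cos(angle) * I + (1 - cos(angle)) * outer(a, a) + sin(angle) * cross_matrix(a) named in the query. It assumes NumPy is imported as np and the definition above is in scope; the axis and angle values are illustrative.

import numpy as np

# 90-degree rotation about the z-axis; the axis must be a NumPy array because the
# snippet normalizes it in place with axis /= norm.
R = get_3drotation_matrix(np.array([0.0, 0.0, 1.0]), np.pi / 2)
print(np.round(R @ np.array([1.0, 0.0, 0.0]), 6))  # x maps to y: [0. 1. 0.]
print(np.round(R @ R.T, 6))                        # approximately the identity, since R is orthogonal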
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotateEuler(axis, angle):\n if(axis == 'Z'):\n return np.array([[cos(angle), -sin(angle),0,0],[sin(angle), cos(angle),0,0],[0,0,1,0],[0,0,0,1]])\n if(axis == 'Y'):\n return np.array([[cos(angle),0,sin(angle),0],[0,1,0,0],[-sin(angle),0,cos(angle),0],[0,0,0,1]])\n if(axis == 'X'):\n return np.array([[1,0,0,0],[0,cos(angle), -sin(angle),0],[0,sin(angle), cos(angle),0],[0,0,0,1]])", "def R_axis_angle(axis, angle):\n\n # Trig factors.\n ca = math.cos(angle)\n sa = math.sin(angle)\n C = 1 - ca\n\n # Depack the axis.\n x, y, z = axis\n\n # Multiplications (to remove duplicate calculations).\n xs = x * sa\n ys = y * sa\n zs = z * sa\n xC = x * C\n yC = y * C\n zC = z * C\n xyC = x * yC\n yzC = y * zC\n zxC = z * xC\n\n # Update the rotation matrix.\n matrix = np.zeros((3, 3))\n matrix[0, 0] = x * xC + ca\n matrix[0, 1] = xyC - zs\n matrix[0, 2] = zxC + ys\n matrix[1, 0] = xyC + zs\n matrix[1, 1] = y * yC + ca\n matrix[1, 2] = yzC - xs\n matrix[2, 0] = zxC - ys\n matrix[2, 1] = yzC + xs\n matrix[2, 2] = z * zC + ca\n return matrix", "def rotateEuler(self,axis, angle):\n if(axis == 'Z'):\n return np.array([[cos(angle), -sin(angle),0,0],[sin(angle), cos(angle),0,0],[0,0,1,0],[0,0,0,1]])\n if(axis == 'Y'):\n return np.array([[cos(angle),0,sin(angle),0],[0,1,0,0],[-sin(angle),0,cos(angle),0],[0,0,0,1]])\n if(axis == 'X'):\n return np.array([[1,0,0,0],[0,cos(angle), -sin(angle),0],[0,sin(angle), cos(angle),0],[0,0,0,1]])", "def axis_angle_rm(axis=np.array([1, 0, 0]), angle=-1.57):\n c = math.cos(angle)\n s = math.sin(angle)\n t = 1 - c\n x, y, z = axis[0], axis[1], axis[2]\n rotation_matrix = np.array(\n [\n [t*x*x + c, t*x*y - z*s, t*x*z + y*s],\n [t*x*y + z*s, t*y*y + c, t*y*z - x*s],\n [t*x*z - y*s, t*y*z + x*s, t*z*z + c]\n ])\n return rotation_matrix", "def rotation_matrix_arbitrary_axis(angle, axis):\n axis = normalize_vector(axis)\n\n a = np.cos(angle / 2)\n b, c, d = axis * np.sin(angle / 2)\n\n e11 = np.square(a) + np.square(b) - np.square(c) - np.square(d)\n e12 = 2 * (b * c - a * d)\n e13 = 2 * (b * d + a * c)\n\n e21 = 2 * (b * c + a * d)\n e22 = np.square(a) + np.square(c) - np.square(b) - np.square(d)\n e23 = 2 * (c * d - a * b)\n\n e31 = 2 * (b * d - a * c)\n e32 = 2 * (c * d + a * b)\n e33 = np.square(a) + np.square(d) - np.square(b) - np.square(c)\n\n return np.array([[e11, e12, e13], [e21, e22, e23], [e31, e32, e33]])", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def angle_axis_to_rotation_matrix(angle_axis):\n def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6):\n # We want to be careful to only evaluate the square root if the\n # norm of the angle_axis vector is greater than zero. 
Otherwise\n # we get a division by zero.\n k_one = 1.0\n theta = torch.sqrt(theta2)\n wxyz = angle_axis / (theta + eps)\n wx, wy, wz = torch.chunk(wxyz, 3, dim=1)\n cos_theta = torch.cos(theta)\n sin_theta = torch.sin(theta)\n\n r00 = cos_theta + wx * wx * (k_one - cos_theta)\n r10 = wz * sin_theta + wx * wy * (k_one - cos_theta)\n r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta)\n r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta\n r11 = cos_theta + wy * wy * (k_one - cos_theta)\n r21 = wx * sin_theta + wy * wz * (k_one - cos_theta)\n r02 = wy * sin_theta + wx * wz * (k_one - cos_theta)\n r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta)\n r22 = cos_theta + wz * wz * (k_one - cos_theta)\n rotation_matrix = torch.cat(\n [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1)\n return rotation_matrix.view(-1, 3, 3)\n\n def _compute_rotation_matrix_taylor(angle_axis):\n rx, ry, rz = torch.chunk(angle_axis, 3, dim=1)\n k_one = torch.ones_like(rx)\n rotation_matrix = torch.cat(\n [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1)\n return rotation_matrix.view(-1, 3, 3)\n\n # stolen from ceres/rotation.h\n\n _angle_axis = torch.unsqueeze(angle_axis, dim=1)\n theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2))\n theta2 = torch.squeeze(theta2, dim=1)\n\n # compute rotation matrices\n rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2)\n rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis)\n\n # create mask to handle both cases\n eps = 1e-6\n mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device)\n mask_pos = (mask).type_as(theta2)\n mask_neg = (mask == False).type_as(theta2) # noqa\n\n # create output pose matrix\n batch_size = angle_axis.shape[0]\n rotation_matrix = torch.eye(4).to(angle_axis.device).type_as(angle_axis)\n rotation_matrix = rotation_matrix.view(1, 4, 4).repeat(batch_size, 1, 1)\n # fill output matrix with masked values\n rotation_matrix[..., :3, :3] = \\\n mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor\n return rotation_matrix # Nx4x4", "def _rot(axis, angle):\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])", "def rotation_matrix( axis, angle ):\n\n # Trig factors.\n ca = cos(angle)\n sa = sin(angle)\n C = 1 - ca\n\n # Depack the axis.\n x, y, z = tuple( axis )\n\n # Multiplications (to remove duplicate calculations).\n xs = x*sa\n ys = y*sa\n zs = z*sa\n xC = x*C\n yC = y*C\n zC = z*C\n xyC = x*yC\n yzC = y*zC\n zxC = z*xC\n\n # Update the rotation matrix.\n matrix \t = np.zeros( (3,3) )\n matrix[0, 0] = x*xC + ca\n matrix[0, 1] = xyC - zs\n matrix[0, 2] = zxC + ys\n matrix[1, 0] = xyC + zs\n matrix[1, 1] = y*yC + ca\n matrix[1, 2] = yzC - xs\n matrix[2, 0] = zxC - ys\n matrix[2, 1] = yzC + xs\n matrix[2, 2] = z*zC + ca\n return matrix", "def construct_euler_rodriguez_matrix(self, axis, theta):\n axis = numpy.asarray(axis)\n axis = axis / math.sqrt(numpy.dot(axis, axis))\n a = math.cos(theta / 2.0)\n b, c, d = -axis * math.sin(theta / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n return numpy.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb 
- cc]])", "def rotmat(axis, angle):\n mat = np.eye(3)\n if angle is None or np.isclose(angle, 0.0):\n return mat\n cang = np.cos(angle*radians)\n sang = np.sin(angle*radians)\n if axis == 1:\n mat = np.array(((1, 0, 0), (0, cang, -sang), (0, sang, cang)))\n elif axis == 2:\n mat = np.array(((cang, 0, sang), (0, 1, 0), (-sang, 0, cang)))\n else:\n mat = np.array(((cang, -sang, 0), (sang, cang, 0), (0, 0, 1)))\n return np.matrix(mat)", "def rotation(axis, angle):\n axis = np.asarray(axis)\n try:\n angle = angle[:,None]\n except:\n pass\n return np.hstack([np.asarray(axis)*np.sin(angle/2.),np.cos(angle/2.)])", "def rotation_matrix_3x3_axis(angle, axis):\n assert axis.lower() in ['x','y','z']\n assert -180.0 <= angle <= 180.0\n angle_r = angle * (np.pi / 180.0)\n sa = np.sin(angle_r)\n ca = np.cos(angle_r)\n\n if axis == 'x':\n R = np.array([ [1, 0, 0],\n [0, ca, -sa],\n [0, sa, ca],\n ])\n elif axis == 'y':\n R = np.array([ [ca, 0, sa],\n [0, 1, 0],\n [-sa, 0, ca],\n ])\n elif axis == 'z':\n R = np.array([ [ca, -sa, 0],\n [sa, ca, 0],\n [0, 0, 1],\n ])\n return R", "def rotation(self, angle, axis):\r\n\r\n sqr_a = axis.x*axis.x\r\n sqr_b = axis.y*axis.y\r\n sqr_c = axis.z*axis.z\r\n len2 = sqr_a+sqr_b+sqr_c\r\n\r\n k2 = math.cos(angle)\r\n k1 = (1.0-k2)/len2\r\n k3 = math.sin(angle)/math.sqrt(len2)\r\n k1ab = k1*axis.x*axis.y\r\n k1ac = k1*axis.x*axis.z\r\n k1bc = k1*axis.y*axis.z\r\n k3a = k3*axis.x\r\n k3b = k3*axis.y\r\n k3c = k3*axis.z\r\n\r\n return mat4( k1*sqr_a+k2, k1ab-k3c, k1ac+k3b, 0.0,\r\n k1ab+k3c, k1*sqr_b+k2, k1bc-k3a, 0.0,\r\n k1ac-k3b, k1bc+k3a, k1*sqr_c+k2, 0.0,\r\n 0.0, 0.0, 0.0, 1.0)", "def rotate_around_axis(vec, axis, angle):\n\n axis = normalise(axis)\n a = cos(angle / 2.0)\n b, c, d = -axis * sin(angle / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n rot_matrix = np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])\n return vec.dot(rot_matrix)", "def rotation(theta, axis):\n axis = np.asarray(axis)\n axis = axis/math.sqrt(np.dot(axis, axis))\n a = math.cos(theta/2.0)\n b, c, d = -axis*math.sin(theta/2.0)\n aa, bb, cc, dd = a*a, b*b, c*c, d*d\n bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],\n [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],\n [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])", "def homog_rot_mtx(angle_rads: float, axis: str) -> numpy.array:\n cosang = numpy.cos(angle_rads)\n sinang = numpy.sin(angle_rads)\n\n if \"z\" == axis:\n return numpy.array(\n (\n (cosang, -sinang, 0, 0),\n (sinang, cosang, 0, 0),\n (0, 0, 1, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )\n elif \"y\" == axis:\n return numpy.array(\n (\n (cosang, 0, sinang, 0),\n (0, 1, 0, 0),\n (-sinang, 0, cosang, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )\n else:\n return numpy.array(\n (\n (1, 0, 0, 0),\n (0, cosang, -sinang, 0),\n (0, sinang, cosang, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )", "def axis2rotmat(axis):\n return quat2rotmat(axis2quat(axis))", "def rotate_vector ( angle, axis, old ):\n\n import numpy as np\n \n # Note that the axis vector should be normalized and we test for this\n # In general, the old vector need not be normalized, and the same goes for the result\n # although quite often in our applications they will be\n\n assert old.size == 3, 'Incorrect size of old'\n assert axis.size == 3, 'Incorrect size of axis'\n assert 
np.isclose(np.sum(axis**2),1.0), 'Non-unit vector {} {} {}'.format(*axis)\n\n c = np.cos ( angle )\n s = np.sin ( angle )\n proj = np.dot ( axis, old ) # The two vectors need not be perpendicular\n\n # Standard (Goldstein) rotation formula\n e = c * old + ( 1.0 - c ) * proj * axis + s * np.cross ( axis, old )\n\n return e", "def get_rotation_matrix(axis, theta):\n\n axis = np.array(list(axis))\n axis = axis / np.linalg.norm(axis)\n axis *= -np.sin(theta/2.0)\n a = np.cos(theta/2.0)\n b, c, d = tuple(axis.tolist())\n aa, bb, cc, dd = a*a, b*b, c*c, d*d\n bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],\n [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],\n [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])", "def get_rotation_matrix(axis, theta):\n axis = np.asarray(axis)\n axis = axis / math.sqrt(np.dot(axis, axis))\n a = math.cos(theta / 2.0)\n b, c, d = -axis * math.sin(theta / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])", "def rotation_matrix(theta, axis=None):\n if axis is None:\n axis = [0, 0, 1]\n axis = np.asarray(axis)\n axis = axis/math.sqrt(np.dot(axis, axis))\n a = math.cos(theta/2.0)\n b, c, d = -axis*math.sin(theta/2.0)\n aa, bb, cc, dd = a*a, b*b, c*c, d*d\n bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],\n [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],\n [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])", "def rotation_matrix(axis, theta):\n axis = np.asarray(axis)\n axis = axis / math.sqrt(np.dot(axis, axis))\n a = math.cos(theta / 2.0)\n b, c, d = -axis * math.sin(theta / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n\n R = np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])\n\n T = np.identity(4)\n T[:3, :3] = R\n return T", "def rotate (vect, angle, axis):\n\n cosine = np.cos (angle)\n sine = np.sin (angle)\n\n return (vect * cosine + \\\n sine * np.cross (axis, vect) + \\\n np.dot (axis, vect) * (1 - cosine) * axis)", "def _get_rotation_matrix(axis, theta):\n\n #import math\n axis = np.asarray(axis)\n theta = np.asarray(theta)\n axis = axis/np.sqrt(np.dot(axis, axis))\n a = np.cos(theta/2)\n b, c, d = -axis*np.sin(theta/2)\n aa, bb, cc, dd = a*a, b*b, c*c, d*d\n bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],\n [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],\n [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])", "def rotateAroundAxis(self, rotation_axis, angle):\n # For the mathematics look for: Rodrigues rotation formula.\n # http://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula\n unit_rotation_axis = rotation_axis.getNormalizedVector()\n\n rotated_vector = self.scalarMultiplication(np.cos(angle))\n\n tmp_vector = unit_rotation_axis.crossProduct(self)\n tmp_vector = tmp_vector.scalarMultiplication(np.sin(angle))\n rotated_vector = rotated_vector.addVector(tmp_vector)\n\n scalar_factor = self.scalarProduct(unit_rotation_axis) * (1.0 - np.cos(angle))\n tmp_vector = unit_rotation_axis.scalarMultiplication(scalar_factor)\n rotated_vector = rotated_vector.addVector(tmp_vector)\n\n return rotated_vector", "def axis_angle(cls, 
axis: Union[tuple, Vector], angle: Number):\n if isinstance(axis, (list, tuple)):\n assert(len(axis) == 3)\n axis = Vector(*axis)\n\n assert(isinstance(axis, Vector))\n\n K = Matrix(3, 3)\n\n axis.normalize()\n\n x = axis[0, 0]\n y = axis[1, 0]\n z = axis[2, 0]\n\n K[0, 1] = -z\n K[0, 2] = y\n K[1, 2] = -x\n\n K[1, 0] = z\n K[2, 0] = -y\n K[2, 1] = x\n\n c = np.cos(angle)\n s = np.sin(angle)\n\n I = Matrix.identity(3)\n\n rot = I + (s * I + (1 - c) * K) * K\n\n return cls(rot)", "def rotation_matrix(axis,theta):\n\taxis = np.asarray(axis)\n\ttheta = np.asarray(theta)\n\tif np.all(axis==0): return np.identity(3) \n\taxis = axis/np.sqrt(np.dot(axis,axis))\n\ta = np.cos(theta/2)\n\tb, c, d = -axis*np.sin(theta/2)\n\taa, bb, cc, dd = a*a, b*b, c*c, d*d\n\tbc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n\treturn np.array([[aa+bb-cc-dd,2*(bc+ad),2*(bd-ac)],[2*(bc-ad),aa+cc-bb-dd,2*(cd+ab)],\n\t\t[2*(bd+ac),2*(cd-ab),aa+dd-bb-cc]])", "def rot_from_axisangle(vec):\r\n angle = torch.norm(vec, 2, 2, True)\r\n axis = vec / (angle + 1e-7)\r\n\r\n ca = torch.cos(angle)\r\n sa = torch.sin(angle)\r\n C = 1 - ca\r\n\r\n x = axis[..., 0].unsqueeze(1)\r\n y = axis[..., 1].unsqueeze(1)\r\n z = axis[..., 2].unsqueeze(1)\r\n\r\n xs = x * sa\r\n ys = y * sa\r\n zs = z * sa\r\n xC = x * C\r\n yC = y * C\r\n zC = z * C\r\n xyC = x * yC\r\n yzC = y * zC\r\n zxC = z * xC\r\n\r\n rot = torch.zeros((vec.shape[0], 4, 4)).to(device=vec.device)\r\n\r\n rot[:, 0, 0] = torch.squeeze(x * xC + ca)\r\n rot[:, 0, 1] = torch.squeeze(xyC - zs)\r\n rot[:, 0, 2] = torch.squeeze(zxC + ys)\r\n rot[:, 1, 0] = torch.squeeze(xyC + zs)\r\n rot[:, 1, 1] = torch.squeeze(y * yC + ca)\r\n rot[:, 1, 2] = torch.squeeze(yzC - xs)\r\n rot[:, 2, 0] = torch.squeeze(zxC - ys)\r\n rot[:, 2, 1] = torch.squeeze(yzC + xs)\r\n rot[:, 2, 2] = torch.squeeze(z * zC + ca)\r\n rot[:, 3, 3] = 1\r\n\r\n return rot", "def rotation_matrix(axis, theta): \n \n import numpy as np\n import math\n\n axis = np.asarray(axis)\n theta = np.asarray(theta)\n axis = axis/math.sqrt(np.dot(axis, axis))\n a = math.cos(theta/2)\n b, c, d = -axis*math.sin(theta/2)\n aa, bb, cc, dd = a*a, b*b, c*c, d*d\n bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],\n [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],\n [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])", "def rotation_matrix(axis, theta):\n\taxis = np.asarray(axis)\n\taxis = axis / np.sqrt(np.dot(axis, axis))\n\ta = np.cos(theta / 2.0)\n\tb, c, d = -axis * np.sin(theta / 2.0)\n\taa, bb, cc, dd = a * a, b * b, c * c, d * d\n\tbc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n\treturn np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n\t\t\t\t\t [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n\t\t\t\t\t [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])", "def rotation_matrix(axis, theta):\n axis = np.asarray(axis)\n axis = axis / math.sqrt(np.dot(axis, axis))\n a = math.cos(theta / 2.0)\n b, c, d = -axis * math.sin(theta / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])", "def rotation_matrix(axis, theta):\n axis = np.asarray(axis)\n axis = axis / math.sqrt(np.dot(axis, axis))\n a = math.cos(theta / 2.0)\n b, c, d = -axis * math.sin(theta / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, 
bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])", "def rotation_matrix(axis, theta):\n axis = np.asarray(axis)\n axis = axis / math.sqrt(np.dot(axis, axis))\n a = math.cos(theta / 2.0)\n b, c, d = -axis * math.sin(theta / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])", "def rotation_axis_matrix(phi: numbers.Real, axis: int):\n\n if axis == 0:\n return [[1, 0, 0, 0],\n [0, cos(phi), sin(phi), 0],\n [0, sin(phi), cos(phi), 0],\n [0, 0, 0, 1]]\n elif axis == 1:\n return [[cos(phi), 0, sin(phi), 0],\n [0, 1, 0, 0],\n [-sin(phi), 0, cos(phi), 0],\n [0, 0, 0, 1]]\n elif axis == 2:\n return [[cos(phi), -sin(phi), 0, 0],\n [sin(phi), cos(phi), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]]\n else:\n raise ValueError(\"only 3d space coordinates as homogeneous vectors are supported\")", "def rotation_matrix(axis, theta):\n axis = np.asarray(axis)\n axis = axis / np.sqrt(np.dot(axis, axis))\n a = np.cos(theta / 2.0)\n b, c, d = -axis * np.sin(theta / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])", "def rotation_matrix(axis, theta):\n axis = np.asarray(axis)\n axis = axis/math.sqrt(np.dot(axis, axis))\n a = math.cos(theta/2.0)\n b, c, d = -axis*math.sin(theta/2.0)\n aa, bb, cc, dd = a*a, b*b, c*c, d*d\n bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],\n [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],\n [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])", "def rotation_around_axis(self,axis,angle,**kwargs):\n xyz = self.get('x,y,z',**kwargs)\n\n # get the data\n ct,st = np.cos(angle),np.sin(angle)\n ux,uy,uz = axis\n\n # get the center of the molecule\n xyz0 = np.mean(xyz,0)\n\n # definition of the rotation matrix\n # see https://en.wikipedia.org/wiki/Rotation_matrix\n rot_mat = np.array([\n [ct + ux**2*(1-ct), ux*uy*(1-ct) - uz*st, ux*uz*(1-ct) + uy*st],\n [uy*ux*(1-ct) + uz*st, ct + uy**2*(1-ct), uy*uz*(1-ct) - ux*st],\n [uz*ux*(1-ct) - uy*st, uz*uy*(1-ct) + ux*st, ct + uz**2*(1-ct) ]])\n\n # apply the rotation\n xyz = np.dot(rot_mat,(xyz-xyz0).T).T + xyz0\n self.update('x,y,z',xyz,**kwargs)\n\n return xyz0", "def rotateMatrix_by_axis(axis, theta):\n axis = np.asarray(axis)\n theta = np.asarray(theta)\n axis = unitVec(axis)\n a = cos(theta / 2)\n b, c, d = -axis * sin(theta / 2)\n aa, bb, cc, dd = a*a, b*b, c*c, d*d\n bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],\n [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],\n [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])", "def rotate_matrix(axis, theta):\n if np.abs(axis).sum() < 1e-6 or np.abs(theta) < 1e-6:\n return np.eye(3)\n axis = np.asarray(axis)\n axis = axis / math.sqrt(np.dot(axis, axis))\n a = math.cos(theta / 2.0)\n b, c, d = -axis * math.sin(theta / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n return np.array(\n [\n [aa + bb - cc - dd, 
2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc],\n ]\n )", "def rotate(vector, angle):\n return np.cos(angle) * vector[0] + np.sin(angle) * vector[1], \\\n -np.sin(angle) * vector[0] + np.cos(angle) * vector[1]", "def Pivot(rotation, axis, angle):\n # Check for an invalid coordinate axis.\n if axis not in [0, 1, 2]:\n raise Error('Invalid axis {}. Must be [0, 1, 2].'.format(axis))\n\n radians = math.radians(angle)\n c = math.cos(radians)\n s = math.sin(radians)\n\n # We need to maintain the \"right-hand\" rule, no matter which\n # axis was selected. That means we pick (i, j, k) axis order\n # such that the following vector cross product is satisfied:\n # i x j = k\n i = (axis + 1) % 3\n j = (axis + 2) % 3\n k = axis\n\n rot = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n\n rot[i][i] = c*rotation.rot[i][i] - s*rotation.rot[i][j]\n rot[i][j] = s*rotation.rot[i][i] + c*rotation.rot[i][j]\n rot[i][k] = rotation.rot[i][k]\n\n rot[j][i] = c*rotation.rot[j][i] - s*rotation.rot[j][j]\n rot[j][j] = s*rotation.rot[j][i] + c*rotation.rot[j][j]\n rot[j][k] = rotation.rot[j][k]\n\n rot[k][i] = c*rotation.rot[k][i] - s*rotation.rot[k][j]\n rot[k][j] = s*rotation.rot[k][i] + c*rotation.rot[k][j]\n rot[k][k] = rotation.rot[k][k]\n\n return RotationMatrix(rot)", "def rotation_matrix_xyz(axis, angle, angle_dim):\n assert angle_dim is \"deg\" or angle_dim is \"rad\"\n assert axis is \"x\" or axis is \"y\" or axis is \"z\"\n x = 0\n y = 0\n z = 0\n\n if angle_dim is \"deg\":\n a = np.deg2rad(angle)\n else:\n a = angle\n\n if axis is \"x\":\n x = 1\n y = 0\n z = 0\n if axis is \"y\":\n x = 0\n y = 1\n z = 0\n if axis is \"z\":\n x = 0\n y = 0\n z = 1\n\n s = np.sin(a)\n c = np.cos(a)\n rotation_matrix = np.array([[c + x ** 2 * (1 - c), x * y * (1 - c) - z * s, x * z * (1 - c) + y * s],\n [y * x * (1 - c) + z * s, c + y ** 2 * (1 - c), y * z * (1 - c) - x * s],\n [z * x * (1 - c) - y * s, z * y * (1 - c) + x * s, c + z ** 2 * (1 - c)]])\n\n return rotation_matrix", "def rotate_global(self, angle, axis=(0., 0., 1.)):\n self.rotation = aa2q(angle, glm.vec3(axis)) * self.rotation", "def _rotation_matrix(self, axis, angle):\n axis = axis/np.linalg.norm(axis)\n axis_squared = np.square(axis)\n cos_angle = np.cos(angle)\n sin_angle = np.sin(angle)\n rot_matrix_row_one = np.array([cos_angle+axis_squared[0]*(1-cos_angle),\n axis[0]*axis[1]*(1-cos_angle) - axis[2]*sin_angle,\n axis[0]*axis[2]*(1-cos_angle)+axis[1]*sin_angle])\n\n rot_matrix_row_two = np.array([axis[1]*axis[0]*(1-cos_angle)+axis[2]*sin_angle,\n cos_angle+axis_squared[1]*(1-cos_angle),\n axis[1]*axis[2]*(1-cos_angle) - axis[0]*sin_angle])\n\n rot_matrix_row_three = np.array([axis[2]*axis[0]*(1-cos_angle)-axis[1]*sin_angle,\n axis[2]*axis[1]*(1-cos_angle)+axis[0]*sin_angle,\n cos_angle+axis_squared[2]*(1-cos_angle)])\n\n rotation_matrix = np.array([rot_matrix_row_one, rot_matrix_row_two, rot_matrix_row_three])\n return rotation_matrix", "def rotate(self, axis, theta):\n v = Vector3(self) # ensure vector\n k = Vector3(axis.uv())\n return type(self)(\n cosd(theta) * v\n + sind(theta) * k.cross(v)\n + (1 - cosd(theta)) * k * (k.dot(v))\n )", "def rotdMat(angle, axis=0):\n if axis == 2:\n return np.array([[cosd(angle), -sind(angle), 0],\n [sind(angle), cosd(angle), 0], [0, 0, 1]])\n elif axis == 1:\n return np.array([[cosd(angle), 0, -sind(angle)],\n [0, 1, 0], [sind(angle), 0, cosd(angle)]])\n else:\n return np.array([[1, 0, 0], [0, cosd(angle), -sind(angle)],\n [0, sind(angle), 
cosd(angle)]])", "def angle_to_rotation_matrix(angle: torch.Tensor) -> torch.Tensor:\n ang_rad = deg2rad(angle)\n cos_a: torch.Tensor = torch.cos(ang_rad)\n sin_a: torch.Tensor = torch.sin(ang_rad)\n return torch.stack([cos_a, sin_a, -sin_a, cos_a], dim=-1).view(*angle.shape, 2, 2)", "def rotation_matrix3(axis, theta):\n R = np.eye(3)\n c = math.cos(theta)\n s = math.sin(theta)\n a1 = (axis + 1) % 3\n a2 = (axis + 2) % 3\n R[a1, a1] = c\n R[a1, a2] = -s\n R[a2, a1] = s\n R[a2, a2] = c\n return np.matrix(R)", "def rotation_matrix(angle) -> np.array:\n return np.array([\n [np.cos(angle), np.sin(angle)],\n [-np.sin(angle), np.cos(angle)]])", "def rotate_matrix(angle):\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, s],\n [-s, c]])", "def quaternion_about_axis(angle, axis):\r\n q = numpy.array([0.0, axis[0], axis[1], axis[2]])\r\n qlen = vector_norm(q)\r\n if qlen > _EPS:\r\n q *= math.sin(angle/2.0) / qlen\r\n q[0] = math.cos(angle/2.0)\r\n return q", "def _from_axis_angle(cls, axis, angle):\n mag_sq = np.dot(axis, axis)\n if mag_sq == 0.0:\n raise ZeroDivisionError(\"Provided rotation axis has no length\")\n # Ensure axis is in unit vector form\n if (abs(1.0 - mag_sq) > 1e-12):\n axis = axis / sqrt(mag_sq)\n theta = angle / 2.0\n r = cos(theta)\n i = axis * sin(theta)\n\n return cls(r, i[0], i[1], i[2])", "def rotate(vector, angle, inverse=False):\n gamma, beta, alpha = angle[0], angle[1], angle[2]\n\n # Rotation matrices around the X (gamma), Y (beta), and Z (alpha) axis\n RX = rot_axis(gamma, 0)\n RY = rot_axis(beta, 1)\n RZ = rot_axis(alpha, 2)\n\n # Composed rotation matrix with (RX, RY, RZ)\n if inverse:\n return np.dot(np.dot(np.dot(RX.T, RY.T), RZ.T), vector)\n else:\n return np.dot(np.dot(np.dot(RZ, RY), RX), vector)", "def eulerAnglesToRotationMatrix(theta):\n\n R_x = np.array([[1, 0, 0 ],\n [0, np.cos(theta[0]), -np.sin(theta[0]) ],\n [0, np.sin(theta[0]), np.cos(theta[0]) ]\n ])\n R_y = np.array([[np.cos(theta[1]), 0, np.sin(theta[1]) ],\n [0, 1, 0 ],\n [-np.sin(theta[1]), 0, np.cos(theta[1]) ]\n ])\n R_z = np.array([[np.cos(theta[2]), -np.sin(theta[2]), 0],\n [np.sin(theta[2]), np.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n R = np.dot(R_z, np.dot( R_y, R_x ))\n return R", "def forRotation(axis, angle):\n\n if round(axis.norm(),6) != 1.0:\n raise ValueError('rotation axis must be a unit vector!')\n\n half_angle = angle * 0.5\n c = math.cos(half_angle)\n s = math.sin(half_angle)\n return Quaternion.fromScalarVector(c, axis.mults(s))", "def rotator(angle):\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c,-s],[s,c]])", "def rotation_matrix(dt, omega):\n R = np.array([\n [np.cos(omega * dt), -np.sin(omega * dt)],\n [np.sin(omega * dt), np.cos(omega * dt)]\n ])\n return R", "def rotate_a(X,vector):\r\n\taxis_vector = (math.radians(-X)) * np.array([1,0,0])\r\n\tr = R.from_rotvec(axis_vector)\r\n\treturn list(r.apply(vector))", "def angleAxis2rot3D(axis, theta):\n if len(axis) is not 3:\n raise ValueError('Number of axis element must be 3!')\n axis = axis.astype(float)\n axis /= np.linalg.norm(axis)\n a = axis[0]\n b = axis[1]\n c = axis[2]\n cosTheta = np.cos(theta)\n bracket = 1 - cosTheta\n aBracket = a * bracket\n bBracket = b * bracket\n cBracket = c * bracket\n sinTheta = np.sin(theta)\n aSinTheta = a * sinTheta\n bSinTheta = b * sinTheta\n cSinTheta = c * sinTheta\n rot3D = np.array([[a*aBracket+cosTheta, a*bBracket-cSinTheta, a*cBracket+bSinTheta],\n [b*aBracket+cSinTheta, b*bBracket+cosTheta, b*cBracket-aSinTheta],\n [c*aBracket-bSinTheta, 
c*bBracket+aSinTheta, c*cBracket+cosTheta]])\n return rot3D", "def rotate(self, angle, axis):\r\n R=self.rotation(angle, axis)\r\n self.mlist = (self*R).mlist\r\n return self", "def rotate_local(self, angle, axis=(0., 0., 1.)):\n self.rotation *= aa2q(angle, glm.vec3(axis))", "def axisangle2matrix(angle, direction, point=None):\r\n sina = math.sin(angle)\r\n cosa = math.cos(angle)\r\n direction = unit_vector(direction[:3])\r\n # rotation matrix around unit vector\r\n R = numpy.diag([cosa, cosa, cosa])\r\n R += numpy.outer(direction, direction) * (1.0 - cosa)\r\n direction *= sina\r\n R += numpy.array([[ 0.0, -direction[2], direction[1]],\r\n [ direction[2], 0.0, -direction[0]],\r\n [-direction[1], direction[0], 0.0]])\r\n M = numpy.identity(4)\r\n M[:3, :3] = R\r\n if point is not None:\r\n # rotation not around origin\r\n point = numpy.array(point[:3], dtype=numpy.float64, copy=False)\r\n M[:3, 3] = point - numpy.dot(R, point)\r\n return M", "def rotation_matrix(rx, ry, rz):\n # Convert from degrees to radians.\n rx = np.pi * rx / 180\n ry = np.pi * ry / 180\n rz = np.pi * rz / 180\n\n # Pre-compute sine and cosine of angles.\n cx, cy, cz = np.cos([rx, ry, rz])\n sx, sy, sz = np.sin([rx, ry, rz])\n\n # Set up euler rotations.\n Rx = np.array([[1, 0, 0, 0],\n [0, cx, -sx, 0],\n [0, sx, cx, 0],\n [0, 0, 0, 1]])\n\n Ry = np.array([[cy, 0, sy, 0],\n [0, 1, 0, 0],\n [-sy, 0, cy, 0],\n [0, 0, 0, 1]])\n\n Rz = np.array([[cz, -sz, 0, 0],\n [sz, cz, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n return Rz.dot(Ry.dot(Rx))", "def get_rotation(self) -> np.array:\n axis = self.get_arms()[1]\n force = [self.d_x, self.d_y] # \"Force applied on the arm\"\n o_m = [self.target.x_obj - axis.x_obj, self.target.y_obj - axis.y_obj]\n torque = o_m[0]*force[1] - o_m[1] * force[0] # OM vectorial F\n if torque == 1: # Anti clockwise rotation\n rotation = np.array([[0, -1], [1, 0]])\n if torque == -1: # Clockwise rotation\n rotation = np.array([[0, 1], [-1, 0]])\n if torque == 0: # No rotation\n rotation = np.array([[0, 0], [0, 0]])\n return rotation", "def angle_axis_to_rot3d(axis, theta):\n if isinstance(axis, string_types):\n axis = axis.lower()\n if axis == 'x':\n axis = np.array([1., 0., 0.])\n elif axis == 'y':\n axis = np.array([0., 1., 0.])\n elif axis == 'z':\n axis = np.array([0., 0., 1.])\n else:\n raise ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n elif len(axis) != 3:\n raise ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n axis = axis.astype(float)\n axis /= np.linalg.norm(axis)\n a = axis[0]\n b = axis[1]\n c = axis[2]\n cos_theta = np.cos(theta)\n bracket = 1 - cos_theta\n a_bracket = a * bracket\n b_bracket = b * bracket\n c_bracket = c * bracket\n sin_theta = np.sin(theta)\n a_sin_theta = a * sin_theta\n b_sin_theta = b * sin_theta\n c_sin_theta = c * sin_theta\n rot3d = np.array(\n [[a * a_bracket + cos_theta, a * b_bracket - c_sin_theta, a * c_bracket + b_sin_theta],\n [b * a_bracket + c_sin_theta, b * b_bracket + cos_theta, b * c_bracket - a_sin_theta],\n [c * a_bracket - b_sin_theta, c * b_bracket + a_sin_theta, c * c_bracket + cos_theta]])\n return rot3d", "def axisAnglesToRotMat(xrot, yrot, zrot):\n\n xmat = np.eye(3)\n ymat = np.eye(3)\n zmat = np.eye(3)\n\n xmat[1, 1] = np.cos(xrot)\n xmat[1, 2] = -np.sin(xrot)\n xmat[2, 1] = np.sin(xrot)\n xmat[2, 2] = np.cos(xrot)\n\n ymat[0, 0] = np.cos(yrot)\n ymat[0, 2] = np.sin(yrot)\n ymat[2, 0] = -np.sin(yrot)\n ymat[2, 2] = np.cos(yrot)\n\n zmat[0, 0] = np.cos(zrot)\n zmat[0, 1] = -np.sin(zrot)\n zmat[1, 0] = 
np.sin(zrot)\n zmat[1, 1] = np.cos(zrot)\n\n return concat(zmat, ymat, xmat)", "def axisAnglesToRotMat(xrot, yrot, zrot):\n\n xmat = np.eye(3)\n ymat = np.eye(3)\n zmat = np.eye(3)\n\n xmat[1, 1] = np.cos(xrot)\n xmat[1, 2] = -np.sin(xrot)\n xmat[2, 1] = np.sin(xrot)\n xmat[2, 2] = np.cos(xrot)\n\n ymat[0, 0] = np.cos(yrot)\n ymat[0, 2] = np.sin(yrot)\n ymat[2, 0] = -np.sin(yrot)\n ymat[2, 2] = np.cos(yrot)\n\n zmat[0, 0] = np.cos(zrot)\n zmat[0, 1] = -np.sin(zrot)\n zmat[1, 0] = np.sin(zrot)\n zmat[1, 1] = np.cos(zrot)\n\n return concat(zmat, ymat, xmat)", "def get_rotation_matrix(theta, rot_vector):\n\n ux = rot_vector[0]\n uy = rot_vector[1]\n uz = rot_vector[2]\n cost = np.cos(theta)\n sint = np.sin(theta)\n\n R = np.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],\n [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],\n [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])\n\n return R", "def rotate_along(axis: Tensor) -> Tensor:\n W = torch.einsum('ijk,j->ik', levi_civita.to(axis), axis)\n return expm(W)", "def axis_rotation(points, angle, inplace=False, deg=True, axis='z'):\n axis = axis.lower()\n axis_to_vec = {'x': (1, 0, 0), 'y': (0, 1, 0), 'z': (0, 0, 1)}\n\n if axis not in axis_to_vec:\n raise ValueError('Invalid axis. Must be either \"x\", \"y\", or \"z\"')\n\n rot_mat = transformations.axis_angle_rotation(axis_to_vec[axis], angle, deg=deg)\n return transformations.apply_transformation_to_points(rot_mat, points, inplace=inplace)", "def rotate_axis(self, axis: \"Vertex\", angle: float):\n self.vertices = list(\n Matrix44.axis_rotate(axis, angle).transform_vertices(self.vertices)\n )\n return self", "def rotate2(x, angle, origin=(0, 0)):\n origin = np.asarray(origin)\n x = np.asarray(x) - origin\n r = rotation_matrix2(angle)\n return x.dot(r.T) + origin", "def axangle2rotmat(axangles):\r\n\r\n if type(axangles) is not np.ndarray:\r\n raise ValueError('Rodrigues only works on numpy arrays')\r\n \r\n # store original shape\r\n shape = axangles.shape\r\n assert shape[-1] % 3 == 0, \"inputs are not axis angles\"\r\n axangles = axangles.reshape((-1, 3))\r\n\r\n rotmats = []\r\n for i in range(axangles.shape[0]):\r\n rotmat, _ = cv2.Rodrigues(axangles[i])\r\n rotmats.append(rotmat)\r\n\r\n # restore original shape\r\n new_shape = shape[:-1] + (shape[-1]//3*9,)\r\n return np.array(rotmats).reshape(new_shape)", "def rot(vec, angle, degrees=True):\r\n if degrees:\r\n angle = np.radians(angle)\r\n r = np.array([[np.cos(-angle), -np.sin(-angle)],\r\n [np.sin(-angle), np.cos(-angle)]])\r\n return r.dot(vec)", "def rotate( self, degrees, axis ):\n # copy and normalize axis\n axis = Vector3( axis ).normalize()\n\n # get stub of self projected onto axis\n stub = Vector3( self ).project( axis )\n\n # subtract stub from self\n self -= stub\n\n # get new vector crossed with axis\n crossed = Vector3( axis ).cross( self )\n\n # trigify self and crossed to account for rotation\n crossed *= math.sin( math.radians(degrees) )\n self *= math.cos( math.radians(degrees) )\n\n # add crossed and stub components to self\n self += crossed\n self += stub\n \n return self", "def euler2mat(angle):\n B = angle.size(0)\n x, y, z = angle[:,0], angle[:,1], angle[:,2]\n\n cosz = torch.cos(z)\n sinz = torch.sin(z)\n\n zeros = z.detach()*0\n ones = zeros.detach()+1\n zmat = torch.stack([cosz, -sinz, zeros,\n sinz, cosz, zeros,\n zeros, zeros, ones], dim=1).reshape(B, 3, 3)\n\n cosy = torch.cos(y)\n siny = torch.sin(y)\n\n ymat = torch.stack([cosy, zeros, 
siny,\n zeros, ones, zeros,\n -siny, zeros, cosy], dim=1).reshape(B, 3, 3)\n\n cosx = torch.cos(x)\n sinx = torch.sin(x)\n\n xmat = torch.stack([ones, zeros, zeros,\n zeros, cosx, -sinx,\n zeros, sinx, cosx], dim=1).reshape(B, 3, 3)\n\n rotMat = xmat @ ymat @ zmat\n return rotMat", "def euler2mat(angle):\n B = angle.size(0)\n x, y, z = angle[:,0], angle[:,1], angle[:,2]\n\n cosz = torch.cos(z)\n sinz = torch.sin(z)\n\n zeros = z.detach()*0\n ones = zeros.detach()+1\n zmat = torch.stack([cosz, -sinz, zeros,\n sinz, cosz, zeros,\n zeros, zeros, ones], dim=1).reshape(B, 3, 3)\n\n cosy = torch.cos(y)\n siny = torch.sin(y)\n\n ymat = torch.stack([cosy, zeros, siny,\n zeros, ones, zeros,\n -siny, zeros, cosy], dim=1).reshape(B, 3, 3)\n\n cosx = torch.cos(x)\n sinx = torch.sin(x)\n\n xmat = torch.stack([ones, zeros, zeros,\n zeros, cosx, -sinx,\n zeros, sinx, cosx], dim=1).reshape(B, 3, 3)\n\n rotMat = xmat @ ymat @ zmat\n return rotMat", "def rotationMatrixToEulerAngles(R) :\n sy = np.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n singular = sy < 1e-6\n\n if not singular :\n x = np.arctan2(R[2,1] , R[2,2])\n y = np.arctan2(-R[2,0], sy)\n z = np.arctan2(R[1,0], R[0,0])\n else :\n x = np.arctan2(-R[1,2], R[1,1])\n y = np.arctan2(-R[2,0], sy)\n z = 0\n\n return np.array([x, y, z])", "def from_axisangle(self, axis: np.ndarray, angle: float) -> np.ndarray:\n axis /= np.linalg.norm(axis)\n K = skew(axis)\n return np.identity(3) + np.sin(angle)*K + (1-np.cos(angle))*K@K", "def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._normal = matrix.dot(self._normal)\n self._position = matrix.dot(self._position)", "def rotaxis2m(theta, vector):\n vector = vector.normalized()\n c = numpy.cos(theta)\n s = numpy.sin(theta)\n t = 1 - c\n x, y, z = vector.get_array()\n rot = numpy.zeros((3, 3))\n # 1st row\n rot[0, 0] = t * x * x + c\n rot[0, 1] = t * x * y - s * z\n rot[0, 2] = t * x * z + s * y\n # 2nd row\n rot[1, 0] = t * x * y + s * z\n rot[1, 1] = t * y * y + c\n rot[1, 2] = t * y * z - s * x\n # 3rd row\n rot[2, 0] = t * x * z - s * y\n rot[2, 1] = t * y * z + s * x\n rot[2, 2] = t * z * z + c\n return rot", "def rotate_quaternion ( angle, axis, old ):\n\n import numpy as np\n\n # Note that the axis vector should be normalized and we test for this\n # In general, the old quaternion need not be normalized, and the same goes for the result\n # although in our applications we only ever use unit quaternions (to represent orientations)\n assert old.size==4, 'Error in old quaternion dimension'\n assert axis.size==3, 'Error in axis dimension'\n assert np.isclose (np.sum(axis**2),1.0), 'axis normalization error {} {} {}'.format(*axis)\n\n # Standard formula for rotation quaternion, using half angles\n rot = np.sin(0.5*angle) * axis\n rot = np.array([np.cos(0.5*angle),rot[0],rot[1],rot[2]],dtype=np.float_)\n\n e = quatmul ( rot, old ) # Apply rotation to old quaternion\n return e", "def euler2mat(angle):\n B = angle.size(0)\n x, y, z = angle[:,0], angle[:,1], angle[:,2]\n\n cosz = torch.cos(z)\n sinz = torch.sin(z)\n\n zeros = z.detach()*0\n ones = zeros.detach()+1\n zmat = torch.stack([cosz, -sinz, zeros,\n sinz, cosz, zeros,\n zeros, zeros, ones], dim=1).reshape(B, 3, 3)\n\n cosy = torch.cos(y)\n siny = torch.sin(y)\n\n ymat = torch.stack([cosy, zeros, siny,\n zeros, ones, zeros,\n -siny, zeros, cosy], dim=1).reshape(B, 3, 3)\n\n cosx = torch.cos(x)\n sinx = torch.sin(x)\n\n xmat = torch.stack([ones, zeros, zeros,\n zeros, cosx, -sinx,\n zeros, sinx, cosx], dim=1).reshape(B, 3, 
3)\n\n rotMat = torch.matmul(torch.matmul(xmat, ymat), zmat)\n return rotMat", "def rotation_matrix2(angle):\n c, s = cos(angle), sin(angle)\n return np.array([[c, -s], [s, c]])", "def rotate(v: vect2d, angle: float) -> vect2d:\n vector = ((v.x * math.cos(angle) - v.y * math.sin(angle)),\n (v.x * math.sin(angle) + v.x * math.cos(angle)))\n return vector", "def quaternion_from_axis_angle(x, y, z, theta):\n if x == y == z == 0:\n return np.array([1, 0, 0, 0])\n axis = np.array([x, y, z])\n axis /= np.linalg.norm(axis)\n return rowan.from_axis_angle(axis, theta)", "def so3_matrix_generator(axis, theta):\n theta = np.asarray(theta)\n\n theta = theta[:, None, None]\n x, y, z = axis.T\n zero = np.zeros_like(x)\n k = np.stack([zero, -z, y, z, zero, -x, -y, x, zero], 1).reshape((-1, 3, 3))\n rot = np.eye(3)[None] + np.sin(theta) * k + (1 - np.cos(theta)) * k @ k\n\n return rot", "def vrrotvec2mat(ax_ang):\n\n #file_dir = os.path.dirname(os.path.realpath(__file__))\n #path_dir2 = file_dir + '/../geometry/'\n #sys.path.append(path_dir2)\n\n if ax_ang.ndim == 1:\n if np.size(ax_ang) == 5:\n ax_ang = np.reshape(ax_ang, (5, 1))\n msz = 1\n elif np.size(ax_ang) == 4:\n ax_ang = np.reshape(np.hstack((ax_ang, np.array([1]))), (5, 1))\n msz = 1\n else:\n raise Exception('Wrong Input Type')\n elif ax_ang.ndim == 2:\n if np.shape(ax_ang)[0] == 5:\n msz = np.shape(ax_ang)[1]\n elif np.shape(ax_ang)[1] == 5:\n ax_ang = ax_ang.transpose()\n msz = np.shape(ax_ang)[1]\n else:\n raise Exception('Wrong Input Type')\n else:\n raise Exception('Wrong Input Type')\n\n direction = ax_ang[0:3, :]\n angle = ax_ang[3, :]\n\n d = np.array(direction, dtype=np.float64)\n d /= np.linalg.norm(d, axis=0)\n x = d[0, :]\n y = d[1, :]\n z = d[2, :]\n c = np.cos(angle)\n s = np.sin(angle)\n tc = 1 - c\n\n mt11 = tc*x*x + c\n mt12 = tc*x*y - s*z\n mt13 = tc*x*z + s*y\n\n mt21 = tc*x*y + s*z\n mt22 = tc*y*y + c\n mt23 = tc*y*z - s*x\n\n mt31 = tc*x*z - s*y\n mt32 = tc*y*z + s*x\n mt33 = tc*z*z + c\n\n mtx = np.column_stack((mt11, mt12, mt13, mt21, mt22, mt23, mt31, mt32, mt33))\n\n inds1 = np.where(ax_ang[4, :] == -1)\n mtx[inds1, :] = -mtx[inds1, :]\n\n if msz == 1:\n mtx = mtx.reshape(3, 3)\n else:\n mtx = mtx.reshape(msz, 3, 3)\n\n return mtx", "def rotator(angle):\n\n c = np.cos(2*angle)\n s = np.sin(2*angle)\n return np.array([[1,0,0,0],[0,c,-s,0],[0,s,c,0],[0,0,0,1]])", "def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor:\n if not torch.is_tensor(angle_axis):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(\n type(angle_axis)))\n\n if not angle_axis.shape[-1] == 3:\n raise ValueError(\"Input must be a tensor of shape Nx3 or 3. 
Got {}\"\n .format(angle_axis.shape))\n # unpack input and compute conversion\n a0: torch.Tensor = angle_axis[..., 0:1]\n a1: torch.Tensor = angle_axis[..., 1:2]\n a2: torch.Tensor = angle_axis[..., 2:3]\n theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2\n\n theta: torch.Tensor = torch.sqrt(theta_squared)\n half_theta: torch.Tensor = theta * 0.5\n\n mask: torch.Tensor = theta_squared > 0.0\n ones: torch.Tensor = torch.ones_like(half_theta)\n\n k_neg: torch.Tensor = 0.5 * ones\n k_pos: torch.Tensor = torch.sin(half_theta) / theta\n k: torch.Tensor = torch.where(mask, k_pos, k_neg)\n w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones)\n\n quaternion: torch.Tensor = torch.zeros_like(angle_axis)\n quaternion[..., 0:1] += a0 * k\n quaternion[..., 1:2] += a1 * k\n quaternion[..., 2:3] += a2 * k\n return torch.cat([w, quaternion], dim=-1)", "def getRotationMatrix(x, y, z, angle):\n # impossible to have a rotational matrix around (0, 0 ,0)\n if x == 0 and y == 0 and z == 0:\n raise Exception(\"Cannot have a rotation matrix around (0, 0, 0)\")\n\n # normalize vector\n vec = MatrixExtended([x, y, z])\n length = np.linalg.norm(vec)\n x /= length\n y /= length\n z /= length\n\n # some shortcuts for readability\n xx = x * x\n yy = y * y\n zz = z * z\n C = math.cos\n S = math.sin\n\n # calculate matrix elements\n e11 = xx + (1 - xx) * C(angle)\n e12 = x * y * (1 - C(angle)) - z * S(angle)\n e13 = x * z * (1 - C(angle)) + y * S(angle)\n e21 = x * y * (1 - C(angle)) + z * S(angle)\n e22 = yy + (1 - yy) * C(angle)\n e23 = y * z * (1 - C(angle)) - x * S(angle)\n e31 = x * z * (1 - C(angle)) - y * S(angle)\n e32 = y * z * (1 - C(angle)) + x * S(angle)\n e33 = zz + (1 - zz) * C(angle)\n\n return MatrixExtended([\n [e11, e12, e13, 0],\n [e21, e22, e23, 0],\n [e31, e32, e33, 0],\n [0, 0, 0, 1]])", "def rotation_matrix_2d(angle):\n psi = Angle(angle).rad\n return np.array([[cos(psi), -sin(psi)],\n [sin(psi), cos(psi)]])", "def xy_rotation(vector,theta):\r\n R = np.array([[np.cos(theta), -np.sin(theta),0],\r\n [np.sin(theta), np.cos(theta),0],\r\n [0,0,1]\r\n ])\r\n return np.dot(R,vector)", "def eulerAnglesToRotationMatrix(self, theta):\n\n R_x = np.array([[1, 0, 0],\n [0, math.cos(theta[0]), -math.sin(theta[0])],\n [0, math.sin(theta[0]), math.cos(theta[0])]\n ])\n\n R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],\n [0, 1, 0],\n [-math.sin(theta[1]), 0, math.cos(theta[1])]\n ])\n\n R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],\n [math.sin(theta[2]), math.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n\n R = np.dot(R_z, np.dot(R_y, R_x))\n\n return R", "def euler_angle_to_rotation(ea, convention='zyx'):\n axis_names_to_vectors = dict([('x', (1, 0, 0)), ('y', (0, 1, 0)), ('z', (0, 0, 1))])\n axis0, axis1, axis2 = convention\n R0 = so3.rotation(axis_names_to_vectors[axis0], ea[0])\n R1 = so3.rotation(axis_names_to_vectors[axis1], ea[1])\n R2 = so3.rotation(axis_names_to_vectors[axis2], ea[2])\n return so3.mul(R0, so3.mul(R1, R2))", "def Raxis(axis: Tuple[float, float, float], a: float, origin=np.array([0, 0, 0])):\n oneminuscos = 1 - np.cos(a)\n x, y, z = axis\n\n return np.array([\n [np.cos(a) + (x ** 2) * oneminuscos, x * y * oneminuscos - z * np.sin(a), x * z * oneminuscos + y * np.sin(a),\n origin[0]],\n [y * x * oneminuscos + z * np.sin(a), np.cos(a) + (y ** 2) * oneminuscos, y * z * oneminuscos - x * np.sin(a),\n origin[1]],\n [z * x * oneminuscos - y * np.sin(a), z * y * oneminuscos + x * np.sin(a), np.cos(a) + (z ** 2) * oneminuscos,\n origin[2]],\n [0, 0, 0, 1]\n 
])", "def _euler_angles_to_rotation_matrix(theta):\n R_x = np.array([[1, 0, 0],\n [0, math.cos(theta[0]), -math.sin(theta[0])],\n [0, math.sin(theta[0]), math.cos(theta[0])]\n ])\n\n R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],\n [0, 1, 0],\n [-math.sin(theta[1]), 0, math.cos(theta[1])]\n ])\n\n R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],\n [math.sin(theta[2]), math.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n\n R = np.dot(R_z, np.dot(R_y, R_x))\n\n return R", "def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._center = matrix.dot(self._center)", "def rotation_matrix(angle, direction, point=None):\r\n sina = math.sin(angle)\r\n cosa = math.cos(angle)\r\n direction = unit_vector(direction[:3])\r\n # rotation matrix around unit vector\r\n R = np.diag([cosa, cosa, cosa])\r\n R += np.outer(direction, direction) * (1.0 - cosa)\r\n direction *= sina\r\n R += np.array([[ 0.0, -direction[2], direction[1]],\r\n [ direction[2], 0.0, -direction[0]],\r\n [-direction[1], direction[0], 0.0]])\r\n M = np.identity(4)\r\n M[:3, :3] = R\r\n if point is not None:\r\n # rotation not around origin\r\n point = np.array(point[:3], dtype=np.float64, copy=False)\r\n M[:3, 3] = point - np.dot(R, point)\r\n return M" ]
[ "0.7911063", "0.788436", "0.7837863", "0.77911395", "0.77726126", "0.77274466", "0.7598185", "0.75942045", "0.753307", "0.7435765", "0.7434122", "0.7428991", "0.74156046", "0.7338308", "0.732471", "0.7321008", "0.7264772", "0.71720546", "0.7159397", "0.71509737", "0.7136716", "0.71145844", "0.71122146", "0.7099143", "0.7097503", "0.70846444", "0.70719177", "0.70683885", "0.70658857", "0.7038549", "0.70300186", "0.7027387", "0.7027387", "0.7027387", "0.70238185", "0.70194536", "0.7018537", "0.6981997", "0.69382894", "0.68737054", "0.6836862", "0.67920065", "0.6772454", "0.6766266", "0.6730908", "0.6709919", "0.66443914", "0.66245323", "0.6606277", "0.66027176", "0.6587102", "0.65579915", "0.6557621", "0.65392727", "0.6531542", "0.65314025", "0.65217835", "0.65202093", "0.64939934", "0.6476307", "0.6475229", "0.6466672", "0.6443872", "0.6406615", "0.63993263", "0.63932294", "0.63808626", "0.63808626", "0.63795424", "0.6379398", "0.6373411", "0.6372907", "0.63527995", "0.63145137", "0.6305863", "0.629555", "0.6292813", "0.6292813", "0.62893313", "0.6286634", "0.6280232", "0.6279035", "0.6273513", "0.62713146", "0.62689644", "0.62641704", "0.62632734", "0.6258945", "0.6245639", "0.62432414", "0.6241872", "0.6239218", "0.62274593", "0.6223716", "0.6223621", "0.6220979", "0.6219515", "0.62062144", "0.6206185", "0.61929256" ]
0.7117221
21
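All of the rotation snippets listed above implement some variant of the axis-angle (Rodrigues) construction R = I + sin(theta)*K + (1 - cos(theta))*K^2, where K is the skew-symmetric cross-product matrix of the unit rotation axis. A minimal self-contained sketch of that formula, assuming NumPy; the function name and the sanity check are illustrative and not taken from any of the snippets above:

import numpy as np

def rodrigues_rotation(axis, theta):
    # Rotation matrix for a rotation of theta radians about a 3D axis (Rodrigues' formula).
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    x, y, z = axis
    # Skew-symmetric cross-product matrix K of the unit axis.
    K = np.array([[0.0, -z, y],
                  [z, 0.0, -x],
                  [-y, x, 0.0]])
    return np.eye(3) + np.sin(theta) * K + (1.0 - np.cos(theta)) * (K @ K)

# Sanity check: rotating the x axis by 90 degrees about z yields the y axis.
R = rodrigues_rotation([0.0, 0.0, 1.0], np.pi / 2.0)
assert np.allclose(R @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])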
Returns the normal vector of a plane defined by the points p1, p2 and p3.
def get_normal_vector_of_plane(p1, p2, p3):
    v12 = np.array(p1) - np.array(p2)
    v13 = np.array(p1) - np.array(p3)
    nvec = np.cross(v12, v13)
    ## print 'norm: '+str(np.linalg.norm(nvec))
    return nvec / np.linalg.norm(nvec)
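A quick usage sketch for the routine above; the three points are hypothetical and chosen to span the xy-plane, and NumPy is assumed to be imported as np:

p1, p2, p3 = [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]
n = get_normal_vector_of_plane(p1, p2, p3)
# The unit normal of the xy-plane is the z axis (up to sign).
assert np.allclose(np.abs(n), [0.0, 0.0, 1.0])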
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normal(self) -> Vec:\n # The three points are in clockwise order, so compute differences\n # in the clockwise direction, then cross to get the normal.\n point_1 = self.planes[1] - self.planes[0]\n point_2 = self.planes[2] - self.planes[1]\n\n return Vec.cross(point_1, point_2).norm()", "def normal_vector_3p(a: Vector, b: Vector, c: Vector) -> Vector:\n return (b - a).cross(c - a).normalize()", "def get_normal_vectors(self, p, x1, y1, z1, x2, y2, z2, x3, y3, z3):\n x1.value, y1.value, z1.value, x2.value, y2.value, z2.value, x3.value, y3.value, z3.value = self._get_normal_vectors(p, x1.value, y1.value, z1.value, x2.value, y2.value, z2.value, x3.value, y3.value, z3.value)", "def plane_equation(point_a, point_b, point_c):\n v1 = np.subtract(point_a, point_c)\n v2 = np.subtract(point_a, point_b)\n normal = np.cross(v1, v2)\n # print 'b4 norm', normal\n unit_normal = norm_vect(normal)\n # print 'unityyy', unit_normal\n return unit_normal", "def _normal_vector(o, p0_3d, p1_3d):\n # The vector between middle point of v1-v2 and object center location\n # is the normal vector I'm looking for\n vn = p0_3d.lerp(p1_3d, 0.5) - o.matrix_world.translation\n # normalize so I can to length computation on it\n vn.normalize()\n return vn", "def PlaneNormalVector(h, k, l):\r\n vec = np.array([h, k, l])\r\n return vec/np.linalg.norm(vec)", "def normal(self) -> Vector:\n return normalize(cross(self.d1, self.d2))", "def uVectNorm(x1,y1,z1, # P\n x2,y2,z2, # Q\n x3,y3,z3): # R\n p1 = np.array([x1,y1,z1])\n p2 = np.array([x2,y2,z2])\n p3 = np.array([x3,y3,z3])\n\n v1 = p3-p1\n v2 = p2-p1\n\n cp = np.cross(v1,v2)\n a,b,c = cp\n\n d = np.dot(cp, p3)\n\n print(a,b,c)", "def normal(point_one, point_two):\n return numpy.array([point_one[1] - point_two[1], point_two[0] - point_one[0]])", "def GetNormal(self, *args):\n return _itkLineSpatialObjectPointPython.itkLineSpatialObjectPoint3_GetNormal(self, *args)", "def surface_norm(self, pt):\n\n return self.normal.normalize()", "def plane_equation(p1, p2, p3):\n a1 = p2[0] - p1[0]\n b1 = p2[1] - p1[1]\n c1 = p2[2] - p1[2]\n a2 = p3[0] - p1[0]\n b2 = p3[1] - p1[1]\n c2 = p3[2] - p1[2]\n a = b1 * c2 - b2 * c1\n b = a2 * c1 - a1 * c2\n c = a1 * b2 - b1 * a2\n # Points are collinear\n if (abs(a) < 1e-6) and (abs(b) < 1e-6) and (abs(c) < 1e-6):\n return None\n # All clear\n d = (- a * p1[0] - b * p1[1] - c * p1[2])\n return a, b, c, d", "def compute_normalvect(self):\n normvect = np.zeros((len(self.tri_pnts),3,3))\n zvec = np.array([0, 0, 1])\n for itri, tri in enumerate(self.tri_pnts):\n #import pdb; pdb.set_trace()\n tri0, tri1, tri2 = tri\n x1,y1 = self.points[tri1]-self.points[tri0]\n v1 = np.array([x1,y1,0])\n x2,y2 = self.points[tri2]-self.points[tri1]\n v2 = np.array([x2,y2,0])\n x3,y3 = self.points[tri0]-self.points[tri2]\n v3 = np.array([x3,y3,0])\n v1 = v1/np.linalg.norm(v1)\n v2 = v2/np.linalg.norm(v2)\n v3 = v3/np.linalg.norm(v3)\n #import pdb; pdb.set_trace()\n normvect[itri,:,:] = np.cross(v1,zvec), np.cross(v2,zvec), np.cross(v3,zvec)\n #import pdb; pdb.set_trace()\n return normvect", "def normal(self,points):\n ez=np.array([[0,0,1]])\n v=((points-self.pos()*ez)*self.C-ez)\n return (v/np.linalg.norm(v,axis=1)[:,np.newaxis])#*np.sign(self.C)", "def plane_point_side_v3(p: np.ndarray, v: np.ndarray) -> Any:\n return p[:3].dot(v) + p[3]", "def unit_normals(p,q,r): \n vx1 = p[0] - r[0] # x1 - x3. \n vy1 = p[1] - r[1] # y1 - y3. \n vz1 = p[2] - r[2] # z1 - z3. \n\n vx2 = q[0] - r[0] # x2 - x3. \n vy2 = q[1] - r[1] # y2 - y3. \n vz2 = q[2] - r[2] # z2 - z3. 
\n\n vnx = vy1*vz2 - vz1*vy2 \n vny = vz1*vx2 - vx1*vz2 \n vnz = vx1*vy2 - vy1*vx2 \n\n len_vn = math.sqrt(vnx*vnx + vny*vny + vnz*vnz) \n vnx = vnx/len_vn \n vny = vny/len_vn \n vnz = vnz/len_vn \n\n return vnx, vny, vnz", "def normal(self, t=0):\n n = Line3d()\n n.p = self.lerp(t)\n n.v = self.cross\n return n", "def normal(self) -> Vec:\n return abs(self.up_axis.cross(self.forward()))", "def normal_triangle(triangle, unitized=True):\n assert len(triangle) == 3, \"Three points are required.\"\n a, b, c = triangle\n ab = subtract_vectors(b, a)\n ac = subtract_vectors(c, a)\n n = cross_vectors(ab, ac)\n if not unitized:\n return n\n lvec = length_vector(n)\n return n[0] / lvec, n[1] / lvec, n[2] / lvec", "def generate_normals(v1, v2, v3, normalize_result=True):\n # make vectors relative to v2\n # we assume opengl counter-clockwise ordering\n a = v1 - v2\n b = v3 - v2\n n = cross(b, a)\n if normalize_result:\n n = normalize(n)\n return n", "def normal_vector(origin, vectors):\n return np.cross(vectors[0] - origin, vectors[1] - origin)", "def getNormalizedNormalVec(self):\n TriPos = self.position\n # calc normalized normal vecor for Tri\n # get vectors Vert1Vert2 & Vert2Vert3\n TriVectors = np.subtract(TriPos[1:],TriPos[:-1])\n # get crossproduct of Vert1Vert2 & Vert2Vert3 (= surface normal)\n TriNorm = np.cross(TriVectors[0],TriVectors[1])+0.0\n # get length of surface normal\n length = np.linalg.norm(TriNorm)\n # divide each component of surface normal by length (= normalized surface normal)\n NormalizedNormalVec = np.around(TriNorm / length, decimals=5) # rounded, otherwise different values, equals not found\n # create string of tuple for segment dict \n #SegmDict = str(tuple(NormalizedNormalVec))\n return NormalizedNormalVec.tolist()", "def twoDNormal(self):\n return vector((-1) * self.y, self.x, 0)", "def normal_vector(self, facet):\n assert len(facet) == 3\n pos = self.cluster.get_positions()\n v1 = pos[facet[1], :] - pos[facet[0], :]\n v2 = pos[facet[2], :] - pos[facet[0], :]\n n = np.cross(v1, v2)\n length = np.sqrt(np.sum(n**2))\n return n / length", "def project_3d_points_to_plane(points, p1, p2 ,p3, numpoints):\n p1 = np.asarray(p1)\n p2 = np.asarray(p2)\n p3 = np.asarray(p3)\n\n # get vectors in plane\n v1 = p3 - p1\n v2 = p2 - p1\n\n # compute cross product\n cp = np.cross(v1, v2)\n a, b, c = cp # normal to plane is ax + by + cz\n\n # evaluate d\n d = np.dot(cp, p3)\n\n # thus, normal is given by\n plane = vtk.vtkPlane()\n origin = p1\n normal = normalize(np.array([a,b,c]))\n plane.SetOrigin(p1)\n plane.SetNormal(normal)\n\n if numpoints == 1:\n proj = [0,0,0]\n plane.ProjectPoint(points, origin, normal, proj)\n return proj\n else:\n projected_pts = np.zeros((numpoints, 3), dtype=float)\n\n for i in range(numpoints):\n proj = [0,0,0]\n plane.ProjectPoint(points[i], origin, normal, proj)\n projected_pts[i] = proj\n\n return projected_pts", "def normal(self, u, v):\n result = np.cross(self.du(u, v), self.dv(u, v))\n result = result / np.sqrt(vectordot(result, result))[:, None]\n return result", "def normalize(x: float, y: float, z: float) -> Point3D:\n mag = math.sqrt(x*x + y*y + z*z)\n return x/mag, y/mag, z/mag", "def calcular_norma_r3():\n x, y, z = carga_vector()\n norma = math.sqrt(x**2 + y**2 + z**2)\n print('\\nLa norma del vector ({},{},{}) es: {}'.format(x, y, z, norma))", "def normal(vx,vy,n):\n if vx==0:\n if vy==0: \n return (0,0)\n else:\n return (0,n)\n elif vy==0:\n return (n,0)\n else:\n return (n/sqrt(1+(vy/vx)**2),n/sqrt(1+(vx/vy)**2))", "def normalVect(self, 
n=2):\n L = len(self.vertices)\n normals = []\n while len(normals) < n:\n j = randrange(L)\n v0 = vector(self.vertices[j].coords())\n v1 = vector(self.vertices[int(j + L / 3) % L].coords())\n v2 = vector(self.vertices[int(j + 2 * L / 3) % L].coords())\n try:\n normals.append(((v1 - v0) * (v2 - v0)).normalize())\n except ValueError:\n pass\n return (1 / len(normals)) * sum(normals, vector(0, 0, 0))", "def surface_norm(self, pt):\n\n return (pt - self.origin).normalize()", "def normal(self) -> Vector:\n return self._normal", "def normal_polygon(points, unitized=True):\n p = len(points)\n assert p > 2, \"At least three points required\"\n nx = 0\n ny = 0\n nz = 0\n o = centroid_points(points)\n a = subtract_vectors(points[-1], o)\n for i in range(p):\n b = subtract_vectors(points[i], o)\n n = cross_vectors(a, b)\n a = b\n nx += n[0]\n ny += n[1]\n nz += n[2]\n if not unitized:\n return nx, ny, nz\n l = length_vector([nx, ny, nz])\n return nx / l, ny / l, nz / l", "def find_plane_eq(p1, p2, p3):\n\n p1 = np.asarray(p1)\n p2 = np.asarray(p2)\n p3 = np.asarray(p3)\n\n # These two vectors are in the plane\n v1 = p3 - p1\n v2 = p2 - p1\n\n # the cross product is a vector normal to the plane\n cp = np.cross(v1, v2)\n a, b, c = cp\n\n # This evaluates a * x3 + b * y3 + c * z3 which equals d\n d = np.dot(cp, p3)\n\n plane_eq = np.array([a, b, c, d])\n\n return plane_eq", "def normal(self, uv):\n res = GeomLProp_SLProps(self.surface(), uv[0], uv[1], 1, 1e-9)\n if not res.IsNormalDefined():\n return (0, 0, 0)\n normal = geom_utils.gp_to_numpy(res.Normal())\n if self.reversed():\n normal = -normal\n return normal", "def from_3p(cls, a: Vector, b: Vector, c: Vector) -> 'Plane':\n n = (b - a).cross(c - a).normalize()\n return Plane(n, n.dot(a))", "def Normal(self):\n return Vector(self.normal)", "def normal(axis_direction, axis_origin, point):\n # transform input into numpy arrays\n axis_direction = np.array(axis_direction, float)\n axis_origin = np.array(axis_origin, float)\n point = np.array(point, float)\n\n # vector from axis normal_origin to point\n vector = point - axis_origin\n\n # projection of vector on axis\n projection = np.dot(vector, axis_direction)*axis_direction\n\n # the normal vector from normal_origin to point\n normal_direction = vector - projection\n\n # normalized normal_direction\n normal_direction = normal_direction/np.linalg.norm(normal_direction)\n\n # opposite of the projection of vector on normal\n projection2 = - np.dot(normal_direction, vector)*normal_direction\n\n normal_origin = point + projection2\n\n return normal_direction, normal_origin", "def distance_from_plane(n,p,r,nnorm=None):\n #return np.abs(np.dot(n,(p-r)))/np.linalg.norm(n)\n #return np.abs(np.dot(n,(p-r)))/nnorm\n # the normal vector is already a unit vector!\n return np.abs(np.dot(n,(p-r)))", "def _normal_polygon(points, unitized=True):\n p = len(points)\n assert p > 2, \"At least three points required\"\n nx = 0\n ny = 0\n nz = 0\n for i in range(-1, p - 1):\n p1 = points[i - 1]\n p2 = points[i]\n p3 = points[i + 1]\n v1 = subtract_vectors(p1, p2)\n v2 = subtract_vectors(p3, p2)\n n = cross_vectors(v1, v2)\n nx += n[0]\n ny += n[1]\n nz += n[2]\n if not unitized:\n return nx, ny, nz\n l = length_vector([nx, ny, nz])\n return nx / l, ny / l, nz / l", "def _fit_plane_to_point_cloud(\n points_xyz: NDArrayFloat,\n) -> Tuple[float, float, float, float]:\n center_xyz: NDArrayFloat = np.mean(points_xyz, axis=0)\n out: Tuple[NDArrayFloat, NDArrayFloat, NDArrayFloat] = np.linalg.svd(\n points_xyz - center_xyz\n )\n vh 
= out[2]\n\n # Get the unitary normal vector\n a, b, c = float(vh[2, 0]), float(vh[2, 1]), float(vh[2, 2])\n d: float = -np.dot([a, b, c], center_xyz)\n return (a, b, c, d)", "def plot_plane(unit_normal, x_array, y_array, fore):\n # print'unit normal = ', unit_normal\n z = (((unit_normal[0] * (fore[0] - x_array)) + (unit_normal[1] * (fore[1] - y_array))) / unit_normal[2]) + fore[2]\n # print 'plane numbers\\n', z\n return z", "def normals(t, v):\n n = numpy.zeros((len(t), 3))\n for i in range(0, len(t)):\n p = vertices(t[i], v)\n n[i] = triangle.normal(p)\n return n", "def normal(self, point):\n point = self._center - np.array(point)\n # if abs(point.dot(point) - self._radius**2) > 1e-15:\n # raise RayTraceError(\n # 'Cannot compute normal. Point is too far from surface ({}).'.format(\n # (abs(point.dot(point) - self._radius**2))))\n return normalize(point / self._radius)", "def norm3d(self) -> float:\n\n return self.v3ddict.norm3d()", "def getNormalVector(self):\n vector = self.unit_vector\n vector.rotate(math.pi / 2)\n return vector", "def get_normal_from_pose(pose):\n # p = Pose()\n # p.orientation = pose.orientation\n # z1 = (quaternion_matrix((p.orientation.x, p.orientation.y, p.orientation.z, p.orientation.w)))[0:3,2:3]\n z = tf_conversions.fromMsg(pose).M.UnitZ()\n normal = np.array([[z[0], z[1], z[2]]]).T\n \n return normal", "def plane_from_points(a, b, c):\n ab = subtract_vectors(b, a)\n ac = subtract_vectors(c, a)\n n = normalize_vector(cross_vectors(ab, ac))\n return a, n", "def normal(self) -> 'MultiVector':\n\n return self / np.sqrt(abs(self.mag2()))", "def GetNormal(self, *args):\n return _itkLineSpatialObjectPointPython.itkLineSpatialObjectPoint2_GetNormal(self, *args)", "def get_surface_normals_o3d(normals, points, scale=2):\n # total number of points:\n N = points.shape[0]\n\n points = np.vstack(\n (points.to_numpy(), points.to_numpy() + scale * normals)\n )\n lines = [[i, i+N] for i in range(N)]\n colors = np.zeros((N, 3)).tolist()\n\n # build pca line set:\n surface_normals_o3d = o3d.geometry.LineSet(\n points=o3d.utility.Vector3dVector(points),\n lines=o3d.utility.Vector2iVector(lines),\n )\n surface_normals_o3d.colors = o3d.utility.Vector3dVector(colors)\n\n return surface_normals_o3d", "def normal_at(self, u, v, world=True):\n u = u * pi\n v = v * PI2\n x = cos(u) * sin(v)\n y = sin(u) * sin(v)\n z = cos(v)\n normal = Vector(x, y, z)\n if world:\n normal.transform(self.transformation)\n return normal", "def vector_3d_magnitude(x, y, z):\n return math.sqrt((x * x) + (y * y) + (z * z))", "def SetNormal(self, *args):\n return _itkSurfaceSpatialObjectPointPython.itkSurfaceSpatialObjectPoint3_SetNormal(self, *args)", "def getOnePerpendicularVector(self):\n vector_y = Vector(0, 1, 0)\n vector_z = Vector(0, 0, 1)\n\n if self.getNormalizedVector() == vector_z:\n return vector_y\n\n vector_perpendicular = vector_z.perpendicularTo(self)\n vector_perpendicular = vector_perpendicular.getNormalizedVector()\n\n return vector_perpendicular", "def get_perpendicular(n: np.ndarray) -> np.ndarray:\n # find smallest component\n i = np.argmin(n)\n\n # get the other two indices\n a = (i + 1) % 3\n b = (i + 2) % 3\n\n result = np.zeros(3)\n result[i] = 0.0\n result[a] = n[b]\n result[b] = -n[a]\n return result", "def plane_distance(p, plane):\n x, y, z = p\n A, B, C, D = plane\n return A*x + B*y + C*z + D", "def norm2(self):\n\t\treturn self.x ** 2 + self.y ** 2 + self.z ** 2", "def normals(self, point):\n vertex_0, vertex_1, vertex_2 = self._vertices(point)\n normals_at_point = 0.5 * 
gs.cross(vertex_1 - vertex_0, vertex_2 - vertex_0)\n return normals_at_point", "def normal(point0: Point, point1: Point) -> Tuple[Point, float]:\n mid: Point = ((point0[0] + point1[0]) / 2, (point0[1] + point1[1]) / 2)\n v: Vector2 = (point1[0] - point0[0], point1[1] - point0[1])\n normal: Vector2 = (-v[1], v[0])\n\n angle = math.atan(v[1] / v[0])\n angleNorm = math.atan(normal[1] / normal[0])\n assert(abs(abs(angle - angleNorm) - math.pi / 2) < 0.001)\n\n x = [mid[0], mid[0] + normal[0]]\n y = [mid[1], mid[1] + normal[1]]\n plt.plot(x, y, \":\")\n\n return (mid, angleNorm)", "def make_inward_normal(tetrahedron):\n\n convert_to_np_array = lambda v: np.array([v.x, v.y, v.z])\n np_vertices = list(map(convert_to_np_array, [tetrahedron.get_vertex(i) for i in range(4)]))\n # This is the middle point\n # midpoint = np.mean(np_vertices, axis=0)\n\n midpoint = np_vertices[0]\n for i in range(1, 4):\n midpoint += np_vertices[i]\n midpoint = midpoint / 2.0\n\n for i in range(4):\n face = tetrahedron.get_face(i)\n d = distance(face, midpoint)\n if d < 0:\n face.nx *= -1.0\n face.ny *= -1.0\n face.nz *= -1.0\n face.d *= -1.0", "def SetNormal(self, *args):\n return _itkLineSpatialObjectPointPython.itkLineSpatialObjectPoint3_SetNormal(self, *args)", "def get_normal(self):\n c, s = np.cos(self.eangles), np.sin(self.eangles)\n r = np.array([[c, -s], [s, c]])\n u = np.array([1, 0])\n return np.dot(r, u)", "def project_point_plane(point, plane):\n base, normal = plane\n normal = normalize_vector(normal)\n vector = subtract_vectors(point, base)\n snormal = scale_vector(normal, dot_vectors(vector, normal))\n return subtract_vectors(point, snormal)", "def get_normal_fluctuation(hover,target,normal,vec):\n\tvector = hover - target\n\tvector = vector - vec*(vector>(vec/2.)) + vec*(vector<(-1*vec/2.))\n\tprojected = planeproject(vector,normal)\n\t#---get the sign of the projection\n\tplane_point = vector+projected\n\tsign = 1.0-2.0*(np.arccos(np.dot(vecnorm(normal),vecnorm(vector)))>np.pi/2.)\n\treturn sign*np.linalg.norm(plane_point)", "def normal_at(self, world_point: Point) -> Vector:\n # Convert the world point and normal to shape space\n local_point = self.world_to_object(world_point)\n # Calculate the normal in shape space\n local_normal = self.local_normal_at(local_point)\n # Convert the local normal vector back to world space\n return self.normal_to_world(local_normal)", "def _update_surface_normals(self):\n\n # This is the case if there are too few points to\n # compute normals so there can be values to remove\n\n #can be important for parallel\n self.swarm.shadow_particles_fetch()\n\n if self.empty:\n self.director.data[...] 
= 0.0\n else:\n\n particle_coords = self.swarm.particleCoordinates.data\n\n Nx = np.empty(self.swarm.particleLocalCount)\n Ny = np.empty(self.swarm.particleLocalCount)\n Nz = np.empty(self.swarm.particleLocalCount)\n\n for i, xyz in enumerate(particle_coords):\n r, neighbours = self.kdtree.query(particle_coords[i], k=4)\n\n # this point is neighbour[0] and neighbour points are neighbours[(1,2,3)]\n XYZ1 = self.kdtree.data[neighbours[1]]\n XYZ2 = self.kdtree.data[neighbours[2]]\n XYZ3 = self.kdtree.data[neighbours[3]]\n\n dXYZ1 = XYZ2 - XYZ1\n dXYZ2 = XYZ3 - XYZ1\n\n # Cross product of those 2 vectors can be use as the local normal (perhaps)\n\n Nx[i], Ny[i], Nz[i] = np.cross(dXYZ1, dXYZ2)\n #if i == 0:\n # print(Nx, Ny, Nz)\n # print(xyz[0], xyz[1],xyz[2])\n # print((self.insidePt[0] - xyz[0]) * Nx[i] )\n\n if (self.insidePt):\n sign = np.sign( (self.insidePt[0] - xyz[0]) * Nx[i] +\n (self.insidePt[1] - xyz[1]) * Ny[i] +\n (self.insidePt[2] - xyz[2]) * Nz[i] )\n Nx[i] *= sign\n Ny[i] *= sign\n Nz[i] *= sign\n\n\n for i in range(0, self.swarm.particleLocalCount):\n scale = 1.0 / np.sqrt(Nx[i]**2 + Ny[i]**2 + Nz[i]**2)\n Nx[i] *= scale\n Ny[i] *= scale\n Nz[i] *= scale\n\n\n self.director.data[:,0] = Nx[:]\n self.director.data[:,1] = Ny[:]\n self.director.data[:,2] = Nz[:]\n\n print(\"Surf Norms\")\n\n return", "def vector_3D(pt1, pt2, t):\n x1, y1, z1 = pt1\n x2, y2, z2 = pt2\n \n modulus = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2 + (z1 - z2) ** 2)\n \n x = x1 + (x2 - x1) / modulus * t\n y = y1 + (y2 - y1) / modulus * t\n z = z1 + (z2 - z1) / modulus * t\n \n return [x, y, z]", "def face_normals(xyz, triangles):\n\n\tabc_xyz = face_attr(xyz, triangles)\n\n\tbc_xyz = abc_xyz[:,:,1:3] - abc_xyz[:,:,0:1]\n\tfn = tf.linalg.cross(bc_xyz[:,:,0], bc_xyz[:,:,1])\n\tfn = tf.math.l2_normalize(fn, -1)\n\treturn fn", "def plane(self):\r\n from lsst.analysis import utils\r\n return utils.fitplane(self.points, self.z)", "def calculate_plane_normal(patches):\n normals = []\n for patch in patches:\n normal = get_normal(patch)\n normals.append(normal)\n # Taken naive mean of normals\n # TODO outlier removal\n normals = np.mean(np.array(normals), axis=0)\n return normals", "def vector_perpendicular_3D(pt1, pt2, which, Z, Sx):\n\n v = ((pt2[0] - pt1[0]), (pt2[1] - pt1[1]), (pt2[2] - pt1[2]))\n \n if which == 1:\n Sx, Sy = (pt1[0] - v[1] / np.sqrt(v[0]**2 + v[1]**2) * Sx,\n pt1[1] + v[0] / np.sqrt(v[0]**2 + v[1]**2) * Sx)\n Sz = pt1[2]\n \n elif which == 2:\n Sx, Sy = (pt2[0] - v[1] / np.sqrt(v[0]**2 + v[1]**2) * Sx,\n pt2[1] + v[0] / np.sqrt(v[0]**2 + v[1]**2) * Sx)\n Sz = pt2[2]\n \n return [Sx, Sy, Sz + Z]", "def normal(self, position):\n return self._normal", "def get_quad_normal(q):\n P0, P1, P2, P3 = q\n p0 = Vector.fromPoint(P0) # fromPoint converts to ECEF\n p1 = Vector.fromPoint(P1)\n p3 = Vector.fromPoint(P3)\n v1 = p1 - p0\n v2 = p3 - p0\n vn = Vector.cross(v2, v1).norm()\n return vn", "def GetNormal(self):\n ...", "def vec_normal(vec):\r\n n = sqrt(sum(x ** 2 for x in vec)) or 1\r\n return [x / n for x in vec]", "def find_perpendicular_vector(vt):\n x, y = vt\n return np.array([y, -x])", "def l2_norm(self):\n return (self.x**2 + self.y**2 + self.z**2)**0.5", "def normal(self, point):\n return self._normal.dup()", "def hyperplane(self):\n origin = (self.a+self.b+self.c)/3.\n normal = np.cross(self.a-self.b, self.a-self.c)\n return Hyperplane(origin, normal)", "def normal(self, param, diff=0, xyz=False):\n tx, tz = self.tangent(param, diff=diff)\n return self._prepare_result(-tz, tx, xyz)", "def 
plane_from_multiple_points(pnts: Iterable[Point]) -> Plane:\n n = len(pnts)\n x = [pnt.x for pnt in pnts]\n y = [pnt.y for pnt in pnts]\n z = [pnt.z for pnt in pnts]\n pntc = Point(sum(x)/n, sum(y)/n, sum(z)/n)\n x = [pnt.x-pntc.x for pnt in pnts]\n y = [pnt.y-pntc.y for pnt in pnts]\n z = [pnt.z-pntc.z for pnt in pnts]\n sxx = sum([x[i]**2 for i in range(n)])\n sxy = sum([x[i]*y[i] for i in range(n)])\n sxz = sum([x[i]*z[i] for i in range(n)])\n syy = sum([y[i]**2 for i in range(n)])\n syz = sum([y[i]*z[i] for i in range(n)])\n d = sxx*syy-sxy**2\n a = (syz*sxy-sxz*syy)/d\n b = (sxy*sxz-sxx*syz)/d\n nrm = Vector(a, b, 1.0)\n return Plane(pntc, nrm)", "def proj_to_plane(norm, d, pts):\n a = norm[0]\n b = norm[1]\n c = norm[2]\n\n p = []\n\n for i in range(len(pts)):\n x_p = pts[i][0]\n y_p = pts[i][1]\n z_p = pts[i][2]\n\n if a != 0:\n x_0 = (b * b + c * c) * x_p - a * b * y_p - a * c * z_p - a * d\n y_0 = (b * 1.0 / a) * (x_0 - x_p) + y_p\n z_0 = (c * 1.0 / a) * (x_0 - x_p) + z_p\n\n elif b != 0:\n x_0 = x_p \n y_0 = c * c * y_p - b * (d + c)\n z_0 = (c * 1.0 / b) *(y_0 - y_p) + z_p\n\n else:\n x_0 = x_p\n y_0 = y_p\n z_0 = - d * 1.0 / c\n\n p.append([x_0, y_0, z_0])\n \n return p", "def proj_to_plane(norm, d, pts):\n a = norm[0]\n b = norm[1]\n c = norm[2]\n\n p = []\n\n for i in range(len(pts)):\n x_p = pts[i][0]\n y_p = pts[i][1]\n z_p = pts[i][2]\n\n if a != 0:\n x_0 = (b * b + c * c) * x_p - a * b * y_p - a * c * z_p - a * d\n y_0 = (b * 1.0 / a) * (x_0 - x_p) + y_p\n z_0 = (c * 1.0 / a) * (x_0 - x_p) + z_p\n\n elif b != 0:\n x_0 = x_p \n y_0 = c * c * y_p - b * (d + c)\n z_0 = (c * 1.0 / b) *(y_0 - y_p) + z_p\n\n else:\n x_0 = x_p\n y_0 = y_p\n z_0 = - d * 1.0 / c\n\n p.append([x_0, y_0, z_0])\n \n return p", "def normal(self, t=0):\n return Line(self.lerp(t), self.cross_z)", "def vector(x, y, z):\n return point_or_vector(x,y,z,0.0)", "def fit_plane_to_point_cloud(pc: np.ndarray) -> Tuple[Any, Any, Any, Any]:\n center = pc.sum(axis=0) / pc.shape[0]\n u, s, vh = np.linalg.svd(pc - center)\n\n # Get the unitary normal vector\n u_norm = vh[2, :]\n d = -np.dot(u_norm, center)\n a, b, c = u_norm\n return a, b, c, d", "def normal(n):\n m=np.zeros((n,n))\n for i,j in itertools.product(range(n), range(n)):\n m[i][j]=normalvariate(0,1)\n return m", "def getArea(self, p1, p2, p3):\n matrix = [p1.normalVector, p2.normalVector, p3.normalVector, [1,1,1,1]]\n matrix = np.rot90(matrix)\n return abs(np.linalg.det(matrix))/2.0", "def distance_point_plane(point, plane):\n base, normal = plane\n vector = subtract_vectors(point, base)\n return fabs(dot_vectors(vector, normal))", "def vert_normals(xyz, triangles):\n\n\tB, N, _ = _shape(xyz)\n\tM = _shape(triangles)[-2]\n\ttriangles = _i64(triangles)\n\t\n\tfn = face_normals(xyz, triangles)\n\tbfn = tf.reshape(tf.tile(fn, [1,1,3]), [B*M*3, 3])\n\tbt = tf.reshape(\n\t\ttriangles[tf.newaxis,:,:] + _i64(tf.range(B)[:,tf.newaxis,tf.newaxis] * N),\n\t\t[B*M*3])\n\tvn = tf.reshape(tf.math.unsorted_segment_sum(bfn, bt, B*N), [B,N,3])\n\tvn = tf.math.l2_normalize(vn, -1)\n\treturn vn", "def vector3(x, y, z):\n return np.array([x, y, z], dtype=np.float)", "def intersect_triangle(v1, v2, v3, pos):\n #calc normal from two edge vectors v2-v1 and v3-v1\n nVec = cross(subtract(v2, v1), subtract(v3, v1))\n #equation of plane: Ax + By + Cz = kVal where A,B,C are components of normal. x,y,z for point v1 to find kVal\n kVal = dot(nVec,v1)\n #return y val i.e. 
y = (kVal - Ax - Cz)/B\n return (kVal - nVec[0]*pos[0] - nVec[2]*pos[2])/nVec[1]", "def normal(self):\n M = numpy.sqrt(self.magnitude())\n self.pure = self.pure / M\n self.real = self.real / M", "def calc_dist_and_norm_vec(self, point):\n a, b = self.a, self.b\n x3, y3 = point.x, point.y\n\n # calc crossing point between wall and\n # orthogonal line going through (x3, y3)\n x0 = (a * y3 + x3 - a * b) / (a**2 + 1)\n y0 = (a**2 * y3 + a * x3 + b) / (a**2 + 1)\n\n dist = np.sqrt((x3 - x0)**2 + (y3 - y0)**2)\n norm_vec = np.array([(x3 - x0), (y3 - y0)]) / dist\n\n # shortest distance to the wall is distance to start/end point\n # if crossing point lies beyond them\n if x0 < self.p1.x:\n dist = np.sqrt((x3 - self.p1.x)**2 + (y3 - self.p1.y)**2)\n elif x0 > self.p2.x:\n dist = np.sqrt((x3 - self.p2.x)**2 + (y3 - self.p2.y)**2)\n\n return dist, norm_vec", "def perpendicular(self):\n return tuple.__new__(Vec2, (-self[1], self[0]))", "def cross_pts_triangle(p1, p2, p3):\n return (p1[:, 0] - p3[0]) * (p2[1] - p3[1]) - (p2[0] - p3[0]) * (p1[:, 1] - p3[1])", "def project_onto_plane(vect):\n x, y, z = vect\n \n return (x, y, 0.)", "def fit_plane(xyz,z_pos=None):\n mean = np.mean(xyz,axis=0)\n xyz_c = xyz - mean[None,:]\n l,v = np.linalg.eig(xyz_c.T.dot(xyz_c))\n abc = v[:,np.argmin(l)]\n d = -np.sum(abc*mean)\n # unit-norm the plane-normal:\n abcd = np.r_[abc,d]/np.linalg.norm(abc)\n # flip the normal direction:\n if z_pos is not None:\n if np.sum(abcd[:3]*z_pos) < 0.0:\n abcd *= -1\n return abcd", "def test_perpendicular_to_vectors():\n random_state = np.random.RandomState(0)\n a = pr.norm_vector(pr.random_vector(random_state))\n a1 = pr.norm_vector(pr.random_vector(random_state))\n b = pr.norm_vector(pr.perpendicular_to_vectors(a, a1))\n c = pr.norm_vector(pr.perpendicular_to_vectors(a, b))\n assert_almost_equal(pr.angle_between_vectors(a, b), np.pi / 2.0)\n assert_almost_equal(pr.angle_between_vectors(a, c), np.pi / 2.0)\n assert_almost_equal(pr.angle_between_vectors(b, c), np.pi / 2.0)\n assert_array_almost_equal(pr.perpendicular_to_vectors(b, c), a)\n assert_array_almost_equal(pr.perpendicular_to_vectors(c, a), b)" ]
[ "0.7993883", "0.75612503", "0.7551127", "0.75036335", "0.73087484", "0.70266545", "0.6976466", "0.69476783", "0.69432104", "0.68638694", "0.6826915", "0.6760271", "0.67263806", "0.670698", "0.66756666", "0.66190445", "0.65483314", "0.6541516", "0.6535791", "0.6529659", "0.6512842", "0.6507131", "0.64994264", "0.6490822", "0.6474776", "0.64673924", "0.6458305", "0.6434979", "0.64240444", "0.6362717", "0.6351874", "0.63506407", "0.6350437", "0.6346042", "0.63393986", "0.6308758", "0.6304762", "0.626934", "0.6264796", "0.6259845", "0.6246434", "0.6242153", "0.62323564", "0.6219972", "0.6202798", "0.61939114", "0.6180082", "0.61579657", "0.61518395", "0.61453056", "0.6115457", "0.6098857", "0.6089071", "0.60742325", "0.60684305", "0.6068372", "0.60551405", "0.6030961", "0.60236764", "0.6004039", "0.5983551", "0.5976762", "0.5974754", "0.5957071", "0.594367", "0.594128", "0.5928957", "0.5926175", "0.5926005", "0.59120214", "0.59049517", "0.58841354", "0.5868051", "0.58379877", "0.5828203", "0.582603", "0.5825297", "0.58224297", "0.5812613", "0.5803431", "0.58013415", "0.5793365", "0.5779787", "0.5779787", "0.5776321", "0.57495826", "0.57437885", "0.57435125", "0.5740703", "0.5729273", "0.57250506", "0.5697812", "0.5686872", "0.5662699", "0.5658405", "0.5648152", "0.5643556", "0.5643493", "0.5639706", "0.5631522" ]
0.88373834
0
Returns a list where every element is a list of three atomnames. The second and third names are the closest neighbours of the first name. The arguments are a list as returned by frac_to_cart and the number of neighbours to be returned.
def get_closest_neighbours(atomlist, neighbours=2): print('atomlist', atomlist) neighbourlist = [] for atom in atomlist: listline = [atom[0][0]] dists = [] distsc = [] for partner in atomlist: dists.append(np.linalg.norm(atom[1] - partner[1])) distsc.append(np.linalg.norm(atom[1] - partner[1])) dists.remove(min(dists)) for _ in range(neighbours): if min(dists) < 2.5: listline.append(atomlist[distsc.index(min(dists))][0][0]) dists.remove(min(dists)) #listline.append(atomlist[distsc.index(min(dists))][0][0]) neighbourlist.append(listline) return neighbourlist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_CX_neighbours(list_of_atoms, atom_list):\n my_list = []\n atom_numbers = []\n for atom in list_of_atoms:\n for element in identify_bonds(atom, atom_list):\n if (((element[0].atom_name == \"CX\") or (element[0].atom_name == \"CY\")) and (element[0].atom_number not in atom_numbers)):\n my_list.append(element[0])\n atom_numbers.append(element[0].atom_number)\n return my_list", "def neighbours(number: int, number_sectors: int) -> [int, int, int, int]:\n col = number % number_sectors\n row = number // number_sectors\n\n nieg = [number - number_sectors, number + number_sectors, number - 1, number + 1]\n\n if row == 0:\n nieg[0] = -1\n if row == number_sectors - 1:\n nieg[1] = -1\n if col == 0:\n nieg[2] = -1\n if col == number_sectors - 1:\n nieg[3] = -1\n return nieg", "def compose_listofr(atom_name, listofn):\n c = 1.06\n c2 = 1.4\n listofr = []\n for x in range(len(listofn)):\n if (atom_name[0] == \"N\"):\n if (listofn[x].atom_name[0] == \"H\"):\n listofr.append(1.010*c)\n if (listofn[x].atom_name[0] == \"O\"):\n listofr.append(1.060*c)\n if (listofn[x].atom_name[0] == \"C\"):\n listofr.append(1.475*c)\n if (listofn[x].atom_name[0] == \"N\"):\n listofr.append(1.450*c)\n if (atom_name[0] == \"O\"):\n if (listofn[x].atom_name[0] == \"H\"):\n listofr.append(0.970*c)\n if (listofn[x].atom_name[0] == \"O\"):\n listofr.append(1.490*c)\n if (listofn[x].atom_name[0] == \"C\"):\n listofr.append(1.160*c)\n if (listofn[x].atom_name[0] == \"N\"):\n listofr.append(1.060*c)\n if (atom_name[0] == \"C\"):\n if (listofn[x].atom_name[0] == \"H\"):\n listofr.append(1.090*c)\n if (listofn[x].atom_name[0] == \"O\"):\n listofr.append(1.160*c)\n if (listofn[x].atom_name[0] == \"C\"):\n listofr.append(1.540*c)\n if (listofn[x].atom_name[0] == \"N\"):\n listofr.append(1.475*c)\n if (atom_name[0] == \"H\"):\n if (listofn[x].atom_name[0] == \"H\"):\n listofr.append(0.740*c2)\n if (listofn[x].atom_name[0] == \"O\"):\n listofr.append(0.970*c2)\n if (listofn[x].atom_name[0] == \"C\"):\n listofr.append(1.090*c2)\n if (listofn[x].atom_name[0] == \"N\"):\n listofr.append(1.010*c2)\n return listofr", "def get_framework_neighbours(atom, useH=True):\n neighbourlist = []\n for atom2 in atom.partner[:5]:\n #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(\n covalence_radius[atom2.element]) + .1:\n if not 'H' == atom2.element or useH:\n neighbourlist.append(atom2)\n return neighbourlist", "def define_neighbors(x: int, y: int, z: int) -> list:\n diffs = range(-1, 2)\n coords = []\n # might need to add some if guards (if x > 0) (if x < len(blah) etc)\n xdiffs = (x + diff for diff in diffs)\n ydiffs = (y + diff for diff in diffs)\n zdiffs = (z + diff for diff in diffs)\n neighbors = product(xdiffs, ydiffs, zdiffs)\n for index, neighbor in enumerate(neighbors):\n if neighbor != (x, y, z) and all(c >= 0 for c in neighbor):\n coords.append(neighbor)\n return coords", "def get_neighbours(lat, long):\n # ns = north east, ew = east west (ratio between 1 feet and degree) \n # its different on diferent places on earth (sphere)!!\n ns = 0.0025\n ew = 0.0025\n walk = []\n for i in range(-2, 3):\n for j in range(-2, 3):\n thiscell = CellId.from_lat_lng(LatLng.from_degrees(lat + ns*i, long + ew*j)).parent(S2_CELL_LEVEL)\n if abs(i * j) < 4:\n walk.append(thiscell.id())\n return sorted(walk)", "def rankNeighbors(Data):\r\n strokeDist = []\r\n for i in range(len(Data)):\r\n strokeDist.append([])\r\n index = 0\r\n for 
point1 in Data:\r\n dist = []\r\n index1=0\r\n for point2 in Data:\r\n #dist.append(math.sqrt((center1[0]-center2[0])**2+(center1[1]-center2[1])**2))\r\n dist.append((index1,math.sqrt((point1[0]-point2[0])**2+(point1[1]-point2[1])**2+(point1[2]-point2[2])**2)))\r\n index1+=1\r\n #x = copy.deepcopy(dist)\r\n #print(x)\r\n dist.sort(key= lambda x:x[1])\r\n #print(x)\r\n # Get rank for each element\r\n idx1 =0\r\n for e in dist:\r\n #i = x.index(e)\r\n strokeDist[index].append(e)\r\n idx1 +=1\r\n index+=1\r\n return strokeDist", "def lonlat(n_lon: int, n_lat: int) -> List[Tuple[float, float]]:\n grid = []\n for lon in np.linspace(0, 360.0 - 360.0 / n_lon, n_lon):\n for lat in np.linspace(-90, 90, n_lat):\n grid.append((lon, lat))\n return grid", "def get_neighbours(pos):\n neighbours = {tuple(sum(x) for x in zip(pos, offset)) for offset in relative_positions}\n return neighbours", "def computeNearestNeighbor(itemName, itemVector, items):\n # \"Chris Cagle/ I Breathe In. I Breathe Out\" [1, 5, 2.5, 1, 1, 5, 1]\n distances = []\n for otherItem in items:\n if otherItem != itemName:\n # print('itemVector =>', itemVector)\n # print('items[otherItem] =>', items[otherItem])\n distance = manhattan(itemVector, items[otherItem])\n distances.append((distance, otherItem))\n # sort based on distance -- closest first\n distances.sort()\n return distances", "def get_all_neighbor_coords(tiles):\n return [add(tile, neighbor) for tile in tiles for neighbor in NEIGHBORS]", "def getDistancesWithNames(twoDList):\n matrix = []\n for i in range(0,len(twoDList)):\n for j in range(len(twoDList) - len(twoDList) + i):\n SD = determineIdenticalBases(data[i][1], data[j][1])\n temp = []\n if SD[1] != 0:\n p = calculateP(SD[0]+SD[1], SD[1])\n temp.append(data[i][0])\n temp.append(data[j][0]) \n temp.append(estimateMutationsPerSite(p))\n matrix.append(temp)\n return matrix", "def getNeighborNodes(self, signature):\n x, y, z = signature[0], signature[1], signature[2]\n return [(x+1, y+1, z+1), (x+1, y, z+1), (x+1, y-1, z+1),\n (x, y+1, z+1), (x, y, z+1), (x, y-1, z+1),\n (x-1, y+1, z+1), (x-1, y, z+1), (x-1, y-1, z+1),\n (x+1, y+1, z-1), (x+1, y, z-1), (x+1, y-1, z-1),\n (x, y+1, z-1), (x, y, z-1), (x, y-1, z-1),\n (x-1, y+1, z-1), (x-1, y, z-1), (x-1, y-1, z-1),\n (x+1, y+1, z), (x+1, y, z), (x+1, y-1, z),\n (x, y+1, z), (x, y, z), (x, y-1, z),\n (x-1, y+1, z), (x-1, y, z), (x-1, y-1, z)]", "def get_contour(atom_list):\n initial = [atom for atom in atom_list if ((0 < len(identify_bonds(atom, atom_list)) < 3) and (check_connected(atom, identify_bonds(atom, atom_list)) == False))]\n \n extra_1 = []\n for atom in atom_list:\n neighbours = [bond[0] for bond in identify_bonds(atom, atom_list)]\n for i in neighbours:\n neighbours2 = [bond[0] for bond in identify_bonds(i, atom_list)]\n for j in neighbours2:\n if j in initial:\n extra_1.append(atom)\n\n extra_2 = []\n for atom in atom_list:\n neighbours = [bond[0] for bond in identify_bonds(atom, atom_list)]\n check = 0\n for i in neighbours:\n if i in initial:\n check += 1\n if ((check == 2) and (atom not in initial)):\n extra_2.append(atom) \n return (initial + extra_1 + extra_2)", "def calc_distances(marker_list, rf_pairs):\n final_distance = [[marker_list[0], 0]]\n\n for i in range(1, len(marker_list)):\n cur_markers = [marker_list[i-1], marker_list[i]]\n for rf_pair in rf_pairs:\n if rf_pair[0] in cur_markers and rf_pair[1] in cur_markers:\n final_distance.append([cur_markers[1], rf_pairs[rf_pair]])\n break\n return final_distance", "def makeNewickList(distancesWithNames):\n i 
= 0\n oldDistance = 0\n while len(distancesWithNames) > 1:\n smallestindex = findSmallest(distancesWithNames)\n distancesWithNames, oldDistance = newMatrixWithSmallest(distancesWithNames, smallestindex, beforeDistance=oldDistance)\n i+=1\n retString = \"(\" + distancesWithNames[0][0] + \",\" + distancesWithNames[0][1] + \");\"\n return retString", "def find_ngrams(input_list, n=3):\n return zip(*[input_list[i:] for i in range(n)])", "def calculate_distance_matrix(atomlist):\n distlist = []\n for atom in atomlist:\n atomdict = {}\n for partner in atomlist:\n if not str(int(partner[0][1])) in atomdict.keys():\n atomdict[str(int(partner[0][1]))] = []\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n else:\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n atomdict[str(int(partner[0][1]))].sort()\n\n distlist.append(atomdict)\n\n return distlist", "def get_neighbors(n):\n if n < 3:\n return ValueError(\"Integer must be greater than 3.\")\n p = generate()\n q = []\n l = 0\n g = 0\n while g <= n:\n q = next(p)\n g = q[-1]\n if q[-1] == n:\n l = q[0][-2]\n q = next(p)\n g = q[-1]\n elif q[-1] > n:\n l = q[0][-3]\n return l, g", "def nine_to_3x3(listy):\n new_side = []\n k = int(len(listy) / 3)\n \n for i in range(k):\n intermediate = []\n for j in range(3):\n intermediate.append(listy.pop(0))\n \n new_side.append(intermediate)\n return new_side", "def n_closest_waters(coordinates, atom, oxygens, n):\n\n waters = []\n for i in range(n):\n index = find_closest_atom(atom, oxygens)\n closest_oxygen = oxygens[index]\n if closest_oxygen in coordinates:\n oxygen_index = coordinates.index(closest_oxygen)\n OT = coordinates[oxygen_index]\n HT1 = coordinates[oxygen_index+1]\n HT2 = coordinates[oxygen_index+2]\n water = [OT, HT1, HT2]\n waters.append(water)\n oxygens = remove_atom(oxygens, index)\n return waters", "def get_influence_atoms(atomlist):\n enviromentlist = []\n trunclist = []\n neighbourlist = get_closest_neighbours(atomlist, 4)\n for neighbours in neighbourlist:\n if neighbours[0][0] == \"H\":\n neighbours = neighbours[:2]\n if neighbours[0][0] == \"O\":\n neighbours = neighbours[:3]\n trunclist.append(neighbours)\n for atom in trunclist:\n newatom = []\n for atom1partner in atom[1:]:\n for partner in trunclist:\n if partner[0] == atom1partner:\n counter = 0\n\n for atomi in partner:\n if atomi[0] == 'H':\n counter += 1\n\n if counter < 2 or (partner[0] in atom and atom[0][0] == 'H'):\n newatom += atom + partner[1:]\n\n newatom = make_list_unique(newatom)\n newatom.sort()\n enviromentlist.append(newatom)\n return enviromentlist", "def calcDistanceList(work_list):\n distance_list = []\n for swap in work_list: # for every work item find distance\n distance_list.append(Cluster.calcDistance(*swap))\n return distance_list", "def neighbor_list(i, j, k, nx):\n left_center = (i-1, j, k)\n right_center = (i+1, j, k)\n top_center = (i, j+1, k)\n bottom_center = (i, j-1, k)\n left_up = (i, j, k + 1)\n left_down = (i, j, k -1)\n return np.mod([left_center, right_center, top_center, bottom_center, left_up, left_down], nx)", "def GetBonds(Bonds):\n b = sorted([(min(x), max(x)) for x in Bonds])\n Bonds13, Bonds14 = [], []\n for (a1,b1) in b:\n #check for bonds with a1 at the center of a 1-3 interaction,\n #letting b1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == a1 and b2 < b1] + \\\n [a2 for (a2,b2) in b if b2 == a1 and a2 < b1]\n Bonds13.extend([(min(c,b1), max(c,b1)) for c in clist])\n #check for bonds with 
b1 at the center of a 1-3 interaction,\n #letting a1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == b1 and b2 < a1] + \\\n [a2 for (a2,b2) in b if b2 == b1 and a2 < a1]\n Bonds13.extend([(min(c,a1), max(c,a1)) for c in clist])\n #find atoms connected to a1\n clist = [b2 for (a2,b2) in b if a1==a2 and not b1==b2] +\\\n [a2 for (a2,b2) in b if a1==b2 and not b1==a2]\n #find atoms connected to b1\n dlist = [a2 for (a2,b2) in b if b1==b2 and not a1==a2] +\\\n [b2 for (a2,b2) in b if b1==a2 and not a1==b2]\n Bonds14.extend([(min(c,d), max(c,d)) for c in clist for d in dlist])\n Bonds1213 = b + Bonds13\n #sort\n Bonds1213.sort()\n Bonds14.sort()\n #get unique values in case of loops\n Bonds1213 = [x for (i,x) in enumerate(Bonds1213) if i == 0 or x != Bonds1213[i-1]]\n Bonds14 = [x for (i,x) in enumerate(Bonds14) if i == 0 or x != Bonds14[i-1]]\n #convert to arrays \n Bonds1213 = array(Bonds1213, int)\n Bonds14 = array(Bonds14, int)\n return Bonds1213, Bonds14", "def construct_graph_connection(coord_list, radie):\n\n connection_distance = []\n connection = []\n for j, data in enumerate(coord_list):\n '''Calculate the relative distance of the nodes'''\n distance = np.hypot(coord_list[:,0]-data[0], coord_list[:,1]-data[1])\n '''save nodes which are in range'''\n #for i, data in enumerate(distance):\n for i in range(j+1, len(distance)):\n data = distance[i]\n if data < radie:\n connection.append([j, i])\n connection_distance.append(data)\n\n\n connection_distance = np.array(connection_distance)\n connection = np.array(connection)\n return connection, connection_distance", "def get_framework_neighbors(atom, useH=True):\n neighborlist = []\n for atom2 in atom.partner[:5]:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(\n covalence_radius[atom2.element]) + .1:\n if not 'H' == atom2.element or useH:\n neighborlist.append(atom2)\n return neighborlist", "def moore_neighbourhood(self, grid_position: tuple, radius: int) -> list:\n result = []\n u = [grid_position[0] - radius, grid_position[1] - radius]\n for i in range(2 * radius + 1):\n for j in range(2 * radius + 1):\n # This does not make much sense, since u is a list and i and j are integers\n result.append([u + i, u + j])\n return result", "def neighbors(districts, r, c):\r\n n_list = []\r\n if r>0:\r\n n_list += [districts[r-1,c]]\r\n if r<4:\r\n n_list += [districts[r+1,c]]\r\n if c>0:\r\n n_list += [districts[r,c-1]]\r\n if c<4:\r\n n_list += [districts[r,c+1]]\r\n return n_list", "def _generate_immediate_neighbours(pattern: str) -> list:\n generated = []\n for i in range(len(pattern)):\n if pattern[i] == 'A':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_A])\n elif pattern[i] == 'C':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_C])\n elif pattern[i] == 'T':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_T])\n elif pattern[i] == 'G':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_G])\n\n return generated", "def neighbours(pos):\r\n\t\tnbs = []\r\n\t\tfor direction in directions:\r\n\t\t\tnb = add(pos, direction)\r\n\t\t\tif is_inside(nb):\r\n\t\t\t\tnbs.append(nb)\r\n\t\treturn nbs", "def get_neighbors(self):\n return self.neighbours_names", "def get_species_list() -> list:\n c2h2_xyz = {'symbols': ('C', 'C', 'H', 'H'), 'isotopes': (12, 12, 1, 1),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.203142), (0.0, -0.0, 2.265747), (-0.0, -0.0, -1.062605))}\n ch4_xyz = {'symbols': ('C', 'H', 'H', 
'H', 'H'), 'isotopes': (12, 1, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.08744517), (1.02525314, 0.0, -0.36248173),\n (-0.51262658, 0.88789525, -0.36248173), (-0.51262658, -0.88789525, -0.36248173))}\n co2_xyz = {'symbols': ('C', 'O', 'O'), 'isotopes': (12, 16, 16),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.1594846), (0.0, 0.0, -1.1594846))}\n co_xyz = {'symbols': ('O', 'C'), 'isotopes': (16, 12), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.12960815))}\n f2_xyz = {'symbols': ('F', 'F'), 'isotopes': (19, 19), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.3952041))}\n ch2o_xyz = {'symbols': ('O', 'C', 'H', 'H'), 'isotopes': (16, 12, 1, 1),\n 'coords': ((0.0, 0.0, 0.674622), (0.0, 0.0, -0.529707),\n (0.0, 0.935488, -1.109367), (0.0, -0.935488, -1.109367))}\n h2o_xyz = {'symbols': ('O', 'H', 'H'), 'isotopes': (16, 1, 1),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.95691441), (0.92636305, 0.0, -0.23986808))}\n h2_xyz = {'symbols': ('H', 'H'), 'isotopes': (1, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.74187646))}\n hcn_xyz = {'symbols': ('C', 'N', 'H'), 'isotopes': (12, 14, 1),\n 'coords': ((0.0, 0.0, -0.500365), (0.0, 0.0, 0.65264), (0.0, 0.0, -1.566291))}\n hf_xyz = {'symbols': ('F', 'H'), 'isotopes': (19, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.91538107))}\n n2o_xyz = {'symbols': ('N', 'N', 'O'), 'isotopes': (14, 14, 16),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.12056262), (0.0, 0.0, 2.30761092))}\n n2_xyz = {'symbols': ('N', 'N'), 'isotopes': (14, 14), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.09710935))}\n nh3_xyz = {'symbols': ('N', 'H', 'H', 'H'), 'isotopes': (14, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.11289), (0.0, 0.938024, -0.263409),\n (0.812353, -0.469012, -0.263409), (-0.812353, -0.469012, -0.263409))}\n oh_xyz = {'symbols': ('O', 'H'), 'isotopes': (16, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.967))}\n cl2_xyz = {'symbols': ('Cl', 'Cl'), 'isotopes': (35, 35), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.1))}\n\n c2h2 = ARCSpecies(label='C2H2', smiles='C#C', multiplicity=1, charge=0)\n c2h2.initial_xyz = c2h2_xyz\n\n ch4 = ARCSpecies(label='CH4', smiles='C', multiplicity=1, charge=0)\n ch4.initial_xyz = ch4_xyz\n\n co2 = ARCSpecies(label='CO2', smiles='O=C=O', multiplicity=1, charge=0)\n co2.initial_xyz = co2_xyz\n\n co = ARCSpecies(label='CO', smiles='[C-]#[O+]', multiplicity=1, charge=0)\n co.initial_xyz = co_xyz\n\n f2 = ARCSpecies(label='F2', smiles='[F][F]', multiplicity=1, charge=0)\n f2.initial_xyz = f2_xyz\n\n ch2o = ARCSpecies(label='CH2O', smiles='C=O', multiplicity=1, charge=0)\n ch2o.initial_xyz = ch2o_xyz\n\n h2o = ARCSpecies(label='H2O', smiles='O', multiplicity=1, charge=0)\n h2o.initial_xyz = h2o_xyz\n\n h2 = ARCSpecies(label='H2', smiles='[H][H]', multiplicity=1, charge=0)\n h2.initial_xyz = h2_xyz\n\n hcn = ARCSpecies(label='HCN', smiles='C#N', multiplicity=1, charge=0)\n hcn.initial_xyz = hcn_xyz\n\n hf = ARCSpecies(label='HF', smiles='F', multiplicity=1, charge=0)\n hf.initial_xyz = hf_xyz\n\n n2o = ARCSpecies(label='N2O', smiles='[N-]=[N+]=O', multiplicity=1, charge=0)\n n2o.initial_xyz = n2o_xyz\n\n n2 = ARCSpecies(label='N2', smiles='N#N', multiplicity=1, charge=0)\n n2.initial_xyz = n2_xyz\n\n nh3 = ARCSpecies(label='NH3', smiles='N', multiplicity=1, charge=0)\n nh3.initial_xyz = nh3_xyz\n\n oh = ARCSpecies(label='OH', smiles='[OH]', multiplicity=2, charge=0)\n oh.initial_xyz = oh_xyz\n\n cl2 = ARCSpecies(label='Cl2', smiles='[Cl][Cl]', multiplicity=1, charge=0)\n cl2.initial_xyz = cl2_xyz\n\n species_list = [c2h2, ch4, co2, co, f2, ch2o, h2o, h2, hcn, 
hf, n2o, n2, nh3, oh, cl2]\n\n return species_list", "def get_neighbours(self):\r\n n = [deepcopy(self.representation) for i in range(len(self.representation) - 1)]\r\n\r\n for count, i in enumerate(n):\r\n i[count], i[count + 1] = i[count + 1], i[count]\r\n\r\n n = [Individual(i) for i in n]\r\n return n", "def ngrams(name_string, n=3):\n\n string = re.sub(r'[,-./]|\\sBD', r'', name_string)\n n_grams = zip(*[string[i:] for i in range(n)])\n return [''.join(n_gram) for n_gram in n_grams]", "def triples():", "def nths(x, n):\n return [l[n] for l in x]", "def discover_new_cluster(\n self,\n n: int,\n items: List[str],\n embeddings: np.ndarray,\n weights: Optional[List[float]] = None,\n k_neighbours: int = 10,\n ) -> List[Tuple[float, str]]:\n # Get all cross-similarities\n similarity = cosine_similarity(embeddings)\n \n # Calculate scores for every row\n scores = []\n sorted_idx = similarity.argsort(axis=1) # Get sorted indices (sort on corresponding values)\n for i, (item, weight) in enumerate(zip(items, weights)):\n # No point in calculating score if weight equals zero\n if not weight:\n scores.append(0)\n continue\n \n # Assign score of zero if labeled entity is in K nearest neighbours\n top_indices = sorted_idx[i, -k_neighbours:]\n if any(items[idx] in self._clusters.keys() for idx in top_indices):\n scores.append(0)\n \n # Use accumulated similarity of K nearest neighbours as score\n else:\n scores.append(weight * similarity[i, top_indices].sum())\n \n # Filter out the highest score item\n return list(sorted(zip(scores, items), key=lambda x: x[0], reverse=True))[:n]", "def get_neighbours(self):\n n = [deepcopy(self.representation) for i in range(len(self.representation) - 1)]\n\n for count, i in enumerate(n):\n i[count], i[count + 1] = i[count + 1], i[count]\n\n n = [Individual(i) for i in n]\n return n", "def neighbors(r, c):\n return [\n (r - 1, c - 1),\n (r + 0, c - 1),\n (r + 1, c - 1),\n (r - 1, c + 0),\n (r + 1, c + 0),\n (r - 1, c + 1),\n (r + 0, c + 1),\n (r + 1, c + 1),\n ]", "def station_from_lat_lon(lat, lon, stations, n_nearest=3):\n lat, lon = float(lat), float(lon)\n distances = [(distance(lat, lon, st['lat'], st['lon']), st)\n for st in stations\n if (st['is_renting'] and st['is_installed'])]\n distances = sorted(distances)\n return [pair[1] for pair in distances[:n_nearest]]", "def get_n_neighbors(values, index, n): # TODO returning same tweets every time?\n neighbors = []\n diff = 0\n # check that n/2 lower neighbors exist\n if index >= (n/2):\n for i in range(int(n/2)):\n neighbors.append(values[index - i])\n # add as many as possible, add extra from end if possible\n else:\n diff = int(n/2) - index\n for i in range(int(n/2) - diff):\n neighbors.append(values[index - i])\n # check that n/2 + diff upper neighbors exist, add them\n if len(values) > (index + int(n/2) + diff):\n for i in range(int(n/2) + diff):\n neighbors.append(values[index - i])\n # not enough stuff, add as many as possible\n else:\n diffUpp = (len(values) - 1) - index\n for i in range(diffUpp):\n neighbors.append(values[index + i])\n return neighbors", "def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #assign each of neighbours corrds\n #top left to top rigt\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # left to right (center)\n left = (row, col - 1)\n # the (row, col) cordinates passed into this function are situated here\n right = (row, col + 1)\n \n #bottom-left to bottom-right\n bottom_left = (row +1, col -1)\n 
bottom_center = (row +1, col)\n bottom_right = (row +1, col +1)\n \n return [top_left, top_center, top_right,\n left , right ,\n bottom_left, bottom_center, bottom_right]", "def get_interactions(list_atoms1, list_atoms2, dist):\n beta_carbons1 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms1))\n beta_carbons2 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms2))\n ns = NeighborSearch(beta_carbons1)\n interactions = []\n\n for atom in beta_carbons2:\n interact = ns.search(atom.get_coord(), dist)\n interactions.extend(\n [tuple(sorted([str(atom.get_parent().resname), str(x.get_parent().resname)])) for x in interact])\n return interactions", "def Get_NMME_index_order(ncInput):\n Var_name = str(ncInput)\n start_index = Var_name.find('prec')\n if Var_name.find('prec')!=-1:\n Var_name = Var_name[start_index+5:start_index+14]\n Order_list = ['a','a','a','a','a']\n for i in range(len(Var_name)):\n if i%2 == 0:\n Order_list[int(i/2)] = Var_name[i] \n elif Var_name.find('prec')==-1:\n start_index = Var_name.find('prate')\n Var_name = Var_name[start_index+6:start_index+11]\n Order_list = ['a','a','a']\n for i in range(len(Var_name)):\n if i%2 == 0:\n Order_list[int(i/2)] = Var_name[i] \n else:\n print ('Error there have no dim')\n os.system(\"pause\")\n exit(0)\n return Order_list", "def construct_fast_graph_connection(coord_list, radie):\n\n connection_distance = []\n connection = []\n coord_list_tree = scipy.spatial.cKDTree(coord_list)\n for j, data in enumerate(coord_list):\n '''save nodes which are in range'''\n connections_ckd = coord_list_tree.query_ball_point(data, radie)\n for i in connections_ckd:\n #only save upper half of the matrix\n if i > j:\n #save the connection\n connection.append([j, i])\n #save the relative distance of the nodes\n connection_distance.append(np.hypot(coord_list[i,0]-data[0], coord_list[i,1]-data[1]))\n\n connection_distance = np.array(connection_distance)\n connection = np.array(connection)\n\n\n return connection, connection_distance", "def nn_distance(nn_tree, lats, lngs=None):\n if lngs is not None:\n lats = np.asarray(lats).reshape(-1, 1)\n lngs = np.asarray(lngs).reshape(-1, 1)\n points = np.hstack((lats, lngs))\n else:\n points = np.asarray(lats).reshape(-1, 2)\n\n dist, neighbors = nn_tree.query(np.deg2rad(points))\n # Distance to kilometers, on the surface of the earth\n dist = haversine_to_km(dist)\n return dist, neighbors", "def calc_pvecs_1mol(mol_crds, act_ats):\n nearest_neighbours = np.zeros((len(act_ats), 3, 3))\n at_inds = np.arange(len(mol_crds))\n at_map = {} # map at num to active at num\n\n # Loop over active atoms and calc nearest neighbours\n for count, iat in enumerate(act_ats):\n at_crd = mol_crds[iat]\n dists = np.linalg.norm(mol_crds - at_crd, axis=1)\n\n dist_mask = dists < 3.5\n nn_ats = at_inds[dist_mask][:3]\n if len(nn_ats) != 3:\n # Set the map at to the next closest one\n closest_at = K_min(list(dists), 2)\n at_map[count] = closest_at[1]\n continue\n else:\n # Make sure iat is the first atom\n nn_ats = nn_ats[nn_ats != iat][:2]\n nn_ats = [iat, *nn_ats]\n assert len(nn_ats) == 3\n\n nearest_neighbours[count] = mol_crds[nn_ats]\n\n # Set pvecs the same as the closest atom if we can't calculate them\n for at in at_map:\n nearest_neighbours[at] = nearest_neighbours[at_map[at]]\n\n pvecs = []\n for a1, a2, a3 in nearest_neighbours:\n v1 = a2 - a1\n v2 = a3 - a1\n pvec = np.cross(v1, v2)\n pvec /= np.linalg.norm(pvec)\n pvecs.append(pvec)\n\n return np.array(pvecs)", "def neighbours_of_position(coords):\n row = coords[0]\n 
col = coords[1]\n \n #Assign each of the neighbours\n # Top-left to the top-right\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # Left to right\n left = (row, col - 1)\n # The '(row, col)' coordinates passed to this\n # function are situated here\n right = (row, col + 1)\n \n # Bottom-left to bottom-right\n bottom_left = (row + 1, col - 1)\n bottom_center = (row + 1, col)\n bottom_right = (row + 1, col + 1)\n \n return [top_left, top_center, top_right,\n left, right,\n bottom_left, bottom_center, bottom_right]", "def split_label_coords(self, coords_list):\n coords_clusters = []\n\n for label in range(np.max(self.dbscan.labels_) + 1):\n idx = tuple(np.where(self.dbscan.labels_ == label)[0])\n coords_cluster = itemgetter(*idx)(coords_list)\n\n coords_clusters.append(coords_cluster)\n return coords_clusters", "def get_neighbours(self):\n return []", "def neighbors(pattern, d):\n\n if d == 0:\n return [pattern]\n if len(pattern) == 1:\n return ['A', 'C', 'G', 'T']\n neighborhood = []\n suffix_pattern = pattern[1:]\n suffix_neighbors = neighbors(suffix_pattern, d)\n for text in suffix_neighbors:\n hdist = compute_hamming_distance(suffix_pattern, text)\n if hdist < d:\n for n in ['A', 'C', 'G', 'T']:\n neighbor = n + text\n neighborhood.append(neighbor)\n else:\n neighbor = pattern[0] + text\n neighborhood.append(neighbor)\n return neighborhood", "def get_near_ones(self):\n near_ones = []\n z_i, z_j = self[0]\n for i, j in [(-1, 0), (0, -1), (1, 0), (0, 1)]:\n try:\n near_ones.append(self[(z_i+i, z_j+j)])\n except KeyError:\n pass\n\n return near_ones", "def basicGetPointsGeodesic(self):\n\n # more geodesic, distance=2 (complicated because random)\n data = numpy.array([[0, 1, 1, 1, 2, 2, 2, 0],\n [0, 1, 1, 1, 2, 2, 2, 0],\n [0, 1, 1, 1, 2, 2, 2, 0]])\n labels = Labels(data=data)\n result = labels.getPoints(ids=[1], mode='geodesic', distance=2, \n connectivity=1)\n result = result.tolist()\n if len(result) == 5:\n desired = [[0, 1], [0, 3], [1, 2], [2, 1], [2, 3]]\n elif len(result) == 4:\n desired = [[0, 2], [1, 1], [1, 3], [2, 2]]\n elif len(result) == 3:\n if [1, 2] in result:\n if [0, 1] in result:\n desired = [[0, 1], [1, 2], [2, 3]] \n elif [0, 3] in result:\n desired = [[0, 3], [1, 2], [2, 1]]\n elif [0, 1] in result:\n if [0, 3] in result:\n desired = [[0, 1], [0, 3], [2, 2]]\n elif [2, 1] in result:\n desired = [[0, 1], [2, 1], [1, 3]]\n else:\n desired = [[0, 1], [1, 3], [2, 2]]\n elif [2, 3] in result:\n if [0, 3] in result:\n desired = [[0, 3], [1, 1], [2, 3]]\n elif [2, 1] in result:\n desired = [[0, 2], [2, 1], [2, 3]]\n else:\n desired = [[2, 3], [1, 1], [0, 2]] \n elif [0, 3] in result:\n desired = [[0, 3], [1, 1], [2, 2]]\n elif [2, 1] in result:\n desired = [[2, 1], [1, 3], [0, 2]]\n for des in desired:\n np_test.assert_equal(des in result, True)\n for res in result:\n np_test.assert_equal(res in desired, True)\n\n # mode geodesic, distance=3, inset\n labels = Labels(data=data[1:3, 2:8])\n labels.setInset([slice(1, 3), slice(2, 8)])\n result = labels.getPoints(ids=[2], mode='geodesic', distance=3, \n connectivity=1)\n result = result.tolist()\n if len(result) == 1:\n np_test.assert_equal(result[0][1], 5)\n elif len(result) == 2:\n desired = []\n if [1, 4] in result:\n desired = [[1, 4], [2, 6]]\n elif [2, 4] in result:\n desired = [[2, 4], [1, 6]]\n for des in desired: \n np_test.assert_equal(des in result, True)\n for res in result:\n np_test.assert_equal(res in desired, True)", "def nearest(coordinate, coordinate_list, 
limit=None):\r\n distances = []\r\n coordinate_lat=coordinate[0]\r\n coordinate_lon=coordinate[1]\r\n for c in coordinate_list:\r\n if len(c)==5:\r\n distances.append( (distance(coordinate_lat, coordinate_lon, c[3][0], c[3][1]), c))\r\n else:\r\n distances.append( (distance(coordinate_lat, coordinate_lon, c[0], c[1]), c)) \r\n distances.sort()\r\n if limit:\r\n return distances[:limit]\r\n return distances", "def neighbours(num):\n num = str(num)\n num = '0'*(4-len(num))+num # Prepend 0 until length is 4\n\n return [\n int(add_wo_carry(num, '0001')),\n int(add_wo_carry(num, '0010')),\n int(add_wo_carry(num, '0100')),\n int(add_wo_carry(num, '1000')),\n int(sub_wo_carry(num, '0001')),\n int(sub_wo_carry(num, '0010')),\n int(sub_wo_carry(num, '0100')),\n int(sub_wo_carry(num, '1000'))]", "def get_adj_nodes(self):\n return [\n self.nearest_node + PVector(1, 0),\n self.nearest_node + PVector(0, 1),\n self.nearest_node + PVector(-1, 0),\n self.nearest_node + PVector(0, -1)]", "def get_neighbours(self, training_data=(), instance=(), k=3):\n data = []\n neighbours = []\n # calculate the query distance\n for x in range(len(training_data)):\n distance = self.query_distance(instance1=training_data[x][:-1], instance2=instance[:-1])\n data.append([training_data[x], distance])\n # sort the neighbours based on key which is the distance in ascending\n data = sorted(data, key=itemgetter(-1))\n\n # pick the top neighbours of the test data\n for x in range(k):\n neighbours.append(data[:k][x][0])\n\n # return the neighbours of the test data\n return neighbours", "def get_neighbors(pattern, d):\n # if no difference\n if d == 0:\n return [pattern]\n # if no pattern\n if len(pattern) == 1:\n return ['A', 'C', 'T', 'G']\n # initialize the container\n neighborhood = set()\n # checking for the suffix patterns\n neighbors = get_neighbors(pattern[1:], d)\n # iterates through the neighbors\n for kmer in neighbors:\n # check for the allowed distance\n if hamming_distance(pattern[1:], kmer) < d:\n # iterates through the charcater/bases\n for char in ['A', 'C', 'T', 'G']:\n # add the character to the suffix payyern\n neighborhood.add(char + kmer)\n else:\n # otherwise add the first character again\n neighborhood.add(pattern[0] + kmer)\n return sorted(list(neighborhood))", "def make_neighbor_list(self):\n nodeinfo = bytearray()\n\n # the node itself\n for item in self.neighbors.my_info.get_nodeinfo():\n nodeinfo.extend(item)\n count = 1\n\n # neighboring node\n for nd in self.neighbors.nodeinfo_list.keys():\n if self.neighbors.nodeinfo_list[nd].is_alive:\n count += 1\n for item in self.neighbors.nodeinfo_list[nd].get_nodeinfo():\n nodeinfo.extend(item)\n\n nodes = bytearray(count.to_bytes(4, 'big'))\n nodes.extend(nodeinfo)\n return bytes(nodes)", "def generate_pairs_raw(patches, constants):\n # Convert the list of patch norms into numpy arrays\n patch_database = []\n patch_database.append(\n np.vstack([np.reshape(patch.raw_patch, [-1]) for patch in patches[0]])\n )\n # Find list of just 2 nearest neighbours for each patch due to duplicate\n nearest = []\n p1 = np.concatenate(patch_database[0:])\n kdt = KDTree(p1, leaf_size=30, metric='euclidean')\n nn = kdt.query(patch_database[0], k=2, return_distance=False, sort_results=False)\n nearest.append(nn)\n\n return np.concatenate(nearest)", "def calc_nuj_list(theta_list) :\n return theta_list / np.sin(2*theta_list)", "def calc_number_neighbours(num_electrons: int):\r\n if num_electrons < -4 or num_electrons > 4 : \r\n # if number of missing/extra e- higher than 4, then 
distort 8-num_electrons\r\n num_neighbours = abs(8 - abs(num_electrons) )\r\n elif -4 < num_electrons < 4:\r\n num_neighbours = abs(num_electrons)\r\n elif abs(num_electrons) == 4:\r\n num_neighbours = abs(num_electrons)\r\n \r\n return abs(num_neighbours)", "def nt_3d_centers(cif_file, consider_all_atoms):\n result =[]\n try:\n structure = MMCIFParser().get_structure(cif_file, cif_file)\n except Exception as e:\n warn(f\"\\n{cif_file.split('/')[-1]} : {e}\", error=True)\n with open(runDir + \"/errors.txt\", \"a\") as f:\n f.write(f\"Exception in nt_3d_centers({cif_file.split('/')[-1]})\\n\")\n f.write(str(e))\n f.write(\"\\n\\n\")\n return result\n for model in structure:\n for chain in model:\n for residue in chain:\n if consider_all_atoms:\n temp_list = []\n for atom in residue:\n temp_list.append(atom.get_coord())\n lg = len(temp_list)\n summ = np.sum(temp_list, axis = 0)\n res_isobaricentre = [summ[0]/lg, summ[1]/lg, summ[2]/lg]\n result.append([res_isobaricentre[0], res_isobaricentre[1], res_isobaricentre[2]])\n else:\n coordinates = None\n for atom in residue:\n if atom.get_name() == \"C1'\":\n coordinates = atom.get_coord()\n if coordinates is None:\n # Residue has no C1'\n res = np.nan\n else:\n res = [coordinates[0], coordinates[1], coordinates[2]]\n result.append(res)\n return(result)", "def fermionic_cells(self):\n cells = self.cells()\n cells_and_circles = self.all_cells()\n circles = [x for x in cells_and_circles if x not in cells]\n coords = [(i, jprime)\n for iprime, jprime in circles\n for i, j in circles\n if iprime > i\n ]\n coords.sort()\n return coords", "def nearest(reference, locations):\n return [x[1] for x in distances(reference, locations)]", "def insert_nano_particle(atoms, nanoparticle):\n from scipy.spatial import cKDTree as KDTree\n np_pos = nanoparticle.get_positions()\n com = np.sum(np_pos, axis=0)/len(np_pos)\n np_pos -= com\n nanoparticle.set_positions(np_pos)\n\n cell = atoms.get_cell()\n diag = 0.5 * (cell[:, 0] + cell[:, 1] + cell[:, 2])\n at_pos = atoms.get_positions() - diag\n tree = KDTree(at_pos)\n\n used_indices = []\n for atom in nanoparticle:\n dists, closest_indx = tree.query(atom.position)\n if closest_indx in used_indices:\n raise RuntimeError(\"Two indices map to the same!\")\n atoms[closest_indx].symbol = atom.symbol\n used_indices.append(closest_indx)\n\n symbols = [atom.symbol for atom in atoms]\n return symbols", "def _find_neighbours(self):\n\n neighbours = []\n for i, p in enumerate(self.frame_0):\n nearests = np.where(np.linalg.norm(self.frame_0 - p, axis=1) <= self.R_n)[0]\n # delete self index\n index = np.argwhere(nearests==i)\n nearests = np.delete(nearests, index)\n neighbours.append(nearests)\n\n return neighbours", "def get_coords_by_label_3D(volume, label):\n coords = np.argwhere(volume == label)\n z = [z for z, y, x in coords]\n y = [y for z, y, x in coords]\n x = [x for z, y, x in coords]\n return z, y, x", "def computeCrowdingDist(pareto_fronts):\n nobj = len(pareto_fronts[0][0])\n distances = defaultdict(float)\n\n for front in pareto_fronts:\n for i in range(nobj):\n front.sort(key=itemgetter(i))\n distances[front[-1]] = float(\"inf\")\n distances[front[0]] = float(\"inf\")\n if front[-1][i] == front[0][i]:\n continue\n norm = float(front[-1][i] - front[0][i]) * nobj\n for prev, cur, nex in zip(front[:-2], front[1:-1], front[2:]):\n distances[cur] += (nex[i] - prev[i]) / norm\n return distances", "def filter_carbon_atoms(atom_list, rings):\n list_3 = []\n list_2 = []\n list_2n = []\n for atom in atom_list:\n if 
(check_connected(atom, identify_bonds(atom, atom_list)) == False):\n if (len(identify_bonds(atom, atom_list)) == 3):\n list_3.append(atom)\n elif (len(identify_bonds(atom, atom_list)) == 2):\n list_2.append(atom)\n for neighbour in identify_bonds(atom, atom_list):\n if (len(identify_bonds(neighbour[0], atom_list)) == 2):\n for ring in rings:\n if( (atom in ring) and (neighbour[0] in ring)):\n list_2n.append(atom) \n return list_3, list_2, list_2n", "def getNeighbors(self):\n targets = set()\n for arc in self._arcsFrom:\n targets.add(arc.getFinish())\n return [ node for node in sorted(targets) ]", "def get_neighbors(self, pos):\r\n neighbors = []\r\n if pos[0] + 1 < self.size:\r\n neighbors.append((pos[0] + 1, pos[1]))\r\n if pos[0] - 1 >= 0:\r\n neighbors.append((pos[0] - 1, pos[1]))\r\n if pos[1] + 1 < self.size:\r\n neighbors.append((pos[0], pos[1] + 1))\r\n if pos[1] - 1 >= 0:\r\n neighbors.append((pos[0], pos[1] - 1))\r\n return neighbors", "def get_neighbours(self, business, num=5, add_self=False):\n\n def radius_step(radius, num_longtidues, num_latitudes, time):\n \"\"\"expand the search-radius exponentially\"\"\"\n step = int(exp(time))\n radius['long_down'] = radius['long_down'] - step\n if radius['long_down'] <= 0:\n radius['long_down'] = 0\n radius['long_up'] = radius['long_up'] + step\n if radius['long_up'] >= num_longtidues - 1:\n radius['long_up'] = num_longtidues - 1\n radius['lat_down'] = radius['lat_down'] - step\n if radius['lat_down'] <= 0:\n radius['lat_down'] = 0\n radius['lat_up'] = radius['lat_up'] + step\n if radius['lat_up'] >= num_latitudes - 1:\n radius['lat_up'] = num_latitudes - 1\n\n cell = self.get_cell(business)\n b_long = business.longitude\n b_lat = business.latitude\n radius = {'long_down': cell[0], 'long_up': cell[0] + 1,\n 'lat_down': cell[1], 'lat_up': cell[1] + 1}\n ret = []\n time = 0\n inner_radius = 0\n while len(ret) < num and inner_radius < 100:\n found = []\n radius_step(radius, self.longitudes.size, self.latitudes.size,\n time)\n time = time + 1\n for row in range(radius['long_down'], radius['long_up']):\n for col in range(radius['lat_down'], radius['lat_up']):\n if row in self.cells and col in self.cells[row]:\n for item in self.cells[row][col]:\n if item not in ret:\n found.append(item)\n if (len(found) + len(ret)) < num:\n continue\n # We approximate the in-radius of the search-rectangle by half of\n # the distance between the centers of left and right border\n # (Not exactly the in-radius on the surface of a sphereoid, but\n # easier to calculate)\n inner_radius = haversine((self.longitudes[radius['long_down']],\n self.latitudes[cell[1]]),\n (self.longitudes[radius['long_up']],\n self.latitudes[cell[1]])) / 2\n for neighbour in found:\n n_long = neighbour['longitude']\n n_lat = neighbour['latitude']\n dist = haversine((b_long, b_lat), (n_long, n_lat))\n # make sure we only include businesses in the in-circle of the\n # search-rectangle\n if dist <= inner_radius and \\\n (add_self or neighbour['index'] != business.name):\n neighbour['distance'] = dist\n ret.append(neighbour)\n return sorted(ret, key=itemgetter('distance'))[:num]", "def get_neighbors(self) -> List['games.saloon.tile.Tile']:\n neighbors = []\n\n for direction in Tile.directions:\n neighbor = getattr(self, \"tile_\" + direction.lower())\n if neighbor:\n neighbors.append(neighbor)\n\n return neighbors", "def get_neighbours(train, test_row, num_neighbours, distance_metrics=\"block\"):\n distances = []\n for train_row in train:\n if distance_metrics == \"block\":\n distance = 
block_distance(test_row, train_row)\n else:\n distance = euclidean_distance(test_row, train_row)\n distances.append((train_row, distance))\n distances.sort(key=lambda tup: tup[1])\n neigbours = []\n for i in range(num_neighbours):\n neigbours.append(distances[i][0])\n return neigbours", "def dishlist_prices(n: list) -> list:\r\n return [dish.price for dish in n]", "def nearest_neigh_of_same_type(self, atom, cutoff=3.5):\n atoms = []\n while(len(atoms) == 0):\n atoms = self.get_atoms_in_cutoff(atom, cutoff)\n atoms = [x for x in atoms if x.z == atom.z]\n #if atom in atoms: atoms.remove(atom)\n cutoff *= 2\n cutoff /= 2 # set back to the value used in case I want it later\n d = float(\"inf\")\n for atomi in atoms:\n dt = self.dist(atom, atomi)\n if dt < d:\n d = dt\n a = atomi\n if(a.z != atom.z): raise Exception(\"Error! Function 'nearest_neigh_of_same_type' didn't work!\")\n return a", "def _2x3_grid_clusters_close():\n return [mn(mean=np.array([i * 5, j * 5]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(2)\n for j in range(3)]", "def get_neighbours(self):\n shape=self.cubeshape[1:]\n neighboursx=np.arange(self.xpos-(self.blocksize-1)/2,(self.xpos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursx=[x if (x>=0) & (x<=shape[1]-1) else np.nan for x in neighboursx ]\n neighboursy=np.arange(self.ypos-(self.blocksize-1)/2,(self.ypos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursy=[y if (y>=0) & (y<=shape[0]-1) else np.nan for y in neighboursy ]\n keys=[np.ravel_multi_index([y,x], shape) if np.all(np.isfinite(np.asarray([y,x]))) else np.nan for y in neighboursy for x in neighboursx]\n\n return keys", "def test_make_neighbors(position):\n\n def result_row(i, size):\n return [i] + [i + 1] * (size - 2) + [i]\n\n size = position.size\n neigh_counts = [0] * (size ** 2)\n first_row = result_row(2, size)\n last_row = result_row(2, size)\n middle_row = result_row(3, size)\n desired_result = first_row + (middle_row) * (size - 2) + last_row\n\n for c, neighs in go.make_neighbors(size=size):\n for pt in list(neighs):\n neigh_counts[pt] += 1\n\n assert desired_result == neigh_counts", "def _get_neighbours(kmer):\n assert (is_dna(kmer))\n bases = 'ACTG'\n result = set()\n for i in range(len(kmer)):\n for base in bases:\n result.add(kmer[:i] + base + kmer[(i + 1):])\n return result", "def k_neighbors(self, unknown, dataset, k):\n distances = []\n for title in dataset:\n point = dataset[title]\n distance_to_point = distance.euclidean_distance(point, unknown)\n distances.append([distance_to_point, title])\n distances.sort()\n neighbors = distances[0:k]\n return neighbors", "def _get_neighbours(self, position):\n grid = self._grid\n x, y = position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours", "def _get_neighbours(self, pos, input_data):\r\n neighbours = []\r\n\r\n start = AlignmentOutputData.table_values[pos.y][pos.x]\r\n diagonal = float(strings.NAN)\r\n up = float(strings.NAN)\r\n left = float(strings.NAN)\r\n\r\n cur_char_seq_1 = strings.EMPTY\r\n cur_char_seq_2 = strings.EMPTY\r\n\r\n if pos.y - 1 >= 0 and pos.x - 1 >= 0:\r\n diagonal = AlignmentOutputData.table_values[pos.y - 1][pos.x - 1]\r\n\r\n if pos.y - 1 >= 0:\r\n up = AlignmentOutputData.table_values[pos.y - 1][pos.x]\r\n\r\n if pos.x - 1 >= 0:\r\n left = AlignmentOutputData.table_values[pos.y][pos.x - 1]\r\n\r\n if 
pos.y - 1 >= 0:\r\n cur_char_seq_1 = input_data.sequence_a[pos.y - 1]\r\n if pos.x - 1 >= 0:\r\n cur_char_seq_2 = input_data.sequence_b[pos.x - 1]\r\n\r\n matching = start == diagonal + input_data.cost_function.get_value(cur_char_seq_1, cur_char_seq_2)\r\n deletion = start == up + input_data.gap_cost\r\n insertion = start == left + input_data.gap_cost\r\n\r\n if matching:\r\n neighbours.append(Vector(pos.x - 1, pos.y - 1))\r\n\r\n if insertion:\r\n neighbours.append(Vector(pos.x - 1, pos.y))\r\n\r\n if deletion:\r\n neighbours.append(Vector(pos.x, pos.y - 1))\r\n\r\n return neighbours", "def mots_Nlettre(L:list, n)->list:\n lst= []\n mot = 0\n for i in range(len(L)):\n mot = L[i] \n cpt = 0\n for e in mot:\n cpt += 1\n if cpt == n:\n lst.append(mot)\n return lst", "def get_common_neighbours(p1: Position, p2: Position) -> List[Position]:\n i, j = p1\n l1 = [(i + x, j + y) for x, y in NEIGHBORS_1]\n i, j = p2\n l2 = [(i + x, j + y) for x, y in NEIGHBORS_1]\n return [k for k in l1 if k in l2]", "def calc_inter_dist(L, pos) :\n ell_list = np.zeros(len(L)-1)\n for n in range(len(L)-1) :\n ell_list[n] = pos[n+1] - pos[n] - (L[n] + L[n+1])\n return ell_list", "def frac_to_cart(cell, positions):\n atomlist = []\n counter = 1\n a, b, c = cell[0], cell[1], cell[2]\n alpha, beta, gamma = cell[3] / 180 * np.pi, cell[4] / 180 * np.pi, cell[5] / 180 * np.pi\n v = np.sqrt(1 - np.cos(alpha) * np.cos(alpha) - np.cos(beta) * np.cos(beta) - np.cos(gamma) * np.cos(gamma) \\\n + 2 * np.cos(alpha) * np.cos(beta) * np.cos(gamma))\n transmatrix = np.matrix([[a, b * np.cos(gamma), c * np.cos(beta)],\n [0, b * np.sin(gamma), c * (np.cos(alpha) - np.cos(beta) * np.cos(gamma)) / np.sin(gamma)],\n [0, 0, c * v / np.sin(gamma)]])\n\n for atom in positions:\n coordmatrix = np.dot(transmatrix, positions[str(atom)])\n coordmatrix = np.array(coordmatrix).flatten().tolist()\n atomlist.append([])\n atomlist[-1].append([atom, atomtable[atom[0]]])\n counter += 1\n atomlist[-1].append(np.array(coordmatrix))\n return atomlist", "def lzs (inlist):\r\n zscores = []\r\n for item in inlist:\r\n zscores.append(z(inlist,item))\r\n return zscores", "def test_get_neighbours(self):\n self.assertEqual(self.game.get_neighbours(2,2), [[1, 1], [1, 2], [1, 3], \n [2, 1], [2, 3], [3, 1], [3, 2], [3, 3]])\n self.assertEqual(self.game.get_neighbours(0,0), [[0, 1], [1, 0], [1, 1]])\n self.assertEqual(self.game.get_neighbours(44,0), [[43, 0], [43, 1], [44, 1]])\n self.assertEqual(self.game.get_neighbours(45,0), [])\n self.assertEqual(self.game.get_neighbours(44,89), [[43, 88], [43, 89], [44, 88]])", "def create_conserved_pos_list(gpcr_pdb,gpcr_aa, i,my_pos, cons_pos_li, multiple_chains,chain_name):\n my_pos_bw=my_pos.split(\"x\")[0]\n add_chain_name=\"\"\n if multiple_chains:\n add_chain_name=\":\"+chain_name\n while i < len(cons_pos_li):\n cons_pos = cons_pos_li[i][0]\n cons_pos_bw=cons_pos[1:]\n cons_aa=cons_pos[0]\n if my_pos_bw==cons_pos_bw:\n pos_range=find_range_from_cons_pos(my_pos, gpcr_pdb)\n if pos_range:\n cons_pos_li[i][2]=pos_range + add_chain_name\n (my_aa,chain)=gpcr_aa[my_pos]\n if my_aa != cons_aa:\n cons_pos_li[i][0]=cons_pos+my_aa\n i+=1", "def nm_152263_exons():\n return [(0, 234), (234, 360), (360, 494), (494, 612), (612, 683), (683, 759),\n (759, 822), (822, 892), (892, 971), (971, 7099)]", "def prefix_spectrum(spectrum: List[float]):\n result = []\n it = iter(spectrum)\n next(it)\n m = inverted_monoisotopic_mass\n for pair in zip(spectrum, it):\n mass = pair[1] - pair[0]\n index = bisect.bisect_left(m, (mass, \"dummy\"))\n 
index = index - 1 if index == len(m) or\\\n index - 1 > 0 and abs(mass - m[index - 1][0]) < abs(mass - m[index][0]) else index\n result.append(m[index][1])\n return \"\".join(result)", "def neighbours(x, y):\n n = []\n for c in ((y-1, x-1), (y-1, x), (y-1, x+1), (y, x-1), (y, x+1), (y+1, x-1), (y+1, x), (y+1, x+1)):\n n.append(c)\n return n", "def list2FloatPairs(self,in_list):\n n = len(in_list)\n out_list = []\n for i in range(0,n-1,2):\n if not (in_list[i].isalpha() or in_list[i]==''):\n out_list.append((float(in_list[i]),float(in_list[i+1])))\n return out_list", "def get_nearest_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) <= 1: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def recallFraction(truth_i3, measured_i3, tolerance):\n if (measured_i3.getNumberMolecules() == 0):\n return [0, truth_i3.getNumberMolecules()]\n \n recalled_locs = 0\n total_locs = 0\n for i in range(truth_i3.getNumberFrames()):\n t_locs = truth_i3.getMoleculesInFrame(i+1)\n m_locs = measured_i3.getMoleculesInFrame(i+1, good_only = False)\n \n dist = utilC.peakToPeakDist(t_locs['xc'], t_locs['yc'], m_locs['xc'], m_locs['yc'])\n\n recalled_locs += numpy.count_nonzero((dist < tolerance))\n total_locs += dist.size\n\n return [recalled_locs, total_locs]", "def getNeighbors(training_data, test_row, k):\n\n distances = list()\n for training_row in training_data:\n dist = euclidianDistance(training_row, test_row)\n distances.append([training_row, dist])\n \n #Sort on the basis of dist\n distances.sort(key=lambda row:row[1])\n\n neighbors = list()\n\n for i in range(int(k)):\n neighbors.append(distances[i][0])\n\n return neighbors", "def eq_distance(list_of_numbers):\n\n\t#return the list of three numbers if they exist, return False otherwise\n\tfor i in list_of_numbers:\n\t\tfor j in list_of_numbers:\n\t\t\tif j > i:\n\t\t\t\tdiff = j - i\n\t\t\t\tif (j + diff) in list_of_numbers:\n\t\t\t\t\treturn (i,j,j+diff)\n\t\t\t\telif (i - diff) in list_of_numbers:\n\t\t\t\t\treturn (i-diff, i , j)\n\treturn False" ]
[ "0.5994788", "0.56360227", "0.56093794", "0.55970883", "0.54962254", "0.54728734", "0.5409299", "0.53933924", "0.5324893", "0.53109133", "0.5306111", "0.5260367", "0.5244169", "0.5237841", "0.5218403", "0.5203575", "0.5183413", "0.5180326", "0.5171642", "0.5169028", "0.5147983", "0.5126392", "0.51196516", "0.5116443", "0.5109819", "0.51059335", "0.5102457", "0.51004213", "0.50574064", "0.50545406", "0.5046949", "0.50403965", "0.5021714", "0.50201225", "0.50146025", "0.5001449", "0.49998912", "0.49938622", "0.49932036", "0.4989079", "0.49865678", "0.49858633", "0.4980879", "0.49789172", "0.49735618", "0.4969474", "0.49671486", "0.49645585", "0.49642822", "0.4943997", "0.49353328", "0.49340162", "0.49334893", "0.49318323", "0.49245515", "0.4921399", "0.49185443", "0.49153683", "0.48980248", "0.48946258", "0.48897716", "0.48873478", "0.48822322", "0.48791367", "0.4875119", "0.48730415", "0.48729002", "0.4856554", "0.48434064", "0.48399302", "0.483971", "0.48372775", "0.48299196", "0.48192468", "0.48182723", "0.481197", "0.48104292", "0.48020694", "0.4798007", "0.4796647", "0.4796058", "0.47956282", "0.479315", "0.47926247", "0.4790844", "0.4788331", "0.4776773", "0.47699276", "0.4769894", "0.47694913", "0.47629362", "0.47600406", "0.47595146", "0.47588086", "0.47585943", "0.47468248", "0.47458395", "0.47448492", "0.4739935", "0.4737258" ]
0.67386407
0
Calculates for every atom the distances to all other atoms in atomlist. Returns a list where every element is a list of all distances.
def calculate_distance_matrix(atomlist): distlist = [] for atom in atomlist: atomdict = {} for partner in atomlist: if not str(int(partner[0][1])) in atomdict.keys(): atomdict[str(int(partner[0][1]))] = [] atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1])) else: atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1])) atomdict[str(int(partner[0][1]))].sort() distlist.append(atomdict) return distlist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_distances(self, atoms: List[CellAtom]):\n muon = self._cell_atoms[self._muon_index]\n\n for atom in atoms:\n atom.distance_from_muon = np.linalg.norm(muon.position - atom.position)", "def calcDistanceList(work_list):\n distance_list = []\n for swap in work_list: # for every work item find distance\n distance_list.append(Cluster.calcDistance(*swap))\n return distance_list", "def _calculate_distances(self):\n all_dists = []\n for ref in range(len(self.atoms)):\n if self.atoms[ref].symbol in self.exclude:\n continue\n indices = list(range(ref+1, len(self.atoms)))\n indices = self._filter_excluded(indices)\n if len(indices) == 0:\n continue\n dists = self.atoms.get_distances(ref, indices, mic=True)\n all_dists += list(dists)\n \n # Normalize by the mean distance\n return np.array(all_dists)/np.mean(all_dists)", "def distance_list(coordinate_list):\n for item1 in coordinate_list:\n L2 = []\n d_list.append(L2)\n for item2 in coordinate_list:\n if item1 != item2:\n distance = math.sqrt((item2[1] - item1[1]) ** 2 \\\n + (item2[2] - item1[2]) ** 2)\n L2.append((item2[0], distance))\n return d_list", "def _get_distances(self):\n for molecule in self.values():\n molecule.get_distances()\n\n # for atom in self.atoms:\n # atom.get_distances()", "def get_distances(self, crds):\n self.all_dist = np.zeros((self.natom, self.natom))\n # Loop over upper triangle of atom pairs\n for iat in range(self.natom-1):\n # Get the atom indices\n at_inds = np.arange(len(crds))\n\n # Calc distances between atoms (only upper triangle though)\n at_msk = at_inds > iat\n all_ut_dist = crds[at_msk] - crds[iat]\n all_ut_dist = np.linalg.norm(all_ut_dist, axis=1)\n\n self.all_dist[iat, iat+1:] = all_ut_dist\n\n # Get lower triangle indices\n self.all_dist = self.all_dist + self.all_dist.T", "def calculate_distances(data_point, centroids):\n distances = []\n for centroid_index, centroid_value in enumerate(centroids):\n distances.append(distance(data_point, centroid_value))\n return distances", "def compute_distances(Ls):\n if not isinstance(Ls, list):\n Ls = [Ls]\n\n dists = []\n for L in Ls:\n N,D = L.shape\n # 1xNxD - Nx1xD (L1 distance)\n dist = (np.abs(L[None,:,:] - L[:,None,:])).sum(axis=2)\n dists.append(dist)\n\n return dists", "def get_closest_neighbours(atomlist, neighbours=2):\n print('atomlist', atomlist)\n neighbourlist = []\n for atom in atomlist:\n listline = [atom[0][0]]\n dists = []\n distsc = []\n for partner in atomlist:\n dists.append(np.linalg.norm(atom[1] - partner[1]))\n distsc.append(np.linalg.norm(atom[1] - partner[1]))\n dists.remove(min(dists))\n for _ in range(neighbours):\n if min(dists) < 2.5:\n listline.append(atomlist[distsc.index(min(dists))][0][0])\n dists.remove(min(dists))\n #listline.append(atomlist[distsc.index(min(dists))][0][0])\n neighbourlist.append(listline)\n return neighbourlist", "def calcDistortionList(work_list):\n distortion_list = []\n for swap in work_list:\n distortion_list.append(Cluster.calcDistortion(*swap)) # call calcDistortion with tuple expansion as args\n return distortion_list", "def get_all_distances(cls, indices, dist_mat):\n distances = []\n for i, j in combinations(indices, 2):\n distances.append(cls.get_dist(dist_mat, i, j))\n return distances", "def measure_distance(self, mat):\n if len(mat) == 1:\n print(\"chain has only one CAatom\")\n return\n self.dists = []\n for num in range(0, len(mat)):\n if num + 1 <= len(mat) - 1:\n c1 = mat[num]\n c2 = mat[num + 1]\n d = c2 - c1\n self.dists.append(math.sqrt(np.sum(d * d)))\n return self.dists", "def 
link_atoms_by_distance(distlist1, atomlist1, distlist2, atomlist2, keys):\n hitlist = []\n\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n partnervalue += abs(atom[key][element] - partner[key][element])\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)", "def get_distances(self):\n N = len(self.cells) # Number of cells\n distances = np.zeros([N, N]) # distances between cells\n positions = self.position_matrix() # positions of cells \n \n # get distances between cells (exploit symmetry between upper and lower triangular form)\n for i, position in enumerate(positions[:-1, :]): # Iterate matrix except the last one\n directions = positions[i+1:, :] - position # direction from i to j > i\n distances[i, i+1:] = np.linalg.norm(directions, axis=1) # length of directions\n \n return distances + distances.T # Add lower triangle of matrix to upper ", "def calc_distances(marker_list, rf_pairs):\n final_distance = [[marker_list[0], 0]]\n\n for i in range(1, len(marker_list)):\n cur_markers = [marker_list[i-1], marker_list[i]]\n for rf_pair in rf_pairs:\n if rf_pair[0] in cur_markers and rf_pair[1] in cur_markers:\n final_distance.append([cur_markers[1], rf_pairs[rf_pair]])\n break\n return final_distance", "def distances(self):\n self._sort_measurements()\n return self._distances", "def link_atoms_by_distance_diff(distlist1, atomlist1, distlist2, atomlist2, keys):\n hitlist = []\n\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n value = abs(atom[key][element] - partner[key][element])\n partnervalue += value\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)", "def pairwise_distances(data):\n distances = []\n for x in data:\n distances_row = []\n for y in data:\n distances_row.append(metric(x, y)**2)\n distances.append(distances_row)\n return distances", "def _calculate_distance(self, ordered_list):\r\n\r\n total_distance = 0\r\n previous_point = None\r\n for point in ordered_list:\r\n if previous_point is not None:\r\n angle, distance = previous_point.angleAndDistanceTo(point) # geodesic distance in meters\r\n total_distance += distance\r\n previous_point = point\r\n\r\n return total_distance", "def _set_distances(results: List[(Place, float)]) -> List[Place]:\n all_entities = []\n\n for entity, distance in results:\n entity.distance = distance\n all_entities.append(entity)\n\n return all_entities", "def find_CX_neighbours(list_of_atoms, atom_list):\n my_list = []\n atom_numbers = []\n for atom in list_of_atoms:\n for element in identify_bonds(atom, atom_list):\n if (((element[0].atom_name == \"CX\") or (element[0].atom_name == \"CY\")) and (element[0].atom_number not in atom_numbers)):\n my_list.append(element[0])\n atom_numbers.append(element[0].atom_number)\n return my_list", "def filter_carbon_atoms(atom_list, rings):\n list_3 = []\n 
list_2 = []\n list_2n = []\n for atom in atom_list:\n if (check_connected(atom, identify_bonds(atom, atom_list)) == False):\n if (len(identify_bonds(atom, atom_list)) == 3):\n list_3.append(atom)\n elif (len(identify_bonds(atom, atom_list)) == 2):\n list_2.append(atom)\n for neighbour in identify_bonds(atom, atom_list):\n if (len(identify_bonds(neighbour[0], atom_list)) == 2):\n for ring in rings:\n if( (atom in ring) and (neighbour[0] in ring)):\n list_2n.append(atom) \n return list_3, list_2, list_2n", "def _euclidian_distances(stop_list):\n e_dists2 = [transitfeed.approximate_distance_between_stops(stop, tail) for\n (stop, tail) in zip(stop_list, stop_list[1:])]\n\n return e_dists2", "def calculate_distances(coords: List[Tuple[float, float]]) -> List[Dict]:\n miles = 0\n od = []\n for idx in range(len(coords)):\n if idx == 0:\n continue\n dist = distance(coords[idx], coords[idx - 1]).miles\n miles = miles + dist\n od.append(\n {\n \"start\": coords[idx - 1],\n \"stop\": coords[idx],\n \"distance\": dist,\n \"total\": miles,\n }\n )\n return od", "def subtree_distances(self, root):\r\n\r\n nodes = root.get_terminals()\r\n nodes.reverse()\r\n node_pairs = itertools.ifilter(\r\n lambda (a1, a2): a1.name < a2.name,\r\n itertools.product(nodes, nodes))\r\n\r\n distances = [self._node_distance(pair[0], pair[1])\r\n for pair in node_pairs]\r\n\r\n return distances", "def get_distances_list(mid_points):\n n = len(mid_points)\n dist_list = np.zeros((n,n))\n\n for i in range(n):\n for j in range(i+1, n):\n dist_list[i][j] = compute_distance(mid_points[i], mid_points[j])\n \n return dist_list", "def get_influence_atoms(atomlist):\n enviromentlist = []\n trunclist = []\n neighbourlist = get_closest_neighbours(atomlist, 4)\n for neighbours in neighbourlist:\n if neighbours[0][0] == \"H\":\n neighbours = neighbours[:2]\n if neighbours[0][0] == \"O\":\n neighbours = neighbours[:3]\n trunclist.append(neighbours)\n for atom in trunclist:\n newatom = []\n for atom1partner in atom[1:]:\n for partner in trunclist:\n if partner[0] == atom1partner:\n counter = 0\n\n for atomi in partner:\n if atomi[0] == 'H':\n counter += 1\n\n if counter < 2 or (partner[0] in atom and atom[0][0] == 'H'):\n newatom += atom + partner[1:]\n\n newatom = make_list_unique(newatom)\n newatom.sort()\n enviromentlist.append(newatom)\n return enviromentlist", "def atomList(self):\n\n\t\tal = []\t\n\t\tfor chain in self.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\tfor atom in res.atom:\n\t\t\t\t\tal.append(atom)\n\n\t\treturn al", "def total_cost_2D(self, final_list):\n total_cost = 0\n for i in range(len(final_list) - 1):\n temp = self.pairwise_distance(final_list[i], final_list[i + 1])\n total_cost = total_cost + temp\n print(\"Total distance: \" + str(total_cost))", "def _calc_distance(self, X):\n distances = np.zeros((X.shape[0], self.n_clusters))\n print(distances.shape)\n for i, centroid in enumerate(self.centroids):\n distances[:, i] = np.linalg.norm(X - centroid, axis=1)\n return distances", "def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = dist[ii, jj] \n \n # return\n return dist", "def bond_distances_v2(molmod_atoms, bonds=None, ignored_elements=None):\n if not ignored_elements:\n ignored_elements = []\n\n m=molmod_atoms\n\n if not bonds:\n bonds = m.graph.edges\n\n bond_dists = []\n indices = 
[]\n\n for ind1, ind2 in bonds:\n if not m.symbols[ind1] in ignored_elements and not m.symbols[ind2] in ignored_elements:\n bond_dists.append(m.distance_matrix[ind1,ind2]/molmod.angstrom)\n indices.append((ind1, ind2))\n\n #we sort by bond index so that comparison between two bdist_inds objects is possible (without sorting we can get variation in the order)\n bdist_inds = zip(bond_dists, indices)\n bdist_inds.sort(key=lambda e: e[1])\n\n return bdist_inds", "def extractAtoms(self, mol):\n\n\t\tself.atomlist = []\n\t\tfor chain in mol.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\tfor atom in res.atom:\n\t\t\t\t\tself.atomlist.append(atom)", "def calculate_all_distances_to_center(self):\n all_distances = pd.DataFrame()\n for label in np.unique(self.embedding_df['cluster']): \n distance_df = self.calculate_distances_for_cluster(label)\n all_distances = pd.concat([all_distances, distance_df])\n \n self.embedding_df = self.embedding_df.merge(all_distances, left_index=True, right_index=True)", "def getDistancesWithNames(twoDList):\n matrix = []\n for i in range(0,len(twoDList)):\n for j in range(len(twoDList) - len(twoDList) + i):\n SD = determineIdenticalBases(data[i][1], data[j][1])\n temp = []\n if SD[1] != 0:\n p = calculateP(SD[0]+SD[1], SD[1])\n temp.append(data[i][0])\n temp.append(data[j][0]) \n temp.append(estimateMutationsPerSite(p))\n matrix.append(temp)\n return matrix", "def distances(self) -> ndarray:\n return self._distances", "def calculate_all_distances(self):\n self.close_distance = self.calculate_distance(self.close_distance_factor)\n self.medium_distance = self.calculate_distance(self.medium_distance_factor)\n self.far_distance = self.calculate_distance(self.far_distance_factor)", "def iter_dist(self):\n self.makeTree()\n coords = self.coords\n sd = selfdistance\n for i in self.loopindices:\n dists, inds = self.nntree.query(coords[i], self.nnmaxcount,\n distance_upper_bound=self.nncutoff)\n yield coords[i], dists.compress((dists > sd) & ~np.isinf(dists))", "def _compute_hist_distances(\n self,\n all_histograms: Dict,\n n_attr: int\n ) -> np.ndarray:\n all_distances = np.empty((self.n_keep_nuclei, self.n_class_pairs, n_attr))\n for k_id , k in enumerate(self.keep_nuclei_list):\n omega = 0\n for tx in range(self.n_tumors):\n for ty in range(self.n_tumors):\n if tx < ty:\n for attr_id in range(n_attr):\n all_distances[k_id, omega, attr_id] = wasserstein_distance(\n all_histograms[k][tx][attr_id],\n all_histograms[k][ty][attr_id]\n )\n omega += 1\n return all_distances", "def bond_atoms(atom_list):\n pass", "def distances(self):", "def iter_atoms_by_distance(self, max_distance = None):\n listx = []\n\n if max_distance:\n for atm in self.get_structure().iter_atoms():\n d = AtomMath.calc_distance(self, atm)\n if d <= max_distance:\n listx.append((AtomMath.calc_distance(self, atm), atm))\n else:\n for atm in self.get_structure().iter_atoms():\n listx.append((AtomMath.calc_distance(self, atm), atm))\n\n listx.sort()\n return iter(listx)", "def compute_distances(self, X):\n #print(X.shape, self.Xtr.shape)\n dists = np.zeros((X.shape[0], self.Xtr.shape[0]))\n for i in range(X.shape[0]):\n X_r = np.tile(X[i], (self.Xtr.shape[0], 1))\n dists[i] = np.sqrt(np.sum(np.square(self.Xtr - X_r), axis = 1))\n #print(dists.shape)\n return dists", "def all_distances(self):\n points = self.color_lookup_table_points\n\n red = np.repeat(np.expand_dims(points[0], axis=0), points[0].size, axis=0)\n green = np.repeat(np.expand_dims(points[1], axis=0), points[1].size, axis=0)\n blue = 
np.repeat(np.expand_dims(points[2], axis=0), points[2].size, axis=0)\n\n self.distances = np.sqrt(\n np.square(red - red.transpose())\n + np.square(green - green.transpose())\n + np.square(blue - blue.transpose()))", "def calculate_distances(drives):\n for d in drives:\n d.set_distance()", "def connected_components(self) -> List[list]:\n for n in self.dw_graph.get_all_v().values():\n n.distance=0.0\n mega_list = []\n for n in self.dw_graph.get_all_v().values():\n if n.distance!=-10:\n mega_list.append(self.connected_component(n.node_id))\n return mega_list", "def get_all_grouped_distances(dist_matrix_header, dist_matrix, mapping_header,\r\n mapping, field, within=True,\r\n suppress_symmetry_and_hollowness_check=False):\r\n distances = get_grouped_distances(dist_matrix_header, dist_matrix,\r\n mapping_header, mapping, field, within,\r\n suppress_symmetry_and_hollowness_check)\r\n results = []\r\n for group in distances:\r\n for distance in group[2]:\r\n results.append(distance)\r\n return results", "def distance_matrix(dnas: Collection[str], metric=hamming_distance, relative=True, as_ndarray=False):\n n = len(dnas)\n result = [[0] * n for _ in range(n)]\n for pair in itertools.combinations(zip(range(n), dnas), r=2):\n (idx1, dna1), (idx2, dna2) = pair\n distance = metric(dna1, dna2)\n distance = distance / max(len(dna1), len(dna2)) if relative else distance\n result[idx1][idx2] = distance\n result[idx2][idx1] = distance\n if as_ndarray:\n result = np.asarray(result)\n return result", "def list_distances(configuration, disk_count, j_index ):\n \n for other_index in list(set(configuration)-set([j_index])):\n yield distance(j_index, other_index, disk_count)", "def compute_distance_pairs(cities):\n global distances_pair\n for city_from in cities:\n distances_pair.append([0 for r in range(city_from.index)]) # create\n for city_to in cities[:city_from.index]:\n distances_pair[city_from.index][city_to.index] = city_from.compute_distance_to_city_in_km(city_to)", "def _calc_(self):\n self.data = []\n all_xyz_data = self.Var.data.get_xyz_data()\n all_cols = self.Var.data.get_xyz_cols()\n\n # Loop over all the xyz data and cols we have\n for xyz_data, cols in zip(all_xyz_data, all_cols):\n\n at_crds = np.array([i[cols[0] != 'Ne'] for i in xyz_data])\n self.natom = len(at_crds[0])\n self.nstep = len(at_crds)\n self.step_data = {}\n\n # Calculate the nearest neighbour lists for each step\n for step in range(self.nstep):\n self.step_data[step] = {}\n\n # Get coords\n crds = at_crds[step]\n\n # Get distances between neighbours\n self.get_distances(crds)\n\n # Get a sorted list of atom indices by distance\n self.get_nearest_atom_inds()\n\n # If we have some molecule metadata\n if 'atoms_per_molecule' in self.Var.metadata:\n self.at_per_mol = self.Var.metadata['atoms_per_molecule']\n self.nmol = mol_utils.get_nmol(self.natom, self.at_per_mol)\n self.reshape_at_dist()\n self.get_nearest_atom_inds_per_mol()\n self.step_data[step]['closest_atoms_mol_grouped'] = self.closest_at_per_mol\n self.step_data[step]['distances_mol_grouped'] = self.all_dist_per_mol\n\n # Save data in dict\n self.step_data[step]['distances'] = self.all_dist\n self.step_data[step]['closest_atom_indices'] = self.closest_ats\n\n self.data.append(self.step_data)\n\n return self.data", "def distance_coord(df):\n temp_list_distance=[]\n list_distance=[]\n for i in range(len(df)-1):\n coord1 = (df['lat'][i], df['lon'][i])\n coord2 = (df['lat'][i+1], df['lon'][i+1])\n dist = geopy.distance.geodesic(coord1, coord2).km\n 
temp_list_distance.append(dist)\n list_distance.append(sum(temp_list_distance)) \n return(list_distance)", "def get_distances(db, start, ignore_people=frozenset(),\n dist_cutoff=None, verbose=False):\n dists = {}\n total_dist = 0\n max_dist = 0\n hist_dist = collections.defaultdict(int)\n for node in bfs_tools.ConnectionBfs(db, start, ignore_people):\n if dist_cutoff and node.dist > dist_cutoff:\n break\n dists[node.person] = node.dist\n hist_dist[node.dist] += 1\n max_dist = node.dist\n total_dist += node.dist\n if verbose and len(dists) % 1_000_000 == 0:\n utils.log(f\" ... {len(dists):_} nodes / Circle {node.dist}\")\n mean_dist = float(total_dist) / len(dists)\n hist_dist_list = [hist_dist[i] for i in range(max(hist_dist.keys()) + 1)]\n return dists, hist_dist_list, mean_dist, max_dist", "def cut_off_distance_calculator(config_data):\n\tatom_type = ((config_data[\"atom_id\"].unique()).astype(np.int64)).tolist()\n\tatom_type.sort()\n\tconfig_size = config_data[\"item\"].size\n\tcut_off_distance = []\n\tfor i in range(len(atom_type)):\n\t\tfor j in range(i+1,len(atom_type)-1):\n\t\t\tpair_dist_fun = pdf_calculator_pair(config_data, atom_type[i],atom_type[j])\n\t\t\tcut_off_distance.append(first_min_pdf(pair_dist_fun))\n\treturn cut_off_distance", "def distances(points, l=2):\n distances = []\n while points:\n baseline = points.pop()\n distances.extend([distance(baseline, point, l) for point in points])\n return distances", "def compute_distances(\n pdb,\n use_tenA_neighbor_residues=True,\n analyze_all_sites=True,\n sites_to_analyze=[],\n):\n \n # Read in PDB as pose and score the pose\n pose = pyrosetta.pose_from_pdb(pdb)\n sf(pose)\n\n # Initiate a dictionary for storing inter-atomic\n # distances\n energies_dict = {\n key : []\n for key in [\n 'd',\n\n 'res_i_n', 'res_i_pdb_n', 'res_i_name', 'res_i_chain',\n 'atom_i_n', 'atom_i_name', 'atom_i_type_name',\n 'atom_i_bb', 'atom_i_lj_radius',\n\n 'res_j_n', 'res_j_pdb_n', 'res_j_name', 'res_j_chain',\n 'atom_j_n', 'atom_j_name', 'atom_j_type_name',\n 'atom_j_bb', 'atom_j_lj_radius',\n ]\n }\n\n # Loop over all residues in the protein\n pose_size = pose.size()\n res_i_ns = list(range(1, pose_size+1))\n for res_i_n in res_i_ns:\n\n # If you specified a subset of sites to analyze,\n # then skip over sites that aren't in this subset\n if not analyze_all_sites:\n if res_i_n not in sites_to_analyze:\n continue\n\n # Make a list of neighboring residues to loop over\n # when computing inter-atomic distances\n res_i = pose.residue(res_i_n)\n if use_tenA_neighbor_residues:\n neighbors = pyrosetta.rosetta.core.select.get_tenA_neighbor_residues(\n pose,\n pyrosetta.Vector1([\n i == res_i_n for i in range(1, pose.size()+1)\n ])\n )\n res_j_ns = [\n res_n for (bool_n, res_n)\n in zip(neighbors, range(1, pose_size+1))\n if bool_n\n ]\n else:\n res_j_ns = list(range(1, pose.size()+1))\n\n # Loop over all neighbors to residue i, computing\n # all inter-atomic distances between residue pairs.\n for res_j_n in res_j_ns:\n \n # Skip over a residue pair of res_i_n => res_j_n so\n # as to avoid double counting and to skip distances\n # between atoms in the same residues. 
If specified,\n # also skip over sites that shouldn't be analyzed.\n if res_i_n >= res_j_n:\n continue\n if not analyze_all_sites:\n if res_j_n not in sites_to_analyze:\n continue\n res_j = pose.residue(res_j_n)\n for atom_i_n in list(range(1, res_i.natoms()+1)):\n \n # Get atom name and coordinates\n atom_type_i = res_i.atom_type(atom_i_n)\n atom_i_xyz = res_i.xyz(atom_i_n)\n\n for atom_j_n in list(range(1, res_j.natoms()+1)):\n\n # Get atom name and corrdinates\n atom_type_j = res_j.atom_type(atom_j_n)\n atom_j_xyz = res_j.xyz(atom_j_n)\n\n # Record distance and other metadata\n energies_dict['d'].append(\n (atom_i_xyz - atom_j_xyz).norm()\n )\n energies_dict['res_i_n'].append(res_i_n)\n energies_dict['res_i_pdb_n'].append(\n pose.pdb_info().number(res_i_n)\n )\n energies_dict['res_i_name'].append(\n res_i.name3()\n )\n energies_dict['res_i_chain'].append(\n pose.pdb_info().chain(res_i_n)\n )\n energies_dict['atom_i_n'].append(atom_i_n)\n energies_dict['atom_i_name'].append(\n res_i.atom_name(atom_i_n).strip()\n )\n energies_dict['atom_i_type_name'].append(\n atom_type_i.name().strip()\n )\n energies_dict['atom_i_bb'].append(\n res_i.atom_is_backbone(atom_i_n)\n )\n energies_dict['atom_i_lj_radius'].append(\n atom_type_i.lj_radius()\n )\n\n energies_dict['res_j_n'].append(res_j_n)\n energies_dict['res_j_pdb_n'].append(\n pose.pdb_info().number(res_j_n)\n )\n energies_dict['res_j_name'].append(res_j.name3())\n energies_dict['res_j_chain'].append(\n pose.pdb_info().chain(res_j_n)\n )\n energies_dict['atom_j_n'].append(atom_j_n)\n energies_dict['atom_j_name'].append(\n res_j.atom_name(atom_j_n).strip()\n )\n energies_dict['atom_j_type_name'].append(\n atom_type_j.name().strip()\n )\n energies_dict['atom_j_bb'].append(\n res_j.atom_is_backbone(atom_j_n)\n )\n energies_dict['atom_j_lj_radius'].append(\n atom_type_j.lj_radius()\n )\n\n energies_df = pandas.DataFrame(energies_dict)\n \n # Subset to inter-atomic distances of d-o <= 0.5 A,\n # where o is the sum of the VDW radii\n energies_df['o'] = \\\n energies_df['atom_i_lj_radius'] + \\\n energies_df['atom_j_lj_radius']\n energies_df['d-o'] = energies_df['d'] - energies_df['o']\n energies_df = energies_df[\n energies_df['d-o'] <= 0.5\n ]\n \n # Compute the distance between residues in primary\n # sequence. 
Then, drop rows where the two atoms\n # are from two adjacent residues and at least one\n # atom is a backbone atom\n energies_df['seq_dist'] = \\\n energies_df['res_i_n'] - energies_df['res_j_n']\n energies_df['seq_dist'] = energies_df['seq_dist'].abs()\n energies_df['drop'] = \\\n (energies_df['seq_dist'] < 2) & \\\n (energies_df['atom_i_bb'] | energies_df['atom_j_bb'])\n energies_df = energies_df[~energies_df['drop']]\n \n # Add a column that gives the atom pair, with the\n # two atom atom names sorted in alphabetical order\n energies_df['atom_pair'] = energies_df.apply(\n lambda row: ':'.join(sorted([\n row['atom_i_type_name'], row['atom_j_type_name']\n ])),\n axis=1\n )\n \n # Add columns indicating rows with certain combinations\n # of hydrophobic atoms\n hydrophobic_residues = [\n 'ALA', 'VAL', 'LEU', 'ILE', 'MET',\n 'PHE', 'TRP', 'TYR',\n ]\n hydrophobic_carbons = ['CH1', 'CH2', 'CH3', 'CH0', 'aroC']\n hydrophobic_hydrogens = ['Hapo', 'Haro']\n for x in ['i', 'j']:\n energies_df[f'hydrophobic_carbon_{x}'] = \\\n (energies_df[f'res_{x}_name'].isin(hydrophobic_residues) &\n energies_df[f'atom_{x}_type_name'].isin(hydrophobic_carbons))\n energies_df[f'hydrophobic_hydrogen_{x}'] = \\\n (energies_df[f'res_{x}_name'].isin(hydrophobic_residues) &\n energies_df[f'atom_{x}_type_name'].isin(hydrophobic_hydrogens))\n\n energies_df['hydrophobic_C_C'] = \\\n (energies_df['hydrophobic_carbon_i'] &\n energies_df['hydrophobic_carbon_j'])\n energies_df['hydrophobic_C_H'] = (\n (\n energies_df['hydrophobic_carbon_i'] &\n energies_df['hydrophobic_hydrogen_j']\n ) |\n (\n energies_df['hydrophobic_carbon_j'] &\n energies_df['hydrophobic_hydrogen_i']\n ))\n energies_df['hydrophobic_H_H'] = \\\n ((energies_df['hydrophobic_hydrogen_i']) &\n (energies_df['hydrophobic_hydrogen_j']))\n energies_df['C_Obb'] = energies_df['atom_pair'].isin([\n 'CH1:OCbb', 'CH2:OCbb', 'CH3:OCbb',\n 'OCbb:aroC', 'CH0:OCbb',\n ])\n \n return energies_df", "def iter_all_atoms(self):\n for atm in self.atom_order_list:\n if isinstance(atm, Atom):\n yield atm\n else:\n for atmx in atm:\n yield atmx", "def point_distances(self, params=None):\n if params is None:\n params = self.collocation_points()\n with self.fix_evaluator():\n pts = np.array([self(la) for la in params])\n deltas = np.diff(pts, axis=0)\n distances = norm(deltas, axis=1)\n return distances", "def _calc_distance_features(self):\n d = ()\n for dx, dy in DIRECTIONS:\n if dx and dy:\n d += (list(self.__calc_distance(direction_x=dx, direction_y=dy)), )\n elif dx:\n tmp, _, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n elif dy:\n _, tmp, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n\n self.dist_features = d\n\n self.direc_dist = self.__calc_direc_distance()", "def compute_distances_one_loop(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in range(num_test):\n dists[i, :] = np.sqrt(np.sum(np.square(X[i, :] - self.X_train), axis=1)).transpose()\n return dists", "def distancePairs(self):\n return spsd.squareform(spsd.pdist(self.coordinates()))", "def distance(self):\n try:\n import pdb\n pdb.set_trace()\n s = []\n x0,y0 = self.deriv.T\n for thing in everything:\n x1,y1 = thing.deriv.T\n r,p = pearsonr(y0,y1)\n s.append(( p,thing.label ))\n s.sort()\n #print s[-5:]\n print s\n except:\n return np.inf", "def createDistanceList(gmaps, cityList, convert = 1):\n #First create a list\n distances = []\n print \"Cities Calculated:\"\n for i in 
range(len(cityList)):\n distances.append([])\n for j in range(len(cityList)):\n d = getDistance(gmaps, cityList[i], cityList[j])\n distances[i].append(d/(convert * 1.0))\n \n print cities[i]\n \n return distances", "def l2_dist_list(list_func, range_x, func_ref = None):\n if(func_ref is None):\n func_ref = list_func[0]\n \n l2_dists = [pFunc_base.square_dist(func_ref, f) for f in list_func] \n return l2_dists", "def _calc_adjacent_discrepancies(self, l_ts_dists):\n return [abs(ts_dist[1] - l_ts_dists[i + 1][1]) for i, ts_dist in enumerate(l_ts_dists[:-1])]", "def get_interactions(list_atoms1, list_atoms2, dist):\n beta_carbons1 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms1))\n beta_carbons2 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms2))\n ns = NeighborSearch(beta_carbons1)\n interactions = []\n\n for atom in beta_carbons2:\n interact = ns.search(atom.get_coord(), dist)\n interactions.extend(\n [tuple(sorted([str(atom.get_parent().resname), str(x.get_parent().resname)])) for x in interact])\n return interactions", "def calculate_distances(train_data, test_datum):\n n = train_data.shape[0]\n dist = []\n for i in range(n):\n distance = np.sqrt(np.sum(np.square(train_data[i]-test_datum)))\n dist.append(distance)\n dist = np.asarray(dist)\n return dist", "def _mbondi2_radii(atom_list):\n radii = [0.0 for atom in atom_list]\n for i, atom in enumerate(atom_list):\n # Radius of H atom depends on element it is bonded to\n if atom.type.atomic_number == 1:\n if atom.bond_partners[0].type.atomic_number == 7:\n radii[i] = 1.3\n else:\n radii[i] = 1.2\n # Radius of C atom depends on what type it is\n elif atom.type.atomic_number == 6:\n radii[i] = 1.7\n # All other elements have fixed radii for all types/partners\n elif atom.type.atomic_number == 7:\n radii[i] = 1.55\n elif atom.type.atomic_number == 8:\n radii[i] = 1.5\n elif atom.type.atomic_number == 9:\n radii[i] = 1.5\n elif atom.type.atomic_number == 14:\n radii[i] = 2.1\n elif atom.type.atomic_number == 15:\n radii[i] = 1.85\n elif atom.type.atomic_number == 16:\n radii[i] = 1.8\n elif atom.type.atomic_number == 17:\n radii[i] = 1.5\n else:\n radii[i] = 1.5\n return radii # Converted to nanometers above", "def reshape_at_dist(self):\n self.all_dist_per_mol = np.zeros((self.nmol, self.at_per_mol, self.at_per_mol))\n for imol in range(self.nmol):\n start, end = self.at_per_mol*imol, (imol+1)*self.at_per_mol\n self.all_dist_per_mol[imol] = self.all_dist[start:end,\n start:end]", "def getDistances(trainingSet, testInstance, distances):\n # Empty list to store distances of between testInstance and each trainSet item\n # Number of dimensions to check\n length=len(testInstance) - 1\n # Iterate through all items in trainingSet and compute the distance, then append to the distances list\n for x in range(len(trainingSet)):\n dist=calculateDistance(testInstance, trainingSet[x], length)\n distances.append((trainingSet[x], dist))\n return distances", "def extract_neighbor_distance_data(\n cell_structure: pmg.Structure, all_neighbors: AllNeighborDistances\n) -> NeighborDistances:\n neighbor_distances: NeighborDistances = {\n \"i\": [],\n \"j\": [],\n \"subspecies_i\": [],\n \"subspecies_j\": [],\n \"distance_ij\": [],\n }\n\n site_i_index: int\n site_i_neighbors: SiteNeighbors\n for site_i_index, site_i_neighbors in enumerate(all_neighbors):\n append_site_i_neighbor_distance_data(\n site_i_index=site_i_index,\n site_i_neighbors=site_i_neighbors,\n cell_structure=cell_structure,\n neighbor_distances=neighbor_distances,\n )\n\n return 
neighbor_distances", "def calc_distances(client_list):\n distances = {}\n for x in client_list:\n distances[x] = {}\n for y in client_list:\n distances[x][y] = dis(x, y)\n return distances", "def get_euclid_distance_to(self, atom):\n return linalg.norm(self.get_coords() - atom.get_coords())", "def get_nearest_atom_inds_per_mol(self):\n self.closest_at_per_mol = np.zeros((self.nmol,\n self.at_per_mol,\n self.at_per_mol-1), dtype=int)\n\n # Get and sort distances\n all_at_inds = np.arange(self.at_per_mol)\n for imol in range(self.nmol):\n for iat in range(self.at_per_mol):\n at_inds = all_at_inds[all_at_inds != iat]\n dist = self.all_dist_per_mol[imol, iat, at_inds]\n\n at_inds = [i[1] for i in sorted(zip(dist, at_inds))]\n self.closest_at_per_mol[imol, iat] = at_inds", "def get_nearest_atom_inds(self):\n # Create empty data structure\n self.closest_ats = np.zeros((self.natom, self.natom-1), dtype=int)\n\n # Get and sort distances\n all_at_inds = np.arange(self.natom)\n for iat in range(self.natom):\n at_inds = all_at_inds[all_at_inds != iat]\n dist = self.all_dist[iat, at_inds]\n\n at_inds = [i[1] for i in sorted(zip(dist, at_inds))]\n self.closest_ats[iat] = at_inds", "def Cal_Dist(self):\n sum_euclidean_dist = 0\n last_point = None\n for index, this_point in enumerate(self.__traectory_list):\n if last_point is not None:\n sum_euclidean_dist = ((last_point[0]-this_point[0])**2+(last_point[0]-this_point[1])**2)**0.5\n # Debug: Show cumulative geodetic distance\n # Checked with the beginning and the last one\n #print sum_geodetic_dist\n last_point = this_point\n return sum_euclidean_dist", "def all_distances(coords1, coords2):\r\n c1 = np.array(coords1)\r\n c2 = np.array(coords2)\r\n z = (c1[:, None, :] - c2[None, :, :]) ** 2\r\n return np.sum(z, axis=-1) ** 0.5", "def __get_all_combinations(self, list_of_items):\r\n return [itertools.combinations(list_of_items, index+1)\r\n for index in range(len(list_of_items))]", "def get_distances(self):\n return DistanceSensors(*self.bot_client.send_command(_Command.GetDistances))", "def computeCrowdingDist(pareto_fronts):\n nobj = len(pareto_fronts[0][0])\n distances = defaultdict(float)\n\n for front in pareto_fronts:\n for i in range(nobj):\n front.sort(key=itemgetter(i))\n distances[front[-1]] = float(\"inf\")\n distances[front[0]] = float(\"inf\")\n if front[-1][i] == front[0][i]:\n continue\n norm = float(front[-1][i] - front[0][i]) * nobj\n for prev, cur, nex in zip(front[:-2], front[1:-1], front[2:]):\n distances[cur] += (nex[i] - prev[i]) / norm\n return distances", "def __build_distance_matrix(self):\n for i in range(0, len(self.__corpus)):\n doc_i = self.__corpus[i]\n for j in range(i + 1, len(self.__corpus)):\n doc_j = self.__corpus[j]\n distance = doc_i.calc_distance(doc_j)\n self.__distance_matrix.append(distance)", "def get_all_neighbor_coords(tiles):\n return [add(tile, neighbor) for tile in tiles for neighbor in NEIGHBORS]", "def distance(cls, atom_1, atom_2):\n\t\t\n\t\treturn np.linalg.norm((atom_1-atom_2).atom_loc)", "def _computeDistances(self) -> None:\n length = len(self.data)\n for i, sequenceOne in enumerate(self.data):\n print(f\"[SeqCluBaselineOffline] Computing distances is at iteration {i} of {length}.\")\n for j, sequenceTwo in enumerate(self.data):\n if i == j:\n self.distances[i][j] = 0\n continue\n distance = self.distanceMeasure.calculateDistance(sequenceOne, sequenceTwo)\n self.distances[i][j] = distance\n self.distances[j][i] = distance", "def calc_distances_from_central(cluster, embedding):\n\n return 
calc_distances_in_embedding(cluster, embedding)", "def sp2_dihedrals(atoms):\n\n #problems with atoms inbuilt dihedral method (doesn't match gaussview/jmol at all)\n #so we'll use one taken from http://stackoverflow.com/questions/20305272/dihedral-torsion-angle-from-four-points-in-cartesian-coordinates-in-python\n def get_dihedral(p):\n b = p[:-1] - p[1:]\n b[0] *= -1\n v = np.array([v - (v.dot(b[1])/b[1].dot(b[1])) * b[1] for v in [b[0], b[2]]])\n # Normalize vectors\n v /= np.sqrt(np.einsum('...i,...i', v, v)).reshape(-1,1)\n b1 = b[1] / np.linalg.norm(b[1])\n x = np.dot(v[0], v[1])\n m = np.cross(v[0], b1)\n y = np.dot(m, v[1])\n return np.degrees(np.arctan2(y, x))\n\n mol = to_molmod(atoms)\n data = []\n\n for i in range(len(atoms)):\n if len(mol.graph.neighbors[i]) == 3:\n atom_indices = [i] + list(mol.graph.neighbors[i])\n atom_positions = np.array([atoms[temp_index].position for temp_index in atom_indices])\n #dihedral = atoms.get_dihedral(atom_indices)\n dihedral = get_dihedral(atom_positions)\n result = (i, dihedral)\n data.append(result)\n\n return data", "def get_dist_mat(self):\n n_site = self.status.give(keyword=\"n_site\")\n sites = self.status.give(keyword=\"sites\")\n dist_mat = [[0.0 for j in xrange(n_site)] for i in xrange(n_site)]\n for i in xrange(n_site):\n for j in xrange(n_site):\n ri = sites[i].pos\n rj = sites[j].pos\n dist_mat[i][j] = np.linalg.norm(ri-rj)\n # print ri, rj\n return dist_mat", "def update_agent_distances_vector(self):\n count = 0\n for agent in self.agents:\n agent_loc = agent.getz()\n\n for i, each_task in enumerate(self.tasks):\n dist = euclid_dist(agent_loc, each_task.getloc())\n self.agent_distances[count][i] = dist\n count += 1\n if self.DEBUG:\n print(self.agent_distances)", "def compute_distances(self):\n if self.df is None:\n return\n\n self.origdist = []\n self.transdist = []\n for i in range(len(self.df)):\n for j in range(i+1, len(self.df)):\n self.origdist.append(distance(self.df['LPsol'].iloc[i], self.df['LPsol'].iloc[j]))\n self.transdist.append(distance(self.df[['x', 'y']].iloc[i], self.df[['x', 'y']].iloc[j]))", "def iter_all_atoms(self):\n for strand in self.strand_list:\n for atm in strand.iter_all_atoms():\n yield atm", "def get_cost(self, logits_list):\n\n cost_list = []\n\n for dn, logits in zip(self.find_targetnodes(), logits_list):\n\n if len(dn.receives_from) == 0: continue\n _, y_ = dn.get_tensors()\n cost_list.append(self.cost_function(y_, logits))\n\n return cost_list", "def compute_distances_two_loops(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in range(num_test):\n for j in range(num_train):\n dists[i, j] = np.sqrt(np.sum(np.square(X[i] - self.X_train[j])))\n return dists", "def buildBondsByDistance(self):\n\n if self.hasBonds: return\n# atoms = findType(Atom)\n atoms = self.children\n if not self.hasBonds:\n bonds = self.buildBondsByDistanceOnAtoms(atoms)\n self.hasBonds = 1\n #return len(atoms)\n return bonds", "def _mbondi_radii(atom_list):\n radii = [0.0 for atom in atom_list]\n for i, atom in enumerate(atom_list):\n # Radius of H atom depends on element it is bonded to\n if atom.type.atomic_number == 1:\n bondeds = list(atom.bond_partners)\n if bondeds[0].type.atomic_number in (6, 7): # C or N\n radii[i] = 1.3\n elif bondeds[0].type.atomic_number in (8, 16): # O or S\n radii[i] = 0.8\n else:\n radii[i] = 1.2\n # Radius of C atom depends on what type it is\n elif atom.type.atomic_number == 6:\n radii[i] = 1.7\n # All other elements have 
fixed radii for all types/partners\n elif atom.type.atomic_number == 7:\n radii[i] = 1.55\n elif atom.type.atomic_number == 8:\n radii[i] = 1.5\n elif atom.type.atomic_number == 9:\n radii[i] = 1.5\n elif atom.type.atomic_number == 14:\n radii[i] = 2.1\n elif atom.type.atomic_number == 15:\n radii[i] = 1.85\n elif atom.type.atomic_number == 16:\n radii[i] = 1.8\n elif atom.type.atomic_number == 17:\n radii[i] = 1.5\n else:\n radii[i] = 1.5\n return radii # converted to nanometers above", "def iter_atoms(self):\n for chain in self.chain_list:\n for frag in chain.fragment_list:\n for atm in frag.atom_list:\n yield atm", "def calculate_distance(atom1,atom2): #dot string to show when you go into the help doc of this function\n x_distance = atom1[0]-atom2[0]\n y_distance = atom1[1]-atom2[1]\n z_distance = atom1[2]-atom2[2]\n distance = numpy.sqrt(x_distance**2+ y_distance**2+z_distance**2)\n return distance", "def distance_matrix(cities):\n\n return [[city1.distance(city2) for city2 in cities]\n for city1 in cities]", "def _compute_pairwise_distance(self, column: List[List[Token]]) -> np.array:\n pairwise = NeedlemanWunschAligner()\n l = len(column)\n distances = np.empty((l, l))\n for u in range(l):\n # compute only half of the distances\n for v in range(u, l):\n au, av = pairwise.align([column[u], column[v]]) # get aligned\n distances[u][v] = distances[v][u] = self.distance.compute(au, av)\n\n return distances", "def combine_atoms(\n mols, clash_param=1.5, min_cluster_size=2, smooth_param=0.5, smooth_grad=0.1\n):\n rdkit_atom = RDKitAtom()\n molecule_atoms = []\n for i, mol in enumerate(mols):\n atoms = rdkit_atom.generate_atoms_for_mol(mol)\n molecule_atoms.append([i, mol, atoms])", "def sum_for_list(lst):\n list_of_nods = []\n for num in lst:\n temp_list = simple_nod(abs(num))\n for item in temp_list:\n if item not in list_of_nods:\n list_of_nods.append(item)\n result = []\n for nod in list_of_nods:\n flag = False\n sum = 0\n for num in lst:\n if not num % nod:\n sum += num\n flag = True\n if flag:\n result.append([nod, sum])\n return sorted(result, key=lambda x: x[0])" ]
[ "0.73647845", "0.7127275", "0.7080569", "0.67939436", "0.66801125", "0.66210407", "0.6395463", "0.6352536", "0.60293406", "0.59641975", "0.5953034", "0.5883599", "0.5874572", "0.5861031", "0.5844821", "0.5841001", "0.5830898", "0.5818882", "0.5806686", "0.57949203", "0.5784324", "0.5754123", "0.5749961", "0.5739553", "0.57180345", "0.56448495", "0.562841", "0.5622595", "0.5604913", "0.55522674", "0.55271953", "0.5488316", "0.54865247", "0.54863906", "0.54705894", "0.5470054", "0.54601806", "0.5447083", "0.5434687", "0.54235655", "0.54110944", "0.537245", "0.53438854", "0.53319997", "0.5302156", "0.52819073", "0.5252426", "0.5234674", "0.5224045", "0.5223471", "0.52131575", "0.5212194", "0.520116", "0.5195205", "0.51862526", "0.51831865", "0.5172779", "0.51699114", "0.5167248", "0.51658386", "0.516451", "0.51636195", "0.5146383", "0.5141682", "0.51359874", "0.51171875", "0.5114546", "0.5110953", "0.51070106", "0.510698", "0.5098375", "0.5094065", "0.5077998", "0.50702804", "0.5067543", "0.5064678", "0.5062709", "0.5060602", "0.5058284", "0.5043402", "0.5040913", "0.50308824", "0.50298697", "0.5015552", "0.50134236", "0.50068825", "0.49880826", "0.49875602", "0.49696067", "0.49654433", "0.49561426", "0.49482608", "0.4932965", "0.492403", "0.49184167", "0.49144956", "0.49053368", "0.49015248", "0.48999998", "0.48998433" ]
0.78830993
0
The function is able to identify equal atoms of one molecule in different coordinate systems independent of the molecule's orientation.
def link_atoms_by_distance(distlist1, atomlist1, distlist2, atomlist2, keys):
    hitlist = []
    for atom in distlist1:
        atomtype = int(atomlist1[distlist1.index(atom)][0][1])
        valuelist = []
        for partner in distlist2:
            partnertype = int(atomlist2[distlist2.index(partner)][0][1])
            if atomtype == partnertype:
                partnervalue = 0
                keylist = partner.keys()
                for key in keylist:
                    for element in xrange(len(atom[key])):
                        partnervalue += abs(atom[key][element] - partner[key][element])
            else:
                partnervalue = 9999999
            valuelist.append(partnervalue)
        minvalue = min(valuelist)
        besthit = valuelist.index(minvalue)
        hitlist.append(besthit)
    return hitlist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_isomorphic_general(self):\n # check that hill formula fails are caught\n ethanol = create_ethanol()\n acetaldehyde = create_acetaldehyde()\n assert ethanol.is_isomorphic_with(acetaldehyde) is False\n assert acetaldehyde.is_isomorphic_with(ethanol) is False\n # check that different orderings work with full matching\n ethanol_reverse = create_reversed_ethanol()\n assert ethanol.is_isomorphic_with(ethanol_reverse) is True\n # check a reference mapping between ethanol and ethanol_reverse matches that calculated\n ref_mapping = {0: 8, 1: 7, 2: 6, 3: 3, 4: 4, 5: 5, 6: 1, 7: 2, 8: 0}\n assert (\n Molecule.are_isomorphic(ethanol, ethanol_reverse, return_atom_map=True)[1]\n == ref_mapping\n )\n # check matching with nx.Graph atomic numbers and connectivity only\n assert (\n Molecule.are_isomorphic(\n ethanol,\n ethanol_reverse.to_networkx(),\n aromatic_matching=False,\n formal_charge_matching=False,\n bond_order_matching=False,\n atom_stereochemistry_matching=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n # check matching with nx.Graph with full matching\n assert ethanol.is_isomorphic_with(ethanol_reverse.to_networkx()) is True\n # check matching with a TopologyMolecule class\n from openforcefield.topology.topology import Topology, TopologyMolecule\n\n topology = Topology.from_molecules(ethanol)\n topmol = TopologyMolecule(ethanol, topology)\n assert (\n Molecule.are_isomorphic(\n ethanol,\n topmol,\n aromatic_matching=False,\n formal_charge_matching=False,\n bond_order_matching=False,\n atom_stereochemistry_matching=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n # test hill formula passes but isomorphic fails\n mol1 = Molecule.from_smiles(\"Fc1ccc(F)cc1\")\n mol2 = Molecule.from_smiles(\"Fc1ccccc1F\")\n assert mol1.is_isomorphic_with(mol2) is False\n assert mol2.is_isomorphic_with(mol1) is False", "def test_isomorphic_general(self):\n # check that hill formula fails are caught\n ethanol = create_ethanol()\n acetaldehyde = create_acetaldehyde()\n assert ethanol.is_isomorphic_with(acetaldehyde) is False\n assert acetaldehyde.is_isomorphic_with(ethanol) is False\n # check that different orderings work with full matching\n ethanol_reverse = create_reversed_ethanol()\n assert ethanol.is_isomorphic_with(ethanol_reverse) is True\n # check a reference mapping between ethanol and ethanol_reverse matches that calculated\n ref_mapping = {0: 8, 1: 7, 2: 6, 3: 3, 4: 4, 5: 5, 6: 1, 7: 2, 8: 0}\n assert (\n Molecule.are_isomorphic(ethanol, ethanol_reverse, return_atom_map=True)[1]\n == ref_mapping\n )\n # check matching with nx.Graph atomic numbers and connectivity only\n assert (\n Molecule.are_isomorphic(\n ethanol,\n ethanol_reverse.to_networkx(),\n aromatic_matching=False,\n formal_charge_matching=False,\n bond_order_matching=False,\n atom_stereochemistry_matching=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n # check matching with nx.Graph with full matching\n assert ethanol.is_isomorphic_with(ethanol_reverse.to_networkx()) is True\n\n from openff.toolkit.topology.topology import Topology\n\n topology = Topology.from_molecules(ethanol)\n assert (\n Molecule.are_isomorphic(\n ethanol,\n [*topology.molecules][0],\n aromatic_matching=False,\n formal_charge_matching=False,\n bond_order_matching=False,\n atom_stereochemistry_matching=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n # test hill formula passes but isomorphic fails\n mol1 = Molecule.from_smiles(\"Fc1ccc(F)cc1\")\n mol2 = 
Molecule.from_smiles(\"Fc1ccccc1F\")\n assert mol1.is_isomorphic_with(mol2) is False\n assert mol2.is_isomorphic_with(mol1) is False", "def test_add_lone_pairs_by_atom_valance(self):\n adj1 = \"\"\"multiplicity 3\n1 N u2 p1 c0 {2,S}\n2 S u0 p2 c0 {1,S} {3,S}\n3 H u0 p0 c0 {2,S}\"\"\"\n mol1 = Molecule().from_adjacency_list(adjlist=adj1)\n converter.add_lone_pairs_by_atom_valance(mol1)\n self.assertEqual(mol1.to_smiles(), '[N]S')\n mol1.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol1)\n self.assertEqual(mol1.to_smiles(), 'N#S')\n\n adj2 = \"\"\"multiplicity 3\n1 N u2 p1 c0 {2,S}\n2 N u0 p1 c0 {1,S} {3,S} {4,S}\n3 H u0 p0 c0 {2,S}\n4 H u0 p0 c0 {2,S}\"\"\"\n mol2 = Molecule().from_adjacency_list(adjlist=adj2)\n converter.add_lone_pairs_by_atom_valance(mol2)\n self.assertEqual(mol2.to_smiles(), '[N]N')\n mol2.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol2)\n self.assertEqual(mol2.to_smiles(), '[N-]=[NH2+]')\n\n adj3 = \"\"\"multiplicity 3\n1 C u0 p0 c0 {2,S} {5,S} {6,S} {7,S}\n2 C u0 p0 c0 {1,S} {3,S} {8,S} {9,S}\n3 C u2 p0 c0 {2,S} {4,S}\n4 H u0 p0 c0 {3,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {1,S}\n7 H u0 p0 c0 {1,S}\n8 H u0 p0 c0 {2,S}\n9 H u0 p0 c0 {2,S}\"\"\"\n mol3 = Molecule().from_adjacency_list(adjlist=adj3)\n converter.add_lone_pairs_by_atom_valance(mol3)\n self.assertEqual(mol3.to_smiles(), '[CH]CC')\n mol3.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol3)\n self.assertEqual(mol3.to_adjacency_list(), \"\"\"1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}\n2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}\n3 C u0 p1 c0 {1,S} {9,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 c0 {2,S}\n8 H u0 p0 c0 {2,S}\n9 H u0 p0 c0 {3,S}\n\"\"\")\n\n adj4 = \"\"\"multiplicity 3\n1 C u0 p0 c0 {2,S} {4,S} {5,S} {6,S}\n2 C u0 p0 c0 {1,S} {3,S} {7,S} {8,S}\n3 N u2 p1 c0 {2,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {1,S}\n7 H u0 p0 c0 {2,S}\n8 H u0 p0 c0 {2,S}\"\"\"\n mol4 = Molecule().from_adjacency_list(adjlist=adj4)\n converter.add_lone_pairs_by_atom_valance(mol4)\n self.assertEqual(mol4.to_smiles(), 'CC[N]')\n mol4.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol4)\n self.assertEqual(mol4.to_adjacency_list(), \"\"\"1 N u0 p2 c0 {3,S}\n2 C u0 p0 c0 {3,S} {4,S} {5,S} {6,S}\n3 C u0 p0 c0 {1,S} {2,S} {7,S} {8,S}\n4 H u0 p0 c0 {2,S}\n5 H u0 p0 c0 {2,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 c0 {3,S}\n8 H u0 p0 c0 {3,S}\n\"\"\")", "def test_check_isomorphism(self):\n mol1 = Molecule(smiles='[O-][N+]#N')\n mol2 = Molecule(smiles='[N-]=[N+]=O')\n self.assertTrue(converter.check_isomorphism(mol1, mol2))", "def assert_molecules_match_after_remap(self, mol1, mol2):\n for atoms in zip(mol1.atoms, mol2.atoms):\n assert atoms[0].to_dict() == atoms[1].to_dict()\n # bonds will not be in the same order in the molecule and the atom1 and atom2 indecies could be out of\n # order make a dict to compare them both\n remapped_bonds = dict(\n ((bond.atom1_index, bond.atom2_index), bond) for bond in mol2.bonds\n )\n for bond in mol1.bonds:\n key = (bond.atom1_index, bond.atom2_index)\n if key not in remapped_bonds:\n key = tuple(reversed(key))\n assert key in remapped_bonds\n # now compare each attribute of the bond except the atom indexes\n bond_dict = bond.to_dict()\n del bond_dict[\"atom1\"]\n del bond_dict[\"atom2\"]\n remapped_bond_dict = remapped_bonds[key].to_dict()\n del remapped_bond_dict[\"atom1\"]\n del remapped_bond_dict[\"atom2\"]\n assert mol1.n_bonds == mol2.n_bonds\n assert mol1.n_angles == mol2.n_angles\n assert mol1.n_propers == 
mol2.n_propers\n assert mol1.n_impropers == mol2.n_impropers\n assert mol1.total_charge == mol2.total_charge\n assert mol1.partial_charges.all() == mol2.partial_charges.all()", "def test_is_isomorphic(self):\n mol1 = converter.s_bonds_mol_from_xyz(self.xyz1['dict'])\n mol2 = converter.s_bonds_mol_from_xyz(self.xyz1['dict_diff_order'])\n self.assertTrue(mol1.is_isomorphic(mol2, save_order=True, strict=False))\n\n mol1 = converter.s_bonds_mol_from_xyz(self.xyz11['dict'])\n mol2 = converter.s_bonds_mol_from_xyz(self.xyz11['dict_diff_order'])\n self.assertTrue(mol1.is_isomorphic(mol2, save_order=True, strict=False))\n\n mol1 = converter.s_bonds_mol_from_xyz(self.xyz10['dict'])\n mol2 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order1'])\n mol3 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order2'])\n self.assertTrue(mol1.is_isomorphic(mol2, save_order=True, strict=False))\n self.assertTrue(mol1.is_isomorphic(mol3, save_order=True, strict=False))", "def assert_molecules_match_after_remap(mol1, mol2):\n for atoms in zip(mol1.atoms, mol2.atoms):\n assert atoms[0].to_dict() == atoms[1].to_dict()\n # bonds will not be in the same order in the molecule and the atom1 and atom2 indecies could be out of order\n # make a dict to compare them both\n remapped_bonds = dict(\n ((bond.atom1_index, bond.atom2_index), bond) for bond in mol2.bonds\n )\n for bond in mol1.bonds:\n key = (bond.atom1_index, bond.atom2_index)\n if key not in remapped_bonds:\n key = tuple(reversed(key))\n assert key in remapped_bonds\n # now compare each attribute of the bond except the atom indexes\n bond_dict = bond.to_dict()\n del bond_dict[\"atom1\"]\n del bond_dict[\"atom2\"]\n remapped_bond_dict = remapped_bonds[key].to_dict()\n del remapped_bond_dict[\"atom1\"]\n del remapped_bond_dict[\"atom2\"]\n assert mol1.n_bonds == mol2.n_bonds\n assert mol1.n_angles == mol2.n_angles\n assert mol1.n_propers == mol2.n_propers\n assert mol1.n_impropers == mol2.n_impropers\n assert mol1.total_charge == mol2.total_charge\n assert mol1.partial_charges.all() == mol2.partial_charges.all()", "def test_order_atoms(self):\n mol1 = converter.s_bonds_mol_from_xyz(self.xyz10['dict'])\n mol2 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order1'])\n mol3 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order2'])\n converter.order_atoms(ref_mol=mol1, mol=mol2)\n for atom1, atom2 in zip(mol1.atoms, mol2.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n converter.order_atoms(ref_mol=mol3, mol=mol1)\n for atom1, atom2 in zip(mol3.atoms, mol1.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n\n ref_mol = Molecule(smiles='[CH](CC[CH]c1ccccc1)c1ccccc1')\n mol_list = ref_mol.copy(deep=True).generate_resonance_structures(keep_isomorphic=False, filter_structures=True)\n for mol in mol_list:\n converter.order_atoms(ref_mol=ref_mol, mol=mol)\n bond_dict = dict()\n for index1, atom1 in enumerate(ref_mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = ref_mol.atoms.index(atom2)\n if index1 < index2:\n if index1 not in bond_dict:\n bond_dict[index1] = [index2]\n else:\n bond_dict[index1].append(index2)\n for index1, atom1 in enumerate(mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = mol.atoms.index(atom2)\n if index1 < index2:\n self.assertIn(index2, bond_dict[index1]) # check that these atoms are connected in all mols", "def makeResidueAtomSets(residue, aromaticsEquivalent=True):\n \n getResidueMapping(residue)\n \n equivalent = {}\n elementSymbolDict = {}\n nonequivalent = {}\n multiSet = {}\n 
chemAtomSetDict = {}\n inMultiSet = {}\n molType = residue.molResidue.molType\n \n for atom in residue.atoms: \n chemAtom = atom.chemAtom\n chemAtomSetDict[atom] = chemAtom\n elementSymbol = chemAtom.elementSymbol\n chemAtomSet = chemAtom.chemAtomSet\n\n if chemAtomSet is None:\n name = chemAtom.name\n makeAtomSet(name,(atom,),None,'simple')\n \n else:\n name = chemAtomSet.name\n elementSymbolDict[name] = elementSymbol\n chemAtomSetDict[name] = chemAtomSet\n if chemAtomSet.isEquivalent:\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n elif (chemAtomSet.isEquivalent is None) and atom.atomSet and (len(atom.atomSet.atoms) > 1):\n # aromatic rotation prev set\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n elif (chemAtomSet.isEquivalent is None) and (not atom.atomSet) and aromaticsEquivalent:\n # aromatic rotation to be set\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n else:\n if nonequivalent.get(name) is None:\n nonequivalent[name] = []\n nonequivalent[name].append(atom)\n \n if chemAtomSet.chemAtomSet is not None:\n multiName = chemAtomSet.chemAtomSet.name\n chemAtomSetDict[multiName] = chemAtomSet.chemAtomSet\n elementSymbolDict[multiName] = elementSymbol\n if multiSet.get(multiName) is None:\n multiSet[multiName] = {}\n multiSet[multiName][name] = 1\n inMultiSet[name] = multiName\n\n for groupName in equivalent.keys():\n atoms = equivalent[groupName]\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n if len(atoms)==2:\n # not enough atoms for multi sets!\n makeAtomSet(groupName,atoms,chemAtomSet,'simple')\n else:\n if inMultiSet.get(groupName):\n # e.g. for Val Hg1*\n makeAtomSet(groupName,atoms,chemAtomSet,'stereo')\n \n else:\n makeAtomSet(groupName,atoms,chemAtomSet,'simple')\n\n for groupName in nonequivalent.keys():\n atoms = nonequivalent[groupName]\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n atomSetNames = []\n \n if len(atoms) == 1:\n atom = atoms[0]\n # not enough atoms for prochiral. Corrupt ChemComp\n makeAtomSet(atom.name, atoms, None, 'simple')\n continue\n \n for atom in atoms:\n name = chemAtomSetDict[atom].name\n makeAtomSet(name,(atom,),chemAtomSet,'stereo')\n atomSetNames.append(name)\n\n for n, atom in enumerate(atoms):\n \n #name = chemAtomSetDict[atom].name\n #name2 = makeNonStereoName(molType, name, n)\n # Shouldn't have to do this if non-equiv groups have paired names\n \n name2 = makeNonStereoName(molType, '%s%d' % (chemAtomSet.name[:-1], n), n)\n \n makeGuiMultiAtomSet(residue, name2, atomSetNames,\n elementSymbol,'nonstereo',chemAtomSet)\n\n makeGuiMultiAtomSet(residue, groupName, atomSetNames,\n elementSymbol,'ambiguous',chemAtomSet)\n\n for groupName in multiSet.keys():\n atomSetNames = multiSet[groupName].keys()\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n if \"|\" in groupName:\n # we don't do these pseudoatoms in Analysis\n continue\n\n # e.g. 
for Val Hga*\n for n, atomSetName in enumerate(atomSetNames):\n name2 = makeNonStereoName(molType, atomSetName, n)\n makeGuiMultiAtomSet(residue, name2, atomSetNames,\n elementSymbol,'nonstereo',chemAtomSet)\n \n makeGuiMultiAtomSet(residue, groupName, atomSetNames,\n elementSymbol,'ambiguous',chemAtomSet)", "def is_valid_single_attempt(self, atoms_init, atoms_final):\n from scipy.spatial import cKDTree as KDTree\n from random import shuffle\n atoms1 = atoms_init.copy()\n atoms2 = atoms_final.copy()\n\n vol1 = atoms1.get_volume()\n vol2 = atoms2.get_volume()\n if vol2 > vol1:\n ratio = (vol2/vol1)**(1.0/3.0)\n cell1 = atoms1.get_cell()\n atoms1.set_cell(cell1*ratio, scale_atoms=True)\n else:\n ratio = (vol1/vol2)**(1.0/3.0)\n cell2 = atoms2.get_cell()\n atoms2.set_cell(cell2*ratio, scale_atoms=True)\n\n # Try construct the relation\n used_indices = []\n tree = KDTree(atoms2.get_positions())\n indices = list(range(0, len(atoms1)))\n shuffle(indices)\n for atom in atoms1:\n if atom.symbol in self.exclude:\n continue\n dist, closest = tree.query(atom.position, k=12)\n srt_indx = np.argsort(dist)\n dist = [dist[indx] for indx in srt_indx]\n closest = [closest[indx] for indx in srt_indx]\n\n if all(c in used_indices for c in closest):\n # More than one atom is closest to this\n # structure\n self.rejected_reason = \"More than one atom mapped onto the \"\n self.rejected_reason += \"same atoms in the initial structure\"\n return False\n\n # First, unused with mathing symbol\n closest_indx = None\n closest_dist = None\n for i, indx in enumerate(closest):\n if atoms2[indx].symbol == atom.symbol and indx not in used_indices:\n closest_indx = indx\n closest_dist = dist[i]\n break\n\n if closest_indx is None:\n self.rejected_reason = \"No unused atoms with macthing symbol!\"\n return False\n \n used_indices.append(closest_indx)\n if closest_dist > self.max_displacement:\n # The displacement is larger than the tolereance\n self.rejected_reason = \"Max displacement too large\"\n return False\n \n if atom.symbol != atoms2[closest_indx].symbol:\n self.rejected_reason = \"Mapped symbol does not match!\"\n return False\n return True", "def basic_compare(self, other: \"Molecule\") -> bool:\n return self.inchi_key[:14] == other.inchi_key[:14]", "def test_chemical_environment_matches_OE(self):\n # TODO: Move this to test_toolkits, test all available toolkits\n # Create chiral molecule\n toolkit_wrapper = OpenEyeToolkitWrapper()\n molecule = Molecule()\n atom_C = molecule.add_atom(6, 0, False, stereochemistry=\"R\", name=\"C\")\n atom_H = molecule.add_atom(1, 0, False, name=\"H\")\n atom_Cl = molecule.add_atom(17, 0, False, name=\"Cl\")\n atom_Br = molecule.add_atom(35, 0, False, name=\"Br\")\n atom_F = molecule.add_atom(9, 0, False, name=\"F\")\n molecule.add_bond(atom_C, atom_H, 1, False)\n molecule.add_bond(atom_C, atom_Cl, 1, False)\n molecule.add_bond(atom_C, atom_Br, 1, False)\n molecule.add_bond(atom_C, atom_F, 1, False)\n # Test known cases\n matches = molecule.chemical_environment_matches(\n \"[#6:1]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 1 # it should have one tagged atom\n assert set(matches[0]) == set([atom_C])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[#1:2]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 2 # it should have two tagged 
atoms\n assert set(matches[0]) == set([atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[Cl:1]-[C:2]-[H:3]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 3 # it should have three tagged atoms\n assert set(matches[0]) == set([atom_Cl, atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[*:2]\", toolkit_registry=toolkit_wrapper\n )\n assert len(matches) == 4 # there should be four matches\n for match in matches:\n assert len(match) == 2 # each match should have two tagged atoms\n # Test searching for stereo-specific SMARTS\n matches = molecule.chemical_environment_matches(\n \"[#6@:1](-[F:2])(-[Cl])(-[Br])(-[H])\", toolkit_registry=toolkit_wrapper\n )\n assert len(matches) == 1 # there should be one match\n for match in matches:\n assert len(match) == 2 # each match should have two tagged atoms\n matches = molecule.chemical_environment_matches(\n \"[#6@@:1](-[F:2])(-[Cl])(-[Br])(-[H])\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 0\n ) # this is the wrong stereochemistry, so there shouldn't be any matches", "def test_chemical_environment_matches_OE(self):\n # TODO: Move this to test_toolkits, test all available toolkits\n # Create chiral molecule\n from simtk.openmm.app import element\n\n toolkit_wrapper = OpenEyeToolkitWrapper()\n molecule = Molecule()\n atom_C = molecule.add_atom(\n element.carbon.atomic_number, 0, False, stereochemistry=\"R\", name=\"C\"\n )\n atom_H = molecule.add_atom(element.hydrogen.atomic_number, 0, False, name=\"H\")\n atom_Cl = molecule.add_atom(element.chlorine.atomic_number, 0, False, name=\"Cl\")\n atom_Br = molecule.add_atom(element.bromine.atomic_number, 0, False, name=\"Br\")\n atom_F = molecule.add_atom(element.fluorine.atomic_number, 0, False, name=\"F\")\n molecule.add_bond(atom_C, atom_H, 1, False)\n molecule.add_bond(atom_C, atom_Cl, 1, False)\n molecule.add_bond(atom_C, atom_Br, 1, False)\n molecule.add_bond(atom_C, atom_F, 1, False)\n # Test known cases\n matches = molecule.chemical_environment_matches(\n \"[#6:1]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 1 # it should have one tagged atom\n assert set(matches[0]) == set([atom_C])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[#1:2]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 2 # it should have two tagged atoms\n assert set(matches[0]) == set([atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[Cl:1]-[C:2]-[H:3]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 3 # it should have three tagged atoms\n assert set(matches[0]) == set([atom_Cl, atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[*:2]\", toolkit_registry=toolkit_wrapper\n )\n assert len(matches) == 4 # there should be four matches\n for match in matches:\n assert len(match) == 2 # each match should have two tagged atoms", "def test_molecules_from_xyz(self):\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz6['dict'])\n\n # check that the atom order is the same\n self.assertTrue(s_mol.atoms[0].is_sulfur())\n 
self.assertTrue(b_mol.atoms[0].is_sulfur())\n self.assertTrue(s_mol.atoms[1].is_oxygen())\n self.assertTrue(b_mol.atoms[1].is_oxygen())\n self.assertTrue(s_mol.atoms[2].is_oxygen())\n self.assertTrue(b_mol.atoms[2].is_oxygen())\n self.assertTrue(s_mol.atoms[3].is_nitrogen())\n self.assertTrue(b_mol.atoms[3].is_nitrogen())\n self.assertTrue(s_mol.atoms[4].is_carbon())\n self.assertTrue(b_mol.atoms[4].is_carbon())\n self.assertTrue(s_mol.atoms[5].is_hydrogen())\n self.assertTrue(b_mol.atoms[5].is_hydrogen())\n self.assertTrue(s_mol.atoms[6].is_hydrogen())\n self.assertTrue(b_mol.atoms[6].is_hydrogen())\n self.assertTrue(s_mol.atoms[7].is_hydrogen())\n self.assertTrue(b_mol.atoms[7].is_hydrogen())\n self.assertTrue(s_mol.atoms[8].is_hydrogen())\n self.assertTrue(b_mol.atoms[8].is_hydrogen())\n self.assertTrue(s_mol.atoms[9].is_hydrogen())\n self.assertTrue(b_mol.atoms[9].is_hydrogen())\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz7['dict'])\n self.assertTrue(s_mol.atoms[0].is_oxygen())\n self.assertTrue(b_mol.atoms[0].is_oxygen())\n self.assertTrue(s_mol.atoms[2].is_carbon())\n self.assertTrue(b_mol.atoms[2].is_carbon())\n\n expected_bonded_adjlist = \"\"\"multiplicity 2\n1 O u0 p2 c0 {6,S} {10,S}\n2 O u0 p2 c0 {3,S} {28,S}\n3 C u0 p0 c0 {2,S} {8,S} {14,S} {15,S}\n4 C u0 p0 c0 {7,S} {16,S} {17,S} {18,S}\n5 C u0 p0 c0 {7,S} {19,S} {20,S} {21,S}\n6 C u0 p0 c0 {1,S} {22,S} {23,S} {24,S}\n7 C u1 p0 c0 {4,S} {5,S} {9,S}\n8 C u0 p0 c0 {3,S} {10,D} {11,S}\n9 C u0 p0 c0 {7,S} {11,D} {12,S}\n10 C u0 p0 c0 {1,S} {8,D} {13,S}\n11 C u0 p0 c0 {8,S} {9,D} {25,S}\n12 C u0 p0 c0 {9,S} {13,D} {26,S}\n13 C u0 p0 c0 {10,S} {12,D} {27,S}\n14 H u0 p0 c0 {3,S}\n15 H u0 p0 c0 {3,S}\n16 H u0 p0 c0 {4,S}\n17 H u0 p0 c0 {4,S}\n18 H u0 p0 c0 {4,S}\n19 H u0 p0 c0 {5,S}\n20 H u0 p0 c0 {5,S}\n21 H u0 p0 c0 {5,S}\n22 H u0 p0 c0 {6,S}\n23 H u0 p0 c0 {6,S}\n24 H u0 p0 c0 {6,S}\n25 H u0 p0 c0 {11,S}\n26 H u0 p0 c0 {12,S}\n27 H u0 p0 c0 {13,S}\n28 H u0 p0 c0 {2,S}\n\"\"\"\n expected_mol = Molecule().from_adjacency_list(expected_bonded_adjlist)\n self.assertEqual(b_mol.to_adjacency_list(), expected_bonded_adjlist)\n # the is_isomorphic test must come after the adjlist test since it changes the atom order\n self.assertTrue(b_mol.is_isomorphic(expected_mol))\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz10['dict'], multiplicity=1, charge=0)\n self.assertIsNotNone(s_mol)\n self.assertIsNotNone(b_mol)\n for atom1, atom2 in zip(s_mol.atoms, b_mol.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n for atom1, symbol in zip(s_mol.atoms, self.xyz10['dict']['symbols']):\n self.assertEqual(atom1.symbol, symbol)\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz_dict_13, multiplicity=1, charge=0)\n for atom1, atom2 in zip(s_mol.atoms, b_mol.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n for atom1, symbol in zip(s_mol.atoms, self.xyz_dict_13['symbols']):\n self.assertEqual(atom1.symbol, symbol)\n self.assertEqual(s_mol.multiplicity, 1)\n self.assertEqual(b_mol.multiplicity, 1)\n self.assertFalse(any(atom.radical_electrons for atom in b_mol.atoms))", "def test_canonical_ordering_openeye(self):\n from openff.toolkit.utils.toolkits import OpenEyeToolkitWrapper\n\n openeye = OpenEyeToolkitWrapper()\n # get ethanol in canonical order\n ethanol = create_ethanol()\n # get reversed non canonical ethanol\n reversed_ethanol = create_reversed_ethanol()\n # get the canonical ordering\n canonical_ethanol = reversed_ethanol.canonical_order_atoms(openeye)\n # make sure the mapping between the ethanol and the 
openeye ref canonical form is the same\n assert (\n True,\n {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8},\n ) == Molecule.are_isomorphic(canonical_ethanol, ethanol, True)", "def test_canonical_ordering_openeye(self):\n from openforcefield.utils.toolkits import OpenEyeToolkitWrapper\n\n openeye = OpenEyeToolkitWrapper()\n # get ethanol in canonical order\n ethanol = create_ethanol()\n # get reversed non canonical ethanol\n reversed_ethanol = create_reversed_ethanol()\n # get the canonical ordering\n canonical_ethanol = reversed_ethanol.canonical_order_atoms(openeye)\n # make sure the mapping between the ethanol and the openeye ref canonical form is the same\n assert (\n True,\n {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8},\n ) == Molecule.are_isomorphic(canonical_ethanol, ethanol, True)", "def test_isomorphic_perumtations(self, inputs):\n # get benzene with all aromatic atoms/bonds labeled\n benzene = Molecule.from_smiles(\"c1ccccc1\")\n # get benzene with no aromatic labels\n benzene_no_aromatic = create_benzene_no_aromatic()\n # now test all of the variations\n assert (\n Molecule.are_isomorphic(\n benzene,\n benzene_no_aromatic,\n aromatic_matching=inputs[\"aromatic_matching\"],\n formal_charge_matching=inputs[\"formal_charge_matching\"],\n bond_order_matching=inputs[\"bond_order_matching\"],\n atom_stereochemistry_matching=inputs[\"atom_stereochemistry_matching\"],\n bond_stereochemistry_matching=inputs[\"bond_stereochemistry_matching\"],\n )[0]\n is inputs[\"result\"]\n )\n\n assert (\n benzene.is_isomorphic_with(\n benzene_no_aromatic,\n aromatic_matching=inputs[\"aromatic_matching\"],\n formal_charge_matching=inputs[\"formal_charge_matching\"],\n bond_order_matching=inputs[\"bond_order_matching\"],\n atom_stereochemistry_matching=inputs[\"atom_stereochemistry_matching\"],\n bond_stereochemistry_matching=inputs[\"bond_stereochemistry_matching\"],\n )\n is inputs[\"result\"]\n )", "def test_isomorphic_perumtations(self, inputs):\n # get benzene with all aromatic atoms/bonds labeled\n benzene = Molecule.from_smiles(\"c1ccccc1\")\n # get benzene with no aromatic labels\n benzene_no_aromatic = create_benzene_no_aromatic()\n # now test all of the variations\n assert (\n Molecule.are_isomorphic(\n benzene,\n benzene_no_aromatic,\n aromatic_matching=inputs[\"aromatic_matching\"],\n formal_charge_matching=inputs[\"formal_charge_matching\"],\n bond_order_matching=inputs[\"bond_order_matching\"],\n atom_stereochemistry_matching=inputs[\"atom_stereochemistry_matching\"],\n bond_stereochemistry_matching=inputs[\"bond_stereochemistry_matching\"],\n )[0]\n is inputs[\"result\"]\n )", "def invariants(mol):\n atom_ids = {}\n for a in mol.atoms:\n components = []\n components.append(a.number)\n components.append(len(a.oatoms))\n components.append(a.hcount)\n components.append(a.charge)\n components.append(a.mass)\n if len(a.rings) > 0:\n components.append(1)\n\n atom_ids[a.index] = gen_hash(components)\n\n return atom_ids", "def assert_molecule_is_equal(molecule1, molecule2, msg):\n if not (molecule1.is_isomorphic_with(molecule2)):\n raise AssertionError(msg)", "def assert_molecule_is_equal(molecule1, molecule2, msg):\n if not (molecule1.is_isomorphic_with(molecule2)):\n raise AssertionError(msg)", "def test_coords_same_direction(self): # test_change_coords = method\n mi = (0,1,1.5708)\n mj = (0,2,1.5708)\n result = new_mj_coords(mi, mj)\n self.assertEqual(result, (0.3317021649341794, 0.9433841602327115, 0.0))\n\n '''\n the method .assertEqual(a,b) is equivalent to a == b\n other 
methods include: .assertIs(a,b) = a is b, .assertIsNone(x) = x is None,\n .assertIn(a,b) = a in b, and .assertIsInstance(a,b) = isinstance(a, b)\n\n\n '''", "def test_get_molecule_least_similar_to(self):\n csv_fpath = self.smiles_seq_to_xl_or_csv(ftype=\"csv\")\n for descriptor in SUPPORTED_FPRINTS:\n for similarity_measure in SUPPORTED_SIMILARITIES:\n molecule_set = MoleculeSet(\n molecule_database_src=csv_fpath,\n molecule_database_src_type=\"csv\",\n fingerprint_type=descriptor,\n similarity_measure=similarity_measure,\n is_verbose=False,\n )\n for mol_smile, mol in zip(TEST_SMILES,\n molecule_set.molecule_database):\n compare_task = CompareTargetMolecule(\n target_molecule_smiles=mol_smile)\n [furthest_mol], [similarity] = compare_task.\\\n get_hits_dissimilar_to(molecule_set)\n mol_similarities = molecule_set.compare_against_molecule(\n mol)\n self.assertEqual(\n np.min(mol_similarities),\n mol.get_similarity_to(\n molecule_set.molecule_database[furthest_mol],\n molecule_set.similarity_measure\n ),\n f\"Expected furthest mol to have minimum \"\n f\"similarity to target molecule \"\n f\"using similarity measure: {similarity_measure}, \"\n f\"descriptor: {descriptor}, \"\n f\"for molecule {mol.mol_text}\",\n )\n self.assertGreaterEqual(similarity, 0.,\n \"Expected similarity value to \"\n \"be >= 0.\"\n f\"using similarity measure: \"\n f\"{similarity_measure}, \"\n f\"descriptor: {descriptor}, \"\n f\"for molecule {mol.mol_text}\")\n self.assertLessEqual(similarity, 1.,\n \"Expected similarity value to \"\n \"be <= 1.\"\n f\"using similarity measure: \"\n f\"{similarity_measure}, \"\n f\"descriptor: {descriptor}, \"\n f\"for molecule {mol.mol_text}\"\n )", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def test_unique_atom_names(self, molecule):\n # The dataset we load in has atom names, so let's strip them first\n # to ensure that we can fail the uniqueness check\n for atom in molecule.atoms:\n atom.name = \"\"\n assert not (molecule.has_unique_atom_names)\n # Then genreate unique atom names using the built in algorithm\n molecule.generate_unique_atom_names()\n # Check that the molecule has unique atom names\n assert molecule.has_unique_atom_names\n # Check molecule.has_unique_atom_names is working correctly\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names\n molecule.atoms[1].name = molecule.atoms[0].name # no longer unique\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names\n assert all(\"x\" in a.name for a in molecule.atoms)", "def are_clone_sequences(atoms1, atoms2):\n\n for a1, a2 in it.zip_longest(atoms1, atoms2):\n assert a1 is not a2\n assert a1.get_id() == a2.get_id()\n assert a1.get_charge() == a2.get_charge()\n assert a1.__class__ is a2.__class__", "def assert_equal_matrices(array, matrix1, matrix2, periodic):\n nonlocal CUTOFF\n indices = np.where(matrix1 != matrix2)\n for index in range(len(indices[0])):\n if len(indices) == 2:\n # multi_model = False -> AtomArray\n m = None\n i = indices[0][index]\n j = indices[1][index]\n box = array.box if periodic else None\n distance = struc.distance(array[i], array[j], box=box)\n if len(indices) == 3:\n # multi_model = True -> AtomArrayStack\n m = indices[0][index]\n i = indices[1][index]\n j = indices[2][index]\n box = array.box[m] if periodic else None\n distance = struc.distance(array[m,i], 
array[m,j], box=box)\n try:\n assert distance == pytest.approx(CUTOFF, abs=1e-4)\n except AssertionError:\n print(f\"Model {m}, Atoms {i} and {j}\")\n raise", "def test_single_molecule(self, single_mol_system, sequence):\n expected = list(itertools.chain(*([element] * 3 for element in sequence)))\n processor = dssp.AnnotateResidues(\"test\", sequence)\n processor.run_system(single_mol_system)\n found = self.sequence_from_system(single_mol_system, \"test\")\n assert found == expected", "def test_cx_equivalence_1cx(self, seed=1):\n state = np.random.default_rng(seed)\n rnd = 2 * np.pi * state.random(size=12)\n\n qr = QuantumRegister(2, name=\"q\")\n qc = QuantumCircuit(qr)\n\n qc.u(rnd[0], rnd[1], rnd[2], qr[0])\n qc.u(rnd[3], rnd[4], rnd[5], qr[1])\n\n qc.cx(qr[1], qr[0])\n\n qc.u(rnd[6], rnd[7], rnd[8], qr[0])\n qc.u(rnd[9], rnd[10], rnd[11], qr[1])\n\n sim = UnitarySimulatorPy()\n unitary = execute(qc, sim).result().get_unitary()\n self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 1)\n self.assertTrue(Operator(two_qubit_cnot_decompose(unitary)).equiv(unitary))", "def find_connected_atoms(struct, tolerance=0.45, ldict=JmolNN().el_radius):\n n_atoms = len(struct.species)\n fc = np.array(struct.frac_coords)\n fc_copy = np.repeat(fc[:, :, np.newaxis], 27, axis=2)\n neighbors = np.array(list(itertools.product([0, 1, -1], [0, 1, -1], [0, 1, -1]))).T\n neighbors = np.repeat(neighbors[np.newaxis, :, :], 1, axis=0)\n fc_diff = fc_copy - neighbors\n species = list(map(str, struct.species))\n # in case of charged species\n for i, item in enumerate(species):\n if not item in ldict.keys():\n species[i] = str(Specie.from_string(item).element)\n latmat = struct.lattice.matrix\n connected_matrix = np.zeros((n_atoms,n_atoms))\n\n for i in range(n_atoms):\n for j in range(i + 1, n_atoms):\n max_bond_length = ldict[species[i]] + ldict[species[j]] + tolerance\n frac_diff = fc_diff[j] - fc_copy[i]\n distance_ij = np.dot(latmat.T, frac_diff)\n # print(np.linalg.norm(distance_ij,axis=0))\n if sum(np.linalg.norm(distance_ij, axis=0) < max_bond_length) > 0:\n connected_matrix[i, j] = 1\n connected_matrix[j, i] = 1\n return connected_matrix", "def test_isomorphic_striped_stereochemistry(self):\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n # Ensure default value is respected and order does not matter\n assert Molecule.are_isomorphic(mol1, mol2, strip_pyrimidal_n_atom_stereo=True)\n assert Molecule.are_isomorphic(mol1, mol2)\n assert Molecule.are_isomorphic(mol2, mol1)\n\n assert mol1 == mol2\n assert Molecule.from_smiles(\"CCC[N@](C)CC\") == Molecule.from_smiles(\n \"CCC[N@@](C)CC\"\n )\n\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n assert not Molecule.are_isomorphic(\n mol1,\n mol2,\n strip_pyrimidal_n_atom_stereo=False,\n atom_stereochemistry_matching=True,\n bond_stereochemistry_matching=True,\n )[0]", "def test_oblique_sequence_match(self):\n dna = self._create_dna()\n\n # Another codon pair\n other_pair = self._create_codon_pair()\n\n self.assertFalse(dna.has_sequence(other_pair))\n\n # Existing codon pair\n self.assertTrue(dna.has_sequence(dna.top_left_oblique_pair))\n self.assertTrue(dna.has_sequence(dna.bottom_left_oblique_pair))", "def test_unique_atom_names(self, molecule):\n # The dataset we load in has atom names, so let's strip them first\n # to ensure that we can fail the uniqueness check\n for atom in molecule.atoms:\n atom.name = \"\"\n assert not 
(molecule.has_unique_atom_names)\n # Then genreate unique atom names using the built in algorithm\n molecule.generate_unique_atom_names()\n # Check that the molecule has unique atom names\n assert molecule.has_unique_atom_names\n # Check molecule.has_unique_atom_names is working correctly\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names\n molecule.atoms[1].name = molecule.atoms[0].name # no longer unique\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names", "def test_run_molecule(self, single_mol_system):\n sequence = \"ABCDE\"\n expected = list(itertools.chain(*([element] * 3 for element in sequence)))\n processor = dssp.AnnotateResidues(\"test\", sequence)\n processor.run_molecule(single_mol_system.molecules[0])\n found = self.sequence_from_system(single_mol_system, \"test\")\n assert found == expected", "def test_consistent_ids(self) -> None:\n bnode = BNode()\n g0_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Golan Trevize\")),\n (bnode, RDF.type, FOAF.Person),\n }\n bnode = BNode()\n g1_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Janov Pelorat\")),\n (bnode, RDF.type, FOAF.Person),\n }\n\n g0 = Graph()\n g0 += g0_ts\n cg0 = to_canonical_graph(g0)\n cg0_ts = GraphHelper.triple_set(cg0)\n\n g1 = Graph()\n g1 += g1_ts\n cg1 = to_canonical_graph(g1)\n cg1_ts = GraphHelper.triple_set(cg1)\n\n assert cg0_ts.issubset(\n cg1_ts\n ), \"canonical triple set cg0_ts should be a subset of canonical triple set cg1_ts\"", "def get_atom_connectivity(self):\n m, connectivity = self.owner, []\n for index, i in enumerate(self.rix):\n for j in self.rix[index + 1:]:\n a1 = m.rings[i].aix\n a2 = m.rings[j].aix\n if set(a1).intersection(a2):\n connectivity.append((i, j))\n return tuple(connectivity)", "def test_return_molecule(self):\n\n # Ask for a few molecules.\n mol1 = mol_res_spin.return_molecule('#Ap4Aase')\n mol2 = mol_res_spin.return_molecule(selection='#RNA', pipe='orig')\n\n # Test the data of molecule 1.\n self.assertEqual(mol1.name, 'Ap4Aase')\n\n # Test the data of molecule 2.\n self.assertEqual(mol2.name, 'RNA')", "def comaIsSymmetric(self):\n\t\tfor i in range(2*self.totalBins):\n\t\t\tfor j in range(2*self.totalBins):\n\t\t\t\tif not self.coma[i,j] == self.coma[j,i]:\n\t\t\t\t\tprint i,j,self.coma[i,j],self.coma[j,i]\n\t\t\t\t\treturn False\n\t\treturn True", "def test_clashing_atoms():\n benzene_path = examples_paths()['benzene']\n toluene_path = examples_paths()['toluene']\n with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_content = get_template_script(tmp_dir, keep_openeye=True)\n system_id = 'explicit-system'\n system_description = yaml_content['systems'][system_id]\n system_description['pack'] = True\n system_description['solvent'] = utils.CombinatorialLeaf(['vacuum', 'PME'])\n\n # Sanity check: at the beginning molecules clash\n toluene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(toluene_path, molecule_idx=0))\n benzene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(benzene_path, molecule_idx=0))\n assert pipeline.compute_min_dist(toluene_pos, benzene_pos) < pipeline.SetupDatabase.CLASH_THRESHOLD\n\n exp_builder = ExperimentBuilder(yaml_content)\n\n for sys_id in [system_id + '_vacuum', system_id + '_PME']:\n system_dir = os.path.dirname(\n exp_builder._db.get_system(sys_id)[0].position_path)\n\n # Get positions of molecules in the final system\n prmtop = openmm.app.AmberPrmtopFile(os.path.join(system_dir, 
'complex.prmtop'))\n inpcrd = openmm.app.AmberInpcrdFile(os.path.join(system_dir, 'complex.inpcrd'))\n positions = inpcrd.getPositions(asNumpy=True).value_in_unit(unit.angstrom)\n topography = Topography(prmtop.topology, ligand_atoms='resname TOL')\n benzene_pos2 = positions.take(topography.receptor_atoms, axis=0)\n toluene_pos2 = positions.take(topography.ligand_atoms, axis=0)\n # atom_indices = pipeline.find_components(prmtop.createSystem(), prmtop.topology, 'resname TOL')\n # benzene_pos2 = positions.take(atom_indices['receptor'], axis=0)\n # toluene_pos2 = positions.take(atom_indices['ligand'], axis=0)\n\n # Test that clashes are resolved in the system\n min_dist, max_dist = pipeline.compute_min_max_dist(toluene_pos2, benzene_pos2)\n assert min_dist >= pipeline.SetupDatabase.CLASH_THRESHOLD\n\n # For solvent we check that molecule is within the box\n if sys_id == system_id + '_PME':\n assert max_dist <= exp_builder._db.solvents['PME']['clearance'].value_in_unit(unit.angstrom)", "def nearby():\n for i in ids:\n for j in ids:\n if i != j:\n if sum([1 for x,y in zip(i,j) if x!=y]) == 1:\n print(\"\".join([x for x,y in zip(i,j) if x==y]))\n return", "def __eq__(self, other):\r\n return self.id_map == other.id_map and self.matrix == other.matrix\\\r\n and self.size == other.size", "def is_same(source_molecule, target_molecule):\n return source_molecule.mol_text == target_molecule.mol_text", "def test_order_atoms_in_mol_list(self):\n ref_mol = Molecule(smiles='[CH](CC[CH]c1ccccc1)c1ccccc1')\n mol_list = ref_mol.copy(deep=True).generate_resonance_structures(keep_isomorphic=False, filter_structures=True)\n success = converter.order_atoms_in_mol_list(ref_mol=ref_mol, mol_list=mol_list)\n self.assertTrue(success)\n bond_dict = dict()\n for index1, atom1 in enumerate(ref_mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = ref_mol.atoms.index(atom2)\n if index1 < index2:\n if index1 not in bond_dict:\n bond_dict[index1] = [index2]\n else:\n bond_dict[index1].append(index2)\n for mol in mol_list:\n for index1, atom1 in enumerate(mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = mol.atoms.index(atom2)\n if index1 < index2:\n self.assertIn(index2, bond_dict[index1]) # check that these atoms are connected in all mols", "def pseudopotentialise_molecule(self, sysargs=None, execute_deletion=True):\n\n # Find atoms to replace\n deletion_list = []\n if len(sysargs) > 2:\n if 'del' in sysargs:\n deletion_list = self.parse_coord_list(sysargs[4])\n replacement_list = self.parse_coord_list(sysargs[2])\n atoms_to_potentialise = list(item for item in self.coord_list if item[\"#\"] in replacement_list)\n else:\n atoms_to_potentialise = (item for item in self.coord_list if item[\"el\"] == 'c')\n deletion_list = (item for item in self.coord_list if item[\"el\"] == 'h')\n print('Pseudo-potentialising carbon atoms %s ...' 
% [atom['#'] for atom in atoms_to_potentialise])\n\n potential_coords_list = []\n\n for atom in atoms_to_potentialise:\n distanced_atom_list = self.order_atoms_by_distance_from(atom['#'])\n distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n\n if len(distanced_carbon_list) == 1:\n primary_vector = None\n for non_c_atom in distanced_atom_list[1:4]:\n if non_c_atom['el'] != 'h':\n primary_vector = self.vectorise_atom(non_c_atom['#']) - self.vectorise_atom(atom['#'])\n if primary_vector is None:\n primary_vector = self.vectorise_atom(distanced_atom_list[1]['#']) - self.vectorise_atom(atom['#'])\n else:\n primary_vector = self.vectorise_atom(distanced_carbon_list[1]['#']) - self.vectorise_atom(atom['#'])\n\n normal_vector = numpy.cross(\n self.vectorise_atom(distanced_atom_list[1]['#']) - self.vectorise_atom(atom['#']),\n self.vectorise_atom(distanced_atom_list[2]['#']) - self.vectorise_atom(atom['#'])\n )\n\n primary_potential_vector = self.lengtherise_vector(primary_vector, self.atom_potential_set_distance)\n potential_set_split_vector = self.lengtherise_vector(normal_vector, self.potential_set_split_distance)\n\n relative_potential_vectors = [\n primary_potential_vector + potential_set_split_vector,\n primary_potential_vector - potential_set_split_vector\n ]\n\n for potential_set in range(self.no_potential_sets_per_atom-1):\n\n pps_positive = numpy.dot(self.construct_euler_rodriguez_matrix(\n normal_vector,\n 2*numpy.pi/self.no_potential_sets_per_atom),\n relative_potential_vectors[-2],\n )\n pps_negative = numpy.dot(self.construct_euler_rodriguez_matrix(\n normal_vector,\n 2*numpy.pi/self.no_potential_sets_per_atom),\n relative_potential_vectors[-1]\n )\n\n relative_potential_vectors.append(pps_positive)\n relative_potential_vectors.append(pps_negative)\n\n if self.add_primary_vector_potentials_as_coords is False:\n del relative_potential_vectors[0]\n del relative_potential_vectors[0]\n\n # potential coords are still relative to their atom, now make them real.\n for vector in relative_potential_vectors:\n potential_coords_list.append(\n {'#': 0, 'el': self.sp2_pseudo_element, 'x': vector[0]+atom['x'], 'y': vector[1]+atom['y'], 'z': vector[2]+atom['z']},\n )\n\n # Now add potentials to coord list, after removing the 'real' hydrogen atoms.\n if execute_deletion is True:\n self.delete_specified_atoms(deletion_list)\n for potential_coord in potential_coords_list:\n self.write_coord(potential_coord, overwrite=False)", "def neopentane():\n coords = [\n [0.000000, 0.0, 0.0],\n [0.881905, 0.881905, 0.881905],\n [-0.881905, -0.881905, 0.881905],\n [0.881905, -0.881905, -0.881905],\n [-0.881905, 0.881905, -0.881905],\n [-1.524077, 0.276170, -1.524077],\n [1.524077, 1.524077, 0.276170],\n [1.524077, -0.276170, -1.524077],\n [1.524077, 0.276170, 1.524077],\n [-1.524077, -0.276170, 1.524077],\n [1.524077, -1.524077, -0.276170],\n [-0.276170, 1.524077, -1.524077],\n [0.276170, 1.524077, 1.524077],\n [0.276170, -1.524077, -1.524077],\n [-0.276170, -1.524077, 1.524077],\n [-1.524077, 1.524077, -0.276170],\n [-1.524077, -1.524077, 0.276170],\n ]\n coords = [[float(j) / Bohr for j in i] for i in coords]\n\n symbols = [\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n ]\n\n atoms = []\n for i, _ in enumerate(coords):\n atoms.append(Atom(symbols[i], position=coords[i]))\n return Molecule(symbols=atoms)", "def sameLinking(el_a, el_b, system_el2kbid, gold_el2kbid):\n\n 
sys_el_a_id = system_el2kbid[el_a]\n sys_el_b_id = system_el2kbid[el_b]\n gol_el_a_id = gold_el2kbid[el_a]\n gol_el_b_id = gold_el2kbid[el_b]\n\n if sys_el_a_id.startswith('NIL'): sys_el_a_id = 'NIL'\n if sys_el_b_id.startswith('NIL'): sys_el_b_id = 'NIL'\n if gol_el_a_id.startswith('NIL'): gol_el_a_id = 'NIL'\n if gol_el_b_id.startswith('NIL'): gol_el_b_id = 'NIL'\n\n #print system_el2kbid\n \n return sys_el_a_id == sys_el_b_id == gol_el_a_id == gol_el_b_id", "def calculate_dihedral_atom_equivalences(mol1, mol2):\n\n # Check that the mols are identical-ish\n if mol1.GetNumHeavyAtoms() != mol2.GetNumHeavyAtoms():\n raise EqualityError('Molecules are not identical (Num Atoms) {!s} != {!s}.\\n{!s}\\n{!s}'.format(mol1.GetNumHeavyAtoms(),mol2.GetNumHeavyAtoms(),Chem.MolToSmiles(mol1),Chem.MolToSmiles(mol2)))\n if mol1.GetNumBonds() != mol2.GetNumBonds():\n raise EqualityError('Molecules are not identical (Num Bonds) {!s} != {!s}:\\n{!s}\\n{!s}'.format(mol1.GetNumBonds(),mol2.GetNumBonds(),Chem.MolToSmiles(mol1), Chem.MolToSmiles(mol2)))\n\n # Gets a list of lists of atoms in mol1 (12,16,3, ...) that match the atoms in mol2 (1,2,3, ...)\n match_patterns = mol1.GetSubstructMatches(mol2, uniquify=False)\n # Get the quadruplets to calculate the dihedrals from for mol1\n mol1_atom_sets = identify_rotatable_bond_atom_pairs(mol1)\n num_atms = mol1.GetNumHeavyAtoms()\n # List for returning\n paired_atom_sets = []\n # Iterate through the different ways of overlaying the molecule (ensures we get the minimum rmsd)\n for match_pattern in match_patterns:\n # Translate from the atoms in mol1 to the atoms in mol2 (for this match_pattern)\n trans_dict = dict(zip(match_pattern, range(0,num_atms)))\n # Translate the atoms in mol1 to the atoms in mol2\n mol2_atom_sets = [ tuple([trans_dict[atm] for atm in bond_set]) for bond_set in mol1_atom_sets]\n # Add to list\n paired_atom_sets.append((mol1_atom_sets, mol2_atom_sets))\n # Check that the atom types are identical (test)\n mol1_atom_types = [ tuple([mol1.GetAtomWithIdx(atm).GetAtomicNum() for atm in bond_set]) for bond_set in mol1_atom_sets]\n mol2_atom_types = [ tuple([mol2.GetAtomWithIdx(atm).GetAtomicNum() for atm in bond_set]) for bond_set in mol2_atom_sets]\n assert mol1_atom_types == mol2_atom_types, \"ATOM TYPES ARE NOT THE SAME ON THE DIHEDRAL ANGLE TO BE CALCULATED - THERE'S BEEN A MATCHING ERROR\"\n # Return the list of lists of paired atoms between the structures\n return paired_atom_sets", "def _compute_sims(self):\n no_duplicates = defaultdict(list)\n for num, lineset1, idx1, lineset2, idx2 in self._iter_sims():\n duplicate = no_duplicates[num]\n for couples in duplicate:\n if (lineset1, idx1) in couples or (lineset2, idx2) in couples:\n couples.add((lineset1, idx1))\n couples.add((lineset2, idx2))\n break\n else:\n duplicate.append({(lineset1, idx1), (lineset2, idx2)})\n sims = []\n for num, ensembles in no_duplicates.items():\n for couples in ensembles:\n sims.append((num, couples))\n sims.sort()\n sims.reverse()\n return sims", "def calc_asymmetric_unit_cell_indexes(n_abc, full_symm_elems):\n n_a, n_b, n_c = n_abc[0], n_abc[1], n_abc[2]\n\n point_index = numpy.stack(numpy.meshgrid(\n numpy.arange(n_a), numpy.arange(n_b), numpy.arange(n_c),\n indexing=\"ij\"), axis=0)\n point_index = point_index.reshape(point_index.shape[0], numpy.prod(point_index.shape[1:]))\n \n elem_r = full_symm_elems[4:13]\n elem_b = full_symm_elems[:4]\n\n r_ind = calc_m_v(\n numpy.expand_dims(elem_r, axis=1),\n numpy.expand_dims(point_index, axis=2), flag_m=False, 
flag_v=False)[0]\n\n div, mod = numpy.divmod(numpy.expand_dims(n_abc, axis=1), numpy.expand_dims(elem_b[3], axis=0))\n if not(numpy.all(mod == 0)):\n raise KeyError(\"Symmetry elements do not match with number of points\")\n point_index_s = numpy.mod(r_ind + numpy.expand_dims(div * elem_b[:3], axis=1),\n numpy.expand_dims(numpy.expand_dims(n_abc, axis=1), axis=2))\n value_index_s = n_c*n_b*point_index_s[0] + n_c*point_index_s[1] + point_index_s[2]\n value_index_s_sorted = numpy.sort(value_index_s, axis=1)\n\n a, ind_a_u_c, counts_a_u_c = numpy.unique(\n value_index_s_sorted[:, 0], return_index=True, return_counts=True)\n\n point_index_s_a_u_c = point_index[:, ind_a_u_c]\n\n return point_index_s_a_u_c, counts_a_u_c", "def really_covalent_isomorphic(mol1, mol2):\n return nx.is_isomorphic(\n mol1.covalent_graph,\n mol2.covalent_graph,\n node_match = iso.categorical_node_match('specie', None)\n )", "def testEquality(self):\n pass", "def __eq__(self, other):\n\t\treturn self._coords == other._coords", "def test_equivalence():\n\t\n\tfrom . import spectra as sp\n\t\n\t#analytic\n\tp_dict = {'Bfield':15000,'rb85frac':1,'Btheta':0*np.pi/180,'Bphi':0*np.pi/180,'lcell':1e-3,'T':84,'Dline':'D2','Elem':'Rb'}\n\tchiL1,chiR1,chiZ1 = sp.calc_chi([-18400],p_dict)\n\tRotMat1, n11, n21 = solve_diel(chiL1,chiR1,chiZ1,0,150,force_numeric=False)\n\t\n\t#numeric\n\tchiL2, chiR2, chiZ2 = chiL1, chiR1, chiZ1\n\t#chiL2,chiR2,chiZ2 = sp.calc_chi([-18400],p_dict)\n\tRotMat2, n12, n22 = solve_diel(chiL2,chiR2,chiZ2,0,150,force_numeric=True)\n\t\n\tprint('RM 1')\n\tprint(RotMat1)\n\n\tprint('RM 2')\n\tprint(RotMat2)\t\n\t\n\tprint('n1_1 (analytic)')\n\tprint(n11)\n\tprint('n1_2')\n\tprint(n12)\n\tprint('n2_1 (analytic)')\n\tprint(n21)\n\tprint('n2_2')\n\tprint(n22)\n\t\n\tprint('chi1')\n\tprint((chiL1, chiR1, chiZ1))\n\n\tprint('chi2')\n\tprint((chiL2, chiR2, chiZ2))", "def grid_equal (grid1, grid2):\r\n s=0 \r\n for h in range(4):\r\n for m in range(4):\r\n if grid1[h][m]==grid2[h][m]:\r\n s+=1\r\n else:\r\n ()\r\n if s==16:\r\n return True\r\n else:\r\n return False", "def test5(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('O[CH-][CH2-]','O[CH-][C-]=O')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,1)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,1)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def test4(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('CCC','OCC','OCC=O','OCCO','CCCC','OC=O','CC(O)C')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,0)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,0)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def compare(self, m2):\n assert self.natoms == m2.natoms\n for atom1 in self.atoms:\n found = False\n for atom2 in m2.atoms:\n if(atom1 == atom2):\n found = True\n break\n else:\n raise Exception(\"Atom not found! 
{0}\".format(atom1))\n else:\n pass", "def estimate_nc(self):\n mol = self.m\n torsma = '[!$(*#*)&!D1]~[!$(*#*)&!D1]'\n q = Chem.MolFromSmarts(torsma)\n matches = mol.GetSubstructMatches(q)\n nmat = len(matches)\n #torsions = []\n\n # since mostly the molecules concerned here are amons\n # with N_I <=7, we care about 3- to 7-membered rings\n atsr = _get_ring_nodes(mol,3,7,F)\n #print ' -- atsr = ', atsr\n inrs = np.zeros(self.na, dtype=int) # [this atom is] in [how many] number of rings\n for ia in self.ias_heav:\n _sets = []\n for _ats in atsr:\n if ia in _ats:\n _sets.append(_ats)\n #print ' -- ia, _sets = ', ia, _sets\n inr = find_number_of_unique_set(_sets)\n inrs[ia] = inr\n #print ' -- inrs = ', inrs\n if nmat == 0:\n ns = [1]\n if self.debug: print(' |__ ns = ', ns)\n nc = 1\n self.nc = nc\n else:\n ns = []; patts = []\n scale = 0\n for match in matches:\n j = match[0]\n k = match[1]\n cb = set([j,k])\n bond = mol.GetBondBetweenAtoms(j, k)\n aj = mol.GetAtomWithIdx(j)\n ak = mol.GetAtomWithIdx(k)\n hj, hk = [ _hyb[_a.GetHybridization()] for _a in [aj,ak] ]\n iok1 = (hj != 2); iok2 = (hj != 3)\n iok3 = (hk != 2); iok4 = (hk != 3)\n if (iok1 and iok2) or (iok3 and iok4): continue\n\n # do not allow internal rotation about two adjacent sp2 atoms are in a ring\n if inrs[j] and inrs[k] and hj==2 and hk==2: continue\n\n pjk = []\n jk = [j,k]\n hsjk = [hj,hk]\n for _ in range(2):\n ia1 = jk[_]\n ia2 = j if ia1==k else k\n hyb = hsjk[_]\n nbrs = np.setdiff1d(self.ias[self.bom[ia1]>0], [ia2])\n ihs = (self.zs[nbrs]==1)\n if np.all(ihs): # case 'a', e.g., 'a1','a2','a3'\n # check ~X-CH3, ~X-NH2, ...\n nh = len(ihs)\n if hyb==3:\n # for rotor X-C in ~X-CH3, one torsion is allowed\n sn = {1:'a3', 2:'a2', 3:'a1'}[nh]\n else: # hyb==2\n sn = {1:'a2', 2:'a1', 3:'a1'}[nh]\n else: # case 'b', e.g., 'b1','b2','b3'\n inr = inrs[ia1]\n if self.cns[ia1]==2 and inr: # e.g., O<, S<, Se<,\n sn = 1\n else:\n if hyb==3:\n sn = 2 if inr <= 1 else 1 # {0:'b3', 1:'b3', 2:'b2', 3:'b1', 4:'b1'}[inr]\n else: # hyb==2:\n sn = 'b2' if inr == 0 else 'b1'\n #sn = {0:'b2', 1:'b1', 2:'b1', 3:'b1'}[inr]\n _patt = '%d%s'%(hyb,sn)\n pjk.append(_patt)\n #print 'j,k = ', j,k, ', pjk = ', pjk\n nci = min([ int(patt[-1]) for patt in pjk ]) # ndic[patt]; sci = scdic[patt]\n if nci > 1:\n ns.append( nci )\n if not np.any([inrs[j],inrs[k]]):\n scale += 1\n if scale == 0: scale = 1\n nc = np.int(np.floor(np.product(ns))) * scale #* 2\n self.nc = nc if nc > 99 else 99\n if self.debug: print(' |__ ns = ', ns)\n if self.debug: print(' |__ scale = %d, nc = %d'%(scale, nc))\n self.ns = np.array(ns, np.int)", "def commutation_equivalence(self, position_list=[]):\n\t\treturn self.__class__(_replace_match_at(self, position_list, [\n\t\t\t[ (A, disj, B), (B, disj, A) ],\n\t\t\t[ (A, conj, B), (B, conj, A) ],\n\t\t\t[ (A, iff, B), (B, iff, A) ]\n\t\t]))", "def test_single_molecules_cycle_one(self, single_mol_system):\n sequence = \"A\"\n expected = [sequence] * (5 * 3)\n processor = dssp.AnnotateResidues(\"test\", sequence)\n processor.run_system(single_mol_system)\n found = self.sequence_from_system(single_mol_system, \"test\")\n assert found == expected", "def __eq__(self, other):\n if not isinstance(other, PantsMappingClass):\n # print(\"A\")\n return False\n # if other._pants_decomposition != self._pants_decomposition:\n # print(\"B\")\n # return False\n # print(\"C\")\n return (self * other.inverse()).is_identity()", "def toluene():\n coords = [\n [1.2264, 0.0427, 0.0670],\n [1.0031, -1.3293, 0.0600],\n [-0.2945, -1.8256, -0.0060],\n 
[-1.3704, -0.9461, -0.0646],\n [-1.1511, 0.4266, -0.0578],\n [0.1497, 0.9292, 0.0066],\n [0.3871, 2.3956, -0.0022],\n [2.2495, 0.4310, 0.1211],\n [1.8510, -2.0202, 0.1071],\n [-0.4688, -2.9062, -0.0109],\n [-2.3926, -1.3347, -0.1157],\n [-2.0006, 1.1172, -0.1021],\n [0.5024, 2.7582, -1.0330],\n [1.2994, 2.6647, 0.5466],\n [-0.4475, 2.9470, 0.4506],\n ]\n coords = [[float(j) / Bohr for j in i] for i in coords]\n\n symbols = [\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n ]\n\n atoms = []\n for i, _ in enumerate(coords):\n atoms.append(Atom(symbols[i], position=coords[i]))\n return Molecule(symbols=atoms)", "def test_isomorphic_stripped_stereochemistry(self):\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n # Ensure default value is respected and order does not matter\n assert Molecule.are_isomorphic(mol1, mol2, strip_pyrimidal_n_atom_stereo=True)\n assert Molecule.are_isomorphic(mol1, mol2)\n assert Molecule.are_isomorphic(mol2, mol1)\n\n assert mol1 == mol2\n assert Molecule.from_smiles(\"CCC[N@](C)CC\") == Molecule.from_smiles(\n \"CCC[N@@](C)CC\"\n )", "def test_unsorted_xyz_mol_from_xyz(self):\n n3h5 = ARCSpecies(label='N3H5', xyz=self.xyz8['str'], smiles='NNN')\n expected_adjlist = \"\"\"1 N u0 p1 c0 {2,S} {4,S} {5,S}\n2 H u0 p0 c0 {1,S}\n3 H u0 p0 c0 {4,S}\n4 N u0 p1 c0 {1,S} {3,S} {6,S}\n5 H u0 p0 c0 {1,S}\n6 N u0 p1 c0 {4,S} {7,S} {8,S}\n7 H u0 p0 c0 {6,S}\n8 H u0 p0 c0 {6,S}\n\"\"\"\n self.assertEqual(n3h5.mol.to_adjacency_list(), expected_adjlist)\n self.assertEqual(n3h5.conformers[0], self.xyz8['dict'])", "def test_terminal_rotamer_filtering(self):\n LIGAND_PATH = 'ligands/oleic_acid.pdb'\n\n ligand_path = get_data_file_path(LIGAND_PATH)\n molecule = Molecule(ligand_path, exclude_terminal_rotamers=True)\n\n rotamers_per_branch = molecule.rotamers\n\n assert len(rotamers_per_branch) == 2, \"Found an invalid number \" + \\\n \"of branches: {}\".format(len(rotamers_per_branch))\n\n atom_list_1 = list()\n atom_list_2 = list()\n rotamers = rotamers_per_branch[0]\n for rotamer in rotamers:\n atom_list_1.append(set([rotamer.index1, rotamer.index2]))\n\n rotamers = rotamers_per_branch[1]\n for rotamer in rotamers:\n atom_list_2.append(set([rotamer.index1, rotamer.index2]))\n\n EXPECTED_INDICES_1 = [set([9, 10]), set([8, 9]), set([7, 8]),\n set([6, 7]), set([5, 6]), set([2, 5]),\n set([0, 2]), set([0, 1])]\n\n EXPECTED_INDICES_2 = [set([12, 11]), set([12, 13]), set([13, 14]),\n set([14, 15]), set([15, 16]), set([16, 17]),\n set([17, 18])]\n\n where_1 = list()\n for atom_pair in atom_list_1:\n if atom_pair in EXPECTED_INDICES_1:\n where_1.append(1)\n elif atom_pair in EXPECTED_INDICES_2:\n where_1.append(2)\n else:\n where_1.append(0)\n\n where_2 = list()\n for atom_pair in atom_list_2:\n if atom_pair in EXPECTED_INDICES_1:\n where_2.append(1)\n elif atom_pair in EXPECTED_INDICES_2:\n where_2.append(2)\n else:\n where_2.append(0)\n\n assert (all(i == 1 for i in where_1)\n and all(i == 2 for i in where_2)) or \\\n (all(i == 2 for i in where_1)\n and all(i == 1 for i in where_2)), \"Invalid rotamer library \" + \\\n \"{}, {}\".format(where_1, where_2)\n\n assert (all(i == 1 for i in where_1)\n and all(i == 2 for i in where_2)\n and len(where_1) == len(EXPECTED_INDICES_1)\n and len(where_2) == len(EXPECTED_INDICES_2)) or \\\n (all(i == 2 for i in where_1)\n and all(i == 1 for i in where_2)\n and len(where_1) == len(EXPECTED_INDICES_2)\n and 
len(where_2) == len(EXPECTED_INDICES_1)), \"Unexpected \" + \\\n \"number of rotamers\"", "def same(self, x, y):\n return self.find(x) == self.find(y)", "def is_atomic(self):\n \n symbols=set()\n for e in self.symbols:\n if not e=='':\n symbols.add(e)\n\n for s in symbols: #unicity first\n count=0\n for e in symbols:\n if s==e:\n count+=1\n if count!=1:\n return False\n else:\n continue \n temp=symbols.copy()\n for s in symbols:\n temp.remove(s)\n for e in temp:\n if s in e:\n return False\n else:\n continue\n temp=symbols.copy()\n\n return True", "def overlap_similarity(box, other_boxes):\n return jaccard(np.expand_dims(box, axis=0), other_boxes).squeeze(0)", "def __eq__(self, other):\n return self._coords == other._coords", "def __eq__(self, other):\n return self._coords == other._coords", "def __eq__(self, other):\n return (type(self) == type(other) and\n self.n == other.n and self.m == other.m and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid)", "def testStereo(self):\r\n smi_and_cansmi = [\r\n ('OC(=O)[C@@H](CCC(N)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](CCC(N)=O)N', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](N)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@@H](N)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](N)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](N)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](C(O)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](C(O)=O)N', 'NC(=O)CC[C@H](N)C(=O)O')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def test_cx_equivalence_2cx(self, seed=2):\n state = np.random.default_rng(seed)\n rnd = 2 * np.pi * state.random(size=18)\n\n qr = QuantumRegister(2, name=\"q\")\n qc = QuantumCircuit(qr)\n\n qc.u(rnd[0], rnd[1], rnd[2], qr[0])\n qc.u(rnd[3], rnd[4], rnd[5], qr[1])\n\n qc.cx(qr[1], qr[0])\n\n qc.u(rnd[6], rnd[7], rnd[8], qr[0])\n qc.u(rnd[9], rnd[10], rnd[11], qr[1])\n\n qc.cx(qr[0], qr[1])\n\n qc.u(rnd[12], rnd[13], rnd[14], qr[0])\n qc.u(rnd[15], rnd[16], rnd[17], qr[1])\n\n sim = UnitarySimulatorPy()\n unitary = execute(qc, sim).result().get_unitary()\n self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 2)\n self.assertTrue(Operator(two_qubit_cnot_decompose(unitary)).equiv(unitary))", "def position_is_valid(x1, y1, z1, x2, y2, z2, degXY_1, degYZ_1, degXY_2, degYZ_2, user_rand):\n\n # return max X,Y,Z locations from all the atoms in vecs\n def get_max_XYZ(vecs):\n return max(vecs, key=lambda v: v[0])[0], max(vecs, key=lambda v: v[1])[1], max(vecs, key=lambda v: v[2])[2]\n\n # return min X,Y,Z locations from all the atoms in vecs\n def get_min_XYZ(vecs):\n return min(vecs, key=lambda v: v[0])[0], min(vecs, key=lambda v: v[1])[1], min(vecs, key=lambda v: v[2])[2]\n\n # get the atoms of the first protein after moving it in x1,y1,z1\n vecs1 = get_atoms('media/files/' + user_rand + '/' + '_1_.pdb')\n translate_vecs(x1, y1, z1, vecs1)\n rotate_molecular(x1, y1, z1, degXY_1, degYZ_1, vecs1)\n\n # get the atoms of the second protein after moving it in x2,y2,z2\n vecs2 = get_atoms('media/files/' + user_rand + '/' + '_2_.pdb')\n translate_vecs(x2, y2, z2, vecs2)\n rotate_molecular(x2, y2, z2, degXY_2, degYZ_2, vecs2)\n\n 
maxX1, maxY1, maxZ1 = get_max_XYZ(vecs1)\n maxX2, maxY2, maxZ2 = get_max_XYZ(vecs2)\n\n minX1, minY1, minZ1 = get_min_XYZ(vecs1)\n minX2, minY2, minZ2 = get_min_XYZ(vecs2)\n\n dist = 1\n\n # check overlap in axis X, axis Y and axis Z\n resultX = (maxX1 + dist) >= minX2 and (maxX2 + dist) >= minX1\n resultY = (maxY1 + dist) >= minY2 and (maxY2 + dist) >= minY1\n resultZ = (maxZ1 + dist) >= minZ2 and (maxZ2 + dist) >= minZ1\n\n # check overlap of whole \"boxes\" of proteins\n isOverlap = resultX and resultY and resultZ\n\n return not isOverlap", "def isIsosceles(self):\n\t\treturn self.a == self.b or self.a == self.c or self.b == self.c", "def __eq__(self, other):\n from numpy.linalg import norm\n from numpy import array\n\n # Upcast to an Atom\n othercomp = Atom(other)\n\n # Compare Symbols\n sym1 = self.sym\n sym2 = othercomp.sym\n if sym1 != sym2:\n return False\n\n # Compare position\n pos1 = array(self.get_position())\n pos2 = array(othercomp.get_position())\n\n return norm(pos1 - pos2) < 1e-3", "def test_graphid_operator_eq_and_neq():\n\n for xstr, ystr in itertools.product([\"g1\", \"g2\", \"y7\", \"z123\"], repeat=2):\n x = _ir.GraphId(xstr)\n y = _ir.GraphId(ystr)\n\n if xstr == ystr:\n assert x == y\n assert not (x != y)\n else:\n assert not (x == y)\n assert x != y", "def test_remap(self):\n # the order here is CCO\n ethanol = create_ethanol()\n # get ethanol in reverse order OCC\n ethanol_reverse = create_reversed_ethanol()\n # get the mapping between the molecules\n mapping = Molecule.are_isomorphic(ethanol, ethanol_reverse, True)[1]\n ethanol.add_bond_charge_virtual_site([0, 1], 0.3 * unit.angstrom)\n # make sure that molecules with virtual sites raises an error\n with pytest.raises(NotImplementedError):\n remapped = ethanol.remap(mapping, current_to_new=True)\n\n # remake with no virtual site and remap to match the reversed ordering\n ethanol = create_ethanol()\n\n new_ethanol = ethanol.remap(mapping, current_to_new=True)\n\n def assert_molecules_match_after_remap(mol1, mol2):\n \"\"\"Check all of the attributes in a molecule match after being remapped\"\"\"\n for atoms in zip(mol1.atoms, mol2.atoms):\n assert atoms[0].to_dict() == atoms[1].to_dict()\n # bonds will not be in the same order in the molecule and the atom1 and atom2 indecies could be out of order\n # make a dict to compare them both\n remapped_bonds = dict(\n ((bond.atom1_index, bond.atom2_index), bond) for bond in mol2.bonds\n )\n for bond in mol1.bonds:\n key = (bond.atom1_index, bond.atom2_index)\n if key not in remapped_bonds:\n key = tuple(reversed(key))\n assert key in remapped_bonds\n # now compare each attribute of the bond except the atom indexes\n bond_dict = bond.to_dict()\n del bond_dict[\"atom1\"]\n del bond_dict[\"atom2\"]\n remapped_bond_dict = remapped_bonds[key].to_dict()\n del remapped_bond_dict[\"atom1\"]\n del remapped_bond_dict[\"atom2\"]\n assert mol1.n_bonds == mol2.n_bonds\n assert mol1.n_angles == mol2.n_angles\n assert mol1.n_propers == mol2.n_propers\n assert mol1.n_impropers == mol2.n_impropers\n assert mol1.total_charge == mol2.total_charge\n assert mol1.partial_charges.all() == mol2.partial_charges.all()\n\n # check all of the properties match as well, torsions and impropers will be in a different order\n # due to the bonds being out of order\n assert_molecules_match_after_remap(new_ethanol, ethanol_reverse)\n\n # test round trip (double remapping a molecule)\n new_ethanol = ethanol.remap(mapping, current_to_new=True)\n isomorphic, round_trip_mapping = Molecule.are_isomorphic(\n 
new_ethanol, ethanol, return_atom_map=True\n )\n assert isomorphic is True\n round_trip_ethanol = new_ethanol.remap(round_trip_mapping, current_to_new=True)\n assert_molecules_match_after_remap(round_trip_ethanol, ethanol)", "def __eq__(self, other):\n return np.all(self.grid == other.grid) and np.all(self.pos == other.pos)", "def similarity(self, e1, e2):\n\t\tpass", "def invariants(mol):\n atoms_dict={}\n \n for idxs,atom in enumerate(mol.GetAtoms()):\n components=[]\n components.append(atomic_number(mol,idxs))\n components.append(heavy_count(mol,idxs))\n components.append(H_count(mol,idxs))\n components.append(valence(mol,idxs))\n #components.append(charge(mol,idxs))\n components.append(negativity(mol,idxs))\n components.append(mass(mol,idxs))\n \n atoms_dict[idxs]=get_hash(components)\n return atoms_dict", "def getAtomStrainDuplicates(self, tol_mag = 7, verbose = 1, sort = \"angle_same\"):\n\n \"\"\"Favor cells by first making the desired sorting and then removing duplicates\n In relevant cases lexsort by the number of atoms as well\"\"\"\n if isinstance(sort, (int, np.integer, float)):\n p = np.abs(self.getBaseAngles(cell = 1) - np.deg2rad(sort))\n #si = np.argsort(p)\n si = np.lexsort((self.atoms, p))\n self.indexSortInterfaces(index = si)\n string = \"Favoring: Specified angle %.2f deg\" % sort\n elif isinstance(sort, (list, np.ndarray)):\n ang = np.tile(self.getBaseAngles(cell = 1), (np.shape(sort)[0], 1))\n p = np.abs(ang - np.deg2rad(np.array(sort))[:, None])\n #si = np.argsort(np.min(p, axis = 0))\n si = np.lexsort((self.atoms, np.min(p, axis = 0)))\n self.indexSortInterfaces(index = si)\n string = \"Favoring: Specified angles %s deg\" % (\", \".join([str(i) for i in sort]))\n elif sort.lower() == \"length\":\n p = np.sum(self.getCellLengths(cell = 1), axis = 1)\n si = np.argsort(p)\n self.indexSortInterfaces(index = si)\n string = \"Favoring: Minimum Circumference\"\n elif sort.lower() == \"angle_right\":\n p = np.abs(np.pi / 2 - self.getBaseAngles(cell = 1))\n #si = np.argsort(p)\n si = np.lexsort((self.atoms, p))\n self.indexSortInterfaces(index = si)\n string = \"Favoring: Right Angles\"\n elif sort.lower() == \"angle_same\":\n p = np.abs(ut.getCellAngle(self.base_1[:2, :2], verbose = verbose) -\\\n self.getBaseAngles(cell = 1))\n #si = np.argsort(p)\n si = np.lexsort((self.atoms, p))\n self.indexSortInterfaces(index = si)\n string = \"Favoring: Base Angle Match\"\n else:\n string = \"Favoring: As Constructed\"\n\n \"\"\"Find unique strains within specified tolerances\"\"\"\n values = np.zeros((self.atoms.shape[0], 2))\n values[:, 0] = self.atoms.copy()\n values[:, 1] = np.round(self.eps_mas.copy(), tol_mag)\n unique = np.unique(values, axis = 0, return_index = True)[1]\n index = np.in1d(np.arange(self.atoms.shape[0]), unique)\n\n if verbose > 0:\n ut.infoPrint(string)\n string = \"Unique strain/atom combinations found: %i, tol: 1e-%i (all exact matches keept)\"\\\n % (np.sum(index), tol_mag)\n ut.infoPrint(string)\n\n return index", "def setResNameCheckCoords(self):\n exit = False\n localDir = os.path.abspath('.')\n if not os.path.exists(self.tmpDir):\n os.mkdir(self.tmpDir)\n #if not os.path.exists(os.path.join(tmpDir, self.inputFile)):\n copy2(self.absInputFile, self.tmpDir)\n os.chdir(self.tmpDir)\n\n if self.ext == '.pdb':\n tmpFile = open(self.inputFile, 'r')\n else:\n cmd = '%s -i %s -fi %s -o tmp -fo ac -pf y' % \\\n (self.acExe, self.inputFile, self.ext[1:])\n self.printDebug(cmd)\n out = getoutput(cmd)\n if not out.isspace():\n self.printDebug(out)\n try:\n tmpFile 
= open('tmp', 'r')\n except:\n rmtree(self.tmpDir)\n raise\n\n tmpData = tmpFile.readlines()\n residues = set()\n coords = {}\n for line in tmpData:\n if 'ATOM ' in line or 'HETATM' in line:\n residues.add(line[17:20])\n at = line[0:17]\n cs = line[30:54]\n if coords.has_key(cs):\n coords[cs].append(at)\n else:\n coords[cs] = [at]\n #self.printDebug(coords)\n\n if len(residues) > 1:\n self.printError(\"more than one residue detected '%s'\" % str(residues))\n self.printError(\"verify your input file '%s'. Aborting ...\" % self.inputFile)\n sys.exit(1)\n\n dups = \"\"\n short = \"\"\n long = \"\"\n longSet = set()\n id = 0\n items = coords.items()\n l = len(items)\n for item in items:\n id += 1\n if len(item[1]) > 1: # if True means atoms with same coordinates\n for i in item[1]:\n dups += \"%s %s\\n\" % (i, item[0])\n\n# for i in xrange(0,len(data),f):\n# fdata += (data[i:i+f])+' '\n\n for id2 in xrange(id,l):\n item2 = items[id2]\n c1 = map(float,[item[0][i:i+8] for i in xrange(0,24,8)])\n c2 = map(float,[item2[0][i:i+8] for i in xrange(0,24,8)])\n dist2 = self.distance(c1,c2)\n if dist2 < minDist2:\n dist = math.sqrt(dist2)\n short += \"%8.5f %s %s\\n\" % (dist, item[1], item2[1])\n if dist2 < maxDist2: # and not longOK:\n longSet.add(str(item[1]))\n longSet.add(str(item2[1]))\n if str(item[1]) not in longSet:\n long += \"%s\\n\" % item[1]\n\n if dups:\n self.printError(\"Atoms with same coordinates in '%s'!\" % self.inputFile)\n self.printQuoted(dups[:-1])\n exit = True\n\n if short:\n self.printError(\"Atoms TOO close (< %s Ang.)\" % minDist)\n self.printQuoted(\"Dist (Ang.) Atoms\\n\" + short[:-1])\n exit = True\n\n if long:\n self.printError(\"Atoms TOO alone (> %s Ang.)\" % maxDist)\n self.printQuoted(long[:-1])\n exit = True\n\n if exit:\n if self.force:\n self.printWarn(\"You chose to proceed anyway with '-f' option. GOOD LUCK!\")\n else:\n self.printError(\"Use '-f' option if you want to proceed anyway. 
Aborting ...\")\n rmtree(self.tmpDir)\n sys.exit(1)\n\n resname = list(residues)[0]\n newresname = resname\n\n if resname.isdigit() or 'E' in resname[1:3].upper() or 'ADD' in resname.upper():\n newresname = 'R' + resname\n if not resname.isalnum():\n newresname = 'MOL'\n if newresname != resname:\n self.printWarn(\"In %s.lib, residue name will be '%s' instead of '%s' elsewhere\"\n % (self.acBaseName, newresname, resname))\n\n self.resName = newresname\n\n os.chdir(localDir)\n self.printDebug(\"setResNameCheckCoords done\")", "def hassimilarcluster(ind, clusters):\n item = op.itemgetter\n global opt\n found = False\n tx = min(clusters[ind],key=item(0))[0]\n ty = min(clusters[ind],key=item(1))[1]\n for i, cl in enumerate(clusters):\n if i != ind:\n cx = min(cl,key=item(0))[0]\n cy = min(cl,key=item(1))[1]\n dx, dy = cx - tx, cy - ty\n specdist = Hausdorff_distance(clusters[ind],cl,None,(dx,dy))\n if specdist <= int(opt.rgsim):\n found = True\n break\n return found", "def conflict_check() ->None:\r\n global conflict_space\r\n conflict_space = np.zeros(mShape)\r\n for x in range(shape):\r\n for y in range(shape):\r\n for z in range(y+1, shape):\r\n if example[x, y] == example[x, z]:\r\n conflict_space[x, y] = example[x, y]\r\n conflict_space[x, z] = example[x, z]\r\n if example[y, x] == example[z, x]:\r\n conflict_space[y, x] = example[y, x]\r\n conflict_space[z, x] = example[z, x]", "def test_multi_molecules_cycle_one(self, multi_mol_system_irregular):\n sequence = \"A\"\n expected = [sequence] * (15 * 3)\n processor = dssp.AnnotateResidues(\"test\", sequence)\n processor.run_system(multi_mol_system_irregular)\n found = self.sequence_from_system(multi_mol_system_irregular, \"test\")\n assert found == expected", "def test_equality(self):\n\n s3 = space(curvature=1/5)\n for k in (0, -1, 1, 1.75, 0.325, 1/7, -1.75, -0.325, -1/7):\n s1 = space(fake_curvature=k)\n s2 = space(fake_curvature=k)\n self.assertTrue(s1 == s2)\n self.assertTrue(hash(s1) == hash(s2))\n self.assertTrue(str(s1) == str(s2))\n self.assertTrue(repr(s1) == repr(s2))\n self.assertTrue(s1 != s3)", "def __eq__(self, other):\n\n return(self.cell == other.cell and\n self._lastUsedIteration == other._lastUsedIteration and\n (sorted(self.__synapses, key=lambda x: x._ordinal) ==\n sorted(other.__synapses, key=lambda x: x._ordinal)))", "def test_chemical_environment_matches_RDKit(self):\n # Create chiral molecule\n toolkit_wrapper = RDKitToolkitWrapper()\n molecule = Molecule()\n atom_C = molecule.add_atom(6, 0, False, stereochemistry=\"R\", name=\"C\")\n atom_H = molecule.add_atom(1, 0, False, name=\"H\")\n atom_Cl = molecule.add_atom(17, 0, False, name=\"Cl\")\n atom_Br = molecule.add_atom(35, 0, False, name=\"Br\")\n atom_F = molecule.add_atom(9, 0, False, name=\"F\")\n molecule.add_bond(atom_C, atom_H, 1, False)\n molecule.add_bond(atom_C, atom_Cl, 1, False)\n molecule.add_bond(atom_C, atom_Br, 1, False)\n molecule.add_bond(atom_C, atom_F, 1, False)\n # Test known cases\n matches = molecule.chemical_environment_matches(\n \"[#6:1]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 1 # it should have one tagged atom\n assert set(matches[0]) == set([atom_C])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[#1:2]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 2 # it should have two tagged 
atoms\n assert set(matches[0]) == set([atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[Cl:1]-[C:2]-[H:3]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 3 # it should have three tagged atoms\n assert set(matches[0]) == set([atom_Cl, atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[*:2]\", toolkit_registry=toolkit_wrapper\n )\n assert len(matches) == 4 # there should be four matches\n for match in matches:\n assert len(match) == 2 # each match should have two tagged atoms\n # Test searching for stereo-specific SMARTS\n matches = molecule.chemical_environment_matches(\n \"[#6@:1](-[F:2])(-[Cl])(-[Br])(-[H])\", toolkit_registry=toolkit_wrapper\n )\n assert len(matches) == 1 # there should be one match\n for match in matches:\n assert len(match) == 2 # each match should have two tagged atoms\n matches = molecule.chemical_environment_matches(\n \"[#6@@:1](-[F:2])(-[Cl])(-[Br])(-[H])\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 0\n ) # this is the wrong stereochemistry, so there shouldn't be any matches", "def test_canonize_neighborhood_same_graph(nauty, ref_graph, ref_graph2):\n key = nauty.canonize_neighborhood(ref_graph, 2, 1)\n key2 = nauty.canonize_neighborhood(ref_graph2, 3, 1)\n assert key == key2", "def test_hash_equality(self):\n origin = np.random.randn(3)\n normal = np.random.randn(3)\n up_vector = np.random.randn(3)\n up_vector2 = np.random.randn(3)\n p1 = shapes_3d.CoordinatePlane(origin, normal, up_vector)\n p2 = shapes_3d.CoordinatePlane(origin, normal, up_vector)\n p3 = shapes_3d.CoordinatePlane(origin, normal, up_vector2)\n \n self.assertEqual(p1, p2)\n self.assertNotEqual(p1, p3)", "def __eq__(self,other):\n boul0 = self.linked[0].coordinates[0]==other.linked[0].coordinates[0] and self.linked[0].coordinates[1]==other.linked[0].coordinates[1]\n boul1 = self.linked[1].coordinates[0]==other.linked[1].coordinates[0] and self.linked[1].coordinates[1]==other.linked[1].coordinates[1]\n boulid = self.id==other.id\n return boul0 and boul1 and boulid", "def are_torsions_same2(geo, geoi, idxs_lst):\n dtol = 0.09\n same_dihed = True\n for idxs in idxs_lst:\n val = dihedral_angle(geo, *idxs)\n vali = dihedral_angle(geoi, *idxs)\n valip = vali+2.*numpy.pi\n valim = vali-2.*numpy.pi\n vchk1 = abs(val - vali)\n vchk2 = abs(val - valip)\n vchk3 = abs(val - valim)\n if vchk1 > dtol and vchk2 > dtol and vchk3 > dtol:\n same_dihed = False\n return same_dihed", "def test_chemical_environment_matches_RDKit(self):\n # Create chiral molecule\n from simtk.openmm.app import element\n\n toolkit_wrapper = RDKitToolkitWrapper()\n molecule = Molecule()\n atom_C = molecule.add_atom(\n element.carbon.atomic_number, 0, False, stereochemistry=\"R\", name=\"C\"\n )\n atom_H = molecule.add_atom(element.hydrogen.atomic_number, 0, False, name=\"H\")\n atom_Cl = molecule.add_atom(element.chlorine.atomic_number, 0, False, name=\"Cl\")\n atom_Br = molecule.add_atom(element.bromine.atomic_number, 0, False, name=\"Br\")\n atom_F = molecule.add_atom(element.fluorine.atomic_number, 0, False, name=\"F\")\n molecule.add_bond(atom_C, atom_H, 1, False)\n molecule.add_bond(atom_C, atom_Cl, 1, False)\n molecule.add_bond(atom_C, atom_Br, 1, False)\n molecule.add_bond(atom_C, atom_F, 1, False)\n # Test known cases\n matches = molecule.chemical_environment_matches(\n \"[#6:1]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n 
len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 1 # it should have one tagged atom\n assert set(matches[0]) == set([atom_C])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[#1:2]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 2 # it should have two tagged atoms\n assert set(matches[0]) == set([atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[Cl:1]-[C:2]-[H:3]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 3 # it should have three tagged atoms\n assert set(matches[0]) == set([atom_Cl, atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[*:2]\", toolkit_registry=toolkit_wrapper\n )\n assert len(matches) == 4 # there should be four matches\n for match in matches:\n assert len(match) == 2 # each match should have two tagged atoms", "def test_cx_equivalence_0cx(self, seed=0):\n state = np.random.default_rng(seed)\n rnd = 2 * np.pi * state.random(size=6)\n\n qr = QuantumRegister(2, name=\"q\")\n qc = QuantumCircuit(qr)\n\n qc.u(rnd[0], rnd[1], rnd[2], qr[0])\n qc.u(rnd[3], rnd[4], rnd[5], qr[1])\n\n sim = UnitarySimulatorPy()\n unitary = execute(qc, sim).result().get_unitary()\n self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 0)\n self.assertTrue(Operator(two_qubit_cnot_decompose(unitary)).equiv(unitary))", "def __eq__(self, other):\n\n equalCoordinates = self.getCoordinate() == other.getCoordinate()\n equalMasses = self.getMass() == other.getMass()\n equalVelocities = self.getVelocity() == other.getVelocity()\n equalAccelerations = self.getAcceleration() == other.getAcceleration()\n\n return equalCoordinates & equalMasses & equalVelocities & equalAccelerations", "def GalacticToEquatorial(Galactic):\n \n # l,b,s => ra, dec, s\n l = Galactic[:,0]\n b = Galactic[:,1]\n cb = np.cos(b)\n sb = np.sin(b)\n dec = np.arcsin(np.cos(decgp)*cb*np.cos(l-lcp)+sb*np.sin(decgp))\n ra = ragp+np.arctan2(cb*np.sin(lcp-l),sb*np.cos(decgp)-cb*np.sin(decgp)*np.cos(l-lcp))\n ra[ra>2.*np.pi] -= 2.*np.pi\n if (len(Galactic[0,:])==3):\n Equatorial = np.column_stack([ra,dec,Galactic[:,2]])\n else:\n # vlos, mulcos(b), mub => vlos, muracos(dec), mudec\n cd = np.cos(dec)\n sd = np.sin(dec)\n A11 = (np.sin(decgp)*cd-np.cos(decgp)*sd*np.cos(ra-ragp))/cb\n A12 = -np.cos(decgp)*np.sin(ra-ragp)/cb\n A21 = (np.cos(decgp)*cd+np.sin(decgp)*sd*np.cos(ra-ragp)+sb*np.cos(lcp-l)*A11)/np.sin(lcp-l)\n A22 = (np.sin(decgp)*np.sin(ra-ragp)+sb*np.cos(lcp-l)*A12)/np.sin(lcp-l)\n index = np.where(np.fabs(np.cos(lcp-l))>np.fabs(np.sin(lcp-l)))\n A21[index] = (sd[index]*np.sin(ra[index]-ragp)-sb[index]*np.sin(lcp-l[index])*A11[index])/np.cos(lcp-l[index])\n A22[index] =-(np.cos(ra[index]-ragp)+sb[index]*np.sin(lcp-l[index])*A12[index])/np.cos(lcp-l[index])\n Prod = A11*A22-A12*A21\n Equatorial = np.column_stack((ra,dec,Galactic[:,2],Galactic[:,3],\n (A11*Galactic[:,4]-A21*Galactic[:,5])/Prod,\n (A22*Galactic[:,5]-A12*Galactic[:,4])/Prod))\n \n return Equatorial", "def test_canonical_ordering_rdkit(self):\n from openff.toolkit.utils.toolkits import RDKitToolkitWrapper\n\n rdkit = RDKitToolkitWrapper()\n # get ethanol in canonical order\n ethanol = create_ethanol()\n # get reversed non canonical ethanol\n reversed_ethanol = create_reversed_ethanol()\n # get the 
canonical ordering\n canonical_ethanol = reversed_ethanol.canonical_order_atoms(rdkit)\n # make sure the mapping between the ethanol and the rdkit ref canonical form is the same\n assert (\n True,\n {0: 2, 1: 0, 2: 1, 3: 8, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7},\n ) == Molecule.are_isomorphic(canonical_ethanol, ethanol, True)", "def _compare(smi1, smi2):\n return _canonicalize(smi1) == _canonicalize(smi2)", "def is_isomorphic(self, other, return_map=False):\n if return_map:\n if not(self.degree() == other.degree() and\n self.length() == other.length()):\n return False, None\n sn, sn_map = self.relabel(return_map=True)\n on, on_map = other.relabel(return_map=True)\n if sn != on:\n return False, None\n return True, sn_map * ~on_map\n\n return (self.degree() == other.degree() and\n self.length() == other.length() and\n self.relabel() == other.relabel())", "def __eq__(self, other):\n return self.master.phy2abs(pos=other)" ]
[ "0.6392542", "0.63092023", "0.6111575", "0.6041727", "0.6018316", "0.6011343", "0.60099506", "0.6008797", "0.5996518", "0.59733444", "0.5879561", "0.58772373", "0.586719", "0.58652085", "0.5841096", "0.58306956", "0.58160913", "0.5800217", "0.5774613", "0.5731002", "0.5731002", "0.56939614", "0.5670275", "0.56444037", "0.56355435", "0.56159496", "0.55979335", "0.5591457", "0.5582785", "0.55531836", "0.55480033", "0.5537763", "0.5519269", "0.5516233", "0.5505597", "0.5487078", "0.54862875", "0.54855543", "0.54805595", "0.5457694", "0.54575855", "0.54572296", "0.5442693", "0.5436206", "0.54341865", "0.54307824", "0.5426236", "0.5414951", "0.5398883", "0.53866065", "0.5383952", "0.53836375", "0.53824717", "0.53810585", "0.53801876", "0.5378164", "0.53753406", "0.5372856", "0.537272", "0.5367485", "0.5359911", "0.5358566", "0.53513825", "0.5345134", "0.534381", "0.53312", "0.53223985", "0.531956", "0.5312091", "0.5312091", "0.5307619", "0.53046745", "0.5295437", "0.5289835", "0.52826405", "0.5280561", "0.5280096", "0.527646", "0.5271906", "0.5268194", "0.5263212", "0.5260092", "0.52591413", "0.5251997", "0.52458495", "0.52413404", "0.52394134", "0.5235426", "0.5235201", "0.52327025", "0.52238613", "0.5223839", "0.5213422", "0.52110887", "0.5210963", "0.52073216", "0.5202605", "0.5202579", "0.51918626", "0.5188469", "0.51884145" ]
0.0
-1
Determines the atoms defining the chemical environment of a given atom by checking their bonding partners. Only the first and second neighbours are considered.
def get_influence_atoms(atomlist):
    enviromentlist = []
    trunclist = []
    neighbourlist = get_closest_neighbours(atomlist, 4)
    for neighbours in neighbourlist:
        if neighbours[0][0] == "H":
            neighbours = neighbours[:2]
        if neighbours[0][0] == "O":
            neighbours = neighbours[:3]
        trunclist.append(neighbours)
    for atom in trunclist:
        newatom = []
        for atom1partner in atom[1:]:
            for partner in trunclist:
                if partner[0] == atom1partner:
                    counter = 0
                    for atomi in partner:
                        if atomi[0] == 'H':
                            counter += 1
                    if counter < 2 or (partner[0] in atom and atom[0][0] == 'H'):
                        newatom += atom + partner[1:]
        newatom = make_list_unique(newatom)
        newatom.sort()
        enviromentlist.append(newatom)
    return enviromentlist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def identify_bonds(chosen_atom, atom_list):\n list_of_hydrogens = ['H15', 'H14', 'H13', 'H12', 'H11', 'H10', 'H9', 'H8', 'H7', 'H6', 'H5', 'H4', 'H3', 'H2', 'H1'] \n if ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name != \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 2)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n elif ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name == \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.8)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n else:\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 1.6) and (abs(chosen_atom.y - atom.y) <= 1.6) and (abs(chosen_atom.z - atom.z) <= 1.6))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.6)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n for elements in nearby_atoms:\n if (check_if_no_bond(chosen_atom, elements, bond_list, bond_list_3) == True):\n nearby_atoms.remove(elements)\n if (len(nearby_atoms) == len(identified_bonds)):\n return identified_bonds\n else:\n return []", "def get_framework_neighbours(atom, useH=True):\n neighbourlist = []\n for atom2 in atom.partner[:5]:\n #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(\n covalence_radius[atom2.element]) + .1:\n if not 'H' == atom2.element or useH:\n neighbourlist.append(atom2)\n return neighbourlist", "def find_contour(hole_atoms, atom_list):\n contour_atoms = []\n extra_atoms = []\n global bond_list\n bond_list = bond_list_1\n for atom in hole_atoms:\n c = [bond[0] for bond in identify_bonds(atom, atom_list) if ((bond[0] not in hole_atoms) and (bond[0] not in contour_atoms))]\n for element in c:\n contour_atoms.append(element)\n for atom in atom_list:\n c = [bond[0] for bond in identify_bonds(atom, atom_list)]\n count = 0\n for element in c:\n if element in contour_atoms:\n count += 1\n if (count >= 2):\n extra_atoms.append(atom)\n for atom in atom_list:\n c = [bond[0] for bond in identify_bonds(atom, atom_list)]\n for element in c:\n if ((element in contour_atoms) or (element in extra_atoms)):\n for i in [bond[0] for bond in identify_bonds(element, atom_list)]:\n if ((i in hole_atoms) and (atom not in hole_atoms) and (atom not in contour_atoms) and (atom not in extra_atoms)):\n extra_atoms.append(atom) \n \n contour_atoms = contour_atoms + extra_atoms\n \n extra_atoms2 = []\n for atom in contour_atoms:\n for atom2 in contour_atoms:\n if (atom != atom2):\n c = [bond[0] for bond in identify_bonds(atom, atom_list) if ((bond in identify_bonds(atom2, atom_list)) and (bond[0] not in 
(contour_atoms)))]\n if (len(c) != 0):\n extra_atoms2.append(c[0]) \n for element in extra_atoms2:\n contour_atoms.append(element)\n return contour_atoms", "def get_contour(atom_list):\n initial = [atom for atom in atom_list if ((0 < len(identify_bonds(atom, atom_list)) < 3) and (check_connected(atom, identify_bonds(atom, atom_list)) == False))]\n \n extra_1 = []\n for atom in atom_list:\n neighbours = [bond[0] for bond in identify_bonds(atom, atom_list)]\n for i in neighbours:\n neighbours2 = [bond[0] for bond in identify_bonds(i, atom_list)]\n for j in neighbours2:\n if j in initial:\n extra_1.append(atom)\n\n extra_2 = []\n for atom in atom_list:\n neighbours = [bond[0] for bond in identify_bonds(atom, atom_list)]\n check = 0\n for i in neighbours:\n if i in initial:\n check += 1\n if ((check == 2) and (atom not in initial)):\n extra_2.append(atom) \n return (initial + extra_1 + extra_2)", "def validBond(index1, index2, direction):\n #print \"?valid bond: \", allAtoms[index1].pos, \" , \", allAtoms[index2].pos, direction\n cell1 = index1/numAtomsPerCell\n cell2 = index2/numAtomsPerCell\n #Find the coordinates of the cell in units of interaction cells\n posInX1 = int(cell1/(size*size))\n posInX2 = int(cell1/(size*size))\n leftover1 = cell1%(size*size)\n leftover2 = cell2%(size*size)\n posInY1 = int(leftover1/size)\n posInY2 = int(leftover2/size)\n posInZ1 = leftover1%size\n posInZ2 = leftover2%size\n \n #Now, a valid interaction can cross an interaction cell boundary in any direction,\n #but it has a maximum length of one interaction cell. However, I have made the minimum\n #size of this larger translated lattice equal to 3*3*3 interaction cells. Therefore,\n #when we hit an edge and get in invalid interaction, the cells will be at least 2\n #interaction cells apart in the direction of the interaction.\n if(direction[0]):\n if numpy.abs(posInX1 - posInX2)>1:\n #print \"false\"\n return False\n if(direction[1]):\n if numpy.abs(posInY1 - posInY2)>1:\n #print \"false\"\n return False\n if(direction[2]):\n if numpy.abs(posInZ1 - posInZ2)>1:\n #print \"false\"\n return False\n print #\"true\"\n return True\n\n #Old (incorrect) method:\n if 0:\r\n print \"?valid bond: \", allAtoms[index1].pos, \" , \", allAtoms[index2].pos, direction\r\n cell1 = index1/numAtomsPerCell\r\n cell2 = index2/numAtomsPerCell\r\n zRow1 = cell1/size#this relies on the list being created in the nested for loop that was used, z within y within x\r\n zRow2 = cell2/size\r\n if(zRow1 != zRow2 and direction[2]):\n print \"false\"\r\n return False\r\n xLayer1 = cell1/(size*size)\r\n xLayer2 = cell2/(size*size)\r\n if(xLayer1 != xLayer2 and direction[1]):\n print \"false\"\r\n return False\r\n #shouldn't have to check z, because if it's not valid in z direction, it would be off the list (>len(allAtoms))\n print \"true\"\r\n return True", "def find_CX_neighbours(list_of_atoms, atom_list):\n my_list = []\n atom_numbers = []\n for atom in list_of_atoms:\n for element in identify_bonds(atom, atom_list):\n if (((element[0].atom_name == \"CX\") or (element[0].atom_name == \"CY\")) and (element[0].atom_number not in atom_numbers)):\n my_list.append(element[0])\n atom_numbers.append(element[0].atom_number)\n return my_list", "def get_framework_neighbors(atom, useH=True):\n neighborlist = []\n for atom2 in atom.partner[:5]:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(\n covalence_radius[atom2.element]) + .1:\n if not 'H' == atom2.element or useH:\n neighborlist.append(atom2)\n return 
neighborlist", "def ecfp(mol,radius):\n #mol=Chem.AddHs(mol)\n bitInfo={}\n atoms_dict=invariants(mol)\n \n for idxs,i in atoms_dict.items():\n bitInfo[i]=bitInfo.get(i,())+((idxs,0),)\n \n neighborhoods=[]\n atom_neighborhoods=[len(mol.GetBonds())*bitarray('0') for a in mol.GetAtoms()]\n dead_atoms=len(mol.GetAtoms())*bitarray('0')\n \n for r in range(1,radius+1):\n round_ids={} #new bit ID this iteration\n round_atom_neighborhoods=copy.deepcopy(atom_neighborhoods) #bond to include under this r\n neighborhoods_this_round=[] #(round_atom_neighborhoods,round_ids,idxs)\n \n for idxs,a in enumerate(mol.GetAtoms()):\n if dead_atoms[idxs]:\n continue\n nbsr=[] #list to hash this iteration\n o_bond=bond(mol,idxs)\n for b in o_bond:\n round_atom_neighborhoods[idxs][b[2]] = True\n round_atom_neighborhoods[idxs] |= atom_neighborhoods[b[1]]\n nbsr.append((b[0],atoms_dict[b[1]]))\n nbsr=sorted(nbsr)\n nbsr=[item for sublist in nbsr for item in sublist]\n nbsr.insert(0,atoms_dict[idxs])\n nbsr.insert(0,r)\n \n round_ids[idxs]=get_hash(nbsr)\n neighborhoods_this_round.append((round_atom_neighborhoods[idxs],round_ids[idxs],idxs))\n for lst in neighborhoods_this_round:\n if lst[0] not in neighborhoods:\n bitInfo[lst[1]] = bitInfo.get(lst[1],())+((lst[2],r),)\n neighborhoods.append(lst[0])\n else:\n dead_atoms[lst[2]]=True\n atoms_dict=round_ids\n atom_neighborhoods=copy.deepcopy(round_atom_neighborhoods)\n return bitInfo", "def find_rings(atom_list): \n CX_list = [atom0 for atom0 in atom_list if ((atom0.atom_name == \"CX\") or (atom0.atom_name == \"CY\"))]\n atom_dict = {}\n for atom0 in CX_list:\n if (len(identify_bonds(atom0, atom_list)) >= 2):\n atom_dict[atom0] = {}\n for atom1 in identify_bonds(atom0, atom_list):\n if ( ((atom1[0].atom_name == \"CX\") or (atom1[0].atom_name == \"CY\")) and (len(identify_bonds(atom1[0], atom_list)) >= 2) ):\n atom_dict[atom0][atom1[0]] = {}\n for atom2 in identify_bonds(atom1[0], atom_list):\n if ( ((atom2[0].atom_name == \"CX\") or (atom2[0].atom_name == \"CY\")) and (atom2[0] != atom0) and (len(identify_bonds(atom2[0], atom_list)) >= 2)):\n atom_dict[atom0][atom1[0]][atom2[0]] = {}\n for atom3 in identify_bonds(atom2[0], atom_list):\n if ( ((atom3[0].atom_name == \"CX\") or (atom3[0].atom_name == \"CY\")) and (atom3[0] != atom0) and (len(identify_bonds(atom3[0], atom_list)) >= 2)):\n atom_dict[atom0][atom1[0]][atom2[0]][atom3[0]] = [atom3[0].atom_number]\n rings = []\n for key in atom_dict.keys():\n for key2 in atom_dict[key].keys():\n for key3 in atom_dict[key][key2].keys():\n for key4 in atom_dict[key][key2][key3].keys():\n rings.append([key, key2, key3, key4])\n finite_rings = []\n for element in rings:\n for element2 in rings:\n if ((element[0] == element2[0]) and (element[3] == element2[3]) and (element[1] != element2[1]) and (element[1] != element2[2]) and (element[2] != element2[1]) and (element[2] != element2[2]) and (element[0] != element2[1] != element[3]) and (element[0] != element2[2] != element[3])):\n check = True\n for el in finite_rings:\n if ((element[0] in el) and (element[1] in el) and (element[2] in el) and (element[3] in el) and (element2[0] in el) and (element2[1] in el) and (element2[2] in el) and (element2[3] in el)):\n check = False\n if (check == True):\n finite_rings.append([element[0], element[1], element[2], element[3], element2[1], element2[2]])\n return finite_rings", "def test_get_atom_features(self):\n atom_features = np.array([[40, 41, 42, 43], [44, 45, 46, 47],\n [48, 49, 50, 51], [52, 53, 54, 55],\n [56, 57, 58, 59]])\n 
canon_adj_list = [[1, 2], [0, 3], [0, 3], [1, 2, 4], [3]]\n mol = ConvMol(atom_features, canon_adj_list)\n # atom 4 has 0 neighbors\n # atom 0 has 2 neighbors\n # atom 1 has 2 neighbors\n # atom 2 has 2 neighbors\n # atom 3 has 3 neighbors.\n # Verify that atom features have been sorted by atom degree.\n assert np.array_equal(\n mol.get_atom_features(),\n np.array([[56, 57, 58, 59], [40, 41, 42, 43], [44, 45, 46, 47],\n [48, 49, 50, 51], [52, 53, 54, 55]]))", "def iter_bonded_atoms(self):\n for bond in self.iter_bonds():\n partner = bond.get_partner(self)\n assert partner is not None\n yield partner", "def filter_carbon_atoms(atom_list, rings):\n list_3 = []\n list_2 = []\n list_2n = []\n for atom in atom_list:\n if (check_connected(atom, identify_bonds(atom, atom_list)) == False):\n if (len(identify_bonds(atom, atom_list)) == 3):\n list_3.append(atom)\n elif (len(identify_bonds(atom, atom_list)) == 2):\n list_2.append(atom)\n for neighbour in identify_bonds(atom, atom_list):\n if (len(identify_bonds(neighbour[0], atom_list)) == 2):\n for ring in rings:\n if( (atom in ring) and (neighbour[0] in ring)):\n list_2n.append(atom) \n return list_3, list_2, list_2n", "def hbond(atoms, selection1=None, selection2=None, selection1_type='both',\n cutoff_dist=2.5, cutoff_angle=120,\n donor_elements=('O', 'N', 'S'), acceptor_elements=('O', 'N', 'S'),\n periodic=False):\n if not (atoms.element == \"H\").any():\n warnings.warn(\n \"Input structure does not contain hydrogen atoms, \"\n \"hence no hydrogen bonds can be identified\"\n )\n\n # Create AtomArrayStack from AtomArray\n if not isinstance(atoms, AtomArrayStack):\n atoms = stack([atoms])\n single_model = True\n else:\n single_model = False\n \n if periodic:\n box = atoms.box\n else:\n box = None\n \n # Mask for donor/acceptor elements\n donor_element_mask = np.isin(atoms.element, donor_elements)\n acceptor_element_mask = np.isin(atoms.element, acceptor_elements)\n\n if selection1 is None:\n selection1 = np.ones(atoms.array_length(), dtype=bool)\n if selection2 is None:\n selection2 = np.ones(atoms.array_length(), dtype=bool)\n\n if selection1_type == 'both':\n # The two selections are separated into three selections:\n # the original ones without the overlaping part\n # and one containing the overlap\n # This prevents redundant triplets and unnecessary computation \n overlap_selection = selection1 & selection2\n # Original selections without overlaping part\n exclusive_selection1 = selection1 & (~overlap_selection)\n exclusive_selection2 = selection2 & (~overlap_selection)\n \n # Put selections to list for cleaner iteration\n selections = [\n exclusive_selection1, exclusive_selection2, overlap_selection\n ]\n selection_combinations = [\n #(0,0), is not included, would be same selection\n # as donor and acceptor simultaneously\n (0,1),\n (0,2),\n (1,0),\n #(1,1), # same reason above\n (1,2),\n (2,0),\n (2,1),\n (2,2) # overlaping part, combination is necessary\n ]\n \n all_comb_triplets = []\n all_comb_mask = []\n for selection_index1, selection_index2 in selection_combinations:\n donor_mask = selections[selection_index1]\n acceptor_mask = selections[selection_index2]\n if np.count_nonzero(donor_mask) != 0 and \\\n np.count_nonzero(acceptor_mask) != 0:\n # Calculate triplets and mask\n triplets, mask = _hbond(\n atoms, donor_mask, acceptor_mask,\n donor_element_mask, acceptor_element_mask,\n cutoff_dist, cutoff_angle,\n box\n )\n all_comb_triplets.append(triplets)\n all_comb_mask.append(mask)\n # Merge results from all combinations\n triplets = 
np.concatenate(all_comb_triplets, axis=0)\n mask = np.concatenate(all_comb_mask, axis=1)\n\n elif selection1_type == 'donor':\n triplets, mask = _hbond(\n atoms, selection1, selection2,\n donor_element_mask, acceptor_element_mask,\n cutoff_dist, cutoff_angle,\n box\n )\n \n elif selection1_type == 'acceptor':\n triplets, mask = _hbond(\n atoms, selection2, selection1,\n donor_element_mask, acceptor_element_mask,\n cutoff_dist, cutoff_angle,\n box\n )\n \n else:\n raise ValueError(f\"Unkown selection type '{selection1_type}'\")\n\n if single_model:\n # For a atom array (not stack),\n # hbond_mask contains only 'True' values,\n # since all interaction are in the one model\n # -> Simply return triplets without hbond_mask\n return triplets\n else:\n return triplets, mask", "def test_chemical_environment_matches_OE(self):\n # TODO: Move this to test_toolkits, test all available toolkits\n # Create chiral molecule\n toolkit_wrapper = OpenEyeToolkitWrapper()\n molecule = Molecule()\n atom_C = molecule.add_atom(6, 0, False, stereochemistry=\"R\", name=\"C\")\n atom_H = molecule.add_atom(1, 0, False, name=\"H\")\n atom_Cl = molecule.add_atom(17, 0, False, name=\"Cl\")\n atom_Br = molecule.add_atom(35, 0, False, name=\"Br\")\n atom_F = molecule.add_atom(9, 0, False, name=\"F\")\n molecule.add_bond(atom_C, atom_H, 1, False)\n molecule.add_bond(atom_C, atom_Cl, 1, False)\n molecule.add_bond(atom_C, atom_Br, 1, False)\n molecule.add_bond(atom_C, atom_F, 1, False)\n # Test known cases\n matches = molecule.chemical_environment_matches(\n \"[#6:1]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 1 # it should have one tagged atom\n assert set(matches[0]) == set([atom_C])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[#1:2]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 2 # it should have two tagged atoms\n assert set(matches[0]) == set([atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[Cl:1]-[C:2]-[H:3]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 3 # it should have three tagged atoms\n assert set(matches[0]) == set([atom_Cl, atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[*:2]\", toolkit_registry=toolkit_wrapper\n )\n assert len(matches) == 4 # there should be four matches\n for match in matches:\n assert len(match) == 2 # each match should have two tagged atoms\n # Test searching for stereo-specific SMARTS\n matches = molecule.chemical_environment_matches(\n \"[#6@:1](-[F:2])(-[Cl])(-[Br])(-[H])\", toolkit_registry=toolkit_wrapper\n )\n assert len(matches) == 1 # there should be one match\n for match in matches:\n assert len(match) == 2 # each match should have two tagged atoms\n matches = molecule.chemical_environment_matches(\n \"[#6@@:1](-[F:2])(-[Cl])(-[Br])(-[H])\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 0\n ) # this is the wrong stereochemistry, so there shouldn't be any matches", "def test_chemical_environment_matches_OE(self):\n # TODO: Move this to test_toolkits, test all available toolkits\n # Create chiral molecule\n from simtk.openmm.app import element\n\n toolkit_wrapper = OpenEyeToolkitWrapper()\n molecule = Molecule()\n atom_C = 
molecule.add_atom(\n element.carbon.atomic_number, 0, False, stereochemistry=\"R\", name=\"C\"\n )\n atom_H = molecule.add_atom(element.hydrogen.atomic_number, 0, False, name=\"H\")\n atom_Cl = molecule.add_atom(element.chlorine.atomic_number, 0, False, name=\"Cl\")\n atom_Br = molecule.add_atom(element.bromine.atomic_number, 0, False, name=\"Br\")\n atom_F = molecule.add_atom(element.fluorine.atomic_number, 0, False, name=\"F\")\n molecule.add_bond(atom_C, atom_H, 1, False)\n molecule.add_bond(atom_C, atom_Cl, 1, False)\n molecule.add_bond(atom_C, atom_Br, 1, False)\n molecule.add_bond(atom_C, atom_F, 1, False)\n # Test known cases\n matches = molecule.chemical_environment_matches(\n \"[#6:1]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 1 # it should have one tagged atom\n assert set(matches[0]) == set([atom_C])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[#1:2]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 2 # it should have two tagged atoms\n assert set(matches[0]) == set([atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[Cl:1]-[C:2]-[H:3]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 3 # it should have three tagged atoms\n assert set(matches[0]) == set([atom_Cl, atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[*:2]\", toolkit_registry=toolkit_wrapper\n )\n assert len(matches) == 4 # there should be four matches\n for match in matches:\n assert len(match) == 2 # each match should have two tagged atoms", "def bond_atoms(atom_list):\n pass", "def get_bond_connectivity(self):\n m, connectivity = self.owner, []\n for index, i in enumerate(self.rix):\n for j in self.rix[index + 1:]:\n b1 = m.rings[i].bix\n b2 = m.rings[j].bix\n if set(b1).intersection(b2):\n connectivity.append((i, j))\n return tuple(connectivity)", "def create_bonds(self,\n atom = None,\n bond_type = None,\n atom1_symop = None,\n atom2_symop = None,\n standard_res_bond = False):\n assert isinstance(atom, Atom)\n\n if self.altloc is None:\n if atom.altloc is None:\n ## case 1: self has no alt_loc, atom no alt_loc\n self.create_bond(\n atom = atom,\n bond_type = bond_type,\n atom1_symop = atom1_symop,\n atom2_symop = atom2_symop,\n standard_res_bond = standard_res_bond)\n else:\n ## case 2: self.has no alt_loc, atom has alt_loc\n for atmx in atom.altloc.itervalues():\n self.create_bond(\n atom = atmx,\n bond_type = bond_type,\n atom1_symop = atom1_symop,\n atom2_symop = atom2_symop,\n standard_res_bond = standard_res_bond)\n\n\n else:\n if atom.altloc is None:\n ## case 3: self has alt_loc, atom has no alt_loc\n for (alt_loc, atmx) in self.altloc.iteritems():\n atmx.create_bond(\n atom = atom,\n bond_type = bond_type,\n atom1_symop = atom1_symop,\n atom2_symop = atom2_symop,\n standard_res_bond = standard_res_bond)\n\n else:\n ## case 4: self has alt_loc, atom has alt_loc\n for (alt_loc, atmx) in self.altloc.iteritems():\n if atom.altloc.has_key(alt_loc):\n atmx.create_bond(\n atom = atom.altloc[alt_loc],\n bond_type = bond_type,\n atom1_symop = atom1_symop,\n atom2_symop = atom2_symop,\n standard_res_bond = standard_res_bond)", "def find_conn_CXCY(atom, atom_list):\n le_list = []\n for element in 
identify_bonds(atom, atom_list):\n if ((element[0].atom_name == \"CX\") or (element[0].atom_name == \"CY\")):\n if (atom.z - element[0].z > 0):\n le_list.append([element[0], 1])\n else:\n le_list.append([element[0], -1])\n return le_list", "def guess_potentialisation(self, sysargs):\n\n print(\"Guessing potentialisation...\")\n print(\"Copying reference basis...\")\n shutil.copyfile(self.reference_guess_basis_path, os.path.join(os.getcwd(), 'basis'))\n\n sp2_replacement_list = []\n sp2_deletion_list = []\n sp2_carbon_list = []\n sp3_replacement_list = []\n sp3_deletion_list = []\n sp3_carbon_list =[]\n carbon_atoms = [atom for atom in self.coord_list if atom[\"el\"] == 'c']\n\n # Sort through carbons to decide what needs potentialising. Find atoms bonded to each carbon\n for atom in carbon_atoms:\n distanced_atoms = self.order_atoms_by_distance_from(atom['#'])\n nearest_4_distances = [self.measure_atom_atom_dist(atom['#'], distanced_atom['#']) for distanced_atom in\n distanced_atoms[1:5]]\n bonded_distances = [less_than_distance for less_than_distance in nearest_4_distances if\n less_than_distance < self.bond_deciding_distance]\n\n # if 3 bonded atoms, may be sp2, check if they're hydrogens\n if len(bonded_distances) == 3:\n hydrogens_bonded_to_this_atom = [distanced_atom for distanced_atom in distanced_atoms[1:5] if\n distanced_atom['el'] == 'h' and self.measure_atom_atom_dist(atom['#'], distanced_atom['#']) < self.bond_deciding_distance]\n sp2_deletion_list.extend([hydrogen['#'] for hydrogen in hydrogens_bonded_to_this_atom])\n sp2_replacement_list.append(str(atom['#']))\n sp2_carbon_list.append(atom)\n\n # if 4 bonded atoms, may be sp3, check if they're hydrogens\n elif len(bonded_distances) == 4:\n hydrogens_bonded_to_this_atom = [distanced_atom for distanced_atom in distanced_atoms[1:5] if\n distanced_atom['el'] == 'h' and self.measure_atom_atom_dist(atom['#'], distanced_atom['#']) < self.bond_deciding_distance]\n if len(hydrogens_bonded_to_this_atom) == 3:\n sp3_replacement_list.extend([str(hydrogen['#']) for hydrogen in hydrogens_bonded_to_this_atom])\n sp3_deletion_list.extend([hydrogen['#'] for hydrogen in hydrogens_bonded_to_this_atom])\n sp3_carbon_list.append(atom)\n\n log_file = open('pseudification.log', 'w+')\n log_file.writelines(\n 'sp2 carbon indices: %s \\nsp3 carbon indices: %s \\n' % (\n ','.join(str(carbon['#']) for carbon in sp2_carbon_list),\n ','.join(str(carbon['#']) for carbon in sp3_carbon_list)\n ))\n\n sp2_coord_command = 'mn sp2 %s' % (','.join(sp2_replacement_list))\n print(\"sp2 command: %s\" % sp2_coord_command)\n sp3_coord_command = 'mn sp3 %s' % (','.join(sp3_replacement_list))\n print(\"sp3 command: %s\" % sp3_coord_command)\n\n if 'nosp3' not in sysargs:\n self.pseudopotentialise_ethane_like_molecule(sp3_coord_command.split(), execute_deletion=False)\n self.pseudopotentialise_molecule(sp2_coord_command.split(), execute_deletion=False)\n\n self.delete_specified_atoms(sp2_deletion_list + sp3_deletion_list)\n\n print(\"Identifying 2-electron sp2 carbons...\")\n # Now need to work out where the 2e sp2 carbons are\n self.coord_list = []\n self.read_coords()\n carbon_atoms = [atom for atom in self.coord_list if atom[\"el\"] == 'c']\n sp2_pseudocarbon_list = []\n\n for atom in carbon_atoms:\n carbon_pseudos = self.identify_pseudocarbon_potentials(atom['#'])\n # if 6 atoms within pseudo-distance this is an sp2 pseudo-carbon\n if len(carbon_pseudos) == 6:\n sp2_pseudocarbon_list.append(atom)\n print(\"Re-discovered %s sp2 carbons.\" % 
str(len(sp2_pseudocarbon_list)))\n\n # Now check for ncore=4 sp2 pseudocarbons\n pseudopotential_hashes_to_delete = []\n for atom in sp2_pseudocarbon_list:\n distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n carbons_bonded_to_this_atom = [distanced_atom for distanced_atom in distanced_carbon_list[1:5] if\n self.measure_atom_atom_dist(atom['#'],\n distanced_atom[\n '#']) < self.bond_deciding_distance]\n print(\"Carbons bonded to atom %s: %s\" % (str(atom['#']),\n str([carbon['#'] for carbon in carbons_bonded_to_this_atom])))\n\n for carbon_bonded_to_this_atom in carbons_bonded_to_this_atom:\n if carbon_bonded_to_this_atom not in sp2_pseudocarbon_list:\n def distance_from(list_atom):\n return self.measure_atom_atom_dist(carbon_bonded_to_this_atom['#'], list_atom['#'])\n carbon_pseudos = self.identify_pseudocarbon_potentials(atom['#'])\n # find pseudos closest to the other carbon\n pseudos_distanced_from_sp2_2e = sorted(carbon_pseudos, key=distance_from)\n pseudopotential_hashes_to_delete.append(pseudos_distanced_from_sp2_2e[0]['#'])\n pseudopotential_hashes_to_delete.append(pseudos_distanced_from_sp2_2e[1]['#'])\n\n self.delete_specified_atoms(pseudopotential_hashes_to_delete)\n\n # Read final coordinates\n self.coord_list = []\n self.read_coords()\n carbon_atoms = [atom for atom in self.coord_list if atom[\"el\"] == 'c']\n sp2_pseudocarbon_list = []\n sp2_2e_pseudocarbon_list = []\n sp2_2e_pseudohydrogen_list = []\n sp3_pseudocarbon_list = []\n\n for atom in carbon_atoms:\n carbon_pseudos = self.identify_pseudocarbon_potentials(atom['#'])\n\n # if 3 atoms within pseudo-distance this is an sp3 pseudo-carbon\n if len(carbon_pseudos) == 3:\n sp3_pseudocarbon_list.append(atom)\n\n # if 4 atoms within pseudo-distance this is an sp2 2e pseudo-carbon\n elif len(carbon_pseudos) == 4:\n sp2_2e_pseudocarbon_list.append(atom)\n sp2_2e_pseudohydrogen_list.extend(carbon_pseudos)\n\n # if 6 atoms within pseudo-distance this is an sp2 pseudo-carbon\n elif len(carbon_pseudos) == 6:\n sp2_pseudocarbon_list.append(atom)\n\n\n log_file.writelines(\n 'sp2 pseudocarbon indices: %s \\nsp3 pseudocarbon indices: %s\\nsp2 2e pseudocarbon indices: %s\\nsp2 2e pseudohydrogen indices: %s\\n' % (\n ','.join(str(carbon['#']) for carbon in sp2_pseudocarbon_list),\n ','.join(str(carbon['#']) for carbon in sp3_pseudocarbon_list),\n ','.join(str(carbon['#']) for carbon in sp2_2e_pseudocarbon_list),\n ','.join(str(carbon['#']) for carbon in sp2_2e_pseudohydrogen_list)\n ))\n\n # Need to supply potentials to atoms\n define_cmds_path = 'define_add_pseudos'\n with open(os.path.join(define_cmds_path), 'w') as var_file:\n var_file.writelines(define_cmds % (\n # sp2 potentials\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_pseudocarbon_list], 'b', self.pseudo_carbon_basis),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_pseudocarbon_list], 'ecp', self.sp2_carbon_ecp),\n self.supply_ecps_bases_to_define(self.sp2_pseudo_element, 'b', 'none'),\n self.supply_ecps_bases_to_define(self.sp2_pseudo_element, 'ecp', self.sp2_hydrogen_ecp),\n # sp3 potentials\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp3_pseudocarbon_list], 'b', self.pseudo_carbon_basis),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp3_pseudocarbon_list], 'ecp', self.sp3_carbon_ecp),\n self.supply_ecps_bases_to_define(self.sp3_pseudo_element, 'b', 'none'),\n self.supply_ecps_bases_to_define(self.sp3_pseudo_element, 'ecp', self.sp3_hydrogen_ecp),\n # sp2 2e 
potentials\n self.supply_ecps_bases_to_define(self.sp2_pseudo_element, 'b', 'none'),\n self.supply_ecps_bases_to_define([hydrogen['#'] for hydrogen in sp2_2e_pseudohydrogen_list], 'ecp', self.sp2_2e_hydrogen_ecp),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_2e_pseudocarbon_list], 'b', self.pseudo_carbon_basis),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_2e_pseudocarbon_list], 'ecp', self.sp2_2e_carbon_ecp),\n ))\n\n self.run_define('define_add_pseudos')", "def cleaveSurfAtom(entry,max_bonds=1,supercell=2,group_structs=True):\n \n \n struct = copy.deepcopy(entry[0])\n results = getStructureType(entry,supercell=supercell,returnSS=True)\n \n # If the crystal is 3D\n if results[0]=='conventional':\n struct = copy.deepcopy(entry[0])\n og_binary_matrix = getDistMat(struct,entry[1]-1)\n og_num_bonds = sum(sum(np.array(og_binary_matrix)))\n struct.make_supercell(supercell)\n binary_matrix= getDistMat(struct,entry[1]-1)\n bonds = []\n \n # Get list of bonded atoms\n for i in range(len(og_binary_matrix)):\n for pair in [(i,j) for j in range(i+1,len(og_binary_matrix)) \n if og_binary_matrix[i][j]==1]:\n bonds.append(pair)\n allCombos = []\n combNum = 0\n \n # Get list of all combinations of bonds\n for i in range(max_bonds+1):\n for com in list(itertools.combinations(bonds,i)):\n allCombos.append(com)\n combNum+=1\n\n combos = allCombos\n jjj=0\n all_structs = []\n \n # For each bond combination\n for combo in combos:\n broken=0\n jjj+=1\n modified_matrix = np.array(binary_matrix)\n for pair in combo:\n i,j = pair\n i=i*supercell**3\n j=j*supercell**3\n # Break bonds in the loop\n for shift in range(supercell**3):\n for shift2 in range(supercell**3):\n modified_matrix[i+shift][j+shift2]=0\n modified_matrix[j+shift][i+shift2]=0\n\n new_num_bonds=sum(sum(modified_matrix))\n broken=int(og_num_bonds-new_num_bonds) \n seed_index=0\n old_cluster_size=len(buildNetwork(binary_matrix,seed_index))/supercell**3\n cluster = buildNetwork(modified_matrix,seed_index)\n hetero=False\n # If the new set of atoms is not empty\n if cluster!=set():\n scale = len(cluster)/old_cluster_size\n compo = Composition.from_dict(Counter([struct[l].specie.name \n for l in list(cluster)]))\n if compo.reduced_formula != struct.composition.reduced_formula:\n # i.e. 
the cluster does not have the same composition\n # as the overall crystal; therefore there are other\n # clusters of varying composition.\n hetero = True\n motiif = getDim(scale,supercell)\n\n # If formula of new network matches the original cell\n if not hetero:\n if motiif=='layered':\n cluster_sites = [struct.sites[n] for n in cluster]\n all_structs.append(struct.from_sites(cluster_sites))\n\n if group_structs:\n matched = [x[0] for x in \n StructureMatcher(stol=1E-6,primitive_cell=False,\n scale=False).group_structures(all_structs)]\n else:\n matched=all_structs\n return(matched) \n\n else:\n print('Material is does not have a 3D motiif')\n print('Try increasing radii tolerance if appropriate')\n return([])", "def find_neighbors(self):\n #checked#\n ###your code here###\n for address in self.homes:\n for i in range(-1, 2):\n for j in range(-1,2):\n neighbor_address=(address[0]+i, address[1]+j)\n if neighbor_address in self.homes and neighbor_address!=address:\n self.homes[address].neighbors.append(self.homes[neighbor_address])", "def __determineElement(self, atom):\n\n\t\tc1 = atom.name[0:1]\n\t\tc2 = atom.name[1:2]\n\n\t\t# virtual atoms\n\t\tif c1 == \"V\":\n\t\t\tatom.element = \"V\"\n\t\t\tatom.radius = 0.0\n\t\t\treturn\n\n\t\tfor element in self.periodic.element_name:\n\t\t\tif c2 == element:\n\t\t\t\tatom.element = c2\n\t\t\t\tatom.radius = self.periodic.element_radius[c2]\n\t\t\t\treturn\n\n\t\tfor element in self.periodic.element_name:\n\t\t\tif c1 == element:\n\t\t\t\tatom.element = c1\n\t\t\t\tatom.radius = self.periodic.element_radius[c1]\n\t\t\t\treturn\n\n\t\tif atom.name in _molelement.keys():\n\t\t\tatom.element = _molelement[atom.name]\n\t\t\tatom.radius = self.periodic.element_radius[atom.element]", "def build_bonds(self):\n shape_prime = np.array([self.shape[0]-1,self.shape[1]-1,self.shape[2]-1])\n zeros = np.array([0,0,0])\n for i in range(self.shape[0]):\n for j in range(self.shape[1]):\n for k in range(self.shape[2]):\n for b,bond in enumerate(self.cell.bonds):\n newbond = copy.deepcopy(bond)\n newbond.cell1 += [i,j,k]\n newbond.cell2 += [i,j,k]\n #ToDo make a function to shorten those lines\n if np.prod(newbond.cell1 <= shape_prime) and np.prod(newbond.cell2<=shape_prime) and np.prod(zeros <=newbond.cell1) and np.prod(zeros <= newbond.cell2):\n newbond.coordinate1 = self.sites[newbond.cell1[0],newbond.cell1[1],newbond.cell1[2],newbond.site1].coordinate\n newbond.coordinate2 = self.sites[newbond.cell2[0],newbond.cell2[1],newbond.cell2[2],newbond.site2].coordinate\n self.bonds.append(newbond)", "def get_contact_atoms(self,cutoff=8.5,chain1='A',chain2='B',\n extend_to_residue=False,only_backbone_atoms=False,\n excludeH=False,return_only_backbone_atoms=False,return_contact_pairs=False):\n\n # xyz of the chains\n xyz1 = np.array(self.get('x,y,z',chainID=chain1))\n xyz2 = np.array(self.get('x,y,z',chainID=chain2))\n\n # index of b\n index2 = self.get('rowID',chainID=chain2)\n\n # resName of the chains\n resName1 = np.array(self.get('resName',chainID=chain1))\n #resName2 = np.array(self.get('resName',chainID=chain2))\n\n # atomnames of the chains\n atName1 = np.array(self.get('name',chainID=chain1))\n atName2 = np.array(self.get('name',chainID=chain2))\n\n\n # loop through the first chain\n # TO DO : loop through the smallest chain instead ...\n index_contact_1,index_contact_2 = [],[]\n index_contact_pairs = {}\n\n for i,x0 in enumerate(xyz1):\n\n # compute the contact atoms\n contacts = np.where(np.sqrt(np.sum((xyz2-x0)**2,1)) <= cutoff )[0]\n\n # exclude the H if 
required\n if excludeH and atName1[i][0] == 'H':\n continue\n\n if len(contacts)>0 and any([not only_backbone_atoms, atName1[i] in self.backbone_type]):\n\n # the contact atoms\n index_contact_1 += [i]\n index_contact_2 += [index2[k] for k in contacts if ( any( [atName2[k] in self.backbone_type, not only_backbone_atoms]) and not (excludeH and atName2[k][0]=='H') ) ]\n\n # the pairs\n pairs = [index2[k] for k in contacts if any( [atName2[k] in self.backbone_type, not only_backbone_atoms] ) and not (excludeH and atName2[k][0]=='H') ]\n if len(pairs) > 0:\n index_contact_pairs[i] = pairs\n\n # get uniques\n index_contact_1 = sorted(set(index_contact_1))\n index_contact_2 = sorted(set(index_contact_2))\n\n # if no atoms were found\n if len(index_contact_1)==0:\n print('Warning : No contact atoms detected in pdb2sql')\n\n # extend the list to entire residue\n if extend_to_residue:\n index_contact_1,index_contact_2 = self._extend_contact_to_residue(index_contact_1,index_contact_2,only_backbone_atoms)\n\n\n # filter only the backbone atoms\n if return_only_backbone_atoms and not only_backbone_atoms:\n\n # get all the names\n # there are better ways to do that !\n atNames = np.array(self.get('name'))\n\n # change the index_contacts\n index_contact_1 = [ ind for ind in index_contact_1 if atNames[ind] in self.backbone_type ]\n index_contact_2 = [ ind for ind in index_contact_2 if atNames[ind] in self.backbone_type ]\n\n # change the contact pairs\n tmp_dict = {}\n for ind1,ind2_list in index_contact_pairs.items():\n\n if atNames[ind1] in self.backbone_type:\n tmp_dict[ind1] = [ind2 for ind2 in ind2_list if atNames[ind2] in self.backbone_type]\n\n index_contact_pairs = tmp_dict\n\n # not sure that's the best way of dealing with that\n if return_contact_pairs:\n return index_contact_pairs\n else:\n return index_contact_1,index_contact_2", "def _compute_hydrogen_bonds(self, entity):\n\n for (aa1, aa2) in combinations(entity, 2):\n\n # do not consider this pair if the number of atoms of the\n # residues is not sufficient\n if not (validate(aa1) and validate(aa2)):\n continue\n\n # stores both potentials between aa1 and aa2\n potentials = []\n\n segid1 = get_pos(aa1)\n segid2 = get_pos(aa2)\n\n # distance\n dist = np.abs(segid1 - segid2)\n\n # take care of the minimal sequence distance criterion\n # between aa1 and aa2\n if dist < self.min_seq_distance:\n continue\n\n # extract atoms from both amino acids\n atoms = [aa1.get_unpacked_list(),\n aa2.get_unpacked_list()]\n\n for i in range(0, len(atoms)):\n c_carboxyl = np.array(atoms[i][2].get_coord())\n o_carboxyl = np.array(atoms[i][3].get_coord())\n\n nitrogen = np.array(atoms[1-i][0].get_coord())\n hydrogen = None\n for atom in atoms[1-i]:\n if atom.get_name().strip() == 'H':\n hydrogen = np.array(atom.get_coord())\n\n if hydrogen is None:\n potentials.append(0)\n continue\n\n # compute relevant distances\n r_ON = np.linalg.norm(o_carboxyl - nitrogen)\n r_CH = np.linalg.norm(c_carboxyl - hydrogen)\n r_OH = np.linalg.norm(o_carboxyl - hydrogen)\n r_CN = np.linalg.norm(c_carboxyl - nitrogen)\n\n # compute potential\n pot = potential(r_ON, r_CH, r_OH, r_CN)\n\n potentials.append(pot if pot < co.HBOND_THRESHOLD else 0)\n\n # we return this as an result if at least one potential\n # is below the threshold , so they are not both 0\n if sum(potentials) != 0:\n yield (aa1, aa2, potentials[0], potentials[1])", "def check_neighbours(self, grid):\n if self.bomba:\n self.bombs_around = -1\n return\n\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i 
= self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n\n if neighbor.bomba:\n total += 1\n \n self.bombs_around = total", "def iter_bonds(self):\n visited = {}\n for atm in self.iter_atoms():\n for bond in atm.iter_bonds():\n if not visited.has_key(bond):\n yield bond\n visited[bond] = True", "def ecfp(mol, radius=2):\n\n atom_ids = invariants(mol)\n\n fp = {}\n for i in atom_ids.values():\n fp[i] = fp.get(i, 0) + 1\n\n neighborhoods = []\n atom_neighborhoods = [ len(mol.bonds) * bitarray('0') for a in mol.atoms]\n dead_atoms = len(mol.atoms) * bitarray('0')\n\n for layer in range(1, radius+1):\n round_ids = {}\n round_atom_neighborhoods = copy.deepcopy(atom_neighborhoods)\n neighborhoods_this_round = []\n\n for a in mol.atoms:\n if dead_atoms[a.index]: continue\n\n nbsr = []\n for b in a.bonds:\n round_atom_neighborhoods[a.index][b.index] = True\n oidx = b.xatom(a).index\n round_atom_neighborhoods[a.index] |= atom_neighborhoods[oidx]\n nbsr.append((b.bondtype, atom_ids[oidx]))\n\n nbsr = sorted(nbsr)\n nbsr = [item for sublist in nbsr for item in sublist]\n nbsr.insert(0, atom_ids[a.index])\n nbsr.insert(0, layer)\n\n round_ids[a.index] = gen_hash(nbsr)\n neighborhoods_this_round.append(\n (round_atom_neighborhoods[a.index], round_ids[a.index], a.index)\n )\n\n for lst in neighborhoods_this_round:\n if lst[0] not in neighborhoods:\n fp[lst[1]] = fp.get(lst[1], 0) + 1\n neighborhoods.append(lst[0])\n else:\n dead_atoms[lst[2]] = True\n\n atom_ids = round_ids\n atom_neighborhoods = copy.deepcopy(round_atom_neighborhoods)\n return fp", "def get_atom_connectivity(self):\n m, connectivity = self.owner, []\n for index, i in enumerate(self.rix):\n for j in self.rix[index + 1:]:\n a1 = m.rings[i].aix\n a2 = m.rings[j].aix\n if set(a1).intersection(a2):\n connectivity.append((i, j))\n return tuple(connectivity)", "def check_all_neighbors_present_local(duthosts, per_host, asic, neighbors, all_cfg_facts,\n nbrhosts, nbr_macs, check_nbr_state=True):\n cfg_facts = all_cfg_facts[per_host.hostname][asic.asic_index]['ansible_facts']\n neighs = cfg_facts['BGP_NEIGHBOR']\n\n fail_cnt = 0\n\n # Grab dumps of the asicdb, appdb, voqdb, and arp table\n asicdb = AsicDbCli(asic)\n asic_dump = asicdb.dump_neighbor_table()\n\n appdb = AppDbCli(asic)\n app_dump = appdb.dump_neighbor_table()\n\n encaps = {}\n\n if per_host.is_multi_asic:\n arptable = per_host.switch_arptable(namespace=asic.namespace)['ansible_facts']\n else:\n arptable = per_host.switch_arptable()['ansible_facts']\n\n if len(duthosts.supervisor_nodes) == 1:\n voqdb = VoqDbCli(duthosts.supervisor_nodes[0])\n voq_dump = voqdb.dump_neighbor_table()\n elif per_host.is_multi_asic:\n # look on linecard for pizzabox multiasic\n voqdb = VoqDbCli(per_host)\n voq_dump = voqdb.dump_neighbor_table()\n else:\n voq_dump = {}\n\n for neighbor in neighbors:\n nbr_vm = get_vm_with_ip(neighbor, nbrhosts)\n neigh_mac = nbr_macs[nbr_vm['vm']][nbr_vm['port']]\n local_ip = neighs[neighbor]['local_addr']\n local_port = get_port_by_ip(cfg_facts, local_ip)\n\n sysport_info = {'slot': cfg_facts['DEVICE_METADATA']['localhost']['hostname'],\n 'asic': cfg_facts['DEVICE_METADATA']['localhost']['asic_name']}\n\n # Validate the asic db entries\n for entry in asic_dump:\n matchstr = '\"%s\",' % neighbor\n if matchstr in entry:\n\n if neigh_mac.lower() != asic_dump[entry]['value']['SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS'].lower():\n logger.error(\"Asic neighbor macs for %s do not match: %s != %s\", neighbor, 
neigh_mac.lower(),\n asic_dump[entry]['value']['SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS'].lower())\n fail_cnt += 1\n else:\n logger.debug(\"Asic neighbor macs for %s match: %s == %s\", neighbor, neigh_mac.lower(),\n asic_dump[entry]['value']['SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS'].lower())\n encaps[neighbor] = asic_dump[entry]['value']['SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX']\n break\n else:\n logger.error(\"Did not find neighbor in asictable for IP: %s\" % neighbor)\n fail_cnt += 1\n\n # Validate the APP db entries\n for entry in app_dump:\n matchstr = ':%s' % neighbor\n if entry.endswith(matchstr):\n if neigh_mac.lower() != app_dump[entry]['value']['neigh'].lower():\n logger.error(\"App neighbor macs for %s do not match: %s != %s\", neighbor, neigh_mac.lower(),\n app_dump[entry]['value']['neigh'].lower())\n fail_cnt += 1\n else:\n logger.debug(\"App neighbor macs for %s match: %s == %s\", neighbor, neigh_mac.lower(),\n app_dump[entry]['value']['neigh'].lower())\n\n pytest_assert(\":{}:\".format(local_port) in entry, \"Port for %s does not match\" % entry)\n break\n else:\n logger.error(\"Did not find neighbor in app for IP: %s\" % neighbor)\n fail_cnt += 1\n\n # Validate the arp table entries\n if check_nbr_state:\n check_host_arp_table(per_host, asic, neighbor, neigh_mac, local_port, 'REACHABLE', arptable=arptable)\n else:\n check_host_arp_table(per_host, asic, neighbor, neigh_mac, local_port, None, arptable=arptable)\n\n # supervisor checks\n for entry in voq_dump:\n if entry.endswith('|%s' % neighbor) or entry.endswith(':%s' % neighbor):\n\n if \"portchannel\" in local_port.lower():\n slotname = cfg_facts['DEVICE_METADATA']['localhost']['hostname']\n asicname = cfg_facts['DEVICE_METADATA']['localhost']['asic_name']\n else:\n slotname = sysport_info['slot']\n asicname = sysport_info['asic']\n\n logger.debug(\"Neigh key: %s, slotnum: %s\", entry, slotname)\n pytest_assert(\"|%s|\" % slotname in entry,\n \"Slot for %s does not match %s\" % (entry, slotname))\n pytest_assert(\"|%s:\" % local_port in entry or \"|%s|\" % local_port in entry,\n \"Port for %s does not match %s\" % (entry, local_port))\n pytest_assert(\"|%s|\" % asicname in entry,\n \"Asic for %s does not match %s\" % (entry, asicname))\n\n pytest_assert(voq_dump[entry]['value']['neigh'].lower() == neigh_mac.lower(),\n \"Voq: neighbor: %s mac does not match: %s\" %\n (neighbor, voq_dump[entry]['value']['neigh'].lower()))\n pytest_assert(voq_dump[entry]['value']['encap_index'].lower() == encaps[neighbor],\n \"Voq: encap: %s mac does not match: %s\" %\n (neighbor, voq_dump[entry]['value']['encap_index'].lower()))\n break\n else:\n logger.error(\"Neighbor: %s on slot: %s, asic: %s not present in voq\",\n neighbor, sysport_info['slot'], sysport_info['asic'])\n fail_cnt += 1\n\n logger.info(\"Local %s/%s and chassisdb neighbor validation of %s is successful (mac: %s, idx: %s)\",\n per_host.hostname, asic.asic_index, neighbor, neigh_mac, encaps[neighbor])\n\n return {'encaps': encaps, 'fail_cnt': fail_cnt}", "def check_connected(chosen_atom, identified_bonds):\n check = False\n for bond in identified_bonds:\n if ((\"E1AE1A\" in str(get_bond_id(chosen_atom, bond[0])[0])) or (\"C1AC1A\" in str(get_bond_id(chosen_atom, bond[0])[0])) or (\"H1AH1A\" in str(get_bond_id(chosen_atom, bond[0])[0])) or (\"P1AP1A\" in str(get_bond_id(chosen_atom, bond[0])[0]))):\n check = True\n return check", "def iter_bonds(self):\n visited = {}\n for atm in self.iter_atoms():\n for bond in atm.iter_bonds():\n if visited.has_key(bond):\n continue\n yield 
bond\n visited[bond] = True", "def iter_bonds(self):\n visited = {}\n for atm in self.iter_atoms():\n for bond in atm.iter_bonds():\n if visited.has_key(bond):\n continue\n yield bond\n visited[bond] = True", "def iter_bonds(self):\n visited = {}\n for atm in self.iter_atoms():\n for bond in atm.iter_bonds():\n if visited.has_key(bond):\n continue\n yield bond\n visited[bond] = True", "def get_core_bonds(core_xyz, inp):\n core_bonds = []\n\n if inp.core_en:\n dists = cdist(core_xyz, core_xyz)\n if inp.core_shape != \"shell\":\n logger.info(\"\\tBuilding elastic network based on first neighbors...\")\n close_dists = dists <= (2*inp.bead_radius+0.01)\n for i in range(len(dists)):\n ndx1 = i*1\n close_ndxs = np.where(close_dists[i])[0]\n if len(close_ndxs) == 1:\n dists_sorted = np.argsort(dists[i])\n close_ndxs = dists_sorted[[1,2,3,4,5,6]]\n for ndx2 in close_ndxs:\n if ndx2 != i and [ndx1, ndx2] not in core_bonds and [ndx2, ndx1] not in core_bonds:\n core_bonds.append([ndx1, ndx2])\n\n else:\n logger.info(\"\\tBuilding elastic network based on six nearest neighbours and one farthest neighbour...\")\n neighboring_bonds = []\n antipodal_bonds = []\n dists_sorted = np.argsort(dists, axis=1)\n for i in range(len(dists)):\n ndx1 = i*1\n close_ndxs = dists_sorted[i,[1,2,3,4,5,6]]\n for ndx2 in close_ndxs:\n if ndx2 != i and [ndx1, ndx2] not in core_bonds and [ndx2, ndx1] not in core_bonds:\n neighboring_bonds.append([ndx1, ndx2])\n antipodal_ndx = dists_sorted[i,-1]\n if antipodal_ndx != i and [ndx1, antipodal_ndx] not in core_bonds and [antipodal_ndx, ndx1] not in core_bonds:\n antipodal_bonds.append([ndx1, antipodal_ndx, \"antipodal\"])\n core_bonds = neighboring_bonds + antipodal_bonds\n\n return core_bonds", "def check_all_neighbors_present(duthosts, nbrhosts, all_cfg_facts, nbr_macs, check_nbr_state=True):\n for per_host in duthosts.frontend_nodes:\n for asic in per_host.asics:\n logger.info(\"Checking local neighbors on host: %s, asic: %s\", per_host.hostname, asic.asic_index)\n cfg_facts = all_cfg_facts[per_host.hostname][asic.asic_index]['ansible_facts']\n if 'BGP_NEIGHBOR' in cfg_facts:\n neighs = cfg_facts['BGP_NEIGHBOR']\n else:\n logger.info(\"No local neighbors for host: %s/%s, skipping\", per_host.hostname, asic.asic_index)\n continue\n\n dump_and_verify_neighbors_on_asic(duthosts, per_host, asic, list(neighs.keys()),\n nbrhosts, all_cfg_facts, nbr_macs, check_nbr_state=check_nbr_state)", "def test_is_in_ring(self):\n molecule = Molecule.from_smiles(\"c1ccccc1\")\n\n for atom in molecule.atoms:\n if atom.atomic_number == 6:\n assert atom.is_in_ring()\n\n for bond in molecule.bonds:\n if 1 in (bond.atom1.atomic_number, bond.atom2.atomic_number):\n continue\n assert bond.is_in_ring()", "def get_interactions(list_atoms1, list_atoms2, dist):\n beta_carbons1 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms1))\n beta_carbons2 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms2))\n ns = NeighborSearch(beta_carbons1)\n interactions = []\n\n for atom in beta_carbons2:\n interact = ns.search(atom.get_coord(), dist)\n interactions.extend(\n [tuple(sorted([str(atom.get_parent().resname), str(x.get_parent().resname)])) for x in interact])\n return interactions", "def test_ethane(self):\n bond_topology = text_format.Parse(\"\"\"\n atoms: ATOM_C\n atoms: ATOM_C\n bonds: {\n atom_a: 0,\n atom_b: 1,\n bond_type: BOND_SINGLE\n }\n\"\"\", dataset_pb2.BondTopology())\n cc = text_format.Parse(\"\"\"\n atoms: ATOM_C\n atoms: ATOM_C\n\"\"\", dataset_pb2.BondTopology())\n scores = 
np.array([0.1, 1.1, 2.1, 3.1], dtype=np.float32)\n bonds_to_scores = {(0, 1): scores}\n mol = smu_molecule.SmuMolecule(cc, bonds_to_scores)\n state = mol.generate_search_state()\n self.assertEqual(len(state), 1)\n self.assertEqual(state, [[0, 1, 2, 3]])\n\n for i, s in enumerate(itertools.product(*state)):\n res = mol.place_bonds(s)\n self.assertIsNotNone(res)\n self.assertAlmostEqual(res.score, scores[i])", "def _get_neighbours(kmer):\n assert (is_dna(kmer))\n bases = 'ACTG'\n result = set()\n for i in range(len(kmer)):\n for base in bases:\n result.add(kmer[:i] + base + kmer[(i + 1):])\n return result", "def check_one_neighbor_present(duthosts, per_host, asic, neighbor, nbrhosts, all_cfg_facts):\n cfg_facts = all_cfg_facts[per_host.hostname][asic.asic_index]['ansible_facts']\n\n neighs = cfg_facts['BGP_NEIGHBOR']\n inband_info = get_inband_info(cfg_facts)\n local_ip = neighs[neighbor]['local_addr']\n\n if local_ip == inband_info['ipv4_addr'] or local_ip == inband_info['ipv6_addr']:\n # skip inband neighbors\n return\n\n # Check neighbor on local linecard\n local_port = get_port_by_ip(cfg_facts, local_ip)\n if local_port is None:\n logger.error(\"Did not find port for this neighbor %s, must skip\", local_ip)\n return\n\n neigh_mac = get_neighbor_info(neighbor, nbrhosts)['mac']\n if neigh_mac is None:\n logger.error(\"Could not find neighbor MAC, must skip. IP: %s, port: %s\", local_ip, local_port)\n\n local_dict = check_local_neighbor(per_host, asic, neighbor, neigh_mac, local_port)\n logger.info(\"Local_dict: %s\", local_dict)\n\n # Check the same neighbor entry on the supervisor nodes\n slotname = cfg_facts['DEVICE_METADATA']['localhost']['hostname']\n asicname = cfg_facts['DEVICE_METADATA']['localhost']['asic_name']\n\n if per_host.is_multi_asic and len(duthosts.supervisor_nodes) == 0:\n check_voq_neighbor_on_sup(per_host, slotname, asicname, local_port,\n neighbor, local_dict['encap_index'], neigh_mac)\n else:\n for sup in duthosts.supervisor_nodes:\n check_voq_neighbor_on_sup(sup, slotname, asicname, local_port,\n neighbor, local_dict['encap_index'], neigh_mac)\n\n # Check the neighbor entry on each remote linecard\n for rem_host in duthosts.frontend_nodes:\n\n for rem_asic in rem_host.asics:\n if rem_host == per_host and rem_asic == asic:\n # skip remote check on local host\n continue\n rem_cfg_facts = all_cfg_facts[rem_host.hostname][rem_asic.asic_index]['ansible_facts']\n remote_inband_info = get_inband_info(rem_cfg_facts)\n if remote_inband_info == {}:\n logger.info(\"No inband configuration on this asic: %s/%s, will be skipped.\", rem_host.hostname,\n rem_asic.asic_index)\n continue\n remote_inband_mac = get_sonic_mac(rem_host, rem_asic.asic_index, remote_inband_info['port'])\n check_voq_remote_neighbor(rem_host, rem_asic, neighbor, neigh_mac, remote_inband_info['port'],\n local_dict['encap_index'], remote_inband_mac)", "def get_neighbors(self, atom):\n return self._graph.get_connected_vertices(atom)", "def is_three_memebered_ring_torsion(torsion):\n # A set of atom indices for the atoms in the torsion.\n torsion_atom_indices = set(a.molecule_atom_index for a in torsion)\n\n # Collect all the bonds involving exclusively atoms in the torsion.\n bonds_by_atom_idx = {i: set() for i in torsion_atom_indices}\n for atom in torsion:\n for bond in atom.bonds:\n # Consider the bond only if both atoms are in the torsion.\n if (\n bond.atom1_index in torsion_atom_indices\n and bond.atom2_index in torsion_atom_indices\n ):\n bonds_by_atom_idx[bond.atom1_index].add(bond.atom2_index)\n 
bonds_by_atom_idx[bond.atom2_index].add(bond.atom1_index)\n\n # Find the central atom, which is connected to all other atoms.\n atom_indices = [i for i in torsion_atom_indices if len(bonds_by_atom_idx[i]) == 3]\n if len(atom_indices) != 1:\n return False\n central_atom_idx = atom_indices[0]\n\n # Find the atom outside the ring.\n atom_indices = [i for i in torsion_atom_indices if len(bonds_by_atom_idx[i]) == 1]\n if (\n len(atom_indices) != 1\n or central_atom_idx not in bonds_by_atom_idx[atom_indices[0]]\n ):\n return False\n outside_atom_idx = atom_indices[0]\n\n # Check that the remaining two atoms are non-central atoms in the membered ring.\n atom1, atom2 = [\n i for i in torsion_atom_indices if i not in [central_atom_idx, outside_atom_idx]\n ]\n # The two atoms are bonded to each other.\n if atom2 not in bonds_by_atom_idx[atom1] or atom1 not in bonds_by_atom_idx[atom2]:\n return False\n # Check that they are both bonded to the central atom and none other.\n for atom_idx in [atom1, atom2]:\n if (\n central_atom_idx not in bonds_by_atom_idx[atom_idx]\n or len(bonds_by_atom_idx[atom_idx]) != 2\n ):\n return False\n\n # This is a torsion including a three-membered ring.\n return True", "def cleaveSurfBond(entry,max_bonds=1,supercell=2,group_structs=True,prec=1E-4):\n \n \n struct = copy.deepcopy(entry[0])\n results = getStructureType(entry,supercell=supercell,returnSS=True)\n \n # Proceed only if the structure is classified as periodic\n # in all directions\n if results[0]=='conventional':\n struct.make_supercell(supercell)\n binary_matrix= getDistMat(struct,entry[1]-1)\n og_num_bonds = sum(sum(np.array(binary_matrix)))/2\n \n # Get dictionary of directional bonds in the system, \n # and the associated atomic species\n bond_dir = getBondVectors(struct,entry[1]-1,prec)\n\n \n # Create the list of bonds to be broken\n all_structs=[]\n combos=[]\n for s1 in bond_dir:\n for s2 in bond_dir[s1]:\n for cleave in bond_dir[s1][s2]: \n combos.append(cleave[1])\n \n # Create pairings of bonds to be broken, up to \n # max_bonds number of bonds\n \n final_combos=[]\n for i in range(1,max_bonds+1):\n for mix in list(itertools.combinations(combos,max_bonds)):\n final_combos.append(mix)\n seed_index=0\n old_cluster_size=len(buildNetwork(binary_matrix,seed_index))/supercell**3\n for combo in final_combos:\n modified_matrix = np.array(binary_matrix)\n for sett in combo:\n for pair in sett:\n i,j = pair\n modified_matrix[i][j]=0\n modified_matrix[j][i]=0\n new_num_bonds=sum(sum(modified_matrix))/2\n \n # Number of bonds broken in the search. Not necessarily\n # the number of bonds broken to cleave the surface\n \n broken=int(og_num_bonds-new_num_bonds)\n \n cluster = buildNetwork(modified_matrix,seed_index)\n hetero=False\n if cluster!=set():\n scale = len(cluster)/old_cluster_size\n compo = Composition.from_dict(Counter([struct[l].specie.name \n for l in list(cluster)]))\n if compo.reduced_formula != struct.composition.reduced_formula:\n # i.e. 
the cluster does not have the same composition\n # as the overall crystal; therefore there are other\n # clusters of varying composition.\n hetero = True\n motiif = getDim(scale,supercell)\n\n if not hetero:\n if motiif=='layered':\n cluster_sites = [struct.sites[n] for n in cluster]\n all_structs.append(struct.from_sites(cluster_sites))\n\n if group_structs:\n matched = [x[0] for x in \n StructureMatcher(stol=1E-6,primitive_cell=False,\n scale=False).group_structures(all_structs)]\n else:\n matched=all_structs\n return(matched) \n\n\n else:\n print('Material is does not have a 3D motiif')\n print('Try increasing radii tolerance if appropriate')\n return([])", "def create_bonds(self):\n mdesc = Library.library_get_monomer_desc(self.res_name)\n if mdesc is None:\n return\n\n def find_atom(name):\n try:\n return self[name]\n except KeyError:\n return self[mdesc.alt_atom_dict[name]]\n for bond in mdesc.bond_list:\n try:\n atm1 = find_atom(bond[\"atom1\"])\n atm2 = find_atom(bond[\"atom2\"])\n except KeyError:\n continue\n else:\n atm1.create_bonds(atom = atm2, standard_res_bond = True)", "def finetune_acceptor_boundaries(abfgp_genemodel,introndata,\n array_algpresence,array_algsimilarity,verbose=True):\n\n # Global Variable Imports\n from settings.genestructure import MIN_INTRON_NT_LENGTH\n FINETUNE_ACCEPTOR_NT_OFFSET = 18\n\n # list with adjusted boundaries\n refined_boundaries = []\n\n # recreate lists of ABGFP exons & introns\n abfgp_exons = [ abfgp_genemodel[pos] for pos in range(0,len(abfgp_genemodel),2) ]\n abfgp_introns = [ abfgp_genemodel[pos] for pos in range(1,len(abfgp_genemodel),2) ]\n\n for intron_pos in range(0,len(abfgp_introns)):\n intron = abfgp_introns[intron_pos]\n if not intron: continue\n if intron.__class__.__name__ == 'SequenceErrorConnectingOrfs': continue\n\n # assign branchpoint in current intron\n intron.assign_bp_and_ppts()\n\n has_been_printed = False\n\n # list of alternatives & associated scores\n alternatives = []\n finetune_range = range(intron.acceptor.pos-FINETUNE_ACCEPTOR_NT_OFFSET,\n intron.acceptor.pos+FINETUNE_ACCEPTOR_NT_OFFSET+1,3)\n\n for acceptor in intron.orfAcceptor._acceptor_sites:\n if acceptor.pos != intron.acceptor.pos and\\\n acceptor.pos in finetune_range:\n # get the next exon (3'of this intron)\n next_exon = abfgp_exons[intron_pos+1]\n if not has_been_printed:\n has_been_printed = True\n ############################################################\n if verbose: print \"FINETUNING ACCEPTOR::\", intron\n ############################################################\n\n # get data on this alternative acceptor position\n test_intron = IntronConnectingOrfs(intron.donor,acceptor,None,intron.orfDonor,intron.orfAcceptor)\n test_intron.assign_bp_and_ppts()\n\n # test if refinement will result in a long enough intron\n if test_intron.length < MIN_INTRON_NT_LENGTH: continue\n\n scorelist = []\n # score 1: is acceptor.pssm_score `higher`?\n scorelist.append( _finetune_splicesite_comparison(intron.acceptor,test_intron.acceptor) )\n # score 2: branchpoint comparison?\n scorelist.append( _branchpoint_comparison(intron,test_intron) )\n # score 3: ppt comparison?\n scorelist.append( _polypyrimidinetract_comparison(intron,test_intron) )\n # score 4: is algsimilarity ratio increased (==better)?\n scorelist.append( _algsimilarity_comparison(intron,test_intron,None,next_exon,array_algsimilarity) )\n\n # evaluate scorelist; improved intron boundary or not?\n # use acceptor, branchpoint & ppt, do *NOT* use algsim score\n if scorelist[0:3].count(False) == 0 and 
scorelist[0:3].count(True) >= 1:\n alternatives.append( ( acceptor, scorelist ) )\n is_accepted = True\n else:\n is_accepted = False\n\n ################################################################\n if verbose:\n print \"alternative:\", acceptor,\n print intron.acceptor.pos - acceptor.pos, scorelist,\n print is_accepted, \"BP:\",\n print intron.get_branchpoint_nt_distance(),\n print \"alt:\",\n print test_intron.get_branchpoint_nt_distance()\n ################################################################\n\n # now evaluate the alternatived and take the best one\n if not alternatives:\n continue\n elif len(alternatives) == 1:\n refined_boundaries.append( ( intron.acceptor, alternatives[0][0] ) )\n else:\n # multiple! again, take the *best* one\n pass\n\n # return list of refined_boundaries\n return refined_boundaries", "def test_neighbor():\n UP = (0, -1)\n LEFT = (-1, 0)\n DOWN_RIGHT = (1, 1)\n HERE = (0, 0)\n\n c1 = Cell(2, 6, 100)\n c1_up = c1.neighbor(UP)\n c1_left = c1.neighbor(LEFT)\n c1_down_right = c1.neighbor(DOWN_RIGHT)\n c1_here = c1.neighbor(HERE)\n\n assert c1_up == (2, 5)\n assert c1_up[0] == 2\n assert c1_left == (1, 6)\n assert c1_left[1] == 6\n assert c1_down_right == (3, 7)\n assert c1_here == (2, 6)\n\n c2 = Cell(4, 2, 200)\n c2_up = c2.neighbor(UP)\n c2_left = c2.neighbor(LEFT)\n c2_down_right = c2.neighbor(DOWN_RIGHT)\n c2_here = c2.neighbor(HERE)\n\n assert c2_up == (4, 1)\n assert c2_left == (3, 2)\n assert c2_down_right == (5, 3)\n assert c2_here == (4, 2)", "def GetBonds(Bonds):\n b = sorted([(min(x), max(x)) for x in Bonds])\n Bonds13, Bonds14 = [], []\n for (a1,b1) in b:\n #check for bonds with a1 at the center of a 1-3 interaction,\n #letting b1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == a1 and b2 < b1] + \\\n [a2 for (a2,b2) in b if b2 == a1 and a2 < b1]\n Bonds13.extend([(min(c,b1), max(c,b1)) for c in clist])\n #check for bonds with b1 at the center of a 1-3 interaction,\n #letting a1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == b1 and b2 < a1] + \\\n [a2 for (a2,b2) in b if b2 == b1 and a2 < a1]\n Bonds13.extend([(min(c,a1), max(c,a1)) for c in clist])\n #find atoms connected to a1\n clist = [b2 for (a2,b2) in b if a1==a2 and not b1==b2] +\\\n [a2 for (a2,b2) in b if a1==b2 and not b1==a2]\n #find atoms connected to b1\n dlist = [a2 for (a2,b2) in b if b1==b2 and not a1==a2] +\\\n [b2 for (a2,b2) in b if b1==a2 and not a1==b2]\n Bonds14.extend([(min(c,d), max(c,d)) for c in clist for d in dlist])\n Bonds1213 = b + Bonds13\n #sort\n Bonds1213.sort()\n Bonds14.sort()\n #get unique values in case of loops\n Bonds1213 = [x for (i,x) in enumerate(Bonds1213) if i == 0 or x != Bonds1213[i-1]]\n Bonds14 = [x for (i,x) in enumerate(Bonds14) if i == 0 or x != Bonds14[i-1]]\n #convert to arrays \n Bonds1213 = array(Bonds1213, int)\n Bonds14 = array(Bonds14, int)\n return Bonds1213, Bonds14", "def get_molecule_dict(chemfile):\n molecule_dict={}\n with open(chemfile,'r') as f:\n for line in f:\n line=line.strip().split('\\t')\n ikey=line[0]\n smi=line[1]\n mol = Chem.MolFromSmiles(smi)\n if not mol:\n raise ValueError(\"Could not generate Mol from SMILES string:\", smi)\n #Chem.SanitizeMol(mol)\n\n atoms={} #atom_idx -> atom features\n bonds={} #bond_idx -> bond features\n atoms2bond={} #(atom_idx1,atom_idx2) -> bond_idx\n \n nodes_by_degree = {d: [] for d in degrees}\n for atom in mol.GetAtoms():\n atom_feature = atom_features(atom)\n atom_id = smi+str(atom.GetIdx())\n 
atoms[atom.GetIdx()]=atom_feature \n atom_neighbors={aid: [] for aid in atoms.keys()} #atom_idx -> neighbor atom idxs\n bond_neighbors={aid: [] for aid in atoms.keys()} #atom_idx -> neighbor bond idxs\n\n for bond in mol.GetBonds():\n src_atom_idx = bond.GetBeginAtom().GetIdx()\n tgt_atom_idx = bond.GetEndAtom().GetIdx()\n bond_idx = bond.GetIdx()\n bond_neighbors[src_atom_idx].append(bond_idx)\n bond_neighbors[tgt_atom_idx].append(bond_idx)\n bond_feature = bond_features(bond)\n bonds[bond.GetIdx()] = bond_feature\n atom_neighbors[src_atom_idx].append(tgt_atom_idx)\n atom_neighbors[tgt_atom_idx].append(src_atom_idx)\n atoms2bond[(src_atom_idx,tgt_atom_idx)]=bond_idx\n atoms2bond[(tgt_atom_idx,src_atom_idx)]=bond_idx\n \n atoms_by_degree={d: [] for d in degrees}\n bonds_by_degree={d: [] for d in degrees}\n for aid in atom_neighbors:\n neighbor_atoms = atom_neighbors[aid]\n d = len(neighbor_atoms) #degree of the atom\n atoms_by_degree[d].append(aid) #current atom is degree=d\n neighbor_bonds=[]\n for neighbor in neighbor_atoms:\n bond_idx=atoms2bond[(aid,neighbor)]\n neighbor_bonds.append(bond_idx)\n bonds_by_degree[d].append(neighbor_bonds)\n\n neighbor_by_degree = []\n for degree in degrees:\n neighbor_by_degree.append({\n 'atom': atoms_by_degree[degree],\n 'bond': bonds_by_degree[degree]\n })\n \n molecule_dict[ikey]={'smiles':str(smi),\n 'neighbor_by_degree':neighbor_by_degree,\n 'atoms':atoms,'bonds':bonds,\n 'atom_neighbor':atom_neighbors,\n 'bond_neighbor':bond_neighbors}\n return molecule_dict", "def _get_relevant_bond(self, atom1, atom2):\n bonds_1 = set(atom1.bonds)\n bonds_2 = set(atom2.bonds)\n relevant_bond_set = bonds_1.intersection(bonds_2)\n relevant_bond = relevant_bond_set.pop()\n if relevant_bond.type is None:\n return None\n relevant_bond_with_units = self._add_bond_units(relevant_bond)\n\n check_dimensionality(relevant_bond_with_units.type.req, unit.nanometers)\n check_dimensionality(relevant_bond_with_units.type.k, unit.kilojoules_per_mole/unit.nanometers**2)\n return relevant_bond_with_units", "def get_bond(self, atom):\n assert isinstance(atom, Atom)\n assert atom != self\n\n for bond in self.bond_list:\n if atom == bond.atom1 or atom == bond.atom2:\n return bond\n return None", "def add_bonds_from_covalent_distance(self):\n for model in self.iter_models():\n xyzdict = GeometryDict.XYZDict(2.0)\n\n for atm in model.iter_all_atoms():\n if atm.position is not None:\n xyzdict.add(atm.position, atm)\n\n for (p1,atm1),(p2,atm2),dist in xyzdict.iter_contact_distance(2.5):\n\n if (atm1.alt_loc == \"\" or atm2.alt_loc == \"\") or (atm1.alt_loc == atm2.alt_loc):\n\n ## calculate the expected bond distance by adding the\n ## covalent radii + 0.54A\n edesc1 = Library.library_get_element_desc(atm1.element)\n edesc2 = Library.library_get_element_desc(atm2.element)\n\n ## this will usually occur if an atom name does not match\n ## the one found in the associated monomer library\n if edesc1 is None or edesc2 is None:\n continue\n\n bond_dist = edesc1.covalent_radius + edesc2.covalent_radius + 0.54\n\n ## this will usually occur if the bond distance between\n ## between two atoms does not match the description in\n ## in the monomer library\n if dist > bond_dist:\n continue\n\n if atm1.get_bond(atm2) is None:\n atm1.create_bond(atom = atm2, standard_res_bond = False)", "def extract_activities(ncfile):\n\n # Get current dimensions.\n (niterations, nstates) = ncfile.variables['energies'].shape\n\n # Extract energies.\n print \"Reading energies...\"\n energies = 
ncfile.variables['energies']\n u_kln_replica = zeros([nstates, nstates, niterations], float64)\n for n in range(niterations):\n u_kln_replica[:,:,n] = energies[n,:,:]\n print \"Done.\"\n\n # Deconvolute replicas\n print \"Deconvoluting replicas...\"\n u_kln = zeros([nstates, nstates, niterations], float64)\n for iteration in range(niterations):\n state_indices = ncfile.variables['states'][iteration,:]\n u_kln[state_indices,:,iteration] = energies[iteration,:,:]\n print \"Done.\"\n\n # Show all self-energies\n print 'all self-energies for all replicas'\n for iteration in range(niterations):\n for replica in range(nstates):\n state = int(ncfile.variables['states'][iteration,replica])\n print '%12.1f' % energies[iteration, replica, state],\n print ''\n\n # If no energies are 'nan', we're clean.\n if not any(isnan(energies[:,:,:])):\n return\n\n # There are some energies that are 'nan', so check if the first iteration has nans in their *own* energies:\n u_k = diag(energies[0,:,:])\n if any(isnan(u_k)):\n print \"First iteration has exploded replicas. Check to make sure structures are minimized before dynamics\"\n print \"Energies for all replicas after equilibration:\"\n print u_k\n sys.exit(1)\n\n # There are some energies that are 'nan' past the first iteration. Find the first instances for each replica and write PDB files.\n first_nan_k = zeros([nstates], int32)\n for iteration in range(niterations):\n for k in range(nstates):\n if isnan(energies[iteration,k,k]) and first_nan_k[k]==0:\n first_nan_k[k] = iteration\n if not all(first_nan_k == 0):\n print \"Some replicas exploded during the simulation.\"\n print \"Iterations where explosions were detected for each replica:\"\n print first_nan_k\n print \"Writing PDB files immediately before explosions were detected...\"\n for replica in range(nstates): \n if (first_nan_k[replica] > 0):\n state = ncfile.variables['states'][iteration,replica]\n iteration = first_nan_k[replica] - 1\n filename = 'replica-%d-before-explosion.pdb' % replica\n title = 'replica %d state %d iteration %d' % (replica, state, iteration)\n write_pdb(atoms, filename, iteration, replica, title, ncfile)\n filename = 'replica-%d-before-explosion.crd' % replica \n write_crd(filename, iteration, replica, title, ncfile)\n sys.exit(1)\n\n # There are some energies that are 'nan', but these are energies at foreign lambdas. We'll just have to be careful with MBAR.\n # Raise a warning.\n print \"WARNING: Some energies at foreign lambdas are 'nan'. 
This is recoverable.\"\n \n return", "def check_bond(atom1, atom2):\n check = False\n for bond in bond_list:\n if (((bond.identity == get_bond_id(atom1, atom2)[0]) or (bond.identity == get_bond_id(atom1, atom2)[1])) and 0.975 * bond.length <= calculate_3D_distance_2_atoms(atom1, atom2) <= 1.025 * bond.length):\n check = True\n break\n return check", "def getChemBonds(self):\n dataDict = self.__dict__\n chemAtomVars = self.chemAtoms\n result = frozenset(xx for xx in self.chemComp.chemBonds if xx.chemAtoms.issubset(chemAtomVars))\n return result", "def test_chemical_environment_matches_RDKit(self):\n # Create chiral molecule\n from simtk.openmm.app import element\n\n toolkit_wrapper = RDKitToolkitWrapper()\n molecule = Molecule()\n atom_C = molecule.add_atom(\n element.carbon.atomic_number, 0, False, stereochemistry=\"R\", name=\"C\"\n )\n atom_H = molecule.add_atom(element.hydrogen.atomic_number, 0, False, name=\"H\")\n atom_Cl = molecule.add_atom(element.chlorine.atomic_number, 0, False, name=\"Cl\")\n atom_Br = molecule.add_atom(element.bromine.atomic_number, 0, False, name=\"Br\")\n atom_F = molecule.add_atom(element.fluorine.atomic_number, 0, False, name=\"F\")\n molecule.add_bond(atom_C, atom_H, 1, False)\n molecule.add_bond(atom_C, atom_Cl, 1, False)\n molecule.add_bond(atom_C, atom_Br, 1, False)\n molecule.add_bond(atom_C, atom_F, 1, False)\n # Test known cases\n matches = molecule.chemical_environment_matches(\n \"[#6:1]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 1 # it should have one tagged atom\n assert set(matches[0]) == set([atom_C])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[#1:2]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 2 # it should have two tagged atoms\n assert set(matches[0]) == set([atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[Cl:1]-[C:2]-[H:3]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 3 # it should have three tagged atoms\n assert set(matches[0]) == set([atom_Cl, atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[*:2]\", toolkit_registry=toolkit_wrapper\n )\n assert len(matches) == 4 # there should be four matches\n for match in matches:\n assert len(match) == 2 # each match should have two tagged atoms", "def check_molecule_constraints(cls, molecule, system, bond_elements, bond_length):\n for constraint_idx in range(system.getNumConstraints()):\n atom1_idx, atom2_idx, distance = system.getConstraintParameters(\n constraint_idx\n )\n atom_elements = {\n molecule.atoms[atom1_idx].symbol,\n molecule.atoms[atom2_idx].symbol,\n }\n assert atom_elements == bond_elements\n assert np.isclose(\n distance.value_in_unit(openmm_unit.angstrom),\n bond_length.m_as(unit.angstrom),\n )", "def getBondVectors(struct,tol,prec): \n \n \n binary_matrix= getDistMat(struct,tol)\n bond_dir = {}\n distance_matrix = struct.distance_matrix\n lattice = np.array(struct.lattice.as_dict()['matrix'])\n iterations = list(itertools.product([1,0,-1],repeat=3))\n # Loop over list of atoms\n for i in range(len(binary_matrix)):\n for j in range(i+1,len(binary_matrix)):\n # Proceed if the entries are listed as \"bonded\" \n if binary_matrix[i][j]==1: \n s1 = 
struct.species[i]\n s2 = struct.species[j]\n # Organize dictionary so it is always in order of increasing\n # atomic number\n if s1.number>s2.number:\n s1 = struct.species[j]\n s2 = struct.species[i] \n if s1 not in bond_dir:\n bond_dir[s1]={}\n if s2 not in bond_dir[s1]:\n bond_dir[s1][s2]=[]\n valid_vs = []\n \n # Get the vector between atomic positions\n \n bond_vector = np.array(struct.sites[j].coords-\n struct.sites[i].coords) \n \n # The positions of the atoms may not be in the right locations\n # to be the minimum distance from each other. As a result,\n # a translation is applied to the resulting \"bond vector\" \n # (alternatively, one of the atoms is translated)\n for shift in iterations:\n bondShift = bond_vector + np.dot(lattice.T,shift)\n if abs(distance_matrix[i][j]-magni(bondShift))<=prec:\n valid_vs.append(bondShift)\n break\n # See if the vector is already present in the collection of \n # vectors. If so, add the coordinates to the entry. Else,\n # create a new entry for the direction of the bond.\n for v in valid_vs:\n if np.any([magni(v-x[0])<=prec for x in bond_dir[s1][s2]]):\n for k in range(len(bond_dir[s1][s2])):\n if magni(v-bond_dir[s1][s2][k][0])<=prec:\n bond_dir[s1][s2][k][1].append([i,j])\n break\n \n else:\n bond_dir[s1][s2].append([v,[[i,j]]])\n return(bond_dir)", "def test_order_atoms(self):\n mol1 = converter.s_bonds_mol_from_xyz(self.xyz10['dict'])\n mol2 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order1'])\n mol3 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order2'])\n converter.order_atoms(ref_mol=mol1, mol=mol2)\n for atom1, atom2 in zip(mol1.atoms, mol2.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n converter.order_atoms(ref_mol=mol3, mol=mol1)\n for atom1, atom2 in zip(mol3.atoms, mol1.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n\n ref_mol = Molecule(smiles='[CH](CC[CH]c1ccccc1)c1ccccc1')\n mol_list = ref_mol.copy(deep=True).generate_resonance_structures(keep_isomorphic=False, filter_structures=True)\n for mol in mol_list:\n converter.order_atoms(ref_mol=ref_mol, mol=mol)\n bond_dict = dict()\n for index1, atom1 in enumerate(ref_mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = ref_mol.atoms.index(atom2)\n if index1 < index2:\n if index1 not in bond_dict:\n bond_dict[index1] = [index2]\n else:\n bond_dict[index1].append(index2)\n for index1, atom1 in enumerate(mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = mol.atoms.index(atom2)\n if index1 < index2:\n self.assertIn(index2, bond_dict[index1]) # check that these atoms are connected in all mols", "def check_boundary(self,x):\n b_cells = np.zeros(self.n_c)\n b_cells[self.n_C:] = 1\n vBC = b_cells[self.tris]\n considered_triangles = vBC.sum(axis=1) == 2\n add_extra = ((self.Angles*(1-vBC)>np.pi/2).T*considered_triangles.T).T\n if add_extra.any():\n I,J = np.nonzero(add_extra)\n for k,i in enumerate(I):\n j = J[k]\n xs = x[self.tris[i]]\n re = xs[np.mod(j-1,3)] - xs[np.mod(j+1,3)]\n re = re/np.linalg.norm(re)\n re = np.array([re[1],-re[0]])\n rpe = xs[j]\n x_new = 2*np.dot(xs[np.mod(j-1,3)]-rpe,re)*re + rpe\n x = np.vstack((x,x_new))\n self.n_c = x.shape[0]\n self._triangulate(x)\n self.assign_vertices()\n\n C = get_C_boundary(self.n_c,self.CV_matrix)\n #\n # #Remove extra cells\n # keep_mask = C[self.n_C:, :self.n_C].sum(axis=1)>0 #I'm assuming this is the same thing. 
This removes all boundary centroids that are not connected to at least one real centroid.\n # if keep_mask.any():\n # c_keep = np.nonzero(keep_mask)[0]\n # x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n # self.n_c = x.shape[0]\n # self._triangulate(x)\n # self.assign_vertices()\n #\n\n #Remove all boundary particles not connected to exactly two other boundary particles\n remove_mask = C[self.n_C:, self.n_C:].sum(axis=1)!=2\n if remove_mask.any():\n c_keep = np.nonzero(~remove_mask)[0]\n x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n self.n_c = x.shape[0]\n self._triangulate(x)\n self.assign_vertices()\n self.Angles = tri_angles(x, self.tris)\n #\n # remove_mask = C[self.n_C:, self.n_C:].sum(axis=1)==0\n # if remove_mask.any():\n # c_keep = np.nonzero(~remove_mask)[0]\n # x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n # self.n_c = x.shape[0]\n # self._triangulate(x)\n # self.assign_vertices()\n # self.Angles = tri_angles(x, self.tris)\n\n\n return x", "def test_chemical_environment_matches_RDKit(self):\n # Create chiral molecule\n toolkit_wrapper = RDKitToolkitWrapper()\n molecule = Molecule()\n atom_C = molecule.add_atom(6, 0, False, stereochemistry=\"R\", name=\"C\")\n atom_H = molecule.add_atom(1, 0, False, name=\"H\")\n atom_Cl = molecule.add_atom(17, 0, False, name=\"Cl\")\n atom_Br = molecule.add_atom(35, 0, False, name=\"Br\")\n atom_F = molecule.add_atom(9, 0, False, name=\"F\")\n molecule.add_bond(atom_C, atom_H, 1, False)\n molecule.add_bond(atom_C, atom_Cl, 1, False)\n molecule.add_bond(atom_C, atom_Br, 1, False)\n molecule.add_bond(atom_C, atom_F, 1, False)\n # Test known cases\n matches = molecule.chemical_environment_matches(\n \"[#6:1]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 1 # it should have one tagged atom\n assert set(matches[0]) == set([atom_C])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[#1:2]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 2 # it should have two tagged atoms\n assert set(matches[0]) == set([atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[Cl:1]-[C:2]-[H:3]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 3 # it should have three tagged atoms\n assert set(matches[0]) == set([atom_Cl, atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[*:2]\", toolkit_registry=toolkit_wrapper\n )\n assert len(matches) == 4 # there should be four matches\n for match in matches:\n assert len(match) == 2 # each match should have two tagged atoms\n # Test searching for stereo-specific SMARTS\n matches = molecule.chemical_environment_matches(\n \"[#6@:1](-[F:2])(-[Cl])(-[Br])(-[H])\", toolkit_registry=toolkit_wrapper\n )\n assert len(matches) == 1 # there should be one match\n for match in matches:\n assert len(match) == 2 # each match should have two tagged atoms\n matches = molecule.chemical_environment_matches(\n \"[#6@@:1](-[F:2])(-[Cl])(-[Br])(-[H])\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 0\n ) # this is the wrong stereochemistry, so there shouldn't be any matches", "def set_molecules(atom_list):\n from sys import setrecursionlimit, getrecursionlimit\n # Since we use a recursive 
function here, we make sure that the recursion\n # limit is large enough to handle the maximum possible recursion depth we'll\n # need (NATOM). We don't want to shrink it, though, since we use list\n # comprehensions in list constructors in some places that have an implicit\n # (shallow) recursion, therefore, reducing the recursion limit too much here\n # could raise a recursion depth exceeded exception during a _Type/Atom/XList\n # creation. Therefore, set the recursion limit to the greater of the current\n # limit or the number of atoms\n setrecursionlimit(max(len(atom_list), getrecursionlimit()))\n\n # Unmark all atoms so we can track which molecule each goes into\n atom_list.unmark()\n\n # The molecule \"ownership\" list\n owner = []\n # The way I do this is via a recursive algorithm, in which\n # the \"set_owner\" method is called for each bonded partner an atom\n # has, which in turn calls set_owner for each of its partners and\n # so on until everything has been assigned.\n molecule_number = 1 # which molecule number we are on\n for i in range(len(atom_list)):\n # If this atom has not yet been \"owned\", make it the next molecule\n # However, we only increment which molecule number we're on if\n # we actually assigned a new molecule (obviously)\n if not atom_list[i].marked:\n tmp = [i]\n _set_owner(atom_list, tmp, i, molecule_number)\n # Make sure the atom indexes are sorted\n tmp.sort()\n owner.append(tmp)\n molecule_number += 1\n return owner", "def neighboring_consumers(self, position_list):\n agent_list = []\n #loop over all neighbors\n for position in position_list:\n agents_in_cell = self.model.grid.get_cell_list_contents(position)\n #loop over all agents in the cell to find if agent is present\n for agent in agents_in_cell:\n if type(agent).__name__ == \"Consumer\":\n agent_list.append(agent)\n \n return agent_list", "def segment_neighborhood(self, complexes, reactions):\n\t\t\t\t\n\t\t# First we initialize the graph variables that will be used for\n\t\t# Tarjan's algorithm\n\t\t\n\t\tself._tarjan_index = 0\n\t\tself._tarjan_stack = []\n\t\tself._SCC_stack = []\n\n\t\t# Set up for Tarjan's algorithm\n\t\tfor node in complexes:\n\t\t\tnode._outward_edges = []\n\t\t\tnode._full_outward_edges = []\n\t\t\tnode._index = -1\n\t\tfor reaction in reactions:\n\t\t\tfor product in reaction.products:\n\t\t\t\tproduct._outward_edges = []\n\t\t\t\tproduct._full_outward_edges = []\n\t\t\t\tproduct._index = -1\n\t\t\t\t\n\t\t# Detect which products are actually in the neighborhood\t\n\t\tfor reaction in reactions:\n\t\t\tfor product in reaction.products:\n\t\t\t\tproduct_in_N = False\n\t\t\t\t\n\t\t\t\tfor complex in complexes:\n\t\t\t\t\tif (complex == product):\n\t\t\t\t\t\tproduct_in_N = True\n\t\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\t# If this product is in the neighborhood, we have an edge\n\t\t\t\tif product_in_N:\n\t\t\t\t\t# We know all these reactions are unimolecular\n\t\t\t\t\treaction.reactants[0]._outward_edges.append(product)\n\t\t\t\treaction.reactants[0]._full_outward_edges += (reaction.products)\n\n\t\t\t\t\t\n\t\t\tnode._lowlink = -1\t\t\t\n\t\t\t\n\t\t# We now perform Tarjan's algorithm, marking nodes as appropriate\n\t\tfor node in complexes:\n\t\t\tif node._index == -1:\n\t\t\t\tself.tarjans(node)\n\t\t\n\t\t# Now check to see which of the SCCs are resting states\n\t\tresting_states = []\n\t\tresting_state_complexes = []\n\t\ttransient_state_complexes = []\n\t\tfor scc in self._SCC_stack:\n\t\t\tscc_products = []\n\t\t\tis_resting_state = True\n\t\t\t\n\t\t\tfor node in 
scc:\n\t\t\t\tfor product in node._full_outward_edges:\n\t\t\t\t\tscc_products.append(product)\n\t\t\t\n\t\t\tfor product in scc_products:\n\t\t\t\tproduct_in_scc = False\n\t\t\t\tfor complex in scc:\n\t\t\t\t\tif product == complex:\n\t\t\t\t\t\tproduct_in_scc = True\n\t\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\t# If the product is not in the SCC, then there is a fast edge\n\t\t\t\t# leading out of the SCC, so this is not a resting state\n\t\t\t\tif not product_in_scc:\n\t\t\t\t\tis_resting_state = False\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\tif is_resting_state:\n\t\t\t\tresting_state_complexes += (scc)\n\t\t\t\tresting_state = RestingState(self.get_auto_name(), scc[:])\n\t\t\t\tresting_states.append(resting_state)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\ttransient_state_complexes += (scc)\n\t\tresting_states.sort()\n\t\tresting_state_complexes.sort()\n\t\ttransient_state_complexes.sort()\n\t\treturn {\n\t\t\t\t'resting_states': resting_states, \n\t\t\t 'resting_state_complexes': resting_state_complexes,\n\t\t\t\t'transient_state_complexes': transient_state_complexes\n\t\t\t\t}", "def is_three_membered_ring_torsion(torsion):\n # A set of atom indices for the atoms in the torsion.\n torsion_atom_indices = set(a.molecule_atom_index for a in torsion)\n\n # Collect all the bonds involving exclusively atoms in the torsion.\n bonds_by_atom_idx = {i: set() for i in torsion_atom_indices}\n for atom in torsion:\n for bond in atom.bonds:\n # Consider the bond only if both atoms are in the torsion.\n if (\n bond.atom1_index in torsion_atom_indices\n and bond.atom2_index in torsion_atom_indices\n ):\n bonds_by_atom_idx[bond.atom1_index].add(bond.atom2_index)\n bonds_by_atom_idx[bond.atom2_index].add(bond.atom1_index)\n\n # Find the central atom, which is connected to all other atoms.\n atom_indices = [i for i in torsion_atom_indices if len(bonds_by_atom_idx[i]) == 3]\n if len(atom_indices) != 1:\n return False\n central_atom_idx = atom_indices[0]\n\n # Find the atom outside the ring.\n atom_indices = [i for i in torsion_atom_indices if len(bonds_by_atom_idx[i]) == 1]\n if (\n len(atom_indices) != 1\n or central_atom_idx not in bonds_by_atom_idx[atom_indices[0]]\n ):\n return False\n outside_atom_idx = atom_indices[0]\n\n # Check that the remaining two atoms are non-central atoms in the membered ring.\n atom1, atom2 = [\n i for i in torsion_atom_indices if i not in [central_atom_idx, outside_atom_idx]\n ]\n # The two atoms are bonded to each other.\n if atom2 not in bonds_by_atom_idx[atom1] or atom1 not in bonds_by_atom_idx[atom2]:\n return False\n # Check that they are both bonded to the central atom and none other.\n for atom_idx in [atom1, atom2]:\n if (\n central_atom_idx not in bonds_by_atom_idx[atom_idx]\n or len(bonds_by_atom_idx[atom_idx]) != 2\n ):\n return False\n\n # This is a torsion including a three-membered ring.\n return True", "def _initializeAdjacencyList(self):\n\n if self.comm.rank == 0:\n # First, create a dictionary of common edges shared by components\n edgeToFace = {}\n for elemID in self.bdfInfo.elements:\n elemInfo = self.bdfInfo.elements[elemID]\n elemConn = elemInfo.nodes\n compID = self.meshLoader.nastranToTACSCompIDDict[elemInfo.pid]\n nnodes = len(elemConn)\n if nnodes >= 2:\n for j in range(nnodes):\n nodeID1 = elemConn[j]\n nodeID2 = elemConn[(j + 1) % nnodes]\n\n if nodeID1 < nodeID2:\n key = (nodeID1, nodeID2)\n else:\n key = (nodeID2, nodeID1)\n\n if key not in edgeToFace:\n edgeToFace[key] = [compID]\n elif compID not in edgeToFace[key]:\n edgeToFace[key].append(compID)\n\n # Now we loop 
back over each element and each edge. By\n # using the edgeToFace dictionary, we can now determine\n # which components IDs (jComp) are connected to the\n # current component ID (iComp).\n self.adjacentComps = []\n\n for edgeKey in edgeToFace:\n if len(edgeToFace[edgeKey]) >= 2:\n for i, iComp in enumerate(edgeToFace[edgeKey][:-1]):\n for jComp in edgeToFace[edgeKey][i + 1 :]:\n if iComp < jComp:\n dvKey = (iComp, jComp)\n else:\n dvKey = (jComp, iComp)\n if dvKey not in self.adjacentComps:\n self.adjacentComps.append(dvKey)\n\n else:\n self.adjacentComps = None\n\n # Wait for root\n self.comm.barrier()", "def reached_final_point():\n return all(point.constraints[b.atom_indexes] == b.final_dist\n for b in self.bonds)", "def get_subm(self, nodes):\n na1 = len(nodes)\n bonds = []\n for i in range(na1):\n for j in range(i+1,na1):\n if self.bom[i,j] > 0:\n bonds.append([i,j])\n return nodes,bonds", "def check_all_neighbors_present_remote(local_host, rem_host, rem_asic, neighs,\n encaps, all_cfg_facts, nbrhosts, nbr_macs):\n\n rem_cfg_facts = all_cfg_facts[rem_host.hostname][rem_asic.asic_index]['ansible_facts']\n remote_inband_info = get_inband_info(rem_cfg_facts)\n if remote_inband_info == {}:\n logger.info(\"No inband configuration on this asic: %s/%s, will be skipped.\",\n rem_host.hostname, rem_asic.asic_index)\n return {'fail_cnt': 0}\n remote_inband_mac = get_sonic_mac(rem_host, rem_asic.asic_index, remote_inband_info['port'])\n fail_cnt = 0\n\n # Grab dumps of the asicdb, appdb, routing table, and arp table\n\n # bgp routes\n docker = \"bgp\"\n if rem_host.facts[\"num_asic\"] > 1:\n docker = \"bgp\" + str(rem_asic.asic_index)\n\n v4_output = rem_host.command(\"docker exec \" + docker + \" vtysh -c \\\"show ip route kernel json\\\"\")\n v6_output = rem_host.command(\"docker exec \" + docker + \" vtysh -c \\\"show ipv6 route kernel json\\\"\")\n v4_parsed = json.loads(v4_output[\"stdout\"])\n v6_parsed = json.loads(v6_output[\"stdout\"])\n\n # kernel routes\n if rem_host.is_multi_asic:\n v4cmd = \"ip netns exec {} ip -4 route show scope link\".format(rem_asic.namespace)\n v6cmd = \"ip netns exec {} ip -6 route show\".format(rem_asic.namespace)\n else:\n v4cmd = \"ip -4 route show scope link\"\n v6cmd = \"ip -6 route show\"\n\n v4_kern = rem_host.command(v4cmd)['stdout_lines']\n v6_kern = rem_host.command(v6cmd)['stdout_lines']\n\n # databases and ARP table\n asicdb = AsicDbCli(rem_asic)\n asic_dump = asicdb.dump_neighbor_table()\n\n appdb = AppDbCli(rem_asic)\n app_dump = appdb.dump_neighbor_table()\n\n if rem_host.is_multi_asic:\n arptable = rem_host.switch_arptable(namespace=rem_asic.namespace)['ansible_facts']\n else:\n arptable = rem_host.switch_arptable()['ansible_facts']\n\n for neighbor in neighs:\n neighbor_mac_on_dut = remote_inband_mac\n if rem_host.get_facts()['asic_type'] == 'vs':\n # For vs platform, the mac programmed will be remote asic's mac as required for datapath to work.\n neighbor_mac_on_dut = local_host.get_facts()['router_mac']\n logger.info(\"Check remote host: %s, asic: %s, for neighbor %s\", rem_host.hostname, rem_asic.asic_index,\n neighbor)\n nbr_vm = get_vm_with_ip(neighbor, nbrhosts)\n neigh_mac = nbr_macs[nbr_vm['vm']][nbr_vm['port']]\n encap_id = encaps[neighbor]\n\n # Verify ASIC DB entries\n for entry in asic_dump:\n matchstr = '\"%s\",' % neighbor\n if matchstr in entry:\n\n if neigh_mac.lower() != asic_dump[entry]['value']['SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS'].lower():\n logger.error(\"Asic neighbor macs for %s do not match: %s != %s\", neighbor, 
neigh_mac.lower(),\n asic_dump[entry]['value']['SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS'].lower())\n fail_cnt += 1\n else:\n logger.debug(\"Asic neighbor macs for %s match: %s == %s\", neighbor, neigh_mac.lower(),\n asic_dump[entry]['value']['SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS'].lower())\n\n if encap_id != asic_dump[entry]['value']['SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX']:\n logger.error(\"Asic neighbor encap for %s do not match: %s != %s\", neighbor, encap_id,\n asic_dump[entry]['value']['SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX'])\n fail_cnt += 1\n else:\n logger.debug(\"Asic neighbor encap for %s match: %s == %s\", neighbor, encap_id,\n asic_dump[entry]['value']['SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX'])\n\n pytest_assert(asic_dump[entry]['value']['SAI_NEIGHBOR_ENTRY_ATTR_IS_LOCAL'] == \"false\",\n \"is local is not false in asicDB\")\n\n break\n else:\n logger.error(\"Did not find neighbor in asictable for IP: %s on remote %s\", neighbor, rem_host.hostname)\n fail_cnt += 1\n\n # Verify APP DB entries\n for entry in app_dump:\n matchstr = ':%s' % neighbor\n if entry.endswith(matchstr):\n if neighbor_mac_on_dut.lower() != app_dump[entry]['value']['neigh'].lower():\n logger.error(\"App neighbor macs for %s do not match: %s != %s\",\n neighbor, remote_inband_mac.lower(),\n app_dump[entry]['value']['neigh'].lower())\n fail_cnt += 1\n else:\n logger.debug(\"App neighbor macs for %s match: %s == %s\", neighbor, remote_inband_mac.lower(),\n app_dump[entry]['value']['neigh'].lower())\n\n pytest_assert(\":{}:\".format(remote_inband_info['port']) in entry, \"Port for %s does not match\" % entry)\n break\n else:\n logger.error(\"Did not find neighbor in appdb for IP: %s on remote %s\", neighbor, rem_host.hostname)\n fail_cnt += 1\n\n # Verify ARP table\n\n check_host_arp_table(rem_host, rem_asic, neighbor, neighbor_mac_on_dut,\n remote_inband_info['port'], 'PERMANENT', arptable=arptable)\n\n # Verify routing tables\n if \":\" in neighbor:\n ipver = \"ipv6\"\n prefix = neighbor + \"/128\"\n bgp_parse = v6_parsed\n kern_route = v6_kern\n else:\n ipver = \"ip\"\n prefix = neighbor + \"/32\"\n bgp_parse = v4_parsed\n kern_route = v4_kern\n\n # bgp routing table\n check_bgp_kernel_route(rem_host, rem_asic.asic_index, prefix, ipver,\n remote_inband_info['port'], present=True, parsed=bgp_parse)\n\n # kernel routing table\n for route in kern_route:\n if route.startswith(\"%s \" % neighbor):\n pytest_assert(\"dev %s\" % remote_inband_info['port'] in route,\n \"Neigbor: %s, Route device not inband port: %s\" % (neighbor, remote_inband_info['port']))\n break\n else:\n logger.error(\"Neighbor: %s not in kernel table\" % neighbor)\n fail_cnt += 1\n\n logger.info(\"Check remote host: %s, asic: %s, check for neighbor %s successful\",\n rem_host.hostname, rem_asic.asic_index, neighbor)\n return {'fail_cnt': fail_cnt}", "def neighbours(input_configuration, position):\n\n row_pos, seat_pos = position\n return [(check_row, check_seat)\n for check_row in range (row_pos-1, row_pos + 2) for check_seat in range (seat_pos-1, seat_pos+2)\n if (check_row != row_pos or check_seat != seat_pos)\n and (check_row, check_seat) in input_configuration.keys()]", "def dump_and_verify_neighbors_on_asic(duthosts, per_host, asic, neighs, nbrhosts,\n all_cfg_facts, nbr_macs, check_nbr_state=True):\n\n logger.info(\"Checking local neighbors on host: %s, asic: %s\", per_host.hostname, asic.asic_index)\n ret = check_all_neighbors_present_local(duthosts, per_host, asic, neighs, all_cfg_facts,\n nbrhosts, nbr_macs, 
check_nbr_state=check_nbr_state)\n encaps = ret['encaps']\n fail_cnt = ret['fail_cnt']\n\n # Check the neighbor entry on each remote linecard\n for rem_host in duthosts.frontend_nodes:\n\n for rem_asic in rem_host.asics:\n if rem_host == per_host and rem_asic == asic:\n # skip remote check on local host\n continue\n ret = check_all_neighbors_present_remote(per_host, rem_host, rem_asic, neighs, encaps,\n all_cfg_facts, nbrhosts, nbr_macs)\n\n fail_cnt += ret['fail_cnt']\n\n if fail_cnt > 1:\n pytest.fail(\"Test failed because of previous errors.\")\n else:\n logger.info(\"Verification of all neighbors succeeded.\")", "def inner(pos, camefrom):\r\n\t\tlabyrinth[pos[0]][pos[1]] = VISITED\r\n\t\tif pos == GOAL:\r\n\t\t\treturn [pos], True\r\n\t\tfor neighbour in neighbours(pos):\r\n\t\t\tif neighbour != camefrom and is_inside(neighbour):\r\n\t\t\t\tif labyrinth[neighbour[0]][neighbour[1]] != BLOCKED and labyrinth[neighbour[0]][neighbour[1]] != VISITED:\r\n\t\t\t\t\tway, success = inner(neighbour, pos)\r\n\t\t\t\t\tif success == True:\r\n\t\t\t\t\t\treturn [pos]+way, True\r\n\t\treturn None, False", "def finetune_intron_boundaries(abfgp_genemodel,introndata,\n array_algpresence,array_algsimilarity,verbose=True):\n\n # Global Variable Imports\n FINETUNE_ACCEPTOR_NT_OFFSET = 12\n FINETUNE_DONOR_NT_OFFSET = 12\n FINETUNE_ACCEPTOR_NT_OFFSET = 18\n FINETUNE_DONOR_NT_OFFSET = 18\n from settings.genestructure import MIN_INTRON_NT_LENGTH\n\n # list with adjusted boundaries\n refined_boundaries = []\n\n # recreate lists of ABGFP exons & introns\n abfgp_exons = [ abfgp_genemodel[pos] for pos in range(0,len(abfgp_genemodel),2) ]\n abfgp_introns = [ abfgp_genemodel[pos] for pos in range(1,len(abfgp_genemodel),2) ]\n\n for intron_pos in range(0,len(abfgp_introns)):\n intron = abfgp_introns[intron_pos]\n if not intron: continue\n if intron.__class__.__name__ == 'SequenceErrorConnectingOrfs': continue\n has_been_printed = False\n finetune_acceptor_range = range(intron.acceptor.pos-FINETUNE_ACCEPTOR_NT_OFFSET,\n intron.acceptor.pos+FINETUNE_ACCEPTOR_NT_OFFSET+1)\n finetune_donor_range = range(intron.donor.pos-FINETUNE_DONOR_NT_OFFSET,\n intron.donor.pos+FINETUNE_DONOR_NT_OFFSET+1)\n\n # assign branchpoint in current intron\n intron.assign_bp_and_ppts()\n\n # start searching acceptor based\n alternatives = []\n for acceptor in intron.orfAcceptor._acceptor_sites:\n if acceptor.pos != intron.acceptor.pos and\\\n acceptor.phase != intron.acceptor.phase and\\\n acceptor.pos in finetune_acceptor_range:\n # now see if we can find a donor for this phase too\n for donor in intron.orfDonor._donor_sites:\n if donor.pos != intron.donor.pos and\\\n donor.phase != intron.donor.phase and\\\n donor.phase == acceptor.phase and\\\n donor.pos in finetune_donor_range:\n # get the next exon (3'of this intron)\n next_exon = abfgp_exons[intron_pos+1]\n prev_exon = abfgp_exons[intron_pos]\n\n if not has_been_printed:\n has_been_printed = True\n ####################################################\n if verbose: print \"FINETUNING INTRON::\", intron\n ####################################################\n\n # get data on this alternative acceptor/donor combination\n test_intron = IntronConnectingOrfs(donor,acceptor,None,intron.orfDonor,intron.orfAcceptor)\n test_intron.assign_bp_and_ppts()\n\n # test if refinement will result in a long enough intron\n if test_intron.length < MIN_INTRON_NT_LENGTH: continue\n\n scorelist = []\n # score 1: is donor.pssm_score `higher`?\n scorelist.append( 
_finetune_splicesite_comparison(intron.donor,donor) )\n # score 2: is acceptor.pssm_score `higher`?\n scorelist.append( _finetune_splicesite_comparison(intron.acceptor,acceptor) )\n # score 3: branchpoint comparison?\n scorelist.append( _branchpoint_comparison(intron,test_intron) )\n # score 4: ppt comparison?\n scorelist.append( _polypyrimidinetract_comparison(intron,test_intron) )\n # score 5: is algsimilarity ratio increased (==better)?\n scorelist.append( _algsimilarity_comparison(intron,test_intron,prev_exon,next_exon,array_algsimilarity) )\n\n # evaluate scorelist; improved intron boundary or not?\n # use donor, acceptor, branchpoint & ppt, do *NOT* use algsim score\n if scorelist[0:4].count(False) == 0 and scorelist[0:4].count(True) >= 1:\n alternatives.append( ( donor, acceptor, scorelist ) )\n is_accepted = True\n else:\n is_accepted = False\n\n ########################################################\n if verbose:\n print \"alternatives:\", donor, acceptor,\n print intron.donor.pos - donor.pos,\n print intron.acceptor.pos - acceptor.pos,\n print scorelist, is_accepted,\n print \"BPcur:\",intron.get_branchpoint_nt_distance(),\n print \"alt:\",\n print test_intron.get_branchpoint_nt_distance()\n ########################################################\n\n # now evaluate the alternatived and take the best one\n if not alternatives:\n continue\n elif len(alternatives) == 1:\n refined_boundaries.append( ( intron.donor, alternatives[0][0] ) )\n refined_boundaries.append( ( intron.acceptor, alternatives[0][1] ) )\n else:\n # multiple! again, take the *best* one\n pass\n\n # return list of refined_boundaries\n return refined_boundaries", "def get_further_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) > 1 or abs(x-i)+abs(y-j) == 0: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def test_molecules_from_xyz(self):\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz6['dict'])\n\n # check that the atom order is the same\n self.assertTrue(s_mol.atoms[0].is_sulfur())\n self.assertTrue(b_mol.atoms[0].is_sulfur())\n self.assertTrue(s_mol.atoms[1].is_oxygen())\n self.assertTrue(b_mol.atoms[1].is_oxygen())\n self.assertTrue(s_mol.atoms[2].is_oxygen())\n self.assertTrue(b_mol.atoms[2].is_oxygen())\n self.assertTrue(s_mol.atoms[3].is_nitrogen())\n self.assertTrue(b_mol.atoms[3].is_nitrogen())\n self.assertTrue(s_mol.atoms[4].is_carbon())\n self.assertTrue(b_mol.atoms[4].is_carbon())\n self.assertTrue(s_mol.atoms[5].is_hydrogen())\n self.assertTrue(b_mol.atoms[5].is_hydrogen())\n self.assertTrue(s_mol.atoms[6].is_hydrogen())\n self.assertTrue(b_mol.atoms[6].is_hydrogen())\n self.assertTrue(s_mol.atoms[7].is_hydrogen())\n self.assertTrue(b_mol.atoms[7].is_hydrogen())\n self.assertTrue(s_mol.atoms[8].is_hydrogen())\n self.assertTrue(b_mol.atoms[8].is_hydrogen())\n self.assertTrue(s_mol.atoms[9].is_hydrogen())\n self.assertTrue(b_mol.atoms[9].is_hydrogen())\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz7['dict'])\n self.assertTrue(s_mol.atoms[0].is_oxygen())\n self.assertTrue(b_mol.atoms[0].is_oxygen())\n self.assertTrue(s_mol.atoms[2].is_carbon())\n self.assertTrue(b_mol.atoms[2].is_carbon())\n\n expected_bonded_adjlist = \"\"\"multiplicity 2\n1 O u0 p2 c0 {6,S} {10,S}\n2 O u0 p2 c0 {3,S} {28,S}\n3 C u0 p0 c0 {2,S} {8,S} {14,S} {15,S}\n4 C u0 p0 c0 {7,S} {16,S} {17,S} {18,S}\n5 C u0 p0 c0 {7,S} {19,S} {20,S} {21,S}\n6 C 
u0 p0 c0 {1,S} {22,S} {23,S} {24,S}\n7 C u1 p0 c0 {4,S} {5,S} {9,S}\n8 C u0 p0 c0 {3,S} {10,D} {11,S}\n9 C u0 p0 c0 {7,S} {11,D} {12,S}\n10 C u0 p0 c0 {1,S} {8,D} {13,S}\n11 C u0 p0 c0 {8,S} {9,D} {25,S}\n12 C u0 p0 c0 {9,S} {13,D} {26,S}\n13 C u0 p0 c0 {10,S} {12,D} {27,S}\n14 H u0 p0 c0 {3,S}\n15 H u0 p0 c0 {3,S}\n16 H u0 p0 c0 {4,S}\n17 H u0 p0 c0 {4,S}\n18 H u0 p0 c0 {4,S}\n19 H u0 p0 c0 {5,S}\n20 H u0 p0 c0 {5,S}\n21 H u0 p0 c0 {5,S}\n22 H u0 p0 c0 {6,S}\n23 H u0 p0 c0 {6,S}\n24 H u0 p0 c0 {6,S}\n25 H u0 p0 c0 {11,S}\n26 H u0 p0 c0 {12,S}\n27 H u0 p0 c0 {13,S}\n28 H u0 p0 c0 {2,S}\n\"\"\"\n expected_mol = Molecule().from_adjacency_list(expected_bonded_adjlist)\n self.assertEqual(b_mol.to_adjacency_list(), expected_bonded_adjlist)\n # the is_isomorphic test must come after the adjlist test since it changes the atom order\n self.assertTrue(b_mol.is_isomorphic(expected_mol))\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz10['dict'], multiplicity=1, charge=0)\n self.assertIsNotNone(s_mol)\n self.assertIsNotNone(b_mol)\n for atom1, atom2 in zip(s_mol.atoms, b_mol.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n for atom1, symbol in zip(s_mol.atoms, self.xyz10['dict']['symbols']):\n self.assertEqual(atom1.symbol, symbol)\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz_dict_13, multiplicity=1, charge=0)\n for atom1, atom2 in zip(s_mol.atoms, b_mol.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n for atom1, symbol in zip(s_mol.atoms, self.xyz_dict_13['symbols']):\n self.assertEqual(atom1.symbol, symbol)\n self.assertEqual(s_mol.multiplicity, 1)\n self.assertEqual(b_mol.multiplicity, 1)\n self.assertFalse(any(atom.radical_electrons for atom in b_mol.atoms))", "def get_conjugated_nodes(self):\n sets = []\n self.get_backbone()\n m = self.mbb\n for bi in m.GetBonds():\n #print ' -- idx = ', bi.GetIdx()\n n = len(sets)\n iconj = bi.GetIsConjugated()\n ins = ( bt2bo[ bi.GetBondType() ] > 1 ) # is non-single bond?\n if iconj or ins:\n ia1, ia2 = bi.GetBeginAtomIdx(), bi.GetEndAtomIdx()\n set_i = set([ia1, ia2])\n if n == 0:\n sets.append( set_i )\n else:\n for j, set_j in enumerate(sets):\n if set_i.intersection( set_j ) > set([]):\n sets[j].update( set_i )\n else:\n if set_i not in sets: sets.append( set_i )\n #print '-- sets = ', sets\n sets_u = cim.merge_sets(sets)\n return sets_u", "def find_all_edge_conflicts(self, agents_to_check=None):\n\n # A dictionary containing the different edges being traversed in the solution and the times and agents\n # traversing them.\n positions = defaultdict(set)\n cn = defaultdict(list) # A dictionary mapping {edge -> (agent1, agent2, agent1_time, agent2_time)}\n count = 0\n if not agents_to_check:\n agents_to_check = list(self.sol.paths.keys())\n for agent in agents_to_check:\n path = self.sol.tuple_solution[agent]\n for move in path:\n edge = move[1]\n positions[edge].add((agent, move[0], move[2]))\n for pres in positions[edge]:\n if pres[0] != agent: # Use different overlap tests for same direction or different directions\n if pres[2] != move[2] and not Cas.overlapping(move[0], pres[1]): # Opposite directions\n continue # no conflict\n if pres[2] == move[2]: # Trickier conflict option, same direction\n occ_i = move[0][0]+1, move[0][1]-1 # Actual occupation times\n occ_j = pres[1][0]+1, pres[1][1]-1\n if not Cas.overlapping(occ_i, occ_j):\n continue\n cn[edge].append((pres[0], agent, pres[1], move[0], pres[2], move[2]))\n count += 1 # (t_rng[1] - t_rng[0])\n return cn, count\n\n \"\"\"\n if move[0][1] - move[0][0] > 1: # Edge 
weight is more than 1\n for pres in positions[edge]:\n if pres[0] != agent: # Use different overlap tests for same direction or different directions\n if pres[2] != move[2] and not Cas.overlapping(move[0], pres[1]):\n continue # no conflict\n if pres[2] == move[2]:\n # The actual times the agents occupy the edge:\n occ_1 = move[0][0], move[0][1] - 1 # Same direction -> last time tick doesn't matter\n occ_2 = pres[1][0], pres[1][1] - 1\n if not self.strong_overlapping(occ_1, occ_2):\n continue\n #t_rng = max(occ_time[0], pres[1][0]), min(occ_time[1], pres[1][1])\n cn[edge].append((pres[0], agent, pres[1], move[0], pres[2], move[2]))\n count += 1 # (t_rng[1] - t_rng[0])\n else: # Edge weight is 1\n # Agent begins to travel at move[0][0] and arrives at move[0][1]\n for pres in positions[edge]:\n if pres[0] != agent and self.strong_overlapping(move[0], pres[1]):\n cn[edge].append((pres[0], agent, pres[1], move[0], pres[2], move[2]))\n count += 1\n \"\"\"", "def overlaps(self, atom, check_up_to, get_all_overlapping_atoms=True):\n if (check_up_to == 0):\n return True, []\n distances = self.structure.get_distances(atom, [i for i in range(0, check_up_to)], mic=True)\n minimum_percentage_allowed = 0.99\n valid = True\n overlappingAtoms = []\n\n init_distance = self.Atoms[atom][\"radius\"]\n\n for i in range(0, check_up_to):\n if (i == atom):\n continue\n minimum_distance = init_distance + self.Atoms[i][\"radius\"]\n if (distances[i] / minimum_distance < minimum_percentage_allowed):\n overlappingAtoms.append([i, minimum_distance - distances[i]])\n #print(\"Minimum allowed: \" + str(minimum_distance) + \", dist: \" + str(distances[i]))\n valid = False\n if (not get_all_overlapping_atoms):\n break\n\n return valid, overlappingAtoms", "def infra_neighbors (self, node_id):\n return (self.network.node[id] for id in self.network.neighbors_iter(node_id)\n if self.network.node[id].type == Node.INFRA)", "def overlapping_atoms(cifs):\n errors = []\n\n # catch pymatgen warnings for overlapping atoms\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n for cif in cifs:\n try:\n s = CifParser(cif).get_structures(primitive=True)[0]\n assert s.is_ordered\n except (ValueError,AssertionError) as exc:\n s = CifParser(cif, occupancy_tolerance=1000).get_structures(primitive=True)[0]\n s.to(filename=cif)\n print(f'Fixed overlapping atoms in {cif}')\n except Exception as exc:\n errors.append(f'Unable to parse file {cif}')\n \n if errors:\n print('\\n'.join(errors))\n sys.exit(1)", "def cfdProcessElementTopology(self):\r\n ## (list of lists) List where each index represents an element in the domain. Each index has an associated list which contains the elements for which is shares a face (i.e. the neighouring elements). Do not confuse a faces 'neighbour cell', which refers to a face's neighbour element, with the neighbouring elements of a cell. 
\r\n self.elementNeighbours = [[] for i in range(0,self.numberOfElements)]\r\n\r\n ## (list of lists) list of face indices forming each element\r\n self.elementFaces = [[] for i in range(0,self.numberOfElements)]\r\n \r\n #populates self.elementNeighbours\r\n for iFace in range(self.numberOfInteriorFaces):\r\n own=self.owners[iFace]\r\n nei=self.neighbours[iFace]\r\n \r\n #adds indices of neighbour cells\r\n self.elementNeighbours[own].append(nei)\r\n self.elementNeighbours[nei].append(own)\r\n \r\n #adds interior faces\r\n self.elementFaces[own].append(iFace)\r\n self.elementFaces[nei].append(iFace)\r\n \r\n #adds boundary faces ('patches')\r\n for iFace in range(self.numberOfInteriorFaces,self.numberOfFaces):\r\n own=self.owners[iFace]\r\n self.elementFaces[own].append(iFace)\r\n \r\n ## List of lists containing points forming each element\r\n self.elementNodes = [[] for i in range(0,self.numberOfElements)]\r\n \r\n for iElement in range(self.numberOfElements):\r\n \r\n for faceIndex in self.elementFaces[iElement]:\r\n self.elementNodes[iElement].append(self.faceNodes[faceIndex])\r\n \r\n self.elementNodes[iElement] = list(set([item for sublist in self.elementNodes[iElement] for item in sublist]))\r\n \r\n ## Upper coefficient indices (owners)\r\n self.upperAnbCoeffIndex=[[] for i in range(0,self.numberOfInteriorFaces)]\r\n \r\n ## Lower coefficient indices (owners)\r\n self.lowerAnbCoeffIndex=[[] for i in range(0,self.numberOfInteriorFaces)]\r\n \r\n for iElement in range(self.numberOfElements):\r\n ## Element number from 1 to numberOfElements + 1\r\n iNb=1\r\n for faceIndex in self.elementFaces[iElement]:\r\n \r\n #skip if it is a boundary face\r\n if faceIndex > self.numberOfInteriorFaces-1:\r\n continue\r\n \r\n own = self.owners[faceIndex]\r\n nei = self.neighbours[faceIndex]\r\n \r\n if iElement == own:\r\n self.upperAnbCoeffIndex[faceIndex] = iNb\r\n elif iElement == nei:\r\n self.lowerAnbCoeffIndex[faceIndex] = iNb\r\n \r\n iNb = iNb +1", "def elemental_descriptor(A1_ion, A2_ion, B_ion):\n ele_A1 = mg.Element(A1_ion)\n ele_A2 = mg.Element(A2_ion)\n ele_B = mg.Element(B_ion)\n ele_O = mg.Element('O') \n # A/B ion oxidation state \n common_oxidation_states_A1 = ele_A1.common_oxidation_states[0]\n common_oxidation_states_A2 = ele_A2.common_oxidation_states[0]\n common_oxidation_states_A = np.mean(common_oxidation_states_A1 + common_oxidation_states_A2)\n common_oxidation_states_B = ele_B.common_oxidation_states[0]\n # ionic radius property\n ionic_radius_A1 = float(str(ele_A1.average_ionic_radius)[:-4])\n ionic_radius_A2 = float(str(ele_A2.average_ionic_radius)[:-4])\n ionic_radius_A = (ionic_radius_A1+ ionic_radius_A2)/2\n ionic_radius_B = float(str(ele_B.average_ionic_radius)[:-4])\n ionic_radius_O = float(str(ele_O.average_ionic_radius)[:-4])\n # Tolerance factor \n TF = (ionic_radius_A + ionic_radius_O)/(np.sqrt(2)*(ionic_radius_B + ionic_radius_O))\n # Octahedral factor\n OF = ionic_radius_B/ionic_radius_O \n # ionic_radius ratios\n ionic_ration_AO = ionic_radius_A / ionic_radius_O\n ionic_ration_BO = ionic_radius_B / ionic_radius_O\n # averaged electronegativity for A and B atoms\n Pauling_electronegativity_A1 = ele_A1.X\n Pauling_electronegativity_A2 = ele_A2.X\n Pauling_electronegativity_A = (Pauling_electronegativity_A1 + Pauling_electronegativity_A2)/2\n Pauling_electronegativity_B = ele_B.X\n Pauling_electronegativity_O = ele_O.X\n # Difference in the electronegativity for A-O and B-O\n Diff_A_O = Pauling_electronegativity_A - Pauling_electronegativity_O\n Diff_B_O = 
Pauling_electronegativity_B - Pauling_electronegativity_O\n return [common_oxidation_states_A, common_oxidation_states_B, Pauling_electronegativity_A, Pauling_electronegativity_B, TF, OF, ionic_ration_AO, ionic_ration_BO, Diff_A_O, Diff_B_O]", "def check_intfs_and_nbrs(duthosts, all_cfg_facts, nbrhosts, nbr_macs):\n for host in duthosts.frontend_nodes:\n for asic in host.asics:\n cfg_facts = all_cfg_facts[host.hostname][asic.asic_index]['ansible_facts']\n check_voq_interfaces(duthosts, host, asic, cfg_facts)\n\n logger.info(\"Checking local neighbors on host: %s, asic: %s\", host.hostname, asic.asic_index)\n if 'BGP_NEIGHBOR' in cfg_facts:\n neighs = cfg_facts['BGP_NEIGHBOR']\n else:\n logger.info(\"No local neighbors for host: %s/%s, skipping\", host.hostname, asic.asic_index)\n continue\n\n dump_and_verify_neighbors_on_asic(duthosts, host, asic, neighs, nbrhosts, all_cfg_facts, nbr_macs)", "def isHetatmChain(self):\n n = [x for x in self.residues.atoms if not x.hetatm]\n if n: return 0\n else: return 1", "def GetInteriorEdgesPent(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesPent()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesPent()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags", "def Neighbourgs(abcd, h):\n\n Nelem = len(abcd)\n\n a = abcd[h][0]\n b = abcd[h][1]\n c = abcd[h][2]\n d = abcd[h][3]\n\n el1, el2, el3, el4 = 0, 0, 0, 0\n\n N = 0\n\n for j in range(0, Nelem - 1):\n\n if N == 4:\n break\n\n if a in abcd[j, :] and b in abcd[j, :] and j != h:\n N += 1\n el1 = j + 1\n\n if b in abcd[j, :] and c in abcd[j, :] and j != h:\n N += 1\n el2 = j + 1\n\n if c in abcd[j, :] and d in abcd[j, :] and j != h:\n N += 1\n el3 = j + 1\n\n if d in abcd[j, :] and a in abcd[j, :] and j != h:\n N += 1\n el4 = j + 1\n\n return [el1, el2, el3, el4]", "def optimize_hydrogens(self):\n _LOGGER.debug(\"Optimization progress:\")\n optlist = self.optlist\n connectivity = {}\n # Initialize the detection progress\n if len(optlist) == 0:\n return\n _LOGGER.debug(\" Detecting potential hydrogen bonds\")\n progress = 0.0\n increment = 1.0 / len(optlist)\n for obj in optlist:\n connectivity[obj] = []\n for atom in obj.atomlist:\n closeatoms = self.debumper.cells.get_near_cells(atom)\n for closeatom in closeatoms:\n # Conditions for continuing\n if atom.residue == closeatom.residue:\n continue\n if not (closeatom.hacceptor or closeatom.hdonor):\n continue\n if atom.hdonor and not atom.hacceptor:\n if not closeatom.hacceptor:\n continue\n if atom.hacceptor:\n if not atom.hdonor and not closeatom.hdonor:\n continue\n dist = util.distance(atom.coords, closeatom.coords)\n if dist < 4.3:\n residue = atom.residue\n hbond = 
PotentialBond(atom, closeatom, dist)\n # Store the potential bond\n obj.hbonds.append(hbond)\n # Keep track of connectivity\n if closeatom in self.atomlist:\n closeobj = self.resmap[closeatom.residue]\n if closeobj not in connectivity[obj]:\n connectivity[obj].append(closeobj)\n progress += increment\n while progress >= 0.0499:\n progress -= 0.05\n # Some residues might have no nearby hbonds - if so, place at\n # default state\n for obj in optlist:\n if len(obj.hbonds) == 0:\n if obj.residue.fixed:\n continue\n _LOGGER.debug(\n f\"{obj.residue} has no nearby partners - fixing.\"\n )\n obj.finalize()\n # Determine the distinct networks\n networks = []\n seen = []\n for obj1 in optlist:\n if obj1.residue.fixed:\n continue\n if obj1 in seen:\n continue\n network = util.analyze_connectivity(connectivity, obj1)\n for obj2 in network:\n if obj2 not in seen:\n seen.append(obj2)\n networks.append(network)\n # Initialize the output progress\n if len(networks) > 0:\n _LOGGER.debug(\"Optimizing hydrogen bonds\")\n progress = 0.0\n increment = 1.0 / len(networks)\n # Work on the networks\n for network in networks:\n txt = \"\"\n for obj in network:\n txt += f\"{obj}, \"\n _LOGGER.debug(f\"Starting network {txt[:-2]}\")\n # FIRST: Only optimizeable to backbone atoms\n _LOGGER.debug(\"* Optimizeable to backbone *\")\n hbondmap = {}\n for obj in network:\n for hbond in obj.hbonds:\n if hbond.atom2 not in self.atomlist:\n hbondmap[hbond] = hbond.dist\n hbondlist = util.sort_dict_by_value(hbondmap)\n hbondlist.reverse()\n for hbond in hbondlist:\n atom = hbond.atom1\n atom2 = hbond.atom2\n obj = self.resmap[atom.residue]\n\n if atom.residue.fixed:\n continue\n if atom.hdonor:\n obj.try_donor(atom, atom2)\n if atom.hacceptor:\n obj.try_acceptor(atom, atom2)\n # SECOND: Non-dual water Optimizeable to Optimizeable\n _LOGGER.debug(\"* Optimizeable to optimizeable *\")\n hbondmap = {}\n seenlist = []\n for obj in network:\n for hbond in obj.hbonds:\n if hbond.atom2 in self.atomlist:\n if not isinstance(hbond.atom1.residue, aa.WAT):\n if not isinstance(hbond.atom2.residue, aa.WAT):\n # Only get one hbond pair\n if (hbond.atom2, hbond.atom1) not in seenlist:\n hbondmap[hbond] = hbond.dist\n seenlist.append((hbond.atom1, hbond.atom2))\n hbondlist = util.sort_dict_by_value(hbondmap)\n hbondlist.reverse()\n for hbond in hbondlist:\n atom = hbond.atom1\n atom2 = hbond.atom2\n obj1 = self.resmap[atom.residue]\n obj2 = self.resmap[atom2.residue]\n # Atoms may no longer exist if already optimized\n if not atom.residue.has_atom(atom.name):\n continue\n if not atom2.residue.has_atom(atom2.name):\n continue\n res = 0\n if atom.hdonor and atom2.hacceptor:\n res = obj1.try_both(atom, atom2, obj2)\n if atom.hacceptor and atom2.hdonor and res == 0:\n obj2.try_both(atom2, atom, obj1)\n # THIRD: All water-water residues\n _LOGGER.debug(\"* Water to Water *\")\n hbondmap = {}\n seenlist = []\n for obj in network:\n for hbond in obj.hbonds:\n residue = hbond.atom1.residue\n if isinstance(residue, aa.WAT):\n if isinstance(hbond.atom2.residue, aa.WAT):\n if (hbond.atom2, hbond.atom1) not in seenlist:\n hbondmap[hbond] = hbond.dist\n seenlist.append((hbond.atom1, hbond.atom2))\n hbondlist = util.sort_dict_by_value(hbondmap)\n hbondlist.reverse()\n for hbond in hbondlist:\n atom = hbond.atom1\n atom2 = hbond.atom2\n obj1 = self.resmap[atom.residue]\n obj2 = self.resmap[atom2.residue]\n res = 0\n if atom.hdonor and atom2.hacceptor:\n res = obj1.try_both(atom, atom2, obj2)\n if atom.hacceptor and atom2.hdonor and res == 0:\n 
obj2.try_both(atom2, atom, obj1)\n # FOURTH: Complete all residues\n for obj in network:\n obj.complete()\n # STEP 5: Update progress meter\n progress += 100.0 * increment\n while progress >= 5.0:\n progress -= 5.0", "def Hbond_acceptors(self):\n num_acceptors = 0\n for a in self.allAtoms:\n num_acceptors += a.element in ('O', 'N')\n return num_acceptors", "def check(m) :\n #find Connected-component\n lst = find_c(m)\n for e in lst :\n # verify len , 3 is the len of large boat\n if len(e) > 3 :\n return False\n if not is_vert(e) and not is_hori(e):\n return False\n return True", "def all_bees_raised_flag(self):\n pos, com, success = self.perception\n if len(pos) > 0:\n return all(map(lambda x: x[1][\"flag\"] == (self.nr_of_possible_neighbors + 1), com))\n else:\n return True", "def is_organic(fragment):\n # TODO: Consider a different definition?\n # Could allow only H, C, N, O, S, P, F, Cl, Br, I\n for a in fragment.GetAtoms():\n if a.GetAtomicNum() == 6:\n return True\n return False", "def calc_chasa_solv2(mol, atoms, fai, residue_names, numint_loos, numvirt_loos, bbtot, numbb,ext_atoms, solv_list):\n #atoms = mol.atoms\n #fai = mol.residue_first_atom_indices\n #residue_names = mol.residue_names\n minres, maxres = construct.get_res_extents(mol)\n\n use_ext = 0\n ext_coords = None\n use_data = 1\n data = zeros(len(atoms), 'd')\n \n if ext_atoms:\n ext_coords = []\n map(lambda x: map(lambda y: ext_coords.append(y), x), ext_atoms)\n ext_coords = array(ext_coords, 'd')\n use_ext = len(ext_atoms)\n\n flags = construct.make_asa_list(mol)\n probe = 1.4\n ndiv = 3\n ext_radius = 1.4\n tot_asa = asa_evaluate(atoms, data, ext_coords, flags, probe,\n ext_radius, use_data, use_ext, ndiv)\n p_solv_nrg = 0.0\n ap_solv_nrg = 0.0\n Gamma_p = 3.0/5.0\n Gamma_hb_oxy = 0.6\n Gamma_ap = 0.03\n CHASA = 0.0\n for i in xrange(minres,maxres):\n rname = residue_names[i]\n start = fai[i]\n end = fai[i+1]\n occ = 0.0\n for j in range(start, end):\n atom = atoms[j]\n residue_num = int(mol.res_pdb_number[atom.resnum])\n if atom.name == ' N ':\n if solv_list[i][0][2] > 0:\n p_solv_nrg = p_solv_nrg - (Gamma_p *(solv_list[i][0][2]))\n# elif solv_list[i][0][2] < 0:\n# p_solv_nrg = p_solv_nrg + non_hbd_score\n\n elif atom.name == ' O ':\n if solv_list[i][1][2] > 0:\n if solv_list[i][1][3] == 0:\n p_solv_nrg = p_solv_nrg - (Gamma_p *(solv_list[i][1][2]))\n elif solv_list[i][1][3] > 0:\n p_solv_nrg = p_solv_nrg - (Gamma_hb_oxy)\n# elif solv_list[i][1][2] < 0:\n# p_solv_nrg = p_solv_nrg + non_hbd_score\n\n elif 'C' in atom.name:\n ap_solv_nrg = ap_solv_nrg + (Gamma_ap * data[j])\n# CHASA = CHASA + data[j]\n\n tot_solv_nrg = ap_solv_nrg + p_solv_nrg\n# print ap_solv_nrg, p_solv_nrg\n\n return tot_solv_nrg", "def get_excluded_pairs(self, max_exclusion = 3):\n\n excluded_pairs = []\n\n # construct a matrix of size n by n where n is the number of atoms in this fragment\n # a value of 1 in row a and column b means that atom a and b are bonded\n connectivity_matrix = [[0 for k in range(self.get_num_atoms())] for i in range(self.get_num_atoms())]\n\n # loop over each pair of atoms\n for index1, atom1 in enumerate(self.get_atoms()):\n for index2, atom2 in enumerate(self.get_atoms()[index1 + 1:]):\n index2 += index1 + 1\n\n # if these atoms are bonded, set their values in the connectivity matrix to 1.\n if atom1.is_bonded(atom2):\n connectivity_matrix[index1][index2] = 1\n connectivity_matrix[index2][index1] = 1\n\n # current matrix represents connectivity_matrix^x where x is the same as as in the excluded_1x pairs we are currently 
generating\n current_matrix = connectivity_matrix\n\n excluded_pairs_12 = set()\n\n # loop over each pair of atoms\n for index1, atom1 in enumerate(self.get_atoms()):\n for index2, atom2 in enumerate(self.get_atoms()[index1 + 1:]):\n index2 += index1 + 1\n\n # if the value in the current matrix is at least 1, then these atoms are 1 bond apart, and are added to the excluded_pairs_12 list.\n if current_matrix[index1][index2] > 0:\n excluded_pairs_12.add((index1, index2))\n\n # add the excluded_pairs_12 to the list of all excluded pairs\n excluded_pairs.append(excluded_pairs_12)\n\n for i in range(max_exclusion - 1):\n\n # current matrix is multiplied by connectivity_matrix so that each iteration of the loop current_matrix = connectivity_matrix^(i + 1)\n current_matrix = numpy.matmul(current_matrix, connectivity_matrix)\n\n excluded_pairs_1x = set()\n\n # loop over each pair of atoms\n for index1, atom1 in enumerate(self.get_atoms()):\n for index2, atom2 in enumerate(self.get_atoms()[index1 + 1:]):\n index2 += index1 + 1\n\n # if the value in the connectivity matrix is at least 1, then these atoms are x bonds apart, and are added to the excluded_pairs_1x list.\n if current_matrix[index1][index2] > 0:\n excluded_pairs_1x.add((index1, index2))\n\n # filter out all terms inside other excluded lists from the new excluded list\n for excluded_pairs_1y in excluded_pairs:\n excluded_pairs_1x -= excluded_pairs_1y\n\n # add the excluded_pairs_1x to the list of all excluded pairs\n excluded_pairs.append(excluded_pairs_1x)\n\n return [[list(pair) for pair in excluded_pairs_1x] for excluded_pairs_1x in excluded_pairs]", "def get_map_edge(atom_list):\n edge_map = [atom for atom in atom_list if ((0 < len(identify_bonds(atom, atom_list)) < 3) and (check_connected(atom, identify_bonds(atom, atom_list)) == False))]\n return edge_map", "def check_neighbours(self):\n for p in self.targetCell.possibilities:\n if p != 0:\n if p not in self.targetCell.row_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.column_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.box_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n return False", "def bond_checker(atom, dict, bond_dict):\n bound = []\n for item, values in dict.items():\n bond_range = check_bond_len(bond_dict, atom[0], values[\"element\"]) + 0.2\n if distance_checker(atom[1:], values[\"coor\"]) <= bond_range:\n bound.append(item)\n return bound", "def extract_bonds(self):\n atom_types = self.contents['Sub_ID']\n atom_ids = self.contents['ID']\n bond_list = []\n for key, value in self.bonds.items():\n a = value[0]\n b = value[1]\n\n A = np.asarray(atom_types).reshape(-1, 3)\n B = np.asarray(atom_ids).reshape(-1, 3)\n\n D = np.where(A == a, B, np.nan)\n E = np.where(A == b, B, np.nan)\n\n D = D[:, ~np.all(np.isnan(D), axis=0)]\n E = E[:, ~np.all(np.isnan(E), axis=0)]\n\n D_ = np.tile(D, (1, E.shape[1]))\n E_ = np.repeat(E, D.shape[1], axis=1)\n\n F = np.asarray([D_, E_]).T\n\n idd = np.ones((F.shape[1], F.shape[0])) * key\n # g = np.arange(1, )\n fi = np.arange(F.shape[1])\n iff = np.repeat(fi[:,np.newaxis], 2, axis=1)\n\n concate = np.concatenate((iff[:,:,np.newaxis], idd[:,:,np.newaxis], F.swapaxes(0, 1)), axis=-1)\n concate = concate.reshape(-1, 4)\n df = pd.DataFrame(data=concate, columns=['Mol_ID', 'Bond_type', 'Atom_1', 'Atom_2'])\n bond_list.append(df)\n self.bond_df = pd.concat(bond_list)\n self.num_bonds = len(self.bond_df)", "def 
assign_electrons(molecule, electrons):\n # Assign electrons based upon unperturbed atoms and ignore impact of\n # fractional nuclear charge.\n nuclei = [int(round(atom.charge)) for atom in molecule]\n total_charge = sum(nuclei) - sum(electrons)\n # Construct a dummy iso-electronic neutral system.\n neutral_molecule = [copy.copy(atom) for atom in molecule]\n if total_charge != 0:\n logging.warning(\n 'Charged system. Using heuristics to set initial electron positions')\n charge = 1 if total_charge > 0 else -1\n while total_charge != 0:\n # Poor proxy for electronegativity.\n atom_index = nuclei.index(max(nuclei) if total_charge < 0 else min(nuclei))\n atom = neutral_molecule[atom_index]\n atom.charge -= charge\n atom.atomic_number = int(round(atom.charge))\n if int(round(atom.charge)) == 0:\n neutral_molecule.pop(atom_index)\n else:\n atom.symbol = elements.ATOMIC_NUMS[atom.atomic_number].symbol\n total_charge -= charge\n nuclei = [int(round(atom.charge)) for atom in neutral_molecule]\n\n spin_pol = lambda electrons: electrons[0] - electrons[1]\n abs_spin_pol = abs(spin_pol(electrons))\n if len(neutral_molecule) == 1:\n elecs_atom = [electrons]\n else:\n elecs_atom = []\n spin_pol_assigned = 0\n for ion in neutral_molecule:\n # Greedily assign up and down electrons based upon the ground state spin\n # configuration of an isolated atom.\n atom_spin_pol = elements.ATOMIC_NUMS[ion.atomic_number].spin_config\n nelec = ion.atomic_number\n na = (nelec + atom_spin_pol) // 2\n nb = nelec - na\n # Attempt to keep spin polarisation as close to 0 as possible.\n if (spin_pol_assigned > 0 and\n spin_pol_assigned + atom_spin_pol > abs_spin_pol):\n elec_atom = [nb, na]\n else:\n elec_atom = [na, nb]\n spin_pol_assigned += spin_pol(elec_atom)\n elecs_atom.append(elec_atom)\n\n electrons_assigned = [sum(e) for e in zip(*elecs_atom)]\n spin_pol_assigned = spin_pol(electrons_assigned)\n if np.sign(spin_pol_assigned) == -np.sign(abs_spin_pol):\n # Started with the wrong guess for spin-up vs spin-down.\n elecs_atom = [e[::-1] for e in elecs_atom]\n spin_pol_assigned = -spin_pol_assigned\n\n if spin_pol_assigned != abs_spin_pol:\n logging.info('Spin polarisation does not match isolated atoms. 
'\n 'Using heuristics to set initial electron positions.')\n while spin_pol_assigned != abs_spin_pol:\n atom_spin_pols = [abs(spin_pol(e)) for e in elecs_atom]\n atom_index = atom_spin_pols.index(max(atom_spin_pols))\n elec_atom = elecs_atom[atom_index]\n if spin_pol_assigned < abs_spin_pol and elec_atom[0] <= elec_atom[1]:\n elec_atom[0] += 1\n elec_atom[1] -= 1\n spin_pol_assigned += 2\n elif spin_pol_assigned < abs_spin_pol and elec_atom[0] > elec_atom[1]:\n elec_atom[0] -= 1\n elec_atom[1] += 1\n spin_pol_assigned += 2\n elif spin_pol_assigned > abs_spin_pol and elec_atom[0] > elec_atom[1]:\n elec_atom[0] -= 1\n elec_atom[1] += 1\n spin_pol_assigned -= 2\n else:\n elec_atom[0] += 1\n elec_atom[1] -= 1\n spin_pol_assigned -= 2\n\n electrons_assigned = [sum(e) for e in zip(*elecs_atom)]\n if spin_pol(electrons_assigned) == -spin_pol(electrons):\n elecs_atom = [e[::-1] for e in elecs_atom]\n electrons_assigned = electrons_assigned[::-1]\n\n logging.info(\n 'Electrons assigned %s.', ', '.join([\n '{}: {}'.format(atom.symbol, elec_atom)\n for atom, elec_atom in zip(molecule, elecs_atom)\n ]))\n if any(e != e_assign for e, e_assign in zip(electrons, electrons_assigned)):\n raise RuntimeError(\n 'Assigned incorrect number of electrons ([%s instead of %s]' %\n (electrons_assigned, electrons))\n if any(min(ne) < 0 for ne in zip(*elecs_atom)):\n raise RuntimeError('Assigned negative number of electrons!')\n electron_positions = np.concatenate([\n np.tile(atom.coords, e[0])\n for atom, e in zip(neutral_molecule, elecs_atom)\n ] + [\n np.tile(atom.coords, e[1])\n for atom, e in zip(neutral_molecule, elecs_atom)\n ])\n return electron_positions", "def test_library_charges_to_three_ethanols_different_atom_ordering(self):\n # Define a library charge parameter for ethanol (C1-C2-O3) where C1 has charge -0.2, and its Hs have -0.02,\n # C2 has charge -0.1 and its Hs have -0.01, and O3 has charge 0.3, and its H has charge 0.08\n\n ff = ForceField(\n get_data_file_path(\"test_forcefields/test_forcefield.offxml\"),\n xml_ethanol_library_charges_ff,\n )\n\n # ethanol.sdf\n # H5 H8\n # | |\n # H6 - C1 - C2 - O3 - H4\n # | |\n # H7 H9\n #\n # ethanol_reordered.sdf (The middle C and O switch indices)\n # H5 H8\n # | |\n # H6 - C1 - C3 - O2 - H4\n # | |\n # H7 H9\n #\n # create_reversed_ethanol()\n # H5 H2\n # | |\n # H4 - C8 - C7 - O6 - H0\n # | |\n # H3 H1\n\n molecules = [\n Molecule.from_file(get_data_file_path(\"molecules/ethanol.sdf\")),\n Molecule.from_file(get_data_file_path(\"molecules/ethanol_reordered.sdf\")),\n create_reversed_ethanol(),\n ]\n top = Topology.from_molecules(molecules)\n omm_system = ff.create_openmm_system(top)\n nonbondedForce = [\n f for f in omm_system.getForces() if type(f) == NonbondedForce\n ][0]\n expected_charges = [\n -0.2,\n -0.1,\n 0.3,\n 0.08,\n -0.02,\n -0.02,\n -0.02,\n -0.01,\n -0.01,\n -0.2,\n 0.3,\n -0.1,\n 0.08,\n -0.02,\n -0.02,\n -0.02,\n -0.01,\n -0.01,\n 0.08,\n -0.01,\n -0.01,\n -0.02,\n -0.02,\n -0.02,\n 0.3,\n -0.1,\n -0.2,\n ] * openmm_unit.elementary_charge\n for particle_index, expected_charge in enumerate(expected_charges):\n q, _, _ = nonbondedForce.getParticleParameters(particle_index)\n assert q == expected_charge" ]
[ "0.6319188", "0.6093581", "0.6006863", "0.5989993", "0.592804", "0.59155035", "0.58293885", "0.56470066", "0.5599455", "0.5575535", "0.5540133", "0.54949856", "0.5488254", "0.54558873", "0.54485685", "0.5412311", "0.54049903", "0.5360718", "0.5342761", "0.5333482", "0.53124976", "0.52920586", "0.5284127", "0.52641386", "0.5246182", "0.5231596", "0.52188843", "0.52050084", "0.5203844", "0.51922995", "0.5184192", "0.51804006", "0.51780474", "0.51780474", "0.51780474", "0.5174574", "0.5171002", "0.51610637", "0.5159914", "0.514516", "0.5125389", "0.512503", "0.51186484", "0.5110222", "0.50933844", "0.5068456", "0.5034734", "0.5021077", "0.50188875", "0.50061744", "0.500493", "0.5001402", "0.49922276", "0.49891487", "0.4985643", "0.49627987", "0.49570516", "0.49394855", "0.49297166", "0.49296445", "0.4929081", "0.49225584", "0.49149594", "0.4913246", "0.49116772", "0.49076065", "0.48763183", "0.48755634", "0.48730925", "0.48668054", "0.48614997", "0.4859218", "0.48552224", "0.48421854", "0.48309016", "0.4824576", "0.481875", "0.48119137", "0.48102203", "0.47973245", "0.4794734", "0.47864312", "0.4784259", "0.4777448", "0.47757718", "0.4756617", "0.47518831", "0.47511318", "0.4749718", "0.47496215", "0.47489816", "0.47385108", "0.47381872", "0.47378817", "0.472908", "0.47213632", "0.47192743", "0.4718529", "0.47178635", "0.47160584" ]
0.60876876
2
The function is able to identify equivalent atoms in different molecules in different coordinate systems independent of the molecule's orientation.
def link_atoms_by_distance_diff(distlist1, atomlist1, distlist2, atomlist2, keys):
    hitlist = []
    for atom in distlist1:
        atomtype = int(atomlist1[distlist1.index(atom)][0][1])
        valuelist = []
        for partner in distlist2:
            partnertype = int(atomlist2[distlist2.index(partner)][0][1])
            if atomtype == partnertype:
                partnervalue = 0
                keylist = partner.keys()
                for key in keylist:
                    for element in xrange(len(atom[key])):
                        value = abs(atom[key][element] - partner[key][element])
                        partnervalue += value
            else:
                partnervalue = 9999999
            valuelist.append(partnervalue)
        minvalue = min(valuelist)
        besthit = valuelist.index(minvalue)
        hitlist.append(besthit)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assert_molecules_match_after_remap(self, mol1, mol2):\n for atoms in zip(mol1.atoms, mol2.atoms):\n assert atoms[0].to_dict() == atoms[1].to_dict()\n # bonds will not be in the same order in the molecule and the atom1 and atom2 indecies could be out of\n # order make a dict to compare them both\n remapped_bonds = dict(\n ((bond.atom1_index, bond.atom2_index), bond) for bond in mol2.bonds\n )\n for bond in mol1.bonds:\n key = (bond.atom1_index, bond.atom2_index)\n if key not in remapped_bonds:\n key = tuple(reversed(key))\n assert key in remapped_bonds\n # now compare each attribute of the bond except the atom indexes\n bond_dict = bond.to_dict()\n del bond_dict[\"atom1\"]\n del bond_dict[\"atom2\"]\n remapped_bond_dict = remapped_bonds[key].to_dict()\n del remapped_bond_dict[\"atom1\"]\n del remapped_bond_dict[\"atom2\"]\n assert mol1.n_bonds == mol2.n_bonds\n assert mol1.n_angles == mol2.n_angles\n assert mol1.n_propers == mol2.n_propers\n assert mol1.n_impropers == mol2.n_impropers\n assert mol1.total_charge == mol2.total_charge\n assert mol1.partial_charges.all() == mol2.partial_charges.all()", "def assert_molecules_match_after_remap(mol1, mol2):\n for atoms in zip(mol1.atoms, mol2.atoms):\n assert atoms[0].to_dict() == atoms[1].to_dict()\n # bonds will not be in the same order in the molecule and the atom1 and atom2 indecies could be out of order\n # make a dict to compare them both\n remapped_bonds = dict(\n ((bond.atom1_index, bond.atom2_index), bond) for bond in mol2.bonds\n )\n for bond in mol1.bonds:\n key = (bond.atom1_index, bond.atom2_index)\n if key not in remapped_bonds:\n key = tuple(reversed(key))\n assert key in remapped_bonds\n # now compare each attribute of the bond except the atom indexes\n bond_dict = bond.to_dict()\n del bond_dict[\"atom1\"]\n del bond_dict[\"atom2\"]\n remapped_bond_dict = remapped_bonds[key].to_dict()\n del remapped_bond_dict[\"atom1\"]\n del remapped_bond_dict[\"atom2\"]\n assert mol1.n_bonds == mol2.n_bonds\n assert mol1.n_angles == mol2.n_angles\n assert mol1.n_propers == mol2.n_propers\n assert mol1.n_impropers == mol2.n_impropers\n assert mol1.total_charge == mol2.total_charge\n assert mol1.partial_charges.all() == mol2.partial_charges.all()", "def test_isomorphic_general(self):\n # check that hill formula fails are caught\n ethanol = create_ethanol()\n acetaldehyde = create_acetaldehyde()\n assert ethanol.is_isomorphic_with(acetaldehyde) is False\n assert acetaldehyde.is_isomorphic_with(ethanol) is False\n # check that different orderings work with full matching\n ethanol_reverse = create_reversed_ethanol()\n assert ethanol.is_isomorphic_with(ethanol_reverse) is True\n # check a reference mapping between ethanol and ethanol_reverse matches that calculated\n ref_mapping = {0: 8, 1: 7, 2: 6, 3: 3, 4: 4, 5: 5, 6: 1, 7: 2, 8: 0}\n assert (\n Molecule.are_isomorphic(ethanol, ethanol_reverse, return_atom_map=True)[1]\n == ref_mapping\n )\n # check matching with nx.Graph atomic numbers and connectivity only\n assert (\n Molecule.are_isomorphic(\n ethanol,\n ethanol_reverse.to_networkx(),\n aromatic_matching=False,\n formal_charge_matching=False,\n bond_order_matching=False,\n atom_stereochemistry_matching=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n # check matching with nx.Graph with full matching\n assert ethanol.is_isomorphic_with(ethanol_reverse.to_networkx()) is True\n # check matching with a TopologyMolecule class\n from openforcefield.topology.topology import Topology, TopologyMolecule\n\n topology = 
Topology.from_molecules(ethanol)\n topmol = TopologyMolecule(ethanol, topology)\n assert (\n Molecule.are_isomorphic(\n ethanol,\n topmol,\n aromatic_matching=False,\n formal_charge_matching=False,\n bond_order_matching=False,\n atom_stereochemistry_matching=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n # test hill formula passes but isomorphic fails\n mol1 = Molecule.from_smiles(\"Fc1ccc(F)cc1\")\n mol2 = Molecule.from_smiles(\"Fc1ccccc1F\")\n assert mol1.is_isomorphic_with(mol2) is False\n assert mol2.is_isomorphic_with(mol1) is False", "def test_order_atoms(self):\n mol1 = converter.s_bonds_mol_from_xyz(self.xyz10['dict'])\n mol2 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order1'])\n mol3 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order2'])\n converter.order_atoms(ref_mol=mol1, mol=mol2)\n for atom1, atom2 in zip(mol1.atoms, mol2.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n converter.order_atoms(ref_mol=mol3, mol=mol1)\n for atom1, atom2 in zip(mol3.atoms, mol1.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n\n ref_mol = Molecule(smiles='[CH](CC[CH]c1ccccc1)c1ccccc1')\n mol_list = ref_mol.copy(deep=True).generate_resonance_structures(keep_isomorphic=False, filter_structures=True)\n for mol in mol_list:\n converter.order_atoms(ref_mol=ref_mol, mol=mol)\n bond_dict = dict()\n for index1, atom1 in enumerate(ref_mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = ref_mol.atoms.index(atom2)\n if index1 < index2:\n if index1 not in bond_dict:\n bond_dict[index1] = [index2]\n else:\n bond_dict[index1].append(index2)\n for index1, atom1 in enumerate(mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = mol.atoms.index(atom2)\n if index1 < index2:\n self.assertIn(index2, bond_dict[index1]) # check that these atoms are connected in all mols", "def test_isomorphic_general(self):\n # check that hill formula fails are caught\n ethanol = create_ethanol()\n acetaldehyde = create_acetaldehyde()\n assert ethanol.is_isomorphic_with(acetaldehyde) is False\n assert acetaldehyde.is_isomorphic_with(ethanol) is False\n # check that different orderings work with full matching\n ethanol_reverse = create_reversed_ethanol()\n assert ethanol.is_isomorphic_with(ethanol_reverse) is True\n # check a reference mapping between ethanol and ethanol_reverse matches that calculated\n ref_mapping = {0: 8, 1: 7, 2: 6, 3: 3, 4: 4, 5: 5, 6: 1, 7: 2, 8: 0}\n assert (\n Molecule.are_isomorphic(ethanol, ethanol_reverse, return_atom_map=True)[1]\n == ref_mapping\n )\n # check matching with nx.Graph atomic numbers and connectivity only\n assert (\n Molecule.are_isomorphic(\n ethanol,\n ethanol_reverse.to_networkx(),\n aromatic_matching=False,\n formal_charge_matching=False,\n bond_order_matching=False,\n atom_stereochemistry_matching=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n # check matching with nx.Graph with full matching\n assert ethanol.is_isomorphic_with(ethanol_reverse.to_networkx()) is True\n\n from openff.toolkit.topology.topology import Topology\n\n topology = Topology.from_molecules(ethanol)\n assert (\n Molecule.are_isomorphic(\n ethanol,\n [*topology.molecules][0],\n aromatic_matching=False,\n formal_charge_matching=False,\n bond_order_matching=False,\n atom_stereochemistry_matching=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n # test hill formula passes but isomorphic fails\n mol1 = Molecule.from_smiles(\"Fc1ccc(F)cc1\")\n mol2 = Molecule.from_smiles(\"Fc1ccccc1F\")\n assert 
mol1.is_isomorphic_with(mol2) is False\n assert mol2.is_isomorphic_with(mol1) is False", "def test_add_lone_pairs_by_atom_valance(self):\n adj1 = \"\"\"multiplicity 3\n1 N u2 p1 c0 {2,S}\n2 S u0 p2 c0 {1,S} {3,S}\n3 H u0 p0 c0 {2,S}\"\"\"\n mol1 = Molecule().from_adjacency_list(adjlist=adj1)\n converter.add_lone_pairs_by_atom_valance(mol1)\n self.assertEqual(mol1.to_smiles(), '[N]S')\n mol1.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol1)\n self.assertEqual(mol1.to_smiles(), 'N#S')\n\n adj2 = \"\"\"multiplicity 3\n1 N u2 p1 c0 {2,S}\n2 N u0 p1 c0 {1,S} {3,S} {4,S}\n3 H u0 p0 c0 {2,S}\n4 H u0 p0 c0 {2,S}\"\"\"\n mol2 = Molecule().from_adjacency_list(adjlist=adj2)\n converter.add_lone_pairs_by_atom_valance(mol2)\n self.assertEqual(mol2.to_smiles(), '[N]N')\n mol2.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol2)\n self.assertEqual(mol2.to_smiles(), '[N-]=[NH2+]')\n\n adj3 = \"\"\"multiplicity 3\n1 C u0 p0 c0 {2,S} {5,S} {6,S} {7,S}\n2 C u0 p0 c0 {1,S} {3,S} {8,S} {9,S}\n3 C u2 p0 c0 {2,S} {4,S}\n4 H u0 p0 c0 {3,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {1,S}\n7 H u0 p0 c0 {1,S}\n8 H u0 p0 c0 {2,S}\n9 H u0 p0 c0 {2,S}\"\"\"\n mol3 = Molecule().from_adjacency_list(adjlist=adj3)\n converter.add_lone_pairs_by_atom_valance(mol3)\n self.assertEqual(mol3.to_smiles(), '[CH]CC')\n mol3.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol3)\n self.assertEqual(mol3.to_adjacency_list(), \"\"\"1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}\n2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}\n3 C u0 p1 c0 {1,S} {9,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 c0 {2,S}\n8 H u0 p0 c0 {2,S}\n9 H u0 p0 c0 {3,S}\n\"\"\")\n\n adj4 = \"\"\"multiplicity 3\n1 C u0 p0 c0 {2,S} {4,S} {5,S} {6,S}\n2 C u0 p0 c0 {1,S} {3,S} {7,S} {8,S}\n3 N u2 p1 c0 {2,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 c0 {1,S}\n7 H u0 p0 c0 {2,S}\n8 H u0 p0 c0 {2,S}\"\"\"\n mol4 = Molecule().from_adjacency_list(adjlist=adj4)\n converter.add_lone_pairs_by_atom_valance(mol4)\n self.assertEqual(mol4.to_smiles(), 'CC[N]')\n mol4.multiplicity = 1\n converter.add_lone_pairs_by_atom_valance(mol4)\n self.assertEqual(mol4.to_adjacency_list(), \"\"\"1 N u0 p2 c0 {3,S}\n2 C u0 p0 c0 {3,S} {4,S} {5,S} {6,S}\n3 C u0 p0 c0 {1,S} {2,S} {7,S} {8,S}\n4 H u0 p0 c0 {2,S}\n5 H u0 p0 c0 {2,S}\n6 H u0 p0 c0 {2,S}\n7 H u0 p0 c0 {3,S}\n8 H u0 p0 c0 {3,S}\n\"\"\")", "def makeResidueAtomSets(residue, aromaticsEquivalent=True):\n \n getResidueMapping(residue)\n \n equivalent = {}\n elementSymbolDict = {}\n nonequivalent = {}\n multiSet = {}\n chemAtomSetDict = {}\n inMultiSet = {}\n molType = residue.molResidue.molType\n \n for atom in residue.atoms: \n chemAtom = atom.chemAtom\n chemAtomSetDict[atom] = chemAtom\n elementSymbol = chemAtom.elementSymbol\n chemAtomSet = chemAtom.chemAtomSet\n\n if chemAtomSet is None:\n name = chemAtom.name\n makeAtomSet(name,(atom,),None,'simple')\n \n else:\n name = chemAtomSet.name\n elementSymbolDict[name] = elementSymbol\n chemAtomSetDict[name] = chemAtomSet\n if chemAtomSet.isEquivalent:\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n elif (chemAtomSet.isEquivalent is None) and atom.atomSet and (len(atom.atomSet.atoms) > 1):\n # aromatic rotation prev set\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n elif (chemAtomSet.isEquivalent is None) and (not atom.atomSet) and aromaticsEquivalent:\n # aromatic rotation to be set\n if equivalent.get(name) is None:\n equivalent[name] = []\n 
equivalent[name].append(atom)\n \n else:\n if nonequivalent.get(name) is None:\n nonequivalent[name] = []\n nonequivalent[name].append(atom)\n \n if chemAtomSet.chemAtomSet is not None:\n multiName = chemAtomSet.chemAtomSet.name\n chemAtomSetDict[multiName] = chemAtomSet.chemAtomSet\n elementSymbolDict[multiName] = elementSymbol\n if multiSet.get(multiName) is None:\n multiSet[multiName] = {}\n multiSet[multiName][name] = 1\n inMultiSet[name] = multiName\n\n for groupName in equivalent.keys():\n atoms = equivalent[groupName]\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n if len(atoms)==2:\n # not enough atoms for multi sets!\n makeAtomSet(groupName,atoms,chemAtomSet,'simple')\n else:\n if inMultiSet.get(groupName):\n # e.g. for Val Hg1*\n makeAtomSet(groupName,atoms,chemAtomSet,'stereo')\n \n else:\n makeAtomSet(groupName,atoms,chemAtomSet,'simple')\n\n for groupName in nonequivalent.keys():\n atoms = nonequivalent[groupName]\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n atomSetNames = []\n \n if len(atoms) == 1:\n atom = atoms[0]\n # not enough atoms for prochiral. Corrupt ChemComp\n makeAtomSet(atom.name, atoms, None, 'simple')\n continue\n \n for atom in atoms:\n name = chemAtomSetDict[atom].name\n makeAtomSet(name,(atom,),chemAtomSet,'stereo')\n atomSetNames.append(name)\n\n for n, atom in enumerate(atoms):\n \n #name = chemAtomSetDict[atom].name\n #name2 = makeNonStereoName(molType, name, n)\n # Shouldn't have to do this if non-equiv groups have paired names\n \n name2 = makeNonStereoName(molType, '%s%d' % (chemAtomSet.name[:-1], n), n)\n \n makeGuiMultiAtomSet(residue, name2, atomSetNames,\n elementSymbol,'nonstereo',chemAtomSet)\n\n makeGuiMultiAtomSet(residue, groupName, atomSetNames,\n elementSymbol,'ambiguous',chemAtomSet)\n\n for groupName in multiSet.keys():\n atomSetNames = multiSet[groupName].keys()\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n if \"|\" in groupName:\n # we don't do these pseudoatoms in Analysis\n continue\n\n # e.g. 
for Val Hga*\n for n, atomSetName in enumerate(atomSetNames):\n name2 = makeNonStereoName(molType, atomSetName, n)\n makeGuiMultiAtomSet(residue, name2, atomSetNames,\n elementSymbol,'nonstereo',chemAtomSet)\n \n makeGuiMultiAtomSet(residue, groupName, atomSetNames,\n elementSymbol,'ambiguous',chemAtomSet)", "def test_canonical_ordering_openeye(self):\n from openforcefield.utils.toolkits import OpenEyeToolkitWrapper\n\n openeye = OpenEyeToolkitWrapper()\n # get ethanol in canonical order\n ethanol = create_ethanol()\n # get reversed non canonical ethanol\n reversed_ethanol = create_reversed_ethanol()\n # get the canonical ordering\n canonical_ethanol = reversed_ethanol.canonical_order_atoms(openeye)\n # make sure the mapping between the ethanol and the openeye ref canonical form is the same\n assert (\n True,\n {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8},\n ) == Molecule.are_isomorphic(canonical_ethanol, ethanol, True)", "def test_canonical_ordering_openeye(self):\n from openff.toolkit.utils.toolkits import OpenEyeToolkitWrapper\n\n openeye = OpenEyeToolkitWrapper()\n # get ethanol in canonical order\n ethanol = create_ethanol()\n # get reversed non canonical ethanol\n reversed_ethanol = create_reversed_ethanol()\n # get the canonical ordering\n canonical_ethanol = reversed_ethanol.canonical_order_atoms(openeye)\n # make sure the mapping between the ethanol and the openeye ref canonical form is the same\n assert (\n True,\n {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8},\n ) == Molecule.are_isomorphic(canonical_ethanol, ethanol, True)", "def test_check_isomorphism(self):\n mol1 = Molecule(smiles='[O-][N+]#N')\n mol2 = Molecule(smiles='[N-]=[N+]=O')\n self.assertTrue(converter.check_isomorphism(mol1, mol2))", "def test_molecules_from_xyz(self):\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz6['dict'])\n\n # check that the atom order is the same\n self.assertTrue(s_mol.atoms[0].is_sulfur())\n self.assertTrue(b_mol.atoms[0].is_sulfur())\n self.assertTrue(s_mol.atoms[1].is_oxygen())\n self.assertTrue(b_mol.atoms[1].is_oxygen())\n self.assertTrue(s_mol.atoms[2].is_oxygen())\n self.assertTrue(b_mol.atoms[2].is_oxygen())\n self.assertTrue(s_mol.atoms[3].is_nitrogen())\n self.assertTrue(b_mol.atoms[3].is_nitrogen())\n self.assertTrue(s_mol.atoms[4].is_carbon())\n self.assertTrue(b_mol.atoms[4].is_carbon())\n self.assertTrue(s_mol.atoms[5].is_hydrogen())\n self.assertTrue(b_mol.atoms[5].is_hydrogen())\n self.assertTrue(s_mol.atoms[6].is_hydrogen())\n self.assertTrue(b_mol.atoms[6].is_hydrogen())\n self.assertTrue(s_mol.atoms[7].is_hydrogen())\n self.assertTrue(b_mol.atoms[7].is_hydrogen())\n self.assertTrue(s_mol.atoms[8].is_hydrogen())\n self.assertTrue(b_mol.atoms[8].is_hydrogen())\n self.assertTrue(s_mol.atoms[9].is_hydrogen())\n self.assertTrue(b_mol.atoms[9].is_hydrogen())\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz7['dict'])\n self.assertTrue(s_mol.atoms[0].is_oxygen())\n self.assertTrue(b_mol.atoms[0].is_oxygen())\n self.assertTrue(s_mol.atoms[2].is_carbon())\n self.assertTrue(b_mol.atoms[2].is_carbon())\n\n expected_bonded_adjlist = \"\"\"multiplicity 2\n1 O u0 p2 c0 {6,S} {10,S}\n2 O u0 p2 c0 {3,S} {28,S}\n3 C u0 p0 c0 {2,S} {8,S} {14,S} {15,S}\n4 C u0 p0 c0 {7,S} {16,S} {17,S} {18,S}\n5 C u0 p0 c0 {7,S} {19,S} {20,S} {21,S}\n6 C u0 p0 c0 {1,S} {22,S} {23,S} {24,S}\n7 C u1 p0 c0 {4,S} {5,S} {9,S}\n8 C u0 p0 c0 {3,S} {10,D} {11,S}\n9 C u0 p0 c0 {7,S} {11,D} {12,S}\n10 C u0 p0 c0 {1,S} {8,D} {13,S}\n11 C u0 p0 c0 {8,S} {9,D} {25,S}\n12 C u0 p0 c0 {9,S} 
{13,D} {26,S}\n13 C u0 p0 c0 {10,S} {12,D} {27,S}\n14 H u0 p0 c0 {3,S}\n15 H u0 p0 c0 {3,S}\n16 H u0 p0 c0 {4,S}\n17 H u0 p0 c0 {4,S}\n18 H u0 p0 c0 {4,S}\n19 H u0 p0 c0 {5,S}\n20 H u0 p0 c0 {5,S}\n21 H u0 p0 c0 {5,S}\n22 H u0 p0 c0 {6,S}\n23 H u0 p0 c0 {6,S}\n24 H u0 p0 c0 {6,S}\n25 H u0 p0 c0 {11,S}\n26 H u0 p0 c0 {12,S}\n27 H u0 p0 c0 {13,S}\n28 H u0 p0 c0 {2,S}\n\"\"\"\n expected_mol = Molecule().from_adjacency_list(expected_bonded_adjlist)\n self.assertEqual(b_mol.to_adjacency_list(), expected_bonded_adjlist)\n # the is_isomorphic test must come after the adjlist test since it changes the atom order\n self.assertTrue(b_mol.is_isomorphic(expected_mol))\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz10['dict'], multiplicity=1, charge=0)\n self.assertIsNotNone(s_mol)\n self.assertIsNotNone(b_mol)\n for atom1, atom2 in zip(s_mol.atoms, b_mol.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n for atom1, symbol in zip(s_mol.atoms, self.xyz10['dict']['symbols']):\n self.assertEqual(atom1.symbol, symbol)\n\n s_mol, b_mol = converter.molecules_from_xyz(self.xyz_dict_13, multiplicity=1, charge=0)\n for atom1, atom2 in zip(s_mol.atoms, b_mol.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n for atom1, symbol in zip(s_mol.atoms, self.xyz_dict_13['symbols']):\n self.assertEqual(atom1.symbol, symbol)\n self.assertEqual(s_mol.multiplicity, 1)\n self.assertEqual(b_mol.multiplicity, 1)\n self.assertFalse(any(atom.radical_electrons for atom in b_mol.atoms))", "def is_valid_single_attempt(self, atoms_init, atoms_final):\n from scipy.spatial import cKDTree as KDTree\n from random import shuffle\n atoms1 = atoms_init.copy()\n atoms2 = atoms_final.copy()\n\n vol1 = atoms1.get_volume()\n vol2 = atoms2.get_volume()\n if vol2 > vol1:\n ratio = (vol2/vol1)**(1.0/3.0)\n cell1 = atoms1.get_cell()\n atoms1.set_cell(cell1*ratio, scale_atoms=True)\n else:\n ratio = (vol1/vol2)**(1.0/3.0)\n cell2 = atoms2.get_cell()\n atoms2.set_cell(cell2*ratio, scale_atoms=True)\n\n # Try construct the relation\n used_indices = []\n tree = KDTree(atoms2.get_positions())\n indices = list(range(0, len(atoms1)))\n shuffle(indices)\n for atom in atoms1:\n if atom.symbol in self.exclude:\n continue\n dist, closest = tree.query(atom.position, k=12)\n srt_indx = np.argsort(dist)\n dist = [dist[indx] for indx in srt_indx]\n closest = [closest[indx] for indx in srt_indx]\n\n if all(c in used_indices for c in closest):\n # More than one atom is closest to this\n # structure\n self.rejected_reason = \"More than one atom mapped onto the \"\n self.rejected_reason += \"same atoms in the initial structure\"\n return False\n\n # First, unused with mathing symbol\n closest_indx = None\n closest_dist = None\n for i, indx in enumerate(closest):\n if atoms2[indx].symbol == atom.symbol and indx not in used_indices:\n closest_indx = indx\n closest_dist = dist[i]\n break\n\n if closest_indx is None:\n self.rejected_reason = \"No unused atoms with macthing symbol!\"\n return False\n \n used_indices.append(closest_indx)\n if closest_dist > self.max_displacement:\n # The displacement is larger than the tolereance\n self.rejected_reason = \"Max displacement too large\"\n return False\n \n if atom.symbol != atoms2[closest_indx].symbol:\n self.rejected_reason = \"Mapped symbol does not match!\"\n return False\n return True", "def test_is_isomorphic(self):\n mol1 = converter.s_bonds_mol_from_xyz(self.xyz1['dict'])\n mol2 = converter.s_bonds_mol_from_xyz(self.xyz1['dict_diff_order'])\n self.assertTrue(mol1.is_isomorphic(mol2, 
save_order=True, strict=False))\n\n mol1 = converter.s_bonds_mol_from_xyz(self.xyz11['dict'])\n mol2 = converter.s_bonds_mol_from_xyz(self.xyz11['dict_diff_order'])\n self.assertTrue(mol1.is_isomorphic(mol2, save_order=True, strict=False))\n\n mol1 = converter.s_bonds_mol_from_xyz(self.xyz10['dict'])\n mol2 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order1'])\n mol3 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order2'])\n self.assertTrue(mol1.is_isomorphic(mol2, save_order=True, strict=False))\n self.assertTrue(mol1.is_isomorphic(mol3, save_order=True, strict=False))", "def commutation_equivalence(self, position_list=[]):\n\t\treturn self.__class__(_replace_match_at(self, position_list, [\n\t\t\t[ (A, disj, B), (B, disj, A) ],\n\t\t\t[ (A, conj, B), (B, conj, A) ],\n\t\t\t[ (A, iff, B), (B, iff, A) ]\n\t\t]))", "def pseudopotentialise_molecule(self, sysargs=None, execute_deletion=True):\n\n # Find atoms to replace\n deletion_list = []\n if len(sysargs) > 2:\n if 'del' in sysargs:\n deletion_list = self.parse_coord_list(sysargs[4])\n replacement_list = self.parse_coord_list(sysargs[2])\n atoms_to_potentialise = list(item for item in self.coord_list if item[\"#\"] in replacement_list)\n else:\n atoms_to_potentialise = (item for item in self.coord_list if item[\"el\"] == 'c')\n deletion_list = (item for item in self.coord_list if item[\"el\"] == 'h')\n print('Pseudo-potentialising carbon atoms %s ...' % [atom['#'] for atom in atoms_to_potentialise])\n\n potential_coords_list = []\n\n for atom in atoms_to_potentialise:\n distanced_atom_list = self.order_atoms_by_distance_from(atom['#'])\n distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n\n if len(distanced_carbon_list) == 1:\n primary_vector = None\n for non_c_atom in distanced_atom_list[1:4]:\n if non_c_atom['el'] != 'h':\n primary_vector = self.vectorise_atom(non_c_atom['#']) - self.vectorise_atom(atom['#'])\n if primary_vector is None:\n primary_vector = self.vectorise_atom(distanced_atom_list[1]['#']) - self.vectorise_atom(atom['#'])\n else:\n primary_vector = self.vectorise_atom(distanced_carbon_list[1]['#']) - self.vectorise_atom(atom['#'])\n\n normal_vector = numpy.cross(\n self.vectorise_atom(distanced_atom_list[1]['#']) - self.vectorise_atom(atom['#']),\n self.vectorise_atom(distanced_atom_list[2]['#']) - self.vectorise_atom(atom['#'])\n )\n\n primary_potential_vector = self.lengtherise_vector(primary_vector, self.atom_potential_set_distance)\n potential_set_split_vector = self.lengtherise_vector(normal_vector, self.potential_set_split_distance)\n\n relative_potential_vectors = [\n primary_potential_vector + potential_set_split_vector,\n primary_potential_vector - potential_set_split_vector\n ]\n\n for potential_set in range(self.no_potential_sets_per_atom-1):\n\n pps_positive = numpy.dot(self.construct_euler_rodriguez_matrix(\n normal_vector,\n 2*numpy.pi/self.no_potential_sets_per_atom),\n relative_potential_vectors[-2],\n )\n pps_negative = numpy.dot(self.construct_euler_rodriguez_matrix(\n normal_vector,\n 2*numpy.pi/self.no_potential_sets_per_atom),\n relative_potential_vectors[-1]\n )\n\n relative_potential_vectors.append(pps_positive)\n relative_potential_vectors.append(pps_negative)\n\n if self.add_primary_vector_potentials_as_coords is False:\n del relative_potential_vectors[0]\n del relative_potential_vectors[0]\n\n # potential coords are still relative to their atom, now make them real.\n for vector in relative_potential_vectors:\n potential_coords_list.append(\n {'#': 
0, 'el': self.sp2_pseudo_element, 'x': vector[0]+atom['x'], 'y': vector[1]+atom['y'], 'z': vector[2]+atom['z']},\n )\n\n # Now add potentials to coord list, after removing the 'real' hydrogen atoms.\n if execute_deletion is True:\n self.delete_specified_atoms(deletion_list)\n for potential_coord in potential_coords_list:\n self.write_coord(potential_coord, overwrite=False)", "def test_remap(self):\n # the order here is CCO\n ethanol = create_ethanol()\n # get ethanol in reverse order OCC\n ethanol_reverse = create_reversed_ethanol()\n # get the mapping between the molecules\n mapping = Molecule.are_isomorphic(ethanol, ethanol_reverse, True)[1]\n ethanol.add_bond_charge_virtual_site([0, 1], 0.3 * unit.angstrom)\n # make sure that molecules with virtual sites raises an error\n with pytest.raises(NotImplementedError):\n remapped = ethanol.remap(mapping, current_to_new=True)\n\n # remake with no virtual site and remap to match the reversed ordering\n ethanol = create_ethanol()\n\n new_ethanol = ethanol.remap(mapping, current_to_new=True)\n\n def assert_molecules_match_after_remap(mol1, mol2):\n \"\"\"Check all of the attributes in a molecule match after being remapped\"\"\"\n for atoms in zip(mol1.atoms, mol2.atoms):\n assert atoms[0].to_dict() == atoms[1].to_dict()\n # bonds will not be in the same order in the molecule and the atom1 and atom2 indecies could be out of order\n # make a dict to compare them both\n remapped_bonds = dict(\n ((bond.atom1_index, bond.atom2_index), bond) for bond in mol2.bonds\n )\n for bond in mol1.bonds:\n key = (bond.atom1_index, bond.atom2_index)\n if key not in remapped_bonds:\n key = tuple(reversed(key))\n assert key in remapped_bonds\n # now compare each attribute of the bond except the atom indexes\n bond_dict = bond.to_dict()\n del bond_dict[\"atom1\"]\n del bond_dict[\"atom2\"]\n remapped_bond_dict = remapped_bonds[key].to_dict()\n del remapped_bond_dict[\"atom1\"]\n del remapped_bond_dict[\"atom2\"]\n assert mol1.n_bonds == mol2.n_bonds\n assert mol1.n_angles == mol2.n_angles\n assert mol1.n_propers == mol2.n_propers\n assert mol1.n_impropers == mol2.n_impropers\n assert mol1.total_charge == mol2.total_charge\n assert mol1.partial_charges.all() == mol2.partial_charges.all()\n\n # check all of the properties match as well, torsions and impropers will be in a different order\n # due to the bonds being out of order\n assert_molecules_match_after_remap(new_ethanol, ethanol_reverse)\n\n # test round trip (double remapping a molecule)\n new_ethanol = ethanol.remap(mapping, current_to_new=True)\n isomorphic, round_trip_mapping = Molecule.are_isomorphic(\n new_ethanol, ethanol, return_atom_map=True\n )\n assert isomorphic is True\n round_trip_ethanol = new_ethanol.remap(round_trip_mapping, current_to_new=True)\n assert_molecules_match_after_remap(round_trip_ethanol, ethanol)", "def calculate_dihedral_atom_equivalences(mol1, mol2):\n\n # Check that the mols are identical-ish\n if mol1.GetNumHeavyAtoms() != mol2.GetNumHeavyAtoms():\n raise EqualityError('Molecules are not identical (Num Atoms) {!s} != {!s}.\\n{!s}\\n{!s}'.format(mol1.GetNumHeavyAtoms(),mol2.GetNumHeavyAtoms(),Chem.MolToSmiles(mol1),Chem.MolToSmiles(mol2)))\n if mol1.GetNumBonds() != mol2.GetNumBonds():\n raise EqualityError('Molecules are not identical (Num Bonds) {!s} != {!s}:\\n{!s}\\n{!s}'.format(mol1.GetNumBonds(),mol2.GetNumBonds(),Chem.MolToSmiles(mol1), Chem.MolToSmiles(mol2)))\n\n # Gets a list of lists of atoms in mol1 (12,16,3, ...) 
that match the atoms in mol2 (1,2,3, ...)\n match_patterns = mol1.GetSubstructMatches(mol2, uniquify=False)\n # Get the quadruplets to calculate the dihedrals from for mol1\n mol1_atom_sets = identify_rotatable_bond_atom_pairs(mol1)\n num_atms = mol1.GetNumHeavyAtoms()\n # List for returning\n paired_atom_sets = []\n # Iterate through the different ways of overlaying the molecule (ensures we get the minimum rmsd)\n for match_pattern in match_patterns:\n # Translate from the atoms in mol1 to the atoms in mol2 (for this match_pattern)\n trans_dict = dict(zip(match_pattern, range(0,num_atms)))\n # Translate the atoms in mol1 to the atoms in mol2\n mol2_atom_sets = [ tuple([trans_dict[atm] for atm in bond_set]) for bond_set in mol1_atom_sets]\n # Add to list\n paired_atom_sets.append((mol1_atom_sets, mol2_atom_sets))\n # Check that the atom types are identical (test)\n mol1_atom_types = [ tuple([mol1.GetAtomWithIdx(atm).GetAtomicNum() for atm in bond_set]) for bond_set in mol1_atom_sets]\n mol2_atom_types = [ tuple([mol2.GetAtomWithIdx(atm).GetAtomicNum() for atm in bond_set]) for bond_set in mol2_atom_sets]\n assert mol1_atom_types == mol2_atom_types, \"ATOM TYPES ARE NOT THE SAME ON THE DIHEDRAL ANGLE TO BE CALCULATED - THERE'S BEEN A MATCHING ERROR\"\n # Return the list of lists of paired atoms between the structures\n return paired_atom_sets", "def get_atom_connectivity(self):\n m, connectivity = self.owner, []\n for index, i in enumerate(self.rix):\n for j in self.rix[index + 1:]:\n a1 = m.rings[i].aix\n a2 = m.rings[j].aix\n if set(a1).intersection(a2):\n connectivity.append((i, j))\n return tuple(connectivity)", "def test_chemical_environment_matches_OE(self):\n # TODO: Move this to test_toolkits, test all available toolkits\n # Create chiral molecule\n toolkit_wrapper = OpenEyeToolkitWrapper()\n molecule = Molecule()\n atom_C = molecule.add_atom(6, 0, False, stereochemistry=\"R\", name=\"C\")\n atom_H = molecule.add_atom(1, 0, False, name=\"H\")\n atom_Cl = molecule.add_atom(17, 0, False, name=\"Cl\")\n atom_Br = molecule.add_atom(35, 0, False, name=\"Br\")\n atom_F = molecule.add_atom(9, 0, False, name=\"F\")\n molecule.add_bond(atom_C, atom_H, 1, False)\n molecule.add_bond(atom_C, atom_Cl, 1, False)\n molecule.add_bond(atom_C, atom_Br, 1, False)\n molecule.add_bond(atom_C, atom_F, 1, False)\n # Test known cases\n matches = molecule.chemical_environment_matches(\n \"[#6:1]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 1 # it should have one tagged atom\n assert set(matches[0]) == set([atom_C])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[#1:2]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 2 # it should have two tagged atoms\n assert set(matches[0]) == set([atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[Cl:1]-[C:2]-[H:3]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 3 # it should have three tagged atoms\n assert set(matches[0]) == set([atom_Cl, atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[*:2]\", toolkit_registry=toolkit_wrapper\n )\n assert len(matches) == 4 # there should be four matches\n for match in matches:\n assert len(match) == 2 # each match 
should have two tagged atoms\n # Test searching for stereo-specific SMARTS\n matches = molecule.chemical_environment_matches(\n \"[#6@:1](-[F:2])(-[Cl])(-[Br])(-[H])\", toolkit_registry=toolkit_wrapper\n )\n assert len(matches) == 1 # there should be one match\n for match in matches:\n assert len(match) == 2 # each match should have two tagged atoms\n matches = molecule.chemical_environment_matches(\n \"[#6@@:1](-[F:2])(-[Cl])(-[Br])(-[H])\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 0\n ) # this is the wrong stereochemistry, so there shouldn't be any matches", "def are_clone_sequences(atoms1, atoms2):\n\n for a1, a2 in it.zip_longest(atoms1, atoms2):\n assert a1 is not a2\n assert a1.get_id() == a2.get_id()\n assert a1.get_charge() == a2.get_charge()\n assert a1.__class__ is a2.__class__", "def test_chemical_environment_matches_OE(self):\n # TODO: Move this to test_toolkits, test all available toolkits\n # Create chiral molecule\n from simtk.openmm.app import element\n\n toolkit_wrapper = OpenEyeToolkitWrapper()\n molecule = Molecule()\n atom_C = molecule.add_atom(\n element.carbon.atomic_number, 0, False, stereochemistry=\"R\", name=\"C\"\n )\n atom_H = molecule.add_atom(element.hydrogen.atomic_number, 0, False, name=\"H\")\n atom_Cl = molecule.add_atom(element.chlorine.atomic_number, 0, False, name=\"Cl\")\n atom_Br = molecule.add_atom(element.bromine.atomic_number, 0, False, name=\"Br\")\n atom_F = molecule.add_atom(element.fluorine.atomic_number, 0, False, name=\"F\")\n molecule.add_bond(atom_C, atom_H, 1, False)\n molecule.add_bond(atom_C, atom_Cl, 1, False)\n molecule.add_bond(atom_C, atom_Br, 1, False)\n molecule.add_bond(atom_C, atom_F, 1, False)\n # Test known cases\n matches = molecule.chemical_environment_matches(\n \"[#6:1]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 1 # it should have one tagged atom\n assert set(matches[0]) == set([atom_C])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[#1:2]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 2 # it should have two tagged atoms\n assert set(matches[0]) == set([atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[Cl:1]-[C:2]-[H:3]\", toolkit_registry=toolkit_wrapper\n )\n assert (\n len(matches) == 1\n ) # there should be a unique match, so one atom tuple is returned\n assert len(matches[0]) == 3 # it should have three tagged atoms\n assert set(matches[0]) == set([atom_Cl, atom_C, atom_H])\n matches = molecule.chemical_environment_matches(\n \"[#6:1]~[*:2]\", toolkit_registry=toolkit_wrapper\n )\n assert len(matches) == 4 # there should be four matches\n for match in matches:\n assert len(match) == 2 # each match should have two tagged atoms", "def invariants(mol):\n atom_ids = {}\n for a in mol.atoms:\n components = []\n components.append(a.number)\n components.append(len(a.oatoms))\n components.append(a.hcount)\n components.append(a.charge)\n components.append(a.mass)\n if len(a.rings) > 0:\n components.append(1)\n\n atom_ids[a.index] = gen_hash(components)\n\n return atom_ids", "def pseudopotentialise_ethane_like_molecule(self, sysargs, execute_deletion=True):\n\n # Find atoms to replace\n deletion_list = []\n potential_coords_list = []\n if len(sysargs) > 2:\n if 'del' in sysargs:\n deletion_list = 
self.parse_coord_list(sysargs[4])\n replacement_list = self.parse_coord_list(sysargs[2])\n atoms_to_replace = list(item for item in self.coord_list if item[\"#\"] in replacement_list)\n else:\n atoms_to_replace = (item for item in self.coord_list if item[\"el\"] == 'c')\n deletion_list = (item for item in self.coord_list if item[\"el\"] == 'h')\n print('Pseudo-potentialising atoms %s ...' % [atom['#'] for atom in atoms_to_replace])\n\n # Option to place a potential on the *opposite* side of the carbon as well.\n dipolar_potentials = False\n if 'dipole' in sysargs:\n print('Dipolar potentialisation activated...')\n dipolar_potentials = True\n\n for atom in atoms_to_replace:\n # Find vector from nearest carbon.\n distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n\n vector_from_nearest_carbon = self.vectorise_atom(atom['#']) \\\n - self.vectorise_atom(distanced_carbon_list[0]['#'])\n vector_to_nearest_carbon = self.vectorise_atom(distanced_carbon_list[0]['#']) \\\n - self.vectorise_atom(atom['#'])\n\n # Lengtherise vector from carbon to give relative pp coordinates.\n vector_c_to_new_pp = self.lengtherise_vector(vector_from_nearest_carbon, self.atom_potential_set_distance)\n vector_c_to_new_dipole_pp = self.lengtherise_vector(vector_to_nearest_carbon, self.atom_potential_set_distance)\n\n # Add to carbon coords to get new pp coords.\n potential_coords_list.append(\n {'#': 0, 'el': self.sp3_pseudo_element,\n 'x': vector_c_to_new_pp[0] + distanced_carbon_list[0]['x'],\n 'y': vector_c_to_new_pp[1] + distanced_carbon_list[0]['y'],\n 'z': vector_c_to_new_pp[2] + distanced_carbon_list[0]['z']},\n )\n if dipolar_potentials is True:\n # Add to carbon coords to get new pp coords.\n potential_coords_list.append(\n {'#': 0, 'el': self.sp3_pseudo_element,\n 'x': vector_c_to_new_dipole_pp[0] + distanced_carbon_list[0]['x'],\n 'y': vector_c_to_new_dipole_pp[1] + distanced_carbon_list[0]['y'],\n 'z': vector_c_to_new_dipole_pp[2] + distanced_carbon_list[0]['z']},\n )\n\n # Now add potentials to coord list, after removing the 'real' atoms.\n if execute_deletion is True:\n self.delete_specified_atoms(deletion_list)\n for potential_coord in potential_coords_list:\n self.write_coord(potential_coord, overwrite=False)", "def test_isomorphic_perumtations(self, inputs):\n # get benzene with all aromatic atoms/bonds labeled\n benzene = Molecule.from_smiles(\"c1ccccc1\")\n # get benzene with no aromatic labels\n benzene_no_aromatic = create_benzene_no_aromatic()\n # now test all of the variations\n assert (\n Molecule.are_isomorphic(\n benzene,\n benzene_no_aromatic,\n aromatic_matching=inputs[\"aromatic_matching\"],\n formal_charge_matching=inputs[\"formal_charge_matching\"],\n bond_order_matching=inputs[\"bond_order_matching\"],\n atom_stereochemistry_matching=inputs[\"atom_stereochemistry_matching\"],\n bond_stereochemistry_matching=inputs[\"bond_stereochemistry_matching\"],\n )[0]\n is inputs[\"result\"]\n )\n\n assert (\n benzene.is_isomorphic_with(\n benzene_no_aromatic,\n aromatic_matching=inputs[\"aromatic_matching\"],\n formal_charge_matching=inputs[\"formal_charge_matching\"],\n bond_order_matching=inputs[\"bond_order_matching\"],\n atom_stereochemistry_matching=inputs[\"atom_stereochemistry_matching\"],\n bond_stereochemistry_matching=inputs[\"bond_stereochemistry_matching\"],\n )\n is inputs[\"result\"]\n )", "def position_is_valid(x1, y1, z1, x2, y2, z2, degXY_1, degYZ_1, degXY_2, degYZ_2, user_rand):\n\n # return max X,Y,Z locations from all the atoms in vecs\n def 
get_max_XYZ(vecs):\n return max(vecs, key=lambda v: v[0])[0], max(vecs, key=lambda v: v[1])[1], max(vecs, key=lambda v: v[2])[2]\n\n # return min X,Y,Z locations from all the atoms in vecs\n def get_min_XYZ(vecs):\n return min(vecs, key=lambda v: v[0])[0], min(vecs, key=lambda v: v[1])[1], min(vecs, key=lambda v: v[2])[2]\n\n # get the atoms of the first protein after moving it in x1,y1,z1\n vecs1 = get_atoms('media/files/' + user_rand + '/' + '_1_.pdb')\n translate_vecs(x1, y1, z1, vecs1)\n rotate_molecular(x1, y1, z1, degXY_1, degYZ_1, vecs1)\n\n # get the atoms of the second protein after moving it in x2,y2,z2\n vecs2 = get_atoms('media/files/' + user_rand + '/' + '_2_.pdb')\n translate_vecs(x2, y2, z2, vecs2)\n rotate_molecular(x2, y2, z2, degXY_2, degYZ_2, vecs2)\n\n maxX1, maxY1, maxZ1 = get_max_XYZ(vecs1)\n maxX2, maxY2, maxZ2 = get_max_XYZ(vecs2)\n\n minX1, minY1, minZ1 = get_min_XYZ(vecs1)\n minX2, minY2, minZ2 = get_min_XYZ(vecs2)\n\n dist = 1\n\n # check overlap in axis X, axis Y and axis Z\n resultX = (maxX1 + dist) >= minX2 and (maxX2 + dist) >= minX1\n resultY = (maxY1 + dist) >= minY2 and (maxY2 + dist) >= minY1\n resultZ = (maxZ1 + dist) >= minZ2 and (maxZ2 + dist) >= minZ1\n\n # check overlap of whole \"boxes\" of proteins\n isOverlap = resultX and resultY and resultZ\n\n return not isOverlap", "def test_isomorphic_perumtations(self, inputs):\n # get benzene with all aromatic atoms/bonds labeled\n benzene = Molecule.from_smiles(\"c1ccccc1\")\n # get benzene with no aromatic labels\n benzene_no_aromatic = create_benzene_no_aromatic()\n # now test all of the variations\n assert (\n Molecule.are_isomorphic(\n benzene,\n benzene_no_aromatic,\n aromatic_matching=inputs[\"aromatic_matching\"],\n formal_charge_matching=inputs[\"formal_charge_matching\"],\n bond_order_matching=inputs[\"bond_order_matching\"],\n atom_stereochemistry_matching=inputs[\"atom_stereochemistry_matching\"],\n bond_stereochemistry_matching=inputs[\"bond_stereochemistry_matching\"],\n )[0]\n is inputs[\"result\"]\n )", "def test_coords_same_direction(self): # test_change_coords = method\n mi = (0,1,1.5708)\n mj = (0,2,1.5708)\n result = new_mj_coords(mi, mj)\n self.assertEqual(result, (0.3317021649341794, 0.9433841602327115, 0.0))\n\n '''\n the method .assertEqual(a,b) is equivalent to a == b\n other methods include: .assertIs(a,b) = a is b, .assertIsNone(x) = x is None,\n .assertIn(a,b) = a in b, and .assertIsInstance(a,b) = isinstance(a, b)\n\n\n '''", "def test_order_atoms_in_mol_list(self):\n ref_mol = Molecule(smiles='[CH](CC[CH]c1ccccc1)c1ccccc1')\n mol_list = ref_mol.copy(deep=True).generate_resonance_structures(keep_isomorphic=False, filter_structures=True)\n success = converter.order_atoms_in_mol_list(ref_mol=ref_mol, mol_list=mol_list)\n self.assertTrue(success)\n bond_dict = dict()\n for index1, atom1 in enumerate(ref_mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = ref_mol.atoms.index(atom2)\n if index1 < index2:\n if index1 not in bond_dict:\n bond_dict[index1] = [index2]\n else:\n bond_dict[index1].append(index2)\n for mol in mol_list:\n for index1, atom1 in enumerate(mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = mol.atoms.index(atom2)\n if index1 < index2:\n self.assertIn(index2, bond_dict[index1]) # check that these atoms are connected in all mols", "def is_same(source_molecule, target_molecule):\n return source_molecule.mol_text == target_molecule.mol_text", "def testStereo(self):\r\n smi_and_cansmi = [\r\n ('OC(=O)[C@@H](CCC(N)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n 
('OC(=O)[C@H](CCC(N)=O)N', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](N)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@@H](N)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](N)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](N)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](C(O)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](C(O)=O)N', 'NC(=O)CC[C@H](N)C(=O)O')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def test_oblique_sequence_match(self):\n dna = self._create_dna()\n\n # Another codon pair\n other_pair = self._create_codon_pair()\n\n self.assertFalse(dna.has_sequence(other_pair))\n\n # Existing codon pair\n self.assertTrue(dna.has_sequence(dna.top_left_oblique_pair))\n self.assertTrue(dna.has_sequence(dna.bottom_left_oblique_pair))", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def neopentane():\n coords = [\n [0.000000, 0.0, 0.0],\n [0.881905, 0.881905, 0.881905],\n [-0.881905, -0.881905, 0.881905],\n [0.881905, -0.881905, -0.881905],\n [-0.881905, 0.881905, -0.881905],\n [-1.524077, 0.276170, -1.524077],\n [1.524077, 1.524077, 0.276170],\n [1.524077, -0.276170, -1.524077],\n [1.524077, 0.276170, 1.524077],\n [-1.524077, -0.276170, 1.524077],\n [1.524077, -1.524077, -0.276170],\n [-0.276170, 1.524077, -1.524077],\n [0.276170, 1.524077, 1.524077],\n [0.276170, -1.524077, -1.524077],\n [-0.276170, -1.524077, 1.524077],\n [-1.524077, 1.524077, -0.276170],\n [-1.524077, -1.524077, 0.276170],\n ]\n coords = [[float(j) / Bohr for j in i] for i in coords]\n\n symbols = [\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n ]\n\n atoms = []\n for i, _ in enumerate(coords):\n atoms.append(Atom(symbols[i], position=coords[i]))\n return Molecule(symbols=atoms)", "def transposition_equivalence(self, position_list=[]):\n\t\treturn self.__class__(_replace_match_at(self, position_list, [\n\t\t\t[ ((neg, B), impl, (neg, A)), (A, impl, B) ],\n\t\t\t[ (A, impl, B), ((neg, B), impl, (neg, A)) ]\n\t\t]))", "def _cartesian_to_internal(self, atom_position, bond_position, angle_position, torsion_position):\n # TODO: _cartesian_to_internal and _internal_to_cartesian should accept/return units and have matched APIs\n\n check_dimensionality(atom_position, unit.nanometers)\n check_dimensionality(bond_position, unit.nanometers)\n check_dimensionality(angle_position, unit.nanometers)\n check_dimensionality(torsion_position, unit.nanometers)\n\n # Convert to internal coordinates once everything is dimensionless\n # Make sure positions are float64 arrays implicitly in units of nanometers for numba\n from perses.rjmc import coordinate_numba\n internal_coords = coordinate_numba.cartesian_to_internal(\n atom_position.value_in_unit(unit.nanometers).astype(np.float64),\n bond_position.value_in_unit(unit.nanometers).astype(np.float64),\n angle_position.value_in_unit(unit.nanometers).astype(np.float64),\n torsion_position.value_in_unit(unit.nanometers).astype(np.float64))\n # Return values are also in floating point implicitly in nanometers and 
radians\n r, theta, phi = internal_coords\n\n # Compute absolute value of determinant of Jacobian\n detJ = np.abs(r**2*np.sin(theta))\n\n check_dimensionality(r, float)\n check_dimensionality(theta, float)\n check_dimensionality(phi, float)\n check_dimensionality(detJ, float)\n\n return internal_coords, detJ", "def test_isomorphic_striped_stereochemistry(self):\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n # Ensure default value is respected and order does not matter\n assert Molecule.are_isomorphic(mol1, mol2, strip_pyrimidal_n_atom_stereo=True)\n assert Molecule.are_isomorphic(mol1, mol2)\n assert Molecule.are_isomorphic(mol2, mol1)\n\n assert mol1 == mol2\n assert Molecule.from_smiles(\"CCC[N@](C)CC\") == Molecule.from_smiles(\n \"CCC[N@@](C)CC\"\n )\n\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n assert not Molecule.are_isomorphic(\n mol1,\n mol2,\n strip_pyrimidal_n_atom_stereo=False,\n atom_stereochemistry_matching=True,\n bond_stereochemistry_matching=True,\n )[0]", "def equivalent(kls, first, second):\n if first.empty() and second.empty():\n return True\n elif first.vertices.shape[0] != second.vertices.shape[0]:\n return False\n elif first.edges.shape[0] != second.edges.shape[0]:\n return False\n\n EPSILON = 1e-7\n\n vertex1, ct1 = np.unique(first.vertices, axis=0, return_counts=True)\n vertex2, ct2 = np.unique(second.vertices, axis=0, return_counts=True)\n \n vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)\n ct_match = np.all(ct1 == ct2)\n if not (vertex_match and ct_match):\n return False\n\n g1 = nx.Graph()\n g1.add_edges_from(first.edges)\n g2 = nx.Graph()\n g2.add_edges_from(second.edges)\n edges_match = nx.is_isomorphic(g1, g2)\n del g1 \n del g2\n\n if not edges_match:\n return False\n\n second_verts = {}\n for i, vert in enumerate(second.vertices):\n second_verts[tuple(vert)] = i\n \n attrs = [ attr['id'] for attr in first.extra_attributes ]\n for attr in attrs:\n buf1 = getattr(first, attr)\n buf2 = getattr(second, attr)\n if len(buf1) != len(buf2):\n return False\n\n for i in range(len(buf1)):\n i2 = second_verts[tuple(first.vertices[i])]\n if buf1[i] != buf2[i2]:\n return False\n\n return True", "def test_modify_coords(self):\n xyz1 = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((1.53830201, 0.86423425, 0.07482439), (0.94923576, -0.20847619, -0.03881977),\n (-0.56154542, -0.31516675, -0.05011465), (-1.18981166, 0.93489731, 0.17603211),\n (1.49712659, -1.15833718, -0.15458647), (-0.87737433, -0.70077243, -1.02287491),\n (-0.87053611, -1.01071746, 0.73427128), (-0.48610273, 1.61361259, 0.11915705))}\n xyz2 = {'symbols': ('C', 'C', 'N', 'H', 'H', 'H'), 'isotopes': (12, 12, 14, 1, 1, 1),\n 'coords': ((-0.48629842, 0.00448354, 0.00136213), (0.97554967, -0.0089943, -0.00273253),\n (2.13574353, -0.01969098, -0.00598223), (-0.88318669, -0.63966273, -0.78887729),\n (-0.87565097, -0.35336611, 0.95910491), (-0.86615712, 1.01723058, -0.16287498))}\n xyz3 = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-2.77437517, 0.17200669, 0.18524832), (-1.64613785, -0.19208096, 0.80434075),\n (-0.40774525, 0.26424657, -0.07952902), (-0.26203276, 2.09580334, -0.05090198),\n (-0.67096595, -0.16397552, -1.42109845), (0.89264107, -0.40136991, 0.41083574),\n (2.12441624, -0.1300863, 
-0.44918504), (-1.50623429, -1.27619307, 0.9524955),\n (-1.45114032, 0.18501518, 1.82167553), (-1.59654975, 2.25615634, -0.09052499),\n (-1.65730431, -0.11079255, -1.400057), (0.74870779, -1.48997779, 0.41386971),\n (1.10331691, -0.11082471, 1.44762119), (2.41262211, 0.92463409, -0.42840126),\n (1.95758158, -0.4244074, -1.48990015), (2.97418137, -0.70882619, -0.0719403))}\n xyz4 = {'symbols': ('C', 'C', 'O', 'C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 16, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-1.2713687423422115, -0.7423678681688866, -0.6322577211421921),\n (-0.08008635702808505, -0.40741599130374034, 0.2550353232234618),\n (-0.5452666768773297, -0.20159898814584978, 1.588840559327411),\n (0.6158080809151276, 0.8623086771891557, -0.21553636846891006),\n (1.9196775903993375, 1.0155396004927764, 0.5174563928754532),\n (3.0067486097953653, 1.0626738453913969, -0.05177300486677717),\n (-2.012827991034863, 0.06405231524730193, -0.6138583677564631),\n (-0.9611224758801538, -0.9119047827586647, -1.6677831987437075),\n (-1.7781253059828275, -1.6433798866337939, -0.27003123559560865),\n (0.6204384954940876, -1.2502614603989448, 0.2715082028581114),\n (-1.0190238747695064, -1.007069904421531, 1.8643494196872146),\n (0.014234510343435022, 1.753076784716312, -0.005169050775340246),\n (0.827317336700949, 0.8221266348378934, -1.2893801191974432),\n (1.8498494882204641, 1.107064846374729, 1.6152311353151314))}\n xyz5 = {'symbols': ('N', 'C', 'C', 'C', 'H', 'H', 'C', 'C', 'C', 'C', 'H', 'H', 'C', 'C', 'C', 'H', 'C', 'C',\n 'N', 'H', 'H', 'C', 'H', 'C', 'C', 'C', 'H', 'H', 'H', 'H', 'C', 'C', 'C', 'H', 'H', 'H',\n 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'O', 'O', 'C', 'O', 'H', 'H', 'H'),\n 'isotopes': (14, 12, 12, 12, 1, 1, 12, 12, 12, 12, 1, 1, 12, 12, 12, 1, 12, 12, 14, 1, 1, 12, 1, 12, 12,\n 12, 1, 1, 1, 1, 12, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 16, 16, 12, 16, 1, 1, 1),\n 'coords': ((-0.766219, -0.248648, -0.347086), (0.667812, -0.150498, -0.496932),\n (-1.490842, 1.000959, -0.245328), (1.311194, -1.339578, -1.19388),\n (0.976451, 0.831716, -0.911173), (1.231101, -0.062221, 0.660162),\n (-1.346406, -1.400789, 0.294395), (-1.022138, 2.069095, 0.533928),\n (-2.673271, 1.125443, -1.008282), (2.575265, -0.94966, -1.974365),\n (1.534634, -2.14679, -0.467576), (0.584227, -1.791819, -1.905459),\n (-0.574689, -2.103356, 1.24726), (-2.643838, -1.861964, -0.035016),\n (-1.73741, 3.268914, 0.549347), (-0.105632, 1.96688, 1.126589),\n (-3.134563, -0.04419, -1.826788), (-3.378705, 2.332664, -0.970971),\n (3.611589, -0.28425, -1.113057), (2.30114, -0.222978, -2.774031),\n (2.969795, -1.853671, -2.489377), (-1.04268, -3.284134, 1.815898),\n (0.388329, -1.696921, 1.570938), (-3.645512, -1.174123, -0.925823),\n (-3.088386, -3.061615, 0.555145), (-2.911462, 3.400813, -0.198004),\n (-1.376219, 4.102013, 1.150524), (-3.935589, 0.254447, -2.531702),\n (-2.298405, -0.411572, -2.461402), (-4.293927, 2.444159, -1.549116),\n (4.776265, 0.123769, -1.959689), (4.064268, -1.169457, 0.001273),\n (-2.30222, -3.77607, 1.457834), (-0.433782, -3.814872, 2.545573),\n (-4.135291, -1.935447, -1.571709), (-4.453058, -0.768805, -0.272612),\n (-4.078335, -3.442593, 0.302875), (-3.465321, 4.337257, -0.179068),\n (5.500278, 0.67338, -1.336133), (5.30611, -0.707961, -2.446036),\n (4.433161, 0.821539, -2.74083), (4.954327, -0.743379, 0.488676),\n (4.300156, -2.200598, -0.295594), (3.265545, -1.194959, 0.769181),\n (-2.671885, -4.702569, 1.890597), (1.78286, 0.089948, 1.873468),\n 
(1.758606, 1.382484, 2.130308), (2.973471, 2.040706, 1.623336),\n (2.813335, 2.256698, 0.248083), (2.919925, 3.030613, 2.105087),\n (3.858517, 1.438684, 1.858856), (3.005024, 1.410381, -0.277159))}\n xyz6 = {'symbols': ('N', 'C', 'C', 'H', 'C', 'H', 'H', 'N', 'H', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H',\n 'H', 'H', 'O', 'O', 'H', 'C', 'H', 'H', 'O', 'H'),\n 'isotopes': (14, 12, 12, 1, 12, 1, 1, 14, 1, 12, 12, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 16, 16, 1, 12, 1, 1, 16, 1),\n 'coords': ((2.608231, -0.458895, 1.629197), (2.408715, 0.132166, 0.318653),\n (1.174426, -0.323822, -0.471554), (3.304408, -0.071078, -0.291093),\n (-0.13532, 0.016735, 0.225918), (1.210534, 0.150539, -1.46601),\n (1.221625, -1.416078, -0.631885), (-1.316045, -0.574442, -0.379686),\n (-0.086456, -0.362851, 1.260573), (-1.468231, -0.411368, -1.77232),\n (-2.505886, -0.419831, 0.432347), (-2.403425, -0.886127, -2.107496),\n (-0.621099, -0.850903, -2.320815), (-3.364172, -0.88926, -0.068909),\n (-2.767365, 0.637288, 0.628231), (-2.360065, -0.927144, 1.400068),\n (2.574849, -1.475283, 1.579253), (1.886591, -0.170591, 2.284831),\n (2.375177, 1.228181, 0.441157), (-0.231725, 1.121336, 0.301367),\n (-1.455199, 0.947478, -2.255384), (-2.58006, 1.611276, -1.811891),\n (-3.315019, 1.53868, -2.760245), (-3.713498, 1.338038, -4.025244),\n (-4.754452, 0.99077, -4.021055), (-3.584519, 2.351475, -4.444827),\n (-2.87635, 0.381401, -4.513467), (-1.966974, 0.665311, -4.338804))}\n mol1 = converter.molecules_from_xyz(xyz1)[1]\n mol2 = converter.molecules_from_xyz(xyz2)[1]\n mol3 = converter.molecules_from_xyz(xyz3)[1]\n mol4 = converter.molecules_from_xyz(xyz4)[1]\n mol5 = converter.molecules_from_xyz(xyz5)[1] # a TS\n mol6 = converter.molecules_from_xyz(xyz6)[1] # a TS\n\n # test atom modification types\n modification_type = 'atom'\n\n # test R_atom modification\n indices, new_val = [0, 1], 1.5\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.06385450948408691, 1.6253138441202686, 0.042870253583423557),\n (-0.02582727173313104, 0.39833637030950975, 0.9010563970736782),\n (-0.02582727173313104, -1.003336361301907, 0.3272239637891734),\n (-0.02582727173313104, -1.003336361301907, -1.0899990532469916),\n (-0.08138177769352953, 0.465646654907214, 2.0002403496097383),\n (0.865704477722866, -1.5264119285073852, 0.6825623354173815),\n (-0.9185767861007101, -1.5268489957651346, 0.6785930201570352),\n (0.14577602706217008, -0.07998849407327513, -1.367625604543457))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [1, 0], -1.5\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.01167393998357115, -0.5225807439329089, -0.9899595616178738),\n (-0.040525509131742084, 0.26844387347263365, -2.2633625897949208),\n (0.01167393998357115, -0.5225807439329089, 1.4216698859880004),\n (0.01167393998357115, 0.8926022581407576, 1.3456557382334218),\n (0.11202785529567173, -2.2718515121487206, 0.04691079079738447),\n (-0.8954040276884763, -0.8508241498293034, 1.9356427400340799),\n (0.8880330020652463, -0.8439168226596885, 1.990234136037933),\n (-0.13167393678263156, 1.1200467154192293, 0.4039467156910099))}\n new_xyz = 
converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), - new_val, 5)\n\n # test A_atom modification\n indices, new_val = [2, 1, 0], 140\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.011940763595588438, -0.90654939253321, -1.1784203714214114),\n (0.011940763595588438, -0.90654939253321, 0.05065327345758153),\n (-0.02531707366035523, 0.06629439921242253, 1.2108932996837143),\n (0.011940763595588438, 1.5283906429141458, 0.05806971900412017),\n (0.03285612994605798, -1.8458593499019589, 0.6277855724118742),\n (-0.9645745795119229, 0.3758422785924207, 1.4467600455414558),\n (0.8166299978590752, 0.37902049128771864, 1.551524925579085),\n (-0.10465928281651019, 1.2266969334608921, -0.8663115945839973))}\n\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n # test changing an angle to 180 degrees\n indices, new_val = [0, 1, 2], 180\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.0019281473980474666, 1.559641181574566, 1.013927346529066),\n (-0.0019281473980474772, 0.42219553322547265, 0.548267146825631),\n (-0.0019281473980474772, -0.9794771983859442, -0.025565286458873793),\n (-0.0019281473980474772, -0.9794771983859442, -1.4427883034950388),\n (-0.05748265335844597, 0.4895058178231769, 1.6474510993616909),\n (0.8896036020579495, -1.5025527655914221, 0.32977308516933435),\n (-0.8946776617656266, -1.5029898328491718, 0.32580376990898796),\n (0.16967515139725364, -0.05612933115731222, -1.7204148547915041))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val)\n\n # test changing a 180 degree angle to something else\n indices, new_val = [0, 1, 2], 120\n expected_xyz = {'symbols': ('C', 'C', 'N', 'H', 'H', 'H'), 'isotopes': (12, 12, 14, 1, 1, 1),\n 'coords': ((0.7757362507465277, 0.4478716325630875, 0.7767867108403768),\n (-0.3207007101270898, -0.18515666614565915, 0.04582870107149262),\n (-0.3207007101270898, -0.18515666614565915, -1.1144190466784232),\n (-0.3207007101270898, 0.8374974028016162, 1.8964626512298475),\n (-1.2063452316056904, -0.6964838693490394, 1.8964625790172804),\n (0.5649437124447699, -0.6964840572534022, 1.896462566459638))}\n new_xyz = converter.modify_coords(coords=xyz2, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol2)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol2), new_val, 5)\n\n # test D_atom modification\n indices, new_val = [0, 1, 2, 3], 30\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.3812553590829658, 1.4249753409811934, 
0.24885596109763952),\n (0.13588307254069157, 0.47112021672976, 0.8262208968300058),\n (0.13588307254069157, -0.9305525148816568, 0.25238846354550093),\n (0.13588307254069157, -0.9305525148816568, -1.1648345534906641),\n (0.08032856658029308, 0.5384305013274643, 1.9254048493660656),\n (1.0274148219966885, -1.4536280820871348, 0.6077268351737091),\n (-0.7568664418268876, -1.4540651493448844, 0.6037575199133627),\n (0.30748637133599266, -0.007204647653024865, -1.4424611047871294))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [3, 2, 1, 0], -30\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.17268751280677364, -0.941696827801256, -1.1487068217042242),\n (-0.17268751280677364, -0.941696827801256, 0.08036682317476873),\n (-0.17268751280677364, 0.3328411496875977, 0.8986107061160642),\n (0.4830966870190505, 1.3983204216355287, 0.23286144075770054),\n (-0.18773471865125574, -1.8811191078717768, 0.6574991306756568),\n (-1.0994105700891015, 0.3771264916699556, 1.4764735369276594),\n (0.6806108103574798, 0.3121359507669669, 1.5812384626874982),\n (-0.2075631130119835, 1.1944491200970329, -0.8365980489813365))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1),\n 360 + new_val, 5)\n\n indices, new_val = [0, 1, 2, 3], -30\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.37739906428687087, 1.4249753409811934, 0.24885596109763952),\n (-0.13973936733678652, 0.47112021672976, 0.8262208968300058),\n (-0.13973936733678652, -0.9305525148816568, 0.25238846354550093),\n (-0.13973936733678652, -0.9305525148816568, -1.1648345534906641),\n (-0.195293873297185, 0.5384305013274643, 1.9254048493660656),\n (0.7517923821192105, -1.4536280820871348, 0.6077268351737091),\n (-1.0324888817043656, -1.4540651493448844, 0.6037575199133627),\n (0.0318639314585146, -0.007204647653024865, -1.4424611047871294))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1),\n 360 + new_val, 5)\n\n # test group modification types\n modification_type = 'group'\n\n # test R_group modification\n indices, new_val = [0, 1], 1.5\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.06385450815440741, 1.625313844153823, 0.04287025350146201),\n (-0.02582727144301671, 0.39833637029935165, 0.9010563970984908),\n (-0.02582727144301671, -1.0033363613120652, 0.327223963813986),\n (-0.02582727144301671, -1.0033363613120652, -1.089999053222179),\n (-0.0813817733100206, 0.4656466548101805, 2.0002403498467567),\n (0.8657044801882787, -1.5264119271233758, 0.6825623320367284),\n (-0.9185767836497759, -1.5268489971713646, 0.6785930235919653),\n 
(0.1457760273522844, -0.07998849408343323, -1.3676256045186443))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n # test A_group modification\n indices, new_val = [0, 1, 2], 160\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.01997925208754263, 1.625852603711386, 0.708691800251658),\n (-0.009887200766722545, 0.3981406366172051, 0.6591605436173553),\n (-0.009887200766722545, -1.0035320949942117, 0.08532811033285048),\n (-0.009887200766722545, -1.0035320949942117, -1.3318949067033146),\n (-0.06544170263372645, 0.465450921128034, 1.7583444963656214),\n (0.8816445508645728, -1.5266076608055221, 0.44066647855559316),\n (-0.9026367129734817, -1.5270447308535111, 0.4366971701108293),\n (0.16171609802857856, -0.08018422776557976, -1.6095214579997799))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [1, 2, 5], 160\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-0.45549818019466204, 1.8548729964273216, 0.8440028131622062),\n (-0.2667929723517851, 0.6671106629415136, 1.42912314652022),\n (-0.2163066356464933, -0.45426196440936106, 0.30526758056697156),\n (1.3109140692843337, 0.4741705899686004, -0.12165329723035323),\n (-1.3557392716759613, 0.27771606050413156, -0.16203238949855803),\n (-0.2163066356464933, -1.8492005047245035, -0.34944907261899716),\n (-0.2163066356464933, -1.8492005047245035, -1.87604687202156),\n (-1.0601386155429, 0.3401156691690679, 2.122303234960202),\n (0.6302934527577109, 0.5164940342603479, 2.051815682570846),\n (1.143418340718557, 1.3271327629309078, 0.9043191341647172),\n (-1.5046641822171405, 0.8405156651772538, 0.6362234563562041),\n (-1.1248176985937233, -2.3816433802478305, -0.03815279071754074),\n (0.6330922017716909, -2.4415422695908298, 0.013011559357363423),\n (0.707681641272436, -1.4302805756837962, -2.2843133571390752),\n (-1.061876978104781, -1.2808214124615414, -2.27542464397285),\n (-0.30131566361820894, -2.876339919190297, -2.2463334380185054))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 5)\n\n indices, new_val = [5, 2, 1], 160\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((0.2917048572251579, -1.5727341554069034, -1.3423072397835754),\n (0.2917048572251579, -1.5727341554069034, -0.0048638500194817524),\n (0.2917048572251579, -0.06886266257406626, 0.5064553318371674),\n (-1.363795569744117, -0.1202634403830567, -0.28936363114537844),\n (1.2964570556359054, 
0.04149003667864859, -0.508809719558267),\n (0.4099139249017979, 1.1367441270166645, 1.4588451220109844),\n (0.29481769872300884, 2.504661621457458, 0.7909713103796479),\n (1.1685736645928884, -2.0373473546555556, 0.47685945259484286),\n (-0.5312728539867155, -2.0767912763680947, 0.5278926826114716),\n (-1.2231052441089643, -1.4156454828005882, -0.6216441060907665),\n (1.4364524039686508, -0.9213654475865127, -0.6804052856633311),\n (1.3966722481626304, 1.107137467791805, 1.9397033126698722),\n (-0.33241474313836356, 1.0625526837349102, 2.2633130452338497),\n (-0.7009351031697479, 2.671307058557274, 0.3706911401148234),\n (1.0334518240640673, 2.6225101662569066, -0.007826505507309234),\n (0.474437928409419, 3.293432289151483, 1.52916604039102))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 4)\n\n # test D_group modification\n indices, new_val = [0, 1, 2, 3], 98.7\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.751853407099498, 1.1325746654576616, 0.9630889493590222),\n (0.2705229494881336, 0.5773506493576217, 0.5667369568416694),\n (0.2705229494881336, -0.8243220822537951, -0.00709547644283548),\n (0.2705229494881336, -0.8243220822537951, -1.4243184934790005),\n (0.21496844352773511, 0.644660933955326, 1.6659209093777292),\n (1.1620546989441305, -1.347397649459273, 0.34824289518537266),\n (-0.6222265648794455, -1.3478347167170226, 0.3442735799250263),\n (0.4421262482834347, 0.09902578497483683, -1.7019450447754658))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [5, 2, 1, 0], 180\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((0.3034340517195509, -1.6113639549493641, -1.7901391417129255),\n (0.3034340517195509, -1.6113639549493641, -0.45269575194883194),\n (0.3034340517195509, -0.10749246211652697, 0.058623429907817215),\n (-1.3193844356755215, 0.6746571866866746, -0.30380395501671575),\n (1.3282593544657135, 0.581298860926198, -0.6678526090506967),\n (0.30343405171955073, -0.05040119820033895, 1.5985091447581203),\n (0.26233878444784786, 1.3540223173114139, 2.1955071424316666),\n (1.1803028491569083, -2.0759771588261957, 0.029027564277707585),\n (-0.5195436704231056, -2.115421071566818, 0.08006076790649397),\n (-1.414911803320983, 0.05150877481380545, -1.4915662613668217),\n (1.2907872270567131, 0.05736052141866721, -1.5046434284929022),\n (1.2266505257705096, -0.5178979180455376, 1.965811882691859),\n (-0.5283478351927398, -0.6406189828710822, 2.0028687871657294),\n (-0.6775241224477067, 1.8658969637383576, 1.9706253328328829),\n (1.0896028263747624, 1.9687229189733981, 1.8276430689661958),\n (0.35031987670665765, 1.2957313570336282, 3.285560142931404))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n 
self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 5)\n\n # test groups modification types\n modification_type = 'groups'\n\n # test D_groups modification\n indices, new_val = [0, 1, 2, 3], 98.7\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.7692326765134374, 1.1252152574374596, 0.9810655314575423),\n (0.25314357064244697, 0.5699912505374165, 0.5847135445433043),\n (0.25314357064244697, -0.8316815836112654, 0.010881153979294123),\n (0.25314357064244697, -0.8316815836112654, -1.4063419471715688),\n (1.2326181278103254, 1.0755945976230115, 0.6133000157238186),\n (1.1446752957640132, -1.3547571699433192, 0.3662195585064876),\n (-0.6396059141384572, -1.3551941756763426, 0.3622501790547312),\n (0.4247468609767439, 0.09166629658280878, -1.6839684605765641))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=xyz1, indices=[4, 1, 2, 3], mol=mol1),\n 176.7937925, 5)\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=[4, 1, 2, 3], mol=mol1),\n 279.5679938, 5)\n\n indices, new_val = [5, 2, 1, 0], 100\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((0.17617288317697363, -1.4263876505749937, -1.3907356765118228),\n (0.17617288317697363, -1.4263876505749937, -0.05329233131383648),\n (0.17617288317697363, 0.07748361087633482, 0.4580268316508156),\n (0.8541264407563205, 1.1799297944814306, -0.8464435250524343),\n (1.0315484892431994, 0.12891222316318918, 1.606136465715537),\n (-1.2415001838455297, 0.5175023395992786, 0.8716616732793354),\n (-2.371148423802697, -0.377635430276555, 0.3685473045279144),\n (1.0530416597996317, -1.8910009834245878, 0.42843102214143425),\n (-0.646804798256715, -1.930444842122042, 0.47946418053365614),\n (1.322524386187, 0.1392850561843193, -1.55769653865906),\n (1.5807657244329665, 0.9071634481807671, 1.3438012611373469),\n (-1.4308626545937098, 1.5181627982792263, 0.46103575662853813),\n (-1.3101730016766409, 0.6090291604729325, 1.9628224613881304),\n (-2.328405219901557, -1.376683205512397, 0.811273322532136),\n (-2.345556604764221, -0.47877786163003033, -0.7207928024513892),\n (-3.3382397150969996, 0.059047399283163715, 0.6394658008190603))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 5)\n\n indices, new_val = [4, 3, 1, 0], 236.02\n expected_xyz = {'symbols': ('C', 'C', 'O', 'C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 16, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-0.3420713780282814, -0.726846939196746, -1.8608060734620697),\n (-0.3420713780282814, -0.726846939196746, -0.33809952744080163),\n (-1.5199121786498575, -1.3903247017047589, 0.12046140490433599),\n 
(-0.3420713780282814, 0.692986716189357, 0.21142750813209843),\n (0.8346249371329908, 0.870417947793265, 1.130523629422891),\n (1.8415843350511496, 1.49899165752528, 0.8160475329621943),\n (-1.232802341934429, -0.22348356564525385, -2.2527724067647172),\n (0.5474409007790566, -0.2291658204558631, -2.2587884226234842),\n (-0.36650899336409903, -1.7525658745827613, -2.2443893713107435),\n (0.5235538883628821, -1.286773819894118, 0.03414982827280788),\n (-1.525486055520759, -2.2842579938670644, -0.2668197974505191),\n (-1.246930807816442, 0.9000033565709169, 0.7927934676101465),\n (-0.26242043164905693, 1.4290013064896112, -0.5956842516835208),\n (0.739203033547077, 0.4163114365921572, 2.132044487804084))}\n new_xyz = converter.modify_coords(coords=xyz4, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol4)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol4), new_val, 5)\n\n # test 1-indexed input\n indices = [5, 4, 2, 1]\n new_xyz = converter.modify_coords(coords=xyz4, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol4, index=1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol4, index=1),\n new_val, 5)\n\n # test TSs\n indices = [19, 10, 4, 2]\n fragments = [[46, 47, 48, 49, 50, 51, 52], [f + 1 for f in range(45)]]\n self.assertAlmostEqual(calculate_dihedral_angle(coords=xyz5, torsion=indices, index=1), 56.83358841, 3)\n new_xyz = converter.modify_coords(coords=xyz5,\n indices=indices,\n new_value=300,\n modification_type='groups',\n mol=mol5,\n index=1,\n fragments=fragments,\n )\n self.assertAlmostEqual(calculate_dihedral_angle(coords=new_xyz, torsion=indices, index=1), 300, places=3)\n\n indices = [1, 2, 3, 5]\n fragments = [[f + 1 for f in range(23)], [24, 25, 26, 27, 28]]\n self.assertAlmostEqual(calculate_dihedral_angle(coords=xyz6, torsion=indices, index=1), 62.30597206, 3)\n new_xyz = converter.modify_coords(coords=xyz6,\n indices=indices,\n new_value=200,\n modification_type='groups',\n mol=mol6,\n index=1,\n fragments=fragments,\n )\n self.assertAlmostEqual(calculate_dihedral_angle(coords=new_xyz, torsion=indices, index=1), 200, places=3)\n \n coords={'coords': ((-0.7862825353221515, -0.28824023055636216, 0.4782944637692894),\n (0.21968869054702736, 0.40094256193652866, -0.2919820499085219),\n (-0.07796443595084417, 0.5692847962524797, -1.6621913220858304),\n (-1.102200211589376, -1.1132157833188596, -0.01879031191901484),\n (-1.5973749070505925, 0.29546848172306867, 0.6474145668621136),\n (0.4237940503863438, 1.3660724867336205, 0.19101403432872205),\n (1.1352054736534014, -0.1980893380251006, -0.2652264470061931),\n (-0.7497944593402266, 1.258221857416732, -1.7507029654486272)),\n 'isotopes': (14, 12, 16, 1, 1, 1, 1, 1),\n 'symbols': ('N', 'C', 'O', 'H', 'H', 'H', 'H', 'H')}\n indices=[3, 0, 1, 2]\n new_value=53.76\n modification_type=\"groups\"\n mol=Molecule(smiles=\"NCO\")\n new_xyz = converter.modify_coords(coords=coords,\n indices=indices,\n new_value=new_value,\n modification_type=modification_type,\n mol=mol)\n self.assertTrue(type(new_xyz[\"coords\"][0][0] is float))", "def test_isomorphic_stripped_stereochemistry(self):\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n # Ensure default value is respected and order does not matter\n 
assert Molecule.are_isomorphic(mol1, mol2, strip_pyrimidal_n_atom_stereo=True)\n assert Molecule.are_isomorphic(mol1, mol2)\n assert Molecule.are_isomorphic(mol2, mol1)\n\n assert mol1 == mol2\n assert Molecule.from_smiles(\"CCC[N@](C)CC\") == Molecule.from_smiles(\n \"CCC[N@@](C)CC\"\n )", "def assert_molecule_is_equal(molecule1, molecule2, msg):\n if not (molecule1.is_isomorphic_with(molecule2)):\n raise AssertionError(msg)", "def assert_molecule_is_equal(molecule1, molecule2, msg):\n if not (molecule1.is_isomorphic_with(molecule2)):\n raise AssertionError(msg)", "def equivalent(self):\n Gl, Gr = self._symmetry\n if Gl._tuples == Gr._tuples: # Grain exchange case\n orientations = Orientation.stack([self, ~self]).flatten()\n equivalent = Gr.outer(orientations.outer(Gl))\n return self.__class__(equivalent).flatten()", "def final_homography(pts1, pts2, feats1, feats2):\n\n #\n # Your code here\n #\n\n idxs1, idxs2 = find_matches(feats1, feats2)\n ransac_return = ransac(pts1[idxs1], pts2[idxs2])\n\n return ransac_return, idxs1, idxs2", "def test_equivalent():\n # Positive test\n assert u.equivalent(np.arange(10)*q.um, q.cm)\n\n # Negative units test\n assert not u.equivalent(np.arange(10)*q.um, q.Jy)\n\n # Negative dtype test\n assert not u.equivalent(np.arange(10), q.um)", "def test_oblique_pairs_built(self):\n dna = self._create_dna()\n\n p1 = dna.data[0]\n p2 = dna.data[1]\n p3 = dna.data[2]\n p4 = dna.data[3]\n p5 = dna.data[4]\n p6 = dna.data[5]\n\n # Let's build the sequences according to what we have in DNA columns\n top_down_seq = f'{p1.codon1[0].name}{p2.codon1[1].name}{p3.codon1[2].name}'\n top_down_seq += f'{p4.codon2[0].name}{p5.codon2[1].name}{p6.codon2[2].name}'\n\n bottom_up = f'{p6.codon1[0].name}{p5.codon1[1].name}{p4.codon1[2].name}'\n bottom_up += f'{p3.codon2[0].name}{p2.codon2[1].name}{p1.codon2[2].name}'\n\n # Let's compare the sequences above to the ONES built internally in\n # DNA\n self.assertEqual(top_down_seq, dna.top_left_oblique_pair.sequence)\n self.assertEqual(bottom_up, dna.bottom_left_oblique_pair.sequence)", "def toluene():\n coords = [\n [1.2264, 0.0427, 0.0670],\n [1.0031, -1.3293, 0.0600],\n [-0.2945, -1.8256, -0.0060],\n [-1.3704, -0.9461, -0.0646],\n [-1.1511, 0.4266, -0.0578],\n [0.1497, 0.9292, 0.0066],\n [0.3871, 2.3956, -0.0022],\n [2.2495, 0.4310, 0.1211],\n [1.8510, -2.0202, 0.1071],\n [-0.4688, -2.9062, -0.0109],\n [-2.3926, -1.3347, -0.1157],\n [-2.0006, 1.1172, -0.1021],\n [0.5024, 2.7582, -1.0330],\n [1.2994, 2.6647, 0.5466],\n [-0.4475, 2.9470, 0.4506],\n ]\n coords = [[float(j) / Bohr for j in i] for i in coords]\n\n symbols = [\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n ]\n\n atoms = []\n for i, _ in enumerate(coords):\n atoms.append(Atom(symbols[i], position=coords[i]))\n return Molecule(symbols=atoms)", "def test_equivalence():\n\t\n\tfrom . 
import spectra as sp\n\t\n\t#analytic\n\tp_dict = {'Bfield':15000,'rb85frac':1,'Btheta':0*np.pi/180,'Bphi':0*np.pi/180,'lcell':1e-3,'T':84,'Dline':'D2','Elem':'Rb'}\n\tchiL1,chiR1,chiZ1 = sp.calc_chi([-18400],p_dict)\n\tRotMat1, n11, n21 = solve_diel(chiL1,chiR1,chiZ1,0,150,force_numeric=False)\n\t\n\t#numeric\n\tchiL2, chiR2, chiZ2 = chiL1, chiR1, chiZ1\n\t#chiL2,chiR2,chiZ2 = sp.calc_chi([-18400],p_dict)\n\tRotMat2, n12, n22 = solve_diel(chiL2,chiR2,chiZ2,0,150,force_numeric=True)\n\t\n\tprint('RM 1')\n\tprint(RotMat1)\n\n\tprint('RM 2')\n\tprint(RotMat2)\t\n\t\n\tprint('n1_1 (analytic)')\n\tprint(n11)\n\tprint('n1_2')\n\tprint(n12)\n\tprint('n2_1 (analytic)')\n\tprint(n21)\n\tprint('n2_2')\n\tprint(n22)\n\t\n\tprint('chi1')\n\tprint((chiL1, chiR1, chiZ1))\n\n\tprint('chi2')\n\tprint((chiL2, chiR2, chiZ2))", "def basic_compare(self, other: \"Molecule\") -> bool:\n return self.inchi_key[:14] == other.inchi_key[:14]", "def DeMorgan_equivalence(self, position_list=[]):\n\t\treturn self.__class__(_replace_match_at(self, position_list, [\n\t\t\t[ ((neg, A), disj, (neg, B)), (neg, (A, conj, B)) ],\n\t\t\t[ (neg, (A, conj, B)), ((neg, A), disj, (neg, B)) ],\n\t\t\t[ ((neg, A), conj, (neg, B)), (neg, (A, disj, B)) ],\n\t\t\t[ (neg, (A, disj, B)), ((neg, A), conj, (neg, B)) ]\n\t\t]))", "def areEquivalent(*args):\n return _libsbml.Unit_areEquivalent(*args)", "def nearby():\n for i in ids:\n for j in ids:\n if i != j:\n if sum([1 for x,y in zip(i,j) if x!=y]) == 1:\n print(\"\".join([x for x,y in zip(i,j) if x==y]))\n return", "def testMoreStereo(self):\r\n smi_and_cansmi = [\r\n ('Cl[C@](C)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](C)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](C)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Cl)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](Br)(I)C', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](C)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](C)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](C)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](C)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Cl)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Br)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Br)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Br)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Br)(Cl)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](C)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@](I)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](I)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](I)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Cl)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Cl)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](C)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](C)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Br)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Br)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Br)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Br)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](I)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](I)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](I)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](I)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](I)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@](Br)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](I)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](I)(Cl)C', 
'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(I)C', 'C[C@@](Cl)(Br)I')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def _compare(smi1, smi2):\n return _canonicalize(smi1) == _canonicalize(smi2)", "def validBond(index1, index2, direction):\n #print \"?valid bond: \", allAtoms[index1].pos, \" , \", allAtoms[index2].pos, direction\n cell1 = index1/numAtomsPerCell\n cell2 = index2/numAtomsPerCell\n #Find the coordinates of the cell in units of interaction cells\n posInX1 = int(cell1/(size*size))\n posInX2 = int(cell1/(size*size))\n leftover1 = cell1%(size*size)\n leftover2 = cell2%(size*size)\n posInY1 = int(leftover1/size)\n posInY2 = int(leftover2/size)\n posInZ1 = leftover1%size\n posInZ2 = leftover2%size\n \n #Now, a valid interaction can cross an interaction cell boundary in any direction,\n #but it has a maximum length of one interaction cell. However, I have made the minimum\n #size of this larger translated lattice equal to 3*3*3 interaction cells. Therefore,\n #when we hit an edge and get in invalid interaction, the cells will be at least 2\n #interaction cells apart in the direction of the interaction.\n if(direction[0]):\n if numpy.abs(posInX1 - posInX2)>1:\n #print \"false\"\n return False\n if(direction[1]):\n if numpy.abs(posInY1 - posInY2)>1:\n #print \"false\"\n return False\n if(direction[2]):\n if numpy.abs(posInZ1 - posInZ2)>1:\n #print \"false\"\n return False\n print #\"true\"\n return True\n\n #Old (incorrect) method:\n if 0:\r\n print \"?valid bond: \", allAtoms[index1].pos, \" , \", allAtoms[index2].pos, direction\r\n cell1 = index1/numAtomsPerCell\r\n cell2 = index2/numAtomsPerCell\r\n zRow1 = cell1/size#this relies on the list being created in the nested for loop that was used, z within y within x\r\n zRow2 = cell2/size\r\n if(zRow1 != zRow2 and direction[2]):\n print \"false\"\r\n return False\r\n xLayer1 = cell1/(size*size)\r\n xLayer2 = cell2/(size*size)\r\n if(xLayer1 != xLayer2 and direction[1]):\n print \"false\"\r\n return False\r\n #shouldn't have to check z, because if it's not valid in z direction, it would be off the list (>len(allAtoms))\n print \"true\"\r\n return True", "def mvee(atoms, tol = 0.00001):\n points_asarray = np.array([atom.coordinates for atom in atoms])\n points = np.asmatrix(points_asarray)\n N, d = points.shape\n Q = np.column_stack((points, np.ones(N))).T\n err = tol+1.0\n u = np.ones(N)/N\n try:\n while err > tol:\n # assert u.sum() == 1 # invariant\n X = Q * np.diag(u) * Q.T\n M = np.diag(Q.T * la.inv(X) * Q)\n jdx = np.argmax(M)\n step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))\n new_u = (1-step_size)*u\n new_u[jdx] += step_size\n err = la.norm(new_u-u)\n u = new_u\n c = u*points\n A = la.inv(points.T*np.diag(u)*points - c.T*c)/d \n except: # For singular matrix errors i.e. 
motif is ellipse rather than ellipsoid\n centroid = np.average(points_asarray,axis=0)\n plane = Plane(atoms)\n normal = np.array([plane.a,plane.b,plane.c])\n norm_mag = np.sqrt(np.dot(normal,normal))\n for i, norm in enumerate(normal):\n normal[i] = norm * 1 / norm_mag\n centroid = np.average(points,axis=0).reshape(-1,3)\n p1 = centroid + normal*0.00001\n p2 = centroid - normal*0.00001\n points_asarray = np.concatenate([points_asarray,p1,p2],axis=0)\n points = np.asmatrix(points_asarray)\n N, d = points.shape\n Q = np.column_stack((points, np.ones(N))).T\n err = tol+1.0\n u = np.ones(N)/N\n while err > tol:\n # assert u.sum() == 1 # invariant\n X = Q * np.diag(u) * Q.T\n M = np.diag(Q.T * la.inv(X) * Q)\n jdx = np.argmax(M)\n step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))\n new_u = (1-step_size)*u\n new_u[jdx] += step_size\n err = la.norm(new_u-u)\n u = new_u\n c = u*points\n A = la.inv(points.T*np.diag(u)*points - c.T*c)/d \n \n return np.asarray(A), np.squeeze(np.asarray(c))", "def are_similar(first_coords: List[Tuple[int, int]], second_coords: List[Tuple[int, int]]) -> bool:\n # Step 1: Get angles of each triangle\n # Step 2: Compare grades of two triangles\n # Step 3: If two angles are equal then first triangle is similar to second triangle\n pass", "def _compute_sims(self):\n no_duplicates = defaultdict(list)\n for num, lineset1, idx1, lineset2, idx2 in self._iter_sims():\n duplicate = no_duplicates[num]\n for couples in duplicate:\n if (lineset1, idx1) in couples or (lineset2, idx2) in couples:\n couples.add((lineset1, idx1))\n couples.add((lineset2, idx2))\n break\n else:\n duplicate.append({(lineset1, idx1), (lineset2, idx2)})\n sims = []\n for num, ensembles in no_duplicates.items():\n for couples in ensembles:\n sims.append((num, couples))\n sims.sort()\n sims.reverse()\n return sims", "def similarity(self, e1, e2):\n\t\tpass", "def GalacticToEquatorial(Galactic):\n \n # l,b,s => ra, dec, s\n l = Galactic[:,0]\n b = Galactic[:,1]\n cb = np.cos(b)\n sb = np.sin(b)\n dec = np.arcsin(np.cos(decgp)*cb*np.cos(l-lcp)+sb*np.sin(decgp))\n ra = ragp+np.arctan2(cb*np.sin(lcp-l),sb*np.cos(decgp)-cb*np.sin(decgp)*np.cos(l-lcp))\n ra[ra>2.*np.pi] -= 2.*np.pi\n if (len(Galactic[0,:])==3):\n Equatorial = np.column_stack([ra,dec,Galactic[:,2]])\n else:\n # vlos, mulcos(b), mub => vlos, muracos(dec), mudec\n cd = np.cos(dec)\n sd = np.sin(dec)\n A11 = (np.sin(decgp)*cd-np.cos(decgp)*sd*np.cos(ra-ragp))/cb\n A12 = -np.cos(decgp)*np.sin(ra-ragp)/cb\n A21 = (np.cos(decgp)*cd+np.sin(decgp)*sd*np.cos(ra-ragp)+sb*np.cos(lcp-l)*A11)/np.sin(lcp-l)\n A22 = (np.sin(decgp)*np.sin(ra-ragp)+sb*np.cos(lcp-l)*A12)/np.sin(lcp-l)\n index = np.where(np.fabs(np.cos(lcp-l))>np.fabs(np.sin(lcp-l)))\n A21[index] = (sd[index]*np.sin(ra[index]-ragp)-sb[index]*np.sin(lcp-l[index])*A11[index])/np.cos(lcp-l[index])\n A22[index] =-(np.cos(ra[index]-ragp)+sb[index]*np.sin(lcp-l[index])*A12[index])/np.cos(lcp-l[index])\n Prod = A11*A22-A12*A21\n Equatorial = np.column_stack((ra,dec,Galactic[:,2],Galactic[:,3],\n (A11*Galactic[:,4]-A21*Galactic[:,5])/Prod,\n (A22*Galactic[:,5]-A12*Galactic[:,4])/Prod))\n \n return Equatorial", "def isIsosceles(self):\n\t\treturn self.a == self.b or self.a == self.c or self.b == self.c", "def find_connected_atoms(struct, tolerance=0.45, ldict=JmolNN().el_radius):\n n_atoms = len(struct.species)\n fc = np.array(struct.frac_coords)\n fc_copy = np.repeat(fc[:, :, np.newaxis], 27, axis=2)\n neighbors = np.array(list(itertools.product([0, 1, -1], [0, 1, -1], [0, 1, -1]))).T\n neighbors = 
np.repeat(neighbors[np.newaxis, :, :], 1, axis=0)\n fc_diff = fc_copy - neighbors\n species = list(map(str, struct.species))\n # in case of charged species\n for i, item in enumerate(species):\n if not item in ldict.keys():\n species[i] = str(Specie.from_string(item).element)\n latmat = struct.lattice.matrix\n connected_matrix = np.zeros((n_atoms,n_atoms))\n\n for i in range(n_atoms):\n for j in range(i + 1, n_atoms):\n max_bond_length = ldict[species[i]] + ldict[species[j]] + tolerance\n frac_diff = fc_diff[j] - fc_copy[i]\n distance_ij = np.dot(latmat.T, frac_diff)\n # print(np.linalg.norm(distance_ij,axis=0))\n if sum(np.linalg.norm(distance_ij, axis=0) < max_bond_length) > 0:\n connected_matrix[i, j] = 1\n connected_matrix[j, i] = 1\n return connected_matrix", "def are_torsions_same2(geo, geoi, idxs_lst):\n dtol = 0.09\n same_dihed = True\n for idxs in idxs_lst:\n val = dihedral_angle(geo, *idxs)\n vali = dihedral_angle(geoi, *idxs)\n valip = vali+2.*numpy.pi\n valim = vali-2.*numpy.pi\n vchk1 = abs(val - vali)\n vchk2 = abs(val - valip)\n vchk3 = abs(val - valim)\n if vchk1 > dtol and vchk2 > dtol and vchk3 > dtol:\n same_dihed = False\n return same_dihed", "def register_com(vol_a: Volume, vol_b: Volume) -> Tuple[Volume, Volume]:\n from dipy.align.imaffine import transform_centers_of_mass\n\n affine = transform_centers_of_mass(vol_a, vol_a.grid_to_world, vol_b, vol_b.grid_to_world)\n\n vol_b.world_transform[:] = np.array(affine.affine)\n return vol_a, vol_b", "def setResNameCheckCoords(self):\n exit = False\n localDir = os.path.abspath('.')\n if not os.path.exists(self.tmpDir):\n os.mkdir(self.tmpDir)\n #if not os.path.exists(os.path.join(tmpDir, self.inputFile)):\n copy2(self.absInputFile, self.tmpDir)\n os.chdir(self.tmpDir)\n\n if self.ext == '.pdb':\n tmpFile = open(self.inputFile, 'r')\n else:\n cmd = '%s -i %s -fi %s -o tmp -fo ac -pf y' % \\\n (self.acExe, self.inputFile, self.ext[1:])\n self.printDebug(cmd)\n out = getoutput(cmd)\n if not out.isspace():\n self.printDebug(out)\n try:\n tmpFile = open('tmp', 'r')\n except:\n rmtree(self.tmpDir)\n raise\n\n tmpData = tmpFile.readlines()\n residues = set()\n coords = {}\n for line in tmpData:\n if 'ATOM ' in line or 'HETATM' in line:\n residues.add(line[17:20])\n at = line[0:17]\n cs = line[30:54]\n if coords.has_key(cs):\n coords[cs].append(at)\n else:\n coords[cs] = [at]\n #self.printDebug(coords)\n\n if len(residues) > 1:\n self.printError(\"more than one residue detected '%s'\" % str(residues))\n self.printError(\"verify your input file '%s'. 
Aborting ...\" % self.inputFile)\n sys.exit(1)\n\n dups = \"\"\n short = \"\"\n long = \"\"\n longSet = set()\n id = 0\n items = coords.items()\n l = len(items)\n for item in items:\n id += 1\n if len(item[1]) > 1: # if True means atoms with same coordinates\n for i in item[1]:\n dups += \"%s %s\\n\" % (i, item[0])\n\n# for i in xrange(0,len(data),f):\n# fdata += (data[i:i+f])+' '\n\n for id2 in xrange(id,l):\n item2 = items[id2]\n c1 = map(float,[item[0][i:i+8] for i in xrange(0,24,8)])\n c2 = map(float,[item2[0][i:i+8] for i in xrange(0,24,8)])\n dist2 = self.distance(c1,c2)\n if dist2 < minDist2:\n dist = math.sqrt(dist2)\n short += \"%8.5f %s %s\\n\" % (dist, item[1], item2[1])\n if dist2 < maxDist2: # and not longOK:\n longSet.add(str(item[1]))\n longSet.add(str(item2[1]))\n if str(item[1]) not in longSet:\n long += \"%s\\n\" % item[1]\n\n if dups:\n self.printError(\"Atoms with same coordinates in '%s'!\" % self.inputFile)\n self.printQuoted(dups[:-1])\n exit = True\n\n if short:\n self.printError(\"Atoms TOO close (< %s Ang.)\" % minDist)\n self.printQuoted(\"Dist (Ang.) Atoms\\n\" + short[:-1])\n exit = True\n\n if long:\n self.printError(\"Atoms TOO alone (> %s Ang.)\" % maxDist)\n self.printQuoted(long[:-1])\n exit = True\n\n if exit:\n if self.force:\n self.printWarn(\"You chose to proceed anyway with '-f' option. GOOD LUCK!\")\n else:\n self.printError(\"Use '-f' option if you want to proceed anyway. Aborting ...\")\n rmtree(self.tmpDir)\n sys.exit(1)\n\n resname = list(residues)[0]\n newresname = resname\n\n if resname.isdigit() or 'E' in resname[1:3].upper() or 'ADD' in resname.upper():\n newresname = 'R' + resname\n if not resname.isalnum():\n newresname = 'MOL'\n if newresname != resname:\n self.printWarn(\"In %s.lib, residue name will be '%s' instead of '%s' elsewhere\"\n % (self.acBaseName, newresname, resname))\n\n self.resName = newresname\n\n os.chdir(localDir)\n self.printDebug(\"setResNameCheckCoords done\")", "def test_xyz_to_ase(self):\n atoms_1 = converter.xyz_to_ase(self.xyz1['dict'])\n self.assertIsInstance(atoms_1, Atoms)\n self.assertEqual(str(atoms_1.symbols), 'CH4')\n np.testing.assert_array_equal(atoms_1.positions, [[0., 0., 0.],\n [0.6300326, 0.6300326, 0.6300326],\n [-0.6300326, -0.6300326, 0.6300326],\n [-0.6300326, 0.6300326, -0.6300326],\n [0.6300326, -0.6300326, -0.6300326]])", "def test_canonical_ordering_rdkit(self):\n from openforcefield.utils.toolkits import RDKitToolkitWrapper\n\n rdkit = RDKitToolkitWrapper()\n # get ethanol in canonical order\n ethanol = create_ethanol()\n # get reversed non canonical ethanol\n reversed_ethanol = create_reversed_ethanol()\n # get the canonical ordering\n canonical_ethanol = reversed_ethanol.canonical_order_atoms(rdkit)\n # make sure the mapping between the ethanol and the rdkit ref canonical form is the same\n assert (\n True,\n {0: 2, 1: 0, 2: 1, 3: 8, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7},\n ) == Molecule.are_isomorphic(canonical_ethanol, ethanol, True)", "def process_atoms(self, molecule_info):\n # Set atoms from molecule information.\n atoms = [x.split()[3] for x in molecule_info]\n atom_ids = [re.search(r'[\\D_]', x).group(0) for x in atoms]\n\n return atoms, atom_ids", "def test_canonical_ordering_rdkit(self):\n from openff.toolkit.utils.toolkits import RDKitToolkitWrapper\n\n rdkit = RDKitToolkitWrapper()\n # get ethanol in canonical order\n ethanol = create_ethanol()\n # get reversed non canonical ethanol\n reversed_ethanol = create_reversed_ethanol()\n # get the canonical ordering\n canonical_ethanol = 
reversed_ethanol.canonical_order_atoms(rdkit)\n # make sure the mapping between the ethanol and the rdkit ref canonical form is the same\n assert (\n True,\n {0: 2, 1: 0, 2: 1, 3: 8, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7},\n ) == Molecule.are_isomorphic(canonical_ethanol, ethanol, True)", "def test_terminal_rotamer_filtering(self):\n LIGAND_PATH = 'ligands/oleic_acid.pdb'\n\n ligand_path = get_data_file_path(LIGAND_PATH)\n molecule = Molecule(ligand_path, exclude_terminal_rotamers=True)\n\n rotamers_per_branch = molecule.rotamers\n\n assert len(rotamers_per_branch) == 2, \"Found an invalid number \" + \\\n \"of branches: {}\".format(len(rotamers_per_branch))\n\n atom_list_1 = list()\n atom_list_2 = list()\n rotamers = rotamers_per_branch[0]\n for rotamer in rotamers:\n atom_list_1.append(set([rotamer.index1, rotamer.index2]))\n\n rotamers = rotamers_per_branch[1]\n for rotamer in rotamers:\n atom_list_2.append(set([rotamer.index1, rotamer.index2]))\n\n EXPECTED_INDICES_1 = [set([9, 10]), set([8, 9]), set([7, 8]),\n set([6, 7]), set([5, 6]), set([2, 5]),\n set([0, 2]), set([0, 1])]\n\n EXPECTED_INDICES_2 = [set([12, 11]), set([12, 13]), set([13, 14]),\n set([14, 15]), set([15, 16]), set([16, 17]),\n set([17, 18])]\n\n where_1 = list()\n for atom_pair in atom_list_1:\n if atom_pair in EXPECTED_INDICES_1:\n where_1.append(1)\n elif atom_pair in EXPECTED_INDICES_2:\n where_1.append(2)\n else:\n where_1.append(0)\n\n where_2 = list()\n for atom_pair in atom_list_2:\n if atom_pair in EXPECTED_INDICES_1:\n where_2.append(1)\n elif atom_pair in EXPECTED_INDICES_2:\n where_2.append(2)\n else:\n where_2.append(0)\n\n assert (all(i == 1 for i in where_1)\n and all(i == 2 for i in where_2)) or \\\n (all(i == 2 for i in where_1)\n and all(i == 1 for i in where_2)), \"Invalid rotamer library \" + \\\n \"{}, {}\".format(where_1, where_2)\n\n assert (all(i == 1 for i in where_1)\n and all(i == 2 for i in where_2)\n and len(where_1) == len(EXPECTED_INDICES_1)\n and len(where_2) == len(EXPECTED_INDICES_2)) or \\\n (all(i == 2 for i in where_1)\n and all(i == 1 for i in where_2)\n and len(where_1) == len(EXPECTED_INDICES_2)\n and len(where_2) == len(EXPECTED_INDICES_1)), \"Unexpected \" + \\\n \"number of rotamers\"", "def _get_positions_mapping(orbitals, real_space_operator, position_tolerance):\n positions = [orbital.position for orbital in orbitals]\n res = {}\n for i, pos1 in enumerate(positions):\n new_pos = real_space_operator.apply(pos1)\n res[i] = [\n j for j, pos2 in enumerate(positions) if _is_same_position(\n new_pos, pos2, position_tolerance=position_tolerance\n )\n ]\n return res", "def test_unsorted_xyz_mol_from_xyz(self):\n n3h5 = ARCSpecies(label='N3H5', xyz=self.xyz8['str'], smiles='NNN')\n expected_adjlist = \"\"\"1 N u0 p1 c0 {2,S} {4,S} {5,S}\n2 H u0 p0 c0 {1,S}\n3 H u0 p0 c0 {4,S}\n4 N u0 p1 c0 {1,S} {3,S} {6,S}\n5 H u0 p0 c0 {1,S}\n6 N u0 p1 c0 {4,S} {7,S} {8,S}\n7 H u0 p0 c0 {6,S}\n8 H u0 p0 c0 {6,S}\n\"\"\"\n self.assertEqual(n3h5.mol.to_adjacency_list(), expected_adjlist)\n self.assertEqual(n3h5.conformers[0], self.xyz8['dict'])", "def compare(self, m2):\n assert self.natoms == m2.natoms\n for atom1 in self.atoms:\n found = False\n for atom2 in m2.atoms:\n if(atom1 == atom2):\n found = True\n break\n else:\n raise Exception(\"Atom not found! 
{0}\".format(atom1))\n else:\n pass", "def invariants(mol):\n atoms_dict={}\n \n for idxs,atom in enumerate(mol.GetAtoms()):\n components=[]\n components.append(atomic_number(mol,idxs))\n components.append(heavy_count(mol,idxs))\n components.append(H_count(mol,idxs))\n components.append(valence(mol,idxs))\n #components.append(charge(mol,idxs))\n components.append(negativity(mol,idxs))\n components.append(mass(mol,idxs))\n \n atoms_dict[idxs]=get_hash(components)\n return atoms_dict", "def assert_equal_matrices(array, matrix1, matrix2, periodic):\n nonlocal CUTOFF\n indices = np.where(matrix1 != matrix2)\n for index in range(len(indices[0])):\n if len(indices) == 2:\n # multi_model = False -> AtomArray\n m = None\n i = indices[0][index]\n j = indices[1][index]\n box = array.box if periodic else None\n distance = struc.distance(array[i], array[j], box=box)\n if len(indices) == 3:\n # multi_model = True -> AtomArrayStack\n m = indices[0][index]\n i = indices[1][index]\n j = indices[2][index]\n box = array.box[m] if periodic else None\n distance = struc.distance(array[m,i], array[m,j], box=box)\n try:\n assert distance == pytest.approx(CUTOFF, abs=1e-4)\n except AssertionError:\n print(f\"Model {m}, Atoms {i} and {j}\")\n raise", "def overlapping_atoms(cifs):\n errors = []\n\n # catch pymatgen warnings for overlapping atoms\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n for cif in cifs:\n try:\n s = CifParser(cif).get_structures(primitive=True)[0]\n assert s.is_ordered\n except (ValueError,AssertionError) as exc:\n s = CifParser(cif, occupancy_tolerance=1000).get_structures(primitive=True)[0]\n s.to(filename=cif)\n print(f'Fixed overlapping atoms in {cif}')\n except Exception as exc:\n errors.append(f'Unable to parse file {cif}')\n \n if errors:\n print('\\n'.join(errors))\n sys.exit(1)", "def con_2_eon_atoms(path):\n at = et.Atoms(path)\n# at = et.Atoms(0)\n# at.read_con(path)\n return at", "def is_canonical(hybrids):\n mrhyb = hybrids[2].upper().replace(\"U\", \"T\")\n mirhyb = hybrids[0].upper().replace(\"U\", \"T\")\n hybrid = hybrids[1]\n \"\"\"\n 2-8\n \"\"\"\n if hybrid[1:8] == \"|||||||\":\n guwoble = False\n for mirnuc, mrnuc in zip(mirhyb[1:8], mrhyb[1:8]):\n if (mirnuc == 'G' and mrnuc == 'T') or (mirnuc == 'T' and mrnuc == 'G'):\n guwoble = True\n if guwoble:\n return False, \"2-8-Gwoble\"\n else:\n return True, \"2-8\"\n elif (hybrid[1:7] == \"||||||\" and mrhyb[0] == 'A'):\n guwoble = False\n for mirnuc, mrnuc in zip(mirhyb[1:7], mrhyb[1:7]):\n if (mirnuc == 'G' and mrnuc == 'T') or (mirnuc == 'T' and mrnuc == 'G'):\n guwoble = True\n if guwoble:\n return False, \"2-7-A-Gwoble\"\n else:\n return True, \"2-7-A\"\n else:\n if hybrid[0:7] == \"|||||||\":\n return False, \"1-7-ElMMo\"\n elif hybrid[1:7] == \"||||||\":\n return False, \"6-mer\"\n if \"v\" in hybrid[0:8]:\n return False, \"mRNAbulge\"\n elif \"^\" in hybrid[0:8]:\n return False, \"miRNAbulge\"\n elif \"O\" in hybrid[0:8]:\n return False, \"symmetric_loop\"\n else:\n return False, \"unknown\"", "def is_atomic(self):\n \n symbols=set()\n for e in self.symbols:\n if not e=='':\n symbols.add(e)\n\n for s in symbols: #unicity first\n count=0\n for e in symbols:\n if s==e:\n count+=1\n if count!=1:\n return False\n else:\n continue \n temp=symbols.copy()\n for s in symbols:\n temp.remove(s)\n for e in temp:\n if s in e:\n return False\n else:\n continue\n temp=symbols.copy()\n\n return True", "def _check_transformation_matrix_homogeneity(self):\n transformation_matrices_similar = True # assume they are 
all similar\n first = True\n rows = None\n cols = None\n for transform in self:\n if first:\n rows = transform.rows\n cols = transform.cols\n first = False\n else:\n if transform.rows != rows or transform.cols != cols:\n transformation_matrices_similar = False\n break\n return transformation_matrices_similar, rows, cols", "def test_get_molecule_least_similar_to(self):\n csv_fpath = self.smiles_seq_to_xl_or_csv(ftype=\"csv\")\n for descriptor in SUPPORTED_FPRINTS:\n for similarity_measure in SUPPORTED_SIMILARITIES:\n molecule_set = MoleculeSet(\n molecule_database_src=csv_fpath,\n molecule_database_src_type=\"csv\",\n fingerprint_type=descriptor,\n similarity_measure=similarity_measure,\n is_verbose=False,\n )\n for mol_smile, mol in zip(TEST_SMILES,\n molecule_set.molecule_database):\n compare_task = CompareTargetMolecule(\n target_molecule_smiles=mol_smile)\n [furthest_mol], [similarity] = compare_task.\\\n get_hits_dissimilar_to(molecule_set)\n mol_similarities = molecule_set.compare_against_molecule(\n mol)\n self.assertEqual(\n np.min(mol_similarities),\n mol.get_similarity_to(\n molecule_set.molecule_database[furthest_mol],\n molecule_set.similarity_measure\n ),\n f\"Expected furthest mol to have minimum \"\n f\"similarity to target molecule \"\n f\"using similarity measure: {similarity_measure}, \"\n f\"descriptor: {descriptor}, \"\n f\"for molecule {mol.mol_text}\",\n )\n self.assertGreaterEqual(similarity, 0.,\n \"Expected similarity value to \"\n \"be >= 0.\"\n f\"using similarity measure: \"\n f\"{similarity_measure}, \"\n f\"descriptor: {descriptor}, \"\n f\"for molecule {mol.mol_text}\")\n self.assertLessEqual(similarity, 1.,\n \"Expected similarity value to \"\n \"be <= 1.\"\n f\"using similarity measure: \"\n f\"{similarity_measure}, \"\n f\"descriptor: {descriptor}, \"\n f\"for molecule {mol.mol_text}\"\n )", "def equiv(subdiagram1, subdiagram2):\n # TODO: Make sure arguments are the right type\n # TODO: Make this work for subdiagrams of length >= 1\n # subdiagrams are not equivalent if they have different numbers of crossings\n # print \"sub1\\t\", subdiagram1, len(subdiagram1[0])\n # print \"sub2\\t\", subdiagram2, len(subdiagram2[0])\n if len(subdiagram1[0]) != len(subdiagram2[0]):\n return False\n # look for a match\n for i in range(len(subdiagram1[0])-1):\n crossing1 = subdiagram1[0][i]\n typeMatch = False\n for j in range(len(subdiagram2[0])-1):\n crossing2 = subdiagram2[0][j]\n print \"\\tc1 \",crossing1\n print \"\\tc2 \",crossing2\n # check for same crossing type\n # TODO: check for empty crossing\n if len(crossing1) == 5 and len(crossing2) == 5:\n if crossing1[0] == crossing2[0]:\n print \" :)\"\n typeMatch = True\n \n\n return True", "def sameLinking(el_a, el_b, system_el2kbid, gold_el2kbid):\n\n sys_el_a_id = system_el2kbid[el_a]\n sys_el_b_id = system_el2kbid[el_b]\n gol_el_a_id = gold_el2kbid[el_a]\n gol_el_b_id = gold_el2kbid[el_b]\n\n if sys_el_a_id.startswith('NIL'): sys_el_a_id = 'NIL'\n if sys_el_b_id.startswith('NIL'): sys_el_b_id = 'NIL'\n if gol_el_a_id.startswith('NIL'): gol_el_a_id = 'NIL'\n if gol_el_b_id.startswith('NIL'): gol_el_b_id = 'NIL'\n\n #print system_el2kbid\n \n return sys_el_a_id == sys_el_b_id == gol_el_a_id == gol_el_b_id", "def test_clashing_atoms():\n benzene_path = examples_paths()['benzene']\n toluene_path = examples_paths()['toluene']\n with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_content = get_template_script(tmp_dir, keep_openeye=True)\n system_id = 'explicit-system'\n system_description = 
yaml_content['systems'][system_id]\n system_description['pack'] = True\n system_description['solvent'] = utils.CombinatorialLeaf(['vacuum', 'PME'])\n\n # Sanity check: at the beginning molecules clash\n toluene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(toluene_path, molecule_idx=0))\n benzene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(benzene_path, molecule_idx=0))\n assert pipeline.compute_min_dist(toluene_pos, benzene_pos) < pipeline.SetupDatabase.CLASH_THRESHOLD\n\n exp_builder = ExperimentBuilder(yaml_content)\n\n for sys_id in [system_id + '_vacuum', system_id + '_PME']:\n system_dir = os.path.dirname(\n exp_builder._db.get_system(sys_id)[0].position_path)\n\n # Get positions of molecules in the final system\n prmtop = openmm.app.AmberPrmtopFile(os.path.join(system_dir, 'complex.prmtop'))\n inpcrd = openmm.app.AmberInpcrdFile(os.path.join(system_dir, 'complex.inpcrd'))\n positions = inpcrd.getPositions(asNumpy=True).value_in_unit(unit.angstrom)\n topography = Topography(prmtop.topology, ligand_atoms='resname TOL')\n benzene_pos2 = positions.take(topography.receptor_atoms, axis=0)\n toluene_pos2 = positions.take(topography.ligand_atoms, axis=0)\n # atom_indices = pipeline.find_components(prmtop.createSystem(), prmtop.topology, 'resname TOL')\n # benzene_pos2 = positions.take(atom_indices['receptor'], axis=0)\n # toluene_pos2 = positions.take(atom_indices['ligand'], axis=0)\n\n # Test that clashes are resolved in the system\n min_dist, max_dist = pipeline.compute_min_max_dist(toluene_pos2, benzene_pos2)\n assert min_dist >= pipeline.SetupDatabase.CLASH_THRESHOLD\n\n # For solvent we check that molecule is within the box\n if sys_id == system_id + '_PME':\n assert max_dist <= exp_builder._db.solvents['PME']['clearance'].value_in_unit(unit.angstrom)", "def calc_asymmetric_unit_cell_indexes(n_abc, full_symm_elems):\n n_a, n_b, n_c = n_abc[0], n_abc[1], n_abc[2]\n\n point_index = numpy.stack(numpy.meshgrid(\n numpy.arange(n_a), numpy.arange(n_b), numpy.arange(n_c),\n indexing=\"ij\"), axis=0)\n point_index = point_index.reshape(point_index.shape[0], numpy.prod(point_index.shape[1:]))\n \n elem_r = full_symm_elems[4:13]\n elem_b = full_symm_elems[:4]\n\n r_ind = calc_m_v(\n numpy.expand_dims(elem_r, axis=1),\n numpy.expand_dims(point_index, axis=2), flag_m=False, flag_v=False)[0]\n\n div, mod = numpy.divmod(numpy.expand_dims(n_abc, axis=1), numpy.expand_dims(elem_b[3], axis=0))\n if not(numpy.all(mod == 0)):\n raise KeyError(\"Symmetry elements do not match with number of points\")\n point_index_s = numpy.mod(r_ind + numpy.expand_dims(div * elem_b[:3], axis=1),\n numpy.expand_dims(numpy.expand_dims(n_abc, axis=1), axis=2))\n value_index_s = n_c*n_b*point_index_s[0] + n_c*point_index_s[1] + point_index_s[2]\n value_index_s_sorted = numpy.sort(value_index_s, axis=1)\n\n a, ind_a_u_c, counts_a_u_c = numpy.unique(\n value_index_s_sorted[:, 0], return_index=True, return_counts=True)\n\n point_index_s_a_u_c = point_index[:, ind_a_u_c]\n\n return point_index_s_a_u_c, counts_a_u_c", "def test_run_molecule(self, single_mol_system):\n sequence = \"ABCDE\"\n expected = list(itertools.chain(*([element] * 3 for element in sequence)))\n processor = dssp.AnnotateResidues(\"test\", sequence)\n processor.run_molecule(single_mol_system.molecules[0])\n found = self.sequence_from_system(single_mol_system, \"test\")\n assert found == expected", "def is_equivalent(self, other, name, logger, tolerance=0.):\n if not isinstance(other, Vector):\n logger.debug('other is not a Vector 
object.')\n return False\n for component in ('x', 'y', 'z', 'r', 't'):\n if not self._check_equivalent(other, name, component, logger,\n tolerance):\n return False\n return True", "def unpruned_atom_pairs(\n molecules: List[masm.Molecule], idx_map: List[Tuple[int, int]], distance_bounds: Tuple[int, int]\n) -> Set[Tuple[int, int]]:\n\n def structure_idx(c: int, i: int) -> int:\n return idx_map.index((c, i))\n\n pairs: Set[Tuple[int, int]] = set()\n\n for component, molecule in enumerate(molecules):\n for i in molecule.graph.atoms():\n distances = np.array(masm.distance(i, molecule.graph))\n partners = np.nonzero((distances <= max(distance_bounds)) & (distances >= min(distance_bounds)))[0]\n\n # Back-transform to structure indices and add to set\n s_i = structure_idx(component, i)\n s_partners = [structure_idx(component, j) for j in partners]\n pairs |= set(make_sorted_pair(s_i, s_j) for s_j in s_partners)\n\n return pairs", "def compute_orientation(x,y,lx,ly,nfil):\n # number of molecules\n natoms = len(x)\n nmol = natoms/nfil\n # allocate aray for results\n phi = np.zeros((natoms), dtype = np.float64)\n tx = np.zeros((natoms), dtype = np.float64)\n ty = np.zeros((natoms), dtype = np.float64)\n # loop over all polymers\n k = 0\n for i in range(nmol):\n for j in range(nfil):\n if j == 0:\n x1 = x[k]\n y1 = y[k]\n x2 = x[k+1]\n y2 = y[k+1]\n elif j == nfil-1:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k]\n y2 = y[k]\n else:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k+1]\n y2 = y[k+1]\n # compute nearest neighbor\n dx = neigh_min(x2-x1,lx)\n dy = neigh_min(y2-y1,ly)\n # compute angle using atan2\n pi = math.atan2(dy,dx)\n phi[k] = pi\n tx[k] = dx / np.sqrt(dx**2 + dy**2)\n ty[k] = dy / np.sqrt(dx**2 + dy**2)\n # increment k\n k = k + 1\n return phi, tx, ty", "def _is_same_position(pos1, pos2, position_tolerance):\n return np.isclose(_pos_distance(pos1, pos2), 0, atol=position_tolerance)", "def comaIsSymmetric(self):\n\t\tfor i in range(2*self.totalBins):\n\t\t\tfor j in range(2*self.totalBins):\n\t\t\t\tif not self.coma[i,j] == self.coma[j,i]:\n\t\t\t\t\tprint i,j,self.coma[i,j],self.coma[j,i]\n\t\t\t\t\treturn False\n\t\treturn True", "def get_interactions(list_atoms1, list_atoms2, dist):\n beta_carbons1 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms1))\n beta_carbons2 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms2))\n ns = NeighborSearch(beta_carbons1)\n interactions = []\n\n for atom in beta_carbons2:\n interact = ns.search(atom.get_coord(), dist)\n interactions.extend(\n [tuple(sorted([str(atom.get_parent().resname), str(x.get_parent().resname)])) for x in interact])\n return interactions", "def overlap_similarity(box, other_boxes):\n return jaccard(np.expand_dims(box, axis=0), other_boxes).squeeze(0)", "def sanitizeOEMolecule(molecule):\n mol_copy = molecule.CreateCopy()\n\n # Check if the molecule has 3D coordinates\n if not oechem.OEGetDimensionFromCoords(mol_copy):\n raise ValueError(\"The molecule coordinates are set to zero\")\n # Check if the molecule has hydrogens\n if not oechem.OEHasExplicitHydrogens(mol_copy):\n oechem.OEAddExplicitHydrogens(mol_copy)\n # Check if the molecule has assigned aromaticity\n if not mol_copy.HasPerceived(oechem.OEPerceived_Aromaticity):\n oechem.OEAssignAromaticFlags(mol_copy, oechem.OEAroModelOpenEye)\n\n # Check for any missing and not unique atom names.\n # If found reassign all of them as Tripos atom names\n\n atm_list_names = []\n\n for atom in mol_copy.GetAtoms():\n atm_list_names.append(atom.GetName())\n\n reassign_names = False\n\n 
if len(set(atm_list_names)) != len(atm_list_names):\n reassign_names = True\n\n if '' in atm_list_names:\n reassign_names = True\n\n if reassign_names:\n oechem.OETriposAtomNames(mol_copy)\n\n return mol_copy", "def elementCom(Paire1,Paire2) :\n elem_com=\" \"\n elementPaire1=\" \"\n elementPaire2=\" \"\n p1 = Paire1[1]\n p2 = Paire2[1]\n if p1 != p2 :\n for i in range (2):\n for j in range (2):\n if p1[i] == p2[j]:\n elem_com = p1[i] \n elementPaire1 = p1[1-i] \n elementPaire2 = p2[1-j] \n return elem_com, elementPaire1, elementPaire2", "def test_cx_equivalence_1cx(self, seed=1):\n state = np.random.default_rng(seed)\n rnd = 2 * np.pi * state.random(size=12)\n\n qr = QuantumRegister(2, name=\"q\")\n qc = QuantumCircuit(qr)\n\n qc.u(rnd[0], rnd[1], rnd[2], qr[0])\n qc.u(rnd[3], rnd[4], rnd[5], qr[1])\n\n qc.cx(qr[1], qr[0])\n\n qc.u(rnd[6], rnd[7], rnd[8], qr[0])\n qc.u(rnd[9], rnd[10], rnd[11], qr[1])\n\n sim = UnitarySimulatorPy()\n unitary = execute(qc, sim).result().get_unitary()\n self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 1)\n self.assertTrue(Operator(two_qubit_cnot_decompose(unitary)).equiv(unitary))", "def __eq__(self, other):\n if not isinstance(other, PantsMappingClass):\n # print(\"A\")\n return False\n # if other._pants_decomposition != self._pants_decomposition:\n # print(\"B\")\n # return False\n # print(\"C\")\n return (self * other.inverse()).is_identity()", "def test_to_from_oemol(self, molecule):\n from openforcefield.utils.toolkits import UndefinedStereochemistryError\n\n # Known failures raise an UndefinedStereochemistryError, but\n # the round-trip SMILES representation with the OpenEyeToolkit\n # doesn't seem to be affected.\n # ZINC test set known failures.\n # known_failures = {'ZINC05964684', 'ZINC05885163', 'ZINC05543156', 'ZINC17211981',\n # 'ZINC17312986', 'ZINC06424847', 'ZINC04963126'}\n\n undefined_stereo = molecule.name in openeye_drugbank_undefined_stereo_mols\n\n toolkit_wrapper = OpenEyeToolkitWrapper()\n\n oemol = molecule.to_openeye()\n molecule_smiles = molecule.to_smiles(toolkit_registry=toolkit_wrapper)\n\n # First test making a molecule using the Molecule(oemol) method\n\n # If this is a known failure, check that it raises UndefinedStereochemistryError\n # and proceed with the test ignoring it.\n test_mol = None\n if undefined_stereo:\n with pytest.raises(UndefinedStereochemistryError):\n Molecule(oemol)\n test_mol = Molecule(oemol, allow_undefined_stereo=True)\n else:\n test_mol = Molecule(oemol)\n\n test_mol_smiles = test_mol.to_smiles(toolkit_registry=toolkit_wrapper)\n assert molecule_smiles == test_mol_smiles\n\n # Check that the two topologies are isomorphic.\n assert_molecule_is_equal(\n molecule,\n test_mol,\n \"Molecule.to_openeye()/Molecule(oemol) round trip failed\",\n )\n\n # Second, test making a molecule using the Molecule.from_openeye(oemol) method\n\n # If this is a known failure, check that it raises UndefinedStereochemistryError\n # and proceed with the test.\n if undefined_stereo:\n with pytest.raises(UndefinedStereochemistryError):\n Molecule.from_openeye(oemol)\n test_mol = Molecule.from_openeye(oemol, allow_undefined_stereo=True)\n else:\n test_mol = Molecule.from_openeye(oemol)\n\n test_mol_smiles = test_mol.to_smiles(toolkit_registry=toolkit_wrapper)\n assert molecule_smiles == test_mol_smiles\n\n # Check that the two topologies are isomorphic.\n assert_molecule_is_equal(\n molecule, test_mol, \"Molecule.to_openeye()/from_openeye() round trip failed\"\n )", "def _similar_stereo(geo, geoi, arg=None):\n 
_ = arg # Added just to make wrapper function work\n ich = inchi(geo)\n ichi = inchi(geoi)\n return bool(ich == ichi)", "def test_unique_atom_names(self, molecule):\n # The dataset we load in has atom names, so let's strip them first\n # to ensure that we can fail the uniqueness check\n for atom in molecule.atoms:\n atom.name = \"\"\n assert not (molecule.has_unique_atom_names)\n # Then genreate unique atom names using the built in algorithm\n molecule.generate_unique_atom_names()\n # Check that the molecule has unique atom names\n assert molecule.has_unique_atom_names\n # Check molecule.has_unique_atom_names is working correctly\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names\n molecule.atoms[1].name = molecule.atoms[0].name # no longer unique\n assert (\n len(set([atom.name for atom in molecule.atoms])) == molecule.n_atoms\n ) == molecule.has_unique_atom_names\n assert all(\"x\" in a.name for a in molecule.atoms)", "def test_xyz_to_pybel_mol(self):\n pbmol1 = converter.xyz_to_pybel_mol(self.xyz1['dict'])\n pbmol2 = converter.xyz_to_pybel_mol(self.xyz5['dict'])\n pbmol3 = converter.xyz_to_pybel_mol(self.xyz2['dict'])\n pbmol4 = converter.xyz_to_pybel_mol(self.xyz6['dict'])\n\n # These tests check that the atoms we expect appear in the correct order:\n\n self.assertEqual(pbmol1.atoms[0].idx, 1) # C\n self.assertAlmostEqual(pbmol1.atoms[0].atomicmass, 12.0107, 2)\n self.assertEqual(pbmol1.atoms[0].coords, (0.0, 0.0, 0.0))\n self.assertAlmostEqual(pbmol1.atoms[1].atomicmass, 1.00794, 2) # H\n self.assertEqual(pbmol1.atoms[1].coords, (0.6300326, 0.6300326, 0.6300326))\n\n self.assertAlmostEqual(pbmol2.atoms[0].atomicmass, 15.9994, 2) # O\n self.assertEqual(pbmol2.atoms[0].coords, (1.1746411, -0.15309781, 0.0))\n self.assertAlmostEqual(pbmol2.atoms[1].atomicmass, 14.0067, 2) # N\n self.assertEqual(pbmol2.atoms[1].coords, (0.06304988, 0.35149648, 0.0))\n self.assertAlmostEqual(pbmol2.atoms[2].atomicmass, 12.0107, 2) # C\n self.assertEqual(pbmol2.atoms[2].coords, (-1.12708952, -0.11333971, 0.0))\n self.assertAlmostEqual(pbmol2.atoms[3].atomicmass, 1.00794, 2) # H\n self.assertEqual(pbmol2.atoms[3].coords, (-1.93800144, 0.60171738, 0.0))\n\n self.assertAlmostEqual(pbmol3.atoms[0].atomicmass, 32.065, 2) # S\n self.assertEqual(pbmol3.atoms[0].coords, (1.02558264, -0.04344404, -0.07343859))\n self.assertAlmostEqual(pbmol3.atoms[1].atomicmass, 15.9994, 2) # O\n self.assertEqual(pbmol3.atoms[1].coords, (-0.25448248, 1.10710477, 0.18359696))\n self.assertAlmostEqual(pbmol3.atoms[2].atomicmass, 14.0067, 2) # N\n self.assertEqual(pbmol3.atoms[2].coords, (-1.30762173, 0.15796567, -0.1048929))\n self.assertAlmostEqual(pbmol3.atoms[3].atomicmass, 12.0107, 2) # C\n self.assertEqual(pbmol3.atoms[3].coords, (-0.49011438, -1.0370438, 0.15365747))\n self.assertAlmostEqual(pbmol3.atoms[-1].atomicmass, 1.00794, 2) # H\n self.assertEqual(pbmol3.atoms[-1].coords, (-1.43009127, 0.23517346, -1.11797908))\n\n self.assertAlmostEqual(pbmol4.atoms[0].atomicmass, 32.065, 2) # S\n self.assertEqual(pbmol4.atoms[0].coords, (-0.06618943, -0.12360663, -0.07631983))\n self.assertAlmostEqual(pbmol4.atoms[3].atomicmass, 14.0067, 2) # N\n self.assertEqual(pbmol4.atoms[3].coords, (0.01546439, -1.54297548, 0.44580391))", "def test_to_networkx(self, molecule):\n graph = molecule.to_networkx()\n\n assert graph.number_of_nodes() == molecule.n_atoms\n assert graph.number_of_edges() == molecule.n_bonds\n\n for bond in molecule.bonds:\n edge = 
graph.get_edge_data(bond.atom1_index, bond.atom2_index)\n\n for attr in [\"stereochemistry\", \"bond_order\", \"is_aromatic\"]:\n assert edge[attr] == getattr(bond, attr)\n\n for node_index, node in graph.nodes(data=True):\n atom = molecule.atom(node_index)\n\n for attr in [\n \"atomic_number\",\n \"is_aromatic\",\n \"stereochemistry\",\n \"formal_charge\",\n ]:\n assert node[attr] == getattr(atom, attr)" ]
[ "0.6269365", "0.62579566", "0.6199401", "0.619143", "0.6133642", "0.6082986", "0.6040619", "0.59960866", "0.59847355", "0.5943765", "0.59333813", "0.58742005", "0.5850207", "0.5810267", "0.5768853", "0.5726057", "0.5721131", "0.5681214", "0.5680995", "0.5673212", "0.5653497", "0.5653488", "0.56251657", "0.56207335", "0.5604927", "0.55940914", "0.55410546", "0.55398095", "0.5536452", "0.5533132", "0.54951566", "0.5491809", "0.5473222", "0.54604864", "0.54504895", "0.5431411", "0.5416524", "0.54158205", "0.54007095", "0.5390873", "0.5390873", "0.53696656", "0.5368061", "0.5358693", "0.5349826", "0.53413635", "0.53394336", "0.533567", "0.5331075", "0.5327413", "0.5318313", "0.5317622", "0.53003645", "0.52967346", "0.529437", "0.5293386", "0.5291935", "0.5288568", "0.5287322", "0.52867633", "0.52647454", "0.52590936", "0.52571565", "0.52520794", "0.52485996", "0.5234727", "0.52343816", "0.5219129", "0.52191085", "0.52165306", "0.52137935", "0.5211939", "0.5211634", "0.52043676", "0.5203022", "0.52024364", "0.52023435", "0.519688", "0.51928306", "0.5192483", "0.51868165", "0.5182768", "0.5179325", "0.51784194", "0.5172675", "0.51716065", "0.5171358", "0.51703095", "0.5168389", "0.51615846", "0.5159875", "0.51589835", "0.5150593", "0.5150078", "0.514729", "0.51467663", "0.51450187", "0.5135738", "0.5133618", "0.513113", "0.5124871" ]
0.0
-1
Calls read_coordinates and frac_to_cart for every name in fragmentnames (using the name as the directory path) and returns a dictionary where every return value of frac_to_cart is keyed to its fragment name.
def read_multiple_coordinates(fragmentnames): fragdict = {} for name in fragmentnames: path = name + '/' cell, pos = read_coordinates(path) atomlist = frac_to_cart(cell, pos) atomdict = {} for atom in atomlist: atomdict[atom[0][0]] = atom[1] fragdict[name] = atomlist return fragdict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_files():\n with open(\"CvixLerC9.loc\") as loc, open(\"CvixLerC9.qua\") as qua:\n qua_file = (qua.read().split('\\n'))\n qua_file = qua_file[8:-1]\n new_qua = []\n for q in qua_file:\n new_qua.append(q.split('\\t')) # [['1', '1.279502474'], ['3', '0.303712231']....]\n\n new_loc = {}\n header = ''\n read = False\n for i in loc:\n i = i.replace(\"\\n\", '')\n if read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n if \"(a,b)\" in i:\n header = i\n read = True\n else:\n read = False\n\n elif read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n\n return new_loc, new_qua", "def readSurfaceGeo(b18path):\n if not os.path.isfile(b18path):\n print(\"b18 building file not found! Please check!\")\n pass\n else:\n b18file = open(b18path,\"r\")\n b18data = b18file.readlines()\n srfGeoBlock = getDataParagraph(\"_EXTENSION_BuildingGeometry_START_\", \"_EXTENSION_BuildingGeometry_END_\", b18data)\n #now get vertex's coordinate xyz\n vertexdict = dict() #{vertexID:[x,y,z]}\n srfbasicinfo = dict() #{surfaceID:[vertexID]}\n srfInfo = dict() #{surfaceID:[vertices coordinate]}\n for line in srfGeoBlock:\n dline = line.split()\n if \"vertex\" in dline:\n vertexdict[int(dline[1])] = [float(xyz) for xyz in dline[2:]] #{vertexID:[x,y,z]}\n if \"wall\" in dline or \"window\" in dline or \"floor\" in dline or \"ceiling\" in dline or \"roof\" in dline:\n srfbasicinfo[int(dline[1])] = [[int(nrID) for nrID in dline[2:]],dline[0]] #{surfaceID:[[vertexID],construction]}\n #print srfbasicinfo[int(dline[1])]\n for key in srfbasicinfo.keys():\n srfInfo[key] = []\n for vertices in srfbasicinfo[key][0]:\n srfInfo[key].append(vertexdict[vertices])\n b18file.close()\n return srfInfo,vertexdict,srfbasicinfo\n #actually only need srfInfo\n #just getting everything out for now, incase will need to use those", "def _get_positions(self):\n position_map = dict()\n # Assumes that the positions are indexed in the order of Row-->Well-->FOV\n for well in self.wells:\n for pos in self.store[well].attrs.get('well').get('images'):\n pos_name = pos['path']\n # pos name is 'Pos_xxx'\n pos_idx = int(pos_name.split('_')[-1])\n position_map[pos_idx] = {'name': pos_name, 'well': well}\n return position_map", "def load_fragGC_pickle(inFH):\n fojb = pickle.load(inFH)\n\n d = dict()\n for x in fojb:\n taxon_name = x[0]\n d[taxon_name] = dict()\n d[taxon_name]['fragLength'] = []\n d[taxon_name]['fragGC'] = []\n \n for scaf,v in x[1].items(): \n for z in v:\n # fragStart, fragLength, fragGC\n d[taxon_name]['fragLength'].append(z[1])\n d[taxon_name]['fragGC'].append(z[2]) \n return d", "def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions", "def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions", "def _calculate_fragmentation(buddyinfo_output):\n\n frag_dict = {}\n \n for line in buddyinfo_output:\n node, frag_info = line.split(',')\n zone, free_pages = frag_info.split()[1], frag_info.split()[2:]\n\n # Convert all the strings to ints\n free_pages = map(int, free_pages)\n\n frag_dict.setdefault(node, {})\n 
frag_dict[node][zone] = {}\n\n total_free_pages = 0\n\n for order, free_count in enumerate(free_pages):\n total_free_pages += (2**order) * free_count\n\n for order, free_count in enumerate(free_pages):\n frag_pct = 0\n\n # really inefficient, but who cares\n for _order, _free_count in enumerate(free_pages[order:]):\n frag_pct += (2**(_order + order)) * _free_count\n \n frag_pct = float(total_free_pages - frag_pct)/total_free_pages\n \n frag_dict[node][zone][order] = (free_count, frag_pct)\n\n return frag_dict", "def build_dict(infile):\n\n coords = {}\n sizes = {}\n\n for line in infile:\n fields = line.split()\n ref_st, ref_end, qry_st, qry_end = map(int, fields[0:4])\n qry_chr, qry_size = fields[14], int(fields[8])\n if qry_chr not in coords:\n coords[qry_chr] = {0:[], 1:[]} # 0=ref; 1=qry\n sizes[qry_chr] = qry_size\n coords[qry_chr][0].append([ref_st, ref_end])\n coords[qry_chr][1].append(sorted([qry_st, qry_end]))\n \n return coords, sizes", "def create_chunks(file_names):\n\n\tnew_chunks = []\n\n\tfor name in file_names:\n\n\t\t# Find the .inf file and read the details stored within\n\t\ttry:\n\t\t\tdetails = open(name + suffix + 'inf', 'r').readline()\n\t\texcept IOError:\n\n\t\t\ttry:\n\t\t\t\tdetails = open(name + suffix + 'INF', 'r').readline()\n\t\t\texcept IOError:\n\t\t\t\tprint(\"Couldn't open information file, %s\" % name+suffix+'inf')\n\t\t\t\tsys.exit()\n\n\t\t# Parse the details\n\t\tdetails = [string.rstrip(details)]\n\n\t\tsplitters = [' ', '\\011']\n\n\t\t# Split the details up where certain whitespace characters occur\n\t\tfor s in splitters:\n\n\t\t\tnew_details = []\n\n\t\t\t# Split up each substring (list entry)\n\t\t\tfor d in details:\n\n\t\t\t\tnew_details = new_details + string.split(d, s)\n\n\t\t\tdetails = new_details\n\n\t\t# We should have details about the load and execution addresses\n\n\t\t# Open the file\n\t\ttry:\n\t\t\tin_file = open(name, 'rb')\n\t\texcept IOError:\n\t\t\tprint(\"Couldn't open file, %s\" % name)\n\t\t\tsys.exit()\n\n\t\t# Find the length of the file (don't rely on the .inf file)\n\t\tin_file.seek(0, 2)\n\t\tlength = in_file.tell()\n\t\tin_file.seek(0, 0)\n\n\t\t# Examine the name entry and take the load and execution addresses\n\t\tdot_at = string.find(details[0], '.')\n\t\tif dot_at != -1:\n\t\t\treal_name = details[0][dot_at+1:]\n\t\t\tload, exe = details[1], details[2]\n\t\telse:\n\t\t\treal_name = get_leafname(name)\n\t\t\tload, exe = details[0], details[1]\n\n\t\tload = hex2num(load)\n\t\texe = hex2num(exe)\n\n\t\tif load == None or exe == None:\n\t\t\tprint('Problem with %s: information is possibly incorrect.' 
% name+suffix+'inf')\n\t\t\tsys.exit()\n\n\t\t# Reset the block number to zero\n\t\tblock_number = 0\n\n\t\t# Long gap\n\t\tgap = 1\n\t\n\t\t# Write block details\n\t\twhile True:\n\t\t\tblock, last = write_block(in_file, real_name, load, exe, length, block_number)\n\n\t\t\tif gap == 1:\n\t\t\t\tnew_chunks.append((0x110, number(2,0x05dc)))\n\t\t\t\tgap = 0\n\t\t\telse:\n\t\t\t\tnew_chunks.append((0x110, number(2,0x0258)))\n\n\t\t\t# Write the block to the list of new chunks\n\n\t\t\t# For old versions, just write the block\n\t\t\tif UEF_major == 0 and UEF_minor < 9:\n\t\t\t\tnew_chunks.append((0x100, block))\n\t\t\telse:\n\t\t\t\tnew_chunks.append((0x100, block))\n\n\t\t\tif last == 1:\n\t\t\t\tbreak\n\n\t\t\t# Increment the block number\n\t\t\tblock_number = block_number + 1\n\n\t\t# Close the input file\n\t\tin_file.close()\n\n\t# Write some finishing bytes to the list of new chunks\n#\tnew_chunks.append((0x110, number(2,0x0258)))\n#\tnew_chunks.append((0x112, number(2,0x0258)))\n\n\t# Return the list of new chunks\n\treturn new_chunks", "def fragment_to_keys(fragment):\n return fragment.strip(\"#\").strip(\"/\").split(\"/\")", "def extract_segment_props(self):\n props = {}\n num_segments = int(self.general['force-scan-series.force-segments.count'])\n for segment in range(num_segments):\n segment_props = ForceArchive(self.file_path).read_properties(\n 'segments/{}/segment-header.properties'.format(segment))\n # noinspection SpellCheckingInspection\n name_jpk = segment_props['force-segment-header.name.name'].replace('-cellhesion200', '')\n normal_name = self.convert_segment_name(name_jpk)\n props[normal_name] = segment_props\n props[normal_name][\"name_jpk\"] = name_jpk\n props[normal_name][\"name\"] = normal_name\n props[normal_name][\"segment_number\"] = str(segment)\n\n return props", "def load_store(filename):\n result = {}\n # Open file\n with open(filename, 'r') as file:\n # Read first character\n char = file.read(1)\n while char:\n # ; defines a new point\n if char == \";\":\n # The next characters are of the form (x,y,e)\n char = file.read(1) # left bracket\n\n char = file.read(1) # x\n x = char\n char = file.read(1) # comma or second digit\n\n # This means x is a two digit number\n if char != ',':\n # Add the second digit and then cast\n x += char\n x = int(x)\n char = file.read(1) # Now read the comma\n else:\n # One digit number so just cast\n print(char)\n x = int(x)\n \n # Follow a similar process for y and e\n char = file.read(1) # y\n\n y = char\n char = file.read(1) # comma or second digit\n if char != ',':\n y += char\n y = int(y)\n char = file.read(1)\n else:\n y = int(y)\n\n char = file.read(1) # encoded product\n e = char\n char = file.read(1)\n if char != ')':\n e += char\n e = int(e)\n char = file.read(1)\n else:\n e = int(e)\n \n # Add to the dictionary\n coords = (x,y)\n result[(x,y)] = e\n\n char = file.read(1)\n return result", "def read_data(self, path, **kwargs):\n\n from glob import glob\n import os\n sc = self.sc\n pdt_lc = np.dtype([('pos', 'f4', 3),('vel', 'f4', 3)])\n\n blockids = kwargs['blockids']\n\n def set_particle_IDs_partition(index, iterator): \n \"\"\"\n Use the aggregate partition counts to set monotonically increasing \n particle indices\n \"\"\"\n p_counts = partition_counts.value\n local_index = 0\n start_index = sum([p_counts[i] for i in range(index)])\n for arr in iterator:\n arr['iOrder'] = range(start_index + local_index, start_index + local_index + len(arr))\n arr['iGroup'] = loc_to_glob_map_b.value[index]\n local_index += len(arr)\n 
yield arr\n \n def read_file(index, i, chunksize=102400): \n for part,filename in i:\n timein = time.time()\n with open(filename,'rb') as f: \n header = f.read(62500)\n while True:\n chunk = f.read(chunksize*24)\n if len(chunk): \n p_arr = np.frombuffer(chunk, pdt_lc)\n new_arr = np.zeros(len(p_arr), dtype=pdt)\n new_arr['pos'] = p_arr['pos']\n yield new_arr\n else: \n t_elapsed = time.time()-timein\n rate = os.path.getsize(filename)/1e6/t_elapsed\n print 'spark_fof: reading %s took %d seconds in partition %d, %f MB/sec'%(filename, t_elapsed, index, rate)\n break\n \n # determine which files to read\n get_block_ids = re.compile('blk\\.(\\d+)\\.(\\d+)\\.(\\d+)?')\n\n if blockids is None: \n files = glob(os.path.join(self.path,'*/*'))\n else: \n files = []\n for dirname, subdirlist, filelist in os.walk(path):\n try: \n dirnum = int(os.path.basename(dirname))\n if dirnum in blockids: \n for f in filelist:\n ids = get_block_ids.findall(f)\n if len(ids) > 0:\n if all(int(x) in blockids for x in ids[0]):\n files.append(os.path.join(dirname,f))\n except ValueError: \n pass\n\n files.sort()\n nfiles = len(files) \n self.nPartitions = nfiles\n\n print 'spark_fof: Number of input files: ', nfiles\n\n # get particle counts per partition\n nparts = {i:_get_nparts(filename,62500,pdt_lc.itemsize) for i,filename in enumerate(files)}\n\n print 'spark_fof: Total number of particles: ', np.array(nparts.values()).sum()\n \n # set up the map from x,y,z to partition id \n ids = map(lambda x: tuple(map(int, get_block_ids.findall(x)[0])), files)\n ids_map = {x:i for i,x in enumerate(ids)}\n self.ids_map = ids_map\n loc_to_glob_map_b = self.local_to_global_map\n \n ids_map_b = sc.broadcast(ids_map)\n loc_to_glob_map_b = sc.broadcast(loc_to_glob_map_b)\n\n partition_counts = sc.broadcast(nparts)\n\n rec_rdd = (sc.parallelize(zip(ids,files), numSlices=self.nPartitions)\n .map(lambda (id,filename): (ids_map_b.value[id],filename))\n .partitionBy(self.nPartitions).cache()\n .mapPartitionsWithIndex(read_file, preservesPartitioning=True)\n .mapPartitionsWithIndex(set_particle_IDs_partition, \n preservesPartitioning=True))\n \n return rec_rdd", "def parse_triangle_files(self):\n nodes = {}\n boundary_nodes = []\n\n # parse node file into nodes\n with open(self.files['node']) as node_file:\n header = True\n for line in node_file:\n if header:\n header = False\n continue\n content = list(filter(lambda a: bool(a), line.split(' '))) # pylint: disable=W0108\n if not '#' in content[0]:\n is_boundary = content[3] == '1\\n'\n nodes[int(content[0])] = {\n 'id': int(content[0]),\n 'coords': [int(content[1]), int(content[2])],\n 'distance': 0 if is_boundary else None,\n 'relations': [],\n 'level_cycles': [], # ids of any level cycles this node is a part of\n 'level_paths': [], # ids of any level paths this node is a part of\n 'is_root_element': False,\n 'betweener_paths': []\n }\n if is_boundary:\n boundary_nodes.append(int(content[0]))\n node_file.close()\n\n # parse edge files into node relations\n with open(self.files['edge']) as edge_file:\n header = True\n for line in edge_file:\n if header:\n header = False\n continue\n content = list(filter(bool, line.split(' ')))\n if not '#' in content[0]:\n nodes[int(content[1])]['relations'].append(int(content[2]))\n nodes[int(content[2])]['relations'].append(int(content[1]))\n edge_file.close()\n\n # with open(self.files['ele']) as ele_file:\n # header = True\n # for line in edge_file:\n # if header:\n # header = False\n # continue\n # content = list(filter(bool, line.split(' ')))\n # 
if not '#' in content[0]:\n # nodes[int(content[1])]['relations'].append(int(content[2]))\n # nodes[int(content[2])]['relations'].append(int(content[1]))\n # edge_file.close()\n\n # sorts relations clockwise\n for node_id, node in nodes.items():\n nodes[node_id]['relations'] = sorted(node['relations'], key=(\n lambda related_node_id: (\n self.calculate_clockwise_angle_and_distance(node, nodes.get(related_node_id)) # pylint: disable=W0640\n )\n ))\n\n levels = self.get_levels(nodes, boundary_nodes)\n\n for level in levels:\n for node_id in level['node_ids']:\n self.identify_special_nodes(nodes, node_id)\n\n return nodes, boundary_nodes, levels", "def _txt_to_basis_dict(basis_txt):\n\n symbol = basis_txt[0].split()[0]\n\n def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n basis_pure = basis_txt[1:]\n\n section_marks = []\n for i, line in enumerate(basis_pure):\n if not is_number(line.split()[0]):\n section_marks.append(i)\n\n shells = []\n for i in section_marks[:-1]:\n type, n_func, _ = basis_pure[i].split()\n n_func = int(n_func)\n\n if type.upper() in ['SP']:\n p_exponent, con_coefficients, p_con_coefficients = np.array([line.split()\n for line in basis_pure[i + 1:i + n_func + 1]],\n dtype=float).T\n else:\n p_exponent, con_coefficients = np.array([line.split()\n for line in basis_pure[i + 1:i + n_func + 1]],\n dtype=float).T\n p_con_coefficients = np.zeros_like(p_exponent)\n\n\n shells.append({'shell_type': type,\n 'p_exponents': list(p_exponent),\n 'con_coefficients': list(con_coefficients),\n 'p_con_coefficients': list(p_con_coefficients)})\n\n return {'symbol': symbol,\n 'shells': shells}", "def get_expressions( useful_genes, expr_file):\n\n\t#open expressions file\n\texpression_stream = gzip.open(expr_file, \"r\")\n \n\t#reset line number\n\tlinenum = 0\n\n\texpressions_dict = {}\n\n\texpressions_header = [] \n\n\t#initialize progress bar\n\tfor line in expression_stream:\n\n\t\tlinenum += 1\n \n\t\t#skip first line, as those are the labels\n\n\n\t\tif isinstance(line, bytes) and not isinstance(line, str):\n\n\t\t\t\t\tline = line.decode()\n\t\tif line[0] != \"#\":\n\n\t\t\t#parse line\n\t\t\tline_content = line.rstrip().split(\",\")\n\t\t\t#if variant pos and gene match some value\n\t\t\tif line_content[0].split(\".\")[0] in useful_genes :\n\n\t\t\t\t#save the expression data for all the samples\n\n\t\t\t\tvar_expr = line_content[1:]\n\t\t\t\texpressions_dict[line_content[0].split(\".\")[0]] = var_expr\n\t\t\t\t#processed another variant\n\n\n\n\n\t\t\telif line.split(',')[0] == 'Name':\n \n\t\t\t\t#this is our header\n\t\t\t\texpressions_header = line.replace(\"\\n\",\"\").split(',')\n\n\treturn [expressions_dict, expressions_header]", "def _read_expression_direct(cls):\n\n expression_data = {}\n expression_columns = cls._get_columns(EXPRESSION_MANIFEST)\n expression_psvs = cls._get_component_psvs(EXPRESSION_MANIFEST)\n\n for expression_psv in expression_psvs:\n for row in gzip.GzipFile(fileobj=io.BytesIO(cls._read_s3_url(expression_psv))):\n row_dict = dict(zip(expression_columns, row.strip().split(b'|')))\n expression_data.setdefault(\n row_dict[\"cellkey\"].decode(), {})[row_dict[\"featurekey\"].decode()] = \\\n float(row_dict[\"exrpvalue\"])\n\n return expression_data", "def extract_energies(self):\n path2save = 'Analysis/energies.pkl'\n #check, if I have to extract them, or they are already extracted. This the latter case, load them.\n if os.path.exists(path2save):\n print(\"extraction of the polarizaion has already been done. 
Loading polarizations from from pkl\")\n # TODO delete to check if exists above and do load without doing\n with open('Analysis/energies.pkl', 'rb') as fid:\n [self.E0_plus, self.E0_0, self.E0_minus,\n self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.n_mols] \\\n = pickle.load(fid)\n else:\n print('Energies are being extracting and will be saved to pkl')\n for i, radius in enumerate(self.radii):\n self.E_sd_plus[radius] = {}\n self.E_sd_0[radius] = {}\n self.E_sd_minus[radius] = {}\n\n self.E_sum_env_plus[radius] = {}\n self.E_sum_env_0[radius] = {}\n self.E_sum_env_minus[radius] = {}\n\n self.V0_plus[radius] = {}\n self.V0_0[radius] = {}\n self.V0_minus[radius] = {}\n\n self.E_env_plus[radius] = {}\n self.E_env_0[radius] = {}\n self.E_env_minus[radius] = {}\n\n self.V_env_plus[radius] = {}\n self.V_env_0[radius] = {}\n self.V_env_minus[radius] = {}\n\n self.n_mols[radius] = {}\n\n for j, core_id in enumerate(self.core_ids):\n #path2file_ip = \\\n # 'Analysis/' + self.dict_radii_folder_IP[radius] + '/Matrix-analysis-IP_' \\\n # + self.mol_name + '-Mol_' + str(core_id) + '_C_1.yml'\n\n path2file_ip = \\\n 'Analysis/IP_by_radius/' + self.dict_radii_folder_IP[radius]\\\n + '/Matrix-analysis-IP_' + self.mol_name + '.yml' # new\n path2file_ea = \\\n 'Analysis/EA_by_radius/' + self.dict_radii_folder_EA[radius]\\\n + '/Matrix-analysis-EA_' + self.mol_name + '.yml'\n\n # IP. Charged states: \"+\" and \"0\"\n with open(path2file_ip) as fid:\n ip_dict = yaml.load(fid, Loader=yaml.SafeLoader)\n with open(path2file_ea) as fid:\n ea_dict = yaml.load(fid, Loader=yaml.SafeLoader)\n\n\n # number of mols extraction\n self.n_mols[radius][core_id] = len(ip_dict[int(core_id)]['energies'])\n\n # sd extraction. 
E_sd = E_0 + V_0\n self.E_sd_plus[radius][core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged'] #new\n self.E_sd_0[radius][core_id] = ip_dict[core_id]['energies'][int(core_id)]['total_e_uncharged']\n self.E_sd_minus[radius][core_id] = ea_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged']\n # E_0\n self.E0_plus[core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged_vacuum']\n self.E0_0[core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_uncharged_vacuum']\n self.E0_minus[core_id] = ea_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged_vacuum']\n # # E_0_vacuum\n # self.E0_plus_vacuum[core_id] =\n # self.E0_0_vacuum[core_id] =\n # self.E0_minus_vacuum[core_id] =\n\n\n # V_0\n self.V0_plus[radius][core_id] = self.E_sd_plus[radius][core_id] - self.E0_plus[core_id]\n self.V0_0[radius][core_id] = self.E_sd_0[radius][core_id] - self.E0_0[core_id]\n self.V0_minus[radius][core_id] = self.E_sd_minus[radius][core_id] - self.E0_minus[core_id]\n\n # E_sum_env = \\sum_i\\ne 0 E_i \\sum_{j=0}^{N} V_{ij}\n ip_env_sub_dict = ip_dict[int(core_id)]['energies']#new\n del ip_env_sub_dict[int(core_id)]\n # del ip_env_sub_dict['info'] # TODO: do I need to dlt this?\n\n\n ea_env_sub_dict = ea_dict[int(core_id)]['energies'] # new\n del ea_env_sub_dict[int(core_id)]\n # del ea_env_sub_dict['info'] # TODO: do I need to dlt this?\n\n # tmp = ip_env_sub_dict['energies'][]\n\n list_total_e_env_plus = [ip_env_sub_dict[env_id]['total_e_charged'] for env_id in ip_env_sub_dict]\n self.E_sum_env_plus[radius][int(core_id)] = np.sum(list_total_e_env_plus) if not list_total_e_env_plus == [] else 0.0\n list_total_e_env_0 = [ip_env_sub_dict[env_id]['total_e_uncharged'] for env_id in ip_env_sub_dict]\n self.E_sum_env_0[radius][int(core_id)] = np.sum(list_total_e_env_0) if not list_total_e_env_0 == [] else 0.0\n list_total_e_env_minus = [ea_env_sub_dict[env_id]['total_e_charged'] for env_id in ea_env_sub_dict]\n self.E_sum_env_minus[radius][int(core_id)] = np.sum(list_total_e_env_minus) if not list_total_e_env_minus == [] else 0.0\n\n # E_env = \\sum_i \\ne 0 E_i. sum of DFT env energies.\n list_vacuum_env_e_plus = [ip_env_sub_dict[env_id]['total_e_charged_vacuum'] for env_id in ip_env_sub_dict]\n self.E_env_plus[radius][int(core_id)] = np.sum(list_vacuum_env_e_plus) if not list_vacuum_env_e_plus == [] else 0.0\n list_vacuum_env_e_0 = [ip_env_sub_dict[env_id]['total_e_uncharged_vacuum'] for env_id in ip_env_sub_dict]\n self.E_env_0[radius][int(core_id)] = np.sum(list_vacuum_env_e_0) if not list_vacuum_env_e_0 == [] else 0.0\n list_vacuum_env_e_minus = [ea_env_sub_dict[env_id]['total_e_charged_vacuum'] for env_id in ea_env_sub_dict]\n self.E_env_minus[radius][int(core_id)] = np.sum(list_vacuum_env_e_minus) if not list_vacuum_env_e_minus == [] else 0.0\n\n # V_env = 0.5 (\\sum_{i=1} \\sum_{j=1} V_{ij}). classical interaction of env. 
mols\n self.V_env_plus[radius][core_id] = 0.5 * (self.E_sum_env_plus[radius][core_id]\n - self.E_env_plus[radius][core_id]\n - self.V0_plus[radius][core_id])\n\n self.V_env_0[radius][core_id] = 0.5 * (self.E_sum_env_0[radius][core_id]\n - self.E_env_0[radius][core_id]\n - self.V0_0[radius][core_id])\n\n self.V_env_minus[radius][core_id] = 0.5 * (self.E_sum_env_minus[radius][core_id]\n - self.E_env_minus[radius][core_id]\n - self.V0_minus[radius][core_id])\n\n\n append_dict_with_mean(self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.E0_plus, self.E0_0, self.E0_minus,\n self.n_mols) # compute and add \"mean\" to all mentioned dicts\n\n with open('Analysis/energies.pkl', 'wb') as fid:\n pickle.dump([self.E0_plus, self.E0_0, self.E0_minus,\n self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.n_mols],\n fid)\n print(\"Energies are extracted and dumped to pkl\")", "def get_positions(self) -> Dict[str, int]:\n\n with self._lock:\n return {\n name: self._return_factor * i\n for name, i in self._current_positions.items()\n }", "def read_geometry(filepath, read_metadata=False, read_stamp=False):\n volume_info = OrderedDict()\n\n TRIANGLE_MAGIC = 16777214\n QUAD_MAGIC = 16777215\n NEW_QUAD_MAGIC = 16777213\n with open(filepath, \"rb\") as fobj:\n magic = _fread3(fobj)\n if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC): # Quad file\n nvert = _fread3(fobj)\n nquad = _fread3(fobj)\n (fmt, div) = (\">i2\", 100.) if magic == QUAD_MAGIC else (\">f4\", 1.)\n coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float) / div\n coords = coords.reshape(-1, 3)\n quads = _fread3_many(fobj, nquad * 4)\n quads = quads.reshape(nquad, 4)\n #\n # Face splitting follows\n #\n faces = np.zeros((2 * nquad, 3), dtype=np.int)\n nface = 0\n for quad in quads:\n if (quad[0] % 2) == 0:\n faces[nface] = quad[0], quad[1], quad[3]\n nface += 1\n faces[nface] = quad[2], quad[3], quad[1]\n nface += 1\n else:\n faces[nface] = quad[0], quad[1], quad[2]\n nface += 1\n faces[nface] = quad[0], quad[2], quad[3]\n nface += 1\n\n elif magic == TRIANGLE_MAGIC: # Triangle file\n create_stamp = fobj.readline().rstrip(b'\\n').decode('utf-8')\n fobj.readline()\n vnum = np.fromfile(fobj, \">i4\", 1)[0]\n fnum = np.fromfile(fobj, \">i4\", 1)[0]\n coords = np.fromfile(fobj, \">f4\", vnum * 3).reshape(vnum, 3)\n faces = np.fromfile(fobj, \">i4\", fnum * 3).reshape(fnum, 3)\n\n if read_metadata:\n volume_info = _read_volume_info(fobj)\n else:\n raise ValueError(\"File does not appear to be a Freesurfer surface\")\n\n coords = coords.astype(np.float) # XXX: due to mayavi bug on mac 32bits\n\n ret = (coords, faces)\n if read_metadata:\n if len(volume_info) == 0:\n warnings.warn('No volume information contained in the file')\n ret += (volume_info,)\n if read_stamp:\n ret += (create_stamp,)\n\n return ret", "def loci_parsed(loci_file):\n #\n ga_list = [\"Ang_30\",\"Ang_29\"]\n\n gb_list = [\"Ang_67\", \"Ang_21\"]\n\n cc_list = [\"Cg12063\", \"Cg125212\", \"Cg126212\", \"Cg12758\", \"Cg_432\"]\n\n loci_dic = {}\n\n loci_list = {\"ga\": None, \"gb\": None, \"cc\": None}\n\n\n\n for files in loci_file:\n\n name= files.strip().split (\"/\")\n name_loci = name[12].split(\"_\")\n name_loci_1 = name_loci[1].split(\".\")\n real_name_loci = name_loci_1[0]\n\n loci_file = open(files)\n\n\n for line in loci_file:\n\n if line[:1] in \"0123456789\":\n pass\n else:\n\n 
line_information = line.strip().split()\n isolate = line_information[0]\n sequence = line_information [1]\n\n # if \"-\" in sequence:\n # sequence = sequence.replace (\"-\", \"\")\n\n if isolate in ga_list and loci_list[\"ga\"] == None:\n loci_list[\"ga\"] = sequence\n if isolate in gb_list and loci_list[\"gb\"] == None:\n loci_list[\"gb\"] = sequence\n if isolate in cc_list and loci_list[\"cc\"] == None:\n loci_list[\"cc\"] = sequence\n loci_dic[real_name_loci] = loci_list\n\n\n\n loci_list = {\"ga\": None, \"gb\": None, \"cc\": None}\n\n return loci_dic", "def cereal_protein_fractions(cereals):\n result = {}\n for cereal in cereals:\n total_grams = float(cereal[\"weight\"]) * 28.35\n result[cereal[\"name\"]] = float(cereal[\"protein\"]) / total_grams\n\n return result", "def read_zp(file):\n with open(file) as f_in:\n head = f_in.readline()\n units = f_in.readline()\n for line in f_in:\n try:\n zpWave[line.split(' ')[0].replace('\"', '')] = float(line.split(' ')[1])\n zpF0[line.split(' ')[0].replace('\"', '')] = float(line.split(' ')[2])\n \n except NameError:\n zpWave = {line.split(' ')[0].replace('\"', '') : float(line.split(' ')[1])}\n zpF0 = {line.split(' ')[0].replace('\"', '') : float(line.split(' ')[2])}\n \n return zpWave, zpF0", "def get_shape_dicts(route_short_name, septa_fn):\n \n #modify this path to a sqlite file with\n #the gtfs data in it. \n #to create this file, i used\n #https://github.com/jarondl/pygtfs.git\n e = create_engine(septa_fn)\n Session = sessionmaker(bind = e)\n s = Session()\n\n route_block_to_shape = {}\n q = \"SELECT routes.route_short_name, trips.block_id, trips.shape_id \\\n FROM routes INNER JOIN trips \\\n ON routes.route_id == trips.route_id \\\n WHERE routes.route_short_name == :rsn \\\n GROUP BY trips.block_id\"\n results = s.execute(q, {\"rsn\":route_short_name})\n \n for r in results:\n route_block_to_shape[(r.route_short_name, r.block_id)] = r.shape_id\n\n s_ids = set(route_block_to_shape.values())\n shape_to_path = {}\n for s_id in s_ids:\n q = \"SELECT shapes.shape_pt_lat, shapes.shape_pt_lon \\\n FROM shapes \\\n WHERE shapes.shape_id == :s_id\"\n\n results = s.execute(q, {'s_id':s_id})\n path = [tuple(r) for r in results]\n shape_to_path[s_id] = path\n \n s.close()\n\n return route_block_to_shape, shape_to_path", "def test_split_str_zmat(self):\n zmat_str_1 = \"\"\" C\n H 1 R1\n H 1 R1 2 A1\n H 1 R1 2 A1 3 D1\n H 1 R1 2 A1 3 D2\nVariables:\nA1=109.4712\nD1=120.0000\nD2=240.0000\nR1=1.0912\n\"\"\"\n coords, vars_ = converter.split_str_zmat(zmat_str_1)\n expected_coords = \"\"\" C\n H 1 R1\n H 1 R1 2 A1\n H 1 R1 2 A1 3 D1\n H 1 R1 2 A1 3 D2\"\"\"\n expected_vars = \"\"\"A1=109.4712\nD1=120.0000\nD2=240.0000\nR1=1.0912\"\"\"\n self.assertEqual(coords, expected_coords)\n self.assertEqual(vars_, expected_vars)\n\n zmat_str_2 = \"\"\"A1=109.4712\nD1=120.0000\nD2=240.0000\nR1=1.0912\n\n C\n H, 1, R1\n H, 1, R1, 2, A1\n H, 1, R1, 2, A1, 3, D1\n H, 1, R1, 2, A1, 3, D2\n\"\"\"\n coords, vars_ = converter.split_str_zmat(zmat_str_2)\n expected_coords = \"\"\" C\n H, 1, R1\n H, 1, R1, 2, A1\n H, 1, R1, 2, A1, 3, D1\n H, 1, R1, 2, A1, 3, D2\"\"\"\n expected_vars = \"\"\"A1=109.4712\nD1=120.0000\nD2=240.0000\nR1=1.0912\"\"\"\n self.assertEqual(coords, expected_coords)\n self.assertEqual(vars_, expected_vars)\n\n zmat_str_3 = \"\"\" C\n H 1 R1\n H 1 R1 2 A1\n H 1 R1 2 A1 3 D1\n H 1 R1 2 A1 3 D2\n\nA1 109.4712\nD1 120.0000\nD2 240.0000\nR1 1.0912\n\"\"\"\n coords, vars_ = converter.split_str_zmat(zmat_str_3)\n expected_coords = \"\"\" C\n H 1 R1\n H 1 R1 2 
A1\n H 1 R1 2 A1 3 D1\n H 1 R1 2 A1 3 D2\"\"\"\n expected_vars = \"\"\"A1 109.4712\nD1 120.0000\nD2 240.0000\nR1 1.0912\"\"\"\n self.assertEqual(coords, expected_coords)\n self.assertEqual(vars_, expected_vars)\n\n zmat_str_4 = \"\"\" C\n H 1 R1\n H 1 R1 2 A1\n H 1 R1 2 A1 3 D1\n H 1 R1 2 A1 3 D2\n\nA1=109.4712\nD1=120.0000\nD2=240.0000\nR1=1.0912\n\"\"\"\n coords, vars_ = converter.split_str_zmat(zmat_str_4)\n expected_coords = \"\"\" C\n H 1 R1\n H 1 R1 2 A1\n H 1 R1 2 A1 3 D1\n H 1 R1 2 A1 3 D2\"\"\"\n expected_vars = \"\"\"A1=109.4712\nD1=120.0000\nD2=240.0000\nR1=1.0912\"\"\"\n self.assertEqual(coords, expected_coords)\n self.assertEqual(vars_, expected_vars)\n\n zmat_str_5 = \"\"\" C\n H 1 1.0912\n H 1 1.0912 2 109.4712\n H 1 1.0912 2 109.4712 3 120.0000\n H 1 1.0912 2 109.4712 3 240.0000\n\"\"\"\n coords, vars_ = converter.split_str_zmat(zmat_str_5)\n self.assertEqual(coords, zmat_str_5)\n self.assertIsNone(vars_)", "def frac_to_cart(cell, positions):\n atomlist = []\n counter = 1\n a, b, c = cell[0], cell[1], cell[2]\n alpha, beta, gamma = cell[3] / 180 * np.pi, cell[4] / 180 * np.pi, cell[5] / 180 * np.pi\n v = np.sqrt(1 - np.cos(alpha) * np.cos(alpha) - np.cos(beta) * np.cos(beta) - np.cos(gamma) * np.cos(gamma) \\\n + 2 * np.cos(alpha) * np.cos(beta) * np.cos(gamma))\n transmatrix = np.matrix([[a, b * np.cos(gamma), c * np.cos(beta)],\n [0, b * np.sin(gamma), c * (np.cos(alpha) - np.cos(beta) * np.cos(gamma)) / np.sin(gamma)],\n [0, 0, c * v / np.sin(gamma)]])\n\n for atom in positions:\n coordmatrix = np.dot(transmatrix, positions[str(atom)])\n coordmatrix = np.array(coordmatrix).flatten().tolist()\n atomlist.append([])\n atomlist[-1].append([atom, atomtable[atom[0]]])\n counter += 1\n atomlist[-1].append(np.array(coordmatrix))\n return atomlist", "def read(self, filePath):\n \n result = {\n 'coordinates': {\n 'count': 0,\n 'nodes': []\n },\n 'element_groups': { \n 'number_of_elements': 0,\n 'count': 0,\n 'groups': []\n },\n 'bars': [],\n 'materials': {\n 'count': 0,\n 'materials': []\n },\n 'geometric_properties': {\n 'count': 0\n },\n 'bcnodes': {\n 'count': 0\n },\n 'loads': {\n 'count': 0\n }\n }\n # print(result['coordinates']['nodes'])\n \n with open(filePath,'r') as f:\n lines = f.readlines()\n elementCounter = 0\n groupCounter = 0\n geometricCounter = 0\n\n for line in lines:\n line = line.strip()\n el = line.split(' ')\n \n if len(line) == 0:\n continue\n\n if len(line) != 0 and line[0] == \"*\":\n section = line[1:].lower()\n continue\n \n if section == 'coordinates':\n if len(el) == 1 :\n result[section]['count'] = el[0]\n else:\n result[section]['nodes'].append(Node(int(el[0]), float(el[1]), float(el[2])))\n \n elif section == 'element_groups':\n if len(line) == 1:\n result[section]['count'] = int(el[0])\n else: \n result[section]['groups'].append(Group(el[0], el[1], el[2]))\n result[section]['number_of_elements'] += int(el[1])\n\n elif section == 'incidences':\n groups = result['element_groups']['groups']\n nodes = result['coordinates']['nodes']\n print(el)\n\n currentGroup = groups[groupCounter]\n if (currentGroup.amount == 0):\n groupCounter += 1\n currentGroup = groups[groupCounter]\n \n print(\"Group n: {} count: {}\".format(currentGroup.n, currentGroup.amount))\n \n bar = Bar(el[0], nodes[int(el[1])-1], nodes[int(el[2])-1], groups[groupCounter])\n print(\n \"\"\"\n Bar {} created \n Start node: {} End Node: {} Group: {}\n \"\"\".format(bar.id, bar.startNode.n, bar.endNode.n, bar.group))\n result['bars'].append(bar)\n currentGroup.amount -= 1\n \n elif section 
== 'materials':\n if len(el) == 1:\n result[section]['count'] = el[0]\n groupCounter = 0\n else:\n material = Material(el[0], el[1], el[2])\n result[section]['materials'].append(material)\n result['element_groups']['groups'][groupCounter].setMaterial(material)\n groupCounter += 1\n\n elif section == 'geometric_properties':\n if geometricCounter == 0:\n result[section]['count'] = el[0]\n else:\n result['element_groups']['groups'][geometricCounter - 1].setSectionArea(\n el[0]\n )\n geometricCounter += 1\n\n elif section == 'bcnodes':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n nodeIndex = next((e for e, item in enumerate(\n result['coordinates']['nodes']) if item.n == int(el[0])), None\n )\n result['coordinates']['nodes'][nodeIndex].setRestriction(int(el[1]))\n\n elif section == 'loads':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n load = Load(el[1], el[2])\n nodeIndex = next((e for e, item in enumerate(\n result['coordinates']['nodes']) if item.n == int(el[0])), None\n )\n result['coordinates']['nodes'][nodeIndex].addLoad(load)\n\n for bar in result['bars']:\n bar.createLocalArray()\n\n print('---------- Parsing complete! ----------')\n pprint(result)\n print('---------------------------------------')\n\n return result", "def parse_gff3(filename):\n genes = OrderedDict()\n transcript_to_locus = {}\n\n count_per_transcript = defaultdict(lambda: 1)\n\n with open(filename) as gff_in:\n for line in gff_in:\n # Skip comments\n if not line.strip()[0] == '#':\n line_data = parse_line(line)\n\n # Parts (e.g. CDS or Exon) might not have an ID. One will be added here\n if ID_ATTRIBUTE not in line_data['attributes'].keys() and line_data['feature'] in PARTS_FEATURES:\n if PARENT_ATTRIBUTE in line_data['attributes'].keys():\n counter_id = line_data['attributes'][PARENT_ATTRIBUTE] + '.' 
+ line_data['feature'] + '.'\n new_id = counter_id + str(count_per_transcript[counter_id])\n count_per_transcript[counter_id] += 1\n line_data['attributes'][ID_ATTRIBUTE] = new_id\n\n # Every line needs a valid ID\n if ID_ATTRIBUTE in line_data['attributes'].keys():\n\n if line_data['feature'] in LOCUS_FEATURES:\n genes[line_data['attributes'][ID_ATTRIBUTE]] = {\n 'data': line_data,\n 'transcripts': OrderedDict()\n }\n\n elif line_data['feature'] in TRANSCRIPT_FEATURES:\n if PARENT_ATTRIBUTE in line_data['attributes'].keys():\n parent_id = line_data['attributes'][PARENT_ATTRIBUTE]\n\n if parent_id in genes.keys():\n genes[parent_id]['transcripts'][line_data['attributes'][ID_ATTRIBUTE]] = {\n 'data': line_data,\n 'parts': []\n }\n\n transcript_to_locus[line_data['attributes'][ID_ATTRIBUTE]] = \\\n line_data['attributes'][PARENT_ATTRIBUTE]\n\n elif line_data['feature'] in PARTS_FEATURES:\n\n if PARENT_ATTRIBUTE in line_data['attributes'].keys():\n parent_id = line_data['attributes'][PARENT_ATTRIBUTE]\n grandparent_id = transcript_to_locus[parent_id]\n\n genes[grandparent_id]['transcripts'][parent_id]['parts'].append(line_data)\n\n return genes", "def load_n3d_coords(file_path): \n \n import core.nuc_io as io\n\n seq_pos_dict = {}\n coords_dict = {} \n \n with io.open_file(file_path) as file_obj:\n chromo = None\n \n for line in file_obj:\n \n data = line.split()\n n_items = len(data)\n \n if not n_items:\n continue\n \n elif data[0] == '#':\n continue\n \n elif n_items == 3:\n chromo, n_coords, n_models = data\n \n #if chromo.lower()[:3] == 'chr':\n # chromo = chromo[3:]\n \n if chromo in coords_dict:\n raise Exception('Duplicate chromosome \"%s\" records in file %s' % (chromo, file_path))\n \n n_coords = int(n_coords)\n n_models = int(n_models)\n \n chromo_seq_pos = np.empty(n_coords, int)\n chromo_coords = np.empty((n_models, n_coords, 3), float)\n \n coords_dict[chromo] = chromo_coords\n seq_pos_dict[chromo] = chromo_seq_pos\n \n check = (n_models * 3) + 1\n i = 0\n \n elif not chromo:\n raise Exception('Missing chromosome record in file %s' % file_path)\n \n elif n_items != check:\n msg = 'Data size in file %s does not match Position + Models * Positions * 3'\n raise Exception(msg % file_path)\n \n else:\n chromo_seq_pos[i] = int(data[0])\n \n coord = [float(x) for x in data[1:]]\n coord = np.array(coord).reshape(n_models, 3)\n chromo_coords[:,i] = coord\n i += 1\n \n return seq_pos_dict, coords_dict", "def get_phi_comps_from_recfile(recfile):\n iiter = 1\n iters = {}\n f = open(recfile, \"r\")\n while True:\n line = f.readline()\n if line == \"\":\n break\n if (\n \"starting phi for this iteration\" in line.lower()\n or \"final phi\" in line.lower()\n ):\n contributions = {}\n while True:\n line = f.readline()\n if line == \"\":\n break\n if \"contribution to phi\" not in line.lower():\n iters[iiter] = contributions\n iiter += 1\n break\n raw = line.strip().split()\n val = float(raw[-1])\n group = raw[-3].lower().replace('\"', \"\")\n contributions[group] = val\n return iters", "def load_data_from_disk(self):\n data = dict()\n Omega_M = self.theta_fid[0]\n der_den = 1. / (2. * self.delta_theta)\n\n print (\"Loading data from disk.. 
Omega_M = \", Omega_M, \"delta_theta = \", self.delta_theta[0])\n\n for key in ['x_central', 'x_m', 'x_p', 'x_central_test', 'x_m_test', 'x_p_test']:\n data[key] = np.load(f'./preloaded_data/{Omega_M}_{self.delta_theta[0]}_{key}.npy')\n\n return data, der_den", "def readQrelsDict(fileName):\n result = {}\n for e in readQrels(fileName):\n result.setdefault(e.queryId, {})[e.docId] = int(e.relGrade)\n return result", "def galaxy_positions():\n hdulist1 = pf.open(source+'/kids_data/KiDS_DR3.1_G9_ugri_shear.fits')\n '''\n hdulist2 = pf.open('../kids_data/KiDS_DR3.1_G12_ugri_shear.fits')\n hdulist3 = pf.open('../kids_data/KiDS_DR3.1_G15_ugri_shear.fits')\n hdulist4 = pf.open('../kids_data/KiDS_DR3.1_G23_ugri_shear.fits')\n hdulist5 = pf.open('../kids_data/KiDS_DR3.1_GS_ugri_shear.fits')\n '''\n ra = hdulist1[1].data['RAJ2000'][:sample]\n dec = hdulist1[1].data['DECJ2000'][:sample]\n global maxra\n maxra = max(ra)\n global minra\n minra = min(ra)\n global maxdec\n maxdec = max(dec)\n global mindec\n mindec = min(dec)\n global bsize\n bsize = abs(max(maxra, maxdec) - min(mindec, minra))\n coords = np.column_stack([ra, dec])\n global SIZE\n SIZE = len(coords)\n print(maxra, maxdec, minra, mindec, SIZE)\n ctree = cKDTree(coords)\n return ctree", "def dbase():\r\n albums_data = {}\r\n song_dict = {}\r\n songs_list = []\r\n with open(PATH, 'r') as f:\r\n data = f.read()\r\n temp = data.split(\"#\")\r\n for album in temp[1:]:\r\n index = album.find(\"::\")\r\n albums_data[album[:index]] = \"\"\r\n for album in temp[1:]:\r\n album = album.split(\"*\")\r\n album_name = album[0][:-7]\r\n release_Date = album[0][-5:]\r\n del album[0]\r\n for song in album:\r\n info = song.split(\"::\")\r\n song_name = info[0]\r\n del info[0]\r\n songs_list = info\r\n song_dict[song_name] = songs_list\r\n albums_data[album_name] = (song_dict.copy(), release_Date)\r\n song_dict.clear()\r\n return albums_data", "def get_named_volumes(blocks_partition, block_shape):\n logger.debug(\"== Function == get_named_volumes\")\n d = dict()\n logger.debug(\"[Arg] blocks_partition: %s\", blocks_partition)\n logger.debug(\"[Arg] block_shape: %s\", block_shape)\n for i in range(blocks_partition[0]):\n for j in range(blocks_partition[1]):\n for k in range(blocks_partition[2]):\n bl_corner = (block_shape[0] * i,\n block_shape[1] * j,\n block_shape[2] * k)\n tr_corner = (block_shape[0] * (i+1),\n block_shape[1] * (j+1),\n block_shape[2] * (k+1)) \n index = _3d_to_numeric_pos((i, j, k), blocks_partition, order='F')\n d[index] = Volume(index, bl_corner, tr_corner)\n logger.debug(\"Indices of names volumes found: %s\", d.keys())\n logger.debug(\"End\\n\")\n return d", "def path_to_dict(path):\n global path_dict\n path_dict.overwrite({\n 'path': [sorted(x) for x in pairwise(path)],\n 'endpoints': (path[0], path[-1])\n })", "def from_h5_file_and_quad_positions(cls, path, positions, unit=1e-3):\n assert len(positions) == 4\n modules = []\n\n quads_x_orientation = [-1, -1, 1, 1]\n quads_y_orientation = [1, 1, -1, -1]\n\n with h5py.File(path, 'r') as f:\n for Q, M in product(range(1, 5), range(1, 5)):\n quad_pos = np.array(positions[Q - 1])\n mod_grp = f['Q{}/M{}'.format(Q, M)]\n mod_offset = mod_grp['Position'][:2]\n\n # Which way round is this quadrant\n x_orient = quads_x_orientation[Q - 1]\n y_orient = quads_y_orientation[Q - 1]\n\n tiles = []\n for T in range(1, 3):\n corner_pos = np.zeros(3)\n tile_offset = mod_grp['T{:02}/Position'.format(T)][:2]\n corner_pos[:2] = quad_pos + mod_offset + tile_offset\n\n # Convert units (mm) to metres\n 
corner_pos *= unit\n\n # Measuring in terms of the step within a row, the\n # step to the next row of hexagons is 1.5/sqrt(3).\n ss_vec = np.array([0, y_orient, 0]) * cls.pixel_size * 1.5/np.sqrt(3)\n fs_vec = np.array([x_orient, 0, 0]) * cls.pixel_size\n\n # Corner position is measured at low-x, low-y corner (bottom\n # right as plotted). We want the position of the corner\n # with the first pixel, which is either high-x low-y or\n # low-x high-y.\n if x_orient == -1:\n first_px_pos = corner_pos - (fs_vec * cls.frag_fs_pixels)\n else:\n first_px_pos = corner_pos - (ss_vec * cls.frag_ss_pixels)\n\n tiles.append(GeometryFragment(\n corner_pos=first_px_pos,\n ss_vec=ss_vec,\n fs_vec=fs_vec,\n ss_pixels=cls.frag_ss_pixels,\n fs_pixels=cls.frag_fs_pixels,\n ))\n modules.append(tiles)\n\n return cls(modules, filename=path)", "def convert_arrpart_to_dict(particles):\n partdict = {}\n partdict['m'] = particles[:,0]\n partdict['Z'] = particles[:,2]\n partdict['rho'] = particles[:,5]\n partdict['R'] = particles[:,7]\n partdict['vphi'] = particles[:,9]\n partdict['vR'] = particles[:,10]\n partdict['vz'] = particles[:,11]\n normV = np.sqrt(partdict['vphi']*partdict['vphi']+\\\n partdict['vR']*partdict['vR']+\\\n partdict['vz']*partdict['vz'])\n partdict['Bphi'] = particles[:,16]\n partdict['BR'] = particles[:,17]\n partdict['Bz'] = particles[:,18]\n normB = np.sqrt(partdict['Bphi']*partdict['Bphi']+\\\n partdict['BR']*partdict['BR']+\\\n partdict['Bz']*partdict['Bz'])\n partdict['pitch'] = (partdict['Bphi']*partdict['vphi']+partdict['Bz']*partdict['vz']\\\n +partdict['BR']*partdict['vR'])/(normB*normV)\n \n return partdict", "def read_coordinates(path='', sort=True):\n maspointer = open(path + 'xd.mas', 'r')\n respointer = open(path + 'xd.res', 'r')\n\n positions = {}\n keylist = [] #Needed to keep the atomlist order. This is important for the frequency read function.\n for line in maspointer.readlines():\n if 'CELL ' in line:\n cell = [float(i) for i in line.split(\" \") if '.' in i]\n break\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(\" \") if '.' 
in i]\n coords = coords[:-1]\n key = line.split(\" \")[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return cell, positions, sortkeylist", "def reduce_file_xyz(self, positions, names=None, steps=None):\n if steps is not None:\n steps = self._confirm_step_selection(steps)\n\n new_result_blocks = []\n for r_block in self.frd.result_blocks:\n\n if steps is not None and r_block.numstep not in steps:\n self._remove_result_param_header(r_block)\n self.frd.blocks.remove(r_block)\n continue\n elif names is not None and r_block.name not in names:\n self._remove_result_param_header(r_block)\n self.frd.blocks.remove(r_block)\n continue\n\n r_block.numnod = len(positions)\n new_results = []\n for pos in positions:\n res = FRDNodeResult()\n new_results.append(res)\n res.node = len(new_results)\n res.data = self.get_results_pos(\n pos, names=[r_block.name], steps=[r_block.numstep])[0]\n r_block.results = new_results\n new_result_blocks.append(r_block)\n\n self.frd.result_blocks = new_result_blocks\n\n self.frd.node_block.numnod = len(positions)\n self.frd.node_block.nodes = []\n for pos in positions:\n frd_node = FRDNode()\n self.frd.node_block.nodes.append(frd_node)\n frd_node.number = len(self.frd.node_block.nodes)\n frd_node.pos = pos\n\n self.frd.blocks.remove(self.frd.elem_block)\n self.frd.elem_block = None", "def gene_coords_by_name(probes, names):\n names = list(filter(None, set(names)))\n if not names:\n return {}\n\n # Create an index of gene names\n gene_index = collections.defaultdict(set)\n for i, gene in enumerate(probes[\"gene\"]):\n for gene_name in gene.split(\",\"):\n if gene_name in names:\n gene_index[gene_name].add(i)\n # Retrieve coordinates by name\n all_coords = collections.defaultdict(lambda: collections.defaultdict(set))\n for name in names:\n gene_probes = probes.data.take(sorted(gene_index.get(name, [])))\n if not len(gene_probes):\n raise ValueError(f\"No targeted gene named {name!r} found\")\n # Find the genomic range of this gene's probes\n start = gene_probes[\"start\"].min()\n end = gene_probes[\"end\"].max()\n chrom = core.check_unique(gene_probes[\"chromosome\"], name)\n # Deduce the unique set of gene names for this region\n uniq_names = set()\n for oname in set(gene_probes[\"gene\"]):\n uniq_names.update(oname.split(\",\"))\n all_coords[chrom][start, end].update(uniq_names)\n # Consolidate each region's gene names into a string\n uniq_coords = {}\n for chrom, hits in all_coords.items():\n uniq_coords[chrom] = [\n (start, end, \",\".join(sorted(gene_names)))\n for (start, end), gene_names in hits.items()\n ]\n return uniq_coords", "def _getFacesAndMaterials_bpy(self):\r\n obj = self.obj\r\n mesh = obj.data\r\n polygonDict = {} # a dict that holds faces (dict), their vertices (dict: positions and materials)\r\n # self._checkForUndoMess()\r\n\r\n for n in range (0, len(mesh.polygons)):\r\n f = mesh.polygons[n] # current face\r\n\r\n # create local dict\r\n d = {}\r\n\r\n # get face material\r\n slot = obj.material_slots[f.material_index]\r\n mat = slot.material\r\n d['material'] = mat.name\r\n\r\n # get face vertices\r\n v_list = []\r\n for v in f.vertices: # browse through vertice index\r\n vect = obj.matrix_world * mesh.vertices[v].co\r\n v_list.append(vect)\r\n \r\n # add third twice for triangle face (expected by evertims raytracing client)\r\n if( 
len(f.vertices) == 3 ): \r\n vect = obj.matrix_world * mesh.vertices[ f.vertices[2] ].co\r\n v_list.append(vect)\r\n\r\n d['vertices'] = v_list\r\n\r\n # store local dict\r\n polygonDict[n] = d\r\n return polygonDict", "def _get_queried_pts(self):\n queries = {}\n for f_name in self.f_names:\n queries[f_name] = np.asarray([qi.pt\n for qi in self.query_history[f_name]])\n return queries", "def dump_sections(self, name: Union[str,\n Pattern[str]]) -> Dict[str, bytes]:\n name_regex = re.compile(name)\n\n sections: Dict[str, bytes] = {}\n for section in self.sections:\n if name_regex.match(section.name):\n self._elf.seek(section.file_offset + section.offset)\n sections[section.name] = self._elf.read(section.size)\n\n return sections", "def _create_dnp3_object_map(self):\n\n feeders = self.file_dict.get(\"feeders\", [])\n measurements = list()\n capacitors = list()\n regulators = list()\n switches = list()\n solarpanels = list()\n batteries = list()\n fuses = list()\n breakers = list()\n reclosers = list()\n energyconsumers = list()\n for x in feeders:\n measurements = x.get(\"measurements\", [])\n capacitors = x.get(\"capacitors\", [])\n regulators = x.get(\"regulators\", [])\n switches = x.get(\"switches\", [])\n solarpanels = x.get(\"solarpanels\", [])\n batteries = x.get(\"batteries\", [])\n fuses = x.get(\"fuses\", [])\n breakers = x.get(\"breakers\", [])\n reclosers = x.get(\"reclosers\", [])\n energyconsumers = x.get(\"energyconsumers\", [])\n\n # Unique grouping of measurements - GroupBy Name, Type and Connectivity node\n groupByNameTypeConNode = defaultdict(list) \n for m in measurements:\n groupByNameTypeConNode[m['name']+m.get(\"measurementType\")+m.get(\"ConnectivityNode\")].append(m)\n\n # Create Net Phase DNP3 Points\n for grpM in groupByNameTypeConNode.values():\n\n if grpM[0]['MeasurementClass'] == \"Analog\" and grpM[0].get(\"measurementType\") == \"VA\":\n measurement_type = grpM[0].get(\"measurementType\")\n measurement_id = m.get(\"mRID\")\n \n\n name1 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-VAR-value'\n name2 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-Watts-value'\n name3 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-VA-value'\n\n description1 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-VAR\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n description2 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-Watts\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n description3 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-VA\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name1, description1, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name2, description2, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name3, description3, measurement_type, measurement_id)\n self.c_ai += 1\n\n # Create Each Phase DNP3 Points\n for m in measurements:\n attribute = attribute_map['regulators']['attribute']\n measurement_type = m.get(\"measurementType\")\n measurement_id = m.get(\"mRID\")\n name= m['name'] + '-' + m['phases']\n description = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + measurement_type + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + 
m.get(\"SimObject\")\n if m['MeasurementClass'] == \"Analog\":\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name, description, measurement_type, measurement_id)\n self.c_ai += 1\n\n if m.get(\"measurementType\") == \"VA\":\n measurement_id = m.get(\"mRID\")\n name1 = m['name'] + '-' + m['phases'] + '-VAR-value'\n name2 = m['name'] + '-' + m['phases'] + '-Watts-value'\n name3 = m['name'] + '-' + m['phases'] + '-angle'\n\n description1 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"VAR\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n description2 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"Watt\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n description3 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"angle\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") + \",SimObject:\" + m.get(\"SimObject\")\n if m['MeasurementClass'] == \"Analog\":\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name1, description1, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name2, description2, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name3, description3, measurement_type, measurement_id)\n self.c_ai += 1\n\n\n elif m['MeasurementClass'] == \"Discrete\" and measurement_type == \"Pos\":\n if \"RatioTapChanger\" in m['name'] or \"reg\" in m[\"SimObject\"]:\n # TODO: Do we need step?\n for r in range(5, 7): # [r==4]: Step, [r==5]: LineDropR, [r==6]:LineDropX \n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, attribute[r])\n self.c_ao += 1\n else:\n self.assign_val_a(\"DI\", 1, 2, self.c_di, name, description, measurement_type, measurement_id)\n self.c_di += 1\n\n for m in capacitors:\n measurement_id = m.get(\"mRID\")\n cap_attribute = attribute_map['capacitors']['attribute'] # type: List[str]\n\n for l in range(0, 4):\n # publishing attribute value for capacitors as Bianry/Analog Input points based on phase attribute\n name = m['name']\n description = \"Name:\" + m['name'] + \"ConductingEquipment_type:LinearShuntCompensator\" + \",Attribute:\" + cap_attribute[l] + \",Phase:\" + m['phases']\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, cap_attribute[l])\n self.c_ao += 1\n for p in range(0, len(m['phases'])):\n name = m['name'] + m['phases'][p]\n description = \"Name:\" + m['name'] + \",ConductingEquipment_type:LinearShuntCompensator\" + \",controlAttribute:\" + cap_attribute[p] + \",Phase:\" + m['phases'][p]\n # description = \"Capacitor, \" + m['name'] + \",\" + \"phase -\" + m['phases'][p] + \", and attribute is - \" + cap_attribute[4]\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, cap_attribute[4])\n self.c_do += 1\n\n for m in regulators:\n reg_attribute = attribute_map['regulators']['attribute']\n # bank_phase = list(m['bankPhases'])\n for n in range(0, 4):\n measurement_id = m.get(\"mRID\")\n name = m['bankName'] + '-' + m['bankPhases']\n description = \"Name:\" + m['bankName'] + \",ConductingEquipment_type:RatioTapChanger_Reg\" +\",Phase:\" + m['bankPhases'] + \",Attribute:\" + reg_attribute[n]\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id[0], reg_attribute[n])\n self.c_ao += 1\n self.assign_val_d(\"AI\", 30, 1, self.c_ai, name, description, measurement_id[0], 
reg_attribute[n])\n self.c_ai += 1\n for i in range(5, 7):\n for j in range(0, len(m['bankPhases'])):\n measurement_id = m.get(\"mRID\")[j]\n name = m['tankName'][j] + '-' + m['bankPhases'][j]\n description = \"Name:\" + m['tankName'][j] + \",ConductingEquipment_type:RatioTapChanger_Reg\"+ \",Phase:\" + m['bankPhases'][j] + \",controlAttribute:\" + reg_attribute[i]\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id,reg_attribute[i])\n self.c_ao += 1\n self.assign_val_d(\"AI\", 30, 1, self.c_ai, name, description, measurement_id,reg_attribute[i])\n self.c_ai += 1\n \n for m in solarpanels:\n for k in range(0, len(m['phases'])):\n measurement_id = m.get(\"mRID\")\n name = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-Watts-value'\n description = \"Solarpanel:\" + m['name'] + \",Phase:\" + m['phases'] + \",measurementID:\" + measurement_id\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n \n name1 = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-VAR-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name1, description, measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n name2 = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-VAR-Net-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name2, description, measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n name3 = \"Solar\"+ m['name'] + '-' + m['phases'][k] + '-Watts-Net-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name3, description, measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n\t\t\t\n for m in batteries:\n for l in range(0, len(m['phases'])):\n measurement_id = m.get(\"mRID\")\n name = m['name'] + '-' + m['phases'][l] + '-Watts-value'\n description = \"Battery, \" + m['name'][l] + \",Phase: \" + m['phases'] + \",ConductingEquipment_type:PowerElectronicConnections\"\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description,measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n name1 = m['name'] + '-' + m['phases'][l] + '-VAR-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name1, description,measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n for m in switches:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for k in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][k]\n description = \"Name:\" + m[\"name\"] + \",ConductingEquipment_type:LoadBreakSwitch\" + \"Phase:\" + phase_value[k] +\",controlAttribute:\"+switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in fuses:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for l in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][l]\n description = \"Name:\" + m[\"name\"] + \",Phase:\" + phase_value[l] + \",Attribute:\" + switch_attribute + \",mRID\" + measurement_id\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in breakers:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for n in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][n]\n description = \"Name: \" + m[\"name\"] + 
\",Phase:\" + phase_value[n] + \",ConductingEquipment_type:Breaker\" + \",controlAttribute:\" + switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n \n for m in reclosers:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for i in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][i]\n description = \"Recloser, \" + m[\"name\"] + \"Phase: - \" + phase_value[i] + \",ConductingEquipment_type:Recloser\"+\"controlAttribute:\" + switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in energyconsumers:\n measurement_id = m.get(\"mRID\")\n for k in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name']+\"phase:\" + m['phases'][k]\n description = \"EnergyConsumer, \" + m[\"name\"] + \"Phase: \" + phase_value[k] \n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, \"EnergyConsumer.p\")\n self.c_ao += 1\n \n name1 = m['name']+\"phase:\" + m['phases'][k] + \"control\"\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name1, description, measurement_id, \"EnergyConsumer.p\")\n self.c_do += 1\n\n return self.out_json", "def read_locations(namefile):\n db = shelve.open(namefile)\n hashes = db['hashes']\n key_firms = db['nif']\n year = db['year']\n locs = db['locations']\n methodvalues = db['methodvalues']\n db.close()\n return hashes, key_firms, year, locs, methodvalues", "def extended_xyz_parse(xyz_d):\n \n s_properties = ['rot_A', \n 'rot_B', \n 'rot_C', \n 'dipole', \n 'polarizability', \n 'homo', \n 'lumo', \n 'band_gap', \n 'ese', \n 'zpe', \n 'u_0K', \n 'u_298.15K', \n 'h_298.15K', \n 'f_298.15K', \n 'cp_298.15K']\n\n mol_properties = {}\n\n\n lines = xyz_d.replace('*^','e').splitlines()\n \n r_no_atoms = lines[0]\n no_atoms = int(r_no_atoms)\n\n r_scalars = lines[1]\n mol_id = r_scalars.split()[:2]\n scalar_properties = np.array(r_scalars.split()[2:], np.float32)\n\n r_mcoords = lines[2:2+no_atoms]\n symbols = [m.split()[0] for m in r_mcoords]\n coords = np.array([m.split()[1:4] for m in r_mcoords], dtype=np.float32)\n \n charges = np.array([m.split()[4] for m in r_mcoords], dtype=np.float32)\n\n r_vibfreqs = lines[2+ no_atoms]\n vib_freqs = np.array([float(freq) for freq in r_vibfreqs.split()], dtype=np.float32)\n\n smiles = lines[3+no_atoms].split()\n inchi = lines[4+no_atoms].split()\n\n mol_properties['no_atoms'] = no_atoms\n mol_properties['mol_id'] = mol_id\n \n for i, p in enumerate(s_properties):\n mol_properties[p] = scalar_properties[i]\n\n mol_properties['symbols'] = symbols\n mol_properties['coords'] = coords\n mol_properties['charges'] = charges\n mol_properties['vib_freqs'] = vib_freqs\n mol_properties['smiles'] = smiles\n mol_properties['inchi'] = inchi\n \n return mol_properties", "def filecoords(self):\n coords = sorted(self.map.keys())\n for coord in coords:\n yield coord, self.map[coord]", "def _get_gedi1b_main_data_dict(self) -> dict:\n data = {\n # General identifiable data\n \"granule_name\": [self.parent_granule.filename] * self.n_shots,\n \"shot_number\": self[\"shot_number\"][:],\n \"beam_type\": [self.beam_type] * self.n_shots,\n \"beam_name\": [self.name] * self.n_shots,\n # Temporal data\n \"delta_time\": self[\"delta_time\"][:],\n # Quality data\n \"degrade\": self[\"geolocation/degrade\"][:],\n \"stale_return_flag\": 
self[\"stale_return_flag\"][:],\n \"solar_elevation\": self[\"geolocation/solar_elevation\"][:],\n \"solar_azimuth\": self[\"geolocation/solar_elevation\"][:],\n \"rx_energy\": self[\"rx_energy\"][:],\n # DEM\n \"dem_tandemx\": self[\"geolocation/digital_elevation_model\"][:],\n \"dem_srtm\": self[\"geolocation/digital_elevation_model_srtm\"][:],\n # geolocation bin0\n \"latitude_bin0\": self[\"geolocation/latitude_bin0\"][:],\n \"latitude_bin0_error\": self[\"geolocation/latitude_bin0_error\"][:],\n \"longitude_bin0\": self[\"geolocation/longitude_bin0\"][:],\n \"longitude_bin0_error\": self[\"geolocation/longitude_bin0_error\"][:],\n \"elevation_bin0\": self[\"geolocation/elevation_bin0\"][:],\n \"elevation_bin0_error\": self[\"geolocation/elevation_bin0_error\"][:],\n # geolocation lastbin\n \"latitude_lastbin\": self[\"geolocation/latitude_lastbin\"][:],\n \"latitude_lastbin_error\": self[\"geolocation/latitude_lastbin_error\"][:],\n \"longitude_lastbin\": self[\"geolocation/longitude_lastbin\"][:],\n \"longitude_lastbin_error\": self[\"geolocation/longitude_lastbin_error\"][:],\n \"elevation_lastbin\": self[\"geolocation/elevation_lastbin\"][:],\n \"elevation_lastbin_error\": self[\"geolocation/elevation_lastbin_error\"][:],\n # relative waveform position info in beam and ssub-granule\n \"waveform_start\": self[\"rx_sample_start_index\"][:] - 1,\n \"waveform_count\": self[\"rx_sample_count\"][:],\n }\n return data", "def calculate_components(self, parts):\n target = {}\n for part in parts:\n rank = part[0]\n\n try:\n face = part[1]\n except IndexError:\n face = '*'\n\n try:\n target[rank][face] += 1\n except KeyError:\n if rank not in target:\n target[rank] = {}\n target[rank][face] = 1\n\n return target", "def create_dicts(self, path):\n line_d = {}\n rel_d = {}\n\n with open(path) as f:\n for line in islice(f, 0, None, 4):\n lister = line.split('\"')\n line_number = int(lister[0].split('\\t')[0])\n line_d[line_number] = ''.join(str(s) for s in lister[1:])\n \n with open(path) as f:\n for i, line in enumerate(islice(f, 1, None, 4)):\n rel_d[i] = line.split('\\n')[0]\n \n return (line_d, rel_d)", "def read(self):\n dictionary = {}\n with open(self.path) as file:\n key_header = \"\"\n for line in file:\n entry = line.strip().split()\n if len(entry) == 0:\n continue\n if len(entry) == 1:\n key_header = entry[0]+\"_\"\n else:\n key = entry[0].strip()\n value = reduce(lambda x1, y1: x1+\" \" + y1, entry[1:])\n dictionary[key_header+key] = value\n return dictionary", "def get_coordinates_genes(path: str = \"\", data_files: dict = {}):\n\n essential_coordinates = {}\n\n # Get position genes\n if \"gff3\" in data_files:\n file_path = os.path.join(path, data_files[\"gff3\"])\n gene_coordinates = gene_position(file_path)\n else:\n raise ValueError(\"gff3 type not found in data\")\n\n # Get all annotated essential genes\n if \"essential_genes\" in data_files:\n file_path = os.path.join(path, data_files[\"essentials\"])\n with open(file_path, \"r\") as f:\n genes = f.readlines()[1:]\n for gene in genes:\n name = gene.strip(\"\\n\")\n essential_coordinates[name] = gene_coordinates.get(name).copy()\n else:\n raise ValueError(\"essentials not found in data\")\n\n # Get aliases of all genes\n if \"gene_names\" in data_files:\n file_path = os.path.join(path, \"Yeast_Protein_Names.txt\")\n aliases_designation = gene_aliases(file_path)[0] #'YMR056C' \\ ['AAC1'], ...\n else:\n raise ValueError(\"gene_names not found in data\")\n\n return essential_coordinates, aliases_designation", "def 
LookupExports(self, names):\n cache = {}\n cache[()] = self\n for name in names:\n if name == '.':\n name = ''\n parts = name.split('.')\n parts, param = tuple(parts[:-1]), parts[-1]\n o = self\n for i in xrange(len(parts), -1, -1):\n before, after = parts[:i], parts[i:]\n o = cache.get(before, None)\n if o is not None:\n break\n assert o is not None\n for i in after:\n before = tuple(list(before) + [i])\n try:\n cache[before] = o = o.Sub(i)\n except KeyError as e:\n # Fill in the full path to the missing element, rather than just\n # its basename (which is often something unhelpful like '1').\n e.args = tuple(['.'.join(tuple([o.basename]) + tuple(e.args))])\n raise\n yield o, param", "def create_zip_dict() -> dict:\n with open('zip_coordinates.json', 'r') as zip_map:\n return json.loads(zip_map.read())", "def _parse_refraction(line, lines):\n split_line = line.split()\n\n energy = float(split_line[0])\n ref_ind_xx = float(split_line[1])\n ref_ind_zz = float(split_line[2])\n extinct_xx = float(split_line[3])\n extinct_zz = float(split_line[4])\n\n return {\"energy\": energy, \"ref_ind_xx\": ref_ind_xx, \"ref_ind_zz\": ref_ind_zz, \"extinct_xx\": extinct_xx,\n \"extinct_zz\": extinct_zz}", "def coordinates(self, name, start=None, end=None):\n if \"|\" in name:\n self.name = name.split(\"|\")[0]\n else:\n self.name = name\n positions = {}\n match_positions = []\n records = []\n segments = []\n result_segments = []\n for record in self.process(self.name):\n records.append(record)\n records.sort(key=lambda x: int(x.exon_number))\n\n if records[0].strand == '+':\n _start = 1\n for record in records:\n for relative, actual in enumerate(range(record.start, record.end + 1),\n start=_start):\n positions[relative] = actual\n _start = relative + 1\n for pos in range(start, end):\n match_positions.append(positions[pos])\n for key, group in groupby(enumerate(match_positions),\n lambda x: x[0] - x[-1]):\n segment = list(map(itemgetter(1), group))\n segments.append([segment[0], segment[-1]])\n for segment in segments:\n for record in records:\n if segment[0] >= record.start and segment[1] <= record.end:\n result_segments.append((record.scaffold, segment[0], segment[1],\n record.transcript_id + '|' + record.gene_name, 0, record.strand))\n\n elif records[0].strand == '-':\n _start = 1\n for record in records:\n for relative, actual in enumerate(reversed(range(record.start,\n record.end + 1)), start=_start):\n positions[relative] = actual\n _start = relative + 1\n for pos in range(start, end):\n match_positions.append(positions[pos])\n for key, group in groupby(enumerate(reversed(match_positions)),\n lambda x: x[0] - x[-1]):\n segment = list(map(itemgetter(1), group))\n segments.append([segment[0], segment[-1]])\n for segment in segments:\n for record in records:\n if segment[0] >= record.start and segment[1] <= record.end:\n result_segments.append((record.scaffold, segment[0], segment[1],\n record.transcript_id + '|' + record.gene_name, 0, record.strand))\n\n if len(result_segments) == 0:\n logger.debug('%s, %s, %s' % (name, start, end))\n logger.debug('%s' % str(segments))\n for r in records:\n logger.debug('%s %s %s %s' % (r.scaffold, r.strand,\n r.start, r.end))\n\n return result_segments", "def parse_file(filepath):\n with fitz.open(filepath) as doc:\n block_dict = {(idx + 1): page.getText(\"blocks\") for idx, page in enumerate(doc)}\n block_dict = {\n key: [block[4] for block in value] for key, value in block_dict.items()\n }\n return block_dict", "def get_phot_pars(self, obsmode):\n npars, 
strp_obsmode, par_dict = self._parse_obsmode(obsmode)\n par_struct = self._make_par_struct(npars, par_dict)\n result_dict = {}\n\n for par in self._parkeys:\n row = self._get_row(strp_obsmode, par)\n row_struct = self._make_row_struct(row, npars)\n\n # compute_value returns a float\n if par in self._compute_keys:\n result_dict[par] = self._compute_value(row_struct, par_struct)\n else:\n result_dict[par] = row_struct['results'][0]\n\n result_dict[\"PHOTZPT\"] = self.imphttab_fits[0].header['PHOTZPT']\n return result_dict", "def fCZs(self) -> Dict[Tuple[int, ...], Optional[float]]:\n return {tuple(es.targets): es.fCZ for es in self.edges_specs}", "def read_CASTEP_cell(directory):\n with dir_context(directory):\n nruter=dict()\n nruter[\"lattvec\"]=np.empty((3,3))\n nruter[\"elements\"]=[] \n f=open(seedname+\".cell\",\"r\")\n castep_cell = f.readlines()\n atoms_list = []\n for index, line in enumerate(castep_cell):\n if '%BLOCK LATTICE_CART' in line.upper(): \n for i in xrange(3):\n new_line = index + i + 1\n nruter[\"lattvec\"][:,i]=[float(j) \n\t\t\t\tfor j in castep_cell[new_line].split()] \n elif '%BLOCK POSITIONS_FRAC' in line.upper():\n index_start = index\n elif '%ENDBLOCK POSITIONS_FRAC' in line.upper():\n index_end = index\n nruter[\"lattvec\"]*=0.1\n for i in range(index_start+1, index_end):\n atoms_list.append(castep_cell[i].split()) \n\tatoms_list = filter(None,atoms_list)\n atoms_list.sort(key=sort_elements)\n natoms1 = len(atoms_list)\n nruter[\"positions\"]=np.empty((3,natoms1))\n for i in range(natoms1): \n nruter[\"positions\"][:,i]=[float(atoms_list[i][j]) for j in xrange(1,4)]\n nruter[\"elements\"].append(str(atoms_list[i][0]))\n create_indices = nruter[\"elements\"]\n nruter[\"elements\"]=list(set(nruter[\"elements\"]))\n nruter[\"elements\"].sort(key=sort_elements2)\n nruter[\"numbers\"]= np.array([int(create_indices.count(nruter[\"elements\"][i]))\n\t\t\t for i in range(len(nruter[\"elements\"]))],dtype=np.intc) \n nruter[\"types\"]=[]\n for i in xrange(len(nruter[\"numbers\"])):\n nruter[\"types\"]+=[i]*nruter[\"numbers\"][i] \n return nruter", "def _get_geometry_properties_by_name(self, names):\n geometry_properties = {}\n for name in names:\n try:\n prop = self.gui.geometry_properties[name]\n except KeyError:\n continue\n geometry_properties[name] = prop\n return geometry_properties", "def _get_data(self, position):\n index = self._indexes[position]\n basename = self._waves[index].with_suffix(\".npy\").name\n return tuple(np.load(self._path / x / basename) for x in self._variables)", "def get_material_mapping(self):\n return {name: self.get_material(name) for name in self.parts.keys()}", "def _parse_proxy_files(self):\n out = dict()\n\n # name may be something like \"ssp1_[YEAR].tif\", which actually refers to multiple files\n # such as \"ssp1_2010.tif\" and \"ssp1_2020.tif\" when info['years'] == [2010, 2020]\n for name, info in self.proxy_files.items():\n # promote strs to list\n if isinstance(info['variables'], str):\n info['variables'] = [info['variables']]\n\n if isinstance(info['years'], int):\n info['years'] = [info['years']]\n\n # flags are optional\n if 'flags' in info:\n if isinstance(info['flags'], str):\n info['flags'] = [info['flags']]\n else:\n info['flags'] = []\n\n for variable in info['variables']:\n\n # file name may use an abbreviation of the variable name\n # if info['variables'] is a dict of form {variable: abbreviation}\n abbreviation = info['variables'][variable] if isinstance(info['variables'], dict) else variable\n\n for year in 
info['years']:\n # determine the actual name of the file containing variable variable for year year\n filename = name.replace('{variable}', abbreviation).replace('{year}', str(year))\n\n if filename not in out:\n out[filename] = {'variables': [], 'years': [], 'flags': info['flags']}\n\n if variable not in out[filename]['variables']:\n out[filename]['variables'].append(variable)\n if year not in out[filename]['years']:\n out[filename]['years'].append(year)\n\n self.proxy_files = out", "def _load_data_tracks(file_names, load_func=load_gpx_file):\n tracks = {}\n with concurrent.futures.ProcessPoolExecutor() as executor:\n future_to_file_name = {\n executor.submit(load_func, file_name): file_name\n for file_name in file_names\n }\n for future in concurrent.futures.as_completed(future_to_file_name):\n file_name = future_to_file_name[future]\n try:\n t = future.result()\n except TrackLoadError as e:\n log.error(f\"Error while loading {file_name}: {e}\")\n else:\n tracks[file_name] = t\n return tracks", "def obtain_rel_dicts(result,numbers,chain_name,current_class,seq_pos,seq_pos_n,gpcr_pdb,gpcr_aa,gnum_classes_rel,multiple_chains, pdbid, simplified=False,add_aa=False,seq_pdb=False,all_struc_num=False):\n chain_nm_seq_pos=\"\"\n rs_by_seg={1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: [], 10: [], 11: [], 12: [], 13: [], 14: [], 15: [], 16: [], 17: []}\n if multiple_chains:\n chain_nm_seq_pos=\":\"+chain_name\n pos_gnum = numbers[current_class]\n for pos in result:\n if pos[0] != \"-\": #Consider only num in the pdb\n db_pos=pos[1][1]\n if db_pos:\n gnum_or_nth=\"\"\n this_gnum = pos_gnum[db_pos][1]\n if this_gnum: #If exist GPCR num for this position\n if simplified:\n (chain_num,bw,gpcrdb)=re.split('\\.|x', this_gnum)\n this_gnum=chain_num+\"x\"+gpcrdb\n if add_aa:\n this_pdb=str(pos[0][1])+\"-\"+chain_name+\"-\"+pos_gnum[db_pos][0]\n else:\n this_pdb=str(pos[0][1])+\"-\"+chain_name\n else:\n if add_aa:\n this_pdb=[pos[0][1],chain_name,pos_gnum[db_pos][0]]\n else:\n this_pdb=[pos[0][1],chain_name]\n gpcr_pdb[this_gnum]=this_pdb\n gpcr_aa[this_gnum]=[pos_gnum[db_pos][0], chain_name]\n gnum_or_nth=this_gnum\n rs_by_seg[pos_gnum[db_pos][2]].append(pos[0][1]+chain_nm_seq_pos) #Chain!!\n if type(seq_pdb)==dict:\n seq_pdb[db_pos]={\"pdb\":[pos[0][1],chain_name],\"gnum\":gnum_or_nth}\n seq_pos[seq_pos_n][2]=gnum_or_nth\n seq_pos_n+=1\n #######\n seg_li=[]\n for seg in range(2,17): #2,17\n slen=len(rs_by_seg[seg])\n if slen==0:\n seg_li.append([])\n elif slen==1:\n seg_li.append([rs_by_seg[seg][0]])\n else:\n seg_li.append([rs_by_seg[seg][0],rs_by_seg[seg][-1]])\n #######\n other_classes=list({\"A\",\"B\",\"C\",\"F\"} - set(current_class))\n other_classes_ok=[]\n for name in other_classes:\n if numbers[name]:\n other_classes_ok.append(name)\n gnum_classes_rel[name]={}\n for pos, (res,gnum,segm) in pos_gnum.items():\n if gnum:\n for class_name in other_classes_ok:\n gnum_altclass=numbers[class_name][pos][1]\n if gnum_altclass:\n if all_struc_num:\n (chain_num,bw,gpcrdb)=re.split('\\.|x',gnum)\n my_num=chain_num+\"x\"+gpcrdb\n (achain_num,abw,agpcrdb)=re.split('\\.|x',gnum_altclass)\n alt_gnum=achain_num+\"x\"+agpcrdb\n else:\n my_num=gnum.split(\"x\")[0]\n alt_gnum=gnum_altclass.split(\"x\")[0]\n gnum_classes_rel[class_name][alt_gnum]=my_num\n if type(seq_pdb)==dict:\n return(gpcr_pdb,gpcr_aa,gnum_classes_rel,other_classes_ok,seq_pos,seq_pos_n,seg_li,seq_pdb)\n else:\n return(gpcr_pdb,gpcr_aa,gnum_classes_rel,other_classes_ok,seq_pos,seq_pos_n,seg_li)", "def ReadFromFile(self):\n\n data = 
\"\"\n try:\n with open(self.fileLoc, \"r\") as file:\n data += file.read()\n except IOError:\n with open(self.fileLoc, \"w\") as file:\n file.write(\" \")\n return {}\n \n if len(data) == 0:\n return {}\n\n data = self.Decrypt(data)\n\n data = \"\".join(data.split())\n kvstrings = data.split(\"%\")\n kvstrings = filter(None, kvstrings)\n\n pairs = {}\n for x in kvstrings:\n kv = x.split(\":\")\n pairs[kv[0]] = kv[1]\n\n return pairs", "def extract_artifacts (self, layout):\n print('Extracting artifacts according to layout:')\n for path, afs in layout.items():\n artifact = afs[0][0]\n member = afs[0][1]\n print(' %s (from %s) -> %s' % (member, artifact, path))\n outf = os.path.join(self.stpath, path)\n zfile.ZFile.extract(artifact.lpath, member, outf)\n\n self.add_file(outf)\n\n # Rename files, if needed.\n for root, _, filenames in os.walk(self.stpath):\n for filename in filenames:\n fname = os.path.basename(filename)\n if fname in rename_files:\n bpath = os.path.join(root, os.path.dirname(filename))\n oldfile = os.path.join(bpath, fname)\n newfile = os.path.join(bpath, rename_files[fname])\n print('Renaming %s -> %s' % (oldfile, newfile))\n os.rename(oldfile, newfile)\n\n # And rename them in the files map too\n rename_these = [x for x in self.files.keys() if os.path.basename(x) in rename_files]\n for oldfile in rename_these:\n newfile = os.path.join(os.path.dirname(oldfile),\n rename_files[os.path.basename(oldfile)])\n self.files[newfile] = self.files[oldfile]\n del self.files[oldfile]", "def _getFacesAndMaterials(self):\r\n room = self.obj\r\n polygonDict = {} # a dict that holds faces (dict), their vertices (dict: positions and materials)\r\n mesh = room.meshes[0] # WARNING: supposed to work with a single mesh material\r\n poly = mesh.getPolygon(0) # get polygon list\r\n\r\n for n in range(0,mesh.numPolygons):\r\n polygonDict[n+1] = {}\r\n\r\n # get face (poly) materials\r\n poly = mesh.getPolygon(n)\r\n polygonDict[n+1]['material'] = poly.material_name.replace('MA','') # since blender add 'MA' to each material name\r\n\r\n # get face (poly) vertices positions\r\n v1_xyz = room.worldTransform * mesh.getVertex(poly.material_id, poly.v1).XYZ\r\n v2_xyz = room.worldTransform * mesh.getVertex(poly.material_id, poly.v2).XYZ\r\n v3_xyz = room.worldTransform * mesh.getVertex(poly.material_id, poly.v3).XYZ\r\n v4_xyz = room.worldTransform * mesh.getVertex(poly.material_id, poly.v4).XYZ\r\n polygonDict[n+1]['vertices'] = [v1_xyz, v2_xyz, v3_xyz, v4_xyz]\r\n # if gl.dbg: print (' ' + 'face ' + str(n) + ' - materials '+ poly.material_name.replace('MA',''))\r\n return polygonDict", "def text_to_json(file):\n\n #--------------------------------------------------------------------------\n # First read in the data\n #--------------------------------------------------------------------------\n x = []\n y = []\n z = []\n isFile = False\n if isinstance(file, str):\n isFile = True\n file = open(file, 'rt')\n lines = file.readlines()\n else:\n lines = file.readlines()\n reference = ''\n for line in lines:\n sline = line.strip()\n if sline.startswith('#'):\n reference += sline\n continue\n if sline.startswith('>'):\n if len(x): # start of new line segment\n x.append(np.nan)\n y.append(np.nan)\n z.append(np.nan)\n continue\n else: # start of file\n continue\n if not len(sline.strip()):\n continue\n parts = sline.split()\n if len(parts) < 3:\n raise ShakeLibException(\n 'Rupture file %s has no depth values.' 
% file)\n y.append(float(parts[0]))\n x.append(float(parts[1]))\n z.append(float(parts[2]))\n if isFile:\n file.close()\n\n # Construct GeoJSON dictionary\n\n coords = []\n poly = []\n for lon, lat, dep in zip(x, y, z):\n if np.isnan(lon):\n coords.append(poly)\n poly = []\n else:\n poly.append([lon, lat, dep])\n if poly != []:\n coords.append(poly)\n\n d = {\n \"type\": \"FeatureCollection\",\n \"metadata\": {},\n \"features\": [\n {\n \"type\": \"Feature\",\n \"properties\": {\n \"rupture type\": \"rupture extent\",\n \"reference\": reference\n },\n \"geometry\": {\n \"type\": \"MultiPolygon\",\n \"coordinates\": [coords]\n }\n }\n ]\n }\n return d", "def loadallvertices(self):\n if self.filedb is None:\n return\n vertices = dict()\n line_pattern = r\"[A-Z]{3},[A-Z]{3},[\\d]+$\"\n try:\n with open(self.filedb) as f:\n for line in f:\n # Recover origin, destiny and cost\n if bool(re.match(line_pattern, line)):\n start, finish, cost = line.rstrip('\\n\\r').split(\",\")\n # Create route entry\n route = {finish: int(cost)}\n origin_dict = vertices.get(start)\n if origin_dict is not None:\n origin_dict.update(route)\n vertices[start] = origin_dict\n else:\n vertices[start] = route\n\n with open(self.filedb) as f:\n for line2 in f:\n if bool(re.match(line_pattern, line2)):\n # Recover origin, destiny and cost\n start, finish, cost = line2.rstrip('\\n\\r').split(\",\")\n # Finish must be a vertice also\n if vertices.get(finish) is None:\n vertices[finish] = {finish: 0}\n\n except Exception as e:\n logging.error(\"File open error.\" + str(e))\n return None\n\n return vertices", "def voices_in_part_in_parts (parts):\n # don't crash if p doesn't have an id (that's invalid MusicXML,\n # but such files are out in the wild!\n dictionary = {}\n for p in parts:\n voices = voices_in_part (p)\n if (hasattr (p, \"id\")):\n dictionary[p.id] = voices\n else:\n # TODO: extract correct part id from other sources\n dictionary[None] = voices\n return dictionary;", "def frac2cart_all(frac_coordinates, lattice_array):\n coordinates = deepcopy(frac_coordinates)\n for coord in range(coordinates.shape[0]):\n coordinates[coord] = cartisian_from_fractional(coordinates[coord],\n lattice_array)\n return coordinates", "def test_parse_mapping_file_to_dict(self):\r\n s1 = ['#sample\\ta\\tb', '#comment line to skip',\r\n 'x \\t y \\t z ', ' ', '#more skip', 'i\\tj\\tk']\r\n exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],\r\n ['sample', 'a', 'b'],\r\n ['comment line to skip', 'more skip'])\r\n mapdict, comments = parse_mapping_file_to_dict(s1)\r\n expdict = {'x': {'a': 'y', 'b': 'z'}, 'i': {'a': 'j', 'b': 'k'}}\r\n self.assertEqual(mapdict, expdict)\r\n self.assertEqual(comments, ['comment line to skip', 'more skip'])", "def read_multiple_alignments(tree, directory_path, coordinates):\n multiple_alignment_dict = {}\n for coord in coordinates:\n try:\n handle = open(os.path.join(directory_path, coord[0]), 'rb')\n multiple_alignment_dict[coord[0]] = cpickle.load(handle)\n except Exception, e:\n syserr(\"No alignment for %s, going on without it.\\n\" % coord[0])\n syserr(str(e) + \"\\n\")\n\n return multiple_alignment_dict", "def func2(string:str):\n with open(string,\"r\") as file:\n data = file.read()\n data = data.split(\"bandwidths [1]:\")[0]\n\n final = {}\n for i in range(1,3):\n final[\"formants [{}]\".format(i)] = []\n my_list = data.split(\"formants\")\n for i in range(2,4):\n final[\"formants [{}]\".format(i-1)].extend(list(map(pars_points,my_list[i].split(\"points \")[1:])))\n return final", "def __get_coords_from(self, 
name):\n geolocator = Nominatim(user_agent=\"spanish\")\n geocode = RateLimiter(geolocator.geocode, min_delay_seconds=1)\n location = geocode(name)\n return {\n \"name\": name,\n \"latitude\": location.latitude,\n \"longitude\": location.longitude,\n }", "def get_chunks(cube_shape, coord_names, chunk=True, step=2):\n\n ntimes = cube_shape[0]\n\n if chunk:\n assert coord_names[0] == 'time'\n\n remainder = ntimes % step\n while remainder == 1:\n step = step + 1\n remainder = ntimes % step\n\n start_indexes = range(0, ntimes, step)\n else:\n start_indexes = [0]\n step = ntimes\n\n return start_indexes, step", "def test_modify_coords(self):\n xyz1 = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((1.53830201, 0.86423425, 0.07482439), (0.94923576, -0.20847619, -0.03881977),\n (-0.56154542, -0.31516675, -0.05011465), (-1.18981166, 0.93489731, 0.17603211),\n (1.49712659, -1.15833718, -0.15458647), (-0.87737433, -0.70077243, -1.02287491),\n (-0.87053611, -1.01071746, 0.73427128), (-0.48610273, 1.61361259, 0.11915705))}\n xyz2 = {'symbols': ('C', 'C', 'N', 'H', 'H', 'H'), 'isotopes': (12, 12, 14, 1, 1, 1),\n 'coords': ((-0.48629842, 0.00448354, 0.00136213), (0.97554967, -0.0089943, -0.00273253),\n (2.13574353, -0.01969098, -0.00598223), (-0.88318669, -0.63966273, -0.78887729),\n (-0.87565097, -0.35336611, 0.95910491), (-0.86615712, 1.01723058, -0.16287498))}\n xyz3 = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-2.77437517, 0.17200669, 0.18524832), (-1.64613785, -0.19208096, 0.80434075),\n (-0.40774525, 0.26424657, -0.07952902), (-0.26203276, 2.09580334, -0.05090198),\n (-0.67096595, -0.16397552, -1.42109845), (0.89264107, -0.40136991, 0.41083574),\n (2.12441624, -0.1300863, -0.44918504), (-1.50623429, -1.27619307, 0.9524955),\n (-1.45114032, 0.18501518, 1.82167553), (-1.59654975, 2.25615634, -0.09052499),\n (-1.65730431, -0.11079255, -1.400057), (0.74870779, -1.48997779, 0.41386971),\n (1.10331691, -0.11082471, 1.44762119), (2.41262211, 0.92463409, -0.42840126),\n (1.95758158, -0.4244074, -1.48990015), (2.97418137, -0.70882619, -0.0719403))}\n xyz4 = {'symbols': ('C', 'C', 'O', 'C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 16, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-1.2713687423422115, -0.7423678681688866, -0.6322577211421921),\n (-0.08008635702808505, -0.40741599130374034, 0.2550353232234618),\n (-0.5452666768773297, -0.20159898814584978, 1.588840559327411),\n (0.6158080809151276, 0.8623086771891557, -0.21553636846891006),\n (1.9196775903993375, 1.0155396004927764, 0.5174563928754532),\n (3.0067486097953653, 1.0626738453913969, -0.05177300486677717),\n (-2.012827991034863, 0.06405231524730193, -0.6138583677564631),\n (-0.9611224758801538, -0.9119047827586647, -1.6677831987437075),\n (-1.7781253059828275, -1.6433798866337939, -0.27003123559560865),\n (0.6204384954940876, -1.2502614603989448, 0.2715082028581114),\n (-1.0190238747695064, -1.007069904421531, 1.8643494196872146),\n (0.014234510343435022, 1.753076784716312, -0.005169050775340246),\n (0.827317336700949, 0.8221266348378934, -1.2893801191974432),\n (1.8498494882204641, 1.107064846374729, 1.6152311353151314))}\n xyz5 = {'symbols': ('N', 'C', 'C', 'C', 'H', 'H', 'C', 'C', 'C', 'C', 'H', 'H', 'C', 'C', 'C', 'H', 'C', 'C',\n 'N', 'H', 'H', 'C', 'H', 'C', 'C', 'C', 'H', 'H', 'H', 'H', 'C', 'C', 'C', 'H', 
'H', 'H',\n 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'O', 'O', 'C', 'O', 'H', 'H', 'H'),\n 'isotopes': (14, 12, 12, 12, 1, 1, 12, 12, 12, 12, 1, 1, 12, 12, 12, 1, 12, 12, 14, 1, 1, 12, 1, 12, 12,\n 12, 1, 1, 1, 1, 12, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 16, 16, 12, 16, 1, 1, 1),\n 'coords': ((-0.766219, -0.248648, -0.347086), (0.667812, -0.150498, -0.496932),\n (-1.490842, 1.000959, -0.245328), (1.311194, -1.339578, -1.19388),\n (0.976451, 0.831716, -0.911173), (1.231101, -0.062221, 0.660162),\n (-1.346406, -1.400789, 0.294395), (-1.022138, 2.069095, 0.533928),\n (-2.673271, 1.125443, -1.008282), (2.575265, -0.94966, -1.974365),\n (1.534634, -2.14679, -0.467576), (0.584227, -1.791819, -1.905459),\n (-0.574689, -2.103356, 1.24726), (-2.643838, -1.861964, -0.035016),\n (-1.73741, 3.268914, 0.549347), (-0.105632, 1.96688, 1.126589),\n (-3.134563, -0.04419, -1.826788), (-3.378705, 2.332664, -0.970971),\n (3.611589, -0.28425, -1.113057), (2.30114, -0.222978, -2.774031),\n (2.969795, -1.853671, -2.489377), (-1.04268, -3.284134, 1.815898),\n (0.388329, -1.696921, 1.570938), (-3.645512, -1.174123, -0.925823),\n (-3.088386, -3.061615, 0.555145), (-2.911462, 3.400813, -0.198004),\n (-1.376219, 4.102013, 1.150524), (-3.935589, 0.254447, -2.531702),\n (-2.298405, -0.411572, -2.461402), (-4.293927, 2.444159, -1.549116),\n (4.776265, 0.123769, -1.959689), (4.064268, -1.169457, 0.001273),\n (-2.30222, -3.77607, 1.457834), (-0.433782, -3.814872, 2.545573),\n (-4.135291, -1.935447, -1.571709), (-4.453058, -0.768805, -0.272612),\n (-4.078335, -3.442593, 0.302875), (-3.465321, 4.337257, -0.179068),\n (5.500278, 0.67338, -1.336133), (5.30611, -0.707961, -2.446036),\n (4.433161, 0.821539, -2.74083), (4.954327, -0.743379, 0.488676),\n (4.300156, -2.200598, -0.295594), (3.265545, -1.194959, 0.769181),\n (-2.671885, -4.702569, 1.890597), (1.78286, 0.089948, 1.873468),\n (1.758606, 1.382484, 2.130308), (2.973471, 2.040706, 1.623336),\n (2.813335, 2.256698, 0.248083), (2.919925, 3.030613, 2.105087),\n (3.858517, 1.438684, 1.858856), (3.005024, 1.410381, -0.277159))}\n xyz6 = {'symbols': ('N', 'C', 'C', 'H', 'C', 'H', 'H', 'N', 'H', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H',\n 'H', 'H', 'O', 'O', 'H', 'C', 'H', 'H', 'O', 'H'),\n 'isotopes': (14, 12, 12, 1, 12, 1, 1, 14, 1, 12, 12, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 16, 16, 1, 12, 1, 1, 16, 1),\n 'coords': ((2.608231, -0.458895, 1.629197), (2.408715, 0.132166, 0.318653),\n (1.174426, -0.323822, -0.471554), (3.304408, -0.071078, -0.291093),\n (-0.13532, 0.016735, 0.225918), (1.210534, 0.150539, -1.46601),\n (1.221625, -1.416078, -0.631885), (-1.316045, -0.574442, -0.379686),\n (-0.086456, -0.362851, 1.260573), (-1.468231, -0.411368, -1.77232),\n (-2.505886, -0.419831, 0.432347), (-2.403425, -0.886127, -2.107496),\n (-0.621099, -0.850903, -2.320815), (-3.364172, -0.88926, -0.068909),\n (-2.767365, 0.637288, 0.628231), (-2.360065, -0.927144, 1.400068),\n (2.574849, -1.475283, 1.579253), (1.886591, -0.170591, 2.284831),\n (2.375177, 1.228181, 0.441157), (-0.231725, 1.121336, 0.301367),\n (-1.455199, 0.947478, -2.255384), (-2.58006, 1.611276, -1.811891),\n (-3.315019, 1.53868, -2.760245), (-3.713498, 1.338038, -4.025244),\n (-4.754452, 0.99077, -4.021055), (-3.584519, 2.351475, -4.444827),\n (-2.87635, 0.381401, -4.513467), (-1.966974, 0.665311, -4.338804))}\n mol1 = converter.molecules_from_xyz(xyz1)[1]\n mol2 = converter.molecules_from_xyz(xyz2)[1]\n mol3 = converter.molecules_from_xyz(xyz3)[1]\n mol4 = converter.molecules_from_xyz(xyz4)[1]\n mol5 = 
converter.molecules_from_xyz(xyz5)[1] # a TS\n mol6 = converter.molecules_from_xyz(xyz6)[1] # a TS\n\n # test atom modification types\n modification_type = 'atom'\n\n # test R_atom modification\n indices, new_val = [0, 1], 1.5\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.06385450948408691, 1.6253138441202686, 0.042870253583423557),\n (-0.02582727173313104, 0.39833637030950975, 0.9010563970736782),\n (-0.02582727173313104, -1.003336361301907, 0.3272239637891734),\n (-0.02582727173313104, -1.003336361301907, -1.0899990532469916),\n (-0.08138177769352953, 0.465646654907214, 2.0002403496097383),\n (0.865704477722866, -1.5264119285073852, 0.6825623354173815),\n (-0.9185767861007101, -1.5268489957651346, 0.6785930201570352),\n (0.14577602706217008, -0.07998849407327513, -1.367625604543457))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [1, 0], -1.5\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.01167393998357115, -0.5225807439329089, -0.9899595616178738),\n (-0.040525509131742084, 0.26844387347263365, -2.2633625897949208),\n (0.01167393998357115, -0.5225807439329089, 1.4216698859880004),\n (0.01167393998357115, 0.8926022581407576, 1.3456557382334218),\n (0.11202785529567173, -2.2718515121487206, 0.04691079079738447),\n (-0.8954040276884763, -0.8508241498293034, 1.9356427400340799),\n (0.8880330020652463, -0.8439168226596885, 1.990234136037933),\n (-0.13167393678263156, 1.1200467154192293, 0.4039467156910099))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), - new_val, 5)\n\n # test A_atom modification\n indices, new_val = [2, 1, 0], 140\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.011940763595588438, -0.90654939253321, -1.1784203714214114),\n (0.011940763595588438, -0.90654939253321, 0.05065327345758153),\n (-0.02531707366035523, 0.06629439921242253, 1.2108932996837143),\n (0.011940763595588438, 1.5283906429141458, 0.05806971900412017),\n (0.03285612994605798, -1.8458593499019589, 0.6277855724118742),\n (-0.9645745795119229, 0.3758422785924207, 1.4467600455414558),\n (0.8166299978590752, 0.37902049128771864, 1.551524925579085),\n (-0.10465928281651019, 1.2266969334608921, -0.8663115945839973))}\n\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n # test changing an angle to 180 degrees\n indices, new_val = [0, 1, 2], 180\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.0019281473980474666, 1.559641181574566, 1.013927346529066),\n (-0.0019281473980474772, 
0.42219553322547265, 0.548267146825631),\n (-0.0019281473980474772, -0.9794771983859442, -0.025565286458873793),\n (-0.0019281473980474772, -0.9794771983859442, -1.4427883034950388),\n (-0.05748265335844597, 0.4895058178231769, 1.6474510993616909),\n (0.8896036020579495, -1.5025527655914221, 0.32977308516933435),\n (-0.8946776617656266, -1.5029898328491718, 0.32580376990898796),\n (0.16967515139725364, -0.05612933115731222, -1.7204148547915041))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val)\n\n # test changing a 180 degree angle to something else\n indices, new_val = [0, 1, 2], 120\n expected_xyz = {'symbols': ('C', 'C', 'N', 'H', 'H', 'H'), 'isotopes': (12, 12, 14, 1, 1, 1),\n 'coords': ((0.7757362507465277, 0.4478716325630875, 0.7767867108403768),\n (-0.3207007101270898, -0.18515666614565915, 0.04582870107149262),\n (-0.3207007101270898, -0.18515666614565915, -1.1144190466784232),\n (-0.3207007101270898, 0.8374974028016162, 1.8964626512298475),\n (-1.2063452316056904, -0.6964838693490394, 1.8964625790172804),\n (0.5649437124447699, -0.6964840572534022, 1.896462566459638))}\n new_xyz = converter.modify_coords(coords=xyz2, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol2)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol2), new_val, 5)\n\n # test D_atom modification\n indices, new_val = [0, 1, 2, 3], 30\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.3812553590829658, 1.4249753409811934, 0.24885596109763952),\n (0.13588307254069157, 0.47112021672976, 0.8262208968300058),\n (0.13588307254069157, -0.9305525148816568, 0.25238846354550093),\n (0.13588307254069157, -0.9305525148816568, -1.1648345534906641),\n (0.08032856658029308, 0.5384305013274643, 1.9254048493660656),\n (1.0274148219966885, -1.4536280820871348, 0.6077268351737091),\n (-0.7568664418268876, -1.4540651493448844, 0.6037575199133627),\n (0.30748637133599266, -0.007204647653024865, -1.4424611047871294))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [3, 2, 1, 0], -30\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.17268751280677364, -0.941696827801256, -1.1487068217042242),\n (-0.17268751280677364, -0.941696827801256, 0.08036682317476873),\n (-0.17268751280677364, 0.3328411496875977, 0.8986107061160642),\n (0.4830966870190505, 1.3983204216355287, 0.23286144075770054),\n (-0.18773471865125574, -1.8811191078717768, 0.6574991306756568),\n (-1.0994105700891015, 0.3771264916699556, 1.4764735369276594),\n (0.6806108103574798, 0.3121359507669669, 1.5812384626874982),\n (-0.2075631130119835, 1.1944491200970329, -0.8365980489813365))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n 
self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1),\n 360 + new_val, 5)\n\n indices, new_val = [0, 1, 2, 3], -30\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.37739906428687087, 1.4249753409811934, 0.24885596109763952),\n (-0.13973936733678652, 0.47112021672976, 0.8262208968300058),\n (-0.13973936733678652, -0.9305525148816568, 0.25238846354550093),\n (-0.13973936733678652, -0.9305525148816568, -1.1648345534906641),\n (-0.195293873297185, 0.5384305013274643, 1.9254048493660656),\n (0.7517923821192105, -1.4536280820871348, 0.6077268351737091),\n (-1.0324888817043656, -1.4540651493448844, 0.6037575199133627),\n (0.0318639314585146, -0.007204647653024865, -1.4424611047871294))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1),\n 360 + new_val, 5)\n\n # test group modification types\n modification_type = 'group'\n\n # test R_group modification\n indices, new_val = [0, 1], 1.5\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.06385450815440741, 1.625313844153823, 0.04287025350146201),\n (-0.02582727144301671, 0.39833637029935165, 0.9010563970984908),\n (-0.02582727144301671, -1.0033363613120652, 0.327223963813986),\n (-0.02582727144301671, -1.0033363613120652, -1.089999053222179),\n (-0.0813817733100206, 0.4656466548101805, 2.0002403498467567),\n (0.8657044801882787, -1.5264119271233758, 0.6825623320367284),\n (-0.9185767836497759, -1.5268489971713646, 0.6785930235919653),\n (0.1457760273522844, -0.07998849408343323, -1.3676256045186443))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n # test A_group modification\n indices, new_val = [0, 1, 2], 160\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.01997925208754263, 1.625852603711386, 0.708691800251658),\n (-0.009887200766722545, 0.3981406366172051, 0.6591605436173553),\n (-0.009887200766722545, -1.0035320949942117, 0.08532811033285048),\n (-0.009887200766722545, -1.0035320949942117, -1.3318949067033146),\n (-0.06544170263372645, 0.465450921128034, 1.7583444963656214),\n (0.8816445508645728, -1.5266076608055221, 0.44066647855559316),\n (-0.9026367129734817, -1.5270447308535111, 0.4366971701108293),\n (0.16171609802857856, -0.08018422776557976, -1.6095214579997799))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [1, 2, 5], 160\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 
1, 1),\n 'coords': ((-0.45549818019466204, 1.8548729964273216, 0.8440028131622062),\n (-0.2667929723517851, 0.6671106629415136, 1.42912314652022),\n (-0.2163066356464933, -0.45426196440936106, 0.30526758056697156),\n (1.3109140692843337, 0.4741705899686004, -0.12165329723035323),\n (-1.3557392716759613, 0.27771606050413156, -0.16203238949855803),\n (-0.2163066356464933, -1.8492005047245035, -0.34944907261899716),\n (-0.2163066356464933, -1.8492005047245035, -1.87604687202156),\n (-1.0601386155429, 0.3401156691690679, 2.122303234960202),\n (0.6302934527577109, 0.5164940342603479, 2.051815682570846),\n (1.143418340718557, 1.3271327629309078, 0.9043191341647172),\n (-1.5046641822171405, 0.8405156651772538, 0.6362234563562041),\n (-1.1248176985937233, -2.3816433802478305, -0.03815279071754074),\n (0.6330922017716909, -2.4415422695908298, 0.013011559357363423),\n (0.707681641272436, -1.4302805756837962, -2.2843133571390752),\n (-1.061876978104781, -1.2808214124615414, -2.27542464397285),\n (-0.30131566361820894, -2.876339919190297, -2.2463334380185054))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 5)\n\n indices, new_val = [5, 2, 1], 160\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((0.2917048572251579, -1.5727341554069034, -1.3423072397835754),\n (0.2917048572251579, -1.5727341554069034, -0.0048638500194817524),\n (0.2917048572251579, -0.06886266257406626, 0.5064553318371674),\n (-1.363795569744117, -0.1202634403830567, -0.28936363114537844),\n (1.2964570556359054, 0.04149003667864859, -0.508809719558267),\n (0.4099139249017979, 1.1367441270166645, 1.4588451220109844),\n (0.29481769872300884, 2.504661621457458, 0.7909713103796479),\n (1.1685736645928884, -2.0373473546555556, 0.47685945259484286),\n (-0.5312728539867155, -2.0767912763680947, 0.5278926826114716),\n (-1.2231052441089643, -1.4156454828005882, -0.6216441060907665),\n (1.4364524039686508, -0.9213654475865127, -0.6804052856633311),\n (1.3966722481626304, 1.107137467791805, 1.9397033126698722),\n (-0.33241474313836356, 1.0625526837349102, 2.2633130452338497),\n (-0.7009351031697479, 2.671307058557274, 0.3706911401148234),\n (1.0334518240640673, 2.6225101662569066, -0.007826505507309234),\n (0.474437928409419, 3.293432289151483, 1.52916604039102))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 4)\n\n # test D_group modification\n indices, new_val = [0, 1, 2, 3], 98.7\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.751853407099498, 1.1325746654576616, 0.9630889493590222),\n (0.2705229494881336, 0.5773506493576217, 0.5667369568416694),\n (0.2705229494881336, -0.8243220822537951, -0.00709547644283548),\n (0.2705229494881336, -0.8243220822537951, -1.4243184934790005),\n (0.21496844352773511, 0.644660933955326, 1.6659209093777292),\n (1.1620546989441305, -1.347397649459273, 
0.34824289518537266),\n (-0.6222265648794455, -1.3478347167170226, 0.3442735799250263),\n (0.4421262482834347, 0.09902578497483683, -1.7019450447754658))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [5, 2, 1, 0], 180\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((0.3034340517195509, -1.6113639549493641, -1.7901391417129255),\n (0.3034340517195509, -1.6113639549493641, -0.45269575194883194),\n (0.3034340517195509, -0.10749246211652697, 0.058623429907817215),\n (-1.3193844356755215, 0.6746571866866746, -0.30380395501671575),\n (1.3282593544657135, 0.581298860926198, -0.6678526090506967),\n (0.30343405171955073, -0.05040119820033895, 1.5985091447581203),\n (0.26233878444784786, 1.3540223173114139, 2.1955071424316666),\n (1.1803028491569083, -2.0759771588261957, 0.029027564277707585),\n (-0.5195436704231056, -2.115421071566818, 0.08006076790649397),\n (-1.414911803320983, 0.05150877481380545, -1.4915662613668217),\n (1.2907872270567131, 0.05736052141866721, -1.5046434284929022),\n (1.2266505257705096, -0.5178979180455376, 1.965811882691859),\n (-0.5283478351927398, -0.6406189828710822, 2.0028687871657294),\n (-0.6775241224477067, 1.8658969637383576, 1.9706253328328829),\n (1.0896028263747624, 1.9687229189733981, 1.8276430689661958),\n (0.35031987670665765, 1.2957313570336282, 3.285560142931404))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 5)\n\n # test groups modification types\n modification_type = 'groups'\n\n # test D_groups modification\n indices, new_val = [0, 1, 2, 3], 98.7\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.7692326765134374, 1.1252152574374596, 0.9810655314575423),\n (0.25314357064244697, 0.5699912505374165, 0.5847135445433043),\n (0.25314357064244697, -0.8316815836112654, 0.010881153979294123),\n (0.25314357064244697, -0.8316815836112654, -1.4063419471715688),\n (1.2326181278103254, 1.0755945976230115, 0.6133000157238186),\n (1.1446752957640132, -1.3547571699433192, 0.3662195585064876),\n (-0.6396059141384572, -1.3551941756763426, 0.3622501790547312),\n (0.4247468609767439, 0.09166629658280878, -1.6839684605765641))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=xyz1, indices=[4, 1, 2, 3], mol=mol1),\n 176.7937925, 5)\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=[4, 1, 2, 3], mol=mol1),\n 279.5679938, 5)\n\n indices, new_val = [5, 2, 1, 0], 100\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 
'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((0.17617288317697363, -1.4263876505749937, -1.3907356765118228),\n (0.17617288317697363, -1.4263876505749937, -0.05329233131383648),\n (0.17617288317697363, 0.07748361087633482, 0.4580268316508156),\n (0.8541264407563205, 1.1799297944814306, -0.8464435250524343),\n (1.0315484892431994, 0.12891222316318918, 1.606136465715537),\n (-1.2415001838455297, 0.5175023395992786, 0.8716616732793354),\n (-2.371148423802697, -0.377635430276555, 0.3685473045279144),\n (1.0530416597996317, -1.8910009834245878, 0.42843102214143425),\n (-0.646804798256715, -1.930444842122042, 0.47946418053365614),\n (1.322524386187, 0.1392850561843193, -1.55769653865906),\n (1.5807657244329665, 0.9071634481807671, 1.3438012611373469),\n (-1.4308626545937098, 1.5181627982792263, 0.46103575662853813),\n (-1.3101730016766409, 0.6090291604729325, 1.9628224613881304),\n (-2.328405219901557, -1.376683205512397, 0.811273322532136),\n (-2.345556604764221, -0.47877786163003033, -0.7207928024513892),\n (-3.3382397150969996, 0.059047399283163715, 0.6394658008190603))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 5)\n\n indices, new_val = [4, 3, 1, 0], 236.02\n expected_xyz = {'symbols': ('C', 'C', 'O', 'C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 16, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-0.3420713780282814, -0.726846939196746, -1.8608060734620697),\n (-0.3420713780282814, -0.726846939196746, -0.33809952744080163),\n (-1.5199121786498575, -1.3903247017047589, 0.12046140490433599),\n (-0.3420713780282814, 0.692986716189357, 0.21142750813209843),\n (0.8346249371329908, 0.870417947793265, 1.130523629422891),\n (1.8415843350511496, 1.49899165752528, 0.8160475329621943),\n (-1.232802341934429, -0.22348356564525385, -2.2527724067647172),\n (0.5474409007790566, -0.2291658204558631, -2.2587884226234842),\n (-0.36650899336409903, -1.7525658745827613, -2.2443893713107435),\n (0.5235538883628821, -1.286773819894118, 0.03414982827280788),\n (-1.525486055520759, -2.2842579938670644, -0.2668197974505191),\n (-1.246930807816442, 0.9000033565709169, 0.7927934676101465),\n (-0.26242043164905693, 1.4290013064896112, -0.5956842516835208),\n (0.739203033547077, 0.4163114365921572, 2.132044487804084))}\n new_xyz = converter.modify_coords(coords=xyz4, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol4)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol4), new_val, 5)\n\n # test 1-indexed input\n indices = [5, 4, 2, 1]\n new_xyz = converter.modify_coords(coords=xyz4, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol4, index=1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol4, index=1),\n new_val, 5)\n\n # test TSs\n indices = [19, 10, 4, 2]\n fragments = [[46, 47, 48, 49, 50, 51, 52], [f + 1 for f in range(45)]]\n self.assertAlmostEqual(calculate_dihedral_angle(coords=xyz5, torsion=indices, index=1), 56.83358841, 3)\n new_xyz = 
converter.modify_coords(coords=xyz5,\n indices=indices,\n new_value=300,\n modification_type='groups',\n mol=mol5,\n index=1,\n fragments=fragments,\n )\n self.assertAlmostEqual(calculate_dihedral_angle(coords=new_xyz, torsion=indices, index=1), 300, places=3)\n\n indices = [1, 2, 3, 5]\n fragments = [[f + 1 for f in range(23)], [24, 25, 26, 27, 28]]\n self.assertAlmostEqual(calculate_dihedral_angle(coords=xyz6, torsion=indices, index=1), 62.30597206, 3)\n new_xyz = converter.modify_coords(coords=xyz6,\n indices=indices,\n new_value=200,\n modification_type='groups',\n mol=mol6,\n index=1,\n fragments=fragments,\n )\n self.assertAlmostEqual(calculate_dihedral_angle(coords=new_xyz, torsion=indices, index=1), 200, places=3)\n \n coords={'coords': ((-0.7862825353221515, -0.28824023055636216, 0.4782944637692894),\n (0.21968869054702736, 0.40094256193652866, -0.2919820499085219),\n (-0.07796443595084417, 0.5692847962524797, -1.6621913220858304),\n (-1.102200211589376, -1.1132157833188596, -0.01879031191901484),\n (-1.5973749070505925, 0.29546848172306867, 0.6474145668621136),\n (0.4237940503863438, 1.3660724867336205, 0.19101403432872205),\n (1.1352054736534014, -0.1980893380251006, -0.2652264470061931),\n (-0.7497944593402266, 1.258221857416732, -1.7507029654486272)),\n 'isotopes': (14, 12, 16, 1, 1, 1, 1, 1),\n 'symbols': ('N', 'C', 'O', 'H', 'H', 'H', 'H', 'H')}\n indices=[3, 0, 1, 2]\n new_value=53.76\n modification_type=\"groups\"\n mol=Molecule(smiles=\"NCO\")\n new_xyz = converter.modify_coords(coords=coords,\n indices=indices,\n new_value=new_value,\n modification_type=modification_type,\n mol=mol)\n self.assertTrue(type(new_xyz[\"coords\"][0][0] is float))", "def get_kml_dict(self,name,filename):\n\n lon1,lon2,lat1,lat2=self.get_bounds()\n d={'lat1':lat1,'lat2':lat2,'lon1':lon1,'lon2':lon2, \\\n 'name':name,'filename':filename,'time':self.get_time()}\n return d", "def xyzarray2frac(x, y, z, latmat):\n length = min([len(x), len(y), len(z)])\n abc = np.empty((length, 3))\n abc[:] = np.nan\n for i in range(length):\n kart = kart2frac([x[i], y[i], z[i]], latmat)\n abc[i][0] = kart[0]\n abc[i][1] = kart[1]\n abc[i][2] = kart[2]\n return abc", "def get_fragment_data(self, fragments=None, refresh_cache=False):\n\n # If no fragments explicitly shown, grab all\n if fragments is None:\n fragments = self.data.fragments.keys()\n\n # Figure out the lookup\n lookup = []\n for frag in fragments:\n lookup.extend(list(self.data.fragments[frag].values()))\n\n if refresh_cache is False:\n lookup = list(set(lookup) - self._torsiondrive_cache.keys())\n\n # Grab the data and update cache\n data = self.client.query_procedures(id=lookup)\n self._torsiondrive_cache.update({x.id: x for x in data})", "def build_geometry(node: md.Document) -> dict:\n geoms = []\n times = []\n if get1(node, \"MultiGeometry\"):\n return build_geometry(get1(node, \"MultiGeometry\"))\n if get1(node, \"MultiTrack\"):\n return build_geometry(get1(node, \"MultiTrack\"))\n if get1(node, \"gx:MultiTrack\"):\n return build_geometry(get1(node, \"gx:MultiTrack\"))\n for geotype in GEOTYPES:\n geonodes = get(node, geotype)\n if not geonodes:\n continue\n for geonode in geonodes:\n if geotype == \"Point\":\n geoms.append(\n {\n \"type\": \"Point\",\n \"coordinates\": coords1(val(get1(geonode, \"coordinates\"))),\n }\n )\n elif geotype == \"LineString\":\n geoms.append(\n {\n \"type\": \"LineString\",\n \"coordinates\": coords(val(get1(geonode, \"coordinates\"))),\n }\n )\n elif geotype == \"Polygon\":\n rings = get(geonode, \"LinearRing\")\n 
coordinates = [coords(val(get1(ring, \"coordinates\"))) for ring in rings]\n geoms.append(\n {\n \"type\": \"Polygon\",\n \"coordinates\": coordinates,\n }\n )\n elif geotype in [\"Track\", \"gx:Track\"]:\n track = gx_coords(geonode)\n geoms.append(\n {\n \"type\": \"LineString\",\n \"coordinates\": track[\"coordinates\"],\n }\n )\n if track[\"times\"]:\n times.append(track[\"times\"])\n\n return {\"geoms\": geoms, \"times\": times}", "def extract_data_trans_info(lines, PE_dims):\n data_trans_info = {}\n for line_id in range(len(lines)):\n line = lines[line_id]\n if line.find('read_channel_intel') != -1:\n # Check the start and end of the block\n block_start, block_end = locate_data_trans_block(line_id, lines) \n block_lines = lines[block_start : block_end + 1]\n # Parse the data type\n block_line = block_lines[1]\n data_type = block_line.strip().split(' ')[0]\n #print(data_type)\n # Parse the start PE index\n block_line = block_lines[2]\n m = re.search(r'\\((.+?)\\)', block_line)\n fifo_name = m.group(1)\n PE_index_start = fifo_name.split('_')[-len(PE_dims):]\n PE_index_start = [int(s) for s in PE_index_start]\n #print(PE_index_start)\n # Parse the IO group name\n group_name = fifo_name.split('_')[1]\n #print(group_name)\n data_trans_info[group_name] = {\\\n 'in_block_lines': block_lines, 'in_block_pos': [block_start, block_end], \\\n 'PE_index_start': PE_index_start, 'data_type': data_type}\n if line.find('write_channel_intel') != -1:\n m = re.search(r'\\((.+?)\\)', line)\n fifo_name = m.group(1).split(',')[0]\n group_name = fifo_name.split('_')[1]\n if group_name in data_trans_info: \n # Check the start and end of the block\n block_start, block_end = locate_data_trans_block(line_id, lines)\n block_lines = lines[block_start : block_end + 1]\n # Parse the end PE index\n block_line = block_lines[3]\n m = re.search(r'\\((.+?)\\)', block_line)\n fifo_name = m.group(1).split(',')[0]\n PE_index_end = fifo_name.split('_')[-len(PE_dims):]\n PE_index_end = [int(s) for s in PE_index_end]\n #print(PE_index_end)\n group_name = fifo_name.split('_')[1]\n data_trans_info[group_name]['PE_index_end'] = PE_index_end\n data_trans_info[group_name]['out_block_lines'] = block_lines\n data_trans_info[group_name]['out_block_pos'] = [block_start, block_end]\n\n return data_trans_info", "def new_values(basenames):\n result = {}\n for basename in basenames:\n home = os.environ['HOME']\n p = os.path.join(home, basename)\n if not os.path.isfile(p):\n continue\n size = '%d' % p.size\n mtime = '%0.8f' % p.mtime\n result[basename] = Signature(mtime, size, text_digest(p.text()))\n return pad_keys(result, basenames)", "def map_part(data):\n dmt_data = {}\n for entry in data:\n working_part = Part(_map_part_base(entry))\n working_part.update(_map_part_plnt(entry))\n working_part.update(_map_part_revn(entry))\n\n dmt_data[working_part.PartNum] = working_part\n\n return dmt_data", "def path_to_ufsr_dict(input_path, start_radius=3, max_radius=10, number_min=4, number_of_moments=3):\n parser = Bio.PDB.FastMMCIFParser(QUIET=True)\n name = input_path[-8:-4]\n structure = parser.get_structure(name, input_path)\n ligands = find_ligands_residue(structure)\n output = {}\n for ligand in ligands:\n # get the corresponding array and make it a USFR vector\n binding_pocket = find_spatial_binding_pocket(structure, ligand, start_radius, max_radius, number_min)\n if not binding_pocket:\n continue\n arr = pocket_to_array(binding_pocket)\n usfr_vector = ufsr.encode(arr, number_of_moments=number_of_moments)\n\n # check for duplicate pockets\n 
duplicate = False\n for key in output.keys():\n if key.get_resname() == ligand.get_resname():\n if np.array_equal(output[key], usfr_vector):\n duplicate = True\n break\n if not duplicate:\n output[ligand] = usfr_vector\n return output", "def firstpass(data, pbc, symbols, units):\n # Get units information\n units_dict = style.unit(units)\n \n # Initialize parameter values\n atomsstart = None\n velocitiesstart = None\n natoms = None\n natypes = None\n firstatoms = False\n atomscolumns = 0\n masses = None\n num_masses_to_read = 0\n xlo = xhi = ylo = yhi = zlo = zhi = None\n xy = 0.0\n xz = 0.0\n yz = 0.0\n i = 0\n \n # Read str and files in the same way\n with uber_open_rmode(data) as fp:\n \n # Loop over all lines in fp\n for i, fullline in enumerate(fp):\n try:\n fullline = fullline.decode('UTF-8')\n except:\n pass\n \n # Remove comments after '#'\n try:\n comment_index = fullline.index('#')\n except:\n line = fullline\n else:\n line = fullline[:comment_index]\n \n terms = line.split()\n\n # Skip blank lines\n if len(terms)>0:\n \n # Read number of atoms \n if len(terms) == 2 and terms[1] == 'atoms':\n natoms = int(terms[0])\n\n # Read number of atom types\n elif len(terms) == 3 and terms[1] == 'atom' and terms[2] == 'types': \n natypes = int(terms[0])\n \n # Read boundary info\n elif len(terms) == 4 and terms[2] == 'xlo' and terms[3] == 'xhi':\n xlo = uc.set_in_units(float(terms[0]), units_dict['length'])\n xhi = uc.set_in_units(float(terms[1]), units_dict['length'])\n\n elif len(terms) == 4 and terms[2] == 'ylo' and terms[3] == 'yhi':\n ylo = uc.set_in_units(float(terms[0]), units_dict['length'])\n yhi = uc.set_in_units(float(terms[1]), units_dict['length'])\n\n elif len(terms) == 4 and terms[2] == 'zlo' and terms[3] == 'zhi':\n zlo = uc.set_in_units(float(terms[0]), units_dict['length'])\n zhi = uc.set_in_units(float(terms[1]), units_dict['length'])\n\n elif len(terms) == 6 and terms[3] == 'xy' and terms[4] == 'xz' and terms[5] == 'yz':\n xy = uc.set_in_units(float(terms[0]), units_dict['length'])\n xz = uc.set_in_units(float(terms[1]), units_dict['length'])\n yz = uc.set_in_units(float(terms[2]), units_dict['length'])\n \n # Identify starting line number for Atoms data\n elif len(terms) == 1 and terms[0] == 'Atoms':\n atomsstart = i + 1\n firstatoms = True\n\n # Check for atom_style comment\n try: \n comment_index = fullline.index('#')\n except:\n atom_style = None\n else:\n atom_style = fullline[comment_index + 1:].strip()\n \n # Count number of columns in Atoms table\n elif firstatoms:\n atomscolumns = len(terms)\n firstatoms = False\n \n # Identify starting line for Masses data\n elif len(terms) == 1 and terms[0] == 'Masses':\n if natypes is None:\n raise FileFormatError('# atom types must appear before Masses list')\n masses = [None for i in range(natypes)]\n num_masses_to_read = natypes\n \n # Read masses\n elif num_masses_to_read > 0:\n read_mass(terms, masses)\n num_masses_to_read -= 1\n\n # Identify starting line number for Velocity data\n elif len(terms) == 1 and terms[0] == 'Velocities':\n velocitiesstart = i + 1\n \n if i == 0:\n raise FileNotFoundError(f'File {data} not found')\n\n if natoms is None:\n raise FileFormatError('# atoms not found')\n\n if xlo is None or xhi is None:\n raise FileFormatError('xlo, xhi box dimensions missing')\n\n if ylo is None or yhi is None:\n raise FileFormatError('ylo, yhi box dimensions missing')\n\n if zlo is None or zhi is None:\n raise FileFormatError('zlo, zhi box dimensions missing')\n\n if atomsstart is None:\n raise 
FileFormatError('Atoms section missing')\n\n # Create system with natoms\n box = Box(xlo=xlo, xhi=xhi,\n ylo=ylo, yhi=yhi,\n zlo=zlo, zhi=zhi,\n xy=xy, xz=xz, yz=yz)\n atoms = Atoms(natoms=natoms)\n system = System(box=box, atoms=atoms, pbc=pbc, symbols=symbols,\n masses=masses)\n\n # Compile dict of params\n params = {}\n params['atomsstart'] = atomsstart\n params['velocitiesstart'] = velocitiesstart\n params['atomscolumns'] = atomscolumns\n params['atom_style'] = atom_style\n\n return system, params", "def calc_plaquette_map(self, plaquettes, include_3_body=True):\n if not include_3_body:\n return super().calc_plaquette_map(plaquettes)\n \n # sort in descending total plaquette size\n plqs = sorted(plaquettes, key=lambda p: (-p[1][0] * p[1][1], p))\n \n mapping = dict()\n for p in plqs:\n sites = qtn.tensor_2d.plaquette_to_sites(p)\n\n # pairwise (2-local) interactions\n for coo_pair in combinations(sites, 2):\n if all(tuple(starmap(self.is_qubit_coo, coo_pair))):\n mapping[coo_pair] = p\n\n # 3-local interactions\n for coo_triple in combinations(sites, 3):\n if all(tuple(starmap(self.is_qubit_coo, coo_triple))):\n # make sure face qubit is the third entry\n if self.is_face_coo(*coo_triple[0]):\n coo_triple = (coo_triple[2], coo_triple[1], coo_triple[0])\n\n elif self.is_face_coo(*coo_triple[1]):\n coo_triple = (coo_triple[0], coo_triple[2], coo_triple[1])\n \n mapping[coo_triple] = p\n \n return mapping", "def test_mapping_file_to_dict(self):\r\n s1 = ['#sample\\ta\\tb', '#comment line to skip',\r\n 'x \\t y \\t z ', ' ', '#more skip', 'i\\tj\\tk']\r\n exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],\r\n ['sample', 'a', 'b'],\r\n ['comment line to skip', 'more skip'])\r\n mapres = parse_mapping_file(s1) # map_data, header, comments\r\n mapdict = mapping_file_to_dict(*mapres[:2])\r\n expdict = {'x': {'a': 'y', 'b': 'z'}, 'i': {'a': 'j', 'b': 'k'}}\r\n self.assertEqual(mapdict, expdict)", "def read_texture_file(filename):\n \n # Deal with compressed files.\n import os\n if (os.path.splitext(filename)[1] == '.gz'):\n import gzip\n f = gzip.open(filename, 'rb')\n else:\n f = open(filename, 'r')\n\n # Stuff everything into a dict and a list\n # for now. 
Sort this out later (we will probably \n # want to have objects at some point\n header_data = {}\n particles = []\n\n header_lines = 5\n particle_header_lines = 9\n \n for line in f:\n if header_lines == 5:\n header_data['theia_lun'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 4:\n header_data['npartsallo'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 3:\n header_data['npartsused'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 2:\n header_data['n_expected_particles'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 1:\n header_data['nseen_particles'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 0:\n if particle_header_lines == 9:\n this_particle = {}\n this_particle['process_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 8:\n this_particle['particle_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 7:\n this_particle['old_particle_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 6:\n this_particle['old_process_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 5:\n this_particle['particle_class'] = line.strip()\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 4:\n this_particle['particle_position'] = np.array(\n [line[0:12], line[12:24], line[24:36]])\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 3:\n this_particle['idata_count'] = int(line)\n if this_particle['idata_count'] > 0:\n particle_header_lines = particle_header_lines - 1\n else:\n particle_header_lines = particle_header_lines - 2\n elif particle_header_lines == 2:\n this_particle['particle_idata'] = np.array(\n [line.rstrip('\\r\\n')[i:i+12] for i in xrange(0, len(line.rstrip('\\r\\n')), 12)]\n )\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 1:\n this_particle['rdata_count'] = int(line)\n if this_particle['rdata_count'] > 0:\n particle_header_lines = particle_header_lines - 1\n else:\n particles.append(this_particle)\n particle_header_lines = 9\n elif particle_header_lines == 0:\n this_particle['particle_rdata'] = np.array(\n [line.rstrip('\\r\\n')[i:i+14] for i in xrange(0, len(line.rstrip('\\r\\n')), 14)]\n )\n particles.append(this_particle)\n particle_header_lines = 9\n f.close()\n\n return header_data, particles", "def prepare_fastq(Fastq_Root=\"2.Fastq/\", ):\n fastqs = glob.glob(Fastq_Root + \"*.fastq\")\n data = {}\n for fq in fastqs:\n s = os.path.split(fq)[1]\n s = s.replace(\".fastq\", \"\")\n if s.endswith(\"_1\"):\n sample = s.replace(\"_1\", \"\")\n if sample not in data:\n data[sample] = [0, 0]\n data[sample][0] = fq\n if s.endswith(\"_2\"):\n sample = s.replace(\"_2\", \"\")\n if sample not in data:\n data[sample] = [0, 0]\n data[sample][1] = fq\n if not s.endswith(\"_1\") and not s.endswith(\"_2\"):\n data[s] = [fq]\n return data", "def convert_coord_data_to_dict(data):\r\n coord_header = data['coord'][0]\r\n coords = data['coord'][1]\r\n pct_var = data['coord'][3]\r\n coords_dict = {}\r\n pct_var_dict = {}\r\n coords_dict['pc vector number'] = coord_header\r\n for x in range(len(coords)):\r\n coords_dict[str(x + 1)] = coords[0:, x]\r\n pct_var_dict[str(x + 1)] = pct_var[x]\r\n\r\n return coords_dict, pct_var_dict", "def parse_blocks(fblocks):\n print('Parse blocks: ', end='')\n result = 
[]\n\n for line in fblocks:\n stripped = line.strip()\n if len(stripped) > 0 and stripped[0] != '#':\n match = re.match(r\"([0-9A-F]+)\\.{2}([0-9A-F]+);\\s+(.+)\", stripped)\n result.append({\n 'begin': int(match.group(1), 16),\n 'end': int(match.group(2), 16),\n 'name': match.group(3)\n })\n\n print('done')\n return result", "def load_data(self, f):\n D = {}\n P = {}\n v = 1\n with open(f) as fp:\n lines = fp.read().split(\"\\n\")\n for line in lines[1:]:\n if(len(line.strip()) > 0):\n parts = line.split(\" \")\n P[v] = (decimal.Decimal(parts[0]), decimal.Decimal(parts[1]))\n v += 1\n\n\n for p in P:\n D[p] = {}\n p1 = P[p]\n for d in P:\n #if d == p:\n # continue\n #else:\n p2 = P[d]\n D[p][d] = math.sqrt(math.pow(p1[0] - p2[0], 2) + math.pow(p1[1] - p2[1], 2))\n\n return P, D", "def readProcessedFCD():\n procFcdDict = {}\n pqDateDict = {} # each date is a period / quota tupel assigned\n simDate = '2007-07-18 '\n day = 0\n # create keys for the procFcdDict\n for p in period:\n for q in quota:\n day += 86400\n date, time = calcTime.getDateFromDepart(day).split(\" \")\n pqDateDict.setdefault(date, (p, q))\n procFcdDict.setdefault((p, q), {})\n # print date,p,q\n\n inputFile = open(path.FQprocessedFCD, 'r')\n for line in inputFile:\n timestamp, edge, speed, cover, id = line.split('\\t')\n date, time = calcTime.getNiceTimeLabel(timestamp).split(\" \")\n # add values to actual Dict\n timestep = calcTime.getTimeInSecs(simDate + time)\n procFcdDict[pqDateDict[date]].setdefault(\n timestep, []).append((id, edge, float(speed) / 3.6))\n inputFile.close()\n\n return procFcdDict", "def pairsParser(seqBlock,names):\n for name in names:\n seq = []\n sIndx = [] #start index, where in the line the sequence start\n struct = [] #structure lines\n record = False\n for line in seqBlock:\n if line.startswith(name+' '):\n tmp = line.split()\n #if seq length is shorter then 80 for one seq and longer\n #for another seq the following block will be empty for the\n #shorter sequence. 
this if statement protects against that\n if len(tmp) == 4: \n try:\n seq.append(tmp[2])#[name,start nr,seq,end nr]\n except:\n print 'LINE',line\n print 'BLOCK', seqBlock\n sIndx.append(index(line,tmp[2])) \n record = True\n else:\n continue\n else:\n if record:\n record = False\n struct.append(line)\n\n###############################################################################\n# Construction of the full sequence and structure and then mapping each letter\n#in structure to a position\n\n Fseq = '' #full sequence\n Fstruct = '' #full structure\n for i in range(len(seq)):\n # slice out corresponding structure to sequence\n #so you can get the same index for structure and sequence\n tmpStruct = struct[i][sIndx[i]:(sIndx[i]+len(seq[i]))]\n Fseq = ''.join([Fseq,seq[i]])\n Fstruct = ''.join([Fstruct,tmpStruct])\n #Applies a position to every letter in structure sequence \n letterPos = zip(range(len(Fseq)),Fstruct)\n \n###############################################################################\n#Cunstruction of dictionary for where every letter in structure has a list of\n#positions corresponding to that of that letter in respect to the sequence\n\n alphabet = {}\n for pos, letter in letterPos:\n indices = []\n #if the dict contains the letter you want to add to that list\n if alphabet.__contains__(letter): \n indices = alphabet[letter]\n indices.append(pos)\n alphabet[letter] = indices\n #else you want to create a new list for that letter\n elif not letter==' ':\n indices.append(pos)\n alphabet[letter] = indices\n \n###############################################################################\n#Each list in alphabet needs to be split in two,\n#oL and cL (open and close list), to be able to fold the positions into pairs\n\n pairs = []\n for value in alphabet.values():\n middle = len(value)/2\n oL = value[:middle]\n cL = value[middle:]\n #pairs are created by making a tuple of the first in oL to\n #the last in cl, second in oL to second last in cL and so on\n pairs.extend(zip(oL,cL.__reversed__()))\n\n yield Pairs(pairs),Fseq", "def get_points(geo_file_path):\n points = dict()\n point_idx = 1\n section_points = read_section(geo_file_path, SECTION_MARKER_POINTS)\n\n for line in section_points:\n split = line.split(' ')\n if len(split) == 3:\n x, y, _ = split\n points[str(point_idx)] = float(x), float(y)\n point_idx += 1\n\n return points", "def get_identity(quast_report_paths: List[str]) -> dict:\n\n identity_dict = {}\n\n for report_path in quast_report_paths:\n # extract sample name\n sample = path.dirname(report_path).split(\"/\")[-1]\n\n # load report\n report_df = pd.read_csv(\n report_path, delimiter=\"\\t\", index_col=0, squeeze=True, names=[\"value\"]\n )\n\n # select genome fraction (%)\n try:\n fraction = float(report_df.at[\"Genome fraction (%)\"]) / 100\n except:\n # no \"Genome fraction (%)\" in quast report. Case for not assemblable samples\n fraction = 0.0\n\n # store in dict\n identity_dict[sample] = fraction\n\n return identity_dict" ]
[ "0.5323081", "0.511978", "0.50651133", "0.50311476", "0.4981396", "0.4981396", "0.49392763", "0.49328926", "0.49008498", "0.48904055", "0.48638704", "0.48584062", "0.48568657", "0.48379332", "0.4833694", "0.48149553", "0.47972798", "0.47770527", "0.47520956", "0.47463682", "0.47423008", "0.47419065", "0.47095552", "0.46859553", "0.46523467", "0.46411744", "0.46396038", "0.46207905", "0.4617517", "0.461293", "0.46090382", "0.4607012", "0.45967013", "0.4578896", "0.45727435", "0.45705587", "0.4565178", "0.4555231", "0.45281056", "0.45062232", "0.44992757", "0.44890362", "0.4488338", "0.44761154", "0.4473686", "0.4470011", "0.44633895", "0.4458204", "0.44521883", "0.44506887", "0.44443575", "0.44383496", "0.44366542", "0.44358853", "0.44307867", "0.4427444", "0.44255856", "0.44251975", "0.44212127", "0.44200304", "0.4418507", "0.44179273", "0.44173932", "0.44169673", "0.44163227", "0.44140217", "0.44115022", "0.44102022", "0.44050506", "0.4402967", "0.44028652", "0.4393469", "0.4391765", "0.43841028", "0.43789953", "0.43777192", "0.43725517", "0.43555415", "0.43550915", "0.43497545", "0.4349345", "0.43483192", "0.4343006", "0.43423128", "0.43422678", "0.43334997", "0.4332755", "0.43316036", "0.43282503", "0.4325781", "0.4322893", "0.43225646", "0.43214536", "0.43204916", "0.43192595", "0.43184182", "0.4316968", "0.43103027", "0.43076473", "0.4306913" ]
0.7859618
0
Returns the compound name and the cell parameters from a xd.mas style file specified by 'path'.
def read_xd_master_file(path, errorpointer):
    filepointer = open(path, 'r')
    for line in filepointer.readlines():
        if 'TITLE' in line:
            compound_name = line.partition('!')[2].lstrip().rstrip()
        if 'CELL' in line:
            cell = [float(i) for i in line.split(" ") if '.' in i]
            break
    filepointer.close()
    try:
        return compound_name, cell
    except:
        errorpointer.write(path + '\n')
        return None, None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_compound_properties(path):\n filepointer = open(path)\n charge = None\n NE = None\n E_HF = None\n dipole = None\n read_dipole = False\n for line in filepointer:\n if read_dipole:\n read_dipole = False\n dipole = [float(value) for value in line.split(' ') if '.' in value]\n dipole = np.linalg.norm(dipole)\n elif 'Charge' in line and not charge:\n charge = line.split(' ')[-1].rstrip('\\n')\n elif 'Number of electrons' in line and not NE:\n NE = line.split(' ')[-1].rstrip('\\n')\n elif 'Total Energy' in line and not E_HF:\n E_HF = line.split(' ')[-1].rstrip('\\n')\n elif 'Dipole Moment' in line and not dipole:\n read_dipole = True\n if charge and NE and E_HF and dipole:\n break\n return [charge, NE, dipole, E_HF]", "def readParams(path):\n tiles = open(path, \"r\")\n #--- Starting date\n tiles.readline()\n index = tiles.readline()[:-1]\n \n #--- Starting date\n tiles.readline()\n B_date = tiles.readline()[:-1]\n \n #--- Stopping date\n tiles.readline()\n E_date = tiles.readline()[:-1]\n \n #--- DATA \n tiles.readline()\n DATA_path = tiles.readline()[:-1]\n \n #--- Csv \n tiles.readline()\n out = tiles.readline()[:-1]\n \n #--- Shapefile\n tiles.readline()\n shp = tiles.readline()[:-1]\n \n #--- Water mask\n water = DATA_path + '/waterMask'\n \n return index, B_date, E_date, DATA_path, out, shp, water", "def load_from_file(self, path):\n structure = None\n if re.search(\".pdb\", path):\n parser = PDBParser()\n else:\n parser = MMCIFParser()\n\n path = path.strip()\n model_id = os.path.basename(path)\n #if os.path.basename(path).split('.')[-1] == 'gz':\n # GZ = gzip.open(path, 'rb')\n # GZ.close()\n #else :\n\n structure = parser.get_structure(model_id, open_file( path ))\n header = parser.get_header()\n\n return structure, header", "def read_coordinates(path='', sort=True):\n maspointer = open(path + 'xd.mas', 'r')\n respointer = open(path + 'xd.res', 'r')\n\n positions = {}\n keylist = [] #Needed to keep the atomlist order. This is important for the frequency read function.\n for line in maspointer.readlines():\n if 'CELL ' in line:\n cell = [float(i) for i in line.split(\" \") if '.' in i]\n break\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(\" \") if '.' 
in i]\n coords = coords[:-1]\n key = line.split(\" \")[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return cell, positions, sortkeylist", "def process(path):\n # get parameter value:\n with open('config.cym', 'r') as f:\n line = f.readline()\n #print(line)\n pam = float(line[1:])\n f.close()\n # get position of aster:\n with open('aster.txt', 'r') as f:\n for line in f:\n if len(line)>3 and not line[0]=='%':\n #print(line)\n val = line.split()\n x = float(val[2])\n y = float(val[3])\n #z = float(val[4])\n #pos = math.sqrt(x*x+y*y+z*z)\n pos = math.sqrt(x*x+y*y)\n\n f.close()\n return (pam, pos)", "def get_new_cell(path: tuple(str, ...)) -> dict[str, Any]:\n return {\n \"path\": path,\n \"type\": \"cell\",\n # \"celltype\": \"structured\", # or \"text\" for help cells\n \"datatype\": \"mixed\",\n \"hash_pattern\": None,\n \"UNTRANSLATED\": True,\n }", "def extract_geometry(file_path):\r\n file_path = Path(file_path)\r\n \r\n \"\"\"singleSlash = \"\\\\\" # WOW THIS IS INCREDIBLE FRUSTRATING--I think the tests folder might need to be capatilized...\r\n doubleSlash = \"\\\\\\\\\"---THE FILEPATH NEEDS TO BE SINGLE FORWARD SLASHES FOR THE PATH FUNCTION TO WORK\r\n file_path_geom = file_path.replace(singleSlash,doubleSlash)\"\"\"\r\n \r\n \"\"\" Going to put the conversion functionality in the parse arguments section\"\"\"\r\n \r\n workbook = xlrd.open_workbook(file_path)\r\n worksheet = workbook.sheet_by_name('Outputs')\r\n pt1x = worksheet.cell(1,2).value\r\n pt1z = worksheet.cell(1,3).value\r\n pt1y = worksheet.cell(1,4).value\r\n pt2x = worksheet.cell(2,2).value\r\n pt2z = worksheet.cell(2,3).value\r\n pt2y = worksheet.cell(2,4).value\r\n pt3x = worksheet.cell(3,2).value\r\n pt3z = worksheet.cell(3,3).value\r\n pt3y = worksheet.cell(3,4).value\r\n pt4x = worksheet.cell(4,2).value\r\n pt4z = worksheet.cell(4,3).value\r\n pt4y = worksheet.cell(4,4).value\r\n pt5x = worksheet.cell(5,2).value\r\n pt5z = worksheet.cell(5,3).value\r\n pt5y = worksheet.cell(5,4).value\r\n pt6x = worksheet.cell(6,2).value\r\n pt6z = worksheet.cell(6,3).value\r\n pt6y = worksheet.cell(6,4).value\r\n pt7x = worksheet.cell(7,2).value\r\n pt7z = worksheet.cell(7,3).value\r\n pt7y = worksheet.cell(7,4).value\r\n pt8x = worksheet.cell(8,2).value\r\n pt8z = worksheet.cell(8,3).value\r\n pt8y = worksheet.cell(8,4).value\r\n pt9x = worksheet.cell(9,2).value\r\n pt9z = worksheet.cell(9,3).value\r\n pt9y = worksheet.cell(9,4).value\r\n pt10x = worksheet.cell(10,2).value\r\n pt10z = worksheet.cell(10,3).value\r\n pt10y = worksheet.cell(10,4).value\r\n pt11x = worksheet.cell(11,2).value\r\n pt11z = worksheet.cell(11,3).value\r\n pt11y = worksheet.cell(11,4).value\r\n pt12x = worksheet.cell(12,2).value\r\n pt12z = worksheet.cell(12,3).value\r\n pt12y = worksheet.cell(12,4).value\r\n pt13x = worksheet.cell(13,2).value\r\n pt13z = worksheet.cell(13,3).value\r\n pt13y = worksheet.cell(13,4).value\r\n pt14x = worksheet.cell(14,2).value\r\n pt14z = worksheet.cell(14,3).value\r\n pt14y = worksheet.cell(14,4).value\r\n pt15x = worksheet.cell(15,2).value\r\n pt15z = worksheet.cell(15,3).value\r\n pt15y = worksheet.cell(15,4).value\r\n pt16x = worksheet.cell(16,2).value\r\n pt16z = worksheet.cell(16,3).value\r\n pt16y = worksheet.cell(16,4).value\r\n #U_100x = worksheet.cell(17,2).value\r\n #U_100z = worksheet.cell(17,3).value # 
Not really using the other 2-dimensions for now\r\n #U_100y = worksheet.cell(17,4).value\r\n \r\n if pt16z == 0:\r\n print(\"Top point has a 0 height value--error in data import\")\r\n return pt1x, pt1z, pt1y, pt2x, pt2z, pt2y, pt3x, pt3z, pt3y, pt4x, pt4z, pt4y, pt5x, pt5z, pt5y, pt6x, pt6z, pt6y, pt7x, pt7z, pt7y, pt8x, pt8z, pt8y, pt9x, pt9z, pt9y, pt10x, pt10z, pt10y, pt11x, pt11z, pt11y, pt12x, pt12z, pt12y, pt13x, pt13z, pt13y, pt14x, pt14z, pt14y, pt15x, pt15z, pt15y, pt16x, pt16z, pt16y", "def extract(path,\n top_flag='CARTESIAN COORDINATES',\n bottom_flag='Empirical Formula:',\n pattern=r'([\\d]*)\\s+([a-zA-Z]*)\\s+(\\-?\\d+\\.\\d+)\\s+(\\-?\\d+\\.\\d+)\\s+(\\-?\\d+\\.\\d+)'):\n top = tuple()\n bottom = tuple()\n rows = list()\n\n with open(path, 'r') as p:\n for num, line in enumerate(p, 1):\n if top_flag in line:\n top = num\n if bottom_flag in line:\n bottom = num\n\n with open(path, 'r') as p:\n for num, line in enumerate(p, 1):\n if (top <= num) and (bottom >= num):\n rows += re.findall(pattern, line)\n\n return pandas.DataFrame(rows, columns=['num', 'atom_name', 'x', 'y', 'z'])", "def format_script_for_cell(path):\n header = '\\n# Cell content replaced by load magic replacement.\\n'\n with open(str(path), encoding='utf8') as f:\n solution = f.read()\n if not solution:\n raise RuntimeError('Solution {} has no content.'.format(path))\n return header + solution", "def load_params_from_file(path):\n save_dict = mx.nd.load(path)\n arg_params = {}\n aux_params = {}\n for k, v in save_dict.items():\n tp, name = k.split(':', 1)\n if tp == 'arg':\n arg_params[name] = v\n if tp == 'aux':\n aux_params[name] = v\n return arg_params, aux_params", "def from_file(path):\n\n filename = os.path.basename(path)\n\n base, suffix = os.path.splitext(filename);\n\n if suffix == '.bin':\n g = bgy3d.from_file(path)\n elif suffix == '.m':\n g = contf.m2dat(path)\n else:\n print 'Unknown file suffix.'\n exit()\n\n return g", "def readFile(file_name):\n if file_name.split('.')[-1] == 'thid':\n x,m,w = readThid(file_name)\n e = np.empty_like(x)\n e[:] = np.nan\n return x,m,w,e\n else:\n return readParams(file_name)", "def readfile(path):\n with open(path, 'r', encoding='utf-8') as f:\n param = tuple(f.readlines())\n return param", "def Read_CSSR(filename):\n f = open(filename)\n#\n# First read unit cell\n#\n tokens = f.readline().split()\n if len(tokens) != 3: \n print \"Format mismatch -- first cell line\"\n sys.exit(1)\n a, b, c = map(float,tokens[:])\n tokens = f.readline().split()\n if len(tokens) < 3: \n print \"Format mismatch -- second cell line\"\n sys.exit(1)\n alpha, beta, gamma = map(float,tokens[0:3])\n\n cell = N.zeros((3,3),N.Float)\n\n alpha, beta, gamma = map(lambda x: x*pi/180.0, (alpha,beta,gamma))\n va = N.array((a,0.0,0.0),N.Float)\n vb = N.array((b*cos(gamma), b*sin(gamma), 0.0),N.Float)\n xxx = (cos(alpha)-cos(beta)*cos(gamma)) / sin(gamma)\n vc = N.array((c*cos(beta), c*xxx, c*sqrt(sin(beta)**2 - xxx**2)),N.Float)\n\n cell[0,:] = va[:]\n cell[1,:] = vb[:]\n cell[2,:] = vc[:]\n\n#\n# Now the atoms\n#\n tokens = f.readline().split()\n natoms = int(tokens[0])\n f.readline() # empty line\n\n crystal = Structure([])\n import re\n p = re.compile(\"[A-z]+\")\n for a in range(natoms):\n tokens = f.readline().split()\n number, tag, x, y, z = tokens[0:5]\n m = p.match(tag)\n if m:\n symbol = m.group()\n else:\n print \"Cannot match \", tag \n crystal.append(Atom(symbol, [float(x), float(y), float(z)]))\n\n crystal.SetUnitCell(cell)\n crystal.SetBoundaryConditions(periodic=True)\n\n return 
crystal", "def _load_template(self, path):\n mol = Chem.RWMol()\n extension = os.path.basename(path).split(\".\")[1]\n\n if extension == \"sdf\":\n mol = Chem.MolFromMolFile(path, sanitize=True, removeHs=True)\n elif extension == \"pdb\":\n mol = Chem.MolFromPDBFile(path, sanitize=True, removeHs=True)\n else:\n raise ValueError(\"Unsupported molecule type '{}'\".format(extension))\n\n p = Chem.AdjustQueryParameters()\n p.makeAtomsGeneric = True\n p.makeBondsGeneric = True\n\n mol = Chem.AdjustQueryProperties(mol, p)\n\n return mol", "def read_stellar_properties(path = os.path.join(HERE, '../inputs/stellar_properties.txt')):\n\n # Read in table of stellar types\n data = np.loadtxt(path, skiprows=19, dtype = str)\n\n # Parse\n stypes = data[:,0]\n masses = np.array(data[:,1], dtype=float)\n lums = np.array(data[:,2], dtype=float)\n rads = np.array(data[:,3], dtype=float)\n temps = np.array(data[:,4], dtype=float)\n mvs = np.array(data[:,6], dtype=float)\n\n # Construct dictionary\n dic = {\n \"stypes\" : stypes,\n \"masses\" : masses,\n \"lums\" : lums,\n \"rads\" : rads,\n \"temps\" : temps,\n \"mvs\" : mvs\n }\n\n return dic", "def readCrystParam(crystfile):\n \n # Default values\n ccell1 = np.eye(3)\n ccell2 = np.eye(3)\n planehkl = [1,0,0]\n diruvw = [0,1,0]\n \n try:\n with open(crystfile,\"r\") as f:\n content = f.readlines()\n except FileNotFoundError:\n content = []\n\n for l in content:\n if l[0].rstrip() == \"#\":\n continue\n line = l.split('=')\n if len(line) == 2:\n if line[0].rstrip()==\"ccell1\":\n ccell1 = eval(line[1].rstrip())\n elif line[0].rstrip()==\"ccell2\":\n ccell2 = eval(line[1].rstrip())\n elif line[0].rstrip()==\"planehkl\":\n planehkl = eval(line[1].rstrip())\n elif line[0].rstrip()==\"diruvw\":\n diruvw = eval(line[1].rstrip())\n else:\n print(\"WARNING: %s is not a supported input\"%(line[0].rstrip()))\n elif len(line) > 2:\n raise SyntaxError(l)\n\n return ccell1, ccell2, planehkl, diruvw", "def read(path):", "def get_parameters(path):\n f = open(path, \"r\")\n line = f.readline()\n line = line.strip('\\n')\n values = line.split(',')\n parameter = [[0 for i in range(len(values))] for i in range(6)]\n row = 0\n while line:\n line = line.strip('\\n')\n values = line.split(',')\n for i in range(len(values)):\n if row != 0:\n parameter[row][i] = (float)(values[i])\n else:\n parameter[row][i] = values[i]\n row += 1\n line = f.readline()\n f.close()\n return parameter", "def load_a_couple(self, path):\n return pd.read_hdf(path[0], key='s'), np.load(path[1])", "def parse_dem_header(path):\n lookup = _parse_header(path)\n\n # NB: many lookup fields have multiple elements, eg ['1000', 'Hz']\n subset = {ifc.PYRATE_NCOLS: int(lookup[GAMMA_WIDTH][0]), ifc.PYRATE_NROWS: int(lookup[GAMMA_NROWS][0])}\n\n expected = ['decimal', 'degrees']\n for k in [GAMMA_CORNER_LAT, GAMMA_CORNER_LONG, GAMMA_X_STEP, GAMMA_Y_STEP]:\n units = lookup[GAMMA_CORNER_LAT][1:]\n if units != expected: # pragma: no cover\n msg = \"Unrecognised units for GAMMA %s field\\n. 
Got %s, expected %s\"\n raise GammaException(msg % (k, units, expected))\n\n subset[ifc.PYRATE_LAT] = float(lookup[GAMMA_CORNER_LAT][0])\n subset[ifc.PYRATE_LONG] = float(lookup[GAMMA_CORNER_LONG][0])\n subset[ifc.PYRATE_Y_STEP] = float(lookup[GAMMA_Y_STEP][0])\n subset[ifc.PYRATE_X_STEP] = float(lookup[GAMMA_X_STEP][0])\n subset[ifc.PYRATE_DATUM] = \"\".join(lookup[GAMMA_DATUM])\n subset[ifc.PYRATE_INSAR_PROCESSOR] = GAMMA\n return subset", "def open_file(path):\n book = xlrd.open_workbook(path)\n # print number of sheets\n #print book.nsheets\n # print sheet names\n #print book.sheet_names()\n # get the first worksheet\n first_sheet = book.sheet_by_index(0)\n # read a row\n #print first_sheet.row_values(0)\n # read a cell\n cell = first_sheet.cell(1,0)\n #print cell\n #print cell.value\n # read a row slice\n #print first_sheet.row_slice(rowx=0,start_colx=0,end_colx=2)\n\n \"\"\"\n if Junipter.search_junipter_rule(first_sheet,1) == 0:\n print \"Juniper rule doesn't match\"\n else:\n print \"Juniper rule match\"\n \"\"\"\n\n \"\"\"\n if Mitac.search_mitac_rule(first_sheet,1) == 0:\n print \"Mitac rule doesn't match\"\n else:\n print \"Mitac rule match\"\n \"\"\"\n\n if Fabrinet.search_fabrinet_rule(first_sheet,3) == 0:\n print \"fabrinet rule doesn't match\"\n else:\n print \"fabrinet rule match\"", "def parse_geometry(path: str) -> Optional[Dict[str, tuple]]:\n if not os.path.isfile(path):\n raise InputError(f'Could not find file {path}')\n if path.endswith('.yml'):\n content = read_yaml_file(path)\n if isinstance(content, dict):\n if 'xyz' in content.keys():\n return content['xyz'] if isinstance(content['xyz'], dict) else str_to_xyz(content['xyz'])\n elif 'opt_xyz' in content.keys():\n return content['opt_xyz'] if isinstance(content['opt_xyz'], dict) else str_to_xyz(content['opt_xyz'])\n software = identify_ess(path)\n xyz_str = ''\n if software == 'xtb':\n lines = _get_lines_from_file(path)\n final_structure, coord, first_line = False, False, True\n for line in lines:\n if '$' in line or 'END' in line or len(line.split()) < 10:\n coord = False\n if coord:\n splits = line.split()\n xyz_str += f'{qcel.periodictable.to_E(splits[3])} {splits[0]} {splits[1]} {splits[2]}\\n'\n if final_structure and ('$coord' in line or len(line.split()) > 15):\n coord = True\n if len(line.split()) > 15 and first_line:\n splits = line.split()\n xyz_str += f'{qcel.periodictable.to_E(splits[3])} {splits[0]} {splits[1]} {splits[2]}\\n'\n first_line = False\n if 'final structure:' in line:\n final_structure = True\n return str_to_xyz(xyz_str)\n\n log = ess_factory(fullpath=path, check_for_errors=False)\n try:\n coords, number, _ = log.load_geometry()\n except LogError:\n logger.debug(f'Could not parse xyz from {path}')\n\n # Try parsing Gaussian standard orientation instead of the input orientation parsed by Arkane.\n lines = _get_lines_from_file(path)\n for i in range(len(lines)):\n if 'Standard orientation:' in lines[i]:\n xyz_str = ''\n j = i\n while len(lines) and not lines[j].split()[0].isdigit():\n j += 1\n while len(lines) and '-------------------' not in lines[j]:\n splits = lines[j].split()\n xyz_str += f'{qcel.periodictable.to_E(int(splits[1]))} {splits[3]} {splits[4]} {splits[5]}\\n'\n j += 1\n break\n\n if xyz_str:\n return str_to_xyz(xyz_str)\n return None\n\n return xyz_from_data(coords=coords, numbers=number)", "def get_gml_data(file_path):\n\n bbox = (2.34592e7,100+6.704e6,2.34603e7,700+6.704e6)\n return gpd.read_file(file_path, bbox=bbox)", "def read_xyz(filename):\n\n config = {}\n\n with 
open(filename, 'r') as f:\n # number of atoms (spins)\n config['nat'] = int(re.findall('\\S+', f.readline())[0])\n\n # box parameters (type, dimension, shape, periodicity)\n sarr = re.findall('\\S+', f.readline())\n config['latt_type'] = sarr[0]\n dims = list(map(int, sarr[1:4]))\n config['latt_box'] = np.array(dims)\n config['box'] = np.diag(dims)\n config['pbc'] = list(map(int, sarr[4:7]))\n if len(sarr) > 7:\n dim_intra = len(sarr) - 7\n\n atom_types = []\n xyz = []\n config['latt_i'] = np.zeros(dims, dtype=int)\n config['latt_atoms'] = np.zeros(dims, dtype=int)\n config['latt_intra'] = np.zeros(tuple(dims) + (dim_intra,), dtype='float64')\n for i in range(config['nat']):\n sarr = re.findall('\\S+', f.readline())\n t = int(sarr[0])\n r = tuple(map(int, sarr[1:4]))\n\n atom_types.append(t)\n xyz.append(r)\n\n config['latt_i'][r] = i\n config['latt_atoms'][r] = t\n\n for j in range(dim_intra):\n ci = float(sarr[4 + j])\n config['latt_intra'][r[0], r[1], r[2], j] = ci\n\n config['atom_types'] = np.array(atom_types)\n config['xyz'] = np.array(xyz)\n \n return config", "def readfile(self, path, filename):\n # The DataStudio software uses ISO-8859-1 encoding (especially for the degree sign in temperature files)\n file = open(path + filename, encoding=\"iso-8859-1\")\n rowlist = file.readlines()\n\n title = rowlist[0].strip(\"\\n\")\n labels = rowlist[1].strip(\"\\n\").split(sep=\"\\t\")\n\n data = np.zeros((len(rowlist)-2, 2))\n\n for i in range(2, len(rowlist)):\n columns = rowlist[i].split(sep=\"\\t\")\n data[i-2, 0] = float(columns[0].replace(\",\", \".\"))\n data[i-2, 1] = float(columns[1].replace(\",\", \".\"))\n\n return data, title, labels", "def ReadAtomParameter(AtomParameterPath):\r\n\r\n AtomParameter=os.path.join(AtomParameterPath,'AtomParameter')\r\n\r\n Key1,Key2,Key3=False,False,False\r\n MaterialAtomDictionary,GasAtomDictionary,MassDictionary={},{},{}\r\n SpecialPair,SpecialPairList=[],[]\r\n\r\n with open(AtomParameter, 'r') as File:\r\n for Line in File.readlines():\r\n if Line.strip():\r\n WordList=Line.strip().split()\r\n if WordList[0]=='#':\r\n continue\r\n elif WordList[0]=='MaterialAtom:':\r\n Key1=True\r\n elif WordList[0]=='GasAtom:':\r\n Key1=False\r\n Key2=True\r\n elif WordList[0]=='SpecialPair:':\r\n Key2=False\r\n Key3=True\r\n\r\n # MaterialAtom\r\n elif Key1==True and WordList[0]!='Number':\r\n MaterialAtomDictionary[WordList[1]]=WordList[2:4]\r\n MassDictionary[WordList[1]] = WordList[5]\r\n elif Key2==True and WordList[0]!='Number':\r\n GasAtomDictionary[WordList[1]]=WordList[2:4]\r\n MassDictionary[WordList[1]] = WordList[4]\r\n elif Key3==True and WordList[0]!='Number':\r\n SpecialPair.append(WordList[1:3])\r\n SpecialPair.append(WordList[3:5])\r\n\r\n SpecialPairList.append(SpecialPair)\r\n\r\n return MaterialAtomDictionary,GasAtomDictionary,SpecialPairList,MassDictionary", "def parse_info_from_file(path):\n try:\n filename = os.path.split(path)[1]\n filename = os.path.splitext(filename)[0]\n age, gender, race, _ = filename.split('_')\n\n return int(age), dataset_dict['gender_id'][int(gender)], dataset_dict['race_id'][int(race)]\n except Exception as ex:\n return None, None, None", "def macro(path):\n import pandas as pd\n path = os.path.expanduser(path)\n filename = 'macro.csv'\n if not os.path.exists(os.path.join(path, filename)):\n url = 'http://dustintran.com/data/r/Zelig/macro.csv'\n maybe_download_and_extract(path, url,\n save_file_name='macro.csv',\n resume=False)\n\n data = pd.read_csv(os.path.join(path, filename), index_col=0,\n 
parse_dates=True)\n x_train = data.values\n metadata = {'columns': data.columns}\n return x_train, metadata", "def load_data(path):\n data = loadmat(path)\n return data['X'], data['y']", "def _parse_synth(path):\n files, coords, angles = [], '', ''\n with open(f'{path}paths.txt') as f:\n while True:\n line = f.readline()\n if not line:\n break\n else:\n files.append(line.rstrip('\\n'))\n return files", "def read_postcode_sectors(path):\n with fiona.open(path, 'r') as pcd_sector_shapes:\n return [pcd for pcd in pcd_sector_shapes]", "def read_postcode_sectors(path):\n with fiona.open(path, 'r') as pcd_sector_shapes:\n return [pcd for pcd in pcd_sector_shapes]", "def load_hyd_abn(name, path='.', abn_elements=PreSN.stl_elements, skiprows=0, comments='#',\n is_rho=False, is_dm=True, is_dum=False):\n # abn_elements = 'H He C N O Ne Na Mg Al Si S Ar Ca Fe Ni Ni56'.split()\n\n # hydro\n ext_hyd = '.hyd'\n hyd_file = os.path.join(path, name + ext_hyd)\n if not os.path.isfile(hyd_file):\n logger.error(' No file for %s' % hyd_file)\n return None\n\n logger.info(' Load hyd-data from %s' % hyd_file)\n\n def set_params(pre, a):\n if len(a) > 0:\n if len(a) == 5:\n time_start, nzon, m_core, r_cen, rho_cen = a\n pre.set_par('time_start', time_start)\n pre.set_par('m_core', m_core * phys.M_sun)\n pre.set_par('r_cen', r_cen)\n pre.set_par('rho_cen', rho_cen)\n elif len(a) == 4:\n time_start, nzon, m_core, r_cen = a\n pre.set_par('time_start', time_start)\n pre.set_par('m_core', m_core * phys.M_sun)\n pre.set_par('r_cen', r_cen)\n elif len(a) == 2:\n time_start, nzon = a\n pre.set_par('time_start', time_start)\n return pre\n\n # read table data\n if is_dm:\n col_names = \"zone dm R Rho T V M\".split()\n else:\n col_names = \"zone M R Rho T V M2\".split()\n\n a = []\n # Load header\n with open(hyd_file, 'r') as f:\n header_line = f.readline()\n if len(header_line) > 0:\n a = [float(x) for x in header_line.split()]\n\n # Load data\n dt = np.dtype({'names': col_names,\n 'formats': ['i4'] + list(np.repeat('f8', len(col_names) - 1))})\n\n data_hyd = np.loadtxt(hyd_file, comments='#', skiprows=1, dtype=dt, usecols=np.arange(len(col_names)))\n\n nz = len(data_hyd['R'])\n\n presn = PreSN(name, nz, elements=abn_elements)\n set_params(presn, a)\n\n col_map = {PreSN.sR, PreSN.sT, PreSN.sRho, PreSN.sV}\n for v in col_map:\n presn.set_hyd(v, data_hyd[v], is_exp=v.startswith('lg'))\n\n # Set header data\n set_params(presn, a)\n\n # Set Mass\n if is_rho:\n r = presn.r\n rho = presn.rho\n r = np.insert(r, 0, presn.r_cen)\n # rho = np.insert(rho, 0, presn.rho_cen)\n dm = np.zeros(nz)\n for i in range(nz):\n dm[i] = (r[i + 1] ** 3 - r[i] ** 3) * rho[i] * 4. / 3. * np.pi\n # dm[i] = (r[i + 1] ** 3 - r[i] ** 3) * rho[i + 1] * 4. 
* np.pi / 3.\n m = np.cumsum(dm)\n m += presn.m_core\n else:\n m = data_hyd[PreSN.sM] * phys.M_sun\n\n presn.set_hyd(PreSN.sM, m)\n\n # Set chemical composition\n ext_abn = '.abn'\n abn_file = os.path.join(path, name + ext_abn)\n if not os.path.isfile(abn_file):\n logger.error(' No file for %s' % abn_file)\n return None\n\n logger.info(' Load abn-data from %s' % abn_file)\n col_names = (\"zone \" + ' '.join(abn_elements)).split()\n if is_dum:\n col_names = (\"zone dum1 dum2 dum3 \" + ' '.join(abn_elements)).split()\n\n # dt = np.dtype({'names': col_names, 'formats': np.repeat('f8', len(col_names))})\n dt = np.dtype({'names': col_names,\n 'formats': ['i4'] + list(np.repeat('f8', len(col_names) - 1))})\n # logger.info(dt)\n data_chem = np.loadtxt(abn_file, comments=comments, skiprows=skiprows, dtype=dt)\n\n for ename in abn_elements:\n presn.set_chem(ename, data_chem[ename])\n\n return presn", "def load_coefficients(path):\n # FILE_STORAGE_READ\n cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_READ)\n\n # note we also have to specify the type to retrieve other wise we only get a\n # FileNode object back instead of a matrix\n camera_matrix = cv_file.getNode(\"K\").mat()\n dist_matrix = cv_file.getNode(\"D\").mat()\n\n try:\n R_co = cv_file.getNode(\"R_co\").mat()\n R_oc = cv_file.getNode(\"R_oc\").mat()\n T_co = cv_file.getNode(\"T_co\").mat()\n T_oc = cv_file.getNode(\"T_oc\").mat()\n except:\n print(\"[INFO]: could not read R_co, R_oc, T_co, T_oc from: {}\".format(path))\n print(str(R_co), str(R_oc), str(T_co), str(T_oc))\n cv_file.release()\n return [camera_matrix, dist_matrix]\n\n cv_file.release()\n return [camera_matrix, dist_matrix, R_co, R_oc, T_co, T_oc]", "def read_gui_file(fpath, cryversion=17):\n if cryversion != 17:\n raise NotImplementedError(\"CRYSTAL versions other than 17\")\n\n structdata = {}\n path = pathlib.Path(fpath)\n with path.open() as f:\n lines = f.read().splitlines()\n init_data = lines[0].split()\n dimensionality = int(init_data[0])\n if dimensionality not in _DIMENSIONALITY:\n raise ValueError(\n \"dimensionality was not between 0 and 3: {}\".format(\n dimensionality))\n structdata[\"pbc\"] = _DIMENSIONALITY[dimensionality]\n structdata[\"origin_setting\"] = int(init_data[1])\n crystal_type = int(init_data[2])\n if crystal_type not in CRYSTAL_TYPE_MAP:\n raise ValueError(\"crystal_type was not between 1 and 6: {}\".format(\n dimensionality))\n structdata[\"crystal_type\"] = CRYSTAL_TYPE_MAP[crystal_type]\n structdata[\"lattice\"] = [[float(num) for num in l.split()]\n for l in lines[1:4]]\n structdata[\"nsymops\"] = nsymops = int(lines[4])\n symops = []\n for i in range(nsymops):\n symop = []\n for j in range(4):\n line_num = 5 + i * 4 + j\n values = lines[line_num].split()\n if not len(values) == 3:\n raise IOError(\n \"expected symop x, y and z coordinate on line {0}: {1}\".\n format(line_num, lines[line_num]))\n symop.extend(\n [float(values[0]),\n float(values[1]),\n float(values[2])])\n symops.append(symop)\n structdata[\"symops\"] = ops_cart_to_frac(symops, structdata[\"lattice\"])\n structdata[\"natoms\"] = natoms = int(lines[5 + nsymops * 4])\n structdata[\"atomic_numbers\"] = [\n int(l.split()[0])\n for l in lines[6 + nsymops * 4:6 + nsymops * 4 + natoms]\n ]\n structdata[\"ccoords\"] = [[\n float(num) for num in l.split()[1:4]\n ] for l in lines[6 + nsymops * 4:6 + nsymops * 4 + natoms]]\n\n return structdata", "def get_xyz_coord(path):\r\n\tlabels = loadmat(path)\r\n\tanno_xyz = []\r\n\tfor index in range(0, 
1500):\r\n\t\tanno_xyz.append([])\r\n\t\tfor i in range(0, 21):\r\n\t\t\tx = labels['handPara'][0][i][index]\r\n\t\t\ty = labels['handPara'][1][i][index]\r\n\t\t\tz = labels['handPara'][2][i][index]\r\n\t\t\tanno_xyz[-1].append([x, y, z])\r\n\tanno_xyz = np.array(anno_xyz)\r\n\t# anno_xyz = np.reshape(labels['handPara'], (1500, 21, 3))\r\n\treturn anno_xyz", "def path_to_df(path, orig) :\n with open(path, 'r') as fich :\n strinfo = fich.readline()\n [strn, strm] = strinfo.split(\",\")\n info = {'n':int(strn.split(\"=\")[1]), 'm':int(strm.split(\"=\")[1])}\n data = pd.read_csv(fich, sep=\",\")\n data['origin'] = orig\n return info, data", "def from_path(fname):\n def parse_header(lines):\n metadata = {}\n for ln in lines:\n if ln.startswith('#') or len(ln) < 2:\n continue\n match = re.match('(\\w+)\\s+([\\w\\s\\.]+)', ln)\n if not match:\n warnings.warn(\"warning: can't understand line: %s\" % ln)\n continue\n key, value = match.group(1).lower(), match.group(2)\n if key == 'version':\n metadata[key] = value\n elif key in ('fields', 'type'):\n metadata[key] = value.split()\n elif key in ('size', 'count'):\n metadata[key] = map(int, value.split())\n elif key in ('width', 'height', 'points'):\n metadata[key] = int(value)\n elif key == 'viewpoint':\n metadata[key] = map(float, value.split())\n elif key == 'data':\n metadata[key] = value.strip().lower()\n # TO-DO apparently count is not required?\n # add some reasonable defaults\n if 'count' not in metadata:\n metadata['count'] = [1] * len(metadata['fields'])\n if 'viewpoint' not in metadata:\n metadata['viewpoint'] = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]\n if 'version' not in metadata:\n metadata['version'] = '.7'\n return metadata\n\n def _build_dtype(metadata_):\n \"\"\" build numpy structured array dtype from pcl metadata.\n note that fields with count > 1 are 'flattened' by creating multiple\n single-count fields.\n TO-DO: allow 'proper' multi-count fields.\n \"\"\"\n fieldnames = []\n typenames = []\n numpy_pcd_type_mappings = [(np.dtype('float32'), ('F', 4)),\n (np.dtype('float64'), ('F', 8)),\n (np.dtype('uint8'), ('U', 1)),\n (np.dtype('uint16'), ('U', 2)),\n (np.dtype('uint32'), ('U', 4)),\n (np.dtype('uint64'), ('U', 8)),\n (np.dtype('int16'), ('I', 2)),\n (np.dtype('int32'), ('I', 4)),\n (np.dtype('int64'), ('I', 8))]\n pcd_type_to_numpy_type = dict((q, p) for (p, q) in numpy_pcd_type_mappings)\n\n for f, c, t, s in zip(metadata_['fields'],\n metadata_['count'],\n metadata_['type'],\n metadata_['size']):\n np_type = pcd_type_to_numpy_type[(t, s)]\n if c == 1:\n fieldnames.append(f)\n typenames.append(np_type)\n else:\n fieldnames.extend(['%s_%04d' % (f, i) for i in xrange(c)])\n typenames.extend([np_type] * c)\n dtype = np.dtype(zip(fieldnames, typenames))\n return dtype\n\n def parse_binary_pc_data(f, dtype, metadata):\n rowstep = metadata['points'] * dtype.itemsize\n # for some reason pcl adds empty space at the end of files\n buf = f.read(rowstep)\n return np.fromstring(buf, dtype=dtype)\n\n def parse_binary_compressed_pc_data(f, dtype, metadata):\n # compressed size of data (uint32)\n # uncompressed size of data (uint32)\n # compressed data\n # junk\n fmt = 'II'\n compressed_size, uncompressed_size = struct.unpack(fmt, f.read(struct.calcsize(fmt)))\n compressed_data = f.read(compressed_size)\n # (compressed > uncompressed)\n # should we read buf as raw binary?\n buf = lzf.decompress(compressed_data, uncompressed_size)\n if len(buf) != uncompressed_size:\n raise Exception('Error decompressing data')\n # the data is stored 
field-by-field\n pcs_data = np.zeros(metadata['width'], dtype=dtype)\n ix = 0\n for dti in range(len(dtype)):\n dt = dtype[dti]\n bytess = dt.itemsize * metadata['width']\n column = np.fromstring(buf[ix:(ix + bytess)], dt)\n pcs_data[dtype.names[dti]] = column\n ix += bytess\n return pcs_data\n\n with open(fname, 'rb') as f:\n header = []\n while True:\n ln = f.readline().strip()\n header.append(ln)\n if ln.startswith('DATA'):\n metadata = parse_header(header)\n dtype = _build_dtype(metadata)\n break\n if metadata['data'] == 'ascii':\n pc_data = np.loadtxt(f, dtype=dtype, delimiter=' ')\n pc_data.dtype = np.float32\n pc_data = pc_data.reshape(-1, 4)\n elif metadata['data'] == 'binary':\n pc_data = parse_binary_pc_data(f, dtype, metadata)\n elif metadata['data'] == 'binary_compressed':\n pc_data = parse_binary_compressed_pc_data(f, dtype, metadata)\n else:\n print('File->py_pcd.py: DATA field is not \"ascii\",maybe \"binary\" or \"binary_compressed\", try to add method for both')\n return 'CODE: 0x123'\n pc = point_cloud(metadata, pc_data)\n return pc", "def Parametros_(name_path):\n\n\tp = pathlib.Path(name_path)\n\tZ22 = []\n\tfor f in p.glob('*.csv'):\n\t\tZ = pd.read_csv(f, header = 0)\n\t\tZ = np.array(Z, dtype = np.float64 )\n\t\tZ22.append(Z)\n \n\tA = np.array(Z22)\n \n \n\tZ2 = A[0,:,:]\n\t## Adición de parámetros\n\tV_sys = np.append(Z2[:, 0], Z2[:, 1])\n\tPA = np.append(Z2[:, 2], Z2[:, 3])\n\ti = np.append(Z2[:, 4], Z2[:, 5])\n\n\tx_0 = 4*random.rand(346) + 78\n\ty_0 = 4*random.rand(346) + 78\n\n\tR_0 = 10*random.rand(346) + 5\n\tV_t = 200*random.rand(346) + 50\n\ta = random.rand(346) + 0.5\n\tg = random.rand(346) + 0.5\n\n\tParams_Bek = np.vstack([x_0, y_0, V_sys, i, PA, V_t, R_0, a, g]).T\n\n\tp = [\"x0\", \"y0\", \"v_sys\", \"i\", \"phi_0\", \"V_t\", \"R_0\", \"a\", \"g\"] \n\tfor j in range(len(Params_Bek.T)):\n\t\tsb.displot((Params_Bek.T[j]).ravel(), color='#F2AB6D', bins=10, kde=False)\n\t\tplt.xlabel(p[j])\n\t\tplt.ylabel('Cuentas')\n\t\tplt.show()\n\n\treturn Params_Bek", "def apply(file_path, parameters=None):\r\n if parameters is None:\r\n parameters = {}\r\n\r\n F = open(file_path, \"r\")\r\n content = F.readlines()\r\n F.close()\r\n\r\n return import_dfg_from_rows(content, parameters=parameters)", "def process(path, name):\n d = {}\n path = path / name\n with open(path.as_posix()) as fd:\n file_contents = fd.read()\n module = ast.parse(file_contents)\n docstring = ast.get_docstring(module)\n docstring_line = get_value(docstring)\n d['name'] = name\n if docstring_line:\n d['docstring'] = docstring_line\n else:\n d['docstring'] = 'No docstring provided.'\n return d", "def _read_in_file(path, idc):\n info('read in file %s' % path)\n\n if not os.path.exists(path):\n info('file path not exist: %s' % path)\n sys.exit(1)\n try:\n if path.endswith('csv.gz'):\n mat = pd.read_csv(path, compression='gzip', index_col=0)\n elif path.endswith('.parquet'):\n mat = pd.read_parquet(path)\n else:\n mat = pd.read_csv(path, sep='\\t', index_col=0)\n except:\n traceback.print_exc(file=sys.stderr) # maybe the file type problem\n sys.exit(1)\n # TARGET-RT, too few sample is avaliable\n mat = mat[~mat.project_id.isin(['TARGET-RT'])]\n # check file title\n if 'project_id' not in mat.columns.tolist():\n info('project_id not in column names')\n sys.exit(1)\n if 'sample_type' not in mat.columns.tolist():\n info('sample_type is not in columns')\n sys.exit(1)\n # specify to needed genes:\n # the gene not in matrix columns\n diffgene = list(set(idc) - set(mat.columns.tolist()))\n if diffgene:\n 
info('these genes %s are not in the expression matrix of this cancer, skip %s' % (\n str(diffgene), str(path)))\n # return(pd.DataFrame()) # return a empty dataframe\n return (mat)", "def get_header(fname, path='./'):\r\n f = file(path+fname,'r')\r\n \r\n header = {}\r\n headlines = 0\r\n \r\n while True:\r\n line = f.readline()\r\n clean_line = string.strip(line).split()\r\n key = string.strip(clean_line[0])\r\n val = string.strip(clean_line[-1])\r\n if not key[0].isalpha():\r\n break\r\n try:\r\n val = int(val)\r\n except:\r\n val = float(val)\r\n if key != 'NODATA_value':\r\n key = key.lower()\r\n header[key] = val\r\n headlines += 1\r\n \r\n f.close()\r\n\r\n for key in ['ncols','nrows','cellsize','xllcorner','yllcorner']:\r\n if not header.has_key(key):\r\n raise KeyError, 'File %s header does not contain key %s'%(path+fname, key)\r\n \r\n return header, headlines", "def parse_sheet(spreadsheet_path):\n\tdef get_cols(sheet):\n\t\t\"\"\"Given a sheet, look at the first row and determine the index of each column.\n\t\t Returns a Reg with the column number set in each field.\"\"\"\n\t\tnames = {\"number\" : [\"Register Number\", -1],\n\t\t \"name\" : [\"Register Name\", -1],\n\t\t \"size\" : [\"Size\", -1],\n\t\t \"read\" : [\"R/W\", -1],\n\t\t \"write\" : [\"R/W\", -1],\n\t\t \"default\" : [\"Default Value\", -1],\n\t\t \"func\" : [\"Firmware Write Func\", -1],\n\t\t \"desc\" : [\"Description\", -1]}\n\t\tfor i in range(0, sheet.ncols):\n\t\t\tcell = sheet.cell(0, i).value\n\t\t\tfor k in names:\n\t\t\t\tif names[k][0] == cell:\n\t\t\t\t\t names[k][1] = i\n\t\treturn Reg(number=names[\"number\"][1], name=names[\"name\"][1], size=names[\"size\"][1], read=names[\"read\"][1], write=names[\"write\"][1], default=names[\"default\"][1], func=names[\"func\"][1], desc=names[\"desc\"][1])\n\t\n\tbook = xlrd.open_workbook(spreadsheet_path)\n\tsheet = book.sheet_by_index(0)\n\tcols = get_cols(sheet)\n\tregs = []\n\tfor i in range(1, sheet.nrows):\n\t\treg = Reg(number=sheet.cell(i, cols.number).value,\n\t\t name=sheet.cell(i, cols.name).value,\n\t\t size=sheet.cell(i, cols.size).value,\n\t\t read='r' in sheet.cell(i, cols.read).value.lower(),\n\t\t write='w' in sheet.cell(i, cols.write).value.lower(),\n\t\t default=sheet.cell(i, cols.default).value,\n\t\t func=sheet.cell(i, cols.func).value,\n\t\t desc=sheet.cell(i, cols.desc).value)\n\t\tregs.append(reg)\n\treturn regs", "def read_file(file):\n if opts.input_type == 'fits':\n data = fileio.read_fits(file)\n else:\n data = fileio.read_ascii(file)\n c_id = data[0,:]\n g_num = np.array(range(len(c_id)), dtype = 'int')\n g_id = data[3,:]\n g_ra = np.array(data[4,:], dtype = 'float')\n g_dec = np.array(data[5,:], dtype = 'float')\n g_z = np.array(data[6,:], dtype = 'float')\n return c_id, g_num, g_id, g_ra, g_dec, g_z", "def read_gbvi_parameters(filename):\n\n parameters = dict()\n \n infile = open(filename, 'r')\n for line in infile:\n # Strip trailing comments\n index = line.find('%')\n if index != -1:\n line = line[0:index] \n\n # Parse parameters\n elements = line.split()\n if len(elements) == 3:\n [atomtype, radius, gamma] = elements\n parameters['%s_%s' % (atomtype,'radius')] = float(radius)\n parameters['%s_%s' % (atomtype,'gamma')] = float(gamma)\n\n return parameters", "def read_parameters(self, entry=None):\n if entry:\n self.entry = entry\n with self.entry.nxfile:\n self.name = self.entry.nxroot.nxname + \"/\" + self.entry.nxname\n if 'unit_cell' in self.entry['sample']:\n lattice_parameters = self.read_parameter('sample/unit_cell')\n if 
lattice_parameters is not None:\n self.a, self.b, self.c = lattice_parameters[:3]\n self.alpha, self.beta, self.gamma = lattice_parameters[3:]\n elif 'unit_cell_abc' in self.entry['sample']:\n lattice_parameters = self.read_parameter(\n 'sample/unit_cell_abc')\n if lattice_parameters is not None:\n self.a, self.b, self.c = lattice_parameters\n lattice_parameters = self.read_parameter(\n 'sample/unit_cell_alphabetagamma')\n if lattice_parameters is not None:\n self.alpha, self.beta, self.gamma = lattice_parameters\n else:\n self.a = self.read_parameter('sample/unitcell_a', self.a)\n self.b = self.read_parameter('sample/unitcell_b', self.b)\n self.c = self.read_parameter('sample/unitcell_c', self.c)\n self.alpha = self.read_parameter(\n 'sample/unitcell_alpha', self.alpha)\n self.beta = self.read_parameter(\n 'sample/unitcell_beta', self.beta)\n self.gamma = self.read_parameter(\n 'sample/unitcell_gamma', self.gamma)\n self.formula = self.read_parameter('sample/chemical_formula',\n self.formula)\n self.space_group = self.read_parameter(\n 'sample/space_group', self.space_group)\n self.laue_group = self.read_parameter(\n 'sample/laue_group', self.laue_group)\n self.wavelength = self.read_parameter(\n 'instrument/monochromator/wavelength', self.wavelength)\n self.distance = self.read_parameter('instrument/detector/distance',\n self.distance)\n self.yaw = self.read_parameter('instrument/detector/yaw', self.yaw)\n self.pitch = self.read_parameter('instrument/detector/pitch',\n self.pitch)\n self.roll = self.read_parameter(\n 'instrument/detector/roll', self.roll)\n self.xc = self.read_parameter('instrument/detector/beam_center_x',\n self.xc)\n self.yc = self.read_parameter('instrument/detector/beam_center_y',\n self.yc)\n self.xd = self.read_parameter('instrument/detector/translation_x',\n self.xd)\n self.yd = self.read_parameter('instrument/detector/translation_y',\n self.yd)\n self.frame_time = self.read_parameter(\n 'instrument/detector/frame_time', self.frame_time)\n self.shape = self.read_parameter(\n 'instrument/detector/shape', self.shape)\n phi = self.read_parameter('instrument/goniometer/phi', self.phi)\n if isinstance(phi, np.ndarray) and len(phi) > 1:\n self.phi = phi[0]\n self.phi_step = phi[1] - phi[0]\n else:\n self.phi = phi\n try:\n self.phi_step = self.read_parameter(\n 'instrument/goniometer/phi', self.phi, attr='step')\n except Exception:\n pass\n self.chi = self.read_parameter(\n 'instrument/goniometer/chi', self.chi)\n self.omega = self.read_parameter('instrument/goniometer/omega',\n self.omega)\n if 'instrument/goniometer' in self.entry:\n if 'theta' in self.entry['instrument/goniometer']:\n self.theta = self.read_parameter(\n 'instrument/goniometer/theta', self.theta)\n elif 'goniometer_pitch' in self.entry['instrument/goniometer']:\n self.theta = self.read_parameter(\n 'instrument/goniometer/goniometer_pitch', self.theta)\n elif 'gonpitch' in self.entry['instrument/goniometer']:\n self.theta = self.read_parameter(\n 'instrument/goniometer/gonpitch', self.theta)\n self.symmetry = self.read_parameter('sample/unit_cell_group',\n self.symmetry)\n self.centring = self.read_parameter('sample/lattice_centring',\n self.centring)\n self.xp = self.read_parameter('peaks/x')\n self.yp = self.read_parameter('peaks/y')\n self.zp = self.read_parameter('peaks/z')\n self.polar_angle = self.read_parameter('peaks/polar_angle')\n self.azimuthal_angle = self.read_parameter('peaks/azimuthal_angle')\n self.intensity = self.read_parameter('peaks/intensity')\n self.pixel_size = 
self.read_parameter(\n 'instrument/detector/pixel_size', self.pixel_size)\n self.pixel_mask = self.read_parameter(\n 'instrument/detector/pixel_mask')\n self.pixel_mask_applied = self.read_parameter(\n 'instrument/detector/pixel_mask_applied')\n self.rotation_angle = self.read_parameter('peaks/rotation_angle')\n self.primary = self.read_parameter('peaks/primary_reflection')\n self.secondary = self.read_parameter('peaks/secondary_reflection')\n self.Umat = self.read_parameter(\n 'instrument/detector/orientation_matrix')\n if isinstance(self.polar_angle, np.ndarray):\n try:\n self.set_polar_max(np.sort(self.polar_angle)[200] + 0.1)\n except IndexError:\n self.set_polar_max(self.polar_angle.max())\n else:\n self.set_polar_max(10.0)\n self.Qh = self.read_parameter('transform/Qh')\n self.Qk = self.read_parameter('transform/Qk')\n self.Ql = self.read_parameter('transform/Ql')\n self.initialize_peaks()", "def get_materials_properties(dbpath): #<un-named>nook\n odb = openOdb(path=dbpath)\n data = []\n for _name,_mat in odb.materials.items():\n _elastic_mod = _mat.elastic.table[0][0]\n _poisson = _mat.elastic.table[0][1]\n if hasattr(_mat,\"plastic\"):\n _plastic = _mat.plastic.table\n else:\n _plastic = []\n data.append((_name,_elastic_mod,_poisson,_plastic))\n odb.close()\n return data", "def parse_parameters(filePath):\r\n numThreads, queue, affinity = 0,\"\",\"\"\r\n \r\n for line in open(filePath):\r\n if \"spec.omp2001.size:\" in line:\r\n if get_last_column_number(line)==\"test\":\r\n print(\"IS TEST SIZE!!1 : \" + filePath)\r\n \r\n if \"spec.omp2001.sw_threads:\" in line:\r\n numThreads = int(get_last_column_number(line))\r\n \r\n if \"spec.omp2001.mach:\" in line:\r\n machine = line.split(\" \")[-1]\r\n columns = machine.split(\".\")\r\n \r\n queue = columns[0]\r\n affinity = columns[1]\r\n \r\n return numThreads, queue, affinity", "def read_parse_raw_data(path):\n file_list = TopologyHelper.get_file_list(path)\n print(\"Reading \" + str(len(file_list)) + \" files from \" + path)\n topology_info = []\n file_name = []\n for file in file_list:\n try:\n r = TopologyHelper.parse_file(file)\n tmp = (r[0])['Topology']\n topology_info.append(tmp)\n t = r[1]\n file_name.append(t)\n except:\n continue\n print(\"Parsing completed\")\n return file_name, topology_info", "def read_file(file_name):\n fits_file = fits.open(file_name)\n\n header = fits_file[0].header\n image_data = fits_file[1].data\n\n segmentation_data = fits_file[2].data\n\n header_keywords = {'CRVAL3': 0, 'CRPIX3': 0, 'CD3_3': 0}\n # clause to differentiate between CDELT3 and CD3_3\n\n for hdr_key, hdr_value in header_keywords.items():\n # finding required header values\n hdr_value = header[hdr_key]\n header_keywords[hdr_key] = hdr_value\n\n return header_keywords, image_data, segmentation_data", "def cbf_file_to_basis_dict(path):\n import dxtbx.format.Registry\n reader = dxtbx.format.Registry.get_format_class_for_file(path)\n instance = reader(path)\n return map_detector_to_basis_dict(instance.get_detector())", "def parse_litho(path):\n contents = []\n with open(path, 'rt') as f:\n for line in f:\n contents.append(line.strip('\\r').strip('\\n').split(','))\n litho_names = np.array(contents)[1:][:, 0] # recast as np.ndarray for convenience\n litho_vals = np.array(contents)[1:].T[1:].T.astype(float)\n thickness = np.array(\n [litho_vals[:, 0][0]] + [litho_vals[:, 0][i] - litho_vals[:, 0][i - 1] for i in range(1, len(litho_vals))])\n litho_vals = np.concatenate((thickness.reshape(thickness.size, 1), litho_vals), axis=1)\n\n return 
litho_names, litho_vals", "def parse_1d_scan_coords(path: str) -> List[Dict[str, tuple]]:\n if not os.path.isfile(path):\n raise InputError(f'Could not find file {path}')\n software = identify_ess(path)\n traj = list()\n\n if software == 'xtb':\n scan_path = os.path.join(os.path.dirname(path), 'xtbscan.log')\n if os.path.isfile(scan_path):\n lines = _get_lines_from_file(scan_path)\n xyz_str = ''\n for line in lines:\n splits = line.split()\n if len(splits) == 1:\n if xyz_str:\n traj.append(str_to_xyz(xyz_str))\n xyz_str = ''\n continue\n if 'energy:' in line:\n continue\n xyz_str += f'{qcel.periodictable.to_E(splits[0])} {splits[1]} {splits[2]} {splits[3]}\\n'\n traj.append(str_to_xyz(xyz_str))\n return traj\n\n lines = _get_lines_from_file(path)\n log = ess_factory(fullpath=path, check_for_errors=False)\n if not isinstance(log, GaussianLog):\n raise NotImplementedError(f'Currently parse_1d_scan_coords only supports Gaussian files, got {type(log)}')\n done = False\n i = 0\n while not done:\n if i >= len(lines) or 'Normal termination of Gaussian' in lines[i] or 'Error termination via' in lines[i]:\n done = True\n elif 'Optimization completed' in lines[i]:\n while i < len(lines) + 10 and 'Input orientation:' not in lines[i] or 'Forces (Hartrees/Bohr)' in lines [i + 7]:\n i += 1\n if 'Error termination via' in lines[i]:\n return traj\n i += 5\n xyz_str, skip_traj = '', False\n while len(lines) and '--------------------------------------------' not in lines[i]:\n if 'DIIS: error' in lines[i]:\n skip_traj = True\n break\n splits = lines[i].split()\n xyz_str += f'{qcel.periodictable.to_E(int(splits[1]))} {splits[3]} {splits[4]} {splits[5]}\\n'\n i += 1\n if not skip_traj:\n traj.append(str_to_xyz(xyz_str))\n i += 1\n return traj", "def parse_hdob_file(path):\n col_names = [\"obs_time\", \"lat\", \"lon\", \"static_air_press\", \"geo_pot_height\",\n \"sfc_press_dval\", \"t_air\", \"t_dew\", \"wind_dir_spd\", \"wind_peak\",\n \"sfc_wind_peak\", \"rain_rate\", \"qc_flags\"]\n file_header = ''\n obs_data = []\n\n # Determine if 'path' is a path or url\n if isfile(path):\n # open & read local file\n with open(path, 'r') as fh:\n for idx, line in enumerate(fh):\n line = line.rstrip('\\n')\n\n if (idx == 3):\n file_header = line\n elif ((idx > 3) and (idx < 24)):\n curr_line = line.split(' ')\n curr_line = [x for x in curr_line if x != ' ']\n obs_data.append(curr_line)\n hdob_df = pd.DataFrame(data=obs_data, index=range(0, len(obs_data)), columns=col_names)\n hdob_obj = HDOBFile(file_header, hdob_df)\n print(hdob_obj)\n # elif (isURL):", "def read_file(self, path, is_6dcmd = True):\n if not os.path.isfile(path):\n raise ValueError(f'Error: File not exist! 
{path}')\n\n data = list()\n txtFile = open(path)\n\n if is_6dcmd:\n cmd = list()\n for row in txtFile:\n row = row.split()\n data.append([float(row[2]), float(row[3]), float(row[4]), \n float(row[5]), float(row[6]), float(row[7])])\n cmd.append([row[0], row[1], row[8], row[9]])\n \n return np.array(data), np.array(cmd)\n \n else:\n for row in txtFile:\n data.append([float(i) for i in row.split()])\n return np.array(data)", "def get_from_table(self, path, name):\n df_table = self.get(path)\n keys = df_table[\"Parameter\"]\n if name in keys:\n job_id = keys.index(name)\n return df_table[\"Value\"][job_id]\n raise ValueError(\"Unknown name: {0}\".format(name))", "def read_fx_data_from_file(self, fileName, formatSpec):\n dataR = pd.read_csv(fileName, index_col=1)\n dataR.index = pd.to_datetime(dataR.index, format=formatSpec)\n dataR.sort_index(inplace=True)\n label = dataR['Name'][0]\n dataR.drop('Name', axis=1, inplace=True)\n return dataR, label", "def parse_managed_path(path):\n fields = path.split(':', 1)\n return fields[0], fields[1]", "def get_metadata(path,smi):\n import os, json\n\n smidict = json.load(open('smi_file.json'))\n currentjson = json.load(open(path+smidict[smi]))\n etot = currentjson['gaussian']['properties']['total_energy'] \n dipole = currentjson['gaussian']['properties']['electric_dipole_moment_norm'] \n quadrapole = currentjson['gaussian']['properties']['electric_quadrupole_moment_norm']\n solv = currentjson['gaussian']['properties']['SMD_solvation_energy']\n mp = currentjson['Tm']\n \n return etot, dipole, quadrapole, solv, mp", "def read_in(path, projnum):\n\n is_xlsx = path.endswith('xlsx')\n if is_xlsx:\n data = pd.read_excel(path, header = 1)\n for i,r in data.iterrows():\n if r.isnull().all():\n rmv = i - 1\n break\n data = data.loc[:rmv]\n proj_name = pd.read_excel(path, skiprows = range(1, 10000)).columns[0]\n\n if not (proj_name and proj_name != 'Phase'):\n proj_name = ' '.join(path.split('/')[-1].split(' --- ')[0].split()[:-2])\n\n return data, proj_name\n else:\n data = pd.read_csv(path)\n if data.shape[1] == 1:\n data = pd.read_csv(path, sep = '\\t')\n\n # DEFAULT VALUES IF NECESSARY\n if 'Phase' not in data.columns:\n data['Phase'] = 'Base Bid'\n\n if 'Phase #' not in data.columns:\n data[\"Phase #\"] = 1\n\n if 'Form Method' not in data.columns:\n data[\"Form Method\"] = \"HP\"\n\n if 'Drafting ID' in data.columns:\n data.rename(columns = {'Drafting ID': 'Block ID'}, inplace = True)\n if 'Deleted' not in data.columns:\n data['Deleted'] = ''\n proj = [c for c in data.columns if 'Project #' in c]\n if proj:\n data.rename(columns = {proj[0]: 'Project #'}, inplace = True)\n if (data['Project #'] == '').all():\n data['Project #'] = 'TypeProjectNumberHere' if projnum == '-' else projnum\n\n else:\n data['Project #'] = 'TypeProjectNumberHere' if projnum == '-' else projnum\n \n\n return data", "def importBackground(bkgdfilename):\n # 1. Import\n ifile = open(bkgdfilename, \"r\")\n lines = ifile.readlines()\n ifile.close()\n\n # 2. Parse to dictionary\n pardict = {}\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n \t# Empty Line\n \t continue\n\n terms = line.split(\",\")\n if len(terms) != 2:\n \t print \"Warning! Line [%s] is not expected.\" % (line)\n \t continue\n\n parname = terms[0].strip()\n parvaluestr = terms[1].strip()\n\n pardict[parname] = parvaluestr\n\n # ENDFOR\n\n # 3. Set up\n bkgdtype = pardict.pop(\"Type\")\n startx = float(pardict.pop(\"StartX\"))\n endx = float(pardict.pop(\"EndX\"))\n\n # 4. 
Create table workspace\n tablews = CreateEmptyTableWorkspace(OutputWorkspace=\"LoadedBackgroundParameters\")\n\n tablews.addColumn(\"str\", \"Name\")\n tablews.addColumn(\"double\", \"Value\")\n\n for parname in sorted(pardict.keys()):\n parvalue = float(pardict[parname])\n tablews.addRow([parname, parvalue])\n\n return (tablews, bkgdtype, startx, endx)", "def create_dicts(self, path):\n line_d = {}\n rel_d = {}\n\n with open(path) as f:\n for line in islice(f, 0, None, 4):\n lister = line.split('\"')\n line_number = int(lister[0].split('\\t')[0])\n line_d[line_number] = ''.join(str(s) for s in lister[1:])\n \n with open(path) as f:\n for i, line in enumerate(islice(f, 1, None, 4)):\n rel_d[i] = line.split('\\n')[0]\n \n return (line_d, rel_d)", "def moments(path):\n\n g = from_file(path)\n\n h = 1.0 - g\n\n m1 = bgy3d.moments1(h)\n\n # Get the center of distribution\n center = m1[1:4] / m1[0]\n\n # Use center to compute 2nd momenta\n m2 = bgy3d.moments2nd(h, center)\n\n print \"Moments from\", path\n print \"<1> = \", m1[0]\n print \"<x> = \", m1[1] / m1[0]\n print \"<y> = \", m1[2] / m1[0]\n print \"<z> = \", m1[3] / m1[0]\n print \"<xy> = \", m2[0] / m1[0]\n print \"<yz> = \", m2[1] / m1[0]\n print \"<zx> = \", m2[2] / m1[0]\n print \"<z^2 - 1/3 * r^2> = \", m2[3] / m1[0]\n print \"<x^2 - y^2> = \", m2[4] / m1[0]\n print \"<r^2> = \", m2[5] / m1[0]", "def Read_Rcwa_Matlab(Path) : \n x,y=[],[]\n fs = open(Path, 'r') \n while 1: \n txt = fs.readline()\n if txt =='': \n break\n x.append(float(txt[0:25]))\n y.append(float(txt[29:-2])) \n fs.close()\n return x,y", "def import_data_file(path):\n if not os.path.isfile(path):\n raise ValueError(\"No file '{}'\".format(path))\n # load data\n data = np.loadtxt(path, dtype=float, comments='#')\n # load headings\n f = open(path, 'r')\n msg = \"\"\n while True:\n line = f.readline()\n if len(line) != 0:\n if line[0] == '#':\n msg += line\n else:\n break\n f.close()\n # return\n return msg, data", "def read_mic_report(path):\n workbook = xlrd.open_workbook(path, encoding_override='latin-1')\n sheet = workbook.sheet_by_index(0)\n data = {}\n errors = []\n for row, col in product(range(sheet.nrows), range(sheet.ncols)):\n cell_value = str(sheet.cell(row, col).value).lower()\n try:\n field = next(v for k, v in _FIELDS.items() if\n any([cell_value.startswith(n) for\n n in v.get('text')]))\n except StopIteration:\n continue\n if field['type'] == 'number':\n val = sheet.cell(row + field['row'], col + field['column']).value\n data[field['name']] = _handle_numbers(field, val)\n elif field['type'] == 'string':\n val = sheet.cell(row + field['row'], col + field['column']).value\n data[field['name']] = _handle_string(val)\n elif field['type'] == 'isotherm report':\n data['pressure'] = {}\n for i, item in enumerate(_get_data_labels(sheet, row, col)):\n points = _get_datapoints(sheet, row, col + i)\n _assign_data(item, field, data, points)\n elif field['type'] == 'error':\n errors += _get_errors(sheet, row, col)\n if errors:\n data['errors'] = errors\n _check(data, path)\n return data", "def get_data(path):\n df = pd.read_csv(path)\n\n return df", "def readParams(file_name):\n try:\n info = np.load(file_name,allow_pickle=True)[()]\n except FileNotFoundError:\n if file_name.split('/')[-2] == 'checkpoint':\n lfc_id_dir = '/expres/extracted/lfc_cal/lfc_id/'\n file_name = lfc_id_dir + os.path.basename(file_name)\n info = np.load(file_name,allow_pickle=True)[()]\n else:\n raise FileNotFoundError\n # Assemble information into \"fit-able\" form\n num_orders = 
len(info['params'])\n lines = [p[:,1] for p in info['params'] if p is not None]\n errs = [np.sqrt(cov[:,1,1]) for cov in info['cov'] if cov is not None]\n ordrs = [o for o in np.arange(86) if info['params'][o] is not None]\n waves = [w for w in info['wvln'] if w is not None]\n # I believe, but am not sure, that the wavelengths are multiplied by order\n # to separate them from when orders overlap at the edges\n waves = [wvln for order, wvln in zip(ordrs,waves)]\n ordrs = [np.ones_like(x) * m for m,x in zip(ordrs, lines)]\n\n x = np.concatenate(lines)\n y = np.concatenate(ordrs)\n e = np.concatenate(errs)\n w = np.concatenate(waves)\n # Note: default of pipeline includes ThAr lines, which we're not including here\n \n return (x,y,w,e)", "def load_names(path):\n global taxid_names, scientific_names, synonyms, lowercase_names\n with open(path, 'r') as r:\n for line in r:\n (taxid, name, unique, kind) = re.split(r'\\s*\\|\\s*', line.strip('|\\n\\t '), 3)\n if kind == 'scientific name':\n taxid_names[taxid] = name\n scientific_names[name] = taxid\n else:\n synonyms[name] = taxid\n lowercase_names[name.lower()] = taxid", "def parse_pbmc_dataset(filepath):\n\n data = sc.read_h5ad(filepath)\n labels_1 = data.obs[\"celltype.l1\"]\n labels_2 = data.obs[\"celltype.l2\"]\n labels_3 = data.obs[\"celltype.l3\"]\n dataframe = sc.get.var_df(data)\n genes = dataframe.index.values\n\n return data.X, (labels_1, labels_2, labels_3), genes", "def _readin_Q3D_matrix(path):\n\n text = Path(path).read_text()\n\n s1 = text.split('Capacitance Matrix')\n assert len(s1) == 2, \"Copuld not split text to `Capacitance Matrix`\"\n\n s2 = s1[1].split('Conductance Matrix')\n\n df_cmat = pd.read_csv(pd.compat.StringIO(\n s2[0].strip()), delim_whitespace=True, skipinitialspace=True, index_col=0)\n units = re.findall(r'C Units:(.*?),', text)[0]\n\n if len(s2) > 1:\n df_cond = pd.read_csv(pd.compat.StringIO(\n s2[1].strip()), delim_whitespace=True, skipinitialspace=True, index_col=0)\n units_cond = re.findall(r'G Units:(.*?)\\n', text)[0]\n else:\n df_cond = None\n\n design_variation = re.findall(r'DesignVariation:(.*?)\\n', text)[0]\n\n return df_cmat, units, design_variation, df_cond, units_cond", "def _read_smat(filename):\n return _read_hcore(filename)", "def read(path):\n \n file = open(path, 'r', encoding = 'utf-8')\n reader = csv.reader(file, delimiter = '\\t', quotechar = '', quoting = csv.QUOTE_NONE)\n result = []\n header = reader.__next__()\n for values in reader:\n entry = {}\n for i in range(len(header)):\n entry[header[i]] = values[i]\n result.append(entry)\n file.close()\n return result", "def read_data(path):\n data = pd.read_csv(path)\n return data", "def read_data(path):\n data = pd.read_csv(path)\n return data", "def loadFullMMF(self, path):\n \n logger.info('Loading macro file %s', path)\n \n mmfFile = open(path)\n \n currentLabel = None\n hRegExp = re.compile(\"~h \\\"(.+)\\\"\")\n pRegExp = re.compile(\"~p \\\"(.+)\\\"\")\n sRegExp = re.compile(\"~s \\\"(.+)\\\"\") # ~s \"dur_s2_102\"\n \n # read file line by line\n line = mmfFile.readline() \n while line:\n #print line\n \n # no ~h section till now, macro definitions?\n if currentLabel is None:\n \n # ~p macro? -> shared stream\n ret = pRegExp.match(line)\n if ret:\n l = self.readPMacro(ret.group(1), mmfFile)\n if l: \n line = l\n continue\n \n # ~s macro? -> shared state distribution\n ret = sRegExp.match(line)\n if ret:\n l = self.readSMacro(ret.group(1), mmfFile)\n if l: \n line = l\n continue\n \n \n # ~h section has begun? 
from now on only ~h definitions \n # ~h \"v^schwa-h+schwa=m@1_2/A:0_0_4/B:0-0-2@1-2&2-7#1-1$1-1!0-0;0-0|0/C:0+0+3/D:det_1/E:content+2@2+3&1+2#0+1/F:content_4/G:0_0/H:7=4@1=1|0/I:0=0/J:7+4-1\" \n ret = hRegExp.match(line)\n if ret:\n currentLabel = LabelEntry(ret.group(1)) \n self.labelList.append(currentLabel)\n self.labelDict[ ret.group(1) ] = currentLabel\n \n \n # we are already in the ~h section\n elif currentLabel:\n \n # link to a ~p macro which is then associated with the ~h macro \n # <STATE> 2\n # ~w \"SWeightall\"\n # <STREAM> 1\n # ~p \"mcep_s2_72\" \n ret = pRegExp.match(line)\n # or link to a ~s macro \n if not ret:\n ret = sRegExp.match(line)\n if ret:\n currentLabel.associatedMacroNames.append(ret.group(1))\n \n # fill macroToLabels\n if ret.group(1) in self.macroToLabels:\n self.macroToLabels[ ret.group(1) ].append(currentLabel)\n else:\n self.macroToLabels[ ret.group(1) ] = [ currentLabel ]\n \n \n line = mmfFile.readline()\n mmfFile.close()\n #print 'done: ' + str(len( self.macroToLabels ))\n #print self.macroToLabels", "def extract_data_from_all_xdm_schema(path: Path) -> Tuple[dict, dict]:\n with open(path, newline=\"\") as csvfile:\n reader = csv.DictReader(csvfile)\n\n columns_to_keep = [\"name\", \"datatype\", \"dataclass\"]\n data = {\n row[\"name\"]: {col: row[col] for col in columns_to_keep if col in row}\n for row in reader\n }\n xdm_rule_to_dtype = {\n k: v[\"datatype\"] for k, v in data.items() if \"datatype\" in v\n }\n xdm_rule_to_dclass = {\n k: v[\"dataclass\"] for k, v in data.items() if \"dataclass\" in v\n }\n\n return xdm_rule_to_dtype, xdm_rule_to_dclass", "def read_csv_hash(path):\n dic = get_dtype(path,1000)\n col_names = [i for i in dic]\n dtypes = [dic[i] for i in col_names]\n str_cols = [i for i in col_names if dic[i]=='str'] \n dtypes = ['int32' if i=='str' else i for i in dtypes]\n\n gdf = gd.read_csv(path,names=col_names,dtype=dtypes,skiprows=1)\n return gdf,str_cols", "def parse_data_from_file(path):\n print(path.stem)\n \n raw = path.stem.split('-')\n\n rawdate = raw[0][2:]\n print(rawdate)\n date = rawdate[6:] + \"/\" + rawdate[4:6] + '/' + rawdate[0:4]\n rawtime = raw[1]\n time = rawtime[0:2] + \"h\" + rawtime[2:4] + \"m\" + rawtime[4:6] + \"s\"\n dt = datetime.strptime(rawdate+rawtime, '%Y%m%d%H%M%S')\n print(dt)\n return dt", "def read_params(fname):\n f = open(fname, 'r')\n par = {} #output\n for i in range(10): # esta dentro de las primeras 10 lineas\n l = f.readline().split()\n #print \" ---> \", l\n number = u'%s' % l[-1] # presumably a number\n if not number.replace('.','').replace('-','').isnumeric():\n if l[0]=='#####':\n break\n else:\n continue # we proceed ONLY IF this is numeric string\n #print ' FIRST: ', l[0]\n if l[0]=='#####':\n #print \"IM I HERE????\"\n break # end of header\n\n name = l[1][:-1] # l[0] es '#', y -1 para comernos el \":\"\n value = np.float(l[2]) # l[2] es el valor\n par[name] = value\n\n return par", "def readData(path): \n try:\n open(path)\n dataset = np.loadtxt(path)\n # arms played by uniformly-random policy as recorded in dataset\n arms = dataset[:, 0].astype(int) \n # rewards received by playing arms using a uniformly-random policy as \n # recorded in dataset \n rewards = dataset[:, 1] \n # context vector \n contexts = dataset[:, 2:] \n except FileNotFoundError: \n raise \n return(arms, rewards, contexts)", "def read_file(path):\n # copied from straxen.common.open_resource\n # https://github.com/XENONnT/straxen/blob/a2e0e3abdbf278000cda70f7662a7d841c7223ef/straxen/common.py#L85\n name, fmt = 
os.path.splitext(path)\n\n if fmt in ['.npy', '.npy_pickle', '.npz']:\n result = np.load(path, allow_pickle=fmt == 'npy_pickle')\n if isinstance(result, np.lib.npyio.NpzFile):\n # Slurp the arrays in the file, so the result can be copied,\n # then close the file so its descriptors does not leak.\n result_slurped = {k: v[:] for k, v in result.items()}\n result.close()\n result = result_slurped\n elif fmt == '.pkl':\n with open(path, 'rb') as f:\n result = pickle.load(f)\n elif fmt == '.gz':\n subname, subfmt = os.path.splitext(name)\n if subfmt == '.pkl':\n with gzip.open(path, 'rb') as f:\n result = pickle.load(f)\n elif subfmt == '.json':\n with gzip.open(path, 'rb') as f:\n result = json.load(f)\n elif fmt == '.json':\n with open(path, mode='r') as f:\n result = commentjson.load(f)\n elif fmt == '.binary':\n with open(path, mode='rb') as f:\n result = f.read()\n elif fmt in ['.text', '.txt']:\n with open(path, mode='r') as f:\n result = f.read()\n elif fmt == '.csv':\n result = pd.read_csv(path)\n else:\n raise ValueError(f\"Unsupported format {fmt}!\")\n\n return result", "def parse_scan_args(file_path: str) -> dict:\n log = ess_factory(fullpath=file_path, check_for_errors=False)\n scan_args = {'scan': None, 'freeze': [],\n 'step': 0, 'step_size': 0, 'n_atom': 0}\n if isinstance(log, GaussianLog):\n try:\n # g09, g16\n scan_blk = parse_str_blocks(file_path, 'The following ModRedundant input section has been read:',\n 'Isotopes and Nuclear Properties', regex=False)[0][1:-1]\n except IndexError: # Cannot find any block\n # g03\n scan_blk_1 = parse_str_blocks(file_path, 'The following ModRedundant input section has been read:',\n 'GradGradGradGrad', regex=False)[0][1:-2]\n scan_blk_2 = parse_str_blocks(file_path, 'NAtoms=',\n 'One-electron integrals computed', regex=False)[0][:1]\n scan_blk = scan_blk_1 + scan_blk_2\n scan_pat = r'[DBA]?(\\s+\\d+){2,4}\\s+S\\s+\\d+[\\s\\d.]+'\n frz_pat = r'[DBA]?(\\s+\\d+){2,4}\\s+F'\n value_pat = r'[\\d.]+'\n for line in scan_blk:\n if re.search(scan_pat, line.strip()):\n values = re.findall(value_pat, line)\n scan_len = len(values) - 2 # atom indexes + step + stepsize\n scan_args['scan'] = [int(values[i]) for i in range(scan_len)]\n scan_args['step'] = int(values[-2])\n scan_args['step_size'] = float(values[-1])\n if re.search(frz_pat, line.strip()):\n values = re.findall(value_pat, line)\n scan_args['freeze'].append([int(values[i]) for i in range(len(values))])\n if 'NAtoms' in line:\n scan_args['n_atom'] = int(line.split()[1])\n else:\n raise NotImplementedError(f'parse_scan_args() can currently only parse Gaussian output '\n f'files, got {log}')\n return scan_args", "def readcif(filename, **kwds):\n \n # Read the unit cell parameters\n a, b, c, alf, bet, gam = [[]]*6\n with open(filename, 'r') as f:\n \n for line in f:\n if \"length_a\" in line:\n a = numgrab(line)\n elif \"length_b\" in line:\n b = numgrab(line)\n elif \"length_c\" in line:\n c = numgrab(line)\n elif \"angle_alpha\" in line:\n alf = numgrab(line)\n elif \"angle_beta\" in line:\n bet = numgrab(line)\n elif \"angle_gamma\" in line:\n gam = numgrab(line)\n \n crystVec = a + b + c + alf + bet + gam\n \n # Read atomic coordinates\n cifdata = pd.read_csv(filename, delim_whitespace=True, header=None, **kwds)\n atomLabels = np.array(cifdata.values[:,0], dtype='str')\n coords = np.array(cifdata.values[:,1:4]).astype('float64')\n\n return atomLabels, coords, crystVec", "def read_behaviors(path, cols=None):\n dtype = {COMP_MID: str, PROD_NO: int, TIMESTAMP: int, PROD_NM: str}\n return 
pd.read_csv(path, delimiter=',', dtype=dtype, usecols=cols)", "def read_grid(filename_grid, dim=2, slc=None):\n ## get shape and slice\n fid = h5py.File(filename_grid, 'r')\n if dim==2:\n varnames = ['x', 'y', 'ep']\n if slc is None: slc = np.s_[0,:,:]\n if dim==3:\n varnames = ['x', 'y', 'z', 'ep']\n if slc is None: slc = np.s_[:,:,:]\n\n dset = fid.get(varnames[0])\n shape = dset[slc].shape\n (nx,ny,nz) = dset.shape\n ## read variables\n grid = {}\n for varname in varnames:\n try:\n dset = fid.get(varname)\n grid[varname] = np.zeros(shape)\n dset.read_direct(grid[varname], source_sel=slc)\n grid[varname] = grid[varname].transpose()\n except:\n pass\n fid.close()\n return grid, nx, ny, nz", "def load_building_blocks(path):\t\t\n\t#TODO : automatization\n\tbenzene = Building_Block(abbrev=\"B\", num_atoms=6,origin=0, para_pos=3, para_angle=0, meta_pos=4 , meta_angle = -np.pi/3., ortho_pos=5, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/benzene.xyz\")\n\tnapthtalene = Building_Block(abbrev=\"N\", num_atoms=18,origin=0, para_pos=12, para_angle=0., meta_pos=11 , meta_angle = -np.pi/3., ortho_pos=10, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/naphtalene.xyz\")\n\tdbPc1 = Building_Block(abbrev=\"dbPc1\", num_atoms=32,origin=13, para_pos=1, para_angle=0, meta_pos=0 , meta_angle = +np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc1_block.xyz\")\n\tdbPc4 = Building_Block(abbrev=\"dbPc4\", num_atoms=55,origin=22, para_pos=1, para_angle=0, meta_pos=0 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc4.xyz\")\n\tdbPc6 = Building_Block(abbrev=\"dbPc6\", num_atoms=52,origin=17, para_pos=0, para_angle=0, meta_pos=1 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc6.xyz\")\n\tdbPc5 = Building_Block(abbrev=\"dbPc5\", num_atoms=58,origin=12, para_pos=26, para_angle=0, meta_pos=20 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc5.xyz\")\n\tpseudo_para_naph_PCP = Building_Block(abbrev=\"pseudo-para_naph_PCP\", num_atoms=44,origin=0, para_pos=18, para_angle=0, meta_pos=16 , meta_angle = -np.pi/3, ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/pseudo-para_naph_PCP.xyz\")\n\tline =Building_Block(abbrev=\"line\", num_atoms=4,origin=0, para_pos=1, para_angle=0, meta_pos=1 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/line.xyz\")\n\t#rot=Building_Block(abbrev=\"line\", num_atoms=47,origin=6, para_pos=16, para_angle=0, meta_pos=20 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=2, path=path+\"/rot.xyz\")\n\t#stacked_anth=Building_Block(abbrev=\"stacked_anth\", num_atoms=62,origin=3, para_pos=22, para_angle=0, meta_pos=30 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=2, path=path+\"/stacked_anth.xyz\")\n\t\n\tbuilding_blocks = [benzene,napthtalene,dbPc1,dbPc4,dbPc6, dbPc5,pseudo_para_naph_PCP, line]\n\n\treturn building_blocks", "def read_csv(path):\n column_headers = ['Country', 'Region', 'Pop. Density (per sq. 
mi.)', 'Infant mortality (per 1000 births)',\n 'GDP ($ per capita) dollars']\n df = pd.read_csv(path)\n return df[column_headers]", "def get_data(path,name):\r\n tmp = []\r\n pn = path + \"\\\\\" + name\r\n file = open(pn, \"r\")\r\n for d in file:\r\n x,y = d.split(\",\")\r\n tmp += [[int(x), int(y)]]\r\n return tmp", "def read_data(data_path):\n data_set = pd.read_excel(data_path)\n qids_raw = data_set[\"QID\"].values\n conditions_raw = data_set[\"CONDITION\"].values\n outputs_raw = data_set[\"OUTPUT\"].values\n return qids_raw, conditions_raw, outputs_raw", "def create_dicts(path):\n line_d = {}\n rel_d = {}\n\n with open(path) as f:\n for line in islice(f, 0, None, 4):\n lister = line.split('\"')\n line_number = int(lister[0].split('\\t')[0])\n line_d[line_number] = ''.join(str(s) for s in lister[1:])\n \n with open(path) as f:\n for i, line in enumerate(islice(f, 1, None, 4)):\n rel_d[i] = line.split('\\n')[0]\n \n return (line_d, rel_d)", "def get_paramfile(path, cases):\n data = None\n if isinstance(path, six.string_types):\n for prefix, function_spec in cases.items():\n if path.startswith(prefix):\n function, kwargs = function_spec\n data = function(prefix, path, **kwargs)\n return data", "def _getPOSCAR():\r\n file = open(\"POSCAR\", \"r\")\r\n lines = file.readlines()\r\n line5 = str(lines[5]).strip().split()\r\n line6 = str(lines[6]).strip().split()\r\n cellname = ''\r\n for i in range(len(line5)):\r\n cellname += (line5[i] + line6[i])\r\n return cellname", "def get_metadata_from_path(path):\n try:\n import yaml\n # assumes index card is in the top-level of path\n index_card = os.path.join(path, \"M_index.yaml\")\n with open(index_card, \"r\") as stream:\n file_info = yaml.safe_load(stream)\n\n metadata_dict = {}\n metadata_dict[\"book_id\"] = file_info[\"book_id\"]\n metadata_dict[\"timestamp_start\"] = file_info[\"start_time\"]\n metadata_dict[\"type\"] = file_info[\"type\"]\n metadata_dict[\"obsid\"] = _convert_book_id_to_obsid(file_info[\"book_id\"])\n # get optional bits\n if \"stop_time\" in file_info:\n metadata_dict[\"timestamp_end\"] = file_info[\"stop_time\"]\n if \"observatory\" in file_info:\n metadata_dict[\"observatory\"] = file_info[\"observatory\"]\n if \"telescope\" in file_info:\n metadata_dict[\"telescope\"] = file_info[\"telescope\"]\n if \"stream_ids\" in file_info:\n metadata_dict[\"stream_ids\"] = file_info[\"stream_ids\"]\n if \"subtype\" in file_info:\n metadata_dict[\"subtype\"] = file_info[\"subtype\"]\n if \"tags\" in file_info:\n metadata_dict[\"tags\"] = file_info[\"tags\"]\n if \"scanification\" in file_info:\n metadata_dict[\"scanification\"] = file_info[\"scanification\"]\n if \"hwp_rate_hz\" in file_info:\n metadata_dict[\"hwp_rate_hz\"] = file_info[\"hwp_rate_hz\"]\n if \"sequencer_ref\" in file_info:\n metadata_dict[\"sequencer_ref\"] = file_info[\"sequencer_ref\"]\n return metadata_dict\n except (ImportError, FileNotFoundError, KeyError):\n pass\n\n return None", "def parse_from_file (path):\n with open(path) as f:\n return NFFG.parse(f.read())", "def read_metadata_txt(path):\n df = pd.read_csv(path,\n sep='\\s+', # Fields are separated by one or more spaces\n usecols=[0, 1, 2, 3, 4], # Grab only the first 4 columns\n # Missing elevation is noted as -999.9\n na_values=[-999.9],\n header=None,\n names=['station_id', 'latitude', 'longitude', 'elevation', 'state'])\n return df", "def read_dataset_info(path=None, paths=None, index_col=None, filter_by_min_spacing=False, verbose=False):\n if (path is None and paths is None) or (path is not None and 
paths is not None):\n raise ValueError(\"Only one of 'path' or 'paths' arguments must be provided\")\n\n dataset_info = get_dicom_info(glob.glob(path) if path is not None else paths, verbose=verbose)\n if filter_by_min_spacing:\n output_indices = (\n dataset_info\n .groupby('AccessionNumber')\n .agg({'SpacingZ': 'idxmin'})\n )\n index_df = dataset_info.loc[output_indices.loc[:, 'SpacingZ'], :]\n else:\n index_df = dataset_info\n return index_df if index_col is None else index_df.set_index(index_col)", "def read_map_file(path):\r\n with open(path) as f:\r\n dir_name = os.path.dirname(path)\r\n img = cv2.imread(dir_name + '/' + f.readline().strip())\r\n assert img.shape[0] > 0 and img.shape[1] > 0, 'Can not open image file'\r\n meter_per_pixel = float(f.readline().strip())\r\n ori_str = f.readline().strip().split()\r\n origin = np.array([int(ori_str[0]), int(ori_str[1])])\r\n init_heading = float(ori_str[2])\r\n return img, meter_per_pixel, origin, init_heading" ]
[ "0.6235567", "0.61867106", "0.5885496", "0.5595712", "0.5569049", "0.5382577", "0.5362518", "0.53287804", "0.5324073", "0.53078204", "0.52826846", "0.52695364", "0.52454114", "0.51977813", "0.5176313", "0.5159904", "0.51186925", "0.50939804", "0.5091242", "0.5027156", "0.49919435", "0.4978368", "0.4972508", "0.4964574", "0.4943375", "0.49425912", "0.49364504", "0.49329692", "0.49271902", "0.49262348", "0.49243966", "0.48984858", "0.48984858", "0.4897129", "0.4896239", "0.48909286", "0.4888352", "0.48873767", "0.48790792", "0.4864858", "0.48632297", "0.4857276", "0.4855678", "0.48549885", "0.48363364", "0.48279217", "0.48164418", "0.48160145", "0.48153794", "0.47846475", "0.47830722", "0.4776601", "0.4773016", "0.4768884", "0.4767583", "0.47646627", "0.47622707", "0.4760576", "0.47570458", "0.47520882", "0.47503012", "0.47452813", "0.47425163", "0.47388706", "0.47353506", "0.4714037", "0.47115403", "0.4707898", "0.47052097", "0.46988142", "0.46960673", "0.4690264", "0.4689395", "0.46884823", "0.4684401", "0.4682563", "0.4682563", "0.46802947", "0.46764356", "0.4676375", "0.46762335", "0.46738628", "0.46670872", "0.46665084", "0.46623257", "0.4662024", "0.46554053", "0.46540272", "0.465261", "0.46520972", "0.46504322", "0.4648786", "0.46478105", "0.4645201", "0.46409127", "0.46387342", "0.46363693", "0.46331245", "0.46299484", "0.4626754" ]
0.65298444
0
Reads the cell parameters from a 'xd.mas' file and the atomic positions from a 'xd.res' file. The function returns a list with the cell parameters and a dictionary which maps each atom name to its fractional coordinates.
def read_coordinates(path='', sort=True):
    maspointer = open(path + 'xd.mas', 'r')
    respointer = open(path + 'xd.res', 'r')
    positions = {}
    keylist = []  # Needed to keep the atomlist order. This is important for the frequency read function.
    for line in maspointer.readlines():
        if 'CELL ' in line:
            cell = [float(i) for i in line.split(" ") if '.' in i]
            break
    for line in respointer.readlines():
        if '(' in line and not '!' in line:
            coords = [float(i) for i in line.split(" ") if '.' in i]
            coords = coords[:-1]
            key = line.split(" ")[0]
            keylist.append(key)
            positions[key] = coords
    if sort:
        sortkeylist = []
        for i in xrange(len(keylist)):
            j = i + 1
            for key in keylist:
                number = get_number(key)
                if j == int(number):
                    sortkeylist.append(key)
    else:
        sortkeylist = keylist
    return cell, positions, sortkeylist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_xyz(filename):\n\n config = {}\n\n with open(filename, 'r') as f:\n # number of atoms (spins)\n config['nat'] = int(re.findall('\\S+', f.readline())[0])\n\n # box parameters (type, dimension, shape, periodicity)\n sarr = re.findall('\\S+', f.readline())\n config['latt_type'] = sarr[0]\n dims = list(map(int, sarr[1:4]))\n config['latt_box'] = np.array(dims)\n config['box'] = np.diag(dims)\n config['pbc'] = list(map(int, sarr[4:7]))\n if len(sarr) > 7:\n dim_intra = len(sarr) - 7\n\n atom_types = []\n xyz = []\n config['latt_i'] = np.zeros(dims, dtype=int)\n config['latt_atoms'] = np.zeros(dims, dtype=int)\n config['latt_intra'] = np.zeros(tuple(dims) + (dim_intra,), dtype='float64')\n for i in range(config['nat']):\n sarr = re.findall('\\S+', f.readline())\n t = int(sarr[0])\n r = tuple(map(int, sarr[1:4]))\n\n atom_types.append(t)\n xyz.append(r)\n\n config['latt_i'][r] = i\n config['latt_atoms'][r] = t\n\n for j in range(dim_intra):\n ci = float(sarr[4 + j])\n config['latt_intra'][r[0], r[1], r[2], j] = ci\n\n config['atom_types'] = np.array(atom_types)\n config['xyz'] = np.array(xyz)\n \n return config", "def read_xyz(filename):\n #print('Reading geom from:'),filename\n atoms = []\n coordinates = []\n\t\n xyz = open(filename)\n n_atoms = int(xyz.readline())\n title = xyz.readline()\n for line in xyz:\n\tif len(line.strip()) == 0:\n\t\tpass\n\t\tbreak\t\n\tatom,x,y,z = line.split()\n\tatoms.append(atom)\n\tcoordinates.append([float(x), float(y), float(z)])\n xyz.close()\n coordinates = [[w * angtobh for w in ww] for ww in coordinates] #ang to bh\n\n if n_atoms != len(coordinates):\n \tprint('Number of atoms in xyz file doesnt equal to the number of lines.')\n\tsys.exit(1)\n \n return atoms, coordinates", "def ReadAtomParameter(AtomParameterPath):\r\n\r\n AtomParameter=os.path.join(AtomParameterPath,'AtomParameter')\r\n\r\n Key1,Key2,Key3=False,False,False\r\n MaterialAtomDictionary,GasAtomDictionary,MassDictionary={},{},{}\r\n SpecialPair,SpecialPairList=[],[]\r\n\r\n with open(AtomParameter, 'r') as File:\r\n for Line in File.readlines():\r\n if Line.strip():\r\n WordList=Line.strip().split()\r\n if WordList[0]=='#':\r\n continue\r\n elif WordList[0]=='MaterialAtom:':\r\n Key1=True\r\n elif WordList[0]=='GasAtom:':\r\n Key1=False\r\n Key2=True\r\n elif WordList[0]=='SpecialPair:':\r\n Key2=False\r\n Key3=True\r\n\r\n # MaterialAtom\r\n elif Key1==True and WordList[0]!='Number':\r\n MaterialAtomDictionary[WordList[1]]=WordList[2:4]\r\n MassDictionary[WordList[1]] = WordList[5]\r\n elif Key2==True and WordList[0]!='Number':\r\n GasAtomDictionary[WordList[1]]=WordList[2:4]\r\n MassDictionary[WordList[1]] = WordList[4]\r\n elif Key3==True and WordList[0]!='Number':\r\n SpecialPair.append(WordList[1:3])\r\n SpecialPair.append(WordList[3:5])\r\n\r\n SpecialPairList.append(SpecialPair)\r\n\r\n return MaterialAtomDictionary,GasAtomDictionary,SpecialPairList,MassDictionary", "def read(self, FN, natoms=None, return_title=False, \\\n multiplier=None, trajectory=False):\n if not os.path.isfile(FN):\n raise Exception('Coordinate file %s does not exist!' 
% FN)\n if FN.endswith('.gz'):\n import gzip\n F = gzip.open(FN, 'r')\n else:\n F = open(FN, 'r')\n dat = F.read().strip().split('\\n')\n F.close()\n\n title = dat.pop(0) # Title\n\n if len(dat[0].split()) > 1:\n # VMD format (does not specify number of atoms)\n crd = []\n for line in dat:\n crd = crd + [float(x) for x in line.split()]\n crd = np.resize(crd, (len(crd) / 3, 3))\n else:\n # AMBER format\n file_natoms = int(dat.pop(0)) # Number of atoms\n if (natoms is not None) and (file_natoms != natoms):\n print \"Incorrect number of atoms in crd file\"\n return np.array([])\n\n if trajectory:\n w = 8 # For mdcrd\n else:\n w = 12 # For inpcrd\n crd = []\n for line in dat:\n crd = crd + [float(line[x:x + w]) for x in range(0, len(line), w)]\n crd = np.resize(crd, (len(crd) / 3, 3))\n\n if multiplier is not None:\n crd = multiplier * crd\n if (natoms is not None):\n crd = np.vsplit(crd, crd.shape[0] / natoms)\n print \" read %d configurations from %s\" % (len(crd), FN)\n\n if return_title:\n return (crd, title)\n else:\n return crd", "def rd_xyz(self):\n nmol = self.__rd_xyz_nmol()\n fpin = open(self.config['xyzfile'], \"r\")\n tmol = self.template['molspec']['atoms']\n ntatom = self.template['molspec']['n_atoms']\n mol = []\n for i in range(nmol):\n # number of atom,\n line = fpin.readline().strip()\n natom = int(line)\n line = fpin.readline()\n\n jobname = \"%s\" % line[:-1]\n atom = []\n\n if ntatom != natom:\n print \"geometry data in template file is not consistant with xyz file. check the template.\"\n for j in range(natom):\n line = fpin.readline()\n rec = line.split()\n if len(rec) == 5:\n atomname, x, y, z, imove = rec\n elif len(rec) == 4:\n atomname, x, y, z = rec\n else:\n print \"nothing to do...\"\n exit(1)\n frg = tmol[j]['frg']\n record = {'name': atomname, 'coord': [float(x),float(y),float(z)], 'frg':frg}\n atom.append(record)\n onemol = {'natom': natom, 'jobname': jobname, 'info': '', 'atom':atom}\n mol.append(onemol)\n self.model['mol'] = mol\n fpin.close()\n return", "def read_parameters_diff_file(coords):\n param_map = hp.read_map(source +\n \"kids_data/\"\n \"COM_CompMap_Compton-SZMap-milca-\"\n \"ymaps_2048_R2.00.fits\")\n params = []\n for point in coords:\n ra, dec = point\n index = declratoindex(dec, ra)\n params.append(param_map[index])\n return params", "def extended_xyz_parse(xyz_d):\n \n s_properties = ['rot_A', \n 'rot_B', \n 'rot_C', \n 'dipole', \n 'polarizability', \n 'homo', \n 'lumo', \n 'band_gap', \n 'ese', \n 'zpe', \n 'u_0K', \n 'u_298.15K', \n 'h_298.15K', \n 'f_298.15K', \n 'cp_298.15K']\n\n mol_properties = {}\n\n\n lines = xyz_d.replace('*^','e').splitlines()\n \n r_no_atoms = lines[0]\n no_atoms = int(r_no_atoms)\n\n r_scalars = lines[1]\n mol_id = r_scalars.split()[:2]\n scalar_properties = np.array(r_scalars.split()[2:], np.float32)\n\n r_mcoords = lines[2:2+no_atoms]\n symbols = [m.split()[0] for m in r_mcoords]\n coords = np.array([m.split()[1:4] for m in r_mcoords], dtype=np.float32)\n \n charges = np.array([m.split()[4] for m in r_mcoords], dtype=np.float32)\n\n r_vibfreqs = lines[2+ no_atoms]\n vib_freqs = np.array([float(freq) for freq in r_vibfreqs.split()], dtype=np.float32)\n\n smiles = lines[3+no_atoms].split()\n inchi = lines[4+no_atoms].split()\n\n mol_properties['no_atoms'] = no_atoms\n mol_properties['mol_id'] = mol_id\n \n for i, p in enumerate(s_properties):\n mol_properties[p] = scalar_properties[i]\n\n mol_properties['symbols'] = symbols\n mol_properties['coords'] = coords\n mol_properties['charges'] = charges\n 
mol_properties['vib_freqs'] = vib_freqs\n mol_properties['smiles'] = smiles\n mol_properties['inchi'] = inchi\n \n return mol_properties", "def read_xd_master_file(path, errorpointer):\n filepointer = open(path, 'r')\n for line in filepointer.readlines():\n if 'TITLE' in line:\n compound_name = line.partition('!')[2].lstrip().rstrip()\n if 'CELL' in line:\n cell = [float(i) for i in line.split(\" \") if '.' in i]\n break\n filepointer.close()\n try:\n return compound_name, cell\n except:\n errorpointer.write(path + '\\n')\n return None, None", "def setupdict(parfile):\n pardict = {}\n with open(parfile,'r+') as f:\n for line in f:\n flags = line[56:65].split(' ')\n try:\n flags = [int(f) for f in flags]\n except:\n continue\n # if we found res pars\n if( all(flags) <= 3 ):\n # if any varied pars\n if( any(flags) > 0 ):\n # energies are dict keys\n estring = endf_float_str(float(line[0:11]))\n pardict[estring] = []\n pars = [float(line[0+11*i:11+11*i]) for i in range(len(flags))]\n for i,flag in enumerate(flags):\n if( flag > 0 ):\n pardict[estring].append((i,pars[i]))\n return pardict", "def readResiduals(in_c_file):\n\n DataDict = {}\n in_mjd, in_res, in_reserr, in_orbphs = [], [], [], []\n\n for line in open(in_c_file, \"r\").readlines():\n \n if ('#' not in line):\n elements = line.split()\n \n in_mjd.append(float(elements[6]))\n in_res.append(float(elements[2]))\n in_reserr.append(float(elements[3]))\n in_orbphs.append(float(elements[5]))\n \n # store as dictionary.\n DataDict['mjd'] = np.array(in_mjd)\n DataDict['residuals'] = np.array(in_res)\n DataDict['residuals_err'] = np.array(in_reserr)\n DataDict['orbital_phase'] = np.array(in_orbphs)\n\n return DataDict", "def get_coordinates_xyz(filename):\n\n f = open(filename, 'r')\n V = list()\n atoms = list()\n n_atoms = 0\n\n # Read the first line to obtain the number of atoms to read\n try:\n n_atoms = int(f.readline())\n except ValueError:\n print(\"Could not obtain the number of atoms in the .xyz file. \"+filename)\n return None\n\n # Skip the title line\n f.readline()\n\n # Use the number of atoms to not read beyond the end of a file\n for lines_read, line in enumerate(f):\n\n if lines_read == n_atoms:\n break\n\n atom = re.findall(r'[a-zA-Z]+', line)[0]\n # atom = atom.upper()\n\n numbers = re.findall(r'[-]?\\d+\\.\\d*(?:[Ee][-\\+]\\d+)?', line)\n numbers = [float(number) for number in numbers]\n\n # The numbers are not valid unless we obtain exacly three\n if len(numbers) == 3:\n V.append(np.array(numbers))\n atoms.append(atom)\n else:\n exit(\"Reading the .xyz file failed in line {0}. 
Please check the format.\".format(lines_read + 2))\n\n f.close()\n atoms = np.array(atoms)\n V = np.array(V)\n return atoms, V", "def getParams(self, resname, atomname):\n charge = None\n radius = None\n\n # print self.map.keys()\n\n if resname in self.map:\n resid = self.map[resname]\n if resid.hasAtom(atomname):\n atom = resid.atoms[atomname]\n charge = atom.charge\n radius = atom.radius\n\n return charge, radius", "def load_xyz(filename):\n periodic = load_periodic()\n #read molecule\n with open(filename) as f:\n size = int(next(f))\n title = next(f).strip()\n molecule = Molecule(title,size)\n for _ in range(size):\n row = next(f).split()\n tag = row[0]\n element = periodic[tag]\n coordinate = []\n for j in range(3):\n coordinate.append(float(row[j+1]))\n atom = Atom(element,coordinate)\n\n molecule.append(atom)\n f.close()\n \n return molecule", "def read_multiple_coordinates(fragmentnames):\n fragdict = {}\n for name in fragmentnames:\n path = name + '/'\n cell, pos = read_coordinates(path)\n atomlist = frac_to_cart(cell, pos)\n atomdict = {}\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n fragdict[name] = atomlist\n return fragdict", "def readParams(file_name):\n try:\n info = np.load(file_name,allow_pickle=True)[()]\n except FileNotFoundError:\n if file_name.split('/')[-2] == 'checkpoint':\n lfc_id_dir = '/expres/extracted/lfc_cal/lfc_id/'\n file_name = lfc_id_dir + os.path.basename(file_name)\n info = np.load(file_name,allow_pickle=True)[()]\n else:\n raise FileNotFoundError\n # Assemble information into \"fit-able\" form\n num_orders = len(info['params'])\n lines = [p[:,1] for p in info['params'] if p is not None]\n errs = [np.sqrt(cov[:,1,1]) for cov in info['cov'] if cov is not None]\n ordrs = [o for o in np.arange(86) if info['params'][o] is not None]\n waves = [w for w in info['wvln'] if w is not None]\n # I believe, but am not sure, that the wavelengths are multiplied by order\n # to separate them from when orders overlap at the edges\n waves = [wvln for order, wvln in zip(ordrs,waves)]\n ordrs = [np.ones_like(x) * m for m,x in zip(ordrs, lines)]\n\n x = np.concatenate(lines)\n y = np.concatenate(ordrs)\n e = np.concatenate(errs)\n w = np.concatenate(waves)\n # Note: default of pipeline includes ThAr lines, which we're not including here\n \n return (x,y,w,e)", "def read_params(fname):\n f = open(fname, 'r')\n par = {} #output\n for i in range(10): # esta dentro de las primeras 10 lineas\n l = f.readline().split()\n #print \" ---> \", l\n number = u'%s' % l[-1] # presumably a number\n if not number.replace('.','').replace('-','').isnumeric():\n if l[0]=='#####':\n break\n else:\n continue # we proceed ONLY IF this is numeric string\n #print ' FIRST: ', l[0]\n if l[0]=='#####':\n #print \"IM I HERE????\"\n break # end of header\n\n name = l[1][:-1] # l[0] es '#', y -1 para comernos el \":\"\n value = np.float(l[2]) # l[2] es el valor\n par[name] = value\n\n return par", "def _read_xyz(ds,datafile,long_format=False):\n\n from cheml.io.xyz import get_molecules\n\n ds.list_of_mol = get_molecules(datafile,ds.nmol,long_format)\n ds.nmol = len(ds.list_of_mol)", "def read_xyz(self, filename):\n # first line contains number of atoms\n self.numatom = int(filename.readline().split()[0])\n # second line contains a comment\n self.comment = filename.readline()[:-3]\n # rest of the lines contain coordinates structured Element X Y Z\n string = \"Element X Y Z \\n\" + filename.read()\n self.contents = pd.read_table(StringIO(string), sep=r'\\s+')", "def _load_dat(self):\n modelfile 
= self.filename\n with open(modelfile) as f:\n content = f.readlines()\n\n self.comment = content.pop(0) # Comment line\n content = [x for x in content if not x.startswith('#')]\n\n for line in content:\n if('atoms' in line): self.natoms = int(line.split()[0])\n if('xlo' in line and 'xhi' in line):\n self.xsize = abs(float(line.split()[0])) + abs(float(line.split()[1]))\n if('ylo' in line and 'yhi' in line):\n self.ysize = abs(float(line.split()[0])) + abs(float(line.split()[1]))\n if('zlo' in line and 'zhi' in line):\n self.zsize = abs(float(line.split()[0])) + abs(float(line.split()[1]))\n if('atom types' in line): nelems = int(line.split()[0])\n if('Masses' in line): mflag = content.index(line) + 1\n if('Atoms' in line): aflag = content.index(line) + 1\n try:\n mflag\n except NameError:\n raise Exception(\"ERROR! You need to define the masses in the .dat file.\")\n atomtypes = {}\n while(nelems > 0):\n if(len(content[mflag].split()) == 2):\n atomtypes[int(content[mflag].split()[0])] = masses.get_znum(float(content[mflag].split()[1]))\n nelems -= 1\n mflag += 1\n self.atoms = []\n natoms = self.natoms\n while(natoms > 0):\n sline = content[aflag].split()\n if(len(sline) >= 5):\n # We found an atom\n id = int(sline[0])\n type = int(sline[1])\n x = float(sline[2])\n y = float(sline[3])\n z = float(sline[4])\n znum = atomtypes[type]\n # Add it to the model\n self.atoms.append(Atom(id,znum,x,y,z))\n natoms -= 1\n aflag += 1", "def process(path):\n # get parameter value:\n with open('config.cym', 'r') as f:\n line = f.readline()\n #print(line)\n pam = float(line[1:])\n f.close()\n # get position of aster:\n with open('aster.txt', 'r') as f:\n for line in f:\n if len(line)>3 and not line[0]=='%':\n #print(line)\n val = line.split()\n x = float(val[2])\n y = float(val[3])\n #z = float(val[4])\n #pos = math.sqrt(x*x+y*y+z*z)\n pos = math.sqrt(x*x+y*y)\n\n f.close()\n return (pam, pos)", "def readFile(file_name):\n if file_name.split('.')[-1] == 'thid':\n x,m,w = readThid(file_name)\n e = np.empty_like(x)\n e[:] = np.nan\n return x,m,w,e\n else:\n return readParams(file_name)", "def parse_params(filename):\n\n all_dicts = []\n\n with open(filename) as f:\n\n for line in f:\n\n params = line.strip().split()\n\n temp_dict = {\"die\": float(params[0])}\n\n temp_dict.update({i: float(params[i]) for i in range(1, 7)})\n\n all_dicts.append(temp_dict)\n\n f.close()\n\n return all_dicts", "def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions", "def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions", "def load_params_from_file(path):\n save_dict = mx.nd.load(path)\n arg_params = {}\n aux_params = {}\n for k, v in save_dict.items():\n tp, name = k.split(':', 1)\n if tp == 'arg':\n arg_params[name] = v\n if tp == 'aux':\n aux_params[name] = v\n return arg_params, aux_params", "def read_gbvi_parameters(filename):\n\n parameters = dict()\n \n infile = open(filename, 'r')\n for line in infile:\n # Strip trailing comments\n index = line.find('%')\n if index != -1:\n line = line[0:index] \n\n # Parse parameters\n elements = line.split()\n if len(elements) == 3:\n [atomtype, radius, gamma] = elements\n parameters['%s_%s' % 
(atomtype,'radius')] = float(radius)\n parameters['%s_%s' % (atomtype,'gamma')] = float(gamma)\n\n return parameters", "def read_abinit(filename='abinit.in'):\n\n from ase import Atoms, units\n\n if isinstance(filename, str):\n f = open(filename)\n else: # Assume it's a file-like object\n f = filename\n\n lines = f.readlines()\n if type(filename) == str:\n f.close()\n\n full_file = ''\n for line in lines:\n if '#' in line:\n meat, comment = line.split('#')\n else:\n meat = line\n full_file = full_file + meat + ' '\n\n full_file.strip()\n tokens = full_file.lower().split()\n\n # note that the file can not be scanned sequentially\n\n index = tokens.index(\"acell\")\n unit = 1.0\n if(tokens[index+4].lower()[:3] != 'ang'):\n unit = units.Bohr\n acell = [unit*float(tokens[index+1]),\n unit*float(tokens[index+2]),\n unit*float(tokens[index+3])]\n\n index = tokens.index(\"natom\")\n natom = int(tokens[index+1])\n\n index = tokens.index(\"ntypat\")\n ntypat = int(tokens[index+1])\n\n index = tokens.index(\"typat\")\n typat = []\n for i in range(natom):\n typat.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"znucl\")\n znucl = []\n for i in range(ntypat):\n znucl.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"rprim\")\n rprim = []\n for i in range(3):\n rprim.append([acell[i]*float(tokens[index+3*i+1]),\n acell[i]*float(tokens[index+3*i+2]),\n acell[i]*float(tokens[index+3*i+3])])\n\n # create a list with the atomic numbers\n numbers = []\n for i in range(natom):\n ii = typat[i] - 1\n numbers.append(znucl[ii])\n\n # now the positions of the atoms\n if \"xred\" in tokens:\n index = tokens.index(\"xred\")\n xred = []\n for i in range(natom):\n xred.append([float(tokens[index+3*i+1]),\n float(tokens[index+3*i+2]),\n float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, scaled_positions=xred, numbers=numbers,\n pbc=True)\n else:\n if \"xcart\" in tokens:\n index = tokens.index(\"xcart\")\n unit = units.Bohr\n elif \"xangst\" in tokens:\n unit = 1.0\n index = tokens.index(\"xangst\")\n else:\n raise IOError(\n \"No xred, xcart, or xangs keyword in abinit input file\")\n\n xangs = []\n for i in range(natom):\n xangs.append([unit*float(tokens[index+3*i+1]),\n unit*float(tokens[index+3*i+2]),\n unit*float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, positions=xangs, numbers=numbers, pbc=True)\n \n try:\n i = tokens.index('nsppol')\n except ValueError:\n nsppol = None\n else:\n nsppol = int(tokens[i + 1])\n\n if nsppol == 2:\n index = tokens.index('spinat')\n magmoms = [float(tokens[index + 3 * i + 3]) for i in range(natom)]\n atoms.set_initial_magnetic_moments(magmoms)\n\n return atoms", "def getCoords(file):\n global demag\n name = file.split('.')[0]\n name = name.split('_')\n x = int(name[2])//demag\n y = int(name[3])//demag\n return(int(x),int(y))", "def load_mxm2msd():\n res = {}\n with open(mxm2msd()) as f:\n for line in f:\n mxm, msd = line.strip().split(',')\n res[mxm] = msd\n return res", "def _read_dx(self, FN):\n if FN.endswith('.dx'):\n F = open(FN, 'r')\n else:\n import gzip\n F = gzip.open(FN, 'r')\n\n # Read the header\n line = F.readline()\n while line.find('object') == -1:\n line = F.readline()\n header = {}\n header['counts'] = [int(x) for x in line.split(' ')[-3:]]\n for name in ['origin', 'd0', 'd1', 'd2']:\n header[name] = [float(x) for x in F.readline().split(' ')[-3:]]\n F.readline()\n header['npts'] = int(F.readline().split(' ')[-3])\n\n # Test to make sure the grid type is okay.\n # These conditions are not absolultely essential,\n # but they reduce the 
number of subtraction operations.\n if not (header['d0'][1] == 0 and header['d0'][2] == 0\n and header['d1'][0] == 0 and header['d1'][2] == 0\n and header['d2'][0] == 0 and header['d2'][1] == 0):\n raise Exception('Trilinear grid must be in original basis')\n if not (header['d0'][0] > 0 and header['d1'][1] > 0\n and header['d2'][2] > 0):\n raise Exception('Trilinear grid must have positive coordinates')\n\n # Read the data\n vals = np.ndarray(shape=header['npts'], dtype=float)\n index = 0\n while index < header['npts']:\n line = F.readline()[:-1]\n items = [float(item) for item in line.split()]\n vals[index:index + len(items)] = items\n index = index + len(items)\n F.close()\n\n data = {\n 'origin':np.array(header['origin']), \\\n 'spacing':np.array([header['d0'][0],header['d1'][1],header['d2'][2]]), \\\n 'counts':np.array(header['counts']), \\\n 'vals':vals}\n return data", "def load_pdb(self):\n # 'atoms': list with the residue id for every atom.\n pdb = self.pdb\n for l_i in range(len(pdb)): \n dat = bio_lib.get_labels(pdb[l_i])\n res_atm = dat[0]\n res_nam = dat[1]\n res_ind = dat[2]\n res_chn = dat[3]\n self.identifiers.append([res_nam, res_ind, res_chn]) \n #x_i = dat[4]\n #y_i = dat[5]\n #z_i = dat[6]\n # Adjusted coordinates returned from PDB are not strictly formatted.\n if len(pdb[l_i]) > 10:\n x_i = pdb[l_i][31:].split()[0]\n y_i = pdb[l_i][31:].split()[1]\n z_i = pdb[l_i][31:].split()[2]\n c_i = \" \".join([res_atm, x_i, y_i, z_i])\n self.res_atm_xyz.append(c_i)", "def init_file(\n mode, filename, dimension=\"length\", units=\"automatic\", cell_units=\"automatic\"\n):\n\n rfile = open(filename, \"r\")\n ratoms = []\n\n info(\n \" # Initializing from file %s. Dimension: %s, units: %s, cell_units: %s\"\n % (filename, dimension, units, cell_units),\n verbosity.low,\n )\n while True:\n # while loop, so that more than one configuration can be given\n # so multiple beads can be initialized at once.\n try:\n ret = read_file(\n mode, rfile, dimension=dimension, units=units, cell_units=cell_units\n )\n except EOFError:\n break\n ratoms.append(ret[\"atoms\"])\n return ratoms, ret[\"cell\"] # if multiple frames, the last cell is returned", "def get_molecule_dict(chemfile):\n molecule_dict={}\n with open(chemfile,'r') as f:\n for line in f:\n line=line.strip().split('\\t')\n ikey=line[0]\n smi=line[1]\n mol = Chem.MolFromSmiles(smi)\n if not mol:\n raise ValueError(\"Could not generate Mol from SMILES string:\", smi)\n #Chem.SanitizeMol(mol)\n\n atoms={} #atom_idx -> atom features\n bonds={} #bond_idx -> bond features\n atoms2bond={} #(atom_idx1,atom_idx2) -> bond_idx\n \n nodes_by_degree = {d: [] for d in degrees}\n for atom in mol.GetAtoms():\n atom_feature = atom_features(atom)\n atom_id = smi+str(atom.GetIdx())\n atoms[atom.GetIdx()]=atom_feature \n atom_neighbors={aid: [] for aid in atoms.keys()} #atom_idx -> neighbor atom idxs\n bond_neighbors={aid: [] for aid in atoms.keys()} #atom_idx -> neighbor bond idxs\n\n for bond in mol.GetBonds():\n src_atom_idx = bond.GetBeginAtom().GetIdx()\n tgt_atom_idx = bond.GetEndAtom().GetIdx()\n bond_idx = bond.GetIdx()\n bond_neighbors[src_atom_idx].append(bond_idx)\n bond_neighbors[tgt_atom_idx].append(bond_idx)\n bond_feature = bond_features(bond)\n bonds[bond.GetIdx()] = bond_feature\n atom_neighbors[src_atom_idx].append(tgt_atom_idx)\n atom_neighbors[tgt_atom_idx].append(src_atom_idx)\n atoms2bond[(src_atom_idx,tgt_atom_idx)]=bond_idx\n atoms2bond[(tgt_atom_idx,src_atom_idx)]=bond_idx\n \n atoms_by_degree={d: [] for d in degrees}\n 
bonds_by_degree={d: [] for d in degrees}\n for aid in atom_neighbors:\n neighbor_atoms = atom_neighbors[aid]\n d = len(neighbor_atoms) #degree of the atom\n atoms_by_degree[d].append(aid) #current atom is degree=d\n neighbor_bonds=[]\n for neighbor in neighbor_atoms:\n bond_idx=atoms2bond[(aid,neighbor)]\n neighbor_bonds.append(bond_idx)\n bonds_by_degree[d].append(neighbor_bonds)\n\n neighbor_by_degree = []\n for degree in degrees:\n neighbor_by_degree.append({\n 'atom': atoms_by_degree[degree],\n 'bond': bonds_by_degree[degree]\n })\n \n molecule_dict[ikey]={'smiles':str(smi),\n 'neighbor_by_degree':neighbor_by_degree,\n 'atoms':atoms,'bonds':bonds,\n 'atom_neighbor':atom_neighbors,\n 'bond_neighbor':bond_neighbors}\n return molecule_dict", "def readCrystParam(crystfile):\n \n # Default values\n ccell1 = np.eye(3)\n ccell2 = np.eye(3)\n planehkl = [1,0,0]\n diruvw = [0,1,0]\n \n try:\n with open(crystfile,\"r\") as f:\n content = f.readlines()\n except FileNotFoundError:\n content = []\n\n for l in content:\n if l[0].rstrip() == \"#\":\n continue\n line = l.split('=')\n if len(line) == 2:\n if line[0].rstrip()==\"ccell1\":\n ccell1 = eval(line[1].rstrip())\n elif line[0].rstrip()==\"ccell2\":\n ccell2 = eval(line[1].rstrip())\n elif line[0].rstrip()==\"planehkl\":\n planehkl = eval(line[1].rstrip())\n elif line[0].rstrip()==\"diruvw\":\n diruvw = eval(line[1].rstrip())\n else:\n print(\"WARNING: %s is not a supported input\"%(line[0].rstrip()))\n elif len(line) > 2:\n raise SyntaxError(l)\n\n return ccell1, ccell2, planehkl, diruvw", "def get_dimensions ( file_in, separator ) :\n try :\n logger.info ( \"Extract dimensions from xyz file \" + str(file_in) ) \n d = {}\n first_row = True\n d[NOPS] = 0\n file = open(file_in, 'r')\n for line in file :\n d[NOPS] = d[NOPS] + 1\n l = line.rstrip().split(separator)\n x = float(l[0])\n y = float(l[1])\n z = float(l[2])\n if first_row :\n d[MINX] = x\n d[MAXX] = x\n d[MINY] = y\n d[MAXY] = y\n d[MINZ] = z\n d[MAXZ] = z\n first_row = False\n else :\n if x < d[MINX] :\n d[MINX] = x\n if x > d[MAXX] :\n d[MAXX] = x \n if y < d[MINY] :\n d[MINY] = y\n if y > d[MAXY] :\n d[MAXY] = y \n if z < d[MINZ] :\n d[MINZ] = z\n if z > d[MAXZ] :\n d[MAXZ] = z \n file.close() \n logger.info ('Now return')\n return d\n except Exception, err:\n logger.critical(\"Extract dimensions from xyz file failed: ERROR: %s\\n\" % str(err))\n raise", "def get_params(file):\n import shlex\n f = open(file)\n for line in f:\n text = shlex.split(line)\n if (\"InitialTime\" in text):\n tval = float(text[len(text)-1])\n elif (\"DensityUnits\" in text):\n dUnit = float(text[len(text)-1])\n elif (\"TimeUnits\" in text):\n tUnit = float(text[len(text)-1])\n elif (\"LengthUnits\" in text):\n lUnit = float(text[len(text)-1])\n return [tval, dUnit, tUnit, lUnit]", "def read_results():\r\n with open(\"packing.nfo\", \"r\") as fin:\r\n fin.readline()\r\n fin.readline()\r\n por_theory = float(fin.readline().split()[2])\r\n por_final = float(fin.readline().split()[2])\r\n print('Theoretical porosity:', por_theory)\r\n print('Final porosity:', por_final)\r\n with open(\"packing.xyzd\", \"rb\") as fin:\r\n btxt = fin.read()\r\n txt = list(struct.unpack(\"<\" + \"d\" * (len(btxt) // 8), btxt))\r\n data = array(zip(*[iter(txt)] * 4))\r\n data[:, 3] = data[:, 3] * \\\r\n ((1 - por_final) / (1 - por_theory))**(1 / 3)\r\n return data", "def read_parameters(self, entry=None):\n if entry:\n self.entry = entry\n with self.entry.nxfile:\n self.name = self.entry.nxroot.nxname + \"/\" + self.entry.nxname\n 
if 'unit_cell' in self.entry['sample']:\n lattice_parameters = self.read_parameter('sample/unit_cell')\n if lattice_parameters is not None:\n self.a, self.b, self.c = lattice_parameters[:3]\n self.alpha, self.beta, self.gamma = lattice_parameters[3:]\n elif 'unit_cell_abc' in self.entry['sample']:\n lattice_parameters = self.read_parameter(\n 'sample/unit_cell_abc')\n if lattice_parameters is not None:\n self.a, self.b, self.c = lattice_parameters\n lattice_parameters = self.read_parameter(\n 'sample/unit_cell_alphabetagamma')\n if lattice_parameters is not None:\n self.alpha, self.beta, self.gamma = lattice_parameters\n else:\n self.a = self.read_parameter('sample/unitcell_a', self.a)\n self.b = self.read_parameter('sample/unitcell_b', self.b)\n self.c = self.read_parameter('sample/unitcell_c', self.c)\n self.alpha = self.read_parameter(\n 'sample/unitcell_alpha', self.alpha)\n self.beta = self.read_parameter(\n 'sample/unitcell_beta', self.beta)\n self.gamma = self.read_parameter(\n 'sample/unitcell_gamma', self.gamma)\n self.formula = self.read_parameter('sample/chemical_formula',\n self.formula)\n self.space_group = self.read_parameter(\n 'sample/space_group', self.space_group)\n self.laue_group = self.read_parameter(\n 'sample/laue_group', self.laue_group)\n self.wavelength = self.read_parameter(\n 'instrument/monochromator/wavelength', self.wavelength)\n self.distance = self.read_parameter('instrument/detector/distance',\n self.distance)\n self.yaw = self.read_parameter('instrument/detector/yaw', self.yaw)\n self.pitch = self.read_parameter('instrument/detector/pitch',\n self.pitch)\n self.roll = self.read_parameter(\n 'instrument/detector/roll', self.roll)\n self.xc = self.read_parameter('instrument/detector/beam_center_x',\n self.xc)\n self.yc = self.read_parameter('instrument/detector/beam_center_y',\n self.yc)\n self.xd = self.read_parameter('instrument/detector/translation_x',\n self.xd)\n self.yd = self.read_parameter('instrument/detector/translation_y',\n self.yd)\n self.frame_time = self.read_parameter(\n 'instrument/detector/frame_time', self.frame_time)\n self.shape = self.read_parameter(\n 'instrument/detector/shape', self.shape)\n phi = self.read_parameter('instrument/goniometer/phi', self.phi)\n if isinstance(phi, np.ndarray) and len(phi) > 1:\n self.phi = phi[0]\n self.phi_step = phi[1] - phi[0]\n else:\n self.phi = phi\n try:\n self.phi_step = self.read_parameter(\n 'instrument/goniometer/phi', self.phi, attr='step')\n except Exception:\n pass\n self.chi = self.read_parameter(\n 'instrument/goniometer/chi', self.chi)\n self.omega = self.read_parameter('instrument/goniometer/omega',\n self.omega)\n if 'instrument/goniometer' in self.entry:\n if 'theta' in self.entry['instrument/goniometer']:\n self.theta = self.read_parameter(\n 'instrument/goniometer/theta', self.theta)\n elif 'goniometer_pitch' in self.entry['instrument/goniometer']:\n self.theta = self.read_parameter(\n 'instrument/goniometer/goniometer_pitch', self.theta)\n elif 'gonpitch' in self.entry['instrument/goniometer']:\n self.theta = self.read_parameter(\n 'instrument/goniometer/gonpitch', self.theta)\n self.symmetry = self.read_parameter('sample/unit_cell_group',\n self.symmetry)\n self.centring = self.read_parameter('sample/lattice_centring',\n self.centring)\n self.xp = self.read_parameter('peaks/x')\n self.yp = self.read_parameter('peaks/y')\n self.zp = self.read_parameter('peaks/z')\n self.polar_angle = self.read_parameter('peaks/polar_angle')\n self.azimuthal_angle = 
self.read_parameter('peaks/azimuthal_angle')\n self.intensity = self.read_parameter('peaks/intensity')\n self.pixel_size = self.read_parameter(\n 'instrument/detector/pixel_size', self.pixel_size)\n self.pixel_mask = self.read_parameter(\n 'instrument/detector/pixel_mask')\n self.pixel_mask_applied = self.read_parameter(\n 'instrument/detector/pixel_mask_applied')\n self.rotation_angle = self.read_parameter('peaks/rotation_angle')\n self.primary = self.read_parameter('peaks/primary_reflection')\n self.secondary = self.read_parameter('peaks/secondary_reflection')\n self.Umat = self.read_parameter(\n 'instrument/detector/orientation_matrix')\n if isinstance(self.polar_angle, np.ndarray):\n try:\n self.set_polar_max(np.sort(self.polar_angle)[200] + 0.1)\n except IndexError:\n self.set_polar_max(self.polar_angle.max())\n else:\n self.set_polar_max(10.0)\n self.Qh = self.read_parameter('transform/Qh')\n self.Qk = self.read_parameter('transform/Qk')\n self.Ql = self.read_parameter('transform/Ql')\n self.initialize_peaks()", "def get_dji_meta( filepath ):\n\n # list of metadata tags\n djimeta=[\"AbsoluteAltitude\",\"RelativeAltitude\",\"GimbalRollDegree\",\"GimbalYawDegree\",\\\n \"GimbalPitchDegree\",\"FlightRollDegree\",\"FlightYawDegree\",\"FlightPitchDegree\"]\n\n # read file in binary format and look for XMP metadata portion\n fd = open(filepath,'rb')\n d= fd.read()\n xmp_start = d.find(b'<x:xmpmeta')\n xmp_end = d.find(b'</x:xmpmeta')\n\n # convert bytes to string\n xmp_b = d[xmp_start:xmp_end+12]\n xmp_str = xmp_b.decode()\n\n fd.close()\n\n # parse the XMP string to grab the values\n xmp_dict={}\n for m in djimeta:\n istart = xmp_str.find(m)\n ss=xmp_str[istart:istart+len(m)+10]\n val = float(ss.split('\"')[1])\n xmp_dict.update({m : val})\n\n return xmp_dict", "def _load_data(self, atom_syms, coords, bohrs=True):\n\n # Imports\n import numpy as np\n from .const import atom_num, PHYS\n from .error import XYZError\n\n # Gripe if already initialized\n if 'geoms' in dir(self):\n raise XYZError(XYZError.OVERWRITE,\n \"Cannot overwrite contents of existing OpanXYZ\", \"\")\n ## end if\n\n # Check and store dimensions\n if not len(coords.shape) == 1:\n raise ValueError(\"Coordinates are not a vector\")\n ## end if\n if not len(atom_syms.shape) == 1:\n raise ValueError(\"Atom symbols are not a simple list\")\n ## end if\n if not coords.shape[0] == 3 * atom_syms.shape[0]:\n raise ValueError(\"len(coords) != 3 * len(atom_syms)\")\n ## end if\n\n # Proof the atoms list\n if not all( (atom_syms[i].upper() in atom_num)\n for i in range(atom_syms.shape[0]) ):\n # Invalid atoms specified\n raise ValueError(\"Invalid atoms specified: {0}\".format(\n [(j, atom_syms[j]) for j in\n (i for (i, valid) in\n enumerate(map(lambda k: k in atom_num, atom_syms))\n if not valid\n )\n ] ))\n ## end if\n\n # Ensure the geometry is all numeric\n if not all(map(np.isreal, coords)):\n raise ValueError(\"All coordinates must be real numeric\")\n ## end if\n\n # Store the number of atoms. Only one geometry. 
Standard string\n # content for things only relevant to file load.\n self.num_atoms = atom_syms.shape[0]\n self.num_geoms = 1\n self.in_str = self.LOAD_DATA_FLAG\n self.descs = np.array([self.LOAD_DATA_FLAG])\n self.XYZ_path = self.LOAD_DATA_FLAG\n\n # Store the atoms as vector\n self.atom_syms = list(map(str.upper, list(atom_syms)))\n\n # Store the single geometry by bracketing with an array\n self.geoms = [coords / (1.0 if bohrs else PHYS.ANG_PER_BOHR)]", "def get_params(file):\n import shlex\n f = open(file)\n for line in f:\n text = shlex.split(line)\n if (\"DensityUnits\" in text):\n dUnit = float(text[len(text)-1])\n elif (\"TimeUnits\" in text):\n tUnit = float(text[len(text)-1])\n elif (\"LengthUnits\" in text):\n lUnit = float(text[len(text)-1])\n vUnit = lUnit/tUnit\n return [dUnit, tUnit, lUnit, vUnit]", "def get_args():\n\n params = {}\n\n if len(argv) == 1:\n\n input_file = input('Please enter the path to the parameter file: ')\n\n else:\n\n input_file = argv[1]\n\n if path.isfile(input_file) == False:\n\n print('ERROR: Cannot find input parameter file')\n exit()\n\n flines = open(input_file,'r').readlines()\n\n str_keys = ['catalog_file', 'red_dir',\n 'target_ra', 'target_dec',\n 'star_class', 'isochrone_file',\n 'target_lc_file_g', 'target_lc_file_r', 'target_lc_file_i']\n\n for line in flines:\n\n (key, value) = line.replace('\\n','').split()\n\n if key in str_keys:\n\n params[key] = value\n\n else:\n\n if 'none' not in str(value).lower():\n params[key] = float(value)\n else:\n params[key] = None\n\n return params", "def readParams(path):\n tiles = open(path, \"r\")\n #--- Starting date\n tiles.readline()\n index = tiles.readline()[:-1]\n \n #--- Starting date\n tiles.readline()\n B_date = tiles.readline()[:-1]\n \n #--- Stopping date\n tiles.readline()\n E_date = tiles.readline()[:-1]\n \n #--- DATA \n tiles.readline()\n DATA_path = tiles.readline()[:-1]\n \n #--- Csv \n tiles.readline()\n out = tiles.readline()[:-1]\n \n #--- Shapefile\n tiles.readline()\n shp = tiles.readline()[:-1]\n \n #--- Water mask\n water = DATA_path + '/waterMask'\n \n return index, B_date, E_date, DATA_path, out, shp, water", "def read_txt_particles(particles_file, refpart, real_particles, bucket_length, comm, madx_format, verbose):\r\n \r\n four_momentum = refpart.get_four_momentum()\r\n pmass = four_momentum.get_mass()\r\n E_0 = four_momentum.get_total_energy()\r\n p0c = four_momentum.get_momentum()\r\n\r\n myrank = comm.get_rank()\r\n mpisize = comm.get_size()\r\n \r\n if myrank==0 and verbose:\r\n if madx_format:\r\n print \"Loading madX particles from txt file: \", particles_file\r\n else:\r\n print \"Loading Synergia particles from txt file: \", particles_file\r\n\r\n if myrank == 0:\r\n particles = np.loadtxt(particles_file)\r\n num_total_particles = particles.shape[0]\r\n # broadcast num particles to all nodes\r\n MPI.COMM_WORLD.bcast(num_total_particles, root=0)\r\n else:\r\n num_total_particles = None\r\n num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)\r\n\r\n if myrank == 0:\r\n # make sure the data has the correct shape, either [n,6] without\r\n # particles IDs or [n,7] with particle IDs.\r\n if (particles.shape[1] != 6) and (particles.shape[1] != 7):\r\n raise RuntimeError, \"input data shape %shas incorrect number of particle coordinates\"%repr(particles.shape)\r\n \r\n \r\n if madx_format:\r\n # numpy manipulations to convert kinematics\r\n # convert MAD-X T=-c*dt to Synergia c*ct\r\n particles[:,4] = -particles[:,4]\r\n # convert MAD-X Delta-E/pc to 
Synergia delta-p/p\r\n # sqrt(((dE/p0c)+(E0/p0c))**2 - (m/p0c)**2) - (p0c/p0c)\r\n m_over_pc = pmass/p0c\r\n E_0_over_pc = E_0/p0c\r\n particles[:,5] = np.sqrt( (particles[:,5] + E_0_over_pc) *\r\n (particles[:,5] + E_0_over_pc) - m_over_pc**2 ) - 1.0\r\n \r\n\r\n # if there are no IDs, append particle ID column\r\n if particles.shape[1] != 7:\r\n particles_w_id = np.column_stack((particles,\r\n np.arange(num_total_particles, dtype='d')))\r\n else:\r\n particles_w_id = particles\r\n \r\n if myrank == 0:\r\n print \"Read \", num_total_particles, \" particles\"\r\n \r\n #Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016\r\n #Using old constructor throws an ArgumentError of a non-standard type.\r\n # Using a try and except to handle both instances.\r\n try:\r\n # try the original constructor\r\n bunch = synergia.bunch.Bunch(\r\n refpart,\r\n num_total_particles, real_particles, comm,\r\n bucket_length)\r\n except Exception, e:\r\n #look to see if it's an ArgumentError by evaluating the traceback\r\n if (not str(e).startswith(\"Python argument types in\")):\r\n raise\r\n else:\r\n # use the new constructor\r\n if verbose:\r\n print \"Using updated bunch constructor\"\r\n bunch = synergia.bunch.Bunch(\r\n refpart,\r\n num_total_particles, real_particles, comm)\r\n # now set the new parameter 'z_period_length'\r\n if bucket_length is not None:\r\n bunch.set_z_period_length(bucket_length)\r\n else:\r\n bucket_length = 1. #fix this quantity\r\n\r\n local_num = bunch.get_local_num()\r\n local_particles = bunch.get_local_particles()\r\n\r\n # Each processor will have a possibly different number of local particles.\r\n # rank 0 has to find out how many each of them has and distribute them\r\n n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)\r\n if myrank == 0:\r\n # copy in my particles\r\n this_rank_start = 0\r\n local_particles[:,:] = particles_w_id[0:local_num, :]\r\n this_rank_start += local_num\r\n # send particles out to other ranks\r\n for r in range(1, mpisize):\r\n this_rank_end = this_rank_start+n_particles_by_proc[r]\r\n MPI.COMM_WORLD.send(obj=particles_w_id[this_rank_start:this_rank_end, :],\r\n dest=r)\r\n this_rank_start += n_particles_by_proc[r]\r\n else:\r\n # I'm not rank 0. 
Receive my particles\r\n lp = MPI.COMM_WORLD.recv(source=0)\r\n local_particles[:,:] = lp[:,:]\r\n return bunch", "def _read_file_for_magnets(sequence_file):\n LOG.debug(\" Reading File\")\n length_constants = {}\n magnet_strings = {}\n with open(sequence_file, 'r') as f_seq:\n for line in f_seq:\n var_and_value = _find_element_length(line)\n if var_and_value is not None:\n length_constants[var_and_value[0]] = var_and_value[1]\n else:\n var_and_value = _find_magnet_strength(line)\n if var_and_value is not None:\n magnet_strings[var_and_value[0]] = var_and_value[1]\n return magnet_strings, length_constants", "def load_atom_matrixes(name):\r\n hdulist = pyfits.open(os.path.join(data_dir, '{}.fits'.format(name)))#, ignore_missing_end=True)\r\n data_list = [hdulist[i].data for i in range(1, len(hdulist))]\r\n return data_list", "def read(f):\n \n if isinstance(f, basestring):\n # If the input is a string, treat as file name\n with open(f) as fh: # Ensure file is closed\n return read(fh) # Call again with file object\n \n # First line contains the date\n date = f.readline()\n if not date:\n raise IOError(\"Cannot read from input file \"+str(filename))\n \n # Second is description\n desc = f.readline()\n \n token = file_numbers(f)\n \n # Third contains number of mesh points\n try:\n npsi = int(token.next())\n ntheta = int(token.next())\n isym = int(token.next())\n except StopIteration:\n raise IOError(\"Unexpected end of file while reading grid size\")\n except ValueError:\n raise IOError(\"Third line should contain npsi, ntheta and isym\")\n \n # Check values\n if (isym < 0) or (isym > 1):\n raise IOError(\"isym must be either 0 or 1\")\n if (npsi < 1) or (ntheta < 1):\n raise IOError(\"Invalid npsi=\"+str(npsi)+\" or ntheta=\" + str(ntheta))\n \n # Read normalisation factors\n\n try:\n rcnt = float(token.next())\n xma = float(token.next())\n zma = float(token.next())\n btor = float(token.next())\n curtot = float(token.next())\n eaxe = float(token.next())\n dnorm = float(token.next())\n except:\n raise IOError(\"Couldn't read normalisation factors\")\n \n def read_array(n, name=\"Unknown\"):\n data = np.zeros([n])\n try:\n for i in np.arange(n):\n data[i] = float(token.next())\n except:\n raise IOError(\"Failed reading array '\"+name+\"' of size \", n)\n return data\n\n def read_2d(nx, ny, name=\"Unknown\"):\n data = np.zeros([nx, ny])\n for i in np.arange(nx):\n data[i,:] = read_array(ny, name+\"[\"+str(i)+\"]\")\n return data\n\n # Read 1D arrays\n psiflux = read_array(npsi, \"psiflux\")\n fnorm = read_array(npsi, \"fnorm\")\n ffpnorm = read_array(npsi, \"ffpnorm\")\n ponly = read_array(npsi, \"ponly\")\n pponly = read_array(npsi, \"pponly\")\n qsf = read_array(npsi, \"qsf\")\n d = read_array(npsi, \"d\")\n \n dpdz = read_array(ntheta, \"dpdz\")\n dpdr = read_array(ntheta, \"dpdr\")\n \n # 2D arrays\n \n xnorm = read_2d(ntheta, npsi, \"xnorm\")\n znorm = read_2d(ntheta, npsi, \"znorm\")\n \n # Try to read Br and Bz (may be present)\n try:\n Br = read_2d(ntheta, npsi, \"Br\")\n Bz = read_2d(ntheta, npsi, \"Bz\")\n except:\n Br = Bz = None\n \n ny = ntheta\n\n if isym == 1:\n # Fill in values for up-down symmetric case\n print(\"Grid is up-down symmetric. 
Reflecting grid about midplane\")\n ny = tsize = 2*(ntheta - 1) + 1\n \n def reflect(data, mapfunc = lambda x:x):\n \"\"\" Reflect a variable about midplane\n Optionally supply a mapping function\"\"\"\n data2 = np.zeros([tsize, npsi])\n # Copy the original data\n for i in np.arange(ntheta):\n data2[i,:] = data[i,:]\n # Now fill in the remainder\n for i in np.arange(ntheta, tsize):\n t0 = tsize - 1 - i\n data2[i,:] = mapfunc(data[t0,:])\n return data2\n \n xnorm = reflect(xnorm)\n znorm = reflect(znorm, lambda x: 2.*zma - x) # Reflect about zma\n if Br != None:\n Br = reflect(Br, lambda x:-x) # Br reverses\n if Bz != None:\n Bz = reflect(Bz) # Bz remains the same\n theta = tsize\n\n # Make sure we have Br, Bz and Bpol\n\n if (Br == None) or (Bz == None):\n # Calculate Bpol from psi then Br and Bz from Bpol\n # Use dpsi = R*Bp dx (for now)\n Bpol = np.zeros([ny, npsi])\n \n def deriv(f):\n n = np.size(f)\n dfdi = np.zeros(n)\n dfdi[1:-1] = (f[2:n] - f[0:-2])/2. # Central difference in the middle\n dfdi[0] = f[1] - f[0]\n dfdi[-1] = f[-1] - f[-2]\n return dfdi\n \n for i in np.arange(ntheta):\n drdi = deriv(xnorm[i, :])\n dzdi = deriv(znorm[i, :])\n dldi = sqrt(drdi**2 + dzdi**2) # Arc length\n dpsidi = deriv(psiflux)\n \n Bpol[i, :] = dpsidi / (dldi * xnorm[i,:])\n else:\n Bpol = np.sqrt(Br**2 + Bz**2)\n \n # Calculate toroidal field\n Btor = fnorm / xnorm\n \n #########################################\n # Create a dictionary of values to return\n # \n # Need to transpose 2D arrays to [psi, theta] \n # to be consistent with elite inputs\n \n var = {\"npsi\":npsi, \"npol\":ny, # Sizes\n \n \"psi\":psiflux,\n \"f(psi)\":fnorm,\n \"p\":ponly,\n \n \"R\": np.transpose(xnorm),\n \"Z\": np.transpose(znorm),\n\n \"Bp\":np.transpose(Bpol),\n \"Bt\":np.transpose(Btor),\n\n \"q\":qsf,\n\n \"ffprime\":ffpnorm,\n \"pprime\":pponly}\n\n if Br != None:\n var['Br'] = np.transpose(Br)\n if Bz != None:\n var['Bz'] = np.transpose(Bz)\n \n return var", "def read_pdb(filename):\n \n # Read the PDB file into memory.\n pdbfile = open(filename, 'r')\n\n # Extract the ATOM entries.\n # Format described here: http://bmerc-www.bu.edu/needle-doc/latest/atom-format.html\n atoms = list()\n for line in pdbfile:\n if line[0:6] == \"ATOM \":\n # Parse line into fields.\n atom = dict()\n atom[\"serial\"] = line[6:11]\n atom[\"atom\"] = line[12:16]\n atom[\"altLoc\"] = line[16:17]\n atom[\"resName\"] = line[17:20]\n atom[\"chainID\"] = line[21:22]\n atom[\"Seqno\"] = line[22:26]\n atom[\"iCode\"] = line[26:27]\n atom[\"x\"] = line[30:38]\n atom[\"y\"] = line[38:46]\n atom[\"z\"] = line[46:54]\n atom[\"occupancy\"] = line[54:60]\n atom[\"tempFactor\"] = line[60:66]\n atoms.append(atom)\n \n # Close PDB file.\n pdbfile.close()\n\n # Return dictionary of present residues.\n return atoms", "def read_geometry(geometry_filename, quiet=True):\n\n result_dict = {}\n\n extension = os.path.splitext(geometry_filename)[1]\n # read the geometry differently depending on the file format\n if extension == \".geom\":\n parser = GeometryFileParser(geometry_filename)\n result_dict = parser.pixel_map_for_cxiview()\n\n elif extension == \".h5\":\n x, y, r, dx_m = read_pixelmap(geometry_filename)\n coffset = float('nan')\n # clen is not neccessarily an integer so we choose as default the None \n # type\n clen = None\n\n # find the smallest size of cspad_geom that contains all\n # xy values but is symmetric about the origin\n M = 2 * int(max(abs(x.max()), abs(x.min()))) + 2\n N = 2 * int(max(abs(y.max()), abs(y.min()))) + 2\n\n\n # convert x y 
values to i j values Minus sign for y-axis because Python\n # takes (0,0) in top left corner instead of bottom left corner\n\n # Note to Valerio: Do not add the offset to convert to image\n # coordinates staring at (0,0) as we may want the actual pixel\n # coordinates This means do not center array here --> it is done in\n # pixel_remap instead Returning actual coordinates (x,y) is better for\n # other operations such as radial averages\n\n x = x\n y = -y\n img_shape = (M, N)\n\n result_dict = {\n 'x' : x.flatten(),\n 'y' : y.flatten(),\n 'r' : r.flatten(),\n 'dx' : dx_m,\n 'coffset' : coffset,\n 'shape' : img_shape,\n 'clen' : clen\n }\n else:\n print(\"Error reading geometry file: \" + geometry_filename) \n print(\"Unknown geometry file format: \" + extension)\n exit()\n\n # Print a sanity check unless suppressed\n if not quiet:\n print('----------')\n print('Geometry info:')\n print('X range (pix): ', x.min(), x.max())\n print('Y range (pix): ', y.min(), y.max())\n print('R range (pix): ', r.min(), r.max())\n print('Pixel size (m): %4.6f' % (dx_m))\n print(\"Geometry shape: \", x.shape)\n print(\"Geometry elements: \", x.flatten().shape)\n print(\"Assembled image size: \", img_shape)\n\n return result_dict", "def load_cdo_results():\n # Location of data files\n cdo_dir = os.path.dirname(__file__)+'/data/cdo_results/'\n # Dictionary in which to store data\n cdo_dict = {}\n # Load gridcell area data\n cdo_dict['gridarea'] = xr.open_dataset(cdo_dir+'data01_gridarea.nc',\n decode_times=False, autoclose=True)\n # Load data for regions\n for region in load_region_bounds_dict().keys():\n for suffix in ['', '_area', '_fldmean']:\n key = region + suffix\n if key == 'Glb': # for globe is data01.nc\n cdo_dict[key] = xr.open_dataset(cdo_dir+'../data01.nc',\n decode_times=False, autoclose=True)\n else:\n cdo_dict[key] = xr.open_dataset(cdo_dir+'data01_'+key+'.nc',\n decode_times=False, autoclose=True)\n # Return dictioary of data\n return cdo_dict", "def read_results(self):\n\n myfile = open(os.path.join(self.directory, 'results.tag'), 'r')\n self.lines = myfile.readlines()\n myfile.close()\n\n # print('atoms before read', self.atoms)\n # print('atoms_input before read', self.atoms_input)\n\n self.atoms = self.atoms_input\n\n charges, energy, free_energy = self.read_charges_and_energy()\n if charges is not None:\n self.results['charges'] = charges\n\n self.results['energy'] = energy\n self.results['free_energy'] = free_energy\n\n if self.do_forces:\n forces = self.read_forces()\n self.results['forces'] = forces\n\n self.mmpositions = None\n\n # stress stuff begins\n sstring = 'stress'\n have_stress = False\n stress = list()\n for iline, line in enumerate(self.lines):\n if sstring in line:\n have_stress = True\n start = iline + 1\n end = start + 3\n for i in range(start, end):\n cell = [float(x) for x in self.lines[i].split()]\n stress.append(cell)\n if have_stress:\n stress = -np.array(stress) * Hartree / Bohr**3\n self.results['stress'] = stress.flat[[0, 4, 8, 5, 2, 1]]\n # stress stuff ends\n\n # TODO: these two seem wrong with DFTB+ master but compatible with 19.1\n # eigenvalues and fermi levels\n #fermi_levels = self.read_fermi_levels()\n #if fermi_levels is not None:\n # self.results['fermi_levels'] = fermi_levels\n #\n #eigenvalues = self.read_eigenvalues()\n #if eigenvalues is not None:\n # self.results['eigenvalues'] = eigenvalues\n\n # calculation was carried out with atoms written in write_input\n os.remove(os.path.join(self.directory, 'results.tag'))", "def 
readMagneticsObservations(self, obs_file):\n\n fid = open(self.basePath + obs_file,'r')\n\n # First line has the inclination,declination and amplitude of B0\n line = fid.readline()\n B = np.array(line.split(),dtype=float)\n\n # Second line has the magnetization orientation and a flag\n line = fid.readline()\n M = np.array(line.split(),dtype=float)\n\n # Third line has the number of rows\n line = fid.readline()\n ndat = np.array(line.split(),dtype=int)\n\n # Pre-allocate space for obsx, obsy, obsz, data, uncert\n line = fid.readline()\n temp = np.array(line.split(),dtype=float)\n\n d = np.zeros(ndat, dtype=float)\n wd = np.zeros(ndat, dtype=float)\n locXYZ = np.zeros( (ndat,3), dtype=float)\n\n for ii in range(ndat):\n\n temp = np.array(line.split(),dtype=float)\n locXYZ[ii,:] = temp[:3]\n\n if len(temp) > 3:\n d[ii] = temp[3]\n\n if len(temp)==5:\n wd[ii] = temp[4]\n\n line = fid.readline()\n\n rxLoc = BaseMag.RxObs(locXYZ)\n srcField = BaseMag.SrcField([rxLoc],param=(B[2],B[0],B[1]))\n survey = BaseMag.LinearSurvey(srcField)\n survey.dobs = d\n survey.std = wd\n return survey", "def load_atom(db_dir):\n current = open(db_dir, \"r\")\n mol2_file = []\n for row in current:\n line = row.split()\n mol2_file.append(line)\n atom_start = mol2_file.index(['@<TRIPOS>ATOM']) + 1\n atom_end = mol2_file.index(['@<TRIPOS>BOND'])\n atom_info=mol2_file[atom_start:atom_end]\n mol=[]\n for line in atom_info:\n atom_type = line[1][0]\n x_y_z = np.asarray(line[2:5], float)\n idx = int(line[0])\n node1 = Node(atom_type, x_y_z, idx)\n mol.append(node1)\n return mol", "def read_szx_fmv_13(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n mphr = eps_file.mphr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = raw_data[\"LONGITUDE\"].size\n\n data = {}\n metadata = {}\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n metadata[\"spacecraft_id\"] = np.int8(mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(mphr[\"ORBIT_START\"])\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n\n for f in fields:\n metadata[f] = np.int16(mphr[f.upper()])\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"abs_line_number\"\n ]\n\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan), (\"latitude\", long_nan),\n (\"swath indicator\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = nan_val\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"num_val_trip\", ulong_nan), (\"f_kp\", byte_nan),\n (\"f_usable\", byte_nan), (\"land_frac\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n # modify longitudes from (0, 360) to (-180,180)\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & 
(data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1),\n n_lines).astype(np.uint8)\n\n data[\"line_num\"] = idx_nodes.astype(np.uint16)\n\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n data[\"swath_indicator\"] = data.pop(\"swath indicator\")\n\n data[\"f_land\"] = data.pop(\"land_frac\")\n\n return data, metadata", "def readsol(self,filename):\n\t\tf = file(filename)\n\t\tfor i in range(6): f.readline()\n\t\tl = f.readline().split()\n\n\t\trows = int(l[2])\n\t\tcols = int(l[5])\n\t\tfor i in range(3): f.readline()\n\t\tstatusString = f.readline().split()[0]\n\t\txpressStatus = {\n\t\t\t\"Optimal\":LpStatusOptimal,\n\t\t\t}\n\t\tif statusString not in xpressStatus:\n\t\t\traise ValueError, \"Unknow status returned by XPRESS: \"+statusString\n\t\tstatus = xpressStatus[statusString]\n\t\tvalues = {}\n\t\twhile 1:\n\t\t\tl = f.readline()\n\t\t\tif l == \"\": break\n\t\t\tline = l.split()\n\t\t\tif len(line) and line[0] == 'C':\n\t\t\t\tname = line[2]\n\t\t\t\tvalue = float(line[4])\n\t\t\t\tvalues[name] = value\n\t\treturn status, values", "def process_geometry(self, molecule_info):\n # Initalise variables\n atom_coords = []\n\n # Pull coordinates from molecule info.\n for line in molecule_info:\n xyz = np.asarray([\n float(line.split()[i+4])\n for i in range(3)\n ])\n atom_coords.append(xyz)\n\n return np.asarray(atom_coords)", "def load_data_from_disk(self):\n data = dict()\n Omega_M = self.theta_fid[0]\n der_den = 1. / (2. * self.delta_theta)\n\n print (\"Loading data from disk.. Omega_M = \", Omega_M, \"delta_theta = \", self.delta_theta[0])\n\n for key in ['x_central', 'x_m', 'x_p', 'x_central_test', 'x_m_test', 'x_p_test']:\n data[key] = np.load(f'./preloaded_data/{Omega_M}_{self.delta_theta[0]}_{key}.npy')\n\n return data, der_den", "def read_parameters():\n hdulist1 = pf.open(source+'/kids_data/KiDS_DR3.1_G9_ugri_shear.fits')\n param1 = hdulist1[1].data['e1'][:sample]\n param2 = hdulist1[1].data['e2'][:sample]\n weights = hdulist1[1].data['weight'][:sample]\n return param1, param2, weights", "def ReadIndmfl(filename, fh_info):\n def divmodulo(x,n):\n \"We want to take modulo and divide in fortran way, so that it is compatible with fortran code\"\n return ( sign(x)* (abs(x)/n) , sign(x)*mod(abs(x),n))\n\n fh = open(filename, 'r')\n lines = [line.split('#')[0].strip() for line in fh.readlines()] # strip comments\n lines = (line for line in lines if line) # strip blank lines & create generator expression\n\n hybr_emin, hybr_emax, Qrenorm, projector = [float(x) for x in lines.next().split()[:4]]\n if projector>=4:\n hybr_emin = int(hybr_emin)\n hybr_emax = int(hybr_emax)\n matsubara, broadc, broadnc, om_npts, om_emin, om_emax = [float(e) for e in lines.next().split()[:6]]\n matsubara = int(matsubara) # recast these to integers\n om_npts = int(om_npts) \n\n atoms={}\n cps={}\n natom = int(lines.next())\n for i in range(natom):\n iatom, nL, locrot_shift = [int(x) for x in lines.next().split()]\n (shift,locrot) = divmodulo(locrot_shift,3)\n if locrot<0: locrot=3\n \n Ls, qsplits, icps = array([[int(x) for x in lines.next().split()] for i in range(nL)]).T\n new_zx = [[float(x) for x in lines.next().split()] for loro in range(abs(locrot))]\n vec_shift = [float(x) for x in lines.next().split()] if shift else None\n\n atoms[iatom] = (locrot, new_zx, vec_shift)\n for icp, L, qsplit in zip(icps, Ls, qsplits):\n if cps.has_key(icp):\n cps[icp] += [(iatom, L, qsplit)]\n else:\n cps[icp] 
= [(iatom, L, qsplit)]\n\n #####################################################\n # read the big block of siginds and cftrans\n ncp, maxdim, maxsize = [int(e) for e in lines.next().split()[:3]]\n legends={}\n siginds={}\n cftrans={}\n for i in range(ncp):\n icp, dim, size = [int(e) for e in lines.next().split()]\n legends[icp] = lines.next().split(\"'\")[1::2]\n siginds[icp] = array([[int(e) for e in lines.next().split()] for row in range(dim)])\n raw_cftrans = array([[float(e) for e in lines.next().split()] for row in range(dim)])\n cftrans[icp] = raw_cftrans[:,0::2] + raw_cftrans[:,1::2]*1j\n\n return (siginds, cftrans, cps)", "def load_rd_prms(in_file):\n\n prms = np.load(in_file)\n return (prms['vs'],\n prms['mbs'].item(),\n prms['A'],\n prms['B'],\n prms['C'],\n prms['D'],\n prms['E'],\n prms['F'],\n prms['G'],\n prms['synUmax'],\n prms['synVmax'],\n prms['ucmax'],\n prms['dt'],\n prms['Du'],\n prms['Dv'],\n prms['RR'])", "def read_atoms(data, system, atom_style, units, atomsstart, atomscolumns):\n \n if atomsstart is not None:\n prop_info = atoms_prop_info(atom_style, units)\n ncols = countreadcolumns(prop_info)\n \n # Read Atoms table\n system = load_table(data, box=system.box, system=system, \n prop_info=prop_info, skiprows=atomsstart,\n nrows=system.natoms, comment='#',\n header=None, usecols=range(ncols))\n \n # Check if image flags are included\n if atomscolumns == ncols + 3:\n \n # Read image flags\n with uber_open_rmode(data) as f:\n imageflags = pd.read_csv(f, delim_whitespace=True, names=['bx', 'by', 'bz'],\n skiprows=atomsstart, nrows=system.natoms, comment='#',\n header=None, usecols=range(ncols, atomscolumns),\n dtype='int64')\n\n # Wrap atoms to correct images\n shift = imageflags.values.dot(system.box.vects)\n system.atoms.pos[:] += shift\n \n # Check for correct number of columns\n elif ncols != atomscolumns:\n raise FileFormatError(f'atom_style={atom_style} requires {ncols} or {ncols+3} Atoms table columns but {atomscolumns} found')\n\n return system", "def parsec_des_stellar_params(dmod=0):\n isos = load_parsec_isochrones(\"DECAM\")\n g_Teff_funcs = {}\n g_logg_funcs = {}\n r_Teff_funcs = {}\n r_logg_funcs = {}\n gmr_Teff_funcs = {}\n gmr_logg_funcs = {}\n interp_kwargs = {\"bounds_error\":False,\"fill_value\":np.nan}\n for key in isos.keys():\n tab = isos[key]\n tab = tab[(tab[\"label\"]==2) | (tab[\"label\"]==3)]\n gmag, rmag = tab[\"gmag\"], tab[\"rmag\"]\n logT, logg = tab[\"logTe\"], tab[\"logg\"]\n Teff = 10**logT\n g_Teff_funcs[key] = interpolate.interp1d(gmag+dmod,Teff,**interp_kwargs)\n g_logg_funcs[key] = interpolate.interp1d(gmag+dmod,logg,**interp_kwargs)\n r_Teff_funcs[key] = interpolate.interp1d(rmag+dmod,Teff,**interp_kwargs)\n r_logg_funcs[key] = interpolate.interp1d(rmag+dmod,logg,**interp_kwargs)\n gmr_Teff_funcs[key] = interpolate.interp1d(gmag-rmag,Teff,**interp_kwargs)\n gmr_logg_funcs[key] = interpolate.interp1d(gmag-rmag,logg,**interp_kwargs)\n return g_Teff_funcs, g_logg_funcs, r_Teff_funcs, r_logg_funcs, gmr_Teff_funcs, gmr_logg_funcs", "def read_szx_fmv_11(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n mphr = eps_file.mphr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = raw_data[\"LONGITUDE\"].size\n\n data = {}\n metadata = {}\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = 
ascat_time[idx_nodes]\n\n metadata[\"spacecraft_id\"] = np.int8(mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(mphr[\"ORBIT_START\"])\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n\n for f in fields:\n metadata[f] = np.int16(mphr[f.upper()])\n\n fields = [\"sat_track_azi\"]\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan), (\"latitude\", long_nan),\n (\"swath_indicator\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = nan_val\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"f_kp\", byte_nan), (\"f_usable\", byte_nan), (\"f_f\", uint_nan),\n (\"f_v\", uint_nan), (\"f_oa\", uint_nan), (\"f_sa\", uint_nan),\n (\"f_tel\", uint_nan), (\"f_land\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n # modify longitudes from (0, 360) to (-180,180)\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1),\n n_lines).astype(np.uint8)\n data[\"line_num\"] = idx_nodes.astype(np.uint16)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n return data, metadata", "def readQrelsDict(fileName):\n result = {}\n for e in readQrels(fileName):\n result.setdefault(e.queryId, {})[e.docId] = int(e.relGrade)\n return result", "def firstpass(data, pbc, symbols, units):\n # Get units information\n units_dict = style.unit(units)\n \n # Initialize parameter values\n atomsstart = None\n velocitiesstart = None\n natoms = None\n natypes = None\n firstatoms = False\n atomscolumns = 0\n masses = None\n num_masses_to_read = 0\n xlo = xhi = ylo = yhi = zlo = zhi = None\n xy = 0.0\n xz = 0.0\n yz = 0.0\n i = 0\n \n # Read str and files in the same way\n with uber_open_rmode(data) as fp:\n \n # Loop over all lines in fp\n for i, fullline in enumerate(fp):\n try:\n fullline = fullline.decode('UTF-8')\n except:\n pass\n \n # Remove comments after '#'\n try:\n comment_index = fullline.index('#')\n except:\n line = fullline\n else:\n line = fullline[:comment_index]\n \n terms = line.split()\n\n # Skip blank lines\n if len(terms)>0:\n \n # Read number of atoms \n if len(terms) == 2 and terms[1] == 'atoms':\n natoms = int(terms[0])\n\n # Read number of atom types\n elif len(terms) == 3 and terms[1] == 'atom' and terms[2] == 'types': \n natypes = int(terms[0])\n \n # Read boundary info\n elif len(terms) == 4 and terms[2] == 'xlo' and terms[3] == 'xhi':\n xlo = uc.set_in_units(float(terms[0]), units_dict['length'])\n xhi = uc.set_in_units(float(terms[1]), units_dict['length'])\n\n elif len(terms) == 4 and terms[2] == 'ylo' and terms[3] == 'yhi':\n ylo = uc.set_in_units(float(terms[0]), units_dict['length'])\n yhi = uc.set_in_units(float(terms[1]), units_dict['length'])\n\n elif len(terms) == 4 and terms[2] == 'zlo' and terms[3] == 'zhi':\n zlo = uc.set_in_units(float(terms[0]), units_dict['length'])\n zhi = 
uc.set_in_units(float(terms[1]), units_dict['length'])\n\n elif len(terms) == 6 and terms[3] == 'xy' and terms[4] == 'xz' and terms[5] == 'yz':\n xy = uc.set_in_units(float(terms[0]), units_dict['length'])\n xz = uc.set_in_units(float(terms[1]), units_dict['length'])\n yz = uc.set_in_units(float(terms[2]), units_dict['length'])\n \n # Identify starting line number for Atoms data\n elif len(terms) == 1 and terms[0] == 'Atoms':\n atomsstart = i + 1\n firstatoms = True\n\n # Check for atom_style comment\n try: \n comment_index = fullline.index('#')\n except:\n atom_style = None\n else:\n atom_style = fullline[comment_index + 1:].strip()\n \n # Count number of columns in Atoms table\n elif firstatoms:\n atomscolumns = len(terms)\n firstatoms = False\n \n # Identify starting line for Masses data\n elif len(terms) == 1 and terms[0] == 'Masses':\n if natypes is None:\n raise FileFormatError('# atom types must appear before Masses list')\n masses = [None for i in range(natypes)]\n num_masses_to_read = natypes\n \n # Read masses\n elif num_masses_to_read > 0:\n read_mass(terms, masses)\n num_masses_to_read -= 1\n\n # Identify starting line number for Velocity data\n elif len(terms) == 1 and terms[0] == 'Velocities':\n velocitiesstart = i + 1\n \n if i == 0:\n raise FileNotFoundError(f'File {data} not found')\n\n if natoms is None:\n raise FileFormatError('# atoms not found')\n\n if xlo is None or xhi is None:\n raise FileFormatError('xlo, xhi box dimensions missing')\n\n if ylo is None or yhi is None:\n raise FileFormatError('ylo, yhi box dimensions missing')\n\n if zlo is None or zhi is None:\n raise FileFormatError('zlo, zhi box dimensions missing')\n\n if atomsstart is None:\n raise FileFormatError('Atoms section missing')\n\n # Create system with natoms\n box = Box(xlo=xlo, xhi=xhi,\n ylo=ylo, yhi=yhi,\n zlo=zlo, zhi=zhi,\n xy=xy, xz=xz, yz=yz)\n atoms = Atoms(natoms=natoms)\n system = System(box=box, atoms=atoms, pbc=pbc, symbols=symbols,\n masses=masses)\n\n # Compile dict of params\n params = {}\n params['atomsstart'] = atomsstart\n params['velocitiesstart'] = velocitiesstart\n params['atomscolumns'] = atomscolumns\n params['atom_style'] = atom_style\n\n return system, params", "def read_forces(filename):\n f=open(filename,\"r\")\n castep_forces = f.readlines()\n f.close() \n nruter = []\n for index, line in enumerate(castep_forces):\n if 'Total number of ions in cell' in line:\n n_atoms = int(line.split()[7])\n if 'Cartesian components (eV/A)' in line:\n starting_line = index + 4\n for i in range(n_atoms):\n f = starting_line + i\n nruter.append([float(castep_forces[f].split()[m]) for m in range(3,6)]) \n nruter=np.array(nruter,dtype=np.double)\n return nruter", "def read_meas_adp(data, path='xd.res', use='meas'):\n use2 = 'frac_' + use\n switch = False\n filepointer = open(path, 'r')\n atomname = None\n for line in filepointer:\n if switch:\n split = [i for i in line.split(' ') if len(i) > 0]\n if not len(split) == 6:\n print('WARNING!!! 
Inconsistend number of floats while\\\n reading measured ADP.')\n data['exp'][atomname].adp[use2] = split\n switch = False\n if '(' in line:\n split = [i for i in line.split(' ') if len(i) > 0]\n if split[0][-1] == ')':\n switch = True\n atomname = split[0]\n use = 'cart_' + use\n for atom in data['exp'].atoms:\n # if use == 'cart_neut': print(atom)\n atom.adp[use] = rotate_adp2(atom.adp[use2],\n atom.molecule.frac2cartmatrix,\n atom.molecule.cell)\n return data", "def _get_energies_atom(self, file1, file2, natom):\n esp = loadtxt(file1)\n etot = loadtxt(file2)\n esp_at = esp[-natom:,1]\n etot_at = etot[-natom:,1]\n return esp_at, etot_at", "def get_data(filename):\n\n data = load(filename)\n\n density_factor = float(data.gas.densities.cosmo_factor.a_factor)\n temperature_factor = float(data.gas.temperatures.cosmo_factor.a_factor)\n\n number_density = (data.gas.densities * (density_factor / mh)).to(cm ** -3)\n temperature = (data.gas.temperatures * temperature_factor).to(\"K\")\n metallicity = data.gas.metal_mass_fractions\n metallicity[metallicity < min_metallicity] = min_metallicity\n\n return number_density.value, temperature.value, np.log10(metallicity.value)", "def read_info_file(filename):\n nb_params = dict()\n NB = namedtuple(\"NB\", [\"r\", \"p\"])\n n = 0\n for line in open(filename):\n if line.startswith(\"#\"):\n continue\n if n == 0:\n f = line.split()\n assert f == [\n \"sample\",\n \"cell\",\n \"medbin\",\n \"mapped\",\n \"suppl\",\n \"dupl\",\n \"mapq\",\n \"read2\",\n \"good\",\n \"pass1\",\n \"nb_p\",\n \"nb_r\",\n \"nb_a\",\n \"bam\",\n ]\n else:\n f = line.split()\n # sample = f[0]\n cell = f[1]\n # medbin = f[2]\n # mapped = f[3]\n # suppl = f[4]\n # dupl = f[5]\n # mapq = f[6]\n # read2 = f[7]\n # good = f[8]\n # pass1 = f[9]\n nb_p = float(f[10])\n nb_r = float(f[11])\n # nb_a = f[12]\n # bam = f[13]\n nb_params[cell] = NB(r=nb_r, p=nb_p)\n n += 1\n return nb_params", "def Read_CSSR(filename):\n f = open(filename)\n#\n# First read unit cell\n#\n tokens = f.readline().split()\n if len(tokens) != 3: \n print \"Format mismatch -- first cell line\"\n sys.exit(1)\n a, b, c = map(float,tokens[:])\n tokens = f.readline().split()\n if len(tokens) < 3: \n print \"Format mismatch -- second cell line\"\n sys.exit(1)\n alpha, beta, gamma = map(float,tokens[0:3])\n\n cell = N.zeros((3,3),N.Float)\n\n alpha, beta, gamma = map(lambda x: x*pi/180.0, (alpha,beta,gamma))\n va = N.array((a,0.0,0.0),N.Float)\n vb = N.array((b*cos(gamma), b*sin(gamma), 0.0),N.Float)\n xxx = (cos(alpha)-cos(beta)*cos(gamma)) / sin(gamma)\n vc = N.array((c*cos(beta), c*xxx, c*sqrt(sin(beta)**2 - xxx**2)),N.Float)\n\n cell[0,:] = va[:]\n cell[1,:] = vb[:]\n cell[2,:] = vc[:]\n\n#\n# Now the atoms\n#\n tokens = f.readline().split()\n natoms = int(tokens[0])\n f.readline() # empty line\n\n crystal = Structure([])\n import re\n p = re.compile(\"[A-z]+\")\n for a in range(natoms):\n tokens = f.readline().split()\n number, tag, x, y, z = tokens[0:5]\n m = p.match(tag)\n if m:\n symbol = m.group()\n else:\n print \"Cannot match \", tag \n crystal.append(Atom(symbol, [float(x), float(y), float(z)]))\n\n crystal.SetUnitCell(cell)\n crystal.SetBoundaryConditions(periodic=True)\n\n return crystal", "def _read_data(self):\n param_map = {'Temperature': 'water_temperature',\n 'EC': 'water_electrical_conductivity',\n 'Pressure': 'water_depth_non_vented',\n 'Battery': 'instrument_battery_voltage',\n }\n\n unit_map = {'degC': pq.degC,\n 'mS/cm': sq.mScm,\n 'psi': pq.psi,\n 'volts': pq.volt,\n }\n\n macroctd_data = 
MacroctdReader(self.data_file, self.default_tzinfo)\n\n # determine parameters provided and in what units\n self.parameters = dict()\n self.data = dict()\n\n for parameter in macroctd_data.parameters:\n try:\n pcode = param_map[(parameter.name).strip()]\n punit = unit_map[(parameter.unit).strip()]\n #ignore params that have no data\n if not np.all(np.isnan(parameter.data)):\n self.parameters[pcode] = sonde.master_parameter_list[pcode]\n self.data[param_map[parameter.name]] = parameter.data * \\\n punit\n except KeyError:\n warnings.warn('Un-mapped Parameter/Unit Type:\\n'\n '%s parameter name: \"%s\"\\n'\n '%s unit name: \"%s\"' %\n (self.file_format, parameter.name,\n self.file_format, parameter.unit),\n Warning)\n\n self.format_parameters = {\n 'header_lines': macroctd_data.header_lines,\n }\n\n self.serial_number = macroctd_data.serial_number\n self.site_name = macroctd_data.site_name\n self.dates = macroctd_data.dates", "def get_properties_from_axyzc(\n atoms_str,\n coordinates,\n charge,\n options=None,\n scr=constants.SCR,\n clean_files=True,\n cmd=XTB_CMD,\n filename=\"_tmp_xtb_input.xyz\",\n n_cores=1,\n n_threads=1,\n **kwargs,\n):\n\n assert health_check(cmd=cmd)\n\n if isinstance(scr, str):\n scr = pathlib.Path(scr)\n\n if not filename.endswith(\".xyz\"):\n filename += \".xyz\"\n\n temp_scr = tempfile.mkdtemp(dir=scr, prefix=\"xtb_\")\n temp_scr = pathlib.Path(temp_scr)\n\n xtb_cmd = cmd\n\n # Write input file\n inputstr = rmsd.set_coordinates(atoms_str, coordinates, title=\"xtb input\")\n\n with open(temp_scr / filename, \"w\") as f:\n f.write(inputstr)\n\n # Set charge in file\n with open(temp_scr / \".CHRG\", \"w\") as f:\n f.write(str(charge))\n\n # Overwrite threads\n env.set_threads(n_threads)\n\n # Run subprocess command\n cmd = [cmd, f\"{filename}\"]\n\n if options is not None:\n cmd += parse_options(options)\n\n # Merge to string\n cmd = \" \".join(cmd)\n cmd = f\"cd {temp_scr}; \" + cmd\n\n _logger.debug(cmd)\n\n lines = shell.stream(cmd)\n lines = list(lines)\n\n error_pattern = \"abnormal termination of xtb\"\n idx = linesio.get_rev_index(lines, error_pattern, stoppattern=\"#\")\n if idx is not None:\n\n _logger.critical(error_pattern)\n\n idx = linesio.get_rev_index(lines, \"ERROR\")\n\n if idx is None:\n _logger.critical(\"could not read error message\")\n\n else:\n\n for line in lines[idx + 1 : -2]:\n _logger.critical(line.strip())\n\n _logger.critical(cmd)\n _logger.critical(\"xtbexec \" + env.which(xtb_cmd))\n _logger.critical(\"xtbpath \" + os.environ.get(\"XTBPATH\", \"\"))\n _logger.critical(\"xtbhome \" + os.environ.get(\"XTBHOME\", \"\"))\n\n return None\n\n # Parse properties from xtb output\n properties = read_properties(lines, options=options, scr=temp_scr)\n\n # clean your room\n if clean_files:\n shutil.rmtree(temp_scr)\n\n return properties", "def read_szx_fmv_12(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n mphr = eps_file.mphr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = raw_data[\"LONGITUDE\"].size\n\n data = {}\n metadata = {}\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n metadata[\"spacecraft_id\"] = np.int8(mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(mphr[\"ORBIT_START\"])\n\n fields = [\n \"processor_major_version\", 
\"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n\n for f in fields:\n metadata[f] = np.int16(mphr[f.upper()])\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"abs_line_number\"\n ]\n\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan), (\"latitude\", long_nan),\n (\"swath indicator\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = nan_val\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"num_val_trip\", ulong_nan), (\"f_kp\", byte_nan),\n (\"f_usable\", byte_nan), (\"f_f\", uint_nan), (\"f_v\", uint_nan),\n (\"f_oa\", uint_nan), (\"f_sa\", uint_nan), (\"f_tel\", uint_nan),\n (\"f_ref\", uint_nan), (\"f_land\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n # modify longitudes from (0, 360) to (-180,180)\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1),\n n_lines).astype(np.uint8)\n\n data[\"line_num\"] = idx_nodes.astype(np.uint16)\n\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n data[\"swath_indicator\"] = data.pop(\"swath indicator\")\n\n return data, metadata", "def readcif(filename, **kwds):\n \n # Read the unit cell parameters\n a, b, c, alf, bet, gam = [[]]*6\n with open(filename, 'r') as f:\n \n for line in f:\n if \"length_a\" in line:\n a = numgrab(line)\n elif \"length_b\" in line:\n b = numgrab(line)\n elif \"length_c\" in line:\n c = numgrab(line)\n elif \"angle_alpha\" in line:\n alf = numgrab(line)\n elif \"angle_beta\" in line:\n bet = numgrab(line)\n elif \"angle_gamma\" in line:\n gam = numgrab(line)\n \n crystVec = a + b + c + alf + bet + gam\n \n # Read atomic coordinates\n cifdata = pd.read_csv(filename, delim_whitespace=True, header=None, **kwds)\n atomLabels = np.array(cifdata.values[:,0], dtype='str')\n coords = np.array(cifdata.values[:,1:4]).astype('float64')\n\n return atomLabels, coords, crystVec", "def read_pdb(self, pdb):\n pdb_a = {}\n for line in pdb:\n at = re.compile(\"(ATOM|HETATM)\")\n if at.match(line):\n nm = re.sub(r'\\s', '', line[6:12])\n aname = re.sub(r'\\s', '', line[12:17])\n ri_c = re.sub(r'\\s', '', line[20:27])\n x = re.sub(r'\\s', '', line[30:38])\n y = re.sub(r'\\s', '', line[38:46])\n z = re.sub(r'\\s', '', line[46:55])\n if ri_c and aname and x and y and z:\n pdb_a[int(nm)] = [aname, Vector(float(x), float(y), float(z)), ri_c]\n return [pdb_a, nm]", "def f_read_adr_parameters_from_txt_file(parameters_file):\n dict = {}\n file = open(parameters_file, \"r\")\n for line in file:\n if line.strip().startswith('#') or line.strip().startswith('Parameter'):\n pass\n else:\n line = line.strip().replace(' ', '').split(',')\n if line[0] == 'ADR_mode': dict[\"operation_mode_num\"] = line[1]\n if line[0] == 'FFT_size': dict[\"FFT_size_samples\"] = line[1]\n if line[0] == 'Averaged_spectra': dict[\"spectra_averaging\"] = line[1]\n if line[0] == 
'Start_freq_line': dict[\"start_line_freq\"] = line[1]\n if line[0] == 'Width_freq_lines': dict[\"width_line_freq\"] = line[1]\n if line[0] == 'CLC_source': dict[\"clock_source\"] = line[1]\n if line[0] == 'Sum_diff_mode': dict[\"sum_diff_mode_num\"] = line[1]\n if line[0] == 'Dif_delay': dict[\"chan_diff_delay\"] = line[1]\n if line[0] == 'File_size': dict[\"data_file_size\"] = line[1]\n\n return dict", "def parse_1d_scan_coords(path: str) -> List[Dict[str, tuple]]:\n if not os.path.isfile(path):\n raise InputError(f'Could not find file {path}')\n software = identify_ess(path)\n traj = list()\n\n if software == 'xtb':\n scan_path = os.path.join(os.path.dirname(path), 'xtbscan.log')\n if os.path.isfile(scan_path):\n lines = _get_lines_from_file(scan_path)\n xyz_str = ''\n for line in lines:\n splits = line.split()\n if len(splits) == 1:\n if xyz_str:\n traj.append(str_to_xyz(xyz_str))\n xyz_str = ''\n continue\n if 'energy:' in line:\n continue\n xyz_str += f'{qcel.periodictable.to_E(splits[0])} {splits[1]} {splits[2]} {splits[3]}\\n'\n traj.append(str_to_xyz(xyz_str))\n return traj\n\n lines = _get_lines_from_file(path)\n log = ess_factory(fullpath=path, check_for_errors=False)\n if not isinstance(log, GaussianLog):\n raise NotImplementedError(f'Currently parse_1d_scan_coords only supports Gaussian files, got {type(log)}')\n done = False\n i = 0\n while not done:\n if i >= len(lines) or 'Normal termination of Gaussian' in lines[i] or 'Error termination via' in lines[i]:\n done = True\n elif 'Optimization completed' in lines[i]:\n while i < len(lines) + 10 and 'Input orientation:' not in lines[i] or 'Forces (Hartrees/Bohr)' in lines [i + 7]:\n i += 1\n if 'Error termination via' in lines[i]:\n return traj\n i += 5\n xyz_str, skip_traj = '', False\n while len(lines) and '--------------------------------------------' not in lines[i]:\n if 'DIIS: error' in lines[i]:\n skip_traj = True\n break\n splits = lines[i].split()\n xyz_str += f'{qcel.periodictable.to_E(int(splits[1]))} {splits[3]} {splits[4]} {splits[5]}\\n'\n i += 1\n if not skip_traj:\n traj.append(str_to_xyz(xyz_str))\n i += 1\n return traj", "def read_xyz_traj(filename):\n with open(filename, 'r') as traj_file:\n traj = traj_file.readlines()\n n_atoms = int(traj[0].strip()) # Get number of atoms from first line\n n_frames = int(len(traj) / (n_atoms + 2)) # Calculate number of frames (assuming n_atoms is constant)\n trajectory = {'atoms': np.empty((n_frames, n_atoms), dtype='U2'), # String of length 2\n 'coordinates': np.empty((n_frames, n_atoms, 3)), # Float\n 'headers': np.empty((n_frames,), dtype=object)} # Python object\n for frame in range(n_frames):\n start = frame * (n_atoms + 2) # Frame start\n end = (frame + 1) * (n_atoms + 2) # Frame end\n trajectory['coordinates'][frame] = [[float(i) for i in line.split()[1:4]] for line in traj[start + 2:end]]\n trajectory['atoms'][frame] = [line.split()[0] for line in traj[start + 2:end]]\n trajectory['headers'][frame] = (traj[start + 1].strip())\n return trajectory", "def read_from_ascii(self, filename):\n self.ascii_filename = filename\n # read file content into a string\n f=open(filename,'r')\n file_str=f.read()\n f.close()\n # make dictionary with file content\n reg_exp_data_groups=re.compile(r'^#>>(\\w+):.*\\n',re.M)\n file_dict=self.make_data_dict_from_str(reg_exp_data_groups,file_str)\n # read arrays ------------------------------\n self.x=np.loadtxt(StringIO.StringIO(file_dict['x']))\n self.p=np.loadtxt(StringIO.StringIO(file_dict['p']))\n 
self.fmci_XP=np.loadtxt(StringIO.StringIO(file_dict['XP']))\n # regular expression for extracting parameter=value\n reg_exp_param_val=re.compile(r'\\n*(\\w+)=',re.M)\n # read params_physics -----------------------\n params_physics_dict=self.make_data_dict_from_str(reg_exp_param_val,file_dict['params_physics'])\n self.name=self.__get_particle_name(params_physics_dict['particle'])\n self.time=float(params_physics_dict['time'])\n # read params_TDC ---------------------------\n params_TDC_dict=self.make_data_dict_from_str(reg_exp_param_val,file_dict['params_TDC'])\n self.calc_id=params_TDC_dict['calc_id']\n self.i_ts=int(params_TDC_dict['i_ts'])", "def parse_hartman2016_stellar_params(filename):\n\n\n def read_by_word(skip_lines):\n \"\"\"Iterate over the words in the file, since lines make no sense.\"\"\"\n\n drop_mathrm = re.compile(r'\\\\(mathrm|rm)\\{(?P<mathrm>.*?)\\}')\n merge_colname = re.compile(r' *_')\n skip_latex = str.maketrans('', '', '{}$\\\\')\n with open(filename, 'r') as param_file:\n for line in param_file:\n if line.startswith('References.'):\n return\n if skip_lines > 0:\n skip_lines -= 1\n else:\n for word in merge_colname.sub(\n '_',\n drop_mathrm.sub(r'\\g<mathrm>',\n line).translate(skip_latex)\n ).split():\n yield word\n\n def read_value_with_error(next_word, file_word):\n \"\"\"\n Parse the various value formats to FloatWithErrors.\n\n Understands simple floating point values (e.g. 3.14159), floats with\n symmetric errors (e.g. '3.1415 +or- 0.0003') and floats with\n asymmetric errors (e.g. '${1.005}_{-0.027}^{+0.032}$')\n\n Args:\n - next_word:\n The next word in the file.\n\n - file_word:\n A file iterator which can be querried to get more words.\n\n Returns:\n - value:\n A FloatWithErrors instance contaning the parsed value.\n\n - next_word:\n The next word in the file.\n \"\"\"\n\n asymmetric_error_rex = re.compile(\n r'(?P<value>[-+0123456789.]*)'\n r'_-(?P<minus_err>[0123456789.]*)'\n r'\\^\\+(?P<plus_err>[0123456789.]*)'\n )\n asymmetric_match = asymmetric_error_rex.match(next_word)\n if asymmetric_match is not None:\n return (\n FloatWithErrors(\n float(asymmetric_match.group('value')),\n float(asymmetric_match.group('plus_err')),\n float(asymmetric_match.group('minus_err'))\n ),\n next(file_word)\n )\n if next_word == 'cdots':\n return (FloatWithErrors(float('nan'), float('nan')),\n next(file_word))\n\n value = float(next_word)\n next_word = next(file_word)\n if next_word == '+or-':\n next_word = next(file_word)\n return (FloatWithErrors(value, float(next_word)),\n next(file_word))\n\n return FloatWithErrors(value, float('nan')), next_word\n\n def read_reference_list(next_word, file_word):\n \"\"\"Read a list of integers indicating refercences.\"\"\"\n\n result = []\n\n if next_word == 'cdots':\n return result\n\n while next_word[-1] == ',':\n result.extend([int(r) for r in next_word[:-1].split(',')])\n next_word = next(file_word)\n result.extend([int(r) for r in next_word[:-1].split(',')])\n return result\n\n skip_lines = 2\n first_units = '(days)'\n surveys_3char = ['HAT', 'KEL', 'TrE', 'WAS']\n file_word = read_by_word(skip_lines)\n columns = []\n next_word = ''\n while next_word != first_units:\n next_word = next(file_word)\n if next_word != first_units:\n columns.append(next_word)\n\n result = Structure(**{c: [] for c in columns})\n\n while next_word[:3] not in surveys_3char:\n next_word = next(file_word)\n\n while True:\n assert next_word[:3] in surveys_3char\n try:\n for column_name in columns:\n if column_name == 'Planet':\n #False positive, Planet 
member created out of __init__\n #pylint: disable=no-member\n result.Planet.append(next_word)\n #pylint: enable=no-member\n next_word = next(file_word)\n elif column_name == 'References':\n #False positive, References member created out of __init__\n #pylint: disable=no-member\n result.References.append(\n read_reference_list(next_word, file_word)\n )\n #pylint: enable=no-member\n next_word = next(file_word)\n else:\n value, next_word = read_value_with_error(next_word,\n file_word)\n getattr(result, column_name).append(value)\n except StopIteration:\n break\n\n return result", "def read_zp(file):\n with open(file) as f_in:\n head = f_in.readline()\n units = f_in.readline()\n for line in f_in:\n try:\n zpWave[line.split(' ')[0].replace('\"', '')] = float(line.split(' ')[1])\n zpF0[line.split(' ')[0].replace('\"', '')] = float(line.split(' ')[2])\n \n except NameError:\n zpWave = {line.split(' ')[0].replace('\"', '') : float(line.split(' ')[1])}\n zpF0 = {line.split(' ')[0].replace('\"', '') : float(line.split(' ')[2])}\n \n return zpWave, zpF0", "def test_from_mpd_file_like():\n params = load_params(StringIO(MPD_PARAMS_STR))\n assert params == MPD_PARAMS\n assert isinstance(params[\"x\"], float)\n assert isinstance(params[\"y\"], int)", "def from_cdo_griddes(griddes):\n\n with open(griddes) as grid_file:\n grid_file_lines = grid_file.readlines()\n\n grid_dic = {}\n\n for line in grid_file_lines:\n words = line.split()\n if words[0] == '#':\n continue\n else:\n length = len(words)\n if length == 3:\n grid_dic[words[0]] = words[2]\n else:\n value_string = ' '.join(words[2:length-1])\n grid_dic[words[0]] = value_string\n\n if grid_dic['gridtype'] != 'lonlat':\n print(('Gridtype {0} not supported'.format(grid_dic['gridtype'])))\n return ''\n\n lon = np.zeros(int(grid_dic['xsize']))\n lat = np.zeros(int(grid_dic['ysize']))\n\n for i in range(len(lon)):\n lon[i] = float(grid_dic['xfirst']) + i * float(grid_dic['xinc'])\n for j in range(len(lat)):\n lat[j] = float(grid_dic['yfirst']) + j * float(grid_dic['yinc'])\n\n if grid_dic['xname'] == 'rlon':\n pol_lon = float(grid_dic['xnpole'])\n pol_lat = float(grid_dic['ynpole'])\n grid = RotGrid(lon, lat, pol_lon, pol_lat)\n else:\n grid = Grid(lon, lat)\n\n return grid", "def read_force_field(ff_file, units=\"Angstrom\", fragment_id=AtomicData.atomic_number):\n fh = open(ff_file)\n line = fh.readline()\n # read number of atoms\n nat = int(line.split()[0])\n # skip comment\n fh.readline()\n #\n atomlist = []\n atomtypes = []\n partial_charges = []\n lattice_vectors = []\n for i in xrange(nat+3):\n # read nat atoms and at most 3 lattice vectors\n line = fh.readline()\n if not line:\n # end of file reached\n break\n words = line.split()\n x,y,z = map(float, words[1:4])\n if units == \"Angstrom\":\n x,y,z = map(lambda c: c/AtomicData.bohr_to_angs, [x,y,z])\n if words[0] == \"Tv\":\n lattice_vectors.append( [x,y,z] )\n continue\n atno = fragment_id(words[0])\n atomlist.append((atno, [x,y,z]))\n atomtypes.append(int(words[4]))\n if len(words) > 5:\n # 6th column contains partial charges\n partial_charges.append( float(words[5]) )\n else:\n partial_charges.append( 0.0 ) \n fh.close()\n if lattice_vectors == []:\n print \"No lattice vectors found\"\n # no lattice vectors were provided\n # HACK: By setting a lattice vectors to 0, we tell\n # the function 'build_force_field' that we do not want\n # a periodic calculation in this direction.\n # If all lattice vectors are 0, only the atoms in the central\n # unit cell are included (number of replica cells == 0)\n 
lattice_vectors = [ [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0] ]\n\n assert len(lattice_vectors) == 3, \"Need 3 lattice vectors, got %d!\" % len(lattice_vectors)\n return atomlist, atomtypes, partial_charges, lattice_vectors", "def get_data_dict(params, x):\n parameters = {}\n for i, p in enumerate(feature_map.ordered_parameters):\n parameters[p] = x[i]\n for i, p in enumerate(var_form.ordered_parameters):\n parameters[p] = params[i]\n return parameters", "def get_coordinates_genes(path: str = \"\", data_files: dict = {}):\n\n essential_coordinates = {}\n\n # Get position genes\n if \"gff3\" in data_files:\n file_path = os.path.join(path, data_files[\"gff3\"])\n gene_coordinates = gene_position(file_path)\n else:\n raise ValueError(\"gff3 type not found in data\")\n\n # Get all annotated essential genes\n if \"essential_genes\" in data_files:\n file_path = os.path.join(path, data_files[\"essentials\"])\n with open(file_path, \"r\") as f:\n genes = f.readlines()[1:]\n for gene in genes:\n name = gene.strip(\"\\n\")\n essential_coordinates[name] = gene_coordinates.get(name).copy()\n else:\n raise ValueError(\"essentials not found in data\")\n\n # Get aliases of all genes\n if \"gene_names\" in data_files:\n file_path = os.path.join(path, \"Yeast_Protein_Names.txt\")\n aliases_designation = gene_aliases(file_path)[0] #'YMR056C' \\ ['AAC1'], ...\n else:\n raise ValueError(\"gene_names not found in data\")\n\n return essential_coordinates, aliases_designation", "def geometry(rdm):\n rdm = _rd_chem.AddHs(rdm)\n atms = rdm.GetAtoms()\n natms = len(rdm.GetAtoms())\n if natms == 1:\n asb = atms[0].GetSymbol()\n xyz = (0., 0., 0.)\n geo = ((asb, xyz),)\n else:\n _rd_all_chem.EmbedMolecule(rdm)\n _rd_all_chem.MMFFOptimizeMolecule(rdm)\n asbs = tuple(rda.GetSymbol() for rda in atms)\n xyzs = tuple(map(tuple, rdm.GetConformer(0).GetPositions()))\n geo = tuple(zip(asbs, xyzs))\n return geo", "def getAtomInformation(self, resIDs=None, atomIDs=None):\n if atomIDs is None:\n atomIDs = set ()\n else:\n atomIDs = set (atomIDs)\n\n if resIDs is not None:\n for i in resIDs:\n atomIDs.update (self._residueInfo[i]['atomID'])\n return self.getAtomInformation (atomIDs=atomIDs)\n\n atomIDs = list (atomIDs)\n atomIDs.sort ()\n return dict ((atomID, self._atomInfo[atomID]) for atomID in atomIDs)", "def read_xyz(filename, freq):\n\n\n#xyz file\n\n Atoms = []\n Coordinates = []\n\n xyz = open(filename)\n frame = 0\n while True:\n\n n_atoms = xyz.readline()\n\n if n_atoms == '':\n break\n else:\n n_atoms = int(n_atoms)\n title = xyz.readline()\n\n if frame%freq==0:\n atoms, coordinates = read_frame(xyz, n_atoms)\n Coordinates.append(coordinates)\n Atoms.append(atoms)\n\n else:\n read_frame(xyz, n_atoms)\n frame+=1\n\n return Atoms, Coordinates", "def read(self, FN, multiplier=None):\n if FN is None:\n raise Exception('File is not defined')\n elif FN.endswith('.dx') or FN.endswith('.dx.gz'):\n data = self._read_dx(FN)\n elif FN.endswith('.nc'):\n data = self._read_nc(FN)\n else:\n raise Exception('File type not supported')\n if multiplier is not None:\n data['origin'] = multiplier * data['origin']\n data['spacing'] = multiplier * data['spacing']\n return data", "def readXMLAmat(filename, time_ini, time_end, symbol = '?'):\n # Number of parameters to be read. 
Today is 6.\n n_param_read = 6\n pos = [[[],]*n_param_read,]*(time_end-time_ini+1)\n for t, time_path in enumerate(range(time_ini, time_end+1, 1)):\n xml_path_corr = corrTIFPath(filename, symbol, time_path)\n tree = etree.parse(xml_path_corr)\n root = tree.getroot()\n all_points = root.xpath('GaussianMixtureModel')\n x = []\n y = []\n z = []\n svID = []\n ID = []\n parent = []\n for point in all_points:\n # Needs try catch to avoid the errors in XML\n try:\n [x_aux, y_aux, z_aux] = [float(x) for x in point.xpath('attribute::m')[0].split()]\n x.append(x_aux)\n y.append(y_aux)\n z.append(z_aux)\n svID.append([int(a) for a in point.xpath('attribute::svIdx')[0].split()])\n ID.append(int(point.xpath('attribute::id')[0].strip()))\n parent.append(int(point.xpath('attribute::parent')[0].strip()))\n except:\n print('Point ID {p_id} in file {f_path} is corrupted'.format( \n f_path = xml_path_corr, p_id = int(point.xpath('attribute::id')[0].strip())))\n continue\n pos[t] = [x,y,z,svID,ID,parent]\n return pos", "def file2dict(file, dict, start_id):\n id = start_id\n line_number = 0\n file.seek(0)\n for line in file:\n if line_number == 0:\n n_atoms = int(float(line.strip()))\n if line_number >= 2 and line_number < n_atoms + 2:\n values_list = line.split()\n for i in range(1, 4):\n values_list[i] = float(values_list[i])\n dict[id] = {\n \"coor\": values_list[1:],\n \"element\": values_list[0]\n }\n id += 1\n line_number += 1\n return dict", "def read_qe(qefile, task):\n fileobj = open(qefile)\n lines = fileobj.readlines()\n fileobj.close()\n if task == \"PW_INP\": # Reading a pw.x input file\n for i, line in enumerate(lines):\n if \"nat\" in line:\n # Reading the number of atoms in the cell\n if \",\" in line.split()[2]:\n nat = int(line.split()[2][:len(line.split()[2])-1])\n else:\n nat = int(line.split()[2])\n elif \"ntyp\" in line:\n if \",\" in line.split()[2]:\n ntypat = int(line.split()[2][:len(line.split()[2])-1])\n else:\n ntypat = int(line.split()[2])\n elif \"CELL_PARAMETERS\" in line:\n # Reading the cell vectors\n cell = [x.split()[0:3] for x in lines[i + 1:i + 4]]\n cell = array([[float(col) for col in row] for row in cell])\n elif \"ATOMIC_POSITIONS\" in line:\n if \"crystal\" in line:\n # Reading the atoms and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = dot(array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]]), cell)\n else:\n # Reading the atoms and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]])\n # Returning the input structure\n rstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n return rstrc\n elif task == \"PW_OUT_RELAX\": # Reading a pw.x output file for a calculation = \"relax\"\n status = \"NONE\"\n rstrcs = []\n rtotEs = []\n rtotFs = []\n rforces = []\n rstress = []\n for i, line in enumerate(lines):\n # Initial information related to the input cell\n if \"number of atoms/cell\" in line:\n # Reading the number of atoms in the cell\n nat = int(line.split()[4])\n elif \"number of atomic types\" in line:\n ntypat = int(line.split()[5])\n elif \"crystal axes: (cart. coord. 
in units of alat)\" in line:\n # Reading the cell vectors\n cell = [x.split()[3:6] for x in lines[i + 1:i + 4]]\n cell = array([[float(col) for col in row] for row in cell])\n elif \"Crystallographic axes\" in line:\n # Reading the input coordinates and creating a collection of ase.Atoms objects\n geom_start = i + 3\n geom_stop = geom_start + nat\n species = [line.split()[1] for line in lines[geom_start:geom_stop]]\n geom = dot(array([[float(col) for col in line.split()[6:9]]\n for line in lines[geom_start:geom_stop]]), cell)\n tstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n rstrcs.append(tstrc)\n #print (\"Appending coordinates (first)\")\n # Now, just after each SCF cycle\n # Reading total energy\n elif \"Forces acting on atoms\" in line:\n forces_start = i + 2\n forces_stop = forces_start + nat\n try:\n forces = array([[float(col) for col in line.split()[6:9]]\n for line in lines[forces_start:forces_stop]])\n #print (\"Appending forces\")\n rforces.append(forces)\n except ValueError:\n # expected to occur when forces are too big\n # and so incompatible with the format used in QE\n # for instance:\n # atom 3 type 2 force = 674.57999165 312.30521069-1079.69944125\n print (\"Rerror reading forces in file:\")\n print (qefile)\n #print (\"Appending forces (empty)\")\n rforces.append([])\n elif \"! total energy\" in line:\n rtotEs.append(float(line.split()[4]))\n #print (\"Appending energy\")\n elif \"total stress (Ry/bohr**3)\" in line:\n # Reading the stress tensor\n stress = [x.split()[0:3] for x in lines[i + 1:i + 4]]\n stress = array([[float(col) for col in row] for row in stress])\n rstress.append(stress)\n #print (\"Appending stress\")\n elif \"Total force\" in line:\n rtotFs.append(float(line.split()[3]))\n #print (\"Appending total forces\")\n elif \"ATOMIC_POSITIONS (alat)\" in line:\n # Reading the relaxed and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]])\n tstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n rstrcs.append(tstrc)\n #print (\"Appending coordinates\")\n elif \"convergence NOT achieved after 100 iterations: stopping\" in line:\n # Removing the last item the vector with structures\n status = \"SCF_NOT_CONVERGED\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if no even the first SCF started\n if len(rtotEs) == 0 and status == \"NONE\":\n status = \"CRASH\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if the SCF has not been finished because of timeout\n if len(rstrcs) > len(rtotEs) and status == \"NONE\":\n status = \"TIMEOUT_OR_CRASH\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if the BFGS has been finished\n if status == \"TIMEOUT_OR_CRASH\" and \"JOB DONE\" in lines[len(lines)-2]:\n status = \"FINISHED\"\n # Returning a collection of cells and properties\n return status, rstrcs, rtotEs, rtotFs, rforces, rstress", "def read_xyz_file(filename, num_spatial_dimensions):\n print(\"Reading data from XYZ file.\")\n\n particle_positions = []\n frame_number = 0\n line_number = 0\n frame_particles = 0\n with open(filename, 'r') as input_file:\n for line in input_file:\n if line_number == 0:\n # Check for blank line at end of file\n if line != \"\":\n frame_particles = int(line)\n 
particle_positions.append(np.zeros((frame_particles, num_spatial_dimensions)))\n elif line_number == 1:\n pass\n else:\n for dimension in range(num_spatial_dimensions):\n particle_positions[frame_number][line_number-2][dimension] = line.split()[1:][dimension]\n line_number += 1\n # If we have reached the last particle in the frame, reset counter for next frame\n if line_number == (frame_particles + 2):\n line_number = 0\n frame_number += 1\n\n print(\"XYZ read complete.\")\n\n return particle_positions", "def load_charmm_ff_params(fname):\n with open(fname) as f:\n lines = f.readlines()\n\n comment_stripper = re.compile(r'[!\\*].*')\n ffp = ForceFieldParams(fname)\n\n current_section = None\n for i in range(len(lines)):\n # Ignore comments and blank lines\n line = comment_stripper.sub('', lines[i].strip())\n if line == '': continue\n\n tokens = line.split()\n skip_line = False\n for section in ('ATOM', 'BOND', 'ANGL', 'DIHE', 'IMPR', 'NONB', 'CMAP'):\n if tokens[0].startswith(section):\n current_section = section\n skip_line = True\n break\n\n if skip_line: continue\n\n if current_section is 'BOND':\n key1, key2 = key_names((tokens[0], tokens[1]))\n ffp.bonds[key1] = ffp.bonds[key2] = {\n 'force_constant': float(tokens[2]),\n 'equilibrium_distance': float(tokens[3])\n }\n elif current_section is 'ANGL':\n # TODO: Urey-Bradley terms\n key1, key2 = key_names((tokens[0], tokens[1], tokens[2]))\n ffp.angles[key1] = ffp.angles[key2] = {\n 'force_constant': float(tokens[3]),\n 'equilibrium_angle': float(tokens[4]) * pi / 180.0\n }\n elif current_section is 'DIHE':\n key1, key2 = key_names((tokens[0], tokens[1], tokens[2], tokens[3]))\n ffp.dihedrals[key1] = ffp.dihedrals[key2] = {\n 'force_constant': float(tokens[4]),\n 'multiplicity': float(tokens[5]),\n 'delta': float(tokens[6])\n }\n elif current_section is 'IMPR':\n key = key_names((tokens[0], tokens[1], tokens[2], tokens[3]))\n else:\n # Unknown line type\n continue\n return ffp", "def read_file(file):\n if opts.input_type == 'fits':\n data = fileio.read_fits(file)\n else:\n data = fileio.read_ascii(file)\n c_id = data[0,:]\n g_num = np.array(range(len(c_id)), dtype = 'int')\n g_id = data[3,:]\n g_ra = np.array(data[4,:], dtype = 'float')\n g_dec = np.array(data[5,:], dtype = 'float')\n g_z = np.array(data[6,:], dtype = 'float')\n return c_id, g_num, g_id, g_ra, g_dec, g_z", "def read_periodic(ifile, periodic_dx):\n while 1:\n #line = lines.pop(0)\n line = ifile.readline()\n\n a = re.search(re_pfaces, line)\n if a:\n if not periodic_dx:\n periodic_face_map[int(a.group(3), 16)] = int(a.group(5), 16)\n continue\n break\n\n if not periodic_dx:\n keys = periodic_face_map.keys()\n vals = periodic_face_map.itervalues()\n for key, val in zip(keys, vals):\n periodic_face_map[val] = key", "def _read_arf(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n\n return data['energ_lo'], data['energ_hi'], data['specresp']", "def xrds_from_dir(path=None, fjord=None, metastr='_mdf', bitmask=False):\n warnings.warn(\"This function currently assumes a constant grid and EPSG for all input files\")\n assert fjord != None, \"You must specify the fjord code for these DEMs\"\n\n files = [f for f in os.listdir(path) if f.endswith('dem.tif')]\n\n # for DEMs nested in directories\n if len(files) == 0:\n try: \n os.remove(path+'.DS_Store')\n except FileNotFoundError:\n pass\n dirs = [dir for dir in os.listdir(path)]\n nestfiles=[]\n for dir in dirs:\n nestfiles.append([dir+'/'+f for f in os.listdir(path+dir) if f.endswith('dem.tif')])\n files = [items for 
nest in nestfiles for items in nest]\n\n i=0\n darrays = list(np.zeros(len(files)))\n dtimes = list(np.zeros(len(files)))\n for f in files:\n print(f)\n\n metaf = f.rpartition(\"_dem.tif\")[0] + metastr + \".txt\"\n try:\n meta = read_meta(path+metaf)\n # print(meta)\n dtimes[i] = get_DEM_img_times(meta)\n except FileNotFoundError:\n print(\"You must manually enter dtimes for these files within the code\")\n # dtimes[0] = dt.datetime(2012, 6, 29, hour=15, minute=26, second=30)\n # dtimes[1] = dt.datetime(2010, 8, 14, hour=15, minute=34)\n # except KeyError:\n # raise\n except AssertionError:\n print(\"These stereo image times are >30 min apart... skipped\")\n continue\n\n try:\n darrays[i] = read_DEM(path+f, fjord)\n # darrays[i] = read_DEM(path+f.rpartition(\"_dem.tif\")[0] + \"_dem_geoidcomp.tif\")\n except RasterioIOError:\n print(\"RasterioIOError on your input file\")\n break\n \n # read in and apply the bitmask\n if bitmask==True:\n bitmaskfn = path + f.rpartition(\"dem.tif\")[0] + \"bitmask.tif\"\n maskarr = read_mask(bitmaskfn, fjord)\n darrays[i] = darrays[i].where(maskarr==0, )\n \n i = i + 1\n\n if len(darrays)==1 and np.all(darrays[0]) == 0:\n warnings.warn(\"Your DEM will not be put into XArray\")\n return \"nodems\"\n\n elif len(darrays)>1 and np.all(darrays[darrays!=0]) == 0:\n warnings.simplefilter(\"always\")\n warnings.warn(\"None of your DEMs will be put into XArray\")\n return \"nodems\"\n\n else:\n # I just discovered xarray's mfdataset with the preprocess option to modify each dataset prior to opening. I'm guessing that'd be the way to go here\n # darr = xr.combine_nested(darrays, concat_dim=['dtime'])\n darr = xr.concat(darrays, \n dim=pd.Index(dtimes, name='dtime'), \n # coords=['x','y'], \n join='outer').chunk({'dtime': 1, 'x':3072, 'y':3072}) # figure out a better value for chunking this (it slows the JI one with 3 dems way down)\n # combine_attrs='no_conflicts' # only in newest version of xarray\n\n try:\n for arr in darrays:\n arr.close()\n except:\n pass\n del darrays\n \n # convert to dataset with elevation as a variable and add attributes\n attr = darr.attrs\n ds = darr.to_dataset()\n\n # coarsen the data to 4 m resolution to reduce memory crashes during processing\n # note that this may be important in later steps when resolution is used as an input\n coarse = 2\n if coarse > 1:\n print(\"Your input DEMs will be downsampled to enable processing\")\n ds = ds.coarsen(x=coarse, y=coarse, boundary='pad').mean()\n ds = ds.chunk({'dtime': 1, 'x':3072, 'y':3072})\n\n ds.attrs = attr\n ds.attrs['fjord'] = fjord\n ds.attrs['res'] = tuple(x * coarse for x in attr['res'])\n attr=None\n\n # newest version of xarray (0.16) has promote_attrs=True kwarg. 
Earlier versions don't...\n # ds = ds.to_dataset(name='elevation', promote_attrs=True).squeeze().drop('band')\n \n # using rioxarray means the transform is read in/created as part of the geospatial info, so it's unnecessary to manually create a transform\n # create affine transform for concatted dataset\n print('Please note the transform is computed assuming a coordinate reference system\\\n where x(min) is west and y(min) is south')\n # inputs: west, south, east, north, width, height\n # don't use len(x,y) for width and height in case they're not continuous\n width = abs((ds.x.max().item() - ds.x.min().item())/ds.attrs['res'][0])\n ht = abs((ds.y.max().item() - ds.y.min().item())/ds.attrs['res'][1])\n transform = rasterio.transform.from_bounds(ds.x.min().item()-0.5*ds.attrs['res'][0], ds.y.min().item()-0.5*ds.attrs['res'][1], \n ds.x.max().item()+0.5*ds.attrs['res'][0], ds.y.max().item()+0.5*ds.attrs['res'][1], \n width, ht)\n ds.attrs['transform'] = transform\n # set the transform and crs as attributes since that's how they're accessed later in the pipeline\n # ds.attrs['transform'] = (ds.spatial_ref.GeoTransform)\n # ds.attrs['crs'] = ds.spatial_ref.crs_wkt\n\n\n return ds" ]
[ "0.62354904", "0.6009959", "0.57815313", "0.57030374", "0.5664513", "0.5592775", "0.5527532", "0.5496724", "0.5496124", "0.5451055", "0.5448297", "0.53548825", "0.5279107", "0.52720135", "0.526696", "0.5173193", "0.5169754", "0.5153554", "0.5111259", "0.51051176", "0.5093884", "0.5085157", "0.50798553", "0.50798553", "0.50619435", "0.5043534", "0.5043248", "0.50341827", "0.50300485", "0.5029082", "0.5026075", "0.5017041", "0.50072837", "0.50064355", "0.4984045", "0.4963614", "0.49629787", "0.49608204", "0.49591586", "0.49485016", "0.49347207", "0.4931879", "0.49309722", "0.4923152", "0.49206522", "0.4914038", "0.4909582", "0.4904418", "0.48995185", "0.48812854", "0.48714188", "0.4868656", "0.48610035", "0.4858921", "0.48588488", "0.4858713", "0.4852864", "0.4849602", "0.4834369", "0.48300147", "0.48279515", "0.48251942", "0.48225355", "0.48144364", "0.48053038", "0.47888324", "0.47795513", "0.47767368", "0.47730932", "0.47728592", "0.47617525", "0.47547337", "0.47505414", "0.47501236", "0.47485924", "0.4742241", "0.4741092", "0.4739917", "0.47387192", "0.47349665", "0.47336942", "0.4728958", "0.47142804", "0.47104406", "0.47090575", "0.46970376", "0.4695131", "0.4694153", "0.46900946", "0.46894485", "0.4684472", "0.4679456", "0.46784604", "0.46776864", "0.46736223", "0.46706688", "0.46688157", "0.46575382", "0.46569657", "0.46562177" ]
0.68994904
0
Returns the number in the brackets of an atomname.
def get_number(atomname):
    switch = False
    number = ''
    for char in atomname:
        if char == ')':
            switch = False
        if switch:
            number += char
        if char == '(':
            switch = True
    return number
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def atomic_number(name):\n try:\n return symbols.index(name.capitalize()) + 1\n except ValueError:\n return lower_names.index(name.lower()) + 1", "def getNameIndex(name):\n try:\n location = len(name) - \"\".join(reversed(name)).index(\".\")\n index = int(name[location:])\n except Exception:\n index = 0\n return index", "def get_num(elem):\n if isinstance(elem, str):\n return _find_index(elem)\n else:\n for atm in elem:\n if atm not in sym and atm[0] not in ['X', 'D']:\n raise ValueError('Unrecognized atomic symbol \\'' + atm +\n '\\'. Use X prefix for dummy atoms.')\n return np.array([_find_index(atm) for atm in elem])", "def value(name):\r\n return sum(alpha.index(str(l)) + 1 for l in name)", "def atomic_number(self, element_name):\n return self.GetAtomicNumber(element_name)", "def _name2idx(name):\n match = re.search(r\"eth(\\d+)\", name, re.I)\n if not match:\n raise exception.CloudbaseInitException(\n \"invalid NetworkDetails name {!r}\"\n .format(name)\n )\n return int(match.group(1))", "def get_amount_of_digits(self, name: str):\n x = -1\n while name[x - 1].isdigit():\n x -= 1\n if name[:x].endswith(\"/streaming/p\"):\n return x", "def _get_natom(self, file):\n f = open_general(file)\n tmptxt = f.readlines()\n f.close()\n itmp = search_string('NATOM is', tmptxt)\n natom = int(tmptxt.pop(itmp).split()[-1])\n return natom", "def get_atom_intention(self, atom_name):\n source, _clone = self._atomdetail_by_name(atom_name)\n return source.intention", "def getbarvarnameindex(self,somename_):\n if isinstance(somename_,unicode):\n somename_ = somename_.encode(\"utf-8\",errors=\"replace\")\n asgn_ = ctypes.c_int32()\n index_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbarvarnameindex(self.__nativep,somename_,ctypes.byref(asgn_),ctypes.byref(index_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n asgn_ = asgn_.value\n _asgn_return_value = asgn_\n index_ = index_.value\n _index_return_value = index_\n return (_asgn_return_value,_index_return_value)", "def getResidueNumber(self, iAtom):\n return self._getResiduePointer(iAtom)+1", "def number(self):\n return re.match(r'^.*?([0-9]+)$', self._alias).groups()[0]", "def _get_cindex(circ, name, index):\n ret = 0\n for reg in circ.cregs:\n if name != reg.name:\n ret += reg.size\n else:\n return ret + index\n return ret + index", "def get_number_from_symbol(symbol):\n return elements[symbol]['number']", "def item_no(self, name):\n sources = self.sources(self._maskname_from_item(name))\n return sources.index(name) + 1", "def get_atomic_number(molecule, atom_index):\n return molecule.GetAtomAtomicNumber(atom_index)", "def number(cls, tileName):\n return TILENAMEMAP[tileName]['Number'] if tileName in TILENAMEMAP else None", "def annulus_ident(self) -> int:\n return self._ann_ident", "def atom(token):\n if REGEX_INTEGER.match(token):\n return int(token)\n else:\n return token", "def extract_journal(name):\n match = re.search(\"\\d+\", name)\n if match != None: \n return name[:match.start()], int(name[match.start(): match.end()])\n else: \n return \"\", 0", "def getNameNum(name):\n dicto = {'a':1,'b':2,'c':3,'d':4,'e':5,'f':6,'g':7,'h':8,'i':9,'j':10,'k':11,'l':12,'m':13,'n':14,'o':15,'p':16,'q':17,'r':18,'s':19,'t':20,'u':21,'v':22,'w':23,'x':24,'y':25,'z':26}\n summ = 0\n for letter in name:\n summ += dicto.get(letter.lower())\n return summ", "def getBranchIndex(self):\n\n data = self.name.split('-')\n return int(data[2])", "def getOqiNameIndx( self, name ):\n \n if not self.oqiNames:\n self.getOqiNames( )\n\n 
if name in self.oqiNames:\n return self.oqiNames[ name ]\n elif name in self.oqiNames.values():\n return name\n else:\n return -1", "def get_index(s):\n return int(s[s.find(\"[\")+1:s.find(\"]\")])", "def parse_num(path):\n nbasename = path.basename.lower()\n if nbasename.startswith(nprefix):\n try:\n return int(nbasename[len(nprefix) :])\n except ValueError:\n pass", "def getOthNameIndx( self, name ):\n \n if not self.othNames:\n self.getOthNames( )\n\n if name in self.othNames:\n return self.othNames[ name ]\n elif name in self.othNames.values():\n return name\n else:\n return -1", "def pname(name):\n ranks = list(reversed(name.split(';')))\n for i, rank in enumerate(ranks):\n if rank in ['Others', 'Unassigned']:\n return rank\n if rank == '__':\n continue\n if rank.split('__')[1] is '':\n return ranks[i+1] + ';' + rank\n return rank", "def _get_freq(name):\n try:\n counts = int(name.split(\"_x\")[1])\n except:\n return 0\n return counts", "def _parse_atom_index(index):\n try:\n return int(index)\n except:\n return int(index, 16) - 0xA0000 + 100000", "def get_natom(self):\n return", "def return_episode_num(name):\n return int(name.split(\".\")[0].split(\"ep_\")[1]) # Use split to return only the episode number needed to sort the files in increasing order", "def _get_qindex(circ, name, index):\n ret = 0\n for reg in circ.qregs:\n if name != reg.name:\n ret += reg.size\n else:\n return ret + index\n return ret + index", "def getbarvarnameindex(self,somename_): # 3\n res,resargs = self.__obj.getbarvarnameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value", "def getvarnameindex(self,somename_):\n if isinstance(somename_,unicode):\n somename_ = somename_.encode(\"utf-8\",errors=\"replace\")\n asgn_ = ctypes.c_int32()\n index_ = ctypes.c_int32()\n res = __library__.MSK_XX_getvarnameindex(self.__nativep,somename_,ctypes.byref(asgn_),ctypes.byref(index_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n asgn_ = asgn_.value\n _asgn_return_value = asgn_\n index_ = index_.value\n _index_return_value = index_\n return (_asgn_return_value,_index_return_value)", "def index(self, atom):\n return self.atom_list.index(atom)", "def atomic_number(self) -> int:\n return elements.index(self.label) + 1", "def get_num_from_file(file_name):\n basename = file_name.partition('.')[0]\n first, second = basename.split('_')\n num = second.replace(\"genome\", '')\n num = num[1:]\n return int(num)", "def __get_num_from_str(elements: list, string: str) -> str:\n\n num = list()\n\n element_list = list(elements)\n\n for atom in string.split('-'):\n\n if atom == '*':\n num.append('0')\n else:\n num.append(f'{element_list.index(atom) + 1}')\n\n return ' '.join(num)", "def increment_name(name: str) -> str:\n\n match = _number_suffix.search(name)\n if match:\n number_str = match.group(1)\n next_number = int(number_str) + 1\n return f'{name[:-len(number_str)]}{next_number}'\n else:\n return f'{name}-1'", "def get_nt(name):\n ensembles = conventions.ensembles\n mask = (ensembles['name'] == name)\n return utils.extract_unique(ensembles[mask], 'nt')", "def acquisition_number_of(path_to_func_or_anat_or_json: Path) -> str:\n\n return path_to_func_or_anat_or_json.stem.split(\"_\")[-1].zfill(2)", "def getAtomName(self, iAtom):\n atomNames = self.getAtomNames()\n return atomNames[iAtom]", "def getNumberFromName(self, idx):\n\t\tfile = 
self.all_file_names[idx]\n\t\tnumber = file[4]\n\t\tif file[5].isdigit(): \n\t\t\tnumber += file[5]\n\t\treturn int(number)", "def getOhcNameIndx( self, name ):\n \n if not self.ohcNames:\n self.getOhcNames( )\n\n if name in self.ohcNames:\n return self.ohcNames[ name ]\n elif name in self.ohcNames.values():\n return name\n else:\n return -1", "def name_to_number(name):\n if (name == 'rock' or name == 'Rock'):\n return 0\n elif (name == 'Spock' or name == 'spock'):\n return 1\n elif (name == 'paper' or name == 'Paper'):\n return 2\n elif (name == 'lizard' or name == 'Lizard'):\n return 3\n elif (name == 'scissors' or name == 'Scissors'):\n return 4\n else:\n return -1", "def number(glyph):\n if len(glyph) != GLYPH_HEIGHT or \\\n any(len(row) != GLYPH_WIDTH for row in glyph):\n raise ValueError(\"Ill-formed grid\")\n\n try:\n return str(DIGIT_TO_GLYPH.index(glyph))\n except ValueError:\n return \"?\"", "def get_atom_code(self, atom):\n for code, symbol in self.__symbols_dict.items():\n # if keyword, return associated code\n if symbol == atom:\n return code\n\n if self.check_if_var(atom):\n # if identifier, return 0\n return 0\n if self.check_if_const(atom):\n # if constant, return 1\n return 1\n\n # invalid atom\n return -1", "def getOriNameIndx( self, name ):\n \n if not self.oriNames:\n self.getOriNames( )\n\n if name in self.oriNames:\n return self.oriNames[ name ]\n elif name in self.oriNames.values():\n return name\n else:\n return -1", "def get_namespace_index(cls, libvirt_network_if):\n matcher = re.match(r\"^tt(\\d+)$\", libvirt_network_if)\n return int(matcher.groups()[0]) if matcher is not None else 0", "def get_number(word):\n return int(re.match(NUMBER, word).group(1))", "def order(name: str):\n if name.startswith('pred'):\n split = name.split('_')\n if len(str(split[-2])) > 10: # New file format, -2 is hash\n return int(split[-3])\n return int(split[-2])\n split = name.split('_')\n x = split[-1].split('.')[0]\n return int(x)", "def name_to_number(name):\n\n # A simple if/elif/else game...\n\n if name == \"rock\":\n number = 0\n elif name == \"Spock\":\n number = 1\n elif name == \"paper\":\n number = 2\n elif name == \"lizard\":\n number = 3\n else:\n number = 4\n return number", "def getOeiNameIndx( self, name ):\n \n if not self.oeiNames:\n self.getOeiNames( )\n\n if name in self.oeiNames:\n return self.oeiNames[ name ]\n elif name in self.oeiNames.values():\n return name\n else:\n return -1", "def get_accession_num(seq_record):\n accession_atoms = seq_record.id.split(\"|\")\n gb_name = accession_atoms[3]\n # strip the version info before returning\n return gb_name[:-2]", "def getNameOffset(self) -> int:\n ...", "def getNamePrefix(name):\n try:\n location = len(name) - \"\".join(reversed(name)).index(\".\")\n # index is never used, but this line ensures that the index is an int.\n index = int(name[location:])\n prefix = name[:location-1]\n except Exception:\n prefix = name\n return prefix", "def atom(self, atom_name, resnum, chain_id, icode=' ', alt=' ', model_num=0):\n return self.struct[model_num][chain_id][(alt, resnum, icode)][atom_name]", "def atomic_number(argument):\n\n try:\n element = atomic_symbol(argument)\n atomic_numb = Elements[element]['atomic_number']\n except ValueError:\n raise ValueError(\"Unable to identify element in atomic_number\")\n except TypeError:\n raise TypeError(\"Invalid type in atomic_number\")\n\n return atomic_numb", "def tag_key(tagname: str) -> int:\n return int(tagname.split(\"_\")[1]) * 100 + int(tagname.split(\"_\")[2])", "def 
regToInt(name):\n match = re.match(r\"r([0-9]+)\", name)\n if match:\n index = int(match.group(1))\n if 0 <= index <= 15:\n return index\n raise AsmException(\"incorrect register %s\" % name)", "def get_layer_number(model, layer_name):\n for i, l in enumerate(model.layers):\n if l.name == layer_name:\n return i\n raise ValueError('No layer with name {} in model {}.'.format(layer_name, model.name))", "def updateName(g):\n try:\n n = int(g.group(2))\n except TypeError:\n n = 0\n\n return \"%s-%d\" % (g.group(1), n + 1)", "def residueNumber(self,i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n return freesasa_structure_atom_res_number(self._c_structure,i)", "def MonthNum(name):\n return _MONTH_DICT.get(name.capitalize(), 0)", "def getLimbIndex(self):\n\n data = self.name.split('-')\n return int(data[1]) - 1", "def get_name_value(name_node):\n return name_node.id", "def _get_next_bracket(self) -> int:\n return 0", "def get_image_index(name: str):\n base_name = os.path.basename(name)\n nums = pattern.findall(base_name)\n if len(nums) != num_count:\n raise BaseException(f\"can't exact index from the string: {name}\")\n return float(nums[num_sort_index])", "def _name_increment_revision(name):\n revre = r\"^(.*?)([0-9]+)$\"\n m = re.search(revre, name)\n if m:\n name = m.group(1) + str(int(m.group(2)) + 1)\n else:\n name = name + \" (copy)\"\n return name", "def _annotate_brackets(\n self,\n tokens: List[tokenize.TokenInfo],\n ) -> Mapping[int, int]:\n brackets = {bracket: 0 for bracket in MATCHING}\n for token in tokens:\n if token.exact_type in MATCHING.keys():\n brackets[token.exact_type] += 1\n if token.exact_type in MATCHING.values():\n reverse_bracket = get_reverse_bracket(token)\n if brackets[reverse_bracket] > 0:\n brackets[reverse_bracket] -= 1\n return brackets", "def get_bb(node_name):\n return int(re.search(r'\\d+', re.search(r'block_\\d+', node_name).group()).group())", "def name_to_number(name):\r\n \r\n if name == \"rock\":\r\n return 0\r\n elif name == \"Spock\":\r\n return 1\r\n elif name == \"paper\":\r\n return 2\r\n elif name == \"lizard\":\r\n return 3\r\n elif name == \"scissors\":\r\n return 4\r\n else:\r\n return \"Invalid!Enter any one of the following: rock,Spock,paper,lizard,scissors\"", "def getOfcNameIndx( self, name ):\n \n if not self.ofcNames:\n self.getOfcNames( )\n\n if name in self.ofcNames:\n return self.ofcNames[ name ]\n elif name in self.ofcNames.values():\n return name\n else:\n return -1", "def element_name(self, atomic_number):\n return self.GetElementName(atomic_number)", "def find_next_name(self, etfName):\n etfName = etfName.split('-')[0]\n max_n = max(list(map(lambda x: int(x.split('-')[1]) if x.split('-')[0] == etfName else 0, self.etfs.keys())))\n return etfName + '-' + str(max_n + 1)", "def _compress_name(name):\n n = 0\n for c in name:\n n = (n * _P1 + ord(c)) % _P2 \n return '%09d' % n", "def name_to_number(name):\n if name == \"rock\":\n number = 0\n elif name == \"Spock\":\n number = 1\n elif name == \"paper\":\n number = 2\n elif name == \"lizard\":\n number = 3\n elif name == \"scissors\":\n number = 4\n else:\n print \"Name is invalid!\"\n return 1\n return number", "def index(self, factor_name):\n return self._factor_names.index(str(factor_name))", "def name(prefix = 'tmp'):\n nameidx = context.curr().setdefault('NAME_INDEX', {})\n idx = nameidx.setdefault(prefix, 0)\n name = '_%s_%d' % (prefix, idx)\n nameidx[prefix] = idx + 1\n return name", "def name(node):\n\n return fst(node)", "def 
get_next_identifier(self) -> int:\n if self.items:\n return self.items[-1].identifier + 1\n else:\n return 1", "def extract_channel_number(title):\n # Generate re\n p = _re.compile(\"(lower)|(upper)\")\n result = _re.search(p, title)\n idx = result.lastindex\n return idx", "def get_index_from_section(section):\n return section.rsplit(\"(\", 1)[1].rstrip(\")\")", "def get_digit_prefix(characters):\n value = 0\n while characters and characters[0].isdigit():\n value = value * 10 + int(characters.pop(0))\n return value", "def getvarnameindex(self,somename_): # 3\n res,resargs = self.__obj.getvarnameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value", "def get_head_pos( head, ngram ):\n try:\n tokens = ngram.split( ' ' )\n return str([ i for i, t in enumerate( tokens ) if t.startswith( head + \"/\" )][0] + 1 )\n except ValueError:\n return None", "def to_ordinal(self):\n return mod(self.number - 1 + 39 * (self.number - self.name), 260)", "def countAtom (dico_count, PDB_parsed, debug = 0):\n count = 0\n \n for atom in PDB_parsed : \n residue = tool.transformAA(atom[\"resName\"])\n if debug : print residue\n \n if residue in dico_count : \n atom_Name = atom[\"name\"]\n if atom_Name in dico_count[residue] : \n count = count + 1\n return count", "def get_ident():\n return -1", "def getStoryNumber(self, source):\n numberStart = source.find('>') + 1\n numberEnd = source.find('.')\n return int(source[numberStart:numberEnd])", "def get_ns(name):\n ensembles = conventions.ensembles\n mask = (ensembles['name'] == name)\n return utils.extract_unique(ensembles[mask], 'ns')", "def getOsiNameIndx( self, name ):\n \n if not self.osiNames:\n self.getOsiNames( )\n\n if name in self.osiNames:\n return self.osiNames[ name ]\n elif name in self.osiNames.values():\n return name\n else:\n return -1", "def __rank_from_str_to_int(rank: str) -> int:\n return int(rank) - 1", "def getMangledNum(self):\n return (\"X\" * (len(self.num)-4)) + self.num[-4:]", "def atom(token):\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n return Symbol(token) # Equivalent to str(token)", "def get_well_id(self, name):\n\n idx = -1\n if type(name) is str:\n wells_names = self.wells_list()\n if name in wells_names:\n idx = wells_names.index(name)\n return(idx)", "def get_layer_index(self, layer_name):\n for i, layer in enumerate(self.tmx_data.layers):\n if layer.name == layer_name:\n return i\n return -1", "def getChrNum(self):\n chrLookup = {\"X\":23,\"x\":23,\"Y\":24,\"y\":24}\n if self.chr.startswith(\"chr\"):\n num = self.chr[3:]\n if num in (\"X\",\"x\",\"Y\",\"y\"):\n num = chrLookup[num]\n return int(num)\n else: return self.chr", "def rAssetName(nodeNS):\n\t#return nodeNS.split('_')[0] + re.sub('.*?([0-9]*)$', r'\\1', nodeNS)\n\treturn nodeNS.split('_')[0]", "def _get_name(self, name):\n try:\n return self._names.index(name)\n except ValueError:\n self._names.append(name)\n return len(self._names) - 1" ]
[ "0.7131723", "0.6602477", "0.6242266", "0.6130583", "0.6037743", "0.5923889", "0.58768123", "0.5873613", "0.5861094", "0.5821463", "0.58060014", "0.5772363", "0.5768761", "0.5733707", "0.5732828", "0.5719784", "0.56835663", "0.56269145", "0.5624526", "0.5605099", "0.5601558", "0.5591843", "0.55889714", "0.5549441", "0.55389607", "0.5518706", "0.55121636", "0.5510011", "0.55072707", "0.5506972", "0.549323", "0.54926795", "0.5488917", "0.54628277", "0.54573554", "0.5447016", "0.54412895", "0.54274", "0.54117167", "0.5402535", "0.54000366", "0.53817254", "0.5374528", "0.5371295", "0.5365522", "0.5363801", "0.5294695", "0.528387", "0.52792096", "0.527658", "0.5274358", "0.52549434", "0.5251368", "0.52502203", "0.5249518", "0.5228311", "0.5224257", "0.5217466", "0.52047056", "0.5195071", "0.51946616", "0.5188073", "0.51849896", "0.51794827", "0.51634043", "0.51520693", "0.51443875", "0.5143009", "0.5136728", "0.5119954", "0.51084894", "0.5098786", "0.5086963", "0.5079968", "0.50756323", "0.50717527", "0.5071123", "0.5070273", "0.5061277", "0.5054451", "0.50412923", "0.5040765", "0.5038412", "0.50367016", "0.5034857", "0.5033899", "0.50331235", "0.50330526", "0.503206", "0.50249565", "0.502401", "0.5021983", "0.5021603", "0.5009218", "0.50002396", "0.49978775", "0.49967974", "0.49904302", "0.49800625", "0.49792692" ]
0.78789806
0
Transforms a set of given fractional coordinates to Cartesian coordinates. Needs a list containing the cell parameters as its first argument and the dictionary returned by read_coordinates(). Returns a dictionary with Cartesian coordinates analogous to the fractional dictionary.
def frac_to_cart(cell, positions): atomlist = [] counter = 1 a, b, c = cell[0], cell[1], cell[2] alpha, beta, gamma = cell[3] / 180 * np.pi, cell[4] / 180 * np.pi, cell[5] / 180 * np.pi v = np.sqrt(1 - np.cos(alpha) * np.cos(alpha) - np.cos(beta) * np.cos(beta) - np.cos(gamma) * np.cos(gamma) \ + 2 * np.cos(alpha) * np.cos(beta) * np.cos(gamma)) transmatrix = np.matrix([[a, b * np.cos(gamma), c * np.cos(beta)], [0, b * np.sin(gamma), c * (np.cos(alpha) - np.cos(beta) * np.cos(gamma)) / np.sin(gamma)], [0, 0, c * v / np.sin(gamma)]]) for atom in positions: coordmatrix = np.dot(transmatrix, positions[str(atom)]) coordmatrix = np.array(coordmatrix).flatten().tolist() atomlist.append([]) atomlist[-1].append([atom, atomtable[atom[0]]]) counter += 1 atomlist[-1].append(np.array(coordmatrix)) return atomlist
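For orientation, a minimal usage sketch of the frac_to_cart document above. It assumes that function is in scope together with numpy imported as np, and it substitutes a tiny two-entry stand-in for the module-level atomtable lookup the original code relies on; the cell values, the orthogonal 90-degree angles, and the atom labels are illustrative assumptions, not part of the dataset record.

```python
import numpy as np

# Assumed stand-in for the module-level `atomtable` the function indexes with
# the first character of each atom label (here mapped to atomic numbers).
atomtable = {'C': 6, 'O': 8}

# Cell parameters: a, b, c lengths followed by alpha, beta, gamma in degrees.
cell = [10.0, 12.0, 15.0, 90.0, 90.0, 90.0]

# Fractional coordinates keyed by atom label, in the shape read_coordinates()
# is described as returning.
positions = {'C1': np.array([0.25, 0.50, 0.75]),
             'O1': np.array([0.10, 0.20, 0.30])}

for label_info, xyz in frac_to_cart(cell, positions):
    print(label_info, xyz)
# For this orthogonal cell the result (up to floating-point noise) is just the
# fractions scaled by a, b, c:
# ['C1', 6] [ 2.5   6.   11.25]
# ['O1', 8] [ 1.    2.4   4.5 ]
```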
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def frac2cart_all(frac_coordinates, lattice_array):\n coordinates = deepcopy(frac_coordinates)\n for coord in range(coordinates.shape[0]):\n coordinates[coord] = cartisian_from_fractional(coordinates[coord],\n lattice_array)\n return coordinates", "def convert_cylinder_to_cartesian(dictionary):\n\n # Define num of points in x and y direction\n dictionary['nx'] = dictionary['nt'] # Angular number of points\n dictionary['ny'] = dictionary['nr'] # Radial number of points\n\n # Assemble gradient components into tensor\n if all(cyl_grad_u_key in dictionary for cyl_grad_u_key in CYL_GRAD_U_KEYS) and 'grad_u_cyl' not in dictionary:\n dictionary['grad_u_cyl'] = assemble_2nd_order_tensor(dictionary, CYL_GRAD_U_KEYS)\n elif 'grad_u_cyl' in dictionary:\n pass\n else:\n print('WARNING No gradient data available for conversion cylinder to cartesian coordinates.')\n\n # Loop all keys in dictionary\n for cyl_key in tuple(dictionary):\n if isinstance(dictionary[cyl_key], (np.ndarray, int, float, str)):\n if cyl_key in CYL_VEL_KEYS:\n velocity_cylinder_to_cartesian(dictionary, cyl_key, verbose=False)\n elif cyl_key == 'grad_u_cyl':\n gradient_cylinder_to_cartesian(dictionary, cyl_key)\n elif cyl_key in CYL_TAU_KEYS:\n tau_components_cylinder_to_cartesian(dictionary, cyl_key, verbose=False) # Requires off-diagonal elements in tau, for now just RENAMING\n elif cyl_key in CYL_ALL_BUDGET_KEYS:\n budget_tau_components_cylinder_to_cartesian(dictionary, cyl_key, verbose=False) # Just RENAMING budgets!\n elif cyl_key in CYL_RMS_KEYS:\n rms_velocity_cylinder_to_cartesian(dictionary, cyl_key, verbose=False) # Just RENAMING rms, requires off-diag tau for conversion\n else:\n pass\n else:\n assert False, 'Unknown variable type in dictionary for key %r with type %r' % (cyl_key, type(dictionary[cyl_key]))\n\n return 1", "def transform_coordinates(coords):\n # WGS 84 reference coordinate system parameters\n A = 6378.137 # major axis [km]\n E2 = 6.69437999014e-3 # eccentricity squared\n\n coords = prepare_coords(coords)\n\n # convert to radiants\n lat_rad = np.radians(coords[:, 0])\n lon_rad = np.radians(coords[:, 1])\n\n # convert to cartesian coordinates\n r_n = A / (np.sqrt(1 - E2 * (np.sin(lat_rad) ** 2)))\n x = r_n * np.cos(lat_rad) * np.cos(lon_rad)\n y = r_n * np.cos(lat_rad) * np.sin(lon_rad)\n z = r_n * (1 - E2) * np.sin(lat_rad)\n\n return np.column_stack((x, y, z))", "def cart2frac_all(coordinates, lattice_array):\n frac_coordinates = deepcopy(coordinates)\n for coord in range(frac_coordinates.shape[0]):\n frac_coordinates[coord] = fractional_from_cartesian(\n frac_coordinates[coord], lattice_array)\n return frac_coordinates", "def from_cartesian(self, coordinates, *axes):", "def _package_coordinates(self, coords_string):\n values = [float(x) for x in coords_string.strip().replace(\",\", \" \").split()]\n\n if len(values) % 2 != 0:\n raise Exception(\"Number of values for coordinates is not even.\")\n \n return {\"lat\": values[0::2], \"lon\": values[1::2], \"type\": \"polygon\", \"do_sanitise_geometries\": False}", "def convert_to_cartesian(grid: List[Tuple[float, float]], radius: float = 1.0) -> List[Tuple[float, float, float]]:\n\n # conversion radians -> degrees\n r2d = 180.0 / np.pi\n\n # calculate x/y/z coordinates, assuming r=1\n return [\n (\n radius * np.cos(lat / r2d) * np.cos(lon / r2d),\n radius * np.cos(lat / r2d) * np.sin(lon / r2d),\n radius * np.sin(lat / r2d),\n )\n for lon, lat in grid\n ]", "def cartesian_to_geographical(coordinate_triples):\n if len(coordinate_triples.shape) == 1:\n x = 
coordinate_triples[0]\n y = coordinate_triples[1]\n z = coordinate_triples[2]\n elif len(coordinate_triples.shape) == 2:\n assert coordinate_triples.shape[1] == 3\n x = coordinate_triples[:, 0]\n y = coordinate_triples[:, 1]\n z = coordinate_triples[:, 2]\n radius = np.sqrt(x**2 + y**2 + z**2)\n longitudes = np.arctan2(y, x)\n latitudes = np.arcsin(z/radius)\n return (latitudes, longitudes)", "def geo2Cartesian(lat, lon, h, julian_date):\n\n lat_rad = np.radians(lat)\n lon_rad = np.radians(lon)\n\n # Calculate ECEF coordinates\n ecef_x, ecef_y, ecef_z = latLonAlt2ECEF(lat_rad, lon_rad, h)\n\n\n # Get Local Sidreal Time\n LST_rad = math.radians(JD2LST(julian_date, np.degrees(lon_rad))[0])\n\n\n # Calculate the Earth radius at given latitude\n Rh = math.sqrt(ecef_x**2 + ecef_y**2 + ecef_z**2)\n\n # Calculate the geocentric latitude (latitude which considers the Earth as an elipsoid)\n lat_geocentric = math.atan2(ecef_z, math.sqrt(ecef_x**2 + ecef_y**2))\n\n # Calculate Cartesian ECI coordinates (in meters), in the epoch of date\n x = Rh*np.cos(lat_geocentric)*np.cos(LST_rad)\n y = Rh*np.cos(lat_geocentric)*np.sin(LST_rad)\n z = Rh*np.sin(lat_geocentric)\n\n return x, y, z", "def convert(coordinates):\n center = np.mean(coordinates, axis=0, dtype=np.float32)\n x = np.subtract(np.array(coordinates, dtype=np.float32), center)\n rho, phi = cart2pol(x[:, 0], x[:, 1])\n result = np.swapaxes(np.array([rho, phi], dtype=np.float32), 0, 1)\n\n # normalize rho values to range[0-1]\n result[:, 0] = normalize(result[:, 0].reshape(1, -1), norm='max')\n return result", "def cartesian_coordinates(self):\n # extract RA items\n ra_hours, ra_minutes, ra_seconds = RA_RE.match(str(self.ra)).groups()\n # then cast\n ra_hours = int(ra_hours)\n ra_minutes = int(ra_minutes)\n ra_seconds = float(ra_seconds)\n\n # extract DEC items\n dec_sign, dec_degrees, dec_minutes, dec_seconds = DEC_RE.match(str(self.dec)).groups()\n # then cast\n dec_sign = -1 if dec_sign == '-' else 1\n dec_degrees = int(dec_degrees)\n dec_minutes = int(dec_minutes)\n dec_seconds = float(dec_seconds)\n\n # to degrees\n a = (ra_hours*15) + (ra_minutes*0.25) + (ra_seconds*0.004166)\n b = abs(dec_degrees + dec_minutes/60 + dec_seconds/3600) * dec_sign\n\n # to radians\n a = math.radians(a)\n b = math.radians(b)\n\n distance = float(self.distance)\n\n x = (distance * math.cos(b)) * math.cos(a)\n y = (distance * math.cos(b)) * math.sin(a)\n z = distance * math.sin(b)\n\n return x, y, z", "def cartesian_coordinates(self, *axes):", "def cartesian2Geo(julian_date, x, y, z):\n\n\n # Calculate LLA\n lat, r_LST, ele = ecef2LatLonAlt(x, y, z)\n\n # Calculate proper longitude from the given JD\n lon, _ = LST2LongitudeEast(julian_date, np.degrees(r_LST))\n\n # Convert longitude to radians\n lon = np.radians(lon)\n\n\n return np.degrees(lat), np.degrees(lon), ele", "def cartesian_encoder(coord, r_E=6371):\n def _to_rad(deg):\n return deg * np.pi / 180.\n\n theta = _to_rad(coord[:, 0]) # lat [radians]\n phi = _to_rad(coord[:, 1]) # lon [radians]\n\n x = r_E * np.cos(phi) * np.cos(theta)\n y = r_E * np.sin(phi) * np.cos(theta)\n z = r_E * np.sin(theta)\n\n return np.concatenate([x.reshape(-1, 1), y.reshape(-1, 1), z.reshape(-1, 1)], axis=1)", "def from_cdo_griddes(griddes):\n\n with open(griddes) as grid_file:\n grid_file_lines = grid_file.readlines()\n\n grid_dic = {}\n\n for line in grid_file_lines:\n words = line.split()\n if words[0] == '#':\n continue\n else:\n length = len(words)\n if length == 3:\n grid_dic[words[0]] = words[2]\n else:\n value_string = ' 
'.join(words[2:length-1])\n grid_dic[words[0]] = value_string\n\n if grid_dic['gridtype'] != 'lonlat':\n print(('Gridtype {0} not supported'.format(grid_dic['gridtype'])))\n return ''\n\n lon = np.zeros(int(grid_dic['xsize']))\n lat = np.zeros(int(grid_dic['ysize']))\n\n for i in range(len(lon)):\n lon[i] = float(grid_dic['xfirst']) + i * float(grid_dic['xinc'])\n for j in range(len(lat)):\n lat[j] = float(grid_dic['yfirst']) + j * float(grid_dic['yinc'])\n\n if grid_dic['xname'] == 'rlon':\n pol_lon = float(grid_dic['xnpole'])\n pol_lat = float(grid_dic['ynpole'])\n grid = RotGrid(lon, lat, pol_lon, pol_lat)\n else:\n grid = Grid(lon, lat)\n\n return grid", "def geomFromInteriorPoints(coords):\n if isinstance(coords, numpy.ndarray):\n coords = coords.tolist()\n geomDict = {'type':'MultiPoint', 'coordinates':coords}\n geomPoints = ogr.CreateGeometryFromJson(repr(geomDict))\n return geomPoints", "def frac2cart(lattice, fcoords):\n ccoords = []\n for i in fcoords:\n x = i[0] * lattice[0][0] + i[1] * lattice[1][0] + i[2] * lattice[2][0]\n y = i[0] * lattice[0][1] + i[1] * lattice[1][1] + i[2] * lattice[2][1]\n z = i[0] * lattice[0][2] + i[1] * lattice[1][2] + i[2] * lattice[2][2]\n ccoords.append([x, y, z])\n return ccoords", "def test_modify_coords(self):\n xyz1 = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((1.53830201, 0.86423425, 0.07482439), (0.94923576, -0.20847619, -0.03881977),\n (-0.56154542, -0.31516675, -0.05011465), (-1.18981166, 0.93489731, 0.17603211),\n (1.49712659, -1.15833718, -0.15458647), (-0.87737433, -0.70077243, -1.02287491),\n (-0.87053611, -1.01071746, 0.73427128), (-0.48610273, 1.61361259, 0.11915705))}\n xyz2 = {'symbols': ('C', 'C', 'N', 'H', 'H', 'H'), 'isotopes': (12, 12, 14, 1, 1, 1),\n 'coords': ((-0.48629842, 0.00448354, 0.00136213), (0.97554967, -0.0089943, -0.00273253),\n (2.13574353, -0.01969098, -0.00598223), (-0.88318669, -0.63966273, -0.78887729),\n (-0.87565097, -0.35336611, 0.95910491), (-0.86615712, 1.01723058, -0.16287498))}\n xyz3 = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-2.77437517, 0.17200669, 0.18524832), (-1.64613785, -0.19208096, 0.80434075),\n (-0.40774525, 0.26424657, -0.07952902), (-0.26203276, 2.09580334, -0.05090198),\n (-0.67096595, -0.16397552, -1.42109845), (0.89264107, -0.40136991, 0.41083574),\n (2.12441624, -0.1300863, -0.44918504), (-1.50623429, -1.27619307, 0.9524955),\n (-1.45114032, 0.18501518, 1.82167553), (-1.59654975, 2.25615634, -0.09052499),\n (-1.65730431, -0.11079255, -1.400057), (0.74870779, -1.48997779, 0.41386971),\n (1.10331691, -0.11082471, 1.44762119), (2.41262211, 0.92463409, -0.42840126),\n (1.95758158, -0.4244074, -1.48990015), (2.97418137, -0.70882619, -0.0719403))}\n xyz4 = {'symbols': ('C', 'C', 'O', 'C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 16, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-1.2713687423422115, -0.7423678681688866, -0.6322577211421921),\n (-0.08008635702808505, -0.40741599130374034, 0.2550353232234618),\n (-0.5452666768773297, -0.20159898814584978, 1.588840559327411),\n (0.6158080809151276, 0.8623086771891557, -0.21553636846891006),\n (1.9196775903993375, 1.0155396004927764, 0.5174563928754532),\n (3.0067486097953653, 1.0626738453913969, -0.05177300486677717),\n (-2.012827991034863, 0.06405231524730193, -0.6138583677564631),\n (-0.9611224758801538, 
-0.9119047827586647, -1.6677831987437075),\n (-1.7781253059828275, -1.6433798866337939, -0.27003123559560865),\n (0.6204384954940876, -1.2502614603989448, 0.2715082028581114),\n (-1.0190238747695064, -1.007069904421531, 1.8643494196872146),\n (0.014234510343435022, 1.753076784716312, -0.005169050775340246),\n (0.827317336700949, 0.8221266348378934, -1.2893801191974432),\n (1.8498494882204641, 1.107064846374729, 1.6152311353151314))}\n xyz5 = {'symbols': ('N', 'C', 'C', 'C', 'H', 'H', 'C', 'C', 'C', 'C', 'H', 'H', 'C', 'C', 'C', 'H', 'C', 'C',\n 'N', 'H', 'H', 'C', 'H', 'C', 'C', 'C', 'H', 'H', 'H', 'H', 'C', 'C', 'C', 'H', 'H', 'H',\n 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'O', 'O', 'C', 'O', 'H', 'H', 'H'),\n 'isotopes': (14, 12, 12, 12, 1, 1, 12, 12, 12, 12, 1, 1, 12, 12, 12, 1, 12, 12, 14, 1, 1, 12, 1, 12, 12,\n 12, 1, 1, 1, 1, 12, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 16, 16, 12, 16, 1, 1, 1),\n 'coords': ((-0.766219, -0.248648, -0.347086), (0.667812, -0.150498, -0.496932),\n (-1.490842, 1.000959, -0.245328), (1.311194, -1.339578, -1.19388),\n (0.976451, 0.831716, -0.911173), (1.231101, -0.062221, 0.660162),\n (-1.346406, -1.400789, 0.294395), (-1.022138, 2.069095, 0.533928),\n (-2.673271, 1.125443, -1.008282), (2.575265, -0.94966, -1.974365),\n (1.534634, -2.14679, -0.467576), (0.584227, -1.791819, -1.905459),\n (-0.574689, -2.103356, 1.24726), (-2.643838, -1.861964, -0.035016),\n (-1.73741, 3.268914, 0.549347), (-0.105632, 1.96688, 1.126589),\n (-3.134563, -0.04419, -1.826788), (-3.378705, 2.332664, -0.970971),\n (3.611589, -0.28425, -1.113057), (2.30114, -0.222978, -2.774031),\n (2.969795, -1.853671, -2.489377), (-1.04268, -3.284134, 1.815898),\n (0.388329, -1.696921, 1.570938), (-3.645512, -1.174123, -0.925823),\n (-3.088386, -3.061615, 0.555145), (-2.911462, 3.400813, -0.198004),\n (-1.376219, 4.102013, 1.150524), (-3.935589, 0.254447, -2.531702),\n (-2.298405, -0.411572, -2.461402), (-4.293927, 2.444159, -1.549116),\n (4.776265, 0.123769, -1.959689), (4.064268, -1.169457, 0.001273),\n (-2.30222, -3.77607, 1.457834), (-0.433782, -3.814872, 2.545573),\n (-4.135291, -1.935447, -1.571709), (-4.453058, -0.768805, -0.272612),\n (-4.078335, -3.442593, 0.302875), (-3.465321, 4.337257, -0.179068),\n (5.500278, 0.67338, -1.336133), (5.30611, -0.707961, -2.446036),\n (4.433161, 0.821539, -2.74083), (4.954327, -0.743379, 0.488676),\n (4.300156, -2.200598, -0.295594), (3.265545, -1.194959, 0.769181),\n (-2.671885, -4.702569, 1.890597), (1.78286, 0.089948, 1.873468),\n (1.758606, 1.382484, 2.130308), (2.973471, 2.040706, 1.623336),\n (2.813335, 2.256698, 0.248083), (2.919925, 3.030613, 2.105087),\n (3.858517, 1.438684, 1.858856), (3.005024, 1.410381, -0.277159))}\n xyz6 = {'symbols': ('N', 'C', 'C', 'H', 'C', 'H', 'H', 'N', 'H', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H',\n 'H', 'H', 'O', 'O', 'H', 'C', 'H', 'H', 'O', 'H'),\n 'isotopes': (14, 12, 12, 1, 12, 1, 1, 14, 1, 12, 12, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 16, 16, 1, 12, 1, 1, 16, 1),\n 'coords': ((2.608231, -0.458895, 1.629197), (2.408715, 0.132166, 0.318653),\n (1.174426, -0.323822, -0.471554), (3.304408, -0.071078, -0.291093),\n (-0.13532, 0.016735, 0.225918), (1.210534, 0.150539, -1.46601),\n (1.221625, -1.416078, -0.631885), (-1.316045, -0.574442, -0.379686),\n (-0.086456, -0.362851, 1.260573), (-1.468231, -0.411368, -1.77232),\n (-2.505886, -0.419831, 0.432347), (-2.403425, -0.886127, -2.107496),\n (-0.621099, -0.850903, -2.320815), (-3.364172, -0.88926, -0.068909),\n (-2.767365, 0.637288, 0.628231), (-2.360065, -0.927144, 
1.400068),\n (2.574849, -1.475283, 1.579253), (1.886591, -0.170591, 2.284831),\n (2.375177, 1.228181, 0.441157), (-0.231725, 1.121336, 0.301367),\n (-1.455199, 0.947478, -2.255384), (-2.58006, 1.611276, -1.811891),\n (-3.315019, 1.53868, -2.760245), (-3.713498, 1.338038, -4.025244),\n (-4.754452, 0.99077, -4.021055), (-3.584519, 2.351475, -4.444827),\n (-2.87635, 0.381401, -4.513467), (-1.966974, 0.665311, -4.338804))}\n mol1 = converter.molecules_from_xyz(xyz1)[1]\n mol2 = converter.molecules_from_xyz(xyz2)[1]\n mol3 = converter.molecules_from_xyz(xyz3)[1]\n mol4 = converter.molecules_from_xyz(xyz4)[1]\n mol5 = converter.molecules_from_xyz(xyz5)[1] # a TS\n mol6 = converter.molecules_from_xyz(xyz6)[1] # a TS\n\n # test atom modification types\n modification_type = 'atom'\n\n # test R_atom modification\n indices, new_val = [0, 1], 1.5\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.06385450948408691, 1.6253138441202686, 0.042870253583423557),\n (-0.02582727173313104, 0.39833637030950975, 0.9010563970736782),\n (-0.02582727173313104, -1.003336361301907, 0.3272239637891734),\n (-0.02582727173313104, -1.003336361301907, -1.0899990532469916),\n (-0.08138177769352953, 0.465646654907214, 2.0002403496097383),\n (0.865704477722866, -1.5264119285073852, 0.6825623354173815),\n (-0.9185767861007101, -1.5268489957651346, 0.6785930201570352),\n (0.14577602706217008, -0.07998849407327513, -1.367625604543457))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [1, 0], -1.5\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.01167393998357115, -0.5225807439329089, -0.9899595616178738),\n (-0.040525509131742084, 0.26844387347263365, -2.2633625897949208),\n (0.01167393998357115, -0.5225807439329089, 1.4216698859880004),\n (0.01167393998357115, 0.8926022581407576, 1.3456557382334218),\n (0.11202785529567173, -2.2718515121487206, 0.04691079079738447),\n (-0.8954040276884763, -0.8508241498293034, 1.9356427400340799),\n (0.8880330020652463, -0.8439168226596885, 1.990234136037933),\n (-0.13167393678263156, 1.1200467154192293, 0.4039467156910099))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), - new_val, 5)\n\n # test A_atom modification\n indices, new_val = [2, 1, 0], 140\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.011940763595588438, -0.90654939253321, -1.1784203714214114),\n (0.011940763595588438, -0.90654939253321, 0.05065327345758153),\n (-0.02531707366035523, 0.06629439921242253, 1.2108932996837143),\n (0.011940763595588438, 1.5283906429141458, 0.05806971900412017),\n (0.03285612994605798, -1.8458593499019589, 0.6277855724118742),\n (-0.9645745795119229, 0.3758422785924207, 1.4467600455414558),\n (0.8166299978590752, 0.37902049128771864, 1.551524925579085),\n (-0.10465928281651019, 1.2266969334608921, -0.8663115945839973))}\n\n new_xyz 
= converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n # test changing an angle to 180 degrees\n indices, new_val = [0, 1, 2], 180\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.0019281473980474666, 1.559641181574566, 1.013927346529066),\n (-0.0019281473980474772, 0.42219553322547265, 0.548267146825631),\n (-0.0019281473980474772, -0.9794771983859442, -0.025565286458873793),\n (-0.0019281473980474772, -0.9794771983859442, -1.4427883034950388),\n (-0.05748265335844597, 0.4895058178231769, 1.6474510993616909),\n (0.8896036020579495, -1.5025527655914221, 0.32977308516933435),\n (-0.8946776617656266, -1.5029898328491718, 0.32580376990898796),\n (0.16967515139725364, -0.05612933115731222, -1.7204148547915041))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val)\n\n # test changing a 180 degree angle to something else\n indices, new_val = [0, 1, 2], 120\n expected_xyz = {'symbols': ('C', 'C', 'N', 'H', 'H', 'H'), 'isotopes': (12, 12, 14, 1, 1, 1),\n 'coords': ((0.7757362507465277, 0.4478716325630875, 0.7767867108403768),\n (-0.3207007101270898, -0.18515666614565915, 0.04582870107149262),\n (-0.3207007101270898, -0.18515666614565915, -1.1144190466784232),\n (-0.3207007101270898, 0.8374974028016162, 1.8964626512298475),\n (-1.2063452316056904, -0.6964838693490394, 1.8964625790172804),\n (0.5649437124447699, -0.6964840572534022, 1.896462566459638))}\n new_xyz = converter.modify_coords(coords=xyz2, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol2)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol2), new_val, 5)\n\n # test D_atom modification\n indices, new_val = [0, 1, 2, 3], 30\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.3812553590829658, 1.4249753409811934, 0.24885596109763952),\n (0.13588307254069157, 0.47112021672976, 0.8262208968300058),\n (0.13588307254069157, -0.9305525148816568, 0.25238846354550093),\n (0.13588307254069157, -0.9305525148816568, -1.1648345534906641),\n (0.08032856658029308, 0.5384305013274643, 1.9254048493660656),\n (1.0274148219966885, -1.4536280820871348, 0.6077268351737091),\n (-0.7568664418268876, -1.4540651493448844, 0.6037575199133627),\n (0.30748637133599266, -0.007204647653024865, -1.4424611047871294))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [3, 2, 1, 0], -30\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.17268751280677364, -0.941696827801256, -1.1487068217042242),\n 
(-0.17268751280677364, -0.941696827801256, 0.08036682317476873),\n (-0.17268751280677364, 0.3328411496875977, 0.8986107061160642),\n (0.4830966870190505, 1.3983204216355287, 0.23286144075770054),\n (-0.18773471865125574, -1.8811191078717768, 0.6574991306756568),\n (-1.0994105700891015, 0.3771264916699556, 1.4764735369276594),\n (0.6806108103574798, 0.3121359507669669, 1.5812384626874982),\n (-0.2075631130119835, 1.1944491200970329, -0.8365980489813365))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1),\n 360 + new_val, 5)\n\n indices, new_val = [0, 1, 2, 3], -30\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.37739906428687087, 1.4249753409811934, 0.24885596109763952),\n (-0.13973936733678652, 0.47112021672976, 0.8262208968300058),\n (-0.13973936733678652, -0.9305525148816568, 0.25238846354550093),\n (-0.13973936733678652, -0.9305525148816568, -1.1648345534906641),\n (-0.195293873297185, 0.5384305013274643, 1.9254048493660656),\n (0.7517923821192105, -1.4536280820871348, 0.6077268351737091),\n (-1.0324888817043656, -1.4540651493448844, 0.6037575199133627),\n (0.0318639314585146, -0.007204647653024865, -1.4424611047871294))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1),\n 360 + new_val, 5)\n\n # test group modification types\n modification_type = 'group'\n\n # test R_group modification\n indices, new_val = [0, 1], 1.5\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.06385450815440741, 1.625313844153823, 0.04287025350146201),\n (-0.02582727144301671, 0.39833637029935165, 0.9010563970984908),\n (-0.02582727144301671, -1.0033363613120652, 0.327223963813986),\n (-0.02582727144301671, -1.0033363613120652, -1.089999053222179),\n (-0.0813817733100206, 0.4656466548101805, 2.0002403498467567),\n (0.8657044801882787, -1.5264119271233758, 0.6825623320367284),\n (-0.9185767836497759, -1.5268489971713646, 0.6785930235919653),\n (0.1457760273522844, -0.07998849408343323, -1.3676256045186443))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n # test A_group modification\n indices, new_val = [0, 1, 2], 160\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.01997925208754263, 1.625852603711386, 0.708691800251658),\n (-0.009887200766722545, 0.3981406366172051, 0.6591605436173553),\n (-0.009887200766722545, -1.0035320949942117, 0.08532811033285048),\n (-0.009887200766722545, -1.0035320949942117, -1.3318949067033146),\n (-0.06544170263372645, 0.465450921128034, 1.7583444963656214),\n (0.8816445508645728, -1.5266076608055221, 0.44066647855559316),\n (-0.9026367129734817, -1.5270447308535111, 0.4366971701108293),\n 
(0.16171609802857856, -0.08018422776557976, -1.6095214579997799))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [1, 2, 5], 160\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-0.45549818019466204, 1.8548729964273216, 0.8440028131622062),\n (-0.2667929723517851, 0.6671106629415136, 1.42912314652022),\n (-0.2163066356464933, -0.45426196440936106, 0.30526758056697156),\n (1.3109140692843337, 0.4741705899686004, -0.12165329723035323),\n (-1.3557392716759613, 0.27771606050413156, -0.16203238949855803),\n (-0.2163066356464933, -1.8492005047245035, -0.34944907261899716),\n (-0.2163066356464933, -1.8492005047245035, -1.87604687202156),\n (-1.0601386155429, 0.3401156691690679, 2.122303234960202),\n (0.6302934527577109, 0.5164940342603479, 2.051815682570846),\n (1.143418340718557, 1.3271327629309078, 0.9043191341647172),\n (-1.5046641822171405, 0.8405156651772538, 0.6362234563562041),\n (-1.1248176985937233, -2.3816433802478305, -0.03815279071754074),\n (0.6330922017716909, -2.4415422695908298, 0.013011559357363423),\n (0.707681641272436, -1.4302805756837962, -2.2843133571390752),\n (-1.061876978104781, -1.2808214124615414, -2.27542464397285),\n (-0.30131566361820894, -2.876339919190297, -2.2463334380185054))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 5)\n\n indices, new_val = [5, 2, 1], 160\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((0.2917048572251579, -1.5727341554069034, -1.3423072397835754),\n (0.2917048572251579, -1.5727341554069034, -0.0048638500194817524),\n (0.2917048572251579, -0.06886266257406626, 0.5064553318371674),\n (-1.363795569744117, -0.1202634403830567, -0.28936363114537844),\n (1.2964570556359054, 0.04149003667864859, -0.508809719558267),\n (0.4099139249017979, 1.1367441270166645, 1.4588451220109844),\n (0.29481769872300884, 2.504661621457458, 0.7909713103796479),\n (1.1685736645928884, -2.0373473546555556, 0.47685945259484286),\n (-0.5312728539867155, -2.0767912763680947, 0.5278926826114716),\n (-1.2231052441089643, -1.4156454828005882, -0.6216441060907665),\n (1.4364524039686508, -0.9213654475865127, -0.6804052856633311),\n (1.3966722481626304, 1.107137467791805, 1.9397033126698722),\n (-0.33241474313836356, 1.0625526837349102, 2.2633130452338497),\n (-0.7009351031697479, 2.671307058557274, 0.3706911401148234),\n (1.0334518240640673, 2.6225101662569066, -0.007826505507309234),\n (0.474437928409419, 3.293432289151483, 1.52916604039102))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), 
new_val, 4)\n\n # test D_group modification\n indices, new_val = [0, 1, 2, 3], 98.7\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.751853407099498, 1.1325746654576616, 0.9630889493590222),\n (0.2705229494881336, 0.5773506493576217, 0.5667369568416694),\n (0.2705229494881336, -0.8243220822537951, -0.00709547644283548),\n (0.2705229494881336, -0.8243220822537951, -1.4243184934790005),\n (0.21496844352773511, 0.644660933955326, 1.6659209093777292),\n (1.1620546989441305, -1.347397649459273, 0.34824289518537266),\n (-0.6222265648794455, -1.3478347167170226, 0.3442735799250263),\n (0.4421262482834347, 0.09902578497483683, -1.7019450447754658))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [5, 2, 1, 0], 180\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((0.3034340517195509, -1.6113639549493641, -1.7901391417129255),\n (0.3034340517195509, -1.6113639549493641, -0.45269575194883194),\n (0.3034340517195509, -0.10749246211652697, 0.058623429907817215),\n (-1.3193844356755215, 0.6746571866866746, -0.30380395501671575),\n (1.3282593544657135, 0.581298860926198, -0.6678526090506967),\n (0.30343405171955073, -0.05040119820033895, 1.5985091447581203),\n (0.26233878444784786, 1.3540223173114139, 2.1955071424316666),\n (1.1803028491569083, -2.0759771588261957, 0.029027564277707585),\n (-0.5195436704231056, -2.115421071566818, 0.08006076790649397),\n (-1.414911803320983, 0.05150877481380545, -1.4915662613668217),\n (1.2907872270567131, 0.05736052141866721, -1.5046434284929022),\n (1.2266505257705096, -0.5178979180455376, 1.965811882691859),\n (-0.5283478351927398, -0.6406189828710822, 2.0028687871657294),\n (-0.6775241224477067, 1.8658969637383576, 1.9706253328328829),\n (1.0896028263747624, 1.9687229189733981, 1.8276430689661958),\n (0.35031987670665765, 1.2957313570336282, 3.285560142931404))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 5)\n\n # test groups modification types\n modification_type = 'groups'\n\n # test D_groups modification\n indices, new_val = [0, 1, 2, 3], 98.7\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.7692326765134374, 1.1252152574374596, 0.9810655314575423),\n (0.25314357064244697, 0.5699912505374165, 0.5847135445433043),\n (0.25314357064244697, -0.8316815836112654, 0.010881153979294123),\n (0.25314357064244697, -0.8316815836112654, -1.4063419471715688),\n (1.2326181278103254, 1.0755945976230115, 0.6133000157238186),\n (1.1446752957640132, -1.3547571699433192, 0.3662195585064876),\n (-0.6396059141384572, -1.3551941756763426, 0.3622501790547312),\n (0.4247468609767439, 0.09166629658280878, -1.6839684605765641))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n 
modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=xyz1, indices=[4, 1, 2, 3], mol=mol1),\n 176.7937925, 5)\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=[4, 1, 2, 3], mol=mol1),\n 279.5679938, 5)\n\n indices, new_val = [5, 2, 1, 0], 100\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((0.17617288317697363, -1.4263876505749937, -1.3907356765118228),\n (0.17617288317697363, -1.4263876505749937, -0.05329233131383648),\n (0.17617288317697363, 0.07748361087633482, 0.4580268316508156),\n (0.8541264407563205, 1.1799297944814306, -0.8464435250524343),\n (1.0315484892431994, 0.12891222316318918, 1.606136465715537),\n (-1.2415001838455297, 0.5175023395992786, 0.8716616732793354),\n (-2.371148423802697, -0.377635430276555, 0.3685473045279144),\n (1.0530416597996317, -1.8910009834245878, 0.42843102214143425),\n (-0.646804798256715, -1.930444842122042, 0.47946418053365614),\n (1.322524386187, 0.1392850561843193, -1.55769653865906),\n (1.5807657244329665, 0.9071634481807671, 1.3438012611373469),\n (-1.4308626545937098, 1.5181627982792263, 0.46103575662853813),\n (-1.3101730016766409, 0.6090291604729325, 1.9628224613881304),\n (-2.328405219901557, -1.376683205512397, 0.811273322532136),\n (-2.345556604764221, -0.47877786163003033, -0.7207928024513892),\n (-3.3382397150969996, 0.059047399283163715, 0.6394658008190603))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 5)\n\n indices, new_val = [4, 3, 1, 0], 236.02\n expected_xyz = {'symbols': ('C', 'C', 'O', 'C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 16, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-0.3420713780282814, -0.726846939196746, -1.8608060734620697),\n (-0.3420713780282814, -0.726846939196746, -0.33809952744080163),\n (-1.5199121786498575, -1.3903247017047589, 0.12046140490433599),\n (-0.3420713780282814, 0.692986716189357, 0.21142750813209843),\n (0.8346249371329908, 0.870417947793265, 1.130523629422891),\n (1.8415843350511496, 1.49899165752528, 0.8160475329621943),\n (-1.232802341934429, -0.22348356564525385, -2.2527724067647172),\n (0.5474409007790566, -0.2291658204558631, -2.2587884226234842),\n (-0.36650899336409903, -1.7525658745827613, -2.2443893713107435),\n (0.5235538883628821, -1.286773819894118, 0.03414982827280788),\n (-1.525486055520759, -2.2842579938670644, -0.2668197974505191),\n (-1.246930807816442, 0.9000033565709169, 0.7927934676101465),\n (-0.26242043164905693, 1.4290013064896112, -0.5956842516835208),\n (0.739203033547077, 0.4163114365921572, 2.132044487804084))}\n new_xyz = converter.modify_coords(coords=xyz4, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol4)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol4), new_val, 5)\n\n # test 1-indexed input\n indices = [5, 4, 
2, 1]\n new_xyz = converter.modify_coords(coords=xyz4, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol4, index=1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol4, index=1),\n new_val, 5)\n\n # test TSs\n indices = [19, 10, 4, 2]\n fragments = [[46, 47, 48, 49, 50, 51, 52], [f + 1 for f in range(45)]]\n self.assertAlmostEqual(calculate_dihedral_angle(coords=xyz5, torsion=indices, index=1), 56.83358841, 3)\n new_xyz = converter.modify_coords(coords=xyz5,\n indices=indices,\n new_value=300,\n modification_type='groups',\n mol=mol5,\n index=1,\n fragments=fragments,\n )\n self.assertAlmostEqual(calculate_dihedral_angle(coords=new_xyz, torsion=indices, index=1), 300, places=3)\n\n indices = [1, 2, 3, 5]\n fragments = [[f + 1 for f in range(23)], [24, 25, 26, 27, 28]]\n self.assertAlmostEqual(calculate_dihedral_angle(coords=xyz6, torsion=indices, index=1), 62.30597206, 3)\n new_xyz = converter.modify_coords(coords=xyz6,\n indices=indices,\n new_value=200,\n modification_type='groups',\n mol=mol6,\n index=1,\n fragments=fragments,\n )\n self.assertAlmostEqual(calculate_dihedral_angle(coords=new_xyz, torsion=indices, index=1), 200, places=3)\n \n coords={'coords': ((-0.7862825353221515, -0.28824023055636216, 0.4782944637692894),\n (0.21968869054702736, 0.40094256193652866, -0.2919820499085219),\n (-0.07796443595084417, 0.5692847962524797, -1.6621913220858304),\n (-1.102200211589376, -1.1132157833188596, -0.01879031191901484),\n (-1.5973749070505925, 0.29546848172306867, 0.6474145668621136),\n (0.4237940503863438, 1.3660724867336205, 0.19101403432872205),\n (1.1352054736534014, -0.1980893380251006, -0.2652264470061931),\n (-0.7497944593402266, 1.258221857416732, -1.7507029654486272)),\n 'isotopes': (14, 12, 16, 1, 1, 1, 1, 1),\n 'symbols': ('N', 'C', 'O', 'H', 'H', 'H', 'H', 'H')}\n indices=[3, 0, 1, 2]\n new_value=53.76\n modification_type=\"groups\"\n mol=Molecule(smiles=\"NCO\")\n new_xyz = converter.modify_coords(coords=coords,\n indices=indices,\n new_value=new_value,\n modification_type=modification_type,\n mol=mol)\n self.assertTrue(type(new_xyz[\"coords\"][0][0] is float))", "def coordinates(self, indices):\n\t\t\n\t\t# We expect a tuple\n\t\tif not type(indices) == tuple:\n\t\t\tindices = tuple((indices,))\n\n\t\tcoordinates = {}\n\t\tfor coordinate in self.coordinates_mapping.keys():\n\t\t\tcoordinate_variable = self.variable.group.variables[self.coordinates_mapping[coordinate]['variable']]\n\t\t\tcoordinate_mapping = self.coordinates_mapping[coordinate]['map']\n\t\t\t\n\n\t\t\tslice_list = []\n\t\t\tfailed = False\n\t\t\tfor index in coordinate_mapping:\n\t\t\t\t\n\t\t\t\t# Skip an None indices\n\t\t\t\tif indices[index] == None:\n\t\t\t\t\tfailed = True\n\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\tslice_list.append(slice(indices[index], indices[index]+1))\n\t\t\t\n\t\t\t# Only get coordinates where we had valid indices\n\t\t\tif not failed:\n\t\t\t\tcoordinates[coordinate] = (coordinate_variable[slice_list].flatten()[0], coordinate_variable.get_attribute('units'))\n\n\t\t# Try and convert time coordinates to real datetimes\n\t\tfor name, coordinate in coordinates.items():\n\n\t\t\tif name == 'time':\n\t\t\t\ttry:\n\t\t\t\t\tdate = netCDF4.num2date(*coordinate)\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tcoordinates[name] = (date,'')\t\n\t\t\n\t\treturn coordinates", "def make_coordinate_combinations(lat=None, lon=None, alt=None, 
time=None):\n\n # make the 1D coordinates\n if lat is None:\n lat = ArrayCoordinates1d([0, 1, 2], name=\"lat\")\n if lon is None:\n lon = ArrayCoordinates1d([2, 4, 6], name=\"lon\")\n if alt is None:\n alt = ArrayCoordinates1d([6, 9, 12], name=\"alt\")\n if time is None:\n time = ArrayCoordinates1d([\"2018-01-01\", \"2018-02-01\", \"2018-03-01\"], name=\"time\")\n\n d = dict([(\"lat\", lat), (\"lon\", lon), (\"alt\", alt), (\"tim\", time)])\n\n dims_list = get_dims_list()\n\n # make the stacked coordinates\n for dim in [dim for dims in dims_list for dim in dims if \"_\" in dim]:\n cs = [d[k] for k in dim.split(\"_\")]\n if any(c.size != cs[0].size for c in cs):\n continue # can't stack these\n d[dim] = StackedCoordinates(cs)\n\n # make the ND coordinates\n coord_collection = OrderedDict()\n for dims in dims_list:\n if any(dim not in d for dim in dims):\n continue\n coord_collection[dims] = Coordinates([d[dim] for dim in dims])\n return coord_collection", "def interpolate_to_cartesian(\n self, nX=300, nY=300, nZ=100, *, use_float32=True, fill_value=np.nan\n ):\n ds = self.data\n ds = ds.bout.add_cartesian_coordinates()\n\n if not isinstance(use_float32, bool):\n raise ValueError(f\"use_float32 must be a bool, got '{use_float32}'\")\n if use_float32:\n float_type = np.float32\n ds = ds.astype(float_type)\n for coord in ds.coords:\n # Coordinates are not converted by Dataset.astype, so convert explicitly\n ds[coord] = ds[coord].astype(float_type)\n fill_value = float_type(fill_value)\n else:\n float_type = ds[ds.data_vars[0]].dtype\n\n tdim = ds.metadata[\"bout_tdim\"]\n zdim = ds.metadata[\"bout_zdim\"]\n if tdim in ds.dims:\n nt = ds.sizes[tdim]\n n_toroidal = ds.sizes[zdim]\n\n # Create Cartesian grid to interpolate to\n Xmin = ds[\"X_cartesian\"].min()\n Xmax = ds[\"X_cartesian\"].max()\n Ymin = ds[\"Y_cartesian\"].min()\n Ymax = ds[\"Y_cartesian\"].max()\n Zmin = ds[\"Z_cartesian\"].min()\n Zmax = ds[\"Z_cartesian\"].max()\n newX_1d = xr.DataArray(np.linspace(Xmin, Xmax, nX), dims=\"X\")\n newX = newX_1d.expand_dims({\"Y\": nY, \"Z\": nZ}, axis=[1, 2])\n newY_1d = xr.DataArray(np.linspace(Ymin, Ymax, nY), dims=\"Y\")\n newY = newY_1d.expand_dims({\"X\": nX, \"Z\": nZ}, axis=[0, 2])\n newZ_1d = xr.DataArray(np.linspace(Zmin, Zmax, nZ), dims=\"Z\")\n newZ = newZ_1d.expand_dims({\"X\": nX, \"Y\": nY}, axis=[0, 1])\n newR = np.sqrt(newX**2 + newY**2)\n newzeta = np.arctan2(newY, newX)\n # Define newzeta in range 0->2*pi\n newzeta = np.where(newzeta < 0.0, newzeta + 2.0 * np.pi, newzeta)\n\n from scipy.interpolate import (\n RegularGridInterpolator,\n griddata,\n )\n\n # Create Cylindrical coordinates for intermediate grid\n Rcyl_min = float_type(ds[\"R\"].min())\n Rcyl_max = float_type(ds[\"R\"].max())\n Zcyl_min = float_type(ds[\"Z\"].min())\n Zcyl_max = float_type(ds[\"Z\"].max())\n n_Rcyl = int(round(nZ * (Rcyl_max - Rcyl_min) / (Zcyl_max - Zcyl_min)))\n Rcyl = xr.DataArray(np.linspace(Rcyl_min, Rcyl_max, 2 * n_Rcyl), dims=\"r\")\n Zcyl = xr.DataArray(np.linspace(Zcyl_min, Zcyl_max, 2 * nZ), dims=\"z\")\n\n # Create Dataset for result\n result = xr.Dataset()\n result.attrs[\"metadata\"] = ds.metadata\n\n # Interpolate in two stages for efficiency. Unstructured 3d interpolation is\n # very slow. 
Unstructured 2d interpolation onto Cartesian (R, Z) grids, followed\n # by structured 3d interpolation onto the (X, Y, Z) grid, is much faster.\n # Structured 3d interpolation straight from (psi, theta, zeta) to (X, Y, Z)\n # leaves artifacts in the output, because theta does not vary continuously\n # everywhere (has branch cuts).\n\n zeta_out = np.zeros(n_toroidal + 1)\n zeta_out[:-1] = ds[zdim].values\n zeta_out[-1] = zeta_out[-2] + ds[\"dz\"].mean()\n\n def interp_single_time(da):\n print(\" interpolate poloidal planes\")\n\n da_cyl = da.bout.interpolate_from_unstructured(R=Rcyl, Z=Zcyl).transpose(\n \"R\", \"Z\", zdim, missing_dims=\"ignore\"\n )\n\n if zdim not in da_cyl.dims:\n da_cyl = da_cyl.expand_dims({zdim: n_toroidal + 1}, axis=-1)\n else:\n # Impose toroidal periodicity by appending zdim=0 to end of array\n da_cyl = xr.concat((da_cyl, da_cyl.isel({zdim: 0})), zdim)\n\n print(\" build 3d interpolator\")\n interp = RegularGridInterpolator(\n (Rcyl.values, Zcyl.values, zeta_out),\n da_cyl.values,\n bounds_error=False,\n fill_value=fill_value,\n )\n\n print(\" do 3d interpolation\")\n return interp(\n (newR, newZ, newzeta),\n method=\"linear\",\n )\n\n for name, da in ds.data_vars.items():\n print(f\"\\ninterpolating {name}\")\n # order of dimensions does not really matter here - output only depends on\n # shape of newR, newZ, newzeta. Possibly more efficient to assign the 2d\n # results in the loop to the last two dimensions, so put zeta first. Can't\n # just use da.min().item() here (to get a scalar value instead of a\n # zero-size array) because .item() doesn't work for dask arrays (yet!).\n\n datamin = float_type(da.min().values)\n datamax = float_type(da.max().values)\n\n if tdim in da.dims:\n data_cartesian = np.zeros((nt, nX, nY, nZ), dtype=float_type)\n for tind in range(nt):\n print(f\" tind={tind}\")\n data_cartesian[tind, :, :, :] = interp_single_time(\n da.isel({tdim: tind})\n )\n result[name] = xr.DataArray(data_cartesian, dims=[tdim, \"X\", \"Y\", \"Z\"])\n else:\n data_cartesian = interp_single_time(da)\n result[name] = xr.DataArray(data_cartesian, dims=[\"X\", \"Y\", \"Z\"])\n\n # Copy metadata to data variables, in case it is needed\n result[name].attrs[\"metadata\"] = ds.metadata\n\n result = result.assign_coords(X=newX_1d, Y=newY_1d, Z=newZ_1d)\n\n return result", "def read_multiple_coordinates(fragmentnames):\n fragdict = {}\n for name in fragmentnames:\n path = name + '/'\n cell, pos = read_coordinates(path)\n atomlist = frac_to_cart(cell, pos)\n atomdict = {}\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n fragdict[name] = atomlist\n return fragdict", "def coords_to_dict(self, coords_list, translate=True):\n contact_points_list = []\n contact_points_dict = {\n 'urdf_file': self.urdf_file,\n 'contact_points': []}\n for coords in coords_list:\n if translate:\n coords = coords.copy_worldcoords().translate(\n self.object_center, 'world')\n pose = np.concatenate(\n [coords.T()[:3, 3][None, :],\n coords.T()[:3, :3]]).tolist()\n contact_points_list.append(pose)\n contact_points_dict['contact_points'] = contact_points_list\n return contact_points_dict", "def xdscoordinates():\n # Coordinate sensitive to length of vectors, so need to ensure that\n # lengths of both s0 and s1 are equal\n coords = {\n \"s0\": (0.013141995425357206, 0.002199999234194632, 1.4504754950989514),\n \"s1\": (-0.01752795848400313, -0.24786554213968193, 1.4290948735525306),\n \"m2\": (0.999975, -0.001289, -0.006968),\n \"phi\": 5.83575672475 * math.pi / 180,\n }\n coords[\"cs\"] = 
CoordinateSystem(\n coords[\"m2\"], coords[\"s0\"], coords[\"s1\"], coords[\"phi\"]\n )\n return coords", "def test_xyz_to_coords_list(self):\n xyz_dict = {'symbols': ('O', 'N', 'C', 'H', 'H'),\n 'isotopes': (16, 14, 12, 1, 1),\n 'coords': ((1.1746411, -0.15309781, 0.0),\n (0.06304988, 0.35149648, 0.0),\n (-1.12708952, -0.11333971, 0.0),\n (-1.93800144, 0.60171738, 0.0),\n (-1.29769464, -1.18742971, 0.0))}\n coords = converter.xyz_to_coords_list(xyz_dict)\n expected_coords = [[1.1746411, -0.15309781, 0.0],\n [0.06304988, 0.35149648, 0.0],\n [-1.12708952, -0.11333971, 0.0],\n [-1.93800144, 0.60171738, 0.0],\n [-1.29769464, -1.18742971, 0.0]]\n self.assertEqual(coords, expected_coords)", "def get_cartesian_coords(self):\n r = 1\n dec = self.dec + 90\n x = r * math.sin(np.deg2rad(dec)) * math.cos(np.deg2rad(self.ra))\n y = r * math.sin(np.deg2rad(dec)) * math.sin(np.deg2rad(self.ra))\n z = r * math.cos(np.deg2rad(dec))\n\n return [x, y, z]", "def _get_coords(p_coords):\n l_ret = CoordinateInformation()\n if isinstance(p_coords, list):\n l_list = p_coords\n else:\n l_list = p_coords.strip('\\[\\]')\n l_list = l_list.split(',')\n try:\n l_ret.X_Easting = float(l_list[0])\n l_ret.Y_Northing = float(l_list[1])\n l_ret.Z_Height = float(l_list[2])\n except Exception as e_err:\n print('Error {}'.format(e_err))\n l_ret.X_Easting = 0.0\n l_ret.Y_Northing = 0.0\n l_ret.Z_Height = 0.0\n return l_ret", "def load_grd(filename):\n with open(filename, 'r') as f:\n meta = {}\n meta['header'] = []\n meta['header'].append(f.readline().rstrip('\\n'))\n while meta['header'][-1] != '++++':\n meta['header'].append(f.readline().rstrip('\\n'))\n # These determine the type of grid and the field format.\n meta['KTYPE'] = int(f.readline().split()[0])\n if meta['KTYPE'] != 1:\n raise ValueError(\"Not implemented.\")\n meta['NSET'], meta['ICOMP'], meta['NCOMP'], meta['IGRID'] = [int(s) for s in f.readline().split()]\n # The grid center in units of the x and y grid spacing.\n meta['IX'], meta['IY'] = [int(s) for s in f.readline().split()]\n # These are the x and y grid limits: S is lower, and E is upper.\n meta['XS'], meta['YS'], meta['XE'], meta['YE'] = [float(s) for s in f.readline().split()]\n # These are the numbers of grid points in x and y.\n meta['NX'], meta['NY'], meta['KLIMIT'] = [int(s) for s in f.readline().split()]\n # Implement this to read elliptically truncated grids.\n if meta['KLIMIT'] != 0:\n raise ValueError(\"Not implemented.\")\n # Load the field data. 
This returns an array with shape (NX * NY, 2 * NCOMP).\n conv = dict([(column, string_to_float) for column in range(2 * meta['NCOMP'])])\n data = np.loadtxt(f, dtype=float, converters=conv)\n # Determine the grid spacing and center values.\n meta['DX'] = (meta['XE'] - meta['XS']) / (meta['NX'] - 1)\n meta['DY'] = (meta['YE'] - meta['YS']) / (meta['NY'] - 1)\n meta['XCEN'] = meta['DX'] * meta['IX']\n meta['YCEN'] = meta['DY'] * meta['IY']\n # Reshape the data.\n map = np.empty((meta['NX'], meta['NY'], meta['NCOMP']),\n dtype=np.complex)\n for component in range(meta['NCOMP']):\n column = data[:, 2 * component] + 1j * data[:, 2 * component + 1]\n map[:, :, component] = column.reshape(meta['NX'], meta['NY'], order='F')\n return meta, map", "def spherical_to_cartesian(r, lat, lon):\n import math\n\n if np.isscalar(r) and np.isscalar(lat) and np.isscalar(lon):\n x = r * math.cos(lat) * math.cos(lon)\n y = r * math.cos(lat) * math.sin(lon)\n z = r * math.sin(lat)\n else:\n x = r * np.cos(lat) * np.cos(lon)\n y = r * np.cos(lat) * np.sin(lon)\n z = r * np.sin(lat)\n\n return x, y, z", "def generate_coordinates(coords):\n x = coords.reshape(-1, 1).repeat(1, len(coords) * len(coords)).flatten()\n y = coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords))\n z = coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords))\n\n return x, y, z", "def convert_pose(coordinates, cartesian=True):\n if not cartesian:\n coordinates = convert(coordinates)\n else:\n coordinates[:, 0] = normalize(\n coordinates[:, 0].reshape(1, -1), norm='max')\n coordinates[:, 1] = normalize(\n coordinates[:, 1].reshape(1, -1), norm='max')\n\n return coordinates", "def zzX_to_dict(f):\n if poly_univariate_p(f):\n return zzx_to_dict(f)\n\n n, result = zzX_degree(f), {}\n\n for i in xrange(0, n+1):\n h = zzX_to_dict(f[n-i])\n\n for exp, coeff in h.iteritems():\n if type(exp) is not tuple:\n exp = (exp,)\n\n result[(i,)+exp] = coeff\n\n return result", "def extended_xyz_parse(xyz_d):\n \n s_properties = ['rot_A', \n 'rot_B', \n 'rot_C', \n 'dipole', \n 'polarizability', \n 'homo', \n 'lumo', \n 'band_gap', \n 'ese', \n 'zpe', \n 'u_0K', \n 'u_298.15K', \n 'h_298.15K', \n 'f_298.15K', \n 'cp_298.15K']\n\n mol_properties = {}\n\n\n lines = xyz_d.replace('*^','e').splitlines()\n \n r_no_atoms = lines[0]\n no_atoms = int(r_no_atoms)\n\n r_scalars = lines[1]\n mol_id = r_scalars.split()[:2]\n scalar_properties = np.array(r_scalars.split()[2:], np.float32)\n\n r_mcoords = lines[2:2+no_atoms]\n symbols = [m.split()[0] for m in r_mcoords]\n coords = np.array([m.split()[1:4] for m in r_mcoords], dtype=np.float32)\n \n charges = np.array([m.split()[4] for m in r_mcoords], dtype=np.float32)\n\n r_vibfreqs = lines[2+ no_atoms]\n vib_freqs = np.array([float(freq) for freq in r_vibfreqs.split()], dtype=np.float32)\n\n smiles = lines[3+no_atoms].split()\n inchi = lines[4+no_atoms].split()\n\n mol_properties['no_atoms'] = no_atoms\n mol_properties['mol_id'] = mol_id\n \n for i, p in enumerate(s_properties):\n mol_properties[p] = scalar_properties[i]\n\n mol_properties['symbols'] = symbols\n mol_properties['coords'] = coords\n mol_properties['charges'] = charges\n mol_properties['vib_freqs'] = vib_freqs\n mol_properties['smiles'] = smiles\n mol_properties['inchi'] = inchi\n \n return mol_properties", "def cartesian_structure(eur_dict):\n conversion_dict = {}\n for currency in config.CURRENCY_CODES:\n conversion_dict[currency] = {}\n if currency == 'EUR':\n conversion_dict[currency] = eur_dict\n continue\n for currency_nested 
in config.CURRENCY_CODES:\n if currency_nested == 'EUR':\n conversion_dict[currency][currency_nested] = round(1 / eur_dict[currency], config.ROUND_ACCURACY)\n elif currency_nested == currency:\n conversion_dict[currency][currency_nested] = 1.0\n else:\n conversion_dict[currency][currency_nested] = round(1 / eur_dict[currency] * eur_dict[currency_nested],\n config.ROUND_ACCURACY)\n return conversion_dict", "def test_xyz_from_data(self):\n symbols = ('C', 'H', 'H', 'H', 'H')\n isotopes = (12, 1, 1, 1, 1)\n coords = ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))\n xyz_dict0 = converter.xyz_from_data(coords=coords, symbols=symbols, isotopes=isotopes)\n self.assertEqual(xyz_dict0, self.xyz1['dict'])\n xyz_dict1 = converter.xyz_from_data(coords=coords, symbols=symbols) # no specifying isotopes\n self.assertEqual(xyz_dict1, self.xyz1['dict'])\n\n numbers = [6, 1, 1, 1, 1]\n coords = [[0.0, 0.0, 0.0],\n [0.6300326, 0.6300326, 0.6300326],\n [-0.6300326, -0.6300326, 0.6300326],\n [-0.6300326, 0.6300326, -0.6300326],\n [0.6300326, -0.6300326, -0.6300326]]\n xyz_dict2 = converter.xyz_from_data(coords=coords, numbers=numbers)\n self.assertEqual(xyz_dict2, self.xyz1['dict'])\n\n numbers = [6, 1, 1, 1, 1]\n coords = [[0.0, 0.0, 0.0],\n [0.6300326, 0.6300326, 0.6300326],\n [-0.6300326, -0.6300326, 0.6300326],\n [-0.6300326, 0.6300326, -0.6300326],\n [0.6300326, -0.6300326, -0.6300326]]\n coords = np.array([np.array(coord, np.float64) for coord in coords], np.float64)\n xyz_dict2 = converter.xyz_from_data(coords=coords, numbers=numbers)\n self.assertEqual(xyz_dict2, self.xyz1['dict'])\n self.assertIsInstance(xyz_dict2['coords'], tuple)\n self.assertIsInstance(xyz_dict2['coords'][0], tuple)", "def get_multiple_coords(coord_fnames, edges_file=None, serial=False):\n # start with empty data structures\n coord_header = []\n coords = []\n edges = []\n\n # load predetermined edges if they were passed to us\n if not edges_file is None:\n edges = [ln.strip().split() for ln in open(edges_file,'U').readlines()]\n\n # load all coords files into same data matrix\n for i,f in enumerate(coord_fnames):\n try:\n coord_f = open(coord_fnames[i], 'U').readlines()\n except (TypeError, IOError):\n raise MissingFileError, 'Coord file required for this analysis'\n coord_header_i, coords_i, eigvals_i, pct_var_i = parse_coords(coord_f)\n sampleIDs = coord_header_i\n # append _i to this file's sampleIDs unless we have predetermined edges\n if edges_file is None:\n coord_header_i = ['%s_%d' %(h,i) for h in coord_header_i]\n\n # get eigvals, pct_var from first coords file\n if i==0:\n eigvals = eigvals_i\n pct_var = pct_var_i\n coord_header = coord_header_i\n coords = coords_i\n # for second, third, etc coords files, just append to first file\n else:\n coord_header.extend(coord_header_i)\n coords = vstack((coords,coords_i))\n # add all edges unless we have predetermined edges\n if edges_file is None:\n for _id in sampleIDs:\n if serial:\n for i in xrange(len(coord_fnames)-1):\n # edges go from one set to the next\n edges += [('%s_%d' %(_id,i), '%s_%d' %(_id,i+1))]\n else:\n for i in xrange(1,len(coord_fnames)):\n # edges go from first file's points to other files' points\n edges += [('%s_%d' %(_id,0), '%s_%d' %(_id,i))]\n\n return edges, [coord_header, coords, eigvals, pct_var, None, None]", "def get_cartesian_coordinates(station=None):\n\n coordinates = \\\n {\"GB OVLBI\": ( 884084.2636, -4924578.7481, 
3943734.3354),\n \"DSS 12\": (-2350443.812, -4651980.837, +3665630.988),\n \"Echo\": (-2350443.812, -4651980.837, +3665630.988),\n \"DSS 13\": (-2351112.491, -4655530.714, +3660912.787),\n \"Venus\": (-2351112.491, -4655530.714, +3660912.787),\n \"DSS 14\": (-2353621.251, -4641341.542, +3677052.370),\n \"Mars\": (-2353621.251, -4641341.542, +3677052.370),\n \"DSS 15\": (-2353538.790, -4641649.507, +3676670.043), \\\n \"DSS 16\": (-2354763.158, -4646787.462, +3669387.069), \\\n \"DSS 17\": (-2354730.357, -4646751.776, +3669440.659), \\\n \"DSS 21\": (-2350000., -4700000. , +3700000. ), \\\n \"DSS 22\": (-2350000., -4700000. , +3700000. ), \\\n \"DSS 23\": (-2354757.567, -4646934.675, +3669207.824), \\\n \"DSS 24\": (-2354906.495, -4646840.128, +3669242.317), \\\n \"DSS 25\": (-2355022.066, -4646953.636, +3669040.895), \\\n \"DSS 26\": (-2354890.967, -4647166.925, +3668872.212), \\\n \"DSS 27\": (-2349915.260, -4656756.484, +3660096.529), \\\n \"DSS 28\": (-2350101.849, -4656673.447, +3660103.577), \\\n \"DSS 32\": (-1666531.345, +5209373.6709,-3270605.479), \\\n \"DSS 33\": (-4461083.514, +2682281.745, -3674570.392), \\\n \"DSS 34\": (-4461146.756, +2682439.293, -3674393.542),\n \"DSS 35\": (-4461273.084, +2682568.922, -3674152.089),\n \"DSS 36\": (-4461168.415, +2682814.657, -3674083.901),\n \"DSS 42\": (-4460981.016, +2682413.525, -3674582.072), \\\n \"DSS 43\": (-4460894.585, +2682361.554, -3674748.580), \\\n \"DSS 45\": (-4460935.250, +2682765.710, -3674381.402), \\\n \"DSS 46\": (-4460828.619, +2682129.556, -3674975.508), \\\n \"Parkes\": (-4554231.843, +2816758.983, -3454036.065), \\\n \"DSS 48\": (-4554231.843, +2816758.983, -3454036.065), \\\n \"DSS 53\": (+4849330.129, -360338.092, +4114758.766), \\\n \"DSS 54\": (+4849434.555, -360724.108, +4114618.643), \\\n \"DSS 55\": (+4849525.318, -360606.299, +4114494.905), \\\n \"DSS 61\": (+4849245.211, -360278.166, +4114884.445), \\\n \"DSS 62\": (+4846692.106, -370171.532, +4116842.926), \\\n \"DSS 63\": (+4849092.647, -360180.569, +4115109.113), \\\n \"DSS 65\": (+4849336.730, -360488.859, +4114748.775), \\\n \"DSS 66\": (+4849148.543, -360474.842, +4114995.021), \\\n \"MIL 71\": (0,0,0),\n \"DSS 74\": (0,0,0),\n \"DSS 83\": (0,0,0),\n \"DSS 84\": (0,0,0),\n \"DSS 95\": (0,0,0)}\n if type(station) == int:\n try:\n return coordinates[\"DSS %2d\" % station]\n except:\n module_logger.error(\"DSS %2d is not known\", station, exc_info=True)\n return None\n elif type(station) == str:\n try:\n return coordinates[station]\n except:\n module_logger.error(\"Invalid DSS ID: %s\", station, exc_info=True)\n return None\n else:\n return coordinates", "def HexahedralProjection(c1=(0,0,0), c2=(2,0,0), c3=(2,2,0), c4=(0,2,0.),\n c5=(0,1.8,3.), c6=(0.2,0,3.), c7=(2,0.2,3.), c8=(1.8,2,3.), points=None, npoints=6, equally_spaced=True):\n\n if points is None or not isinstance(points,np.ndarray):\n if not isinstance(c1,tuple) or not isinstance(c2,tuple) or not isinstance(c3,tuple) or not isinstance(c4,tuple) or \\\n not isinstance(c5,tuple) or not isinstance(c6,tuple) or not isinstance(c7,tuple) or not isinstance(c8,tuple):\n raise ValueError(\"coordinates should be given in tuples of two elements (x,y,z)\")\n else:\n c1 = np.array(c1); c2 = np.array(c2); c3 = np.array(c3); c4 = np.array(c4)\n c5 = np.array(c5); c6 = np.array(c6); c7 = np.array(c7); c8 = np.array(c8)\n opoints = np.vstack((c1,c2,c3,c4,c5,c6,c7,c8))\n else:\n opoints = points\n\n from Florence.FunctionSpace import Hex, HexES\n from Florence.QuadratureRules.GaussLobattoPoints import 
GaussLobattoPointsHex\n from Florence.QuadratureRules.EquallySpacedPoints import EquallySpacedPoints\n\n npoints = int(npoints)\n if npoints ==0: npoints=1\n\n if equally_spaced:\n points = EquallySpacedPoints(ndim=4,C=npoints-1)\n hpBases = HexES.Lagrange\n else:\n points = GaussLobattoPointsHex(npoints-1)\n hpBases = Hex.LagrangeGaussLobatto\n\n BasesHex = np.zeros((8,points.shape[0]),dtype=np.float64)\n for i in range(points.shape[0]):\n BasesHex[:,i] = hpBases(0,points[i,0],points[i,1],points[i,2],arrange=1)[:,0]\n\n node_arranger = NodeArrangementHex(npoints-1)[2]\n\n hmesh = Mesh()\n hmesh.Cube(lower_left_rear_point=(-1.,-1.,-1.), side_length=2, n=npoints, element_type=\"hex\")\n hexes = hmesh.elements\n\n # nnode = hmesh.nnode\n # nelem = hmesh.nelem\n # nsize = int((npoints+1)**3)\n\n mesh = Mesh()\n mesh.points = np.dot(BasesHex.T, opoints)\n\n _, inv = np.unique(hexes,return_inverse=True)\n sorter = np.argsort(node_arranger)\n mesh.elements = sorter[inv].reshape(hexes.shape)\n\n mesh.element_type=\"hex\"\n mesh.nelem = mesh.elements.shape[0]\n mesh.nnode = mesh.points.shape[0]\n mesh.GetBoundaryFaces()\n mesh.GetBoundaryEdges()\n\n return mesh", "def geodetic_to_grid(self, latitude, longitude):\n\n φ = math.radians(latitude)\n λ = math.radians(longitude)\n\n φ_star = φ - math.sin(φ) * math.cos(φ) * (self.A +\n self.B * math.sin(φ) ** 2 +\n self.C * math.sin(φ) ** 4 +\n self.D * math.sin(φ) ** 6)\n\n δλ = λ - self.λ0\n ξ_prim = math.atan(math.tan(φ_star) / math.cos(δλ))\n η_prim = math.atanh(math.cos(φ_star) * math.sin(δλ))\n\n x = self.k0 * self.â * (ξ_prim +\n self.β1 * math.sin(2 * ξ_prim) * math.cosh(2 * η_prim) +\n self.β2 * math.sin(4 * ξ_prim) * math.cosh(4 * η_prim) +\n self.β3 * math.sin(6 * ξ_prim) * math.cosh(6 * η_prim) +\n self.β4 * math.sin(8 * ξ_prim) * math.cosh(8 * η_prim)) + self.fn\n\n y = self.k0 * self.â * (η_prim +\n self.β1 * math.cos(2 * ξ_prim) * math.sinh(2 * η_prim) +\n self.β2 * math.cos(4 * ξ_prim) * math.sinh(4 * η_prim) +\n self.β3 * math.cos(6 * ξ_prim) * math.sinh(6 * η_prim) +\n self.β4 * math.cos(8 * ξ_prim) * math.sinh(8 * η_prim)) + self.fe\n\n return x, y", "def get_spec_points(spectre_file):\n observation_id = list(spectre_file['element_data.vol'].keys())[0]\n coords_dict = dict(spectre_file['element_data.vol'][observation_id])\n\n components = ['InertialCoordinates_x', 'InertialCoordinates_y', 'InertialCoordinates_z']\n dim = len(components)\n\n coords = [[], [], []]\n\n for i,component in enumerate(components):\n coords[i] = np.array(coords_dict[component])\n coords = np.asarray(coords)\n return np.transpose(coords)\n # return np.transpose(np.array([np.concatenate(x) for x in coords]))", "def cartesian_decoder(coord, r_E=6371):\n def _to_deg(rad):\n return rad * 180. / np.pi\n\n x, y, z = coord[:, 0], coord[:, 1], coord[:, 2]\n\n theta = np.arcsin(z / r_E)\n phi = np.arctan(y / x)\n\n # Convert to degrees. 
Longitudes, are bound between -90;90 in decode step, so correct in 3 and 4th quadrant of x-y plane (Asia)\n lat = _to_deg(theta)\n lon = _to_deg(phi) - 180 * ((x < 0) * (y < 0)) + 180 * ((x < 0) * (y > 0))\n\n return np.concatenate([lat.reshape(-1, 1), lon.reshape(-1, 1)], axis=1)", "def convert_coordinates(self, coordinates):\n return np.array(zip(*self.basemap(*zip(*coordinates))))", "def get_coordinates_genes(path: str = \"\", data_files: dict = {}):\n\n essential_coordinates = {}\n\n # Get position genes\n if \"gff3\" in data_files:\n file_path = os.path.join(path, data_files[\"gff3\"])\n gene_coordinates = gene_position(file_path)\n else:\n raise ValueError(\"gff3 type not found in data\")\n\n # Get all annotated essential genes\n if \"essential_genes\" in data_files:\n file_path = os.path.join(path, data_files[\"essentials\"])\n with open(file_path, \"r\") as f:\n genes = f.readlines()[1:]\n for gene in genes:\n name = gene.strip(\"\\n\")\n essential_coordinates[name] = gene_coordinates.get(name).copy()\n else:\n raise ValueError(\"essentials not found in data\")\n\n # Get aliases of all genes\n if \"gene_names\" in data_files:\n file_path = os.path.join(path, \"Yeast_Protein_Names.txt\")\n aliases_designation = gene_aliases(file_path)[0] #'YMR056C' \\ ['AAC1'], ...\n else:\n raise ValueError(\"gene_names not found in data\")\n\n return essential_coordinates, aliases_designation", "def get_2d_cartesian_grid(num_pts_1d, ranges):\n # from math_tools_cpp import cartesian_product_double as cartesian_product\n from PyDakota.math_tools import cartesian_product\n x1 = np.linspace(ranges[0], ranges[1], num_pts_1d)\n x2 = np.linspace(ranges[2], ranges[3], num_pts_1d)\n abscissa_1d = []\n abscissa_1d.append(x1)\n abscissa_1d.append(x2)\n grid = cartesian_product(abscissa_1d, 1)\n return grid", "def load_n3d_coords(file_path): \n \n import core.nuc_io as io\n\n seq_pos_dict = {}\n coords_dict = {} \n \n with io.open_file(file_path) as file_obj:\n chromo = None\n \n for line in file_obj:\n \n data = line.split()\n n_items = len(data)\n \n if not n_items:\n continue\n \n elif data[0] == '#':\n continue\n \n elif n_items == 3:\n chromo, n_coords, n_models = data\n \n #if chromo.lower()[:3] == 'chr':\n # chromo = chromo[3:]\n \n if chromo in coords_dict:\n raise Exception('Duplicate chromosome \"%s\" records in file %s' % (chromo, file_path))\n \n n_coords = int(n_coords)\n n_models = int(n_models)\n \n chromo_seq_pos = np.empty(n_coords, int)\n chromo_coords = np.empty((n_models, n_coords, 3), float)\n \n coords_dict[chromo] = chromo_coords\n seq_pos_dict[chromo] = chromo_seq_pos\n \n check = (n_models * 3) + 1\n i = 0\n \n elif not chromo:\n raise Exception('Missing chromosome record in file %s' % file_path)\n \n elif n_items != check:\n msg = 'Data size in file %s does not match Position + Models * Positions * 3'\n raise Exception(msg % file_path)\n \n else:\n chromo_seq_pos[i] = int(data[0])\n \n coord = [float(x) for x in data[1:]]\n coord = np.array(coord).reshape(n_models, 3)\n chromo_coords[:,i] = coord\n i += 1\n \n return seq_pos_dict, coords_dict", "def lon_lat_to_cartesian(lon, lat, R = 1):\n lon_r = np.radians(lon)\n lat_r = np.radians(lat)\n\n x = R * np.cos(lat_r) * np.cos(lon_r)\n y = R * np.cos(lat_r) * np.sin(lon_r)\n z = R * np.sin(lat_r)\n return x,y,z", "def make_ref_coords_nsamples(coords_dict, n_voxels_approx = 100000):\n min_coords = np.zeros((len(coords_dict), 3))\n max_coords = np.zeros((len(coords_dict), 3))\n index=0\n for i_data, coords in coords_dict.items():\n 
min_coords[index, :] = coords.min(axis=0)\n max_coords[index, :] = coords.max(axis=0)\n index += 1\n\n axis_arrays = {}\n min_gen = np.min(min_coords, axis=0)\n max_gen = np.max(max_coords, axis=0)\n volume = (max_gen[0] - min_gen[0]) * (max_gen[1] - min_gen[1]) * (max_gen[2] - min_gen[2])\n scaling_factor = np.cbrt(volume/n_voxels_approx) # because then V = N * scale_factor^3\n for i_dim in range(3):\n axis_arrays[i_dim] = np.arange(min_gen[i_dim], max_gen[i_dim] + scaling_factor, scaling_factor) # create axis array\n\n gridx, gridy, gridz = np.meshgrid(axis_arrays[0], axis_arrays[1], axis_arrays[2], indexing='ij')\n n_voxels = gridx.shape[0] * gridy.shape[1] * gridz.shape[2]\n grid = np.zeros((n_voxels, 3))\n grid[:, 0] = np.squeeze(gridx.reshape((n_voxels, 1))) # reshape to common format\n grid[:, 1] = np.squeeze(gridy.reshape((n_voxels, 1)))\n grid[:, 2] = np.squeeze(gridz.reshape((n_voxels, 1)))\n return grid", "def _write_coord(parameters):\n # Reorder elements\n if parameters[\"elements_order\"] is not None:\n order = parameters[\"elements_order\"]\n else:\n order = parameters[\"elements\"].keys()\n\n # Format\n fmt = block_to_format[\"COORD\"]\n fmt = str2format(fmt)\n\n out = []\n for k in order:\n values = parameters[\"elements\"][k][\"center\"]\n out += write_record(values, fmt)\n\n return out", "def _cal_grid_coordinates(self, nc_handle):\n print(\"calculating grid coordinates\")\n #\n x = np.zeros(self._grid[\"counts\"][0], dtype=float)\n y = np.zeros(self._grid[\"counts\"][1], dtype=float)\n z = np.zeros(self._grid[\"counts\"][2], dtype=float)\n \n for i in range(self._grid[\"counts\"][0]):\n x[i] = self._grid[\"origin\"][0] + i*self._grid[\"d0\"][0]\n\n for j in range(self._grid[\"counts\"][1]):\n y[j] = self._grid[\"origin\"][1] + j*self._grid[\"d1\"][1]\n\n for k in range(self._grid[\"counts\"][2]):\n z[k] = self._grid[\"origin\"][2] + k*self._grid[\"d2\"][2]\n\n self._set_grid_key_value(\"x\", x)\n self._set_grid_key_value(\"y\", y)\n self._set_grid_key_value(\"z\", z)\n\n for key in [\"x\", \"y\", \"z\"]:\n self._write_to_nc(nc_handle, key, self._grid[key])\n return None", "def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions", "def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions", "def _position_cartesian2cylindrical(pos):\n\n \n #save cartesian position of each particle\n x=pos[:,0]\n y=pos[:,1]\n z=pos[:,2]\n\n rho= np.sqrt(x**2+y**2)\n theta=np.arctan2(y,x)\n\n\n return np.dstack((rho,theta,z))[0]", "def _indices_to_coords(c,r):\n\n column = _index_to_column(c)\n row = r + 1\n\n return {'c': column, 'r': row, 'coord': f'{column}{row}'}", "def populate(self, compound_dict=None, x=None, y=None, z=None):\n if self.dimension == 3:\n a = self.lattice_spacings[0]\n b = self.lattice_spacings[1]\n c = self.lattice_spacings[2]\n if x is None:\n x = 1\n if y is None:\n y = 1\n if z is None:\n z = 1\n if x < 1 or y < 1 or z < 1:\n raise ValueError('Incorrect populate value: X, Y, or Z is < 1.'\n ' Cannot replicate unit cell less than 1')\n elif self.dimension == 2:\n a = self.lattice_spacings[0]\n b = self.lattice_spacings[1]\n if x is None:\n x = 1\n if y is None:\n y = 1\n if z is None:\n 
pass\n else:\n raise ValueError('Z is defined although dimension is 2D')\n if x < 1 or y < 1:\n raise ValueError('Incorrect populate value: X or Y is < 1. '\n ' Cannot replicate unit cell less than 1')\n elif self.dimension == 1:\n a = self.lattice_spacings[0]\n if x is None:\n x = 1\n if y is None:\n pass\n else:\n raise ValueError('Y is defined although dimension is 1D')\n if z is None:\n pass\n if z is not None:\n raise ValueError('Z is defined although dimension is 2D')\n if x < 1:\n raise ValueError('Incorrect populate value: X < 1. '\n ' Cannot replicate unit cell less than 1')\n else:\n raise ValueError('Dimension not defined.')\n\n cell = defaultdict(list)\n for key, val in self.basis_vectors.items():\n for val_item in range(len(val)):\n if self.dimension == 3:\n for i in range(x):\n for j in range(y):\n for k in range(z):\n tmpx = (val[val_item][0] + i) * a\n tmpy = (val[val_item][1] + j) * b\n tmpz = (val[val_item][2] + k) * c\n tmp_tuple = tuple((tmpx, tmpy, tmpz))\n cell[key].append(((tmp_tuple)))\n elif self.dimension == 2:\n for i in range(x):\n for j in range(y):\n tmpx = (val[val_item][0] + i) * a\n tmpy = (val[val_item][1] + j) * b\n tmp_tuple = tuple((tmpx, tmpy))\n cell[key].append(((tmp_tuple)))\n else:\n for i in range(x):\n tmpx = (val[val_item][0] + i) * a\n tmp_tuple = tuple((tmpx))\n cell[key].append(((tmp_tuple)))\n\n ret_lattice = mb.Compound()\n if compound_dict is None:\n for key_id, all_pos in cell.items():\n particle = mb.Particle(name=key_id, pos=[0, 0, 0])\n for pos in all_pos:\n particle_to_add = mb.clone(particle)\n mb.translate(particle_to_add, list(pos))\n ret_lattice.add(particle_to_add)\n else:\n for key_id, all_pos in cell.items():\n if isinstance(compound_dict[key_id], mb.Compound):\n compound_to_move = compound_dict[key_id]\n for pos in all_pos:\n tmp_comp = mb.clone(compound_to_move)\n mb.translate(tmp_comp, list(pos))\n ret_lattice.add(tmp_comp)\n else:\n err_type = type(compound_dict.get(key_id))\n TypeError('Invalid type in provided Compound Dictionary. 
'\n 'For key {}, type: {} was provided, '\n 'not Compound.'.format(key_id, err_type))\n return ret_lattice", "def convert_coord_data_to_dict(data):\r\n coord_header = data['coord'][0]\r\n coords = data['coord'][1]\r\n pct_var = data['coord'][3]\r\n coords_dict = {}\r\n pct_var_dict = {}\r\n coords_dict['pc vector number'] = coord_header\r\n for x in range(len(coords)):\r\n coords_dict[str(x + 1)] = coords[0:, x]\r\n pct_var_dict[str(x + 1)] = pct_var[x]\r\n\r\n return coords_dict, pct_var_dict", "def read_xyz(filename):\n\n config = {}\n\n with open(filename, 'r') as f:\n # number of atoms (spins)\n config['nat'] = int(re.findall('\\S+', f.readline())[0])\n\n # box parameters (type, dimension, shape, periodicity)\n sarr = re.findall('\\S+', f.readline())\n config['latt_type'] = sarr[0]\n dims = list(map(int, sarr[1:4]))\n config['latt_box'] = np.array(dims)\n config['box'] = np.diag(dims)\n config['pbc'] = list(map(int, sarr[4:7]))\n if len(sarr) > 7:\n dim_intra = len(sarr) - 7\n\n atom_types = []\n xyz = []\n config['latt_i'] = np.zeros(dims, dtype=int)\n config['latt_atoms'] = np.zeros(dims, dtype=int)\n config['latt_intra'] = np.zeros(tuple(dims) + (dim_intra,), dtype='float64')\n for i in range(config['nat']):\n sarr = re.findall('\\S+', f.readline())\n t = int(sarr[0])\n r = tuple(map(int, sarr[1:4]))\n\n atom_types.append(t)\n xyz.append(r)\n\n config['latt_i'][r] = i\n config['latt_atoms'][r] = t\n\n for j in range(dim_intra):\n ci = float(sarr[4 + j])\n config['latt_intra'][r[0], r[1], r[2], j] = ci\n\n config['atom_types'] = np.array(atom_types)\n config['xyz'] = np.array(xyz)\n \n return config", "def grid_mapping_to_coords(\n grid_mapping: GridMapping,\n xy_var_names: Tuple[str, str] = None,\n xy_dim_names: Tuple[str, str] = None,\n reuse_coords: bool = False,\n exclude_bounds: bool = False,\n) -> Dict[str, xr.DataArray]:\n\n if xy_var_names:\n _assert_valid_xy_names(xy_var_names, name='xy_var_names')\n if xy_dim_names:\n _assert_valid_xy_names(xy_dim_names, name='xy_dim_names')\n\n if reuse_coords:\n try:\n # noinspection PyUnresolvedReferences\n x, y = grid_mapping.x_coords, grid_mapping.y_coords\n except AttributeError:\n x, y = None, None\n if isinstance(x, xr.DataArray) \\\n and isinstance(y, xr.DataArray) \\\n and x.ndim == 1 \\\n and y.ndim == 1 \\\n and x.size == grid_mapping.width \\\n and y.size == grid_mapping.height:\n return {\n name: xr.DataArray(coord.values,\n dims=dim,\n attrs=coord.attrs)\n for name, dim, coord in zip(xy_var_names,\n xy_dim_names,\n (x, y))\n }\n\n x_name, y_name = xy_var_names or grid_mapping.xy_var_names\n x_dim_name, y_dim_name = xy_dim_names or grid_mapping.xy_dim_names\n w, h = grid_mapping.size\n x1, y1, x2, y2 = grid_mapping.xy_bbox\n x_res, y_res = grid_mapping.xy_res\n x_res_05 = x_res / 2\n y_res_05 = y_res / 2\n\n dtype = np.float64\n\n x_data = np.linspace(x1 + x_res_05, x2 - x_res_05, w, dtype=dtype)\n if grid_mapping.is_lon_360:\n x_data = from_lon_360(x_data)\n\n if grid_mapping.is_j_axis_up:\n y_data = np.linspace(y1 + y_res_05, y2 - y_res_05, h, dtype=dtype)\n else:\n y_data = np.linspace(y2 - y_res_05, y1 + y_res_05, h, dtype=dtype)\n\n if grid_mapping.crs.is_geographic:\n x_attrs = dict(\n long_name='longitude coordinate',\n standard_name='longitude',\n units='degrees_east'\n )\n y_attrs = dict(\n long_name='latitude coordinate',\n standard_name='latitude',\n units='degrees_north'\n )\n else:\n x_attrs = dict(\n long_name=\"x coordinate of projection\",\n standard_name=\"projection_x_coordinate\"\n )\n y_attrs = dict(\n 
long_name=\"y coordinate of projection\",\n standard_name=\"projection_y_coordinate\"\n )\n\n x_coords = xr.DataArray(x_data, dims=x_dim_name, attrs=x_attrs)\n y_coords = xr.DataArray(y_data, dims=y_dim_name, attrs=y_attrs)\n coords = {\n x_name: x_coords,\n y_name: y_coords,\n }\n if not exclude_bounds:\n x_bnds_0_data = np.linspace(x1, x2 - x_res, w, dtype=dtype)\n x_bnds_1_data = np.linspace(x1 + x_res, x2, w, dtype=dtype)\n\n if grid_mapping.is_lon_360:\n x_bnds_0_data = from_lon_360(x_bnds_0_data)\n x_bnds_1_data = from_lon_360(x_bnds_1_data)\n\n if grid_mapping.is_j_axis_up:\n y_bnds_0_data = np.linspace(y1, y2 - y_res, h, dtype=dtype)\n y_bnds_1_data = np.linspace(y1 + y_res, y2, h, dtype=dtype)\n else:\n y_bnds_0_data = np.linspace(y2, y1 + y_res, h, dtype=dtype)\n y_bnds_1_data = np.linspace(y2 - y_res, y1, h, dtype=dtype)\n\n bnds_dim_name = 'bnds'\n x_bnds_name = f'{x_name}_{bnds_dim_name}'\n y_bnds_name = f'{y_name}_{bnds_dim_name}'\n # Note, according to CF, bounds variables are not required to have\n # any attributes, so we don't pass any.\n x_bnds_coords = xr.DataArray(list(zip(x_bnds_0_data, x_bnds_1_data)),\n dims=[x_dim_name, bnds_dim_name])\n y_bnds_coords = xr.DataArray(list(zip(y_bnds_0_data, y_bnds_1_data)),\n dims=[y_dim_name, bnds_dim_name])\n x_coords.attrs.update(bounds=x_bnds_name)\n y_coords.attrs.update(bounds=y_bnds_name)\n coords.update({\n x_bnds_name: x_bnds_coords,\n y_bnds_name: y_bnds_coords,\n })\n\n return coords", "def _spherical_to_cartesian(ra, dec):\n rar = np.radians(ra)\n decr = np.radians(dec)\n\n x = np.cos(rar) * np.cos(decr)\n y = np.sin(rar) * np.cos(decr)\n z = np.sin(decr)\n \n return x, y, z", "def _position_cylindrical2cartesian(pos):\n \n rho=pos[:,0]\n theta=pos[:,1]\n z=pos[:,2]\n\n x=rho*np.cos(theta)\n y=rho*np.sin(theta)\n z=z\n\n return np.dstack((x,y,z))[0]", "def cart2frac(lattice, ccoords):\n det3 = np.linalg.det\n\n latt_tr = np.transpose(lattice)\n\n fcoords = []\n det_latt_tr = np.linalg.det(latt_tr)\n for i in ccoords:\n a = (det3([[i[0], latt_tr[0][1], latt_tr[0][2]], [\n i[1], latt_tr[1][1], latt_tr[1][2]\n ], [i[2], latt_tr[2][1], latt_tr[2][2]]])) / det_latt_tr\n b = (det3([[latt_tr[0][0], i[0], latt_tr[0][2]], [\n latt_tr[1][0], i[1], latt_tr[1][2]\n ], [latt_tr[2][0], i[2], latt_tr[2][2]]])) / det_latt_tr\n c = (det3([[latt_tr[0][0], latt_tr[0][1], i[0]], [\n latt_tr[1][0], latt_tr[1][1], i[1]\n ], [latt_tr[2][0], latt_tr[2][1], i[2]]])) / det_latt_tr\n fcoords.append([a, b, c])\n return fcoords", "def gcoord_split(args):\n if not os.path.isfile(args.coord):\n raise IOError('Input coordinate NIFTI file %s not found' % args.coord)\n if args.base is None and args.radial is None and args.sulcal is None and args.gyral is None:\n raise ValueError(\"No output files set\")\n\n img = nibabel.load(args.coord)\n coord = img.get_data()\n\n for idx, name in enumerate(('radial', 'sulcal', 'gyral')):\n if getattr(args, name, None) is not None:\n filename = getattr(args, name)\n elif args.base is not None:\n filename = args.base + '_%s.nii.gz' % name\n else:\n continue\n nibabel.Nifti1Image(coord[..., idx], img.affine).to_filename(filename)", "def _position_spherical2cartesian(pos):\n \n r=pos[:,0]\n theta=pos[:,1]\n phi=pos[:,2]\n\n if any(theta>np.pi) or any(theta<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. 
Exiting.\"\n\n\n x=r*np.sin(theta)*np.cos(phi)\n y=r*np.sin(theta)*np.sin(phi)\n z=r*np.cos(theta)\n\n return np.dstack((x,y,z))[0]", "def obs_ijpos(gridfile,lons,lats,coor):\n\n gfh= netCDF4.Dataset(gridfile)\n cartesian=0\n if (coor=='r'):\n try:\n \n latr=gfh.variables['lat_rho'][:,:]\n lonr=gfh.variables['lon_rho'][:,:]\n except:\n latr=gfh.variables['latitude'][:,:]\n lonr=gfh.variables['longitude'][:,:]\n \n\n try:\n xr=gfh.variables['xi_rho'][:]\n yr=gfh.variables['eta_rho'][:]\n except:\n try:\n xr=gfh.variables['x_rho'][:]\n yr=gfh.variables['y_rho'][:]\n except:\n print('Neither xi_rho/eta_rho or x_rho/y_rho on file.')\n print('This might slow down the calculations')\n\n\n elif (coor=='u'):\n latr=gfh.variables['lat_u'][:,:]\n lonr=gfh.variables['lon_u'][:,:]\n try:\n xr=gfh.variables['xi_u'][:]\n yr=gfh.variables['eta_u'][:]\n except:\n xr=gfh.variables['x_u'][:]\n yr=gfh.variables['y_u'][:]\n elif (coor=='v'):\n latr=gfh.variables['lat_v'][:,:]\n lonr=gfh.variables['lon_v'][:,:]\n try:\n xr=gfh.variables['xi_v'][:]\n yr=gfh.variables['eta_v'][:]\n except:\n xr=gfh.variables['x_v'][:]\n yr=gfh.variables['y_v'][:]\n\n IN = point_in_polygon(lonr, latr, lons, lats)\n ind=np.where(IN)[0]\n \n if lats.size >1: \n lons=lons[ind]; lats=lats[ind]\n # If there's no lons, lats left at this stage, return oipos, ojpos with -999 everywhere\n if not len(lons):\n return np.ones_like(IN)*-999, np.ones_like(IN)*-999\n \n try:\n try:\n mapstr=str(gfh.variables['h'].getncattr('mapping'))\n except:\n try:\n mapstr=str(gfh.variables['h'].getncattr('grid_mapping'))\n except:\n pass\n try:\n projstring=(gfh.variables[mapstr]).getncattr('proj4')\n except:\n try:\n projstring=(gfh.variables[mapstr]).getncattr('proj4string')\n except:\n pass\n try:\n projstring=(gfh.variables['grid_mapping']).getncattr('proj4')\n except:\n try:\n projstring=(gfh.variables['grid_mapping']).getncattr('proj4string')\n except:\n pass\n\n gridproj=proj.Proj(str(projstring))\n hasproj=1\n except:\n hasproj=0\n\n # Check if lat, lon spacing is uniform\n dx1=np.abs(lonr[0,1]-lonr[0,0])\n dx2=np.abs(lonr[0,-1]-lonr[0,-2])\n n=int(np.round(lonr.shape[1]/2))\n dx3=np.abs(lonr[0,n]-lonr[0,n-1])\n\n dy1=np.abs(latr[1,0]-latr[0,0])\n dy2=np.abs(latr[-1,0]-latr[-2,0])\n n=int(np.round(latr.shape[0]/2))\n dy3=np.abs(latr[n,0]-latr[n-1,0])\n\n if ( (dx1 == dx2) & (dx1==dx3) & (dx2==dx3) & (dy1 == dy2) & (dy1==dy3) & (dy2==dy3) ):\n cartesian=1\n gridproj=proj.Proj(\"+proj=latlong +datum=WGS84\")\n \n\n \n if hasproj:\n dx=xr[1]-xr[0]\n dy=yr[1]-yr[0]\n [x,y]=gridproj(lons,lats)\n ipos=(x-xr[0])/dx\n jpos=(y-yr[0])/dy\n\n elif cartesian:\n [x1,y1]=gridproj(lonr[0,0],latr[0,0])\n [x2,y2]=gridproj(lonr[0,1],latr[0,1])\n dx=x2-x1\n [x2,y2]=gridproj(lonr[1,0],latr[1,0])\n dy=y2-y1\n [x,y]=gridproj(lons,lats)\n [x0,y0]=gridproj(lonr[0,0],latr[0,0])\n\n ipos=(x-x0)/dx\n jpos=(y-y0)/dy\n\n else:\n x=np.linspace(0,lonr.shape[1]-1,lonr.shape[1])\n y=np.linspace(0,lonr.shape[0]-1,lonr.shape[0])\n xi=np.zeros_like(lonr); yi=np.zeros([lonr.shape[1],lonr.shape[0]])\n xi[:,:]=x; yi[:,:]=y; yi=np.swapaxes(yi,1,0)\n zi=scipy.interpolate.griddata((lonr.flatten(),latr.flatten()),xi.flatten(),(lons,lats))\n ipos=zi\n zi=scipy.interpolate.griddata((lonr.flatten(),latr.flatten()),yi.flatten(),(lons,lats))\n jpos=zi\n \n if 'ind' in locals():\n oipos=np.ones(IN.shape)*-999.; ojpos=np.ones(IN.shape)*-999.\n oipos[ind]=ipos; ojpos[ind]=jpos\n else:\n oipos=ipos\n ojpos=jpos\n if not IN:\n oipos = np.array([-999.])\n ojpos = np.array([-999.])\n gfh.close()\n 
return oipos,ojpos", "def convertmany(self, *args, **kwargs):\n return _coordsys.coordsys_convertmany(self, *args, **kwargs)", "def proj_coords(coords, proj_in, proj_out): \n return [proj_coord(coord, proj_in, proj_out) for coord in coords]", "def _calc_coords(self, X, Y, Z):\r\n def _write_coords(coord):\r\n XX.append(X[coord])\r\n YY.append(Y[coord])\r\n ZZ.append(Z[coord])\r\n\r\n def _build_layer():\r\n for j in range(self.size[1]):\r\n for i in range(self.size[0]):\r\n # write NW corner\r\n if i == 0:\r\n nwCoord = 2 * i + 4 * self.size[0] * j + const\r\n _write_coords(nwCoord)\r\n # write NE corner\r\n neCoord = 2 * i + 4 * self.size[0] * j + const + 1\r\n _write_coords(neCoord)\r\n if j == self.size[1] - 1:\r\n for i in range(self.size[0]):\r\n # write SW corner\r\n if i == 0:\r\n swCoord = 2 * i + 4 * self.size[0] * j + 2 * self.size[0] + const\r\n _write_coords(swCoord)\r\n # write SE corner\r\n seCoord = 2 * i + 4 * self.size[0] * j + 2 * self.size[0] + const + 1\r\n _write_coords(seCoord)\r\n\r\n # At this point, we have all points needed for unstructured grid in X,Y,Z\r\n # However, they must be re-arranged so we can define Hexahedrons\r\n # TODO: REFINE CELLS\r\n # PSUEDO:\r\n # find cell to be refined\r\n # add new cells (as easy as pie)\r\n\r\n XX, YY, ZZ = ([] for i in range(3))\r\n const = 0\r\n for k in range(self.size[2]):\r\n _build_layer()\r\n if k == self.size[2] - 1:\r\n const += self.size[0] * self.size[1] * 4\r\n _build_layer()\r\n break\r\n else:\r\n const += self.size[0] * self.size[1] * 8\r\n return XX, YY, ZZ", "def createCoordTuples(data):\n data['xy'] = None\n for i, row in data.iterrows():\n data['xy'][i] = [np.round(row['geometry'].x, decimals=5), np.round(row['geometry'].y, decimals=5)]\n return data", "def cartesian(position):\n return [position[0] * cos(position[1]), position[0] * sin(position[1])]", "def set_fractional_coordinates(self, coords):\n self.fractional_coordinates = coords\n self.xyz_coordinates = self.convert_abc_to_xyz(coords)\n self.nions = len(coords)\n return", "def SphericalToCartesian(Spherical):\n\n # r,theta,phi -> x,y,z\n r = Spherical[:,0]\n st = np.sin(Spherical[:,1])\n sp = np.sin(Spherical[:,2])\n ct = np.cos(Spherical[:,1])\n cp = np.cos(Spherical[:,2])\n x = r*st*cp\n y = r*st*sp\n z = r*ct\n\n if (len(Spherical[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n return Cartesian\n else:\n # vr,vtheta,vphi -> vx,vy,vz\n vr = Spherical[:,3]\n vt = Spherical[:,4]\n vp = Spherical[:,5]\n vx = vr*st*cp - vt*ct*cp - vp*sp\n vy = vr*st*sp + vt*ct*sp + vp*cp\n vz = vr*ct - vt*st\n Cartesian= np.column_stack((x,y,z,vx,vy,vz))\n return Cartesian", "def _geodetic_to_cartesian(cls, lat, lon, alt):\n C = Earth.r / np.sqrt(1 - (Earth.e * np.sin(lat)) ** 2)\n S = Earth.r * (1 - Earth.e ** 2) / np.sqrt(1 - (Earth.e * np.sin(lat)) ** 2)\n r_d = (C + alt) * np.cos(lat)\n r_k = (S + alt) * np.sin(lat)\n\n norm = np.sqrt(r_d ** 2 + r_k ** 2)\n return norm * np.array(\n [np.cos(lat) * np.cos(lon), np.cos(lat) * np.sin(lon), np.sin(lat)]\n )", "def spherical2cartesian(phi, theta, depth):\n x = depth * np.sin(theta) * np.cos(phi)\n y = depth * np.cos(theta)\n z = depth * np.sin(theta) * np.sin(phi)\n\n return x, y, z", "def cartesianToSpherical(x=0, y=0, z=0):\n\n hxy = np.hypot(x, y)\n radius = np.hypot(hxy, z)\n altitude = np.arctan2(z, hxy)\n azimuth = np.arctan2(y, x)\n return altitude, azimuth, radius", "def populate(num_points):\r\n superscript = str.maketrans(\"0123456789\", \"⁰¹²³⁴⁵⁶⁷⁸⁹\")\r\n highest_deg = int(math.sqrt(num_points))\r\n 
x_terms = []\r\n y_terms = []\r\n for i in range(highest_deg):\r\n x_terms.append(f'(x{i})'.translate(superscript))\r\n y_terms.append(f'(y{i})'.translate(superscript))\r\n return [x_terms, y_terms]", "def test_xyz_to_coords_and_element_numbers(self):\n coords, atom_nums = converter.xyz_to_coords_and_element_numbers(self.xyz1['dict'])\n self.assertEqual(coords,\n [[0.0, 0.0, 0.0],\n [0.6300326, 0.6300326, 0.6300326],\n [-0.6300326, -0.6300326, 0.6300326],\n [-0.6300326, 0.6300326, -0.6300326],\n [0.6300326, -0.6300326, -0.6300326]])\n self.assertEqual(atom_nums, [6, 1, 1, 1, 1])", "def get_cartesian_coord(lat, lon, h):\n a = 6378137.0\n rf = 298.257223563\n lat_rad = radians(lat)\n lon_rad = radians(lon)\n N = sqrt(a / (1 - (1 - (1 - 1 / rf) ** 2) * (sin(lat_rad)) ** 2))\n X = (N + h) * cos(lat_rad) * cos(lon_rad)\n Y = (N + h) * cos(lat_rad) * sin(lon_rad)\n Z = ((1 - 1 / rf) ** 2 * N + h) * sin(lat_rad)\n return X, Y, Z", "def pristine_coords_to_objects(list_of_coords):\n list_of_objects = []\n for element in range(len(list_of_coords)):\n list_of_objects.append(Atom(element, \"CX\", \"GGG\", element, list_of_coords[element][0], list_of_coords[element][1], list_of_coords[element][2]))\n return list_of_objects", "def read_coordinate_file(file):\n with open(file, 'r') as file1:\n coords = []\n\n for line in file1:\n line = line.strip('{} \\n')\n (a, b) = line.split(\",\")\n ''' \n x and y are expressed as latitude and longitude. These are converted with the Mercator projection (from Computer assignment 1)\n into x and y coordinates.\n '''\n coord = [(float(b)*m.pi/180), (m.log((m.tan(m.pi/4+m.pi*float(a)/360))))]\n coords.append(coord)\n return np.array(coords)", "def save_n3d_coords(file_path, coords_dict, seq_pos_dict): \n \n file_obj = open(file_path, 'w')\n write = file_obj.write\n \n for chromo in seq_pos_dict:\n chromo_coords = coords_dict[chromo]\n chromo_seq_pos = seq_pos_dict[chromo]\n \n num_models = len(chromo_coords)\n num_coords = len(chromo_seq_pos)\n \n if chromo[:3].lower() != 'chr':\n chromo_name = 'chr' + chromo\n else:\n chromo_name = chromo\n \n line = '%s\\t%d\\t%d\\n' % (chromo_name, num_coords, num_models)\n write(line)\n \n for j in range(num_coords):\n data = chromo_coords[:,j].ravel().tolist()\n data = '\\t'.join('%.8f' % d for d in data)\n \n line = '%d\\t%s\\n' % (chromo_seq_pos[j], data)\n write(line)\n\n file_obj.close()", "def gene_coords_by_name(probes, names):\n names = list(filter(None, set(names)))\n if not names:\n return {}\n\n # Create an index of gene names\n gene_index = collections.defaultdict(set)\n for i, gene in enumerate(probes[\"gene\"]):\n for gene_name in gene.split(\",\"):\n if gene_name in names:\n gene_index[gene_name].add(i)\n # Retrieve coordinates by name\n all_coords = collections.defaultdict(lambda: collections.defaultdict(set))\n for name in names:\n gene_probes = probes.data.take(sorted(gene_index.get(name, [])))\n if not len(gene_probes):\n raise ValueError(f\"No targeted gene named {name!r} found\")\n # Find the genomic range of this gene's probes\n start = gene_probes[\"start\"].min()\n end = gene_probes[\"end\"].max()\n chrom = core.check_unique(gene_probes[\"chromosome\"], name)\n # Deduce the unique set of gene names for this region\n uniq_names = set()\n for oname in set(gene_probes[\"gene\"]):\n uniq_names.update(oname.split(\",\"))\n all_coords[chrom][start, end].update(uniq_names)\n # Consolidate each region's gene names into a string\n uniq_coords = {}\n for chrom, hits in all_coords.items():\n uniq_coords[chrom] = [\n 
(start, end, \",\".join(sorted(gene_names)))\n for (start, end), gene_names in hits.items()\n ]\n return uniq_coords", "def load_store(filename):\n result = {}\n # Open file\n with open(filename, 'r') as file:\n # Read first character\n char = file.read(1)\n while char:\n # ; defines a new point\n if char == \";\":\n # The next characters are of the form (x,y,e)\n char = file.read(1) # left bracket\n\n char = file.read(1) # x\n x = char\n char = file.read(1) # comma or second digit\n\n # This means x is a two digit number\n if char != ',':\n # Add the second digit and then cast\n x += char\n x = int(x)\n char = file.read(1) # Now read the comma\n else:\n # One digit number so just cast\n print(char)\n x = int(x)\n \n # Follow a similar process for y and e\n char = file.read(1) # y\n\n y = char\n char = file.read(1) # comma or second digit\n if char != ',':\n y += char\n y = int(y)\n char = file.read(1)\n else:\n y = int(y)\n\n char = file.read(1) # encoded product\n e = char\n char = file.read(1)\n if char != ')':\n e += char\n e = int(e)\n char = file.read(1)\n else:\n e = int(e)\n \n # Add to the dictionary\n coords = (x,y)\n result[(x,y)] = e\n\n char = file.read(1)\n return result", "def read_coordinates(path='', sort=True):\n maspointer = open(path + 'xd.mas', 'r')\n respointer = open(path + 'xd.res', 'r')\n\n positions = {}\n keylist = [] #Needed to keep the atomlist order. This is important for the frequency read function.\n for line in maspointer.readlines():\n if 'CELL ' in line:\n cell = [float(i) for i in line.split(\" \") if '.' in i]\n break\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(\" \") if '.' in i]\n coords = coords[:-1]\n key = line.split(\" \")[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return cell, positions, sortkeylist", "def th_map_coordinates(input, coords, order=1):\r\n\r\n assert order == 1\r\n input_size = input.size(0)\r\n\r\n coords = torch.clamp(coords, 0, input_size - 1)\r\n coords_lt = coords.floor().long()\r\n coords_rb = coords.ceil().long()\r\n coords_lb = torch.stack([coords_lt[:, 0], coords_rb[:, 1]], 1)\r\n coords_rt = torch.stack([coords_rb[:, 0], coords_lt[:, 1]], 1)\r\n\r\n vals_lt = th_gather_2d(input, coords_lt.detach())\r\n vals_rb = th_gather_2d(input, coords_rb.detach())\r\n vals_lb = th_gather_2d(input, coords_lb.detach())\r\n vals_rt = th_gather_2d(input, coords_rt.detach())\r\n\r\n coords_offset_lt = coords - coords_lt.type(coords.data.type())\r\n\r\n vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[:, 0]\r\n vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[:, 0]\r\n mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[:, 1]\r\n return mapped_vals", "def convert(gpsfile):\n coordinates = []\n lst = []\n for line in gpsfile:\n if line.startswith('$GPGGA'):\n # get time, fix signal and dilution of precision\n arr = line.split(',')\n data = [arr[2], arr[6], arr[8]]\n lst.append(data)\n\n elif line.startswith('lng'):\n # get longitude, latitude, altitude, speed, angle\n arr = line.split(',')\n lng = arr[0].split('=')\n lng = lng[1]\n lat = arr[1].split('=')\n lat = lat[1]\n alt = arr[2].split('=')\n alt = alt[1]\n speed = arr[3].split('=')\n speed = speed[1]\n ang = arr[5].split('=')\n ang = ang[1]\n lst.append([float(lng), float(lat), 
float(alt), float(speed), float(ang)])\n\n # check if a GPGGA line was found, otherwise don't add this point\n if len(lst) == 2:\n coordinates.append(lst)\n lst = []\n\n return coordinates", "def sphericalToCartesian(altitude=0, azimuth=0, radius=0):\n\n rcos_theta = radius * np.cos(altitude)\n x = rcos_theta * np.cos(azimuth)\n y = rcos_theta * np.sin(azimuth)\n z = radius * np.sin(altitude)\n return x, y, z", "def griddata_CS(coords,values,grid):\r\n\tcoords = np.array(coords)\r\n\tvertices,weights = interp_weights(coords, grid)\r\n\tg_data = interpolate(values, vertices, weights)\r\n\treturn g_data", "def line_to_DGGS(line_coords, resolution): # one poly and the attribute record for it\n doneDGGScells = [] #to accumlate a list of completed cells\n arrLines = []\n for pt in line_coords: # for each point calculate the DGGS by calling on the DGGS engine\n # ask the engine what cell thisPoint is in\n thisDGGS = rdggs.cell_from_point(resolution, pt, plane=False)# plane=false therefore on the ellipsoid curve\n #add cell if not already in there\n if thisDGGS not in doneDGGScells: # == new one\n doneDGGScells.append(thisDGGS) # save as a done cell\n return doneDGGScells", "def grid_spherical_decomposed(x, y, z, data, x_i, y_i, z_i, horz_res, missing_value=-32767):\n\n r_map = np.sqrt(x**2.0 + y**2.0) # cartesian radius from map (x,y) center\n az_map = np.arctan2(y,x) #azimuth in the cartesian system. might vary along a ray due to map projection curvature\n vcp = np.fromiter((np.median(az_map[:, i_az, :]) for i_az in range(az_map.shape[1])), np.float32)\n print x.shape\n \n r_i = np.arange(r_map.min(), r_map.max(), horz_res) # cartesian radius from map(x,y) center\n\n # also need to griddata the x, y, z geographic coordinates.\n # decomposed geometry in radar polar coordinates is a not a\n # geophysical coordinate system (it's really a tangent plane\n # coord sys without beam refraction effects), so really there \n # are two xyz systems in play here.\n\n # unless, if by using z and R = np.sqrt(x**2.0 + y**2.0), we remain in a cylinderical \n # system referenced to the map projection in use. I think this is true.\n\n # Interpolate from spherical to cylindrical.\n # Cylindrical system is a different\n # range coordinate than the radar range coordinate.\n az_idx = 1\n cyl_grid_shape = (r_i.shape[0], x.shape[az_idx], z_i.shape[0])\n cyl_grid = np.empty(cyl_grid_shape)\n \n for az_id in range(cyl_grid_shape[az_idx]):\n progress(az_id, cyl_grid_shape[az_idx], 'Gridding along azimuths')\n rhi_r = r_map[:, az_id, :]\n # rhi_y = y[:, az_id, :]\n # R_i = rhir = np.sqrt(x[:, az_id, :]**2.0 + y[:, az_id, :]**2.0)\n rhi_z = z[:, az_id, :]\n rhi_data = data[:, az_id, :]\n \n # input and output coordinates need to be taken from the same coordinate system\n cyl_grid[:, az_id, :] = griddata(rhi_r.flatten(), rhi_z.flatten(), rhi_data.flatten(), r_i, z_i).T\n print \"\\r\" + 'Gridding along azimuths ... 
done'\n # cyl_grid is r, az, z instead of r, az, el\n \n # get mesh of coordinates for all interpolated radii r_i and along the azimuth\n # since constant radar azimuth might have curvature induced by the map projection\n # it's tricky to do this.\n\n # steps:\n # Do new transform from r,az radar system to map system using r=r_i to get x,y\n # or \n # Just do naive assumption that azimuths are straight and accept the error (used this one)\n \n # interpolate from cylindrical to cartesian.\n grid = np.empty((len(x_i), len(y_i), len(z_i)), dtype=np.float32)\n for z_id in range(z_i.shape[0]):\n progress(z_id, z_i.shape[0], 'Gridding at constant altitude')\n cappi_x = r_i[:, None]*np.cos(vcp[None, :])\n cappi_y = r_i[:, None]*np.sin(vcp[None, :])\n cappi_data = cyl_grid[:,:,z_id]\n \n # input and output coordinates need to be taken from the same coordinate system\n grid_2d = griddata(cappi_x.flatten(), cappi_y.flatten(), cappi_data.flatten(), x_i, y_i).T\n grid[:, :, z_id] = grid_2d\n print \"\\r\" + 'Gridding at constant altitude ... done'\n \n grid[np.isnan(grid)] = missing_value\n \n return grid", "def gene_coords_by_range(probes, chrom, start, end, ignore=params.IGNORE_GENE_NAMES):\n ignore += params.ANTITARGET_ALIASES\n # Tabulate the genes in the selected region\n genes = collections.OrderedDict()\n for row in probes.in_range(chrom, start, end):\n name = str(row.gene)\n if name in genes:\n genes[name][1] = row.end\n elif name not in ignore:\n genes[name] = [row.start, row.end]\n # Reorganize the data structure\n return {\n chrom: [(gstart, gend, name) for name, (gstart, gend) in list(genes.items())]\n }", "def coords(self, coords={}):\n # type: (dict) -> Entity\n if not coords:\n return\n for c in ['x', 'y']:\n if c in coords:\n self.type_def[c] = int(coords[c])\n\n return self", "def cartesian2cylindrical(coords):\n cyl = np.zeros(coords.shape)\n cyl[:, 0] = np.sqrt(coords[:, 0] ** 2 + coords[:, 1] ** 2)\n cyl[:, 1] = np.arctan2(coords[:, 1], coords[:, 0])\n cyl[:, 2] = coords[:, 2]\n return cyl", "def add_cartesian_coordinates(self):\n return _add_cartesian_coordinates(self.data)", "def update_coords(self, cartesian_coords, polar_cords):\n\n self.cartesian_coords = cartesian_coords\n self.polar_coords = polar_cords\n\n self.db_upsert(force_insert=True)", "def QuadrilateralProjection(c1=(0,0), c2=(2,0), c3=(2,2), c4=(0,2), points=None, npoints=10, equally_spaced=True):\n\n if points is None or not isinstance(points,np.ndarray):\n if not isinstance(c1,tuple) or not isinstance(c2,tuple) or not isinstance(c3,tuple) or not isinstance(c4,tuple):\n raise ValueError(\"coordinates should be given in tuples of two elements (x,y)\")\n else:\n c1 = np.array(c1); c2 = np.array(c2); c3 = np.array(c3); c4 = np.array(c4)\n opoints = np.vstack((c1,c2,c3,c4))\n else:\n opoints = points\n\n from Florence.FunctionSpace import Quad, QuadES\n from Florence.QuadratureRules.GaussLobattoPoints import GaussLobattoPointsQuad\n from Florence.QuadratureRules.EquallySpacedPoints import EquallySpacedPoints\n\n npoints = int(npoints)\n if npoints ==0: npoints=1\n\n if equally_spaced:\n points = EquallySpacedPoints(ndim=3,C=npoints-1)\n hpBases = QuadES.Lagrange\n else:\n points = GaussLobattoPointsQuad(npoints-1)\n hpBases = Quad.LagrangeGaussLobatto\n\n BasesQuad = np.zeros((4,points.shape[0]),dtype=np.float64)\n for i in range(points.shape[0]):\n BasesQuad[:,i] = hpBases(0,points[i,0],points[i,1],arrange=1)[:,0]\n\n node_arranger = NodeArrangementQuad(npoints-1)[2]\n\n qmesh = Mesh()\n 
qmesh.Square(lower_left_point=(-1.,-1.), side_length=2,n=npoints, element_type=\"quad\")\n quads = qmesh.elements\n\n\n nnode = qmesh.nnode\n nelem = qmesh.nelem\n nsize = int((npoints+1)**2)\n\n mesh = Mesh()\n mesh.points = np.dot(BasesQuad.T, opoints)\n\n _, inv = np.unique(quads,return_inverse=True)\n sorter = np.argsort(node_arranger)\n mesh.elements = sorter[inv].reshape(quads.shape)\n\n mesh.element_type=\"quad\"\n mesh.nelem = mesh.elements.shape[0]\n mesh.nnode = mesh.points.shape[0]\n mesh.GetBoundaryEdges()\n\n return mesh", "def transform_xy_coords(xy_coords, sc_plot):\r\n sorted_keys = xy_coords.keys()\r\n all_cids = []\r\n all_xcoords = []\r\n all_ycoords = []\r\n sc_plot.set_transform(sc_plot.axes.transData)\r\n trans = sc_plot.get_transform()\r\n\r\n for s_label in sorted_keys:\r\n s_data = xy_coords[s_label]\r\n if s_data[0] == []:\r\n pass\r\n else:\r\n icoords = trans.transform(zip(s_data[0], s_data[1]))\r\n xcoords, ycoords = zip(*icoords)\r\n all_cids.extend(s_data[2])\r\n all_xcoords.extend(xcoords)\r\n all_ycoords.extend(ycoords)\r\n\r\n return all_cids, all_xcoords, all_ycoords", "def makeToCoordinates(fromCoords, Uframe, Vframe, scale):\n\n out = []\n\n for e in fromCoords:\n x = e[0]\n y = e[1]\n toX = Uframe[x][y]\n toY = Vframe[x][y]\n out.append((int(round(x+toX*scale)),int(round(y+toY*scale))))\n\n return out", "def convert_coords(self, coords):\n xPos = int(int(coords[0]) / 8)\n yPos = int(coords[1])\n zPos = int(int(coords[2]) / 8)\n return list(xPos, yPos, zPos)", "def to_cartesian(self):\n\n if self.cartesian is None:\n theta = math.radians(self.lat)\n phi = math.radians(self.long)\n x = R_EARTH * math.cos(theta) * math.cos(phi)\n y = R_EARTH * math.cos(theta) * math.sin(phi)\n z = R_EARTH * math.sin(theta)\n self.cartesian = CartesianPoint(x, y, z)\n return self.cartesian", "def geo2cell(geofile, posfile):", "def _pixel_to_map(coordinates, geotransform):\n coordinates_map = np.empty(coordinates.shape)\n coordinates_map[..., 0] = (\n geotransform[0]\n + geotransform[1] * coordinates[..., 0]\n + geotransform[2] * coordinates[..., 1]\n )\n coordinates_map[..., 1] = (\n geotransform[3]\n + geotransform[4] * coordinates[..., 0]\n + geotransform[5] * coordinates[..., 1]\n )\n return coordinates_map" ]
[ "0.5961069", "0.5903401", "0.5892975", "0.5871701", "0.5760852", "0.57279056", "0.5697128", "0.56781834", "0.563091", "0.56191087", "0.5551498", "0.54956114", "0.5457087", "0.5442877", "0.5397851", "0.5384462", "0.5377413", "0.53385407", "0.53186697", "0.5285104", "0.52689", "0.5246485", "0.5215178", "0.51768494", "0.5157614", "0.51311517", "0.51253206", "0.511855", "0.5117385", "0.5110315", "0.5105038", "0.5080791", "0.50756276", "0.5067555", "0.5060366", "0.5048861", "0.504632", "0.50383455", "0.50313073", "0.5021509", "0.5002327", "0.49998632", "0.49874067", "0.49803472", "0.49632406", "0.4959317", "0.49548727", "0.49506545", "0.49498513", "0.4944459", "0.4944459", "0.4942693", "0.49343368", "0.49298957", "0.49210635", "0.49199146", "0.4917244", "0.49129528", "0.489491", "0.4872949", "0.48729357", "0.4869404", "0.48688406", "0.48667476", "0.48579067", "0.48521626", "0.48518863", "0.4850894", "0.48231447", "0.48162085", "0.48152745", "0.48045677", "0.47980174", "0.47872812", "0.47868183", "0.47832364", "0.47796032", "0.47779265", "0.47758272", "0.47701007", "0.47673815", "0.47642136", "0.47570607", "0.47496486", "0.47401333", "0.47354868", "0.47249922", "0.47226277", "0.4713782", "0.47083476", "0.4706032", "0.47046745", "0.47014242", "0.46997908", "0.468743", "0.46836346", "0.4679679", "0.46734536", "0.46719986", "0.46611777" ]
0.49380758
52
Keys the coordinates of the atoms read from xd.res to the numerical part of its name.
def list_to_dict(atomlist, full=False):
    atomdict = {}
    if full:
        for atom in atomlist:
            atomdict[atom[0]] = atom[1]
    else:
        for atom in atomlist:
            atomdict[atom[0][0]] = atom[1]
    return atomdict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_coordinates(path='', sort=True):\n maspointer = open(path + 'xd.mas', 'r')\n respointer = open(path + 'xd.res', 'r')\n\n positions = {}\n keylist = [] #Needed to keep the atomlist order. This is important for the frequency read function.\n for line in maspointer.readlines():\n if 'CELL ' in line:\n cell = [float(i) for i in line.split(\" \") if '.' in i]\n break\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(\" \") if '.' in i]\n coords = coords[:-1]\n key = line.split(\" \")[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return cell, positions, sortkeylist", "def coords(self) -> dict[str, xr.IndexVariable]:\n return {self.ycoords.name: self.ycoords, self.xcoords.name: self.xcoords}", "def get_crds(self, name):\n\n coords = name.split('_')\n lat = float(coords[1])\n lon = float(coords[2])\n\n return [lon, lat]", "def get_residue_coords(self, resnum):\n return", "def xdscoordinates():\n # Coordinate sensitive to length of vectors, so need to ensure that\n # lengths of both s0 and s1 are equal\n coords = {\n \"s0\": (0.013141995425357206, 0.002199999234194632, 1.4504754950989514),\n \"s1\": (-0.01752795848400313, -0.24786554213968193, 1.4290948735525306),\n \"m2\": (0.999975, -0.001289, -0.006968),\n \"phi\": 5.83575672475 * math.pi / 180,\n }\n coords[\"cs\"] = CoordinateSystem(\n coords[\"m2\"], coords[\"s0\"], coords[\"s1\"], coords[\"phi\"]\n )\n return coords", "def setResNameCheckCoords(self):\n exit = False\n localDir = os.path.abspath('.')\n if not os.path.exists(self.tmpDir):\n os.mkdir(self.tmpDir)\n #if not os.path.exists(os.path.join(tmpDir, self.inputFile)):\n copy2(self.absInputFile, self.tmpDir)\n os.chdir(self.tmpDir)\n\n if self.ext == '.pdb':\n tmpFile = open(self.inputFile, 'r')\n else:\n cmd = '%s -i %s -fi %s -o tmp -fo ac -pf y' % \\\n (self.acExe, self.inputFile, self.ext[1:])\n self.printDebug(cmd)\n out = getoutput(cmd)\n if not out.isspace():\n self.printDebug(out)\n try:\n tmpFile = open('tmp', 'r')\n except:\n rmtree(self.tmpDir)\n raise\n\n tmpData = tmpFile.readlines()\n residues = set()\n coords = {}\n for line in tmpData:\n if 'ATOM ' in line or 'HETATM' in line:\n residues.add(line[17:20])\n at = line[0:17]\n cs = line[30:54]\n if coords.has_key(cs):\n coords[cs].append(at)\n else:\n coords[cs] = [at]\n #self.printDebug(coords)\n\n if len(residues) > 1:\n self.printError(\"more than one residue detected '%s'\" % str(residues))\n self.printError(\"verify your input file '%s'. 
Aborting ...\" % self.inputFile)\n sys.exit(1)\n\n dups = \"\"\n short = \"\"\n long = \"\"\n longSet = set()\n id = 0\n items = coords.items()\n l = len(items)\n for item in items:\n id += 1\n if len(item[1]) > 1: # if True means atoms with same coordinates\n for i in item[1]:\n dups += \"%s %s\\n\" % (i, item[0])\n\n# for i in xrange(0,len(data),f):\n# fdata += (data[i:i+f])+' '\n\n for id2 in xrange(id,l):\n item2 = items[id2]\n c1 = map(float,[item[0][i:i+8] for i in xrange(0,24,8)])\n c2 = map(float,[item2[0][i:i+8] for i in xrange(0,24,8)])\n dist2 = self.distance(c1,c2)\n if dist2 < minDist2:\n dist = math.sqrt(dist2)\n short += \"%8.5f %s %s\\n\" % (dist, item[1], item2[1])\n if dist2 < maxDist2: # and not longOK:\n longSet.add(str(item[1]))\n longSet.add(str(item2[1]))\n if str(item[1]) not in longSet:\n long += \"%s\\n\" % item[1]\n\n if dups:\n self.printError(\"Atoms with same coordinates in '%s'!\" % self.inputFile)\n self.printQuoted(dups[:-1])\n exit = True\n\n if short:\n self.printError(\"Atoms TOO close (< %s Ang.)\" % minDist)\n self.printQuoted(\"Dist (Ang.) Atoms\\n\" + short[:-1])\n exit = True\n\n if long:\n self.printError(\"Atoms TOO alone (> %s Ang.)\" % maxDist)\n self.printQuoted(long[:-1])\n exit = True\n\n if exit:\n if self.force:\n self.printWarn(\"You chose to proceed anyway with '-f' option. GOOD LUCK!\")\n else:\n self.printError(\"Use '-f' option if you want to proceed anyway. Aborting ...\")\n rmtree(self.tmpDir)\n sys.exit(1)\n\n resname = list(residues)[0]\n newresname = resname\n\n if resname.isdigit() or 'E' in resname[1:3].upper() or 'ADD' in resname.upper():\n newresname = 'R' + resname\n if not resname.isalnum():\n newresname = 'MOL'\n if newresname != resname:\n self.printWarn(\"In %s.lib, residue name will be '%s' instead of '%s' elsewhere\"\n % (self.acBaseName, newresname, resname))\n\n self.resName = newresname\n\n os.chdir(localDir)\n self.printDebug(\"setResNameCheckCoords done\")", "def get_startpos(self) -> Dict[AtomKey, numpy.array]:\n ...", "def loc_key(self):\r\n key = tuple(self.loc.coord)\r\n return (key)", "def _coordinate_flex_vocabulary(obj, latname=None, lonname=None):\n if (latname is not None) or (lonname is not None):\n try:\n lat = obj[latname]\n lon = obj[lonname]\n except KeyError:\n raise LookupError\n\n if (np.size(lat) > 1) and (np.size(lon) > 1):\n lat = np.atleast_1d(lat)\n lon = np.atleast_1d(lon)\n return lat, lon\n\n vocab = [\n {\"lat\": \"LATITUDE\", \"lon\": \"LONGITUDE\"},\n {\"lat\": \"latitude\", \"lon\": \"longitude\"},\n {\"lat\": \"lat\", \"lon\": \"lon\"},\n {\"lat\": \"LAT\", \"lon\": \"LON\"},\n ]\n for v in vocab:\n try:\n lat = obj[v[\"lat\"]]\n lon = obj[v[\"lon\"]]\n if (np.size(lat) > 1) and (np.size(lon) > 1):\n lat = np.atleast_1d(lat)\n lon = np.atleast_1d(lon)\n return lat, lon\n except KeyError:\n pass\n raise LookupError", "def filecoords(self):\n coords = sorted(self.map.keys())\n for coord in coords:\n yield coord, self.map[coord]", "def x(self):\n return self._kml['x']", "def getCoords(file):\n global demag\n name = file.split('.')[0]\n name = name.split('_')\n x = int(name[2])//demag\n y = int(name[3])//demag\n return(int(x),int(y))", "def _ident(self):\n key_values = [unicode(self.db_key[k]) for k in self.db_key]\n return \"Position (\" + ', '.join(key_values) + \")\"", "def _make_key(self):\n all_position_values = (chromosome_sort_key(self.chromosome), self.min_position, self.max_position, \n self.strand, self.position_before, self.position_after)\n return all_position_values", "def 
test_xyz_to_coords_and_element_numbers(self):\n coords, atom_nums = converter.xyz_to_coords_and_element_numbers(self.xyz1['dict'])\n self.assertEqual(coords,\n [[0.0, 0.0, 0.0],\n [0.6300326, 0.6300326, 0.6300326],\n [-0.6300326, -0.6300326, 0.6300326],\n [-0.6300326, 0.6300326, -0.6300326],\n [0.6300326, -0.6300326, -0.6300326]])\n self.assertEqual(atom_nums, [6, 1, 1, 1, 1])", "def galaxy_positions():\n hdulist1 = pf.open(source+'/kids_data/KiDS_DR3.1_G9_ugri_shear.fits')\n '''\n hdulist2 = pf.open('../kids_data/KiDS_DR3.1_G12_ugri_shear.fits')\n hdulist3 = pf.open('../kids_data/KiDS_DR3.1_G15_ugri_shear.fits')\n hdulist4 = pf.open('../kids_data/KiDS_DR3.1_G23_ugri_shear.fits')\n hdulist5 = pf.open('../kids_data/KiDS_DR3.1_GS_ugri_shear.fits')\n '''\n ra = hdulist1[1].data['RAJ2000'][:sample]\n dec = hdulist1[1].data['DECJ2000'][:sample]\n global maxra\n maxra = max(ra)\n global minra\n minra = min(ra)\n global maxdec\n maxdec = max(dec)\n global mindec\n mindec = min(dec)\n global bsize\n bsize = abs(max(maxra, maxdec) - min(mindec, minra))\n coords = np.column_stack([ra, dec])\n global SIZE\n SIZE = len(coords)\n print(maxra, maxdec, minra, mindec, SIZE)\n ctree = cKDTree(coords)\n return ctree", "def nm_152263_exons():\n return [(0, 234), (234, 360), (360, 494), (494, 612), (612, 683), (683, 759),\n (759, 822), (822, 892), (892, 971), (971, 7099)]", "def key(self, x):\r\n return tuple(x)", "def extractCoords(self):\n if not self.rank:\n logging.info('Extracting atomic poitions')\n\n # Extract coordinates from liggghts\n self.lmp.command('variable x atom x')\n x = Rxn.lmp.extract_variable(\"x\", \"group1\", 1)\n\n self.lmp.command('variable y atom y')\n y = Rxn.lmp.extract_variable(\"y\", \"group1\", 1)\n\n self.lmp.command('variable z atom z')\n z = Rxn.lmp.extract_variable(\"z\", \"group1\", 1)\n\n coords = np.zeros((self.lmp.get_natoms(),3))\n\n for i in range(self.lmp.get_natoms()):\n coords[i,:] = x[i], y[i], z[i]\n\n self.lmp.command('variable x delete')\n self.lmp.command('variable y delete')\n self.lmp.command('variable z delete')\n\n return coords", "def getAtmsAndCoords(self, atmNames):\n childNames = list(self.childByName.keys())\n check = list(map(lambda x, cn = childNames: x in cn, atmNames))\n # all the given names should be atoms of the residue\n if 0 in check:\n return 0, None\n coords = []\n # short cut to coords.append\n coordsappend = coords.append\n cName = self.childByName\n for name in atmNames:\n atm = cName[name]\n if atm.alternate: coordsappend(atm.getAverageCoords())\n else:\n coordsappend(atm.coords)\n return 1, coords", "def getCoords(self):\n if self._ra == \"\" or self._dec == \"\":\n raise ValueError('Object named ' + self._name +' has no coordinates in database.')\n ra = self._ra.split(\":\")\n dec = self._dec.split(\":\")\n raTuple = (int(ra[0]), int(ra[1]), float(ra[2]))\n decTuple = (dec[0][0], int(dec[0][1:]), int(dec[1]), float(dec[2]))\n return raTuple, decTuple", "def _get_x(self, closed_orbits):\n closed_orbits_energy = [orbit[0] for orbit in closed_orbits]\n closed_orbits_x = [orbit[1:][0][0] for orbit in closed_orbits]\n closed_orbits_x_dict = dict(zip(closed_orbits_energy, closed_orbits_x))\n self.closed_orbits_x = closed_orbits_x_dict", "def getSHSIDDict():\n m = {}\n fin = open(\"SHSDataset/Chromas/msd_keys_mapping.cly\")\n for l in fin.readlines():\n l = l.rstrip()\n f = l.split(\",\")\n m[f[0]] = int(f[1])\n fin.close()\n return m", "def parseX(self):\n\t\treturn self._dict.keys()", "def define_ccd_pos(ccd_pos_dict, raft_name, slot_names, 
xpos, ypos):\n ccd_pos_dict[raft_name] = {slot:[xpos[i],ypos[i]] for i,slot in enumerate(slot_names)}", "def get_coords(self) -> Tuple[int]:\r\n return self.file, self.rank", "def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions", "def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions", "def xd_element(name):\n try:\n name = name[:2]\n except:\n pass\n try:\n covalence_radius[name]\n except:\n name = name[0]\n return name", "def xunits(self):\n return self._kml['xunits']", "def getAtomIndices( structure, resname ):\n atom_indices_ligand = []\n topology = structure.topology\n for atom in topology.atoms():\n if str(resname) in atom.residue.name:\n atom_indices_ligand.append(atom.index)\n\n return atom_indices_ligand", "def keys(cls, name, position: tuple):\n assert len(position) == 2\n row, col = position\n assert isinstance(row, int) and isinstance(col, int)\n my_remote = cls._keys[name]\n return my_remote[row][col]", "def loadDCPos(self):\n with open(gv.DC_POS_PATH, 'r') as fh: \n for line in fh:\n dcID, _, dcPos = line.rstrip().split(';')\n self.centerDict[dcID] = [float(i) for i in dcPos.split(',')]", "def n_coords(self):\n trans_x = np.arange(-self.trans_dia / 2, self.trans_dia / 2 + 0.002, 0.002)\n a_n = [(trans_x[n + 1] - trans_x[n]) / 2 for n in range(self.N)]\n cx_n = [trans_x[n] + (trans_x[n + 1] - trans_x[n]) / 2 for n in range(self.N)]\n coords = [(x, 0) for x in cx_n]\n d = {'trans_x': trans_x, 'A_n': a_n, 'N_coords': coords}\n return d", "def getNames(self, resname, atomname):\n rname = None\n aname = None\n if resname in self.map:\n res = self.map[resname]\n if res.hasAtom(atomname):\n atom = res.atoms[atomname]\n aname = atom.name\n rname = atom.resname\n return rname, aname", "def get_coord(path, n_i, n_k):\n fnm = \"%s/coord_mpi%02i%02i.nc\" % (path, n_i, n_k)\n fnc = netcdf.netcdf_file(fnm, 'r')\n x = fnc.variables['x'][:, :]\n z = fnc.variables['z'][:, :]\n return x, z", "def data_xy(position) -> dict:\n\n return {\"x\": position[0], \"y\": position[1]}", "def getSearchSpaceCoords(self):", "def __getitem__(self, k):\n return self._coords[k]", "def generatecoordinates(self, x, y):\n entry = []\n for u, v in zip(x, y):\n if u == \"_\":\n entry.append(v)\n else:\n entry.append(v-1)\n\n return entry", "def term_name_key(name_tuple: tuple[str, list[float]]) -> tuple[str, str]:\n t_type, atoms = name_tuple[0].split('(', 1)\n atoms = atoms[:-1] # Delete the last closing parenthesis\n t_type_words = t_type.rstrip('0123456789') # In case of polyterm, delete the numbers in the end\n match = re.match(r\"([a-zA-Z]+)([0-9]+)\", t_type, re.I)\n t_type_numbers = int(match.groups()[-1]) if match else ''\n return t_type_words, atoms, t_type_numbers", "def build_coordinates_map(self):\n\t\t# We need to keep track of which dimensions we can map\n\t\tmapped = []\n\t\t\n\t\t# First lets check for standard 1D coordinate variables. 
These are variables\n\t\t# that have the same name as one of the variables dimensions or 1D variables\n\t\t# sharing a dimension with the variable \n\t\tfor di in range (0,len(self.variable.dimensions)):\n\n\t\t\tdimension = self.variable.dimensions[di]\n\t\t\tdim_name = dimension.name\n\n\t\t\t# Find variables with same name as the dimension\n\t\t\tif dim_name in self.variable.group.variables.keys():\n\n\t\t\t\tcoord_variable = self.variable.group.variables[dim_name]\n\t\t\t\tself.coordinates_variables.append(coord_variable)\n\t\t\t\tmapped.append(dim_name)\n\t\t\t\t\n\t\t\t\t# See if we can use the units to find out what spatial/temporal variable this is from \n\t\t\t\t# the CF conventions\n\t\t\t\tcoordinate_name = cf_units2coordinates(coord_variable.get_attribute('units'))\n\t\t\t\t\n\t\t\t\t# If we can't we just default to the dimension name\n\t\t\t\tif not coordinate_name:\n\t\t\t\t\tcoordinate_name = dim_name\n\t\t\t\t\n\t\t\t\tself.coordinates_mapping[coordinate_name] = {'variable':dim_name, 'map':[di]}\t\t\t\n\t\t\t\t\n\t\t# Next lets see if we have a \"coordinates\" attribute we can use (CF convention)\n\t\tif self.variable.get_attribute('coordinates'):\n\t\t\t\n\t\t\tself.coordinates_names = self.variable.get_attribute('coordinates').split()\n\t\t\t\t\t\t\n\t\t\t# Find each associated variable\n\t\t\tfor name in self.coordinates_names:\n\t\t\t\t\n\t\t\t\tif name in self.variable.group.variables.keys():\n\t\t\t\t\t\n\t\t\t\t\tcoord_variable = self.variable.group.variables[name]\n\t\t\t\t\tself.coordinates_variables.append(coord_variable)\n\n\t\t\t\t\t#print 'got coordinate variable ', coord_variable, coord_variable.dimensions\n\t\t\t\t\t# See if we can find out what spatial/temporal variable this is\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcoordinate_name = cf_dimensions[self.variable.group.variables[name].get_attribute('units')]\n\t\t\t\t\texcept:\n\t\t\t\t\t\tcoordinate_name = name\n\n\t\t\t\t\t# Create the coordinates_mapping entry but with an empty dimensions map for now\n\t\t\t\t\tself.coordinates_mapping[coordinate_name] = {'variable':name, 'map':[], 'coordinates': self.coordinates_names}\n\t\t\t\t\t\t\n\t\t\t\t\t# Add each coordinates variable dimension to the mappable list and generate the map\n\t\t\t\t\t#print 'generating dimensions map for ', coord_variable.dimensions\n\t\t\t\t\tfor dimension in coord_variable.dimensions:\n\t\t\t\t\t\t#print dimension, coord_variable.dimensions\n\t\t\t\t\t\tself.coordinates_mapping[coordinate_name]['map'].append(self.variable.dimensions.index(dimension))\n\t\t\t\t\t\tif not dimension.name in mapped:\n\t\t\t\t\t\t\tmapped.append(dimension.name)\n\t\t\t\t\t\t\t\n\t\t# Setup shortcut to identify time coordinate variable\n\t\ttry:\n\t\t\tself.time_variable = self.variable.group.variables[self.coordinates_mapping['time']['variable']]\n\t\t\tself.time_dim = self.coordinates_mapping['time']['map'][0]\n\t\texcept:\n\t\t\tself.time_variable = None\n\t\t\tself.time_dim = None\n\t\t\t\n\t\t# Shortcuts for latitude and longitude coordinate variables\n\t\ttry:\n\t\t\tself.latitude_variable = self.variable.group.variables[self.coordinates_mapping['latitude']['variable']]\n\t\texcept:\n\t\t\tself.latitude_variable = None\n\n\t\ttry:\n\t\t\tself.longitude_variable = self.variable.group.variables[self.coordinates_mapping['longitude']['variable']]\n\t\texcept:\n\t\t\tself.longitude_variable = None\n\n\t\ttry:\n\t\t\tself.level_variable = self.variable.group.variables[self.coordinates_mapping['level']['variable']]\n\t\t\tself.level_dim = 
self.coordinates_mapping['level']['map'][0]\n\t\texcept:\n\t\t\tself.level_variable = None\n\t\t\tself.level_dim = None", "def getCoordinates(self):\n return list(self.gridVars.keys())", "def _position_to_id(self, x, y):\n return x + y * self.n", "def _key(self):\n return (self.name, self.array_type.upper(), self.values)", "def keys():", "def _key(self):\n return (self.name, self.struct_types, self.struct_values)", "def get_row_list_sorting_key(x):\n name, count = x\n if '_' not in name:\n return name\n s = name.split('_')\n end = s[-1]\n start = '_'.join(s[:-1])\n if utils.is_int(end):\n return (start, int(end))\n return name", "def get_row_list_sorting_key(x):\n name, count = x\n if '_' not in name:\n return name\n s = name.split('_')\n end = s[-1]\n start = '_'.join(s[:-1])\n if is_int(end):\n return (start, int(end))\n return name", "def __compound_key(key):\n x_int = int(key[0])\n y_int = int(key[1])\n zeros = len(str(y_int))\n key = x_int * (10 ** zeros) + y_int\n\n return key", "def key(self, name):\n return name", "def keys(self):\n return _osgAnimation.mapVertexInfluence_keys(self)", "def keys(self):\n\t\treturn tuple(self.dist.keys())", "def id_from_coord(n, Nx):\n return int(n[0] * Nx**2 + n[1] * Nx + n[2])", "def _get_sort_key(self) -> np.array:\n data = self.reader.GetOutput()\n raw_cell_coords = np.empty((data.GetNumberOfCells(), 3))\n for i in range(data.GetNumberOfCells()):\n cell_corners = vtk_to_numpy(data.GetCell(i).GetPoints().GetData())\n raw_cell_coords[i] = np.array(\n [cell_corners[:, n].mean() for n in range(cell_corners.shape[1])]\n )\n\n cell_coords = np.array(\n [tuple(line) for line in raw_cell_coords],\n dtype=[(\"r\", \"f4\"), (\"phi\", \"f4\"), (\"z\", \"f4\")],\n )\n return cell_coords.argsort(order=[\"r\", \"phi\"])", "def put_coord_names(self, coord_names):\n coord_names = [\"{0:{1}s}\".format(x, MAX_STR_LENGTH)[:MAX_STR_LENGTH]\n for x in coord_names]\n ierr = exolib.py_expcon(self.exoid, coord_names)\n if ierr:\n raise ExodusIIWriterError(\"Error putting coordinate names\")", "def xcoords(self) -> xr.IndexVariable:\n xcoords = self._obj[self.x_dim]\n if self.x_dim not in self._obj.coords:\n for key in list(self._obj.coords.keys()):\n if key.startswith(self.x_dim):\n xcoords = self._obj.coords[key]\n break\n if xcoords.ndim == 2 and list(xcoords.dims).index(self.x_dim) != 1:\n raise ValueError(\n \"Invalid raster: dimension order wrong. Fix using\"\n f'\".transpose(..., {self.y_dim}, {self.x_dim})\"'\n )\n if xcoords.size < 2 or (xcoords.ndim == 2 and xcoords.shape[1] < 2):\n raise ValueError(f\"Invalid raster: less than 2 cells in x_dim {self.x_dim}\")\n return xcoords", "def x(self):\n return self[\"x\"]", "def clean_posiResNums(self) -> None:\n position_copy = self.POSITION\n pos = position_copy.content\n tmpN = \"\"\n tmpID = 0\n tmpOldID = pos[0].resID\n\n for p in pos:\n # print(p)\n # print(tmpN,tmpID)\n if p.resName == tmpN and p.resID == tmpOldID: # same residue as before\n p.resID = tmpID\n elif (\n p.resName == tmpN and p.resID != tmpOldID): # same resiname but diff ID (double? 
- this is a problem!)\n tmpOldID = p.resID\n tmpID += 1\n p.resID = tmpID\n else: # next name and residue id\n tmpID += 1\n tmpN = p.resName\n tmpOldID = p.resID\n p.resID = tmpID\n\n self.POSITION.content = pos", "def get_hash(self):\n s = super(Point, self).get_hash()\n for c in self.coordinate:\n s += \"_%f\" % c\n return s", "def labelDEShaw_rmsd(filename='bpti-rmsd-alpha-dspace.npy'):\n settings = systemsettings()\n logging.info('Loading Pre-Calc RMSD Distances from: %s ','bpti-rmsd-alpha-dspace.npy')\n rms = np.load(filename)\n prox = np.array([np.argsort(i) for i in rms])\n theta = settings.RMSD_THETA\n logging.info('Labeling All DEShaw Points. Usng THETA=%f', theta)\n rmslabel = []\n for i in range(len(rms)):\n A = prox[i][0]\n proximity = abs(rms[i][prox[i][1]] - rms[i][A]) #abs\n B = prox[i][1] if proximity < theta else A\n rmslabel.append((A, B))\n return rmslabel", "def get_positions(directory): \n positions = {}\n names = {}\n pos_dict = {'1': \"GK\", '2': \"DEF\", '3': \"MID\", '4': \"FWD\"}\n fin = open(directory + \"/players_raw.csv\", 'rU',encoding=\"utf-8\")\n reader = csv.DictReader(fin)\n for row in reader:\n positions[int(row['id'])] = pos_dict[row['element_type']] \n names[int(row['id'])] = row['first_name'] + ' ' + row['second_name']\n return names, positions", "def __nc_geo_data(self, geo_dsets):\n res = {}\n if geo_dsets is None:\n geo_dsets = 'latitude_center,longitude_center'\n\n for key in geo_dsets.split(','):\n if key in self.fid['/instrument'].variables.keys():\n res[key] = self.fid['/instrument/{}'.format(key)][:]\n\n return res", "def exog_names(self):\n return self.data.xnames", "def _find_coordinates(self, coords, ref):\n result = []\n temp_fastq_length = 500\n reference_seq = ''\n with open(ref) as f:\n lines = f.readlines()[1:]\n for line in lines:\n reference_seq += line.strip()\n with open('temp_index/temp_index.fasta', 'w') as fw:\n fw.write('>{}\\n{}'.format(self.read_id, self.seq))\n subprocess.run('bwa index temp_index/temp_index.fasta', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)\n for coord in coords:\n with open('temp_index/coordinate_rDNA.fastq', 'w') as fw:\n fw.write('>temp\\n{}\\n+\\n{}\\n'.format(reference_seq[coord-1:coord+temp_fastq_length-1], 'J' * temp_fastq_length))\n # with -a option, multiple hits are more clearly shown\n utilities.bwa_mapping('temp_index/temp_index.fasta', 'temp_index/coordinate_rDNA.fastq', 'temp_index/temp_sam4coord.sam', multi=True)\n with open('temp_index/temp_sam4coord.sam') as samf:\n map_result = samf.readlines()[2:]\n for mapping in map_result:\n row = mapping.strip().split()\n AS = int(mapping.strip().split('AS:i:')[1].split()[0])\n flag = int(row[1])\n if utilities.easy_flag(flag, 16) != 1:\n direction = '+'\n else:\n direction = '-'\n mapped_coord = int(row[3])\n if AS > 0.3 * temp_fastq_length:\n result.append([coord, mapped_coord, direction])\n return result", "def _generate_coords(self):\n coords = np.dstack([self.X.ravel(), self.Y.ravel()])[0]\n return coords, spatial.cKDTree(coords)", "def coordinates(self):", "def save_point_name_index(reader):\n point_names = [None]\n # Gets the given number associated with a point that the values are indexed on\n for row in reader:\n point_name_index = re.match('Point_(\\d+):', row[0])\n if point_name_index is None:\n break # Time Interval\n point_names.insert(int(point_name_index.group(1)), row[1])\n\n return point_names", "def parseX(self):\n\t\treturn self._dictOut.keys()", "def atom_to_internal_coordinates(self, verbose: bool = ...) 
-> None:\n ...", "def atom_to_internal_coordinates(self, verbose: bool = ...) -> None:\n ...", "def extended_xyz_parse(xyz_d):\n \n s_properties = ['rot_A', \n 'rot_B', \n 'rot_C', \n 'dipole', \n 'polarizability', \n 'homo', \n 'lumo', \n 'band_gap', \n 'ese', \n 'zpe', \n 'u_0K', \n 'u_298.15K', \n 'h_298.15K', \n 'f_298.15K', \n 'cp_298.15K']\n\n mol_properties = {}\n\n\n lines = xyz_d.replace('*^','e').splitlines()\n \n r_no_atoms = lines[0]\n no_atoms = int(r_no_atoms)\n\n r_scalars = lines[1]\n mol_id = r_scalars.split()[:2]\n scalar_properties = np.array(r_scalars.split()[2:], np.float32)\n\n r_mcoords = lines[2:2+no_atoms]\n symbols = [m.split()[0] for m in r_mcoords]\n coords = np.array([m.split()[1:4] for m in r_mcoords], dtype=np.float32)\n \n charges = np.array([m.split()[4] for m in r_mcoords], dtype=np.float32)\n\n r_vibfreqs = lines[2+ no_atoms]\n vib_freqs = np.array([float(freq) for freq in r_vibfreqs.split()], dtype=np.float32)\n\n smiles = lines[3+no_atoms].split()\n inchi = lines[4+no_atoms].split()\n\n mol_properties['no_atoms'] = no_atoms\n mol_properties['mol_id'] = mol_id\n \n for i, p in enumerate(s_properties):\n mol_properties[p] = scalar_properties[i]\n\n mol_properties['symbols'] = symbols\n mol_properties['coords'] = coords\n mol_properties['charges'] = charges\n mol_properties['vib_freqs'] = vib_freqs\n mol_properties['smiles'] = smiles\n mol_properties['inchi'] = inchi\n \n return mol_properties", "def make_reference(self):\r\n d = dict([(\"M\", 1000),\r\n (\"CM\", 800),\r\n (\"D\", 500),\r\n (\"CD\", 300),\r\n (\"C\", 100),\r\n (\"XC\", 80),\r\n (\"L\", 50),\r\n (\"XL\", 30),\r\n (\"X\", 10),\r\n (\"IX\", 8),\r\n (\"V\", 5),\r\n (\"IV\", 3),\r\n (\"I\", 1),])\r\n return d", "def key_to_coordinates(key):\n stripkey = key.strip(\"(\").strip(\")\").split(\", \")\n point_coordinates = tuple(float(elem) for elem in stripkey)\n return point_coordinates", "def coordinates(self, name, start=None, end=None):\n record = self.process(name)\n if not start and not end:\n start = 1\n end = record.end - record.start + 1\n positions = {}\n match_positions = []\n\n if record.strand == '+':\n _start = 1\n for relative, actual in enumerate(range(record.start - 1, record.end),\n start=_start):\n positions[relative] = actual\n for pos in range(start, end + 1):\n match_positions.append(positions[pos])\n return [(record.scaffold, min(match_positions), max(match_positions) + 1,\n record.mirbase_name, 0, record.strand)]\n\n elif record.strand == '-':\n _start = 1\n for relative, actual in enumerate(reversed(range(record.start - 1,\n record.end)), start=_start):\n positions[relative] = actual\n for pos in range(start, end + 1):\n match_positions.append(positions[pos])\n return [(record.scaffold, min(match_positions), max(match_positions) + 1,\n record.mirbase_name, 0, record.strand)]", "def landmark_x(self):\n ######## TODO: NATSORT columns before returning #######\n x_cols = [col for col in self.landmark_columns if \"x\" in col]\n return self[x_cols]", "def landmark_x(self):\n ######## TODO: NATSORT columns before returning #######\n x_cols = [col for col in self.landmark_columns if \"x\" in col]\n return self[x_cols]", "def mapToCoordinates(self, shot):\r\n toks = shot.split(\"-\")\r\n return Coordinates(ord(toks[0]) - ord(\"A\"), int(toks[1]) - 1)", "def dictkey(self):\n return self.start + \",\" + self.end", "def getResidueInformation(self, resIDs=None, atomIDs=None):\n if resIDs is None:\n resIDs = set ()\n else:\n resIDs = set (resIDs)\n\n if atomIDs is not None:\n for i in 
atomIDs:\n resIDs.add (self._atomInfo[i][\"residue\"])\n return self.getResidueInformation (resIDs=resIDs)\n\n resIDs = list (resIDs)\n resIDs.sort ()\n str=''\n for res in resIDs:\n str=str+self._residueInfo[res][\"name\"]\n print self._shortenResidue (str)\n str = ''\n return dict ((resID, self._residueInfo[resID]) for resID in resIDs)", "def decl_key(decl):\n prop = decl.split(':')[0] # get property name\n if str(prop) in SHORTHAND_REL_inv:\n return SHORTHAND_REL_inv[str(prop)]\n else:\n return str(prop)", "def getKromosom(self):\n intx = int(\"\".join(self.kromosom[:4]),2)\n inty = int(\"\".join(self.kromosom[4:]),2)\n return [intx,inty]", "def _euclidean_dist_loc(map_x: object) -> dict:\n \n local_distance = {}\n for node, connections in enumerate(map_x.roads):\n nx, ny = map_x.intersections[node]\n for connection in connections:\n cx, cy = map_x.intersections[connection]\n distance = math.sqrt( (nx-cx)**2 + (ny-cy)**2 )\n local_distance.setdefault(node, {})\n local_distance[node].update( {connection: distance} )\n return local_distance", "def getInternalCoordinateDefinitions(self):\n coord_types, _atom_indices = self.force_field.getInternalCoordinateDefinitions()\n atom_indices = []\n for k,ktype in enumerate(coord_types):\n # indices of atoms involved in internal coordinate k\n I, J, K, L = _atom_indices[4*k], _atom_indices[4*k+1], _atom_indices[4*k+2], _atom_indices[4*k+3]\n if ktype == 'B':\n bond = (I,J)\n atom_indices.append( bond )\n elif ktype == 'A':\n angle = (I,J,K)\n atom_indices.append( angle )\n elif ktype == 'D':\n dihedral = (I,J,K,L)\n atom_indices.append( dihedral )\n elif ktype == 'I':\n inversion = (I,J,K,L)\n atom_indices.append( inversion )\n else:\n raise ValueError(\"Unknown type of internal coordinate '%s'!\" % ktype)\n \n return coord_types, atom_indices", "def get_pointmentions_for_points(fxfn):\n print \"Get pointmention pk for point pk\"\n p2pm = {}\n fd = gzip.open(fxfn)\n for idx, ll in enumerate(fd):\n if '\\\"ui.pointmention\\\"' in ll:\n jso = js.loads(ll.strip().strip(\",\"))\n p2pm.setdefault(jso[\"fields\"][\"point\"], set()).add(jso[\"pk\"])\n if idx and not idx % 10000:\n print \"- Done {} lines, {}\".format(idx, time.strftime(\"%H:%M:%S\",\n time.localtime()))\n return p2pm", "def test_modify_coords(self):\n xyz1 = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((1.53830201, 0.86423425, 0.07482439), (0.94923576, -0.20847619, -0.03881977),\n (-0.56154542, -0.31516675, -0.05011465), (-1.18981166, 0.93489731, 0.17603211),\n (1.49712659, -1.15833718, -0.15458647), (-0.87737433, -0.70077243, -1.02287491),\n (-0.87053611, -1.01071746, 0.73427128), (-0.48610273, 1.61361259, 0.11915705))}\n xyz2 = {'symbols': ('C', 'C', 'N', 'H', 'H', 'H'), 'isotopes': (12, 12, 14, 1, 1, 1),\n 'coords': ((-0.48629842, 0.00448354, 0.00136213), (0.97554967, -0.0089943, -0.00273253),\n (2.13574353, -0.01969098, -0.00598223), (-0.88318669, -0.63966273, -0.78887729),\n (-0.87565097, -0.35336611, 0.95910491), (-0.86615712, 1.01723058, -0.16287498))}\n xyz3 = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-2.77437517, 0.17200669, 0.18524832), (-1.64613785, -0.19208096, 0.80434075),\n (-0.40774525, 0.26424657, -0.07952902), (-0.26203276, 2.09580334, -0.05090198),\n (-0.67096595, -0.16397552, -1.42109845), (0.89264107, -0.40136991, 0.41083574),\n (2.12441624, -0.1300863, -0.44918504), 
(-1.50623429, -1.27619307, 0.9524955),\n (-1.45114032, 0.18501518, 1.82167553), (-1.59654975, 2.25615634, -0.09052499),\n (-1.65730431, -0.11079255, -1.400057), (0.74870779, -1.48997779, 0.41386971),\n (1.10331691, -0.11082471, 1.44762119), (2.41262211, 0.92463409, -0.42840126),\n (1.95758158, -0.4244074, -1.48990015), (2.97418137, -0.70882619, -0.0719403))}\n xyz4 = {'symbols': ('C', 'C', 'O', 'C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 16, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-1.2713687423422115, -0.7423678681688866, -0.6322577211421921),\n (-0.08008635702808505, -0.40741599130374034, 0.2550353232234618),\n (-0.5452666768773297, -0.20159898814584978, 1.588840559327411),\n (0.6158080809151276, 0.8623086771891557, -0.21553636846891006),\n (1.9196775903993375, 1.0155396004927764, 0.5174563928754532),\n (3.0067486097953653, 1.0626738453913969, -0.05177300486677717),\n (-2.012827991034863, 0.06405231524730193, -0.6138583677564631),\n (-0.9611224758801538, -0.9119047827586647, -1.6677831987437075),\n (-1.7781253059828275, -1.6433798866337939, -0.27003123559560865),\n (0.6204384954940876, -1.2502614603989448, 0.2715082028581114),\n (-1.0190238747695064, -1.007069904421531, 1.8643494196872146),\n (0.014234510343435022, 1.753076784716312, -0.005169050775340246),\n (0.827317336700949, 0.8221266348378934, -1.2893801191974432),\n (1.8498494882204641, 1.107064846374729, 1.6152311353151314))}\n xyz5 = {'symbols': ('N', 'C', 'C', 'C', 'H', 'H', 'C', 'C', 'C', 'C', 'H', 'H', 'C', 'C', 'C', 'H', 'C', 'C',\n 'N', 'H', 'H', 'C', 'H', 'C', 'C', 'C', 'H', 'H', 'H', 'H', 'C', 'C', 'C', 'H', 'H', 'H',\n 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'O', 'O', 'C', 'O', 'H', 'H', 'H'),\n 'isotopes': (14, 12, 12, 12, 1, 1, 12, 12, 12, 12, 1, 1, 12, 12, 12, 1, 12, 12, 14, 1, 1, 12, 1, 12, 12,\n 12, 1, 1, 1, 1, 12, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 16, 16, 12, 16, 1, 1, 1),\n 'coords': ((-0.766219, -0.248648, -0.347086), (0.667812, -0.150498, -0.496932),\n (-1.490842, 1.000959, -0.245328), (1.311194, -1.339578, -1.19388),\n (0.976451, 0.831716, -0.911173), (1.231101, -0.062221, 0.660162),\n (-1.346406, -1.400789, 0.294395), (-1.022138, 2.069095, 0.533928),\n (-2.673271, 1.125443, -1.008282), (2.575265, -0.94966, -1.974365),\n (1.534634, -2.14679, -0.467576), (0.584227, -1.791819, -1.905459),\n (-0.574689, -2.103356, 1.24726), (-2.643838, -1.861964, -0.035016),\n (-1.73741, 3.268914, 0.549347), (-0.105632, 1.96688, 1.126589),\n (-3.134563, -0.04419, -1.826788), (-3.378705, 2.332664, -0.970971),\n (3.611589, -0.28425, -1.113057), (2.30114, -0.222978, -2.774031),\n (2.969795, -1.853671, -2.489377), (-1.04268, -3.284134, 1.815898),\n (0.388329, -1.696921, 1.570938), (-3.645512, -1.174123, -0.925823),\n (-3.088386, -3.061615, 0.555145), (-2.911462, 3.400813, -0.198004),\n (-1.376219, 4.102013, 1.150524), (-3.935589, 0.254447, -2.531702),\n (-2.298405, -0.411572, -2.461402), (-4.293927, 2.444159, -1.549116),\n (4.776265, 0.123769, -1.959689), (4.064268, -1.169457, 0.001273),\n (-2.30222, -3.77607, 1.457834), (-0.433782, -3.814872, 2.545573),\n (-4.135291, -1.935447, -1.571709), (-4.453058, -0.768805, -0.272612),\n (-4.078335, -3.442593, 0.302875), (-3.465321, 4.337257, -0.179068),\n (5.500278, 0.67338, -1.336133), (5.30611, -0.707961, -2.446036),\n (4.433161, 0.821539, -2.74083), (4.954327, -0.743379, 0.488676),\n (4.300156, -2.200598, -0.295594), (3.265545, -1.194959, 0.769181),\n (-2.671885, -4.702569, 1.890597), (1.78286, 0.089948, 1.873468),\n (1.758606, 
1.382484, 2.130308), (2.973471, 2.040706, 1.623336),\n (2.813335, 2.256698, 0.248083), (2.919925, 3.030613, 2.105087),\n (3.858517, 1.438684, 1.858856), (3.005024, 1.410381, -0.277159))}\n xyz6 = {'symbols': ('N', 'C', 'C', 'H', 'C', 'H', 'H', 'N', 'H', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H',\n 'H', 'H', 'O', 'O', 'H', 'C', 'H', 'H', 'O', 'H'),\n 'isotopes': (14, 12, 12, 1, 12, 1, 1, 14, 1, 12, 12, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 16, 16, 1, 12, 1, 1, 16, 1),\n 'coords': ((2.608231, -0.458895, 1.629197), (2.408715, 0.132166, 0.318653),\n (1.174426, -0.323822, -0.471554), (3.304408, -0.071078, -0.291093),\n (-0.13532, 0.016735, 0.225918), (1.210534, 0.150539, -1.46601),\n (1.221625, -1.416078, -0.631885), (-1.316045, -0.574442, -0.379686),\n (-0.086456, -0.362851, 1.260573), (-1.468231, -0.411368, -1.77232),\n (-2.505886, -0.419831, 0.432347), (-2.403425, -0.886127, -2.107496),\n (-0.621099, -0.850903, -2.320815), (-3.364172, -0.88926, -0.068909),\n (-2.767365, 0.637288, 0.628231), (-2.360065, -0.927144, 1.400068),\n (2.574849, -1.475283, 1.579253), (1.886591, -0.170591, 2.284831),\n (2.375177, 1.228181, 0.441157), (-0.231725, 1.121336, 0.301367),\n (-1.455199, 0.947478, -2.255384), (-2.58006, 1.611276, -1.811891),\n (-3.315019, 1.53868, -2.760245), (-3.713498, 1.338038, -4.025244),\n (-4.754452, 0.99077, -4.021055), (-3.584519, 2.351475, -4.444827),\n (-2.87635, 0.381401, -4.513467), (-1.966974, 0.665311, -4.338804))}\n mol1 = converter.molecules_from_xyz(xyz1)[1]\n mol2 = converter.molecules_from_xyz(xyz2)[1]\n mol3 = converter.molecules_from_xyz(xyz3)[1]\n mol4 = converter.molecules_from_xyz(xyz4)[1]\n mol5 = converter.molecules_from_xyz(xyz5)[1] # a TS\n mol6 = converter.molecules_from_xyz(xyz6)[1] # a TS\n\n # test atom modification types\n modification_type = 'atom'\n\n # test R_atom modification\n indices, new_val = [0, 1], 1.5\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.06385450948408691, 1.6253138441202686, 0.042870253583423557),\n (-0.02582727173313104, 0.39833637030950975, 0.9010563970736782),\n (-0.02582727173313104, -1.003336361301907, 0.3272239637891734),\n (-0.02582727173313104, -1.003336361301907, -1.0899990532469916),\n (-0.08138177769352953, 0.465646654907214, 2.0002403496097383),\n (0.865704477722866, -1.5264119285073852, 0.6825623354173815),\n (-0.9185767861007101, -1.5268489957651346, 0.6785930201570352),\n (0.14577602706217008, -0.07998849407327513, -1.367625604543457))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [1, 0], -1.5\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.01167393998357115, -0.5225807439329089, -0.9899595616178738),\n (-0.040525509131742084, 0.26844387347263365, -2.2633625897949208),\n (0.01167393998357115, -0.5225807439329089, 1.4216698859880004),\n (0.01167393998357115, 0.8926022581407576, 1.3456557382334218),\n (0.11202785529567173, -2.2718515121487206, 0.04691079079738447),\n (-0.8954040276884763, -0.8508241498293034, 1.9356427400340799),\n (0.8880330020652463, -0.8439168226596885, 1.990234136037933),\n (-0.13167393678263156, 1.1200467154192293, 0.4039467156910099))}\n new_xyz = 
converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), - new_val, 5)\n\n # test A_atom modification\n indices, new_val = [2, 1, 0], 140\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.011940763595588438, -0.90654939253321, -1.1784203714214114),\n (0.011940763595588438, -0.90654939253321, 0.05065327345758153),\n (-0.02531707366035523, 0.06629439921242253, 1.2108932996837143),\n (0.011940763595588438, 1.5283906429141458, 0.05806971900412017),\n (0.03285612994605798, -1.8458593499019589, 0.6277855724118742),\n (-0.9645745795119229, 0.3758422785924207, 1.4467600455414558),\n (0.8166299978590752, 0.37902049128771864, 1.551524925579085),\n (-0.10465928281651019, 1.2266969334608921, -0.8663115945839973))}\n\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n # test changing an angle to 180 degrees\n indices, new_val = [0, 1, 2], 180\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.0019281473980474666, 1.559641181574566, 1.013927346529066),\n (-0.0019281473980474772, 0.42219553322547265, 0.548267146825631),\n (-0.0019281473980474772, -0.9794771983859442, -0.025565286458873793),\n (-0.0019281473980474772, -0.9794771983859442, -1.4427883034950388),\n (-0.05748265335844597, 0.4895058178231769, 1.6474510993616909),\n (0.8896036020579495, -1.5025527655914221, 0.32977308516933435),\n (-0.8946776617656266, -1.5029898328491718, 0.32580376990898796),\n (0.16967515139725364, -0.05612933115731222, -1.7204148547915041))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val)\n\n # test changing a 180 degree angle to something else\n indices, new_val = [0, 1, 2], 120\n expected_xyz = {'symbols': ('C', 'C', 'N', 'H', 'H', 'H'), 'isotopes': (12, 12, 14, 1, 1, 1),\n 'coords': ((0.7757362507465277, 0.4478716325630875, 0.7767867108403768),\n (-0.3207007101270898, -0.18515666614565915, 0.04582870107149262),\n (-0.3207007101270898, -0.18515666614565915, -1.1144190466784232),\n (-0.3207007101270898, 0.8374974028016162, 1.8964626512298475),\n (-1.2063452316056904, -0.6964838693490394, 1.8964625790172804),\n (0.5649437124447699, -0.6964840572534022, 1.896462566459638))}\n new_xyz = converter.modify_coords(coords=xyz2, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol2)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol2), new_val, 5)\n\n # test D_atom modification\n indices, new_val = [0, 1, 2, 3], 30\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.3812553590829658, 1.4249753409811934, 
0.24885596109763952),\n (0.13588307254069157, 0.47112021672976, 0.8262208968300058),\n (0.13588307254069157, -0.9305525148816568, 0.25238846354550093),\n (0.13588307254069157, -0.9305525148816568, -1.1648345534906641),\n (0.08032856658029308, 0.5384305013274643, 1.9254048493660656),\n (1.0274148219966885, -1.4536280820871348, 0.6077268351737091),\n (-0.7568664418268876, -1.4540651493448844, 0.6037575199133627),\n (0.30748637133599266, -0.007204647653024865, -1.4424611047871294))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [3, 2, 1, 0], -30\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.17268751280677364, -0.941696827801256, -1.1487068217042242),\n (-0.17268751280677364, -0.941696827801256, 0.08036682317476873),\n (-0.17268751280677364, 0.3328411496875977, 0.8986107061160642),\n (0.4830966870190505, 1.3983204216355287, 0.23286144075770054),\n (-0.18773471865125574, -1.8811191078717768, 0.6574991306756568),\n (-1.0994105700891015, 0.3771264916699556, 1.4764735369276594),\n (0.6806108103574798, 0.3121359507669669, 1.5812384626874982),\n (-0.2075631130119835, 1.1944491200970329, -0.8365980489813365))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1),\n 360 + new_val, 5)\n\n indices, new_val = [0, 1, 2, 3], -30\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.37739906428687087, 1.4249753409811934, 0.24885596109763952),\n (-0.13973936733678652, 0.47112021672976, 0.8262208968300058),\n (-0.13973936733678652, -0.9305525148816568, 0.25238846354550093),\n (-0.13973936733678652, -0.9305525148816568, -1.1648345534906641),\n (-0.195293873297185, 0.5384305013274643, 1.9254048493660656),\n (0.7517923821192105, -1.4536280820871348, 0.6077268351737091),\n (-1.0324888817043656, -1.4540651493448844, 0.6037575199133627),\n (0.0318639314585146, -0.007204647653024865, -1.4424611047871294))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1),\n 360 + new_val, 5)\n\n # test group modification types\n modification_type = 'group'\n\n # test R_group modification\n indices, new_val = [0, 1], 1.5\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.06385450815440741, 1.625313844153823, 0.04287025350146201),\n (-0.02582727144301671, 0.39833637029935165, 0.9010563970984908),\n (-0.02582727144301671, -1.0033363613120652, 0.327223963813986),\n (-0.02582727144301671, -1.0033363613120652, -1.089999053222179),\n (-0.0813817733100206, 0.4656466548101805, 2.0002403498467567),\n (0.8657044801882787, -1.5264119271233758, 0.6825623320367284),\n (-0.9185767836497759, -1.5268489971713646, 0.6785930235919653),\n 
(0.1457760273522844, -0.07998849408343323, -1.3676256045186443))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n # test A_group modification\n indices, new_val = [0, 1, 2], 160\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'), 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((0.01997925208754263, 1.625852603711386, 0.708691800251658),\n (-0.009887200766722545, 0.3981406366172051, 0.6591605436173553),\n (-0.009887200766722545, -1.0035320949942117, 0.08532811033285048),\n (-0.009887200766722545, -1.0035320949942117, -1.3318949067033146),\n (-0.06544170263372645, 0.465450921128034, 1.7583444963656214),\n (0.8816445508645728, -1.5266076608055221, 0.44066647855559316),\n (-0.9026367129734817, -1.5270447308535111, 0.4366971701108293),\n (0.16171609802857856, -0.08018422776557976, -1.6095214579997799))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [1, 2, 5], 160\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-0.45549818019466204, 1.8548729964273216, 0.8440028131622062),\n (-0.2667929723517851, 0.6671106629415136, 1.42912314652022),\n (-0.2163066356464933, -0.45426196440936106, 0.30526758056697156),\n (1.3109140692843337, 0.4741705899686004, -0.12165329723035323),\n (-1.3557392716759613, 0.27771606050413156, -0.16203238949855803),\n (-0.2163066356464933, -1.8492005047245035, -0.34944907261899716),\n (-0.2163066356464933, -1.8492005047245035, -1.87604687202156),\n (-1.0601386155429, 0.3401156691690679, 2.122303234960202),\n (0.6302934527577109, 0.5164940342603479, 2.051815682570846),\n (1.143418340718557, 1.3271327629309078, 0.9043191341647172),\n (-1.5046641822171405, 0.8405156651772538, 0.6362234563562041),\n (-1.1248176985937233, -2.3816433802478305, -0.03815279071754074),\n (0.6330922017716909, -2.4415422695908298, 0.013011559357363423),\n (0.707681641272436, -1.4302805756837962, -2.2843133571390752),\n (-1.061876978104781, -1.2808214124615414, -2.27542464397285),\n (-0.30131566361820894, -2.876339919190297, -2.2463334380185054))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 5)\n\n indices, new_val = [5, 2, 1], 160\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((0.2917048572251579, -1.5727341554069034, -1.3423072397835754),\n (0.2917048572251579, -1.5727341554069034, -0.0048638500194817524),\n (0.2917048572251579, -0.06886266257406626, 0.5064553318371674),\n (-1.363795569744117, -0.1202634403830567, -0.28936363114537844),\n (1.2964570556359054, 
0.04149003667864859, -0.508809719558267),\n (0.4099139249017979, 1.1367441270166645, 1.4588451220109844),\n (0.29481769872300884, 2.504661621457458, 0.7909713103796479),\n (1.1685736645928884, -2.0373473546555556, 0.47685945259484286),\n (-0.5312728539867155, -2.0767912763680947, 0.5278926826114716),\n (-1.2231052441089643, -1.4156454828005882, -0.6216441060907665),\n (1.4364524039686508, -0.9213654475865127, -0.6804052856633311),\n (1.3966722481626304, 1.107137467791805, 1.9397033126698722),\n (-0.33241474313836356, 1.0625526837349102, 2.2633130452338497),\n (-0.7009351031697479, 2.671307058557274, 0.3706911401148234),\n (1.0334518240640673, 2.6225101662569066, -0.007826505507309234),\n (0.474437928409419, 3.293432289151483, 1.52916604039102))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 4)\n\n # test D_group modification\n indices, new_val = [0, 1, 2, 3], 98.7\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.751853407099498, 1.1325746654576616, 0.9630889493590222),\n (0.2705229494881336, 0.5773506493576217, 0.5667369568416694),\n (0.2705229494881336, -0.8243220822537951, -0.00709547644283548),\n (0.2705229494881336, -0.8243220822537951, -1.4243184934790005),\n (0.21496844352773511, 0.644660933955326, 1.6659209093777292),\n (1.1620546989441305, -1.347397649459273, 0.34824289518537266),\n (-0.6222265648794455, -1.3478347167170226, 0.3442735799250263),\n (0.4421262482834347, 0.09902578497483683, -1.7019450447754658))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n\n indices, new_val = [5, 2, 1, 0], 180\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((0.3034340517195509, -1.6113639549493641, -1.7901391417129255),\n (0.3034340517195509, -1.6113639549493641, -0.45269575194883194),\n (0.3034340517195509, -0.10749246211652697, 0.058623429907817215),\n (-1.3193844356755215, 0.6746571866866746, -0.30380395501671575),\n (1.3282593544657135, 0.581298860926198, -0.6678526090506967),\n (0.30343405171955073, -0.05040119820033895, 1.5985091447581203),\n (0.26233878444784786, 1.3540223173114139, 2.1955071424316666),\n (1.1803028491569083, -2.0759771588261957, 0.029027564277707585),\n (-0.5195436704231056, -2.115421071566818, 0.08006076790649397),\n (-1.414911803320983, 0.05150877481380545, -1.4915662613668217),\n (1.2907872270567131, 0.05736052141866721, -1.5046434284929022),\n (1.2266505257705096, -0.5178979180455376, 1.965811882691859),\n (-0.5283478351927398, -0.6406189828710822, 2.0028687871657294),\n (-0.6775241224477067, 1.8658969637383576, 1.9706253328328829),\n (1.0896028263747624, 1.9687229189733981, 1.8276430689661958),\n (0.35031987670665765, 1.2957313570336282, 3.285560142931404))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n 
self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 5)\n\n # test groups modification types\n modification_type = 'groups'\n\n # test D_groups modification\n indices, new_val = [0, 1, 2, 3], 98.7\n expected_xyz = {'symbols': ('O', 'C', 'C', 'O', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 16, 1, 1, 1, 1),\n 'coords': ((-0.7692326765134374, 1.1252152574374596, 0.9810655314575423),\n (0.25314357064244697, 0.5699912505374165, 0.5847135445433043),\n (0.25314357064244697, -0.8316815836112654, 0.010881153979294123),\n (0.25314357064244697, -0.8316815836112654, -1.4063419471715688),\n (1.2326181278103254, 1.0755945976230115, 0.6133000157238186),\n (1.1446752957640132, -1.3547571699433192, 0.3662195585064876),\n (-0.6396059141384572, -1.3551941756763426, 0.3622501790547312),\n (0.4247468609767439, 0.09166629658280878, -1.6839684605765641))}\n new_xyz = converter.modify_coords(coords=xyz1, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol1), new_val, 5)\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=xyz1, indices=[4, 1, 2, 3], mol=mol1),\n 176.7937925, 5)\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=[4, 1, 2, 3], mol=mol1),\n 279.5679938, 5)\n\n indices, new_val = [5, 2, 1, 0], 100\n expected_xyz = {'symbols': ('O', 'C', 'C', 'S', 'O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 32, 16, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((0.17617288317697363, -1.4263876505749937, -1.3907356765118228),\n (0.17617288317697363, -1.4263876505749937, -0.05329233131383648),\n (0.17617288317697363, 0.07748361087633482, 0.4580268316508156),\n (0.8541264407563205, 1.1799297944814306, -0.8464435250524343),\n (1.0315484892431994, 0.12891222316318918, 1.606136465715537),\n (-1.2415001838455297, 0.5175023395992786, 0.8716616732793354),\n (-2.371148423802697, -0.377635430276555, 0.3685473045279144),\n (1.0530416597996317, -1.8910009834245878, 0.42843102214143425),\n (-0.646804798256715, -1.930444842122042, 0.47946418053365614),\n (1.322524386187, 0.1392850561843193, -1.55769653865906),\n (1.5807657244329665, 0.9071634481807671, 1.3438012611373469),\n (-1.4308626545937098, 1.5181627982792263, 0.46103575662853813),\n (-1.3101730016766409, 0.6090291604729325, 1.9628224613881304),\n (-2.328405219901557, -1.376683205512397, 0.811273322532136),\n (-2.345556604764221, -0.47877786163003033, -0.7207928024513892),\n (-3.3382397150969996, 0.059047399283163715, 0.6394658008190603))}\n new_xyz = converter.modify_coords(coords=xyz3, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol3)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol3), new_val, 5)\n\n indices, new_val = [4, 3, 1, 0], 236.02\n expected_xyz = {'symbols': ('C', 'C', 'O', 'C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 16, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-0.3420713780282814, -0.726846939196746, -1.8608060734620697),\n (-0.3420713780282814, -0.726846939196746, -0.33809952744080163),\n (-1.5199121786498575, -1.3903247017047589, 0.12046140490433599),\n 
(-0.3420713780282814, 0.692986716189357, 0.21142750813209843),\n (0.8346249371329908, 0.870417947793265, 1.130523629422891),\n (1.8415843350511496, 1.49899165752528, 0.8160475329621943),\n (-1.232802341934429, -0.22348356564525385, -2.2527724067647172),\n (0.5474409007790566, -0.2291658204558631, -2.2587884226234842),\n (-0.36650899336409903, -1.7525658745827613, -2.2443893713107435),\n (0.5235538883628821, -1.286773819894118, 0.03414982827280788),\n (-1.525486055520759, -2.2842579938670644, -0.2668197974505191),\n (-1.246930807816442, 0.9000033565709169, 0.7927934676101465),\n (-0.26242043164905693, 1.4290013064896112, -0.5956842516835208),\n (0.739203033547077, 0.4163114365921572, 2.132044487804084))}\n new_xyz = converter.modify_coords(coords=xyz4, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol4)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol4), new_val, 5)\n\n # test 1-indexed input\n indices = [5, 4, 2, 1]\n new_xyz = converter.modify_coords(coords=xyz4, indices=indices, new_value=new_val,\n modification_type=modification_type, mol=mol4, index=1)\n self.assertTrue(almost_equal_coords_lists(new_xyz, expected_xyz))\n self.assertAlmostEqual(converter.get_zmat_param_value(coords=new_xyz, indices=indices, mol=mol4, index=1),\n new_val, 5)\n\n # test TSs\n indices = [19, 10, 4, 2]\n fragments = [[46, 47, 48, 49, 50, 51, 52], [f + 1 for f in range(45)]]\n self.assertAlmostEqual(calculate_dihedral_angle(coords=xyz5, torsion=indices, index=1), 56.83358841, 3)\n new_xyz = converter.modify_coords(coords=xyz5,\n indices=indices,\n new_value=300,\n modification_type='groups',\n mol=mol5,\n index=1,\n fragments=fragments,\n )\n self.assertAlmostEqual(calculate_dihedral_angle(coords=new_xyz, torsion=indices, index=1), 300, places=3)\n\n indices = [1, 2, 3, 5]\n fragments = [[f + 1 for f in range(23)], [24, 25, 26, 27, 28]]\n self.assertAlmostEqual(calculate_dihedral_angle(coords=xyz6, torsion=indices, index=1), 62.30597206, 3)\n new_xyz = converter.modify_coords(coords=xyz6,\n indices=indices,\n new_value=200,\n modification_type='groups',\n mol=mol6,\n index=1,\n fragments=fragments,\n )\n self.assertAlmostEqual(calculate_dihedral_angle(coords=new_xyz, torsion=indices, index=1), 200, places=3)\n \n coords={'coords': ((-0.7862825353221515, -0.28824023055636216, 0.4782944637692894),\n (0.21968869054702736, 0.40094256193652866, -0.2919820499085219),\n (-0.07796443595084417, 0.5692847962524797, -1.6621913220858304),\n (-1.102200211589376, -1.1132157833188596, -0.01879031191901484),\n (-1.5973749070505925, 0.29546848172306867, 0.6474145668621136),\n (0.4237940503863438, 1.3660724867336205, 0.19101403432872205),\n (1.1352054736534014, -0.1980893380251006, -0.2652264470061931),\n (-0.7497944593402266, 1.258221857416732, -1.7507029654486272)),\n 'isotopes': (14, 12, 16, 1, 1, 1, 1, 1),\n 'symbols': ('N', 'C', 'O', 'H', 'H', 'H', 'H', 'H')}\n indices=[3, 0, 1, 2]\n new_value=53.76\n modification_type=\"groups\"\n mol=Molecule(smiles=\"NCO\")\n new_xyz = converter.modify_coords(coords=coords,\n indices=indices,\n new_value=new_value,\n modification_type=modification_type,\n mol=mol)\n self.assertTrue(type(new_xyz[\"coords\"][0][0] is float))", "def distance_to_objects(orbit_tree: Dict[str, str], satellite_name) -> Dict[str, int]:\n object_distances: Dict[str, int] = {}\n satellite = orbit_tree[satellite_name]\n distance = 0\n while satellite != 'COM':\n # 
Get distance\n object_distances[satellite] = distance # Start with distance to object directly orbited, =0 for out calc.\n distance += 1\n # Next satellite name\n satellite = orbit_tree[satellite]\n return object_distances", "def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict", "def DOM_positions(omkeys, omgeo):\n x_list = []\n y_list = []\n z_list = []\n r_list = []\n for omkey in omkeys:\n x_list.append(omgeo[omkey].position.x)\n y_list.append(omgeo[omkey].position.y)\n z_list.append(omgeo[omkey].position.z)\n r_list.append(np.sqrt(omgeo[omkey].position.x**2 + omgeo[omkey].position.y**2))\n returndict = {\"x\":x_list,\"y\":y_list,\"z\":z_list, \"r\":r_list}\n return returndict", "def signal_dictionary(music_filename):\n\tx = []\n\ty = []\n\tassign_points = {}\n\n\tsignal = input_waves.WAVtoSignal(music_filename)\n\tfor i in range(len(signal)):\n\t\tx = signal[i][0]\n\t\ty = signal[i][1]\n\t\tassign_points.update({x:y})\n\tprint 'assign dictionary complete'\n\treturn assign_points", "def key_dict_values (self):\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT note_index \"\r\n +\"FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n return self.key_dict.values()", "def read_xyz(filename):\n #print('Reading geom from:'),filename\n atoms = []\n coordinates = []\n\t\n xyz = open(filename)\n n_atoms = int(xyz.readline())\n title = xyz.readline()\n for line in xyz:\n\tif len(line.strip()) == 0:\n\t\tpass\n\t\tbreak\t\n\tatom,x,y,z = line.split()\n\tatoms.append(atom)\n\tcoordinates.append([float(x), float(y), float(z)])\n xyz.close()\n coordinates = [[w * angtobh for w in ww] for ww in coordinates] #ang to bh\n\n if n_atoms != len(coordinates):\n \tprint('Number of atoms in xyz file doesnt equal to the number of lines.')\n\tsys.exit(1)\n \n return atoms, coordinates", "def x_coords(self):\n x_coords = np.linspace(0, self.fft_length / self.samplate, self.fft_length + 1)\n return x_coords", "def update_chunk(self):\n for key, value in self.piece_coordinates.items():\n # Why is the key a numpy.int type ???\n self.chunk[value] = key", "def getAtomInformation(self, resIDs=None, atomIDs=None):\n if atomIDs is None:\n atomIDs = set ()\n else:\n atomIDs = set (atomIDs)\n\n if resIDs is not None:\n for i in resIDs:\n atomIDs.update (self._residueInfo[i]['atomID'])\n return self.getAtomInformation (atomIDs=atomIDs)\n\n atomIDs = list (atomIDs)\n atomIDs.sort ()\n return dict ((atomID, self._atomInfo[atomID]) for atomID in atomIDs)", "def get_coordinates_xyz(filename):\n\n f = open(filename, 'r')\n V = list()\n atoms = list()\n n_atoms = 0\n\n # Read the first line to obtain the number of atoms to read\n try:\n n_atoms = int(f.readline())\n except ValueError:\n print(\"Could not obtain the number of atoms in the .xyz file. 
\"+filename)\n return None\n\n # Skip the title line\n f.readline()\n\n # Use the number of atoms to not read beyond the end of a file\n for lines_read, line in enumerate(f):\n\n if lines_read == n_atoms:\n break\n\n atom = re.findall(r'[a-zA-Z]+', line)[0]\n # atom = atom.upper()\n\n numbers = re.findall(r'[-]?\\d+\\.\\d*(?:[Ee][-\\+]\\d+)?', line)\n numbers = [float(number) for number in numbers]\n\n # The numbers are not valid unless we obtain exacly three\n if len(numbers) == 3:\n V.append(np.array(numbers))\n atoms.append(atom)\n else:\n exit(\"Reading the .xyz file failed in line {0}. Please check the format.\".format(lines_read + 2))\n\n f.close()\n atoms = np.array(atoms)\n V = np.array(V)\n return atoms, V", "def Keys(self) -> _n_1_t_4:", "def store_wn_lookup():\n syns = list( wn.all_synsets() )\n #syn_str = map(lambda s: str(s).replace(\"Synset\",'').strip('()').strip(\"'\"), syns)\n syn_str = map(lambda s: str(s).replace(\"Synset\",'').strip('()').strip(\"'\").strip('\"'), syns)\n #offsets_list = [(\"n%08d\" % s.offset, s) for s in syns]\n olist = map(lambda a, b: (\"n%08d\" % a.offset, b), syns, syn_str)\n offset_dict = dict(olist)\n pickle.dump(offset_dict, open('/Users/xlx/Documents/proj/imgnet-flickr/db3/wn_offset_dict.pickle', 'wb'))", "def determine_coordinates_and_cell_names(self):\n self.coordinates_and_cell_headers = [\n annot[0]\n for annot in self.file.columns\n if annot[0].lower() in (\"z\", \"y\", \"x\", \"name\")\n ]\n # annotation column names\n self.annot_column_headers = [\n annot\n for annot in self.file.columns\n if annot[0].lower() not in (\"z\", \"y\", \"x\", \"name\")\n ]", "def generate_average_coord_numbers(self):\n coord_numbers = {}\n for typea in self.atomtypes:\n coord_numbers[znum2sym.z2sym(typea)] = 0\n for typeb in self.atomtypes:\n coord_numbers[znum2sym.z2sym(typea)+'-'+znum2sym.z2sym(typeb)] = 0\n for atom in self.atoms:\n for n in atom.neighs:\n coord_numbers[znum2sym.z2sym(atom.z)] += 1\n coord_numbers[znum2sym.z2sym(atom.z)+'-'+znum2sym.z2sym(n.z)] += 1\n for key in coord_numbers:\n elem = znum2sym.sym2z(key.split('-')[0])\n coord_numbers[key] /= float(self.atomtypes[elem])\n return coord_numbers", "def keys(self):\n\t\treturn iter(Point(x, y) for y, x in itertools.product(range(self.dims.height), range(self.dims.width)))" ]
[ "0.6195289", "0.5910004", "0.5743342", "0.57411754", "0.5721014", "0.5695221", "0.5674487", "0.5631202", "0.55479956", "0.5417099", "0.5406426", "0.5400523", "0.53862077", "0.535987", "0.53320104", "0.52723056", "0.5238446", "0.5231852", "0.522691", "0.520619", "0.52059865", "0.519242", "0.5186785", "0.51775813", "0.5169222", "0.5135861", "0.5124056", "0.5124056", "0.5123731", "0.512372", "0.5116894", "0.5106428", "0.5104267", "0.5093122", "0.5092381", "0.5088447", "0.5070969", "0.5040461", "0.5019934", "0.500447", "0.4988217", "0.4986199", "0.498572", "0.4985612", "0.49748775", "0.49672103", "0.4955438", "0.4954305", "0.49507338", "0.4949641", "0.4949576", "0.49449143", "0.49357602", "0.4923265", "0.49229288", "0.49184832", "0.49079338", "0.4893329", "0.48862967", "0.48852232", "0.4883961", "0.48749655", "0.4871239", "0.48651475", "0.48620626", "0.48597425", "0.48561287", "0.48549518", "0.48427996", "0.4840936", "0.4840936", "0.48324177", "0.48309943", "0.4826933", "0.48264834", "0.48231778", "0.48231778", "0.48194218", "0.48151883", "0.48129386", "0.48102975", "0.48054162", "0.48041508", "0.47958013", "0.47912848", "0.47906885", "0.4773738", "0.47694707", "0.47689733", "0.47689113", "0.47687212", "0.47684494", "0.47662792", "0.4766221", "0.4764436", "0.4762823", "0.4759905", "0.475224", "0.47517315", "0.47511485", "0.47484148" ]
0.0
-1
Returns the angle between two vectors.
def get_angle(v1, v2): return np.arccos(np.dot(v1, v2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def angle_between_vectors(a, b):\n return math.acos(dot_product(a, b) / (length(a) * length(b)))", "def angle_between_vectors(vec1, vec2):\n vec = vec1 - vec2\n vec = vec.perpendicular()\n return vec.angle", "def angle(v1: Vector, v2: Vector) -> float:\n return math.degrees(math.acos((v1 * v2) / (v1.length() * v2.length())))", "def angleBetweenVectors(v1, v2):\n v2Size = vectorLength(v2)\n if not v2Size:\n theta = 0.0\n else:\n theta = math.acos(dotProduct(v1, v2) / v2Size)\n return theta", "def angle(vec1, vec2):\n\n return math.acos(dotproduct(vec1, vec2) / (length(vec1) * length(vec2)))", "def angle(self, v1, v2):\r\n cosang = np.dot(v1, v2)\r\n sinang = np.linalg.norm(np.cross(v1, v2))\r\n return np.arctan2(sinang, cosang)", "def angle_between(v1, v2):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n\n #takes out if vectors are 1 or -1 (basically if they're the same direction)\n angle = math.degrees(np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)))\n return angle", "def angle_between(v1: Vec2, v2: Vec2):\n v = dir_vector(v1, v2)\n a = atan2(v.y, v.x)\n if a < 0:\n a = 2 * pi + a\n return a", "def angle(v1, v2):\n return acos(np.clip(v1.dot(v2) / (length(v1) * length(v2)), -1.0, 1.0))", "def compute_angle(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n angle = np.arctan2(sinang, cosang)\n return angle", "def cal_angle_between_two_vectors(vec_1, vec_2):\n unit_vec_1 = vec_1 / np.linalg.norm(vec_1)\n unit_vec_2 = vec_2 / np.linalg.norm(vec_2)\n dot_product = np.dot(unit_vec_1, unit_vec_2)\n \n return np.arccos(dot_product) / np.pi * 180", "def angle_between_vectors(vector1,vector2):\n value = np.sum(np.multiply(vector1, vector2)) / (np.linalg.norm(vector1) * np.linalg.norm(vector2))\n if (value<-1) | (value>1):\n value = np.sign(value)\n angle = np.arccos(value)\n return angle", "def angle_between(v1, v2):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))", "def angle(a,b):\n return acos(np.dot(a,b)/np.linalg.norm(a)/np.linalg.norm(b))", "def angle_between(v1, v2):\n return np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))", "def AngleBetween(a, b):\n r = a.Length() * b.Length()\n if r < 1.0e-8:\n return BadVectorError()\n dot = (a.x*b.x + a.y*b.y + a.z*b.z) / r\n if dot <= -1.0:\n return 180.0\n if dot >= +1.0:\n return 0.0\n return math.degrees(math.acos(dot))", "def vector_angle(v1, v2):\n cos_theta = np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)\n # Clip ensures that cos_theta is within -1 to 1 by rounding say -1.000001 to -1 to fix numerical issues\n angle = np.arccos(np.clip(cos_theta, -1, 1))\n\n return angle", "def angleBetween(v1, v2):\n v1_u = unitVector(v1)\n v2_u = unitVector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))", "def angle_vecs(vec1,vec2):\n angle=np.arccos(np.dot(vec1,vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2)))\n return angle", "def vector_angle_finder(vect_1, vect_2):\n theta = np.arccos(np.dot(vect_1, vect_2) / (magnitude_vect(vect_1) * magnitude_vect(vect_2)))\n angle = theta * 180 / math.pi\n return angle", "def get_angle_between_vectors(self, A, B):\n\t\tdot_prod = A[0]*B[0] + A[1]*B[1]\n\t\tlen_A = math.sqrt(A[0]**2 + A[1]**2)\n\t\tlen_B = math.sqrt(B[0]**2 + B[1]**2)\n\n\t\treturn math.acos(dot_prod / (len_A + len_B))", "def angle(first, other=FreeCAD.Vector(1,0,0)):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return math.acos(dotproduct(normalized(first),normalized(other)))", 
"def angle_between_vectors(x, y):\n first_step = abs(x[0] * y[0] + x[1] * y[1] + x[2] * y[2]) / (\n np.sqrt(x[0]**2 + x[1]**2 + x[2]**2) *\n np.sqrt(y[0]**2 + y[1]**2 + y[2]**2))\n second_step = np.arccos(first_step)\n return (second_step)", "def angle_btw(v1, v2):\n cos_ang = np.dot(v1, v2)\n sin_ang = np.linalg.norm(np.cross(v1, v2))\n return np.arctan2(sin_ang, cos_ang) * 180 / math.pi", "def angle(vec1, vec2):\n assert vec1.shape == vec2.shape\n \n cos_vec = np.inner(vec1, vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2))\n angle = math.acos(cos_vec)\n in_deg = math.degrees(angle)\n if in_deg >= 90:\n return (180-in_deg)\n return in_deg", "def angle_between_vectors(u, v):\r\n mag_u = math.sqrt(u[0]**2 + u[1]**2 + u[2]**2)\r\n mag_v = math.sqrt(v[0]**2 + v[1]**2 + v[2]**2)\r\n dot_prod = u[0] * v[0] + u[1] * v[1] + u[2] * v[2]\r\n return math.acos(dot_prod/(mag_u*mag_v))", "def angle_between(a, b):\n from math import acos\n return acos( dot_product(a, b) / (magnitude(a) * magnitude(b)) )", "def vec_angle_rad(v1,v2):\r\n \r\n c = np.dot(v1,v2)/(vector_len(v2)* vector_len(v2))\r\n return math.acos(c)", "def vec_angle_deg(v1,v2):\r\n \r\n return math.degrees(vec_angle_rad(v1,v2))", "def angle(p1, p2):\n return dot(p1, p2)", "def angle_between_vectors_degrees(u, v):\n return np.degrees(\n math.acos(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))))", "def angle_between(x1: float, y1: float, x2: float, y2: float) -> float:\n dx = x2 - x1\n dy = y2 - y1\n\n # We return negative because pyglet and math treat rotation differently\n return -math.atan2(dy, dx)", "def angle_between_vectors(self, u, v):\n vec1_unit = self.get_unit_vector(u)\n vec2_unit = self.get_unit_vector(v)\n return np.arccos(np.clip(np.dot(vec1_unit, vec2_unit), -1.0, 1.0)) * (180/math.pi)", "def angle(v1, v2, acute=True):\n angle = np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))\n if acute == True:\n return angle\n else:\n return 2 * np.pi - angle", "def calcul_angle_vector(vec1, vec2):\n \n try:\n div=(vec1[0]*vec2[0]+vec1[1]*vec2[1]+vec1[2]*vec2[2])/(distance(vec1,[0,0,0])*distance(vec2,[0,0,0]))\n if div>1:\n div=1\n if div<-1:\n div=-1\n #KC#CG# tranlation to degrees\n angle=180/math.pi*math.acos(div)\n except:\n print vec1\n print vec2\n print (vec1[0]*vec2[0]+vec1[1]*vec2[1]+vec1[2]*vec2[2])/(distance(vec1,[0,0,0])*distance(vec2,[0,0,0]))\n return angle", "def angle(pt_a, pt_b):\n x1, y1 = pt_a\n x2, y2 = pt_b\n return atan2(y2-y1, x2-x1)", "def angle(self, vector):\n\n return (math.degrees(math.acos((self.dot(vector) / (self.magnitude() *\n vector.magnitude())))))", "def angle_between_vectors(vect_ref, vect):\n\n c = np.dot(vect_ref.T, vect) / (np.linalg.norm(vect_ref) * np.linalg.norm(vect))\n angle = np.arccos(np.clip(c, -1, 1))\n\n return angle", "def angle(*args):\n if len(args) < 1:\n return 0.0\n elif len(args) == 1:\n return np.arctan2(args[0][1], args[0][0])\n else:\n v1 = args[0].flatten()\n v2 = args[1].flatten()\n return np.arccos(np.dot(v1, v2) / (norm(v1) * norm(v2)))", "def get_angle(p1, p2):\n return math.atan2(p2[1] - p1[1], p2[0] - p1[0])", "def calculate_vector_angle(vector_1, vector_2):\n dot = dot_product(vector_1, vector_2)\n cos_angle = float(dot / (two_norm(vector_1) * two_norm(vector_2)))\n # Buffer for floating point errors\n if 1.2 > cos_angle > 1:\n cos_angle = 1\n elif -1.2 < cos_angle < -1:\n cos_angle = -1\n elif -1.2 > cos_angle or 1.2 < cos_angle:\n raise KeypointError(\"Ratio for angle is outside of the domain.\")\n if cos_angle > 0:\n multiplier = 1\n 
else:\n multiplier = -1\n angle_of_interest = (180 - math.degrees(math.acos(cos_angle))) * multiplier\n return angle_of_interest", "def get_vec_angle(vec1: List, vec2: List) -> Union[float, None]:\n if np.linalg.norm(np.array(vec1)) == 0 or np.linalg.norm(np.array(vec2)) == 0:\n warnings.warn(\"Do not input 0 vector\")\n return\n\n diff_degree = np.dot(np.array(vec1), np.array(vec2))\n diff_degree /= np.linalg.norm(np.array(vec1))\n diff_degree /= np.linalg.norm(np.array(vec2))\n diff_degree = np.clip(diff_degree, -1, 1)\n diff_degree = np.arccos(diff_degree) * 180 / np.pi\n return diff_degree", "def vector_angle(v):\n assert len(v) == 2\n x, y = v\n return np.arctan2(y, x)", "def angle(a: Point, b: Point) -> int:\n ang = math.degrees(math.atan2(b.y - a.y, b.x - a.x)) + 90\n return ang + 360 if ang < 0 else ang", "def angle_between_vectors_degrees(u, v):\n a = np.dot(u, v)\n b = np.linalg.norm(u)\n c = np.linalg.norm(v)\n d = a / (b* c)\n if d > 1:\n d = 1\n if d < -1:\n d = -1\n e = acos(d)\n f = np.degrees(e)\n return f", "def angle(p1, p2):\n x_dist = p2[0] - p1[0]\n y_dist = p2[1] - p1[1]\n return math.atan2(-y_dist, x_dist) % (2 * math.pi)", "def angle_between(v1, v2):\n v = np.array(v1)\n w = np.array(v2)\n\n norm_v = norm(v)\n norm_w = norm(w)\n\n cos_angle = np.around(np.dot(v, w) / norm_v / norm_w, PRECISION)\n\n if not -1 <= cos_angle <= 1:\n return None\n else:\n return np.around(np.arccos(cos_angle) * 360 / 2 / np.pi, PRECISION)", "def getAngle(v1,v2,prec=1E-6):\n \n return(math.acos((np.dot(v1,v2))/np.linalg.norm(v1)/np.linalg.norm(v2)))", "def angle(point1, point2):\n return atan2(point2.y() - point1.y(), point2.x() - point1.x())", "def get_vector(a, b):\n dx = float(b[0] - a[0])\n dy = float(b[1] - a[1])\n\n distance = math.sqrt(dx ** 2 + dy ** 2)\n\n if dy > 0:\n angle = math.degrees(math.atan(-dx / dy))\n elif dy == 0:\n if dx < 0:\n angle = 90.0\n elif dx > 0:\n angle = -90.0\n else:\n angle = 0.0\n else:\n if dx < 0:\n angle = 180 - math.degrees(math.atan(dx / dy))\n elif dx > 0:\n angle = -180 - math.degrees(math.atan(dx / dy))\n else:\n angle = 180.0\n\n return distance, angle", "def _angle(*vectors):\n if len(vectors) == 1:\n return DubinsUAV2D._sawtooth(np.arctan2(vectors[0][1], vectors[0][0]))\n elif len(vectors) == 2:\n return DubinsUAV2D._sawtooth(np.arctan2(vectors[1][1], vectors[1][0]) - np.arctan2(vectors[0][1], vectors[0][0]))\n else:\n raise AttributeError()", "def angle(o1,o2):\n\n o1 = np.array(o1)\n o2 = np.array(o2)\n\n o1a = o1[0:3]\n o1b = o1[3:6]\n \n o2a = o2[0:3]\n o2b = o2[3:6]\n\n norm_a = np.linalg.norm(o1a) * np.linalg.norm(o2a)\n norm_b = np.linalg.norm(o1b) * np.linalg.norm(o2b)\n\n dot_a = np.dot(o1a,o2a) / norm_a\n dot_b = np.dot(o1b,o2b) / norm_b\n \n if dot_a > 1.0 and dot_a - 1.0 <= np.finfo(dot_a.dtype).eps:\n dot_a = 1.0\n \n if dot_b > 1.0 and dot_b - 1.0 <= np.finfo(dot_b.dtype).eps:\n dot_b = 1.0\n\n angle_a = np.arccos(dot_a) * (180.0 / np.pi)\n angle_b = np.arccos(dot_b) * (180.0 / np.pi)\n\n return (angle_a, angle_b)", "def angle_between(v2, v1):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n result = np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))\n if np.isnan(result):\n if abs(v1_u + v2_u) < .5 * (abs(v1_u) + abs(v2_u)):\n return np.pi\n else:\n return 0.0\n if Left( [v2[1],v2[3]], [0,0], [v1[1],v1[3]] ):\n return 2*np.pi - result\n return result", "def angle(self, other):\n return acosd(self.normalized().dot(other.normalized()))", "def _get_angle(point1, point2):\n ydelta = point2[0] - point1[0]\n xdelta = point2[1] - 
point1[1]\n if xdelta == 0:\n hypot = np.sqrt(xdelta ** 2 + ydelta ** 2)\n theta = np.arcsin(ydelta / hypot)\n elif ydelta == 0:\n hypot = np.sqrt(xdelta ** 2 + ydelta ** 2)\n theta = np.arccos(xdelta / hypot)\n else:\n theta = np.arctan(ydelta / xdelta)\n return theta", "def angle(firstPoint, secondPoint):\n\txDiff = secondPoint.x - firstPoint.x\n\tyDiff = secondPoint.y - firstPoint.y\n\treturn math.degrees(math.atan2(yDiff, xDiff))", "def getangle(p1, p2):\n\treturn atan2( p2[1]-p1[1], p2[0]-p1[0] )", "def angle(self, other):\n return acosd(np.clip(self.uv().dot(other.uv()), -1, 1))", "def angle_between(vec1, vec2, radian=True):\n cos = np.dot(vec1, vec2) / np.linalg.norm(vec1) / np.linalg.norm(vec2)\n angle = np.arccos(np.clip(cos, -1, 1))\n if not radian:\n angle = angle / np.pi * 180\n return angle", "def angle(self, vec2):\n if type(vec2) != Vector:\n raise TypeError(\"Not a vector\")\n\n from math import acos\n return acos(self.dot(vec2) / (self.magnitude() * vec2.magnitude()))", "def __get_angle(self, names, vecA, vecB):\n pivot = max(names, key=names.count)\n\n if names[0] != pivot: # Atoms needs to be order to pick vectors correctly\n vecA = vecA * -1\n\n if names[2] != pivot:\n vecB = vecB * -1\n\n radians = vecA.AngleTo(vecB)\n angle = 180 / math.pi * radians\n\n return angle", "def get_angle(v1,v2) :\n\n if (np.linalg.norm(v1)*np.linalg.norm(v2)) != 0 : \n cosangle = np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))\n cosangle = np.maximum(-1,np.minimum(1, cosangle))\n angle = np.arccos(cosangle) \n if np.cross(v1,v2) < 0 :\n angle = 2*np.pi - angle \n return angle\n return None", "def get_angle_between(self, other):\n cross = self.x*other[1] - self.y*other[0]\n dot = self.x*other[0] + self.y*other[1]\n return math.atan2(cross, dot)", "def get_exact_angle(pt1, pt2):\n dx, dy = pt2[0]-pt1[0], pt2[1]-pt1[1]\n return math.atan2(dy,dx)", "def compute_angle_v2v(v1, v2, v3=None):\n\n alpha = math.acos(dot_product(v1, v2) / (vlength(v1)*vlength(v2)))\n if v3 is not None:\n cross = cross_product(v2, v1)\n if dot_product(cross,v3) > 0.0:\n return 2*math.pi-alpha\n\n return alpha", "def angle(p1, p2):\n dx = p2[0] - p1[0]\n dy = p2[1] - p1[1]\n if dx == 0:\n if dy == 0:\n return 0\n return 90\n alpha = math.atan(dy / dx) * 180 / math.pi\n if alpha < 0:\n alpha = 180 - alpha\n return alpha", "def get_angle(vert1, vert2):\n x_axis = np.array([1, 0])\n input_axis = vert2 - vert1\n input_axis = input_axis / np.linalg.norm(input_axis)\n return math.degrees(np.arccos(np.dot(x_axis, input_axis)))", "def calculate_angle(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.atan2(dy, dx) * 180.0 / math.pi", "def angle(v1,v2, deg = False):\n # v1.v2 = ||v1||||v2|| cos(angle) => angle = arcos(v1.v2/||v1||||v2||)\n # see more: http://www.wikihow.com/Find-the-Angle-Between-Two-Vectors\n # tested with http://codereview.stackexchange.com/a/54413\n if deg: return np.rad2deg(np.arccos(old_div(np.dot(v1,v2),(anorm(v1)*anorm(v2))))) # *180.0/np.pi\n return np.arccos(old_div(np.dot(v1,v2),(anorm(v1)*anorm(v2))))", "def py_ang(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)", "def angle( nt1, nt2, nt3 ):\n if vector(nt1, nt2) == [0,0]:\n print(\"nt1\", nt1.seqpos, \" at \", nt1.x, nt1.y, \" is at the same position as nt2\", nt2.seqpos)\n if vector(nt2, nt3) == [0,0]:\n print(\"nt2\", nt2.seqpos, \" at \", nt2.x, nt2.y, \" is at the same position as nt3\", 
nt3.seqpos)\n #print(vector(nt1, nt2), vector(nt2, nt3))\n if vectors_close(vector(nt1, nt2), vector(nt2, nt3)):\n # These vectors are identical and that is messing with the ability to call two things parallel?\n return 180.0\n return 180.0 - math.degrees(math.acos(dot(vector(nt1, nt2), vector(nt2, nt3)) / (mod(vector(nt1, nt2)) * mod(vector(nt2, nt3)))))", "def vertical_angle(A, B):\n if A is None or B is None:\n return None\n return degrees(atan2(B[1] - A[1], B[0] - A[0]) - pi / 2)", "def angle_to( self, vector3 ):\n # make sure neither vector is zero-length\n sm = self.magnitude\n vm = vector3.magnitude\n if abs(sm) < self.EPSILON or abs(vm) < self.EPSILON:\n raise ZeroDivisionError(\n \"can't calculate angle between zero-length vectors!\" )\n \n # calculation will fail if vectors have same heading\n # catch error and return zero\n try:\n return math.degrees( math.acos(self.dot(vector3) / (sm * vm)) )\n except ValueError:\n # test whether direction is same or opposite\n if Vector3( self ).add( vector3 ).magnitude < sm:\n return 180.0\n return 0.0", "def test_angle_between_close_vectors():\n a = np.array([0.9689124217106448, 0.24740395925452294, 0.0, 0.0])\n b = np.array([0.9689124217106448, 0.247403959254523, 0.0, 0.0])\n angle = pr.angle_between_vectors(a, b)\n assert_almost_equal(angle, 0.0)", "def py_ang(self,v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)", "def angle(self):\n return atan2(self.v.y, self.v.x)", "def angle_between(i1, j1, i2, j2):\n\n dot_product = i1 * i2 + j1 * j2\n magnitude1 = np.sqrt(i1 ** 2 + j1 ** 2)\n magnitude2 = np.sqrt(i2 ** 2 + j2 ** 2)\n\n theta = np.arccos(dot_product / (magnitude1 * magnitude2))\n\n return np.rad2deg(theta).round(3)", "def _add_vectors(v1, v2):\n x = math.cos(v1[1]) * v1[0] + math.cos(v2[1]) * v2[0]\n y = math.sin(v1[1]) * v1[0] + math.sin(v2[1]) * v2[0]\n\n angle = 0.5 * math.pi - math.atan2(y, x)\n length = math.hypot(x, y)\n return (length, angle)", "def angle(self):\n self._normalise()\n norm = np.linalg.norm(self.vector)\n return self._wrap_angle(2.0 * atan2(norm,self.scalar))", "def angle_2D(v):\n len_v=(v[0]**2+v[1]**2)**(0.5)\n if len_v==0:\n return 0\n ret = math.acos(v[0]/len_v)\n if v[1]<0:\n ret=6.283185307179586-ret\n return ret", "def angle(p0, p1, prv_ang=0):\r\n ang = math.atan2(p0[1] - p1[1], p0[0] - p1[0])\r\n a0 = (ang - prv_ang)\r\n a0 = a0 % (PI * 2) - PI\r\n return a0", "def vector_cosine_angle(vec_1:tuple, vec_2:tuple)->float:\n if is_zero_vector(vec_1) or is_zero_vector(vec_2):\n return None\n return dot_product(vec_1, vec_2) / (magnitude(vec_1) * magnitude(vec_2))", "def get_angle(p0, p1=np.array([0, 0]), p2=None):\n if p2 is None:\n p2 = p1 + np.array([1, 0])\n v0 = np.array(p0) - np.array(p1) \n v1 = np.array(p2) - np.array(p1)\n\n angle = np.math.atan2(np.linalg.det([v0,v1]),np.dot(v0,v1))\n return np.degrees(angle)", "def angle_between(vecs, baseline):\n vecs = CA_coords(vecs)\n baseline = CA_coords(baseline)\n return np.arccos(np.clip(vecs @ baseline.T, -1.0, 1.0))", "def test_angle_between_vectors():\n v = np.array([1, 0, 0])\n a = np.array([0, 1, 0, np.pi / 2])\n R = pr.matrix_from_axis_angle(a)\n vR = np.dot(R, v)\n assert_almost_equal(pr.angle_between_vectors(vR, v), a[-1])\n v = np.array([0, 1, 0])\n a = np.array([1, 0, 0, np.pi / 2])\n R = pr.matrix_from_axis_angle(a)\n vR = np.dot(R, v)\n assert_almost_equal(pr.angle_between_vectors(vR, v), a[-1])\n v = np.array([0, 0, 1])\n a = np.array([1, 0, 0, np.pi / 2])\n R = 
pr.matrix_from_axis_angle(a)\n vR = np.dot(R, v)\n assert_almost_equal(pr.angle_between_vectors(vR, v), a[-1])", "def angle(self):\n v = self.p1 - self.p0\n return atan2(v.y, v.x)", "def angle2pos(pos1: np.ndarray, pos2: np.ndarray) -> float:\n assert pos1.shape == pos2.shape\n diff = pos2 - pos1\n diff /= np.linalg.norm(diff)\n # x1: y-coordinates, x2: x-coordinates\n angle = np.arctan2(diff[1], diff[0])\n return angle", "def angles_vectors_degrees(u, v):\n a = angle_smallest_vectors_degrees(u, v)\n return a, 360. - a", "def angles_vectors(u, v):\n a = angle_smallest_vectors(u, v)\n return a, pi * 2 - a", "def get_intersect_angle(self, p0, p1, p2):\n u, v = p1-p0, p2-p0\n costheta = u.dot(v) / math.sqrt(u.dot(u) * v.dot(v))\n return math.degrees(math.acos(costheta))", "def calc_angle(v1, v2, v3):\n v1 = v1 - v2\n v3 = v3 - v2\n return v1.angle(v3)", "def angle_between_two(self, other):\n # angle = math.atan2(other.position.y - self.position.y,\n # other.position.x - self.position.x)\n minus = other.position - self.position\n angle = math.atan2(minus.y, minus.x)\n return angle", "def atan2_vec(vector):\n return -np.arctan2(vector[1], vector[0])", "def angle_between(u, v, n=None):\n if n is None:\n return np.arctan2(np.linalg.norm(np.cross(u, v)), np.dot(u, v))\n else:\n return np.arctan2(np.dot(n, np.cross(u, v)), np.dot(u, v))", "def angleTo(x1, y1, x2, y2):\n assert not (x1 == 0 and y1 == 0) and not (x2 == 0 and y2 == 0), \"neither point should be the origin\"\n if x1 == x2:\n if y1 < y2:\n return math.pi / 2\n elif y1 == y2:\n return 0\n return math.pi * 3 / 2\n dx, dy = x2 - x1, y2 - y1\n rawDeg = math.atan(dy / dx)\n if dx < 0:\n rawDeg += math.pi\n return rawDeg % (math.pi * 2)", "def get_angle(pt1, pt2):\n dx, dy = pt2[0]-pt1[0], pt2[1]-pt1[1]\n if abs(dx)<=TOL and dy>0:\n angle=0.5*np.pi\n elif abs(dy)<=TOL and dx<0:\n angle=np.pi\n elif abs(dx)<=TOL and dy<0:\n angle=1.5*np.pi\n elif abs(dy)<=TOL and dx>0:\n angle=0.0\n else:\n raise ValueError(\"Warning! The angle between the two points must be an \"\n \"integer multiples of 90deg from each other\")\n return angle", "def angle_diff(a1, a2):\n a = a1 - a2\n if abs(a) > 180:\n return np.sign(a)*360 - a\n else:\n return a", "def calculate_angle(start: tuple, end: tuple):\n radians = -math.atan2(end[0] - start[0], end[1] - start[1])\n return math.degrees(radians) % 360", "def get_interior_angle(vec0, vec1):\n angle = np.math.atan2(np.linalg.det([vec0, vec1]), np.dot(vec0, vec1))\n degrees = abs(np.degrees(angle))\n # Min and max should be between 0° an 90°.\n degrees = min(degrees, 180.0 - degrees)\n return degrees", "def angle_difference(θ1, θ2):\n ordinary_diff = (θ2 - θ1) % np.pi\n return (np.pi / 2) - np.abs(ordinary_diff - (np.pi / 2))" ]
[ "0.8752359", "0.8643369", "0.8583145", "0.85778534", "0.8426674", "0.83578736", "0.8332106", "0.825163", "0.8241473", "0.82317245", "0.8202834", "0.8093871", "0.80870366", "0.8080988", "0.8072849", "0.8057119", "0.80453193", "0.8017723", "0.80015993", "0.7964633", "0.79558647", "0.7945779", "0.7903372", "0.78857374", "0.7868664", "0.78489274", "0.7843989", "0.7822268", "0.77984506", "0.7777398", "0.77604437", "0.77585506", "0.77416456", "0.77227575", "0.77125615", "0.76941675", "0.76701343", "0.7659698", "0.76566404", "0.7621", "0.7620155", "0.7604601", "0.759869", "0.7570777", "0.7563653", "0.7556266", "0.7540476", "0.7532771", "0.74808615", "0.7460284", "0.7460278", "0.74289685", "0.74223906", "0.7419861", "0.7372774", "0.73647135", "0.73588175", "0.73575175", "0.73456484", "0.73102915", "0.7305489", "0.72959274", "0.7275784", "0.72731394", "0.7248391", "0.72117674", "0.72074544", "0.7200766", "0.7191808", "0.71845967", "0.7157774", "0.7125607", "0.7100897", "0.70822906", "0.708199", "0.70773536", "0.70610875", "0.70526516", "0.7039857", "0.7020747", "0.69967526", "0.69959885", "0.6993482", "0.69875646", "0.6979282", "0.6978226", "0.69683164", "0.69617325", "0.6961393", "0.6959497", "0.69563335", "0.6949843", "0.69421893", "0.69413346", "0.69362926", "0.68802446", "0.6862889", "0.6859864", "0.6858894", "0.6857142" ]
0.80805355
14
Create and init a conv1d layer with spectral normalization
def _conv1d_spect(ni, no, ks=1, stride=1, padding=0, bias=False): conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias) nn.init.kaiming_normal_(conv.weight) if bias: conv.bias.data.zero_() return spectral_norm(conv)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def V1_init(layer, size, spatial_freq, center=None, scale=1., bias=False, seed=None, tied=False):\n classname = layer.__class__.__name__\n assert classname.find('Conv2d') != -1, 'This init only works for Conv layers'\n\n out_channels, in_channels, xdim, ydim = layer.weight.shape\n data = layer.weight.data.numpy().copy()\n # same weights for each channel\n if tied:\n W = V1_weights(out_channels, (xdim, ydim),\n size, spatial_freq, center, scale, seed=seed)\n for chan in range(in_channels):\n if not tied:\n W = V1_weights(out_channels, (xdim, ydim),\n size, spatial_freq, center, scale, seed=seed)\n data[:, chan, :, :] = W.reshape(out_channels, xdim, ydim)\n data = Tensor(data)\n with torch.no_grad():\n layer.weight.copy_(data)\n\n if bias == False:\n layer.bias = None", "def __init__(self,\n channels: int,\n kernel_size: int=15,\n activation: nn.Layer=nn.ReLU(),\n norm: str=\"batch_norm\",\n causal: bool=False,\n bias: bool=True,\n adaptive_scale: bool=False,\n init_weights: bool=False):\n assert check_argument_types()\n super().__init__()\n self.bias = bias\n self.channels = channels\n self.kernel_size = kernel_size\n self.adaptive_scale = adaptive_scale\n if self.adaptive_scale:\n ada_scale = self.create_parameter(\n [1, 1, channels], default_initializer=I.Constant(1.0))\n self.add_parameter('ada_scale', ada_scale)\n ada_bias = self.create_parameter(\n [1, 1, channels], default_initializer=I.Constant(0.0))\n self.add_parameter('ada_bias', ada_bias)\n\n self.pointwise_conv1 = Conv1D(\n channels,\n 2 * channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias_attr=None\n if bias else False, # None for True, using bias as default config\n )\n\n # self.lorder is used to distinguish if it's a causal convolution,\n # if self.lorder > 0:\n # it's a causal convolution, the input will be padded with\n # `self.lorder` frames on the left in forward (causal conv impl).\n # else: it's a symmetrical convolution\n if causal:\n padding = 0\n self.lorder = kernel_size - 1\n else:\n # kernel_size should be an odd number for none causal convolution\n assert (kernel_size - 1) % 2 == 0\n padding = (kernel_size - 1) // 2\n self.lorder = 0\n\n self.depthwise_conv = Conv1D(\n channels,\n channels,\n kernel_size,\n stride=1,\n padding=padding,\n groups=channels,\n bias_attr=None\n if bias else False, # None for True, using bias as default config\n )\n\n assert norm in ['batch_norm', 'layer_norm']\n if norm == \"batch_norm\":\n self.use_layer_norm = False\n self.norm = BatchNorm1D(channels)\n else:\n self.use_layer_norm = True\n self.norm = LayerNorm(channels)\n\n self.pointwise_conv2 = Conv1D(\n channels,\n channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias_attr=None\n if bias else False, # None for True, using bias as default config\n )\n self.activation = activation\n\n if init_weights:\n self.init_weights()", "def conv_init(conv, act='linear'):\r\n n = conv.kernel_size[0] * conv.kernel_size[1] * conv.out_channels\r\n conv.weight.data.normal_(0, math.sqrt(2. 
/ n))", "def test_conv1d():\n filters = 3\n kernel_size = 2\n strides = 1\n batch_size = 2\n in_channels = 3\n input_size = 5\n input_shape = (batch_size, input_size, in_channels)\n\n keras_layer = keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, strides=strides, use_bias=True, bias_initializer=\"ones\")\n input_layer = keras.Input(batch_shape=input_shape)\n keras_model = keras.models.Model(input=input_layer, outputs=keras_layer(input_layer))\n\n new_weights = np.arange(18).reshape(2, 3, 3)\n keras_layer.set_weights([new_weights, keras_layer.get_weights()[1]])\n\n kinput = np.arange(batch_size * input_size * in_channels).reshape(input_shape)\n kout = keras_model.predict(kinput)\n\n torch_model, _ = translate.translate_layer(keras_layer)\n tinput = torch.Tensor(kinput).permute(0, 2, 1)\n tout = torch_model(tinput).permute(0, 2, 1)\n assert np.isclose(kout, tout.cpu().data.numpy()).all()", "def __init__(self, in_channels, out_channels):\n super(CNN, self).__init__()\n self.conv1 = nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=5, padding=1)", "def dir_conv_layer(model, nb_filters, rate):\n\n model = Conv1D(filters=nb_filters, kernel_size=3, padding='causal', dilation_rate=rate, activation='relu')(model)\n model = BatchNormalization()(model)\n\n # exponentially increase dilated convolution receptive field\n # receptive field size loops back around when rate = 16 to create [1...8] block\n rate *= 2\n if rate == 16:\n rate = 1\n return model, rate", "def conv_init(m):\r\n\r\n classname = m.__class__.__name__\r\n if classname.find('Conv') != -1:\r\n init.xavier_uniform_(m.weight, gain = np.sqrt(2))\r\n elif classname.find('BatchNorm') != -1:\r\n init.constant_(m.weight, 1)\r\n init.constant_(m.bias, 0)", "def SNConv2d(*args, **kwargs):\n return spectral_norm(nn.Conv2d(*args, **kwargs))", "def Linear1d(\n in_channels: int,\n out_channels: int,\n stride: int = 1,\n bias: bool = True,\n) -> torch.nn.Module:\n return nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=stride, bias=bias)", "def __init__(self, dim_in, dim_out):\n super(Conv1D, self).__init__()\n self.dim_in = dim_in\n self.dim_out = dim_out\n w = torch.empty(dim_in, dim_out)\n nn.init.normal_(w, std=0.02)\n self.w = nn.Parameter(w)\n self.b = nn.Parameter(torch.zeros(dim_out))", "def sn_conv1x1(x, output_dim, training=True, name='sn_conv1x1'):\n with tf.variable_scope(name, custom_getter=sn_gettr(training=training)):\n w = tf.get_variable(\n 'weights', [1, 1, x.get_shape()[-1], output_dim],\n initializer=tf.keras.initializers.VarianceScaling(\n scale=1.0, mode='fan_avg', distribution='uniform'))\n conv = tf.nn.conv2d(\n input=x, filters=w, strides=[1, 1, 1, 1], padding='SAME')\n return conv", "def add_conv_type1(model, depth, input_shape=None):\n if input_shape is not None:\n model.add(Convolution2D(depth, 5, 5, subsample=(2, 2), \\\n input_shape=input_shape))\n else:\n model.add(Convolution2D(depth, 5, 5, subsample=(2, 2), \\\n activation='relu', W_regularizer=l2(0.05)))", "def _first_conv(x: tf.Tensor) -> tf.Tensor:\n with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None):\n x = ResNet._conv2d_same(x, 64, 7, stride=2, scope='conv1')\n return slim.max_pool2d(x, [3, 3], stride=2, scope='pool1')", "def resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n batch_normalization=False,\n conv_first=True):\n # conv = Conv1D(num_filters,\n # kernel_size=kernel_size,\n # strides=strides,\n # padding='same',\n # 
kernel_initializer='he_normal',\n # kernel_regularizer=l2(1e-4))\n conv = Conv1D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n )\n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n return x", "def _init_layers(self) -> None:\n super()._init_layers()\n self.controller = nn.Conv2d(\n self.feat_channels, self.num_params, 3, padding=1)", "def build_2l_conv1d(input_shape, # type: tuple\n n_outputs, # type: int\n filters=64, # type: int\n kernel_size=3, # type: int\n dropout_rate=0.5, # type: float\n max_pool_size=2 # type: int\n ):\n # type: (...) -> Sequential\n model = Sequential(name='fd2lcov1dnet')\n model.add(Conv1D(filters=filters, kernel_size=kernel_size, activation='relu', input_shape=input_shape))\n model.add(Conv1D(filters=filters, kernel_size=kernel_size, activation='relu', input_shape=input_shape))\n\n return __add_model_tail(model, n_outputs, dropout_rate, max_pool_size)", "def __init__(self, n_filters = 64,\n n_kernels = 3,\n n_outputs = 10,\n inp_shape = (28,28),\n residual=True,\n regularizer = None,\n intializer = None,\n use_pool= False,\n use_dropout = False,\n use_batchnorm = False\n ):\n super(CNNModel, self).__init__()\n self.conv_dim = len(inp_shape)-1\n self.n_filters = n_filters\n self.initializer = intializer\n self.n_kernels = n_kernels\n self.projection = 3\n self.n_outputs = n_outputs\n self.num_layers = 1\n self.inp_shape = inp_shape\n self.regularizer = regularizer\n self.use_pool = use_pool\n self.residual = residual\n self.use_dropout = use_dropout\n self.use_batchnorm = use_batchnorm\n\n kernel_initializer = initializers.RandomNormal(mean=0.0, stddev=0.05)\n\n if self.conv_dim == 1:\n self.input_layer = layers.Conv1D(self.n_filters, (self.projection),\n activation = \"linear\",\n input_shape = self.inp_shape,\n name ='cnn_input',\n padding = 'same',\n kernel_regularizer = self.regularizer,\n bias_regularizer = self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n self.output_layer = layers.Conv1D(self.n_kernels, (self.projection),\n activation=\"linear\",\n input_shape=(None, self.inp_shape[0], self.n_filters),\n name='cnn_output',\n padding = 'same',\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n if self.use_pool:\n self.pool = layers.MaxPool1D()\n elif self.conv_dim == 2:\n self.input_layer = layers.Conv2D(self.n_filters, (self.projection,self.projection),\n activation=\"linear\",\n input_shape=self.inp_shape,\n name='cnn_input',\n padding = 'same',\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n self.output_layer = layers.Conv2D(self.n_kernels, (self.projection, self.projection),\n activation= \"linear\",\n input_shape=(None, self.inp_shape[0],self.inp_shape[1], self.n_filters),\n name=\"cnn_output\",\n padding = 'same',\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n if self.use_pool:\n self.pool = layers.MaxPool2D()\n 
self.list_cnn = [self.input_layer]\n self.flatten = layers.Flatten()\n\n #compute input shape after flatten for the dense layer\n if not self.use_pool:\n self.class_inp = np.prod(self.inp_shape[:-1])*self.n_kernels\n else:\n self.class_inp = np.prod(self.inp_shape[:-1])*self.n_kernels//(2**self.conv_dim)\n # self.classify = MyDenseLayer(\n # self.n_outputs,shape = (None,self.class_inp),\n # layer_name = 'classify',\n # initializer = \"RandomNormal\")\n self.classify = layers.Dense(units = self.n_outputs,\n activation = 'softmax', use_bias = True,\n input_shape = self.class_inp,\n kernel_initializer = kernel_initializer, bias_initializer=initializers.get(\"zeros\"),\n name = 'classification_layer')", "def _strict_conv1d(x, h):\n with ops.name_scope('strict_conv1d', values=[x, h]):\n x = array_ops.reshape(x, (1, -1, 1, 1))\n h = array_ops.reshape(h, (-1, 1, 1, 1))\n result = nn_ops.conv2d(x, h, [1, 1, 1, 1], 'SAME')\n return array_ops.reshape(result, [-1])", "def _make_conv_level(in_channels, out_channels, num_convs, norm_func,\n stride=1, dilation=1):\n layers = []\n for i in range(num_convs):\n layers.extend([\n nn.Conv2D(in_channels, out_channels, kernel_size=3,\n stride=stride if i == 0 else 1,\n padding=dilation, bias_attr=False, dilation=dilation),\n norm_func(out_channels),\n nn.ReLU()])\n\n in_channels = out_channels\n\n return nn.Sequential(*layers)", "def shallow_CNN(num_bands = None, k_1 = None, k_2 = None, k_3 = None):\n active = 'relu'\n active2 = 'tanh'\n active3 = 'linear'\n inp = Input(shape=(None, None, num_bands))\n# bn = BatchNormalization()(inp)\n l1 = Conv2D(64, kernel_size=k_1, activation= active, padding='same', kernel_initializer='he_normal' )(inp)\n l2 = Conv2D(48, kernel_size=k_2, activation=active, padding='same', kernel_initializer='he_normal')(l1)\n l3 = Conv2D(32, kernel_size=k_3, activation=active, padding='same', kernel_initializer='he_normal')(l2)\n l4 = Conv2D(1, kernel_size=k_3, activation=active2, padding='same', kernel_initializer='he_normal',name=\"details\")(l3)\n# l4= Conv2D(1, kernel_size=k_3, activation=active2, padding='same', kernel_initializer='he_normal')(l3)\n# inp2 = Input(shape=(None, None, 1))\n inp1 = Input(shape=(None, None, 1))\n out = Add(name=\"band\")([l4, inp1])\n out1 = Conv2D(1, kernel_size=k_3, activation=active3, padding='same', kernel_initializer='he_normal',name=\"struct\")(out)\n out2 = Conv2D(1, kernel_size=k_3, activation=active3, padding='same', kernel_initializer='he_normal',name=\"TV\")(out)\n model = Model([inp, inp1], [out, out1, out2], name='shallow_CNN')\n \n# out= Conv2D(1, kernel_size=k_3, activation='relu', padding='same', kernel_initializer='he_normal',name=\"nothing\")(out1)\n# model = Model(inp, l4, name='shallow_CNN')\n return model", "def conv1x1(in_channels, out_channels, groups=1):\n return nn.Conv2d(\n in_channels, \n out_channels, \n kernel_size=1, \n groups=groups,\n stride=1)", "def __init__(self, momentum: float = .5):\n super(VanillaEncoder, self).__init__()\n self.conv1 = PointNetConv2Layer(64, momentum)\n self.conv2 = PointNetConv2Layer(64, momentum)\n self.conv3 = PointNetConv2Layer(64, momentum)\n self.conv4 = PointNetConv2Layer(128, momentum)\n self.conv5 = PointNetConv2Layer(1024, momentum)", "def Conv1dWrapper(generated, *args, **kwargs):\n if generated:\n return Conv1dGenerated(*args, **kwargs)\n else:\n return Conv1dStatic(*args, **kwargs)", "def __init__(self, filter1x1, ker_size, filters):\n super(reduce, self).__init__()\n self.con1 = layers.Conv2D(\n filter1x1, kernel_size=1, 
padding=\"same\", activation=\"relu\"\n )\n self.conv = layers.Conv2D(\n filters, kernel_size=ker_size, padding=\"same\", activation=\"relu\"\n )", "def LinearizedConv1d(\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n dropout: float=0,\n **kwargs,\n) -> nn.Module:\n m = fairseq_linear_conv(in_channels, out_channels, kernel_size, **kwargs)\n std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))\n nn.init.normal_(m.weight, mean=0, std=std)\n nn.init.constant_(m.bias, 0)\n return nn.utils.weight_norm(m, dim=2)", "def time_conv_layer(model, nb_filters):\n\n model = Conv1D(filters=nb_filters, kernel_size=3, padding='causal', activation='relu')(model)\n model = BatchNormalization()(model)\n return model", "def __init__(self, z_dim, initailize_weights=True):\n super().__init__()\n self.z_dim = z_dim\n\n self.frc_encoder = nn.Sequential(\n CausalConv1D(6, 16, kernel_size=2, stride=2),\n nn.LeakyReLU(0.1, inplace=True),\n CausalConv1D(16, 32, kernel_size=2, stride=2),\n nn.LeakyReLU(0.1, inplace=True),\n CausalConv1D(32, 64, kernel_size=2, stride=2),\n nn.LeakyReLU(0.1, inplace=True),\n CausalConv1D(64, 128, kernel_size=2, stride=2),\n nn.LeakyReLU(0.1, inplace=True),\n CausalConv1D(128, 2 * self.z_dim, kernel_size=2, stride=2),\n nn.LeakyReLU(0.1, inplace=True),\n )\n\n if initailize_weights:\n init_weights(self.modules())", "def apply(self, input):\n\n # input.unsqueeze(1) changes dim from (minibatch_size, sequence_length) to\n # (minibatch_size, num_channels=1, sequence_length)\n # the final squeeze(1) removes the num_channels=1 axis\n return torch.nn.functional.conv1d(input.unsqueeze(1), self.filt.type_as(input),\n padding=self.padding).squeeze(1)", "def resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n batch_normalization=True,\n conv_first=True):\n conv = Conv2D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4))\n\n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n return x", "def resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n batch_normalization=True,\n conv_first=True):\n conv = Conv2D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4))\n\n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n return x", "def resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n batch_normalization=True,\n conv_first=True):\n conv = Conv2D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4))\n\n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n return 
x", "def resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n batch_normalization=True,\n conv_first=True):\n conv = Conv2D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4))\n\n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n return x", "def resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n batch_normalization=True,\n conv_first=True):\n conv = Conv2D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4))\n\n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n return x", "def __init__(\n self, config: SimpleGCNConfig = SimpleGCNConfig(name=\"simplegcn\")\n ):\n super().__init__()\n self.edge_lengthscale = config.edge_lengthscale\n self.weight_edges = config.weight_edges\n\n self.atom_embedding = nn.Linear(\n config.atom_input_features, config.width\n )\n\n self.layer1 = GraphConv(config.width, config.width)\n self.layer2 = GraphConv(config.width, config.output_features)\n self.readout = AvgPooling()", "def conv1d(inputs,\n filters,\n kernel_size,\n strides=1,\n padding='same',\n data_format='channels_last',\n dilation_rate=1,\n activation=None,\n use_bias=True,\n kernel_initializer=None,\n bias_initializer=tf.zeros_initializer(),\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n trainable=True,\n name=None,\n reuse=None):\n\n if padding=='causal':\n # zero pad left side of input\n n_pads = dilation_rate*(kernel_size-1)\n inputs = tf.pad(inputs,[[0,0],[n_pads,0],[0,0]]) \n padding = 'valid'\n\n params = {\"inputs\":inputs, \"filters\":filters, \"kernel_size\":kernel_size,\n \"strides\":strides,\"padding\":padding,\"data_format\":data_format,\n \"dilation_rate\":dilation_rate,\"activation\":activation,\"use_bias\":use_bias,\n \"kernel_initializer\":kernel_initializer,\"bias_initializer\":bias_initializer,\n \"kernel_regularizer\":kernel_regularizer,\"bias_regularizer\":bias_regularizer,\n \"activity_regularizer\":activity_regularizer,\"kernel_constraint\":kernel_constraint,\n \"bias_constraint\":bias_constraint,\"trainable\":trainable,\"name\":name,\"reuse\":reuse} \n\n conv_out = tf.layers.conv1d(**params)\n\n return conv_out", "def resnet_layer(inputs,\r\n num_filters=16,\r\n kernel_size=3,\r\n strides=1,\r\n activation='relu',\r\n batch_normalization=True,\r\n conv_first=True):\r\n conv = Conv2D(num_filters,\r\n kernel_size=kernel_size,\r\n strides=strides,\r\n padding='same',\r\n kernel_initializer='he_normal',\r\n kernel_regularizer=l2(1e-4))\r\n\r\n x = inputs\r\n if conv_first:\r\n x = conv(x)\r\n if batch_normalization:\r\n x = BatchNormalization()(x)\r\n if activation is not None:\r\n x = Activation(activation)(x)\r\n else:\r\n if batch_normalization:\r\n x = BatchNormalization()(x)\r\n if activation is not None:\r\n x = Activation(activation)(x)\r\n x = 
conv(x)\r\n return x", "def resnet_layer(inputs,\r\n num_filters=16,\r\n kernel_size=3,\r\n strides=1,\r\n activation='relu',\r\n batch_normalization=True,\r\n conv_first=True):\r\n conv = Conv2D(num_filters,\r\n kernel_size=kernel_size,\r\n strides=strides,\r\n padding='same',\r\n kernel_initializer='he_normal',\r\n kernel_regularizer=l2(1e-4))\r\n\r\n x = inputs\r\n if conv_first:\r\n x = conv(x)\r\n if batch_normalization:\r\n x = BatchNormalization()(x)\r\n if activation is not None:\r\n x = Activation(activation)(x)\r\n else:\r\n if batch_normalization:\r\n x = BatchNormalization()(x)\r\n if activation is not None:\r\n x = Activation(activation)(x)\r\n x = conv(x)\r\n return x", "def __init__(self, sigma_initializer=RandomNormal(0, 1), spectral_iterations=1,\n fully_diff_spectral=True, stateful=False, renormalize=False, **kwargs):\n super(SNConditionalConv11, self).__init__(**kwargs)\n self.sigma_initializer = keras.initializers.get(sigma_initializer)\n self.fully_diff_spectral = fully_diff_spectral\n self.spectral_iterations = spectral_iterations\n self.stateful = stateful\n self.renormalize = renormalize", "def resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n batch_normalization=True,\n conv_first=True):\n conv = Conv2D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4))\n\n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n\n return x", "def __init__(self, sigma_initializer=RandomNormal(0, 1), spectral_iterations=1,\n fully_diff_spectral=True, stateful=False, renormalize=False, **kwargs):\n super(SNFactorizedConv11, self).__init__(**kwargs)\n self.sigma_initializer = keras.initializers.get(sigma_initializer)\n self.fully_diff_spectral = fully_diff_spectral\n self.spectral_iterations = spectral_iterations\n self.stateful = stateful\n self.renormalize = renormalize", "def spectral_norm(module, name='weight', n_power_iterations=1, eps=1e-12, dim=None):\n if dim is None:\n if isinstance(module, (torch.nn.ConvTranspose1d,\n torch.nn.ConvTranspose2d,\n torch.nn.ConvTranspose3d)):\n dim = 1\n else:\n dim = 0\n SpectralNorm.apply(module, name, n_power_iterations, dim, eps)\n return module", "def apply_conv2d_1x1(input_layer, num_classes, kernel_size=1):\n # tf.layers.conv2d(inputs, filters, kernel_size, strides=(1, 1), padding='valid', ...,\n # kernel_initializer=None, ... 
, kernel_regularizer=None)\n return tf.layers.conv2d(input_layer, num_classes, kernel_size, padding='same',\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))", "def patch_first_conv(model, in_channels):\n\n # get first conv\n for module in model.modules():\n if isinstance(module, nn.Conv2d):\n break\n\n # change input channels for first conv\n module.in_channels = in_channels\n weight = module.weight.detach()\n reset = False\n\n if in_channels == 1:\n weight = weight.sum(1, keepdim=True)\n elif in_channels == 2:\n weight = weight[:, :2] * (3.0 / 2.0)\n else:\n reset = True\n weight = torch.Tensor(\n module.out_channels,\n module.in_channels // module.groups,\n *module.kernel_size\n )\n\n module.weight = nn.parameter.Parameter(weight)\n if reset:\n module.reset_parameters()", "def testMask1D(self):\n mask = np.ones((3,), dtype=np.float32)\n inputs = tf.constant(1.0, shape=(1, 5, 2))\n conv1 = snt.Conv1D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = np.reshape(np.array([6, 6, 6]), (1, 3, 1))\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(out.eval(), expected_out)", "def discriminator_block(in_filters, out_filters, f_size=4, normalize=True,stride=2):\n layers = [nn.Conv2d(in_filters, out_filters, f_size, stride=stride, padding=0)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers", "def add_conv_type2(model, depth):\n model.add(Convolution2D(depth, 3, 3, subsample=(1, 1)))", "def __init__(self, filter1x1):\n super(poolproj, self).__init__()\n self.max = layers.MaxPooling2D(pool_size=3, strides=1, padding=\"same\")\n self.conv = layers.Conv2D(\n filter1x1, kernel_size=1, padding=\"same\", activation=\"relu\"\n )", "def __init__(self,opt):\n super(SNPatchDiscriminator, self).__init__()\n # if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n # use_bias = norm_layer.func == nn.InstanceNorm2d\n # else:\n # use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n n_layers = 3\n ndf = opt.ndf\n use_bias = True\n sequence = [nn.utils.spectral_norm(nn.Conv2d(opt.input_nc, ndf, kernel_size=kw, stride=2, padding=padw)), nn.LeakyReLU(0.2, True)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 4)\n sequence += [\n nn.utils.spectral_norm(nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias)),\n # norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n self.model = nn.Sequential(*sequence)", "def __init__(self, name=\"conv_1d_lstm_cell\", **kwargs):\n super(Conv1DLSTMCell, self).__init__(conv_ndims=1, name=name, **kwargs)", "def resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n weight_decay=l2(1e-4),\n batch_normalization=True,\n conv_first=True,\n layer_num=None):\n conv = Conv2D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=weight_decay,\n name='conv2d_%d' % layer_num)\n\n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = 
Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n return x", "def __init__(self, **kwargs):\n super(Debug, self).__init__(**kwargs)\n with self.name_scope():\n self.conv1 = nn.Conv2D(channels=4, kernel_size=2)", "def __init__(self, input_nc, ndf=64, n_layers=6, norm_layer=nn.BatchNorm2d, global_stages=0):\n super(OrgDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 3\n padw = 0\n self.conv1 = spectral_norm(PartialConv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw))\n if global_stages < 1:\n self.conv1f = spectral_norm(PartialConv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw))\n else:\n self.conv1f = self.conv1\n self.relu1 = nn.LeakyReLU(0.2, True)\n nf_mult = 1\n nf_mult_prev = 1\n\n n = 1\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n self.conv2 = spectral_norm(\n PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))\n self.norm2 = norm_layer(ndf * nf_mult)\n if global_stages < 2:\n self.conv2f = spectral_norm(\n PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))\n self.norm2f = norm_layer(ndf * nf_mult)\n else:\n self.conv2f = self.conv2\n self.norm2f = self.norm2\n\n self.relu2 = nn.LeakyReLU(0.2, True)\n\n n = 2\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n self.conv3 = spectral_norm(\n PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))\n self.norm3 = norm_layer(ndf * nf_mult)\n if global_stages < 3:\n self.conv3f = spectral_norm(\n PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))\n self.norm3f = norm_layer(ndf * nf_mult)\n else:\n self.conv3f = self.conv3\n self.norm3f = self.norm3\n self.relu3 = nn.LeakyReLU(0.2, True)\n\n n = 3\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n self.norm4 = norm_layer(ndf * nf_mult)\n self.conv4 = spectral_norm(\n PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))\n self.conv4f = spectral_norm(\n PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))\n self.norm4f = norm_layer(ndf * nf_mult)\n\n self.relu4 = nn.LeakyReLU(0.2, True)\n\n n = 4\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n self.conv5 = spectral_norm(\n PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))\n self.conv5f = spectral_norm(\n PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))\n self.norm5 = norm_layer(ndf * nf_mult)\n self.norm5f = norm_layer(ndf * nf_mult)\n self.relu5 = nn.LeakyReLU(0.2, True)\n\n n = 5\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n self.conv6 = spectral_norm(\n PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))\n self.conv6f = spectral_norm(\n PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))\n self.norm6 = norm_layer(ndf * nf_mult)\n self.norm6f = norm_layer(ndf * nf_mult)\n self.relu6 = nn.LeakyReLU(0.2, True)\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 
8)\n self.conv7 = spectral_norm(\n PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias))\n self.conv7f = spectral_norm(\n PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias))", "def init_weight(self):\n init_layer(self.conv1)\n init_layer(self.conv2)\n init_bn(self.norm1)\n init_bn(self.norm2)", "def __init__(self, activation_function, input_size=(1, 1), output_size=(1, 1),\n learning_batch_size=1, filter_size=(1, 1), input_feature_maps=1,\n output_feature_maps=1, convolution_mode='valid', step=1):\n super(ConvolutionalLayer, self).__init__(activation_function, input_size=1, output_size=1,\n learning_batch_size=learning_batch_size)\n self._filter_size = filter_size\n self._input_feature_maps = input_feature_maps\n self._output_feature_maps = output_feature_maps\n self._step = step # Laisser à 1 pour l'instant\n self._convolution_mode = convolution_mode\n self._weights = np.random.randn(self._output_feature_maps, self._input_feature_maps,\n self._filter_size[0], self._filter_size[1])\n self._bias = np.zeros(self._output_feature_maps)\n self._input_size = input_size\n self._output_size = output_size\n if self._convolution_mode == 'full':\n self._output_size = (self._input_size[0] + (self._filter_size[0]-1),\n self._input_size[1] + (self._filter_size[1]-1))\n self._reverse_convolution_mode = 'valid'\n # elif self._convolution_mode == 'same':\n # self._output_size = self._input_size\n # self._reverse_convolution_mode = 'same'\n elif self._convolution_mode == 'valid':\n self._output_size = (self._input_size[0] - (self._filter_size[0]-1),\n self._input_size[1] - (self._filter_size[1]-1))\n self._reverse_convolution_mode = 'full'\n else:\n raise Exception(\"Invalid convolution mode\")\n self.input = np.zeros((self._learning_batch_size, self._input_feature_maps,\n self._input_size[0], self._input_size[1]))\n self.activation_levels = np.zeros((self._learning_batch_size, self._output_feature_maps,\n self._output_size[0], self._output_size[1]))\n self.output = np.zeros((self._learning_batch_size, self._output_feature_maps,\n self._output_size[0], self._output_size[1]))", "def __init__(self, rng, input, n_in = 0, n_out = 0, \n halfWinSize = 0, activation = T.nnet.relu, mask = None):\n self.input = input\n self.n_in = n_in\n self.n_out = n_out\n\tself.halfWinSize = halfWinSize\n\n windowSize = 2*halfWinSize + 1\n self.filter_size = windowSize\n\n # reshape input to shape (batchSize, n_in, nRows=1, nCols=seqLen) \n in4conv2D = input.dimshuffle(0, 1, 'x', 2)\n\n # initialize the filter\n w_shp = (n_out, n_in, 1, windowSize)\n\tif activation == T.nnet.relu:\n W_values = np.asarray(\n rng.normal(scale = np.sqrt(2. / (n_in*windowSize + n_out)),\n size = w_shp), \n dtype = theano.config.floatX )\n\telse:\n W_values = np.asarray(\n rng.uniform(low = - np.sqrt(6. / (n_in*windowSize + n_out)), \n high = np.sqrt(6. 
/ (n_in*windowSize + n_out)), \n size = w_shp),\n dtype=theano.config.floatX\n )\n if activation == theano.tensor.nnet.sigmoid:\n \tW_values *= 4\n\n self.W = theano.shared(value=W_values, name='ResConv1d_W', borrow=True)\n\n b_shp = (n_out,)\n self.b = theano.shared(\n np.asarray(rng.uniform(low = -.0, high = .0, size = b_shp), \n dtype=input.dtype), \n name ='ResConv1d_b', \n borrow=True)\n\n # conv_out and conv_out_bias have shape (batch_size, n_out, 1, nCols)\n conv_out = T.nnet.conv2d(in4conv2D, self.W, \n filter_shape=w_shp, border_mode='half')\n if activation is not None:\n conv_out_bias = activation(conv_out + \n self.b.dimshuffle('x', 0, 'x', 'x'))\n else:\n conv_out_bias = (conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))\n\n\t## out2 has shape (batchSize, n_out, nCols)\n out2 = conv_out_bias.dimshuffle(0, 1, 3, 2)[:, :, :, 0]\n\n if mask is not None:\n ## since we did zero padding at left side of the input tensor\n ## we need to reset these positions to 0 again after convolution \n ## to avoid introducing noise\n ## mask has shape (batchSize, #positions_to_be_masked)\n\n ##take the subtensor of out2 that needs modification\n out2_sub = out2[:, :, :mask.shape[1] ]\n mask_new = mask.dimshuffle(0, 'x', 1)\n self.output = T.set_subtensor(out2_sub, T.mul(out2_sub, mask_new))\n else:\n self.output = out2\n\n\t##self.output has shape (batchSize, n_out, nCols)\n\n # parameters of the model\n self.params=[self.W, self.b]\n\n self.paramL1 = abs(self.W).sum() + abs(self.b).sum()\n self.paramL2 = (self.W**2).sum() + (self.b**2).sum()", "def __init__(self,\n intermediate_channels,\n output_channels,\n pred_key,\n name,\n conv_type='depthwise_separable_conv',\n bn_layer=tf.keras.layers.BatchNormalization):\n super(PanopticDeepLabSingleHead, self).__init__(name=name)\n self._pred_key = pred_key\n\n self.conv_block = convolutions.StackedConv2DSame(\n conv_type=conv_type,\n num_layers=1,\n output_channels=intermediate_channels,\n kernel_size=5,\n name='conv_block',\n use_bias=False,\n use_bn=True,\n bn_layer=bn_layer,\n activation='relu')\n self.final_conv = layers.Conv2D(\n output_channels,\n kernel_size=1,\n name='final_conv',\n kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01))", "def __init__(self, sigma_initializer=RandomNormal(0, 1), spectral_iterations=1,\n fully_diff_spectral=True, stateful=False, renormalize=False, **kwargs):\n super(SNConditionalConv2D, self).__init__(**kwargs)\n self.sigma_initializer = keras.initializers.get(sigma_initializer)\n self.fully_diff_spectral = fully_diff_spectral\n self.spectral_iterations = spectral_iterations\n self.stateful = stateful\n self.renormalize = renormalize", "def conv1d(\n input,\n weight,\n bias=None,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n):\n return _conv('Conv', utils._single, **locals())", "def define_encoder_block(layer_in, n_filters, batchnorm=True):\n\n # weight initialization\n init = RandomNormal(stddev=0.02)\n # add downsampling layer\n g = Conv2D(n_filters, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(layer_in)\n # conditionally add batch normalization\n if batchnorm:\n g = BatchNormalization()(g, training=True)\n # leaky relu activation\n g = LeakyReLU(alpha=0.2)(g)\n\n return g", "def __init__(self, config):\n super(NLayerDiscriminator, self).__init__()\n input_nc = config[\"in_channels\"]\n ndf = config[\"ndf\"]\n n_layers = config[\"n_layers\"]\n use_actnorm = config[\"use_actnorm\"]\n use_spectral = config[\"spectral_norm\"]\n if not use_actnorm:\n norm_layer = 
nn.BatchNorm2d\n else:\n norm_layer = ActNorm\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func != nn.BatchNorm2d\n else:\n use_bias = norm_layer != nn.BatchNorm2d\n\n kw = 4\n padw = 1\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [\n nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map\n\n if use_spectral:\n for i, lay in enumerate(sequence):\n if isinstance(lay, nn.Conv2d):\n sequence[i] = spectral_norm(lay)\n\n self.main = nn.Sequential(*sequence)\n\n weights_init(self.main)", "def patch_first_conv(model, in_channels: int = 4) -> None:\n\n # get first conv\n for module in model.modules():\n if isinstance(module, torch.nn.Conv2d):\n break\n\n # change input channels for first conv\n module.in_channels = in_channels\n weight = module.weight.detach()\n # reset = False\n\n if in_channels == 1:\n weight = weight.sum(1, keepdim=True)\n elif in_channels == 2:\n weight = weight[:, :2] * (3.0 / 2.0)\n elif in_channels == 4:\n weight = torch.nn.Parameter(torch.cat([weight, weight[:, -1:, :, :]], dim=1))\n elif in_channels % 3 == 0:\n weight = torch.nn.Parameter(torch.cat([weight] * (in_channels // 3), dim=1))\n\n module.weight = weight", "def __init__(self, n_dim: int, norm_layer: str, in_channels: int,\n start_filts: int, mode: str = '7x7'):\n super().__init__()\n self._in_channels = in_channels\n self._start_filts = start_filts\n self._mode = mode\n\n if mode == '7x7':\n self.convs = torch.nn.Sequential(\n *[ConvNd(n_dim, in_channels, self._start_filts,\n kernel_size=7, stride=2, padding=3, bias=False),\n NormNd(norm_layer, n_dim, self._start_filts)]\n )\n elif mode == '3x3':\n self.convs = torch.nn.Sequential(\n *[ConvNd(n_dim, in_channels, self._start_filts,\n kernel_size=3, stride=2, padding=1, bias=False),\n NormNd(norm_layer, n_dim, self._start_filts),\n ConvNd(n_dim, self._start_filts, self._start_filts,\n kernel_size=3, stride=1, padding=1, bias=False),\n NormNd(norm_layer, n_dim, self._start_filts),\n ConvNd(n_dim, self._start_filts, self._start_filts,\n kernel_size=3, stride=1, padding=1, bias=False),\n NormNd(norm_layer, n_dim, self._start_filts)\n ]\n )\n else:\n raise ValueError('{} is not a supported mode!'.format(mode))", "def basic_block(x, num_features, cfg, name):\n x = Conv1D(num_features, kernel_size=3, padding='same', use_bias=True,\n kernel_regularizer=l2(cfg.weight_decay), kernel_initializer=taejun_uniform(), name=f'{name}_conv')(x)\n x = BatchNormalization(name=f'{name}_norm')(x)\n x = Activation('relu', name=f'{name}_relu')(x)\n x = MaxPool1D(pool_size=3, name=f'{name}_pool')(x)\n return x", "def conv1x1(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)", "def _init_layers(self) -> None:\n self.relu = nn.ReLU(inplace=True)\n 
self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n pred_pad_size = self.pred_kernel_size // 2\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_reg = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 4,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_centerness = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 1,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])", "def __init__(self):\n # def __init__(self, embed_size, input_channels):\n super(EncoderCNN, self).__init__()\n self.off_model = OffsetCNN()\n self.sig_model = SignificanceCNN()\n self.sigmoid = nn.Sigmoid()\n self.W = nn.Conv3d(1, 1, (5, 1, 1))", "def __init__(self):\n\n super(GlobalDiscriminator, self).__init__()\n\n # input image will have the size of 64x64x3\n self.first_conv_layer = TransitionDown(in_channels=3, out_channels=32, kernel_size=5)\n self.second_conv_layer = TransitionDown(in_channels=32, out_channels=32, kernel_size=5)\n self.third_conv_layer = TransitionDown(in_channels=32, out_channels=64, kernel_size=5)\n self.fourth_conv_layer = TransitionDown(in_channels=64, out_channels=64, kernel_size=5)\n\n self.fc1 = nn.Linear(5 * 5 * 64, 1)\n\n torch.nn.init.xavier_uniform(self.fc1.weight)", "def discriminator_block(in_filters, out_filters, normalization=True):\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers", "def __init__(self, \n input_dim=(3, 32, 32), \n num_filters = (32, 64), filter_sizes = (7, 7), conv_param = {\"stride\": 1, \"pad\": 3},\n hidden_dim= 100, num_classes=10, weight_scale=1e-3, reg=0.0,\n dtype=np.float32\n ):\n self.params = {}\n self.reg = reg\n self.dtype = dtype\n self.conv_param = conv_param\n self.filter_sizes = filter_sizes\n self.num_layers = 4\n ############################################################################\n # TODO: Initialize weights and biases for the three-layer convolutional #\n # network. Weights should be initialized from a Gaussian with standard #\n # deviation equal to weight_scale; biases should be initialized to zero. #\n # All weights and biases should be stored in the dictionary self.params. 
#\n ############################################################################\n \n C, H, W = input_dim\n filter_size1, filter_size2 = filter_sizes\n num_filters1, num_filters2 = num_filters\n\n # conv layer 1: (N, C, H, W) -> (N, num_filters1, H, W)\n self.params['W1'] = np.random.normal(0, weight_scale, [num_filters1, C, filter_size1, filter_size1]) # square filter\n self.params['b1'] = np.zeros((num_filters1, ))\n self.params[\"sbnGamma1\"] = np.ones((num_filters1, )) # scale parameter one for each color channel during spatial batch norm\n self.params[\"sbnBeta1\"] = np.zeros((num_filters1, )) # shift parameter one for each color channel during spatial batch norm\n\n # conv layer 2: (N, num_filters1, H, W) -> (N, num_filters2, H, W)\n self.params['W2'] = np.random.normal(0, weight_scale, [num_filters2, num_filters1, filter_size2, filter_size2]) # square filter\n self.params['b2'] = np.zeros((num_filters2, ))\n self.params[\"sbnGamma2\"] = np.ones((num_filters2, ))\n self.params[\"sbnBeta2\"] = np.zeros((num_filters2, ))\n\n # (2, 2, 2) maxpool: (N, num_filters2, H, W) -> (N, num_filters2, H/2. W/2)\n # maxpool layer contributes nothing to self.params that need to be updated.\n self.maxpool_params = {\"pool_height\": 2, \"pool_width\": 2, \"stride\": 2}\n\n # affine layer 3: (N, num_filters2, H/2. W/2) -> (N, hidden_dim)\n self.params['W3'] = np.random.normal(0, weight_scale, [num_filters2 * (H / 2) * (W / 2), hidden_dim])\n self.params['b3'] = np.zeros((hidden_dim, ))\n self.params[\"bnGamma3\"] = np.ones((hidden_dim, ))\n self.params[\"bnBeta3\"] = np.zeros((hidden_dim, ))\n\n # output affine - sfmx layer 4: (N, hidden_dim) -> (N, num_classes)\n self.params['W4'] = np.random.normal(0, weight_scale, [hidden_dim, num_classes])\n self.params['b4'] = np.zeros((num_classes, ))\n\n self.bn_params = [{\"mode\": \"train\"} for _ in range(self.num_layers)]\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def discriminator_block(in_filters, out_filters, normalize=True):\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers", "def discriminator_block(in_filters, out_filters, normalize=True):\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers", "def discriminator_block(in_filters, out_filters, normalize=True):\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers", "def conv2d(layer_input, filters, f_size=4, bn=True):\n d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)\n d = LeakyReLU(alpha=.2)(d)\n if bn:\n d = BatchNormalization(momentum=0.8)(d)\n return d", "def discriminator_block(in_filters, out_filters, normalize=True):\r\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\r\n if normalize:\r\n layers.append(nn.InstanceNorm2d(out_filters))\r\n layers.append(nn.LeakyReLU(0.2, inplace=True))\r\n return layers", "def activation_channels_l1(activation):\n if activation.dim() == 4:\n view_2d = 
activation.view(-1, activation.size(2) * activation.size(3)) # (batch*channels) x (h*w)\n featuremap_norms = view_2d.norm(p=1, dim=1) # (batch*channels) x 1\n featuremap_norms_mat = featuremap_norms.view(activation.size(0), activation.size(1)) # batch x channels\n elif activation.dim() == 2:\n featuremap_norms_mat = activation.norm(p=1, dim=1) # batch x 1\n else:\n raise ValueError(\"activation_channels_l1: Unsupported shape: \".format(activation.shape))\n # We need to move the results back to the CPU\n return featuremap_norms_mat.mean(dim=0).cpu()", "def conv_layer(n_in_filters, n_filters, ker_size, stride=1, \n depthwise=False, zero_bn=False, act=True) :\n bn = nn.BatchNorm2d(n_filters)\n nn.init.constant_(bn.weight, 0. if zero_bn else 1.)\n conv = nn.Conv2d(n_in_filters, n_filters, ker_size, stride=stride,padding=ker_size//2, \n bias=False,groups = n_in_filters if depthwise else 1)\n layer = [conv, bn]\n if act: layer += [Swish()]\n return nn.Sequential(*layer)", "def conv2d(layer_input, filters, f_size=4, bn=True):\n d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)\n d = LeakyReLU(alpha=0.2)(d)\n if bn:\n d = BatchNormalization(momentum=0.8)(d)\n return d", "def conv1x1(in_planes, out_planes, stride=1):\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,\r\n padding=0, bias=False)", "def __init__(self, l1_regularization=0.):\n self.l1_regularization = l1_regularization\n self.nspec = None\n self.npixels = None\n self.nlabels = None\n self.ncoeffs = None\n\n self.coeffs = None\n self.scatter = None\n\n # normalization factor\n self.labels_median = 0.\n self.labels_std = 1.\n\n # labels names\n self.label_names = ['Teff', 'Logg', 'M_H', 'Alpha_M']\n\n self.trained_flag = False\n self.force_cpu = False\n self.log_device_placement = False", "def __init__(self, input_nc=3, ndf=64, n_layers=3, norm_type='spectral'):\n super(Discriminator_PatchGAN, self).__init__()\n self.n_layers = n_layers\n norm_layer = self.get_norm_layer(norm_type=norm_type)\n kw = 4\n padw = int(np.ceil((kw - 1.0) / 2))\n sequence = [[self.use_spectral_norm(nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), norm_type), nn.LeakyReLU(0.2, True)]]\n nf = ndf\n for n in range(1, n_layers):\n nf_prev = nf\n nf = min(nf * 2, 512)\n sequence += [[self.use_spectral_norm(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw), norm_type), norm_layer(nf), nn.LeakyReLU(0.2, True)]]\n nf_prev = nf\n nf = min(nf * 2, 512)\n sequence += [[self.use_spectral_norm(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw), norm_type), norm_layer(nf), nn.LeakyReLU(0.2, True)]]\n sequence += [[self.use_spectral_norm(nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw), norm_type)]]\n self.model = nn.Sequential()\n for n in range(len(sequence)):\n self.model.add_module('child' + str(n), nn.Sequential(*sequence[n]))\n self.model.apply(self.weights_init)", "def _construct_model(self):\n self.model = AutoEncoderConvolutional(self.n_latent_features, self.reduced_size)\n self.model = self.model.to(self.device, non_blocking=True)", "def init_layer(layer):\n \n if layer.weight.ndimension() == 4:\n (n_out, n_in, height, width) = layer.weight.size()\n n = n_in * height * width\n \n elif layer.weight.ndimension() == 2:\n (n_out, n) = layer.weight.size()\n\n std = math.sqrt(2. 
/ n)\n scale = std * math.sqrt(3.)\n layer.weight.data.uniform_(-scale, scale)\n\n if layer.bias is not None:\n layer.bias.data.fill_(0.)", "def SampleCNN(cfg):\n # Variable-length input for feature visualization.\n x_in = Input(shape=(None, 1), name='input')\n\n num_features = cfg.init_features\n x = Conv1D(num_features, kernel_size=3, strides=3, padding='same', use_bias=True,\n kernel_regularizer=l2(cfg.weight_decay), kernel_initializer=taejun_uniform(scale=1.), name='conv0')(x_in)\n x = BatchNormalization(name='norm0')(x)\n x = Activation('relu', name='relu0')(x)\n\n # Stack convolutional blocks.\n layer_outputs = []\n for i in range(cfg.num_blocks):\n num_features *= 2 if (i == 2 or i == (cfg.num_blocks - 1)) else 1\n x = cfg.block_fn(x, num_features, cfg, f'block{i}')\n layer_outputs.append(x)\n\n if cfg.multi: # Use multi-level feature aggregation or not.\n x = Concatenate(name='multi')([GlobalMaxPool1D(name=f'final_pool{i}')(output)\n for i, output in enumerate(layer_outputs[-3:])])\n else:\n x = GlobalMaxPool1D(name='final_pool')(x)\n\n # The final two FCs.\n x = Dense(x.shape[-1].value, kernel_initializer='glorot_uniform', name='final_fc')(x)\n x = BatchNormalization(name='final_norm')(x)\n x = Activation('relu', name='final_relu')(x)\n if cfg.dropout > 0.:\n x = Dropout(cfg.dropout, name='final_drop')(x)\n x = Dense(cfg.num_classes, kernel_initializer='glorot_uniform', name='logit')(x)\n x = Activation(cfg.activation, name='pred')(x)\n\n return Model(inputs=[x_in], outputs=[x], name='sample_cnn')", "def build_one(frames=64, bands=40, n_classes=10, dropout=0.0, tstride = 1, fstride = 4):\n\n from keras.layers import Conv2D, Dense, Dropout, Flatten\n\n\n # In the paper there are some differences\n # uses log-mel as input instead of MFCC\n # uses 4 in stride for frequency\n # has a linear bottleneck as second layer to reduce multiplications,\n # instead of doing a single full-frequency convolution\n # probably uses ReLu for the DNN layers?\n # probably does not use ReLu for the conv layer?\n\n # Note, in keyword spotting task tstride=2,4,8 performed well also\n \n conv_f = 8\n conv_t = 32\n kernels = 90\n bottleneck = 32\n\n input_shape = (frames, bands, 1)\n\n model = keras.Sequential([\n Conv2D(kernels, (conv_t, conv_f), strides=(tstride, fstride),\n padding='valid', activation='relu', use_bias=True,\n input_shape=input_shape),\n Dense(bottleneck, activation=None, use_bias=True),\n Dropout(dropout),\n Dense(128, activation='relu', use_bias=True),\n Dropout(dropout),\n Dense(128, activation='relu', use_bias=True),\n Dropout(dropout),\n Dense(n_classes, activation='softmax', use_bias=True),\n ])\n return model", "def discriminator_block(in_filters, out_filters, stride, normalize):\n layers = [nn.Conv2d(in_filters, out_filters, 3, stride, 1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers", "def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):\n super(GLConvDiscriminator, self).__init__()\n\n kw = 4\n padw = 1\n sequence = [\n DiscriminatorBlock(input_nc, ndf, downsample=True)\n ]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n DiscriminatorBlock(ndf * nf_mult_prev, ndf * nf_mult)\n ]\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n # sequence += [DiscriminatorBlock(ndf * nf_mult_prev, ndf * nf_mult, downsample=False)]\n sequence += [\n 
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=3, stride=1, padding=1),\n nn.InstanceNorm2d(ndf*nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n self.model = nn.Sequential(*sequence)\n self.patchgan_conv = nn.Conv2d(ndf * nf_mult, 1, kernel_size=3, stride=1, padding=1)\n # self.global_conv = nn.Conv2d(ndf * nf_mult, ndf * nf_mult, kernel_size=3, stride=1, padding=1)", "def __init__(self, img_size, latent_dim=10):\n super(EncoderBurgess, self).__init__()\n\n # Layer parameters\n hid_channels = 32\n kernel_size = 4\n hidden_dim = 256\n self.latent_dim = latent_dim\n self.img_size = img_size\n # Shape required to start transpose convs\n self.reshape = (hid_channels, kernel_size, kernel_size)\n n_chan = self.img_size[0]\n\n # Convolutional layers\n cnn_kwargs = dict(stride=2, padding=1)\n self.conv1 = nn.Conv2d(n_chan, hid_channels, kernel_size, **cnn_kwargs)\n self.conv2 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)\n self.conv3 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)\n\n # If input image is 64x64 do fourth convolution\n if self.img_size[1] == self.img_size[2] == 64:\n self.conv_64 = nn.Conv2d(\n hid_channels, hid_channels, kernel_size, **cnn_kwargs\n )\n\n # Fully connected layers\n self.lin1 = nn.Linear(np.product(self.reshape), hidden_dim)\n self.lin2 = nn.Linear(hidden_dim, hidden_dim)\n\n # Fully connected layers for mean and variance\n self.mu_logvar_gen = nn.Linear(hidden_dim, self.latent_dim * 2)", "def sentmodel(sent_data):\n\n # with tf.variable_scope(\"sent\", reuse=tf.AUTO_REUSE):\n with tf.variable_scope(\"sent\"):\n sent_data = tf.expand_dims(sent_data, -1)\n filter_sizes = [2, 3, 5]\n filter_bitsent = mul_filtercnn(filter_sizes, sent_data, 'sent')\n \n fc_sent = tf.identity(tf.layers.conv1d(\\\n inputs=filter_bitsent,\\\n filters=1,\\\n kernel_size=1,\\\n padding=\"same\",\\\n activation=tf.nn.sigmoid),name=\"fc_sent\")\n return fc_sent", "def __init__(self, channels, momentum):\n super(PointNetConv2Layer, self).__init__()\n self.channels = channels\n self.momentum = momentum", "def weights_init(m):\n if(type(m) == nn.ConvTranspose2d or type(m) == nn.Conv2d):\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif(type(m) == nn.BatchNorm2d):\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)", "def weights_init(m):\n if(type(m) == nn.ConvTranspose2d or type(m) == nn.Conv2d):\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif(type(m) == nn.BatchNorm2d):\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)", "def __init__(self, embed_size, dropout=0.5, image_model='resnet101', simple=False, pretrained=True):\n super(EncoderCNNFeatures, self).__init__()\n resnet = globals()[image_model](pretrained=pretrained)\n\n self.linear = nn.Sequential(nn.Conv2d(resnet.fc.in_features, embed_size, kernel_size=1, padding=0),\n nn.Dropout2d(dropout))\n\n self.simple = simple\n if simple:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n 
conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * self.cls_out_channels,\n 3,\n padding=1)\n self.atss_reg = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n self.atss_iou = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])\n\n # we use the global list in loss\n self.cls_num_pos_samples_per_level = [\n 0. for _ in range(len(self.prior_generator.strides))\n ]\n self.reg_num_pos_samples_per_level = [\n 0. for _ in range(len(self.prior_generator.strides))\n ]", "def normalization(channels):\n return GroupNorm32(32, channels)", "def conv_1x1_bn(self, inp, oup):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 1, 1, 0, bias=False),\n self.get_bn_module(oup),\n nn.ReLU6(inplace=True)\n )", "def __init__(self, z_dim, initailize_weights=True):\n super().__init__()\n self.z_dim = z_dim\n\n self.img_conv1 = conv2d(3, 16, kernel_size=7, stride=2)\n self.img_conv2 = conv2d(16, 32, kernel_size=5, stride=2)\n self.img_conv3 = conv2d(32, 64, kernel_size=5, stride=2)\n self.img_conv4 = conv2d(64, 64, stride=2)\n self.img_conv5 = conv2d(64, 128, stride=2)\n self.img_conv6 = conv2d(128, self.z_dim, stride=2)\n self.img_encoder = nn.Linear(4 * self.z_dim, 2 * self.z_dim)\n self.flatten = Flatten()\n\n if initailize_weights:\n init_weights(self.modules())", "def setUp(self):\n\n super(SeparableConv1DTest, self).setUp()\n\n self.batch_size = batch_size = random.randint(1, 100)\n self.in_width = in_width = random.randint(10, 188)\n self.in_channels = in_channels = random.randint(1, 10)\n self.input_shape = [batch_size, in_width, in_channels]\n\n self.kernel_shape_w = kernel_shape_w = random.randint(1, 10)\n self.channel_multiplier = channel_multiplier = random.randint(1, 10)\n self.kernel_shape = [kernel_shape_w]\n\n self.out_channels_dw = out_channels_dw = in_channels * channel_multiplier\n self.output_shape = [batch_size, in_width, out_channels_dw]\n self.depthwise_filter_shape = [\n 1, kernel_shape_w, in_channels, channel_multiplier\n ]\n self.pointwise_filter_shape = [1, 1, out_channels_dw, out_channels_dw]", "def init_weights(self, leveledinit: bool, kernel_size: int, bias: bool) -> None:\n if leveledinit:\n nn.init.normal_(self.conv1d.weight, std=1e-3)\n nn.init.normal_(self.conv1d.bias, std=1e-6)\n with torch.no_grad():\n self.conv1d.weight[:, 0, :] += 1.0 / kernel_size\n else:\n nn.init.xavier_uniform_(self.conv1d.weight)\n\n if self.embed in (\"pre\", \"post\"):\n nn.init.xavier_uniform_(self.embedding.weight)", "def __init__(self, input, num_filters, filter_size, stride=(2, 2), padding=(0, 0), activation=rectify):\n\n self.input = input\n self.output = layers.TransposedConv2DLayer(self.input, num_filters, filter_size, stride=stride, crop=padding,\n W=initialize_parameters()[0], b=initialize_parameters()[1],\n nonlinearity=activation)", "def init_weights(layer):\r\n layer_name = layer.__class__.__name__\r\n if layer_name.find(\"Conv\") != -1:\r\n layer.weight.data.normal_(0.0, 0.02)\r\n elif layer_name.find(\"BatchNorm\") != -1:\r\n layer.weight.data.normal_(1.0, 0.02)\r\n layer.bias.data.fill_(0)" ]
[ "0.6902533", "0.6492409", "0.6420767", "0.6415407", "0.63521636", "0.62814677", "0.62803644", "0.6270748", "0.6245793", "0.6226488", "0.607607", "0.6073126", "0.5975672", "0.59554344", "0.5911507", "0.59033775", "0.58964217", "0.58664775", "0.5841798", "0.58339804", "0.5821849", "0.5810137", "0.58017254", "0.5754241", "0.5749667", "0.5731316", "0.5727349", "0.5721029", "0.5706489", "0.5706489", "0.5706489", "0.5706489", "0.5706489", "0.5705993", "0.5704719", "0.5689524", "0.5689524", "0.5681186", "0.5679818", "0.5674878", "0.5666142", "0.56563056", "0.56478965", "0.5647524", "0.5645704", "0.56401074", "0.56328315", "0.5626139", "0.5622971", "0.5610218", "0.5606796", "0.5606156", "0.560147", "0.5598296", "0.5596549", "0.55909026", "0.5590475", "0.55903417", "0.55874515", "0.55854446", "0.55720097", "0.55699664", "0.5566862", "0.5561474", "0.5558867", "0.5552725", "0.5537854", "0.55353874", "0.55349416", "0.5527373", "0.5517424", "0.5517424", "0.5514933", "0.55082726", "0.5506547", "0.54983425", "0.5497135", "0.5496557", "0.54949045", "0.54942673", "0.5492895", "0.5491587", "0.547766", "0.54774827", "0.547709", "0.54768044", "0.5475141", "0.54740554", "0.5471287", "0.54676294", "0.54676294", "0.5466248", "0.5464432", "0.54620355", "0.5456814", "0.54542065", "0.54406804", "0.5439132", "0.54379535", "0.54376477" ]
0.74178046
0
Helper function that returns the dedicated directory for Post media. This organizes user-uploaded Post content and is used by `ministry.models.Post.attachment` to save uploaded content. Arguments =========
def post_media_dir(instance, filename, prepend=settings.MEDIA_ROOT): if instance.ministry: _ministry = instance.ministry elif instance.campaign: _ministry = instance.campaign.ministry else: e = 'There was an unknown error finding a dir for %s' % instance.title raise AttributeError(e) return path.join(generic_media_dir(_ministry, prepend=prepend), 'post_media', filename)
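A minimal usage sketch for the helper above, showing how a callable with this (instance, filename) signature is typically wired into the `Post.attachment` field that the query mentions. The import path `ministry.utils`, the extra model fields, and the `post_attachment_path` wrapper are assumptions, not the project's real layout; the wrapper passes `prepend=''` because Django's `FileField.upload_to` is expected to return a path relative to `MEDIA_ROOT`, while the helper's default prepend is `MEDIA_ROOT` itself.

from django.db import models

from ministry.utils import post_media_dir  # assumed module path, not shown in the source

def post_attachment_path(instance, filename):
    # upload_to callables receive (instance, filename); return a path
    # relative to MEDIA_ROOT by overriding the helper's default prepend.
    return post_media_dir(instance, filename, prepend='')

class Post(models.Model):
    # Illustrative fields only; the real model also carries ministry/campaign relations.
    title = models.CharField(max_length=200)
    attachment = models.FileField(upload_to=post_attachment_path, blank=True)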
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_dir(self):\n return os.path.join(settings.MEDIA_ROOT,self.upload_dir_rel())", "def get_media_directory():\n\treturn _paths[_MEDIA_DIRECTORY_KEY]", "def public_upload_dir(self):\n return os.path.join(settings.MEDIA_ROOT,\n self.public_upload_dir_rel())", "def create_news_post_dir(instance, prepend=settings.MEDIA_ROOT):\n for _ in (post_media_dir,):\n _path = path.split(_(instance, \"\", prepend=prepend))[0]\n try:\n mkdir(_path)\n except FileExistsError:\n pass\n except FileNotFoundError:\n if instance.ministry:\n _ministry = instance.ministry\n elif instance.campaign:\n _campaign = instance.campaign\n _ministry = _campaign.ministry\n else:\n e = 'There was an unknown error finding a dir for %s' % instance.name\n raise AttributeError(e)\n\n # NOTE: this is infinitely recursive if `prepend` does not lead to correct directory\n create_news_post_dir(instance, prepend=prepend)", "def path_media(self) -> Path:\n return self.path_supervisor / MEDIA_DATA", "def get_project_data_folder(self):\n return os.path.join(settings.MEDIA_ROOT,self.short_name)", "def mediaGenerator(request):\n folder = 'content/' + request\n mediaPaths = glob(folder + '/*')\n return random.choice(mediaPaths)", "def get_media_path(self, filename):\n return join(settings.CMS_PAGE_MEDIA_PATH, \"%d\" % self.id, filename)", "def get_media_dir(self):\n dir_path = _paths.concat(self._gnbase, _DIRNAME_GNMEDIA)\n if not _os.path.isdir(dir_path):\n raise OSError('GEONIS media directory {!r} does not exist'.format(dir_path))\n return dir_path", "def get_full_folder_path(self):\n data_dir_path = os.path.join(settings.MEDIA_ROOT,self.folder)\n return data_dir_path", "def full_path(self):\n return os.path.join(settings.MEDIA_ROOT, self.path)", "def get_gallery(self):\n return os.path.join(self.directory, GALLERY_DIR)", "def media_path(self):\n return self._path", "def upload_dir_rel(self):\n return os.path.join(self.short_name,\"uploads\")", "def create_media_path(custom_path=''):\n def generate_path(instance, filename):\n if hasattr((instance), 'name'):\n return os.path.join(\n custom_path,\n instance.name,\n filename\n )\n\n return os.path.join(\n custom_path,\n filename\n )\n\n return generate_path", "def prepare_media_url(self, object):\n if object.media is not None:\n return os.path.join(settings.MEDIA_URL, object.media.media_file.name)\n else:\n return ''", "def get_upload_path(instance, filename):\n from os import path\n from django.conf import settings\n from django.template.defaultfilters import slugify\n \n if hasattr(settings, 'MEDIA_BROWSER_UPLOAD_BASE'):\n base = settings.MEDIA_BROWSER_UPLOAD_BASE\n else:\n base = 'media_browser_uploads'\n type = slugify(instance._meta.verbose_name_plural)\n upload_path = path.join(base, type)\n # If MEDIA_BROWSER_ORGANIZE_BY_DATE is not set or is False, return \n # current path:\n if not hasattr(settings, 'MEDIA_BROWSER_ORGANIZE_BY_DATE') \\\n or settings.MEDIA_BROWSER_ORGANIZE_BY_DATE:\n return path.join(upload_path, filename)\n # Otherwise, put in dated subfolders:\n else:\n return path.join(upload_path, \"%Y\", \"%m\", \"%d\", filename)", "def setup_local_storage(mod, media_type, media_id, id=None):\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n if mod == 'post':\n mod = 'posts'\n path = os.path.join(BASE_DIR, 'save', mod, str(media_id))\n if id:\n path = os.path.join(BASE_DIR, 'save', mod, str(id))\n name = media_type.lower()\n try:\n os.mkdir(path)\n except FileExistsError as e:\n timestamp = time.strftime('%Y%m%d-%H%M%S')\n name += 
f\"_{timestamp}\"\n except OSError as e:\n raise InvalidUsage(\"OSError in setup_local_storage. \", status_code=501, payload=e)\n filename = f\"{str(path)}/{name}\"\n return path, filename", "def get_data_folder_path(challenge_short_name):\n return safe_join(settings.MEDIA_ROOT, challenge_short_name)", "def _UploadFile(self, media_source, title, category):\n media_entry = gdata.GDataEntry()\n media_entry.title = atom.Title(text=title)\n media_entry.category.append(category)\n media_entry = self.Post(media_entry, '/feeds/documents/private/full',\n media_source = media_source,\n extra_headers = {'Slug' : media_source.file_name })\n\n return media_entry", "def media_folder_name(self):\n raise NotImplementedError", "def directory(self) -> Path:\n (directory := Path(\"markdown\").resolve(strict=False)).mkdir(exist_ok=True, parents=True)\n return directory", "def public_upload_dir_rel(self):\n return os.path.join(self.short_name,settings.COMIC_PUBLIC_FOLDER_NAME)", "def content_media_urls(*paths):\n from mezzanine.conf import settings\n media_url = settings.CONTENT_MEDIA_URL.strip(\"/\")\n return [\"/%s/%s\" % (media_url, path) for path in paths]", "def get_directory(self, subdir=None):\n path = settings.SUBMISSION_DIR / str(self.assignment.id) / str(self.id)\n if subdir:\n path = path / subdir\n\n return path", "def get_thumbnails_directory():\n\treturn _paths[_THUMBNAILS_DIRECTORY_KEY]", "def return_directory(path):\n files = os.listdir(path)\n content = \"\"\n for file in files:\n content = content + f\"{file}\\n\"\n content = content.encode()\n mime_type = b\"text/plain\"\n return content, mime_type", "def prepare_media(self, object):\n if object.media is not None:\n #return object.media.media_file.name\n return '/api/v1/media/{0}/'.format(object.media.id)\n else:\n return ''", "def savePost(post, save_folder, header=\"\", save_file=None):\n\n\tslug = post[\"url-with-slug\"].rpartition(\"/\")[2]\n\tdate_gmt = post[\"date-gmt\"]\n\tdate = date_gmt[:-7]\n\n\tslug = byte_truncate(slug)\n\tfile_name = os.path.join(save_folder, date +\" \"+ slug + \".html\")\n\tf = codecs.open(file_name, \"w\", encoding=ENCODING)\n\n\t#\tDate info for all posts\n\tf.write('<article>\\n\\t<time datetime>' + date + '</time>\\n\\t')\n\n#\tPOST KINDS\t:\n\n#\tText\n\n\tif post[\"type\"] == \"regular\":\n\t\ttitle = \"\"\n\t\ttitle_tag = post.find(\"regular-title\")\n\t\tif title_tag:\n\t\t\ttitle = unescape(title_tag.string)\n\t\tbody = \"\"\n\t\tbody_tag = post.find(\"regular-body\")\n\t\tif body_tag:\n\t\t\tbody = unescape(body_tag.string)\n\n\t\tif title:\n\t\t\tf.write(\"<h3>\" + title + \"</h3>\\n\\t\")\n\t\tif body:\n\t\t\tf.write(body)\n\n#\tPhoto\n\n\tif post[\"type\"] == \"photo\":\n\t\tcaption = \"\"\n\t\tcaption_tag = post.find(\"photo-caption\")\n\t\tif caption_tag:\n\t\t\tcaption = unescape(caption_tag.string)\n\t\timage_url = post.find(\"photo-url\", {\"max-width\": \"1280\"}).string\n\n\t\timage_filename = image_url.rpartition(\"/\")[2].encode(ENCODING)\n\t\timage_folder = os.path.join(save_folder, \"../images\")\n\t\tif not os.path.exists(image_folder):\n\t\t\tos.mkdir(image_folder)\n\t\tlocal_image_path = os.path.join(image_folder, image_filename)\n\n\t\tif not os.path.exists(local_image_path):\n\t\t\t# only download images if they don't already exist\n\t\t\tprint \"Downloading a photo. 
This may take a moment.\"\n\t\t\ttry:\n\t\t\t\timage_response = urllib2.urlopen(image_url)\n\t\t\t\timage_file = open(local_image_path, \"wb\")\n\t\t\t\timage_file.write(image_response.read())\n\t\t\t\timage_file.close()\n\t\t\texcept urllib2.HTTPError, e:\n\t\t\t\tlogging.warning('HTTPError = ' + str(e.code))\n\t\t\texcept urllib2.URLError, e:\n\t\t\t\tlogging.warning('URLError = ' + str(e.reason))\n\t\t\texcept httplib.HTTPException, e:\n\t\t\t\tlogging.warning('HTTPException')\n\t\t\texcept Exception:\n\t\t\t\timport traceback\n\t\t\t\tlogging.warning('generic exception: ' + traceback.format_exc())\n\n\t\tf.write(caption + '<img alt=\"' + caption.replace('\"', '&quot;') + '\" src=\"images/' + image_filename + '\" />')\n\n#\tQuote\n\n\tif post[\"type\"] == \"quote\":\n\t\tquote = \"\"\n\t\tquote_tag = post.find(\"quote-text\")\n\t\tif quote_tag:\n\t\t\tquote = unescape(quote_tag.string)\n\t\tsource = \"\"\n\t\tsource_tag = post.find(\"quote-source\")\n\t\tif source_tag:\n\t\t\tsource = unescape(source_tag.string)\n\n\t\tif quote:\n\t\t\tf.write(\"<blockquote>\\n\\t\\t<p>\" + quote + \"</p>\\n\\t\\t\")\n\t\t\tif source:\n\t\t\t\tf.write('<cite>' + source + '</cite>\\n\\t')\n\t\tif quote:\n\t\t\tf.write(\"</blockquote>\")\n\n#\tFooter for all posts\n\n\tf.write(\"\\n</article>\")\n\tf.close()", "def get_posts(path, which=None, verbose=True):\n \"\"\" Here we conditionally define include_test depending on what type of\n object 'which' is.\"\"\"\n\n class_ = type(which)\n if class_ == str:\n def include_test(name):\n if name == which:\n return True\n else:\n return False\n elif class_ in {list, tuple, set}:\n def include_test(name):\n if name in which:\n return True\n else:\n return False\n else:\n def include_test(name):\n return True\n\n \"\"\" Loop through all files in the posts directory and add each file format\n e.g. 'json', 'html', etc as keys for a subdictionary. The base dictionary\n is keyed by the file name (excluding the extension).\n\n example:\n >>> output = {\n ... 'myfile':{\n ... 'html':'/path/to/posts/myfile.html',\n ... 'json':'/path/to/posts/myfile.json'\n ... }\n ...}\n \"\"\"\n all_files = defaultdict(dict)\n for file_ in listdir(path):\n if isfile(join(path, file_)):\n name = file_.split('.')[0]\n type_ = splitext(file_)[1].strip('.')\n file_path = join(path, file_)\n all_files[name][type_] = file_path\n\n posts = list()\n required_keys = {'html', 'title', 'date'}\n for key, value in all_files.items():\n \"\"\" If include_test says that we don't need this file, we skip the\n rest of the current iteration and continue with the next key, value\n pair. \"\"\"\n if not include_test(key):\n continue\n value['id_'] = key\n if 'json' in value:\n with open(value['json'], \"rU\") as json_handle:\n value.update(\n json.load(json_handle, object_hook=json_date_parser)\n )\n elif 'yaml' in value:\n with open(value['yaml'], 'rU') as yaml_handle:\n value.update(yaml.load(yaml_handle))\n elif 'yml' in value:\n with open(value['yml'], 'rU') as yaml_handle:\n value.update(yaml.load(yaml_handle))\n\n \"\"\" If some required keys are missing then we skip the rest of the\n current iteration and continue with the next key, value pair.\n If verbose is True we print which keys were missing.\"\"\"\n if required_keys.intersection(value) != required_keys:\n if verbose:\n d = required_keys.difference(required_keys.intersection(value))\n print(\n \"Excluded {} from posts because it did not \".format(key) +\n \"have all of the required information. 
The field(s) \" +\n \"'{}' was/were missing.\".format(\"', '\".join(list(d)))\n )\n continue\n\n \"\"\" Everything is cool, add the post to the list.\"\"\"\n posts.append(value)\n\n \"\"\" We could run into problems here when dates aren't parsed as datetime\n objects. I might need to figure out a better way of ordering posts by date\n in the future.\"\"\"\n posts.sort(key=lambda d: d['date'])\n return posts", "def get_attachment_upload_dir(instance, filename):\n return f\"{tasks}/{attachments}/{str(instance.task.id)}/{filename}\"", "def fresh_media_root(**kwargs):\n with TemporaryDirectory(**kwargs) as media_root:\n with override_settings(MEDIA_ROOT=media_root):\n yield", "def absolute_folder_name(self):\n return 'music_decompose/media/{0}'.format(self.media_folder_name)", "def photo_directory_path(instance, filename):\n return 'gallery/photos/{0}/{1}'.format(instance.photo_name, filename)", "def save_image_to_media(image, image_name):\n print('u1')\n # Set image name saved\n image_name_save = image_name + '.' + str(imghdr.what(image))\n # Dir save\n print('u2')\n\n fs = FileSystemStorage(location=settings.IMAGE_USER)\n # Save image\n print('u3')\n\n filename = fs.save(image_name_save, image)\n # Url dir save\n print('u4')\n uploaded_file_url = fs.url(filename)\n print('u5')\n full_path_image = settings.IMAGE_PATH_STATIC + uploaded_file_url\n return full_path_image", "def external_storage():\r\n\r\n folder_path = os.path.join('/media/pi/', args.storage_name, args.directory_name)\r\n if not os.path.exists(folder_path):\r\n os.makedirs(folder_path)\r\n return folder_path", "def make_gallery(post_name, image_list, config={'gallery_dir': 'galleries'}):\n gallery_name = make_gallery_name_from_post_name(post_name)\n gallery_path = get_gallery_path(gallery_name)\n output_path = os.path.join(gallery_path, \"index.md\")\n with open(output_path, \"w\") as fd:\n fd.write(make_gallery_index(gallery_name, image_list))\n\n copy_images(gallery_path, image_list)\n #make_thumbs\n #make_image_pages", "def get_content_page_dir(page_id):\n return os.path.join(content_pages_dir, page_id)", "def media(filename):\n media_path = flask.current_app.instance_path + '/media'\n return flask.send_from_directory(media_path, filename)", "def _create_folders(self):\n if not os.path.exists(os.path.join(BASE_DIR, DIR)):\n os.mkdir(os.path.join(BASE_DIR, DIR))\n directory = os.path.join(BASE_DIR, DIR, self.title)\n if not os.path.exists(directory):\n os.mkdir(directory)\n return directory", "def roompost_image_file_path(instance, filename):\n ext = filename.split('.')[-1]\n filename = f'{uuid.uuid4()}.{ext}'\n\n return os.path.join('uploads/post/', filename)", "def make_directory(scripts):\n if not os.path.exists(os.path.join(os.path.dirname(__file__), 'Uploads')):\n os.makedirs(os.path.join(os.path.dirname(__file__), 'Uploads'))\n for script_object in scripts:\n if script_object.type is None:\n continue\n path = script_object.type.split('::')\n path = os.path.join(os.path.dirname(__file__), \"/\".join(path[:-1]))\n if not os.path.exists(path):\n os.makedirs(path)", "def media(self, req):\n first_part = req.path_info_peek()\n if first_part in self.media_paths:\n req.path_info_pop()\n path = self.media_paths[first_part]\n else:\n path = resource_filename(\"weberror\", \"eval-media\")\n app = urlparser.StaticURLParser(path)\n return app", "def media(self, path):\n path = \"/media/%s%s\" % (self.session.root, format_path(path))\n\n url, params, headers = self.request(path, method='GET')\n\n return self.rest_client.GET(url, 
headers)", "def video_directory_path(instance, filename):\n return 'gallery/video/{0}/{1}'.format(instance.video_name, filename)", "def album_directory_path(instance, filename):\n return 'gallery/album/{0}/{1}'.format(instance.album_name, filename)", "def get_folder(self):\n return os.path.join(\n settings.PRIVATE_STORAGE_ROOT, Exam.EXAM_FILES_LOCATION,\n str(self.unique_id)[0:2])", "def get_content_directories() -> List[str]:\n result:list[str] = []\n for current_path in os.listdir(\"content\"):\n if os.path.isdir(os.path.join(\"content\", current_path)):\n result.append(os.path.join(\"content\", current_path))\n return result", "def get_localised_dir(self, language):\n return os.path.join(\n self.base_path,\n to_locale(language),\n self.content_path\n )", "def filter(self):\n for f in FileHelper.ALL_PATHS:\n media_obj = MediaObject(FileHelper.get_url(f), FileHelper.get_title(f), FileHelper.get_media_type(f), FileHelper.get_icon(f), FileHelper.get_duration(f), FileHelper.get_ctype(f))\n _id = media_obj.uuid\n if media_obj.media_type == \"image\":\n DB.IMAGES[_id] = media_obj\n elif media_obj.media_type == \"audio\":\n DB.MUSIC[_id] = media_obj\n elif media_obj.media_type == \"video\":\n DB.VIDEOS[_id] = media_obj\n else:\n print \"File '%s' doesn't play nice.\" % (f)", "def delete_post_media(self: User, post_id: str) -> Optional[Post]:\n post = dangerously_get_post(post_id)\n if self != post.author:\n raise UnauthorizedAccess()\n\n post.media_list = []\n post.save()\n\n if exists_in_post_cache(post.id):\n # only set in post cache if it already exists\n # post cache should only have reshared posts so it should not cache any deleted post\n set_in_post_cache(post)\n\n return post", "def getMediaFiles(path):\n fileList = getMediaFileList(path)\n # dirList = getDirectoryList(path)\n\n # results = map(getMediaFiles, dirList)\n\n # for result in results:\n # fileList = fileList + result\n\n return fileList", "def get_directory(self):\n path = os.path.join(settings.SUBMISSION_DIR, \n str(self.course.semester), \n str(self.course.code), \n \"Sec.%d\" % self.course.section,\n self.code)\n return path.replace(\" \", \"_\")", "def create_gallery(jekyll_site_path, gallery_name):\n\n gallery_path = os.path.join(jekyll_site_path, 'images', 'galleries', gallery_name)\n\n if not os.path.exists(gallery_path):\n os.makedirs(gallery_path)\n\n print(f\"Created gallery path {gallery_path}\")\n\n return gallery_path", "def get_media_files(tweets, today, hour, output_folder):\n media_file = \"\"\n tweet_id = \"\"\n create_picture_folder(output_folder)\n\n for tweet in tweets:\n if tweet.get('delete') != None:\n continue\n if not tweet['retweeted'] and 'RT @' not in tweet['text'] and not tweet['in_reply_to_status_id']:\n media = tweet.get('entities').get('media', [])\n if len(media) > 0:\n # media_files.append(media[0]['media_url'])\n media_file += media[0]['media_url']\n # tweet_ids.append(tweet['id'])\n tweet_id += tweet['id_str']\n return media_file, tweet_id", "def insert_post(post):\n full_path_image = ''\n try:\n # Set image name saved\n image_name_save = post.category_post_image_name + '.' 
+ str(imghdr.what(post.category_post_image))\n # Dir save\n fs = FileSystemStorage(location=settings.IMAGE_USER)\n # Save image\n filename = fs.save(image_name_save, post.category_post_image)\n # Url dir save\n uploaded_file_url = fs.url(filename)\n full_path_image = settings.IMAGE_PATH_STATIC + uploaded_file_url\n\n post.category_post_image = full_path_image\n\n # Change display\n post.display = True if post.display == 'true' else False\n\n # Change display order\n post.display_order = 0 if post.display_order == '' else post.display_order\n\n # Save into DB\n category_post = CategoryPostDao.insert_category_post(post)\n\n # Clean cache list\n CacheUtil.clean_cache_by_key(KEY_CACHE_API_CATEGORY_POST)\n CacheUtil.clean_cache_by_key(str(KEY_CACHE_API_CATEGORY_POST_IN_CATEGORY) + str(post.category_id_id))\n CacheUtil.clean_cache_by_key(str(KEY_CACHE_CATEGORY_POST_DISPLAY_IN_CATEGORY) + str(category_post.category_id_id))\n CacheUtil.clean_cache_by_key(KEY_CACHE_CATEGORY_POST_DETAIL_DISPLAY_BY_URL + str(category_post.category_post_url))\n CacheUtil.clean_cache_by_key(KEY_CACHE_CATEGORY_POST_DISPLAY_LIMIT_IN_CATEGORY + str(category_post.category_id_id))\n \n # Save cache\n key_cache = str(KEY_CACHE_API_CATEGORY_POST_ID) + str(category_post.category_post_id)\n cache.set(key_cache, category_post, settings.CACHE_TIME)\n except Exception as error:\n if full_path_image != '':\n path1 = str(settings.BASE_DIR) + '/' + settings.APP_NAME1 + '/' + full_path_image\n path2 = str(settings.BASE_DIR) + '/' + full_path_image\n os.remove(path1)\n os.remove(path2)\n print('error: ' + str(error))\n raise error", "def blog_get_mkd_attachment(post):\n attach = dict()\n try:\n lead = post.rindex(\"<!-- \")\n data = re.search(g_data.TAG_RE, post[lead:])\n if data is None:\n raise VimPressFailedGetMkd(\"Attached markdown not found.\")\n attach.update(data.groupdict())\n attach[\"mkd_rawtext\"] = urllib2.urlopen(attach[\"mkd_url\"]).read()\n except (IOError, ValueError):\n raise VimPressFailedGetMkd(\"The attachment URL was found but was unable to be read.\")\n\n return attach", "def media_path_to_url(path):\n media_url = settings.MEDIA_URL\n if media_url.endswith('/'):\n media_url = media_url[:-1]\n return path.replace(settings.MEDIA_ROOT, media_url)", "def organize_my_photos(path, locale, extension):\n # Set locale\n if locale:\n locale.setlocale(category=locale.LC_ALL, locale=locale)\n # Set extensions\n extensions = EXTENSIONS\n if extension:\n extensions = (extension,)\n # Get all photos\n for root, dirs, files in os.walk(path):\n for file in files:\n if file.lower().endswith(extensions):\n # Get path file\n origin = os.path.join(root, file)\n # Get date\n date_created = get_date_created(os.path.join(root, file))\n date_list = date_created.split('/')\n date_created_day = date_list[0]\n date_created_month = date_list[1]\n date_created_year = date_list[2]\n # Make folder: format year/month day -> 2018/abr 23/photo.jpg\n dest_folder = os.path.join(date_created_year, f'{calendar.month_name[int(date_created_month)]} {date_created_day}')\n dest = os.path.join(date_created_year, f'{calendar.month_name[int(date_created_month)]} {date_created_day}', file)\n if not os.path.exists(dest_folder):\n os.makedirs(dest_folder)\n # Move photo\n shutil.move(origin, dest)", "def json_to_post(self, post, tag):\r\n # Get media type (image/video)\r\n mediaType = self.get_value(post, (\"images\", 0, \"type\"))\r\n if mediaType is None:\r\n mediaType = self.get_value(post, (\"type\", ))\r\n mediaType = self.get_media_type(mediaType)\r\n\r\n 
imageCount = self.get_value(post, ('images_count',))\r\n if imageCount is None:\r\n imageCount = 1\r\n \r\n # Only want 1 image/video\r\n if imageCount > 1:\r\n return None\r\n \r\n # Get media url and size\r\n if mediaType == MediaType.IMAGE:\r\n media = self.get_value(post, ('images', 0, 'id', ))\r\n size = self.get_value(post, ('size', ))\r\n if size is None:\r\n size = self.get_value(post, ('images', 0, 'size', ))\r\n if media is None:\r\n media = self.get_value(post, ('link', ))\r\n elif mediaType == MediaType.VIDEO:\r\n media = self.get_value(post, ('images', 0, 'id', ))\r\n size = self.get_value(post, ('mp4_size', ))\r\n if size is None:\r\n size = self.get_value(post, ('images', 0, 'mp4_size', ))\r\n if media is None:\r\n media = self.get_value(post, ('mp4', ))\r\n else:\r\n self.logger.log(logger.LogLevel.WARNING, 'mediaType is not Image or Video')\r\n return None\r\n\r\n #check if image/video is over max size\r\n if mediaType == MediaType.IMAGE:\r\n if size >= self.setting.maxImageSize:\r\n return None\r\n elif mediaType == MediaType.VIDEO:\r\n if size >= self.setting.maxVideoSize:\r\n return None\r\n\r\n postId = self.get_value(post, (\"id\", ))\r\n title = self.get_value(post, (\"title\", ))\r\n views = self.get_value(post, (\"views\", ))\r\n ups = self.get_value(post, (\"ups\", ))\r\n downs = self.get_value(post, (\"downs\", ))\r\n return Post(postId, title, mediaType.value, media, size, tag, views, ups, downs)", "def get_directory() -> str:\n return directory", "def path_extern_media(self) -> PurePath:\n return PurePath(self.path_extern_supervisor, MEDIA_DATA)", "def fix_post(post_name):\n #find the image links\n with open(\"_posts\" + post_name) as fd:\n image_links, browse_links = post_to_list_of_image_and_browselinks(fd)\n gallery_name = make_gallery_name_from_post_name(post_name)\n gallery_path = os.path.join(\"../galleries\", gallery_name)\n try:\n os.makedirs(os.path.join(gallery_path, \"images\"))\n except OSError as err:\n if err.errno != 17:\n raise\n\n for image in image_links:\n #download image to it (both normal and thumb)\n with open(os.path.join(gallery_path, image), \"wb\") as output:\n with closing(urllib2.urlopen(\"http://orionrobots.co.uk/%s\" % image)) as original:\n output.write(original.read())\n with open(os.path.join(gallery_path, \"thm_\" + image), \"wb\") as output:\n with closing(urllib2.urlopen(\"http://orionrobots.co.uk/%s\" % image)) as original:\n output.write(original.read())\n\n #Log that the link to X in the post will now need to be a link to Y.\n #if there are browseimage links\n #make gallery thumb page.\n #For each browseimaqe link\n #Match with an image link\n #prepare list\n #log link change\n #For each in list\n #make gallery front end for it with\n #First/last/prev/next/thumbs/blog post", "def get_default_data_dir(self):\n data_dir_path = os.path.join(self.comicsite.short_name,self.folder_prefix,self.cleantitle)\n return data_dir_path", "def Directory(self) -> str:", "def relative_to_media_root(filepath, media_root=settings.MEDIA_ROOT):\n relative_path = os.path.relpath(filepath, media_root)\n return relative_path", "def take_action(self, parsed_args):\n folder_content = dict()\n parent = utils.key_len(parsed_args.parent)\n folder_content = self.app.metagen.directory_list(parent)\n content_type_map = {\n '1': 'Folder',\n '2': 'Sample',\n '3': 'MRSA Sample',\n '4': 'Listeria Sample'\n }\n header = ['type', 'name', 'id', 'status', 'size', 'created']\n if folder_content:\n if not folder_content['items']:\n self.logger.info('\\nFolder {} 
(id: {}) is empty'.format(folder_content['name'], parent))\n for_output = [[' ', ' ', ' ', ' ', ' ', ' ']]\n return (header, for_output)\n else:\n raise Exception(\"Exception uccured.\")\n\n def _set_date(inp):\n return dt.fromtimestamp((inp[1]/1000)).strftime('%Y-%m-%d %H:%M:%S')\n\n def _del_none(inp):\n out = [inp[1]]\n if not out[0]:\n out = [0 if v[1] == 'int' else '-' for k, v in field_maps.items() if inp[0] == v[0]]\n return out[0]\n\n def _set_dim(inp):\n out = inp if inp else 0\n out = utils.convert_size(out)\n return out if out is not '0B' else '-'\n\n def _set_type(inp):\n ctype = content_type_map[str(inp[1])] if content_type_map.get(str(inp[1])) else inp[1]\n return ctype\n\n def _convert(inp):\n for item in inp.items():\n for k, v in field_maps.items():\n if item[0] == v[0]:\n inp[item[0]] = field_maps[k][2](item)\n break\n return inp\n\n field_maps = {\n 'type': ['content_type', 'str', _set_type],\n 'id': ['id', 'str', _del_none],\n 'name': ['name', 'str', _del_none],\n 'status': ['status', 'str', _del_none],\n 'size': ['size', 'int', _del_none],\n 'created': ['created', 'int', _set_date]\n }\n\n \"\"\"we need just items for output\"\"\"\n items_data = [_convert(item) for item in folder_content['items']]\n\n \"\"\"order regarding order parameters\"\"\"\n if parsed_args.order:\n if parsed_args.order.lower() in header:\n items_data = sorted(items_data,\n key=itemgetter(field_maps[parsed_args.order.lower()][0]),\n reverse=(not parsed_args.up)\n )\n for_output = [[item[field_maps[f][0]] if f is not 'size'\n else _set_dim(item[field_maps[f][0]])\n for f in header]\n for item in items_data\n ]\n self.logger.info('\\nContent of the Folder {} (id: {})'.format(folder_content['name'], parent))\n return (header, for_output)", "def _make_media_path(self, media_id, host, username=None, width=None, height=None, crop=None):\n\t\ttry:\n\t\t\tmedia_id = validation.media_id(media_id)\n\t\t\tvalidation.required(host, 'host')\n\t\t\tif username:\n\t\t\t\tusername = validation.username(username, 'username')\n\t\t\tif width:\n\t\t\t\twidth = validation.cast_integer(width, 'width')\n\t\t\t\theight = validation.cast_integer(height, 'height')\n\t\t\t\tcrop = validation.cast_boolean(crop, 'crop')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\t@stack\n\t\tdef make_path(void):\n\t\t\tx = media_id[0]\n\t\t\tyz = media_id[1:3]\n\t\t\tmedia_path = \"%s/%s/%s/%s\" % (self._cfg_media_root_dir, host, x, yz)\n\t\t\tif width:\n\t\t\t\tmedia_path += \"/renders/%sx%sx%s\" % (width, height, int(crop))\n\t\n\t\t\tif username:\n\t\t\t\tmedia_path += \"/%s\" % username\n\t\n\t\t\tmedia_path += \"/%s\" % media_id\n\t\n\t\t\treturn media_path\n\n\t\td = Deferred()\n\t\td.addCallback(make_path)\n\t\td.addCallback(lambda _: (0, _))\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\td.callback(0)\n\t\treturn d", "def _copy_to_media(self, template_name, source=''):\n dirpath = os.path.join(self.cache_root, os.path.dirname(template_name))\n filename = os.path.basename(template_name)\n fullpath = os.path.join(dirpath, filename)\n\n if not os.path.isfile(fullpath) or settings.DEBUG:\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n f = open(fullpath, 'w')\n f.write(source)\n f.close()\n\n return urljoin(self.cache_url, template_name), filename", "def get_directory(self):\n return self.directory", "def handle_media( environ ):\n # TODO: implement me\n return 200, [], _html.format(\n title = 'MEDIA',\n head = '',\n body = 'MEDIA'\n )", "def 
extract(request):\n try:\n files = request.FILES.getlist('myFile')\n msg_data = []\n fs = FileSystemStorage()\n for file in files:\n name = file.name.replace(\" \", \"_\")\n if os.path.exists(settings.MEDIA_ROOT + \"\\\\\" + name):\n os.remove(settings.MEDIA_ROOT + \"\\\\\" + name)\n fs.save(settings.MEDIA_ROOT + \"\\\\\" + name, file)\n msg = extract_msg.Message(settings.MEDIA_ROOT + \"\\\\\" + name)\n msg.save_attachments(customPath=settings.MEDIA_ROOT + \"\\\\\")\n attachments = []\n for i in range(0, len(msg.attachments)):\n attachments.append({\n \"filename\": msg.attachments[i].shortFilename,\n \"filepath\": \"/media/\" + msg.attachments[i].shortFilename\n })\n msg_data.append({\n # \"mainProperties\": msg.mainProperties,\n # \"header\": msg.header,\n \"attachments\": attachments,\n \"filename\": file.name,\n \"filepath\": \"/media/\" + name,\n \"from\": msg.sender,\n \"to\": msg.to,\n \"cc\": msg.cc,\n \"subject\": msg.subject,\n \"date\": msg.date,\n \"body\": msg.body,\n })\n msg.close()\n response = {\n \"response\": \"SUCCESS\",\n \"message\": \"File Uploaded!\",\n \"data\": msg_data\n }\n except:\n response = {\n \"response\": \"FAIL\",\n \"message\": \"Erorr in file uploading!\",\n \"data\": msg_data\n }\n return Response(response)", "def get_media(shelter_id=0, section_name=\"\"):\r\n shelter = Shelter.query.filter(Shelter.id==shelter_id).first()\r\n if not shelter:\r\n flash(\"No such shelter\", \"warning\")\r\n return redirect(redirect_url())\r\n\r\n file = request.files.get('mediafile', None)\r\n if file and file.filename == '':\r\n flash('No selected file', 'warning')\r\n return redirect(request.url)\r\n if file and allowed_file(file.filename,\r\n conf.ALLOWED_EXTENSIONS_PICTURE.union(\r\n conf.ALLOWED_EXTENSIONS_DOCUMENT)):\r\n if 'pictures' in request.form:\r\n path = os.path.join(conf.SHELTERS_PICTURES_SERVER_PATH, str(shelter.id))\r\n if 'documents' in request.form:\r\n path = os.path.join(conf.SHELTERS_DOCUMENTS_SERVER_PATH, str(shelter.id))\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n filename = secure_filename(file.filename)\r\n file.save(os.path.join(path , filename))\r\n\r\n category_id = request.form['category_id']\r\n\t\t\r\n if category_id:\r\n if 'pictures' in request.form:\r\n new_media = ShelterPicture(file_name=filename,\r\n shelter_id=shelter.id, category_id=category_id)\r\n if 'documents' in request.form:\r\n new_media = ShelterDocument(file_name=filename,\r\n shelter_id=shelter.id, category_id=category_id)\r\n db.session.add(new_media)\r\n db.session.commit()\r\n\r\n return redirect(request.url)", "def get_content_path(section, path=None):\n\n if path and os.path.isdir(path):\n return 'content/' + path\n elif section == 'blog' and os.path.isdir('content/blog'):\n return 'content/blog'\n elif section == 'news' and os.path.isdir('content/news'):\n return 'content/news'\n\n return 'content/' + path", "def LoadPosts(self):\n result = {}\n for userid in self.User_list:\n file_list = set(os.listdir(f\"{self.home_dir}/{userid}\")) - set(['student.txt','img.jpg'])\n file_list = sorted(list(file_list))\n file_des_dict = {}\n for entirepostname in file_list:\n postname = entirepostname.replace(\".txt\",\"\")\n sp = postname.split(\"-\")\n if sp[0] not in file_des_dict.keys():\n file_des_dict[sp[0]] = {'post':None,'comments':[],'reply':{}}\n if len(sp) == 1:\n file_des_dict[sp[0]]['post'] = f\"{self.home_dir}/{userid}/{entirepostname}\"\n elif len(sp) == 2:\n file_des_dict[sp[0]]['comments'].append(sp[1])\n else:\n if sp[1] not in 
file_des_dict[sp[0]]['reply'].keys():\n file_des_dict[sp[0]]['reply'][sp[1]] = []\n file_des_dict[sp[0]]['reply'][sp[1]].append(sp[2])\n result[userid] = file_des_dict\n return result", "def route(self, media):\n for mr in media:\n for accepted in self.dumpers:\n if mr in accepted:\n self.content_type = mr # TODO handle \"*\" in media range\n return self.dumpers[accepted]\n return (None, None)", "def app_media_path(app_path, media, check_exists=True):\n app_module = import_module(app_path)\n app_dir = os.path.dirname(app_module.__file__)\n app_media = os.path.realpath(os.path.join(app_dir, 'media', media))\n\n if check_exists and not os.path.exists(app_media):\n raise Exception(\"app_media_path: file doesn't exist: %s, %s'\" % (\n app_path,\n media,\n ))\n return app_media", "def get_image_dir(self):\n return self.img_dir", "def get_gallery_path(gallery_name, config={'gallery_dir': 'galleries'}):\n return os.path.join(config['gallery_dir'], gallery_name)", "def _make_media_readable(self, source_dir):\n logger.info(\"Making site_media readable\")\n\n site_media = os.path.join(source_dir, 'site_media')\n with hide(*fab_output_hides):\n sudo('chmod -R o+r %s' % site_media)", "def get_available_galleries(include_default=False):\n galleries = []\n\n for directory in Path(MEDIA_AVATARS).dirs():\n if include_default or directory[-8:] != '_default':\n gallery = {'name': directory.name, 'images': []}\n\n images = directory.files('*.gif')\n images += directory.files('*.jpg')\n images += directory.files('*.jpeg')\n images += directory.files('*.png')\n\n for image in images:\n image_path = image[len(settings.MEDIA_ROOT):]\n if image_path.startswith('/'):\n image_path = image_path[1:]\n gallery['images'].append(image_path)\n\n if gallery['images']:\n galleries.append(gallery)\n\n return galleries", "def folders():\n\n os.makedirs('Images/')\n os.makedirs('Seg/')\n\n return", "def get_destination_path(default, project):\n # Select a shard or use default.\n media_shards = os.getenv('MEDIA_SHARDS')\n if media_shards is None:\n path = default\n else:\n path = f\"/{random.choice(media_shards.split(','))}\"\n # Make sure project path exists.\n project_path = os.path.join(path, str(project))\n os.makedirs(project_path, exist_ok=True)\n return path", "def getDir(*seg):\n path = os.path.join(dataRoot, *seg)\n if not os.path.isdir(path):\n os.makedirs(path)\n return path", "def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.title)\n existing = Project.objects.filter(\n # has some false positives, but almost no false negatives\n slug__startswith=self.slug).order_by('-pk').first()\n if existing:\n self.slug = self.slug + str(existing.pk)\n\n \"\"\"Save images to the Media model\"\"\"\n imagesave(self.description)\n\n super(Project, self).save(*args, **kwargs)", "def prepare_destination(self):\n self.movie_root_path = self.config.share_movie_root_path % (\n self.share_path, self.title)\n\n if os.path.isdir(self.movie_root_path):\n if self.capacity_reached():\n Logger.log(\n '[!] Capacity reached. Skipping adding movie %s.' 
% self.title)\n else:\n if not os.path.isdir(self.movie_root_path):\n Logger.log('[+] Adding Movie: %s' % self.title)\n os.mkdir(self.movie_root_path)", "def get_yaml_media(self, provider_name):\n return [YamlMedia(f, provider_name) for f in self.datafiles[provider_name]]", "def upload():\r\n\r\n if not os.path.isdir(TO_SEGMENT):\r\n os.mkdir(TO_SEGMENT)\r\n else:\r\n print(\"could not create upload directory: {}\".format(TO_SEGMENT))\r\n print(request.files.getlist(\"file\"))\r\n\r\n for upload in request.files.getlist(\"file\"):\r\n filename = upload.filename\r\n destination = \"/\".join([TO_SEGMENT, filename])\r\n upload.save(destination)\r\n\r\n return redirect(url_for('get_gallery'))", "def upload_to_wordpress(wp, post, pkg):\n from wordpress_xmlrpc.compat import xmlrpc_client\n from wordpress_xmlrpc.methods import media\n\n r = wp.call(media.GetMediaLibrary({'parent_id': post.id, 'mime_type': 'text/csv'}))\n\n extant = {e.title: e for e in r}\n\n for r in pkg.resources():\n url = r.resolved_url\n if r.qualified_name in extant:\n print('👌', r.qualified_name, 'Not uploading; already exist. ')\n r.value = extant[r.qualified_name].link\n\n else:\n\n data = url.fspath.read_bytes()\n\n d = {\n 'name': r.qualified_name,\n 'type': 'text/csv',\n 'bits': xmlrpc_client.Binary(data),\n 'post_id': post.id\n }\n\n prt('Uploading', r.qualified_name)\n\n try:\n rsp = wp.call(media.UploadFile(d))\n prt('✅', r.qualified_name, 'Uploaded to ', rsp['url'])\n r.value = rsp['url']\n except Exception as e:\n err(r.qualified_name, 'Upload failed: ', e, f'len={len(data)}')\n raise", "def get_data_folder():\n data_folder = './MedData/'\n\n if not os.path.isdir(data_folder):\n os.makedirs(data_folder)\n\n return data_folder", "def submission_dir(self):\n submissions_dir = osp.join(self.root, \"submissions\")\n date = '-'.join([\n f'{getattr(datetime.now(), x)}'\n for x in ['year', 'month', 'day']])\n time = '-'.join([\n f'{getattr(datetime.now(), x)}'\n for x in ['hour', 'minute', 'second']])\n submission_name = f'{date}_{time}'\n path = osp.join(submissions_dir, submission_name)\n return path", "def user_directory_path(instance, filename: str) -> str:\n\n # File will be uploaded to MEDIA_ROOT/user_<id>/<filename>\n return 'user_{0}/{1}'.format(instance.profile.user.pk, filename)", "def medias(self):\n ret = {}\n m = self.application_tree['medias']\n for k, v in six.iteritems(m):\n ret[k] = media.Media(k, v)\n return ret", "def walker(self, path=None, base_folder=None):\n path = path or self.path\n base_folder = base_folder or self.base_folder\n # prevent trailing slashes and other inconsistencies on path.\n path = os.path.normpath(upath(path))\n if base_folder:\n base_folder = os.path.normpath(upath(base_folder))\n print(\"The directory structure will be imported in %s\" % (base_folder,))\n if self.verbosity >= 1:\n print(\"Import the folders and files in %s\" % (path,))\n root_folder_name = os.path.basename(path)\n for root, dirs, files in os.walk(path):\n rel_folders = root.partition(path)[2].strip(os.path.sep).split(os.path.sep)\n while '' in rel_folders:\n rel_folders.remove('')\n if base_folder:\n folder_names = base_folder.split('/') + [root_folder_name] + rel_folders\n else:\n folder_names = [root_folder_name] + rel_folders\n folder = self.get_or_create_folder(folder_names)\n for file_obj in files:\n dj_file = DjangoFile(open(os.path.join(root, file_obj)),\n name=file_obj)\n self.import_file(file_obj=dj_file, folder=folder)\n if self.verbosity >= 1:\n print(('folder_created #%s / file_created #%s / ' 
+\n 'image_created #%s') % (\n self.folder_created, self.file_created,\n self.image_created))", "def giveFileUploadDestinationPath(uploadmodel,filename):\n\n # uploadmodel can be either a ComicSite, meaning a\n # header image or something belonging to a ComicSite is being uploaded, or\n # a ComicSiteModel, meaning it is some inheriting class\n # TODO: This is confused code. Have a single way of handling uploads,\n # lika a small js browser with upload capability.\n\n\n if hasattr(uploadmodel,'short_name'):\n is_comicsite = True\n else:\n is_comicsite = False\n\n if is_comicsite:\n comicsite = uploadmodel\n # Any image uploaded as part of a comcisite is public. These images\n # are only headers and other public things\n permission_lvl = ComicSiteModel.ALL\n else:\n comicsite = uploadmodel.comicsite\n permission_lvl = uploadmodel.permission_lvl\n\n # If permission is ALL, upload this file to the public_html folder\n if permission_lvl == ComicSiteModel.ALL:\n \"\"\"Since we want this procedure only working for a specific Challenge (i.e., LUNA16) we put this flag. Hardcoding name of specific Challenge LUNA16\"\"\"\n\n if str(uploadmodel.comicsite) == \"LUNA16\":\n path = os.path.join(comicsite.public_upload_dir_rel(),\n os.path.join('%s' % uploadmodel.user, '%s_' %(datetime.datetime.now().strftime('%Y%m%d_%H%M%S')) + filename))\n\n else:\n path = os.path.join(comicsite.public_upload_dir_rel(), filename)\n else:\n\n if str(uploadmodel.comicsite) == \"LUNA16\":\n path = os.path.join(comicsite.upload_dir_rel(),\n os.path.join('%s' % uploadmodel.user, '%s_' %(datetime.datetime.now().strftime('%Y%m%d_%H%M%S')) + filename))\n\n else:\n path = os.path.join(comicsite.upload_dir_rel(), filename)\n\n path = path.replace(\"\\\\\",\"/\") # replace remove double slashes because this can mess up django's url system\n return path", "def getAttachDir(request, pagename, create=0):\n if request.page and pagename == request.page.page_name:\n page = request.page # reusing existing page obj is faster\n else:\n page = Page(request, pagename)\n return page.getPagePath(\"attachments\", check_create=create)", "def illustration_directory_path(request, file):\n return directory_path('illustration', file)", "def get_object(self):\n try:\n obj = WorkoutFile.objects.get(\n Q(workout=self.kwargs['workout_id']) & Q(file=f'workouts/{self.kwargs[\"workout_id\"]}/{self.kwargs[\"filename\"]}')\n )\n except:\n raise Http404(\"Media does not exist\")\n\n self.check_object_permissions(self.request, obj)\n\n return obj", "def upload():\n global FILE_NAME\n target = os.path.join(APP_ROOT, \"images\")\n print(target)\n\n if not os.path.isdir(target):\n os.mkdir(target)\n\n for file in request.files.getlist(\"file\"):\n print(file)\n filename = file.filename\n destination = \"/\".join([target, filename])\n FILE_NAME = destination\n file.save(destination)\n return render_template(\"complete.html\")", "def getDir( self ):\n return self.dir" ]
[ "0.6631069", "0.6379418", "0.63036174", "0.61629796", "0.6003821", "0.5945193", "0.58830917", "0.58586687", "0.5771684", "0.5700648", "0.5641766", "0.56303585", "0.55782616", "0.5443347", "0.543865", "0.53991973", "0.5380151", "0.5379563", "0.53412217", "0.5281436", "0.5271819", "0.5268363", "0.525724", "0.52451295", "0.523664", "0.5236475", "0.51891416", "0.51837957", "0.5172882", "0.5165145", "0.51589495", "0.5156361", "0.51493204", "0.51144487", "0.5094255", "0.50295275", "0.50120825", "0.50098616", "0.5003331", "0.49741405", "0.49547434", "0.4947242", "0.49419653", "0.49188843", "0.49151805", "0.4907989", "0.48897615", "0.48882806", "0.48841733", "0.48693815", "0.4861169", "0.4857943", "0.48530543", "0.4845623", "0.48382714", "0.48175946", "0.48171148", "0.48103023", "0.48007515", "0.4793966", "0.47908175", "0.47605893", "0.47357434", "0.47303724", "0.47250694", "0.47192553", "0.46970287", "0.4696323", "0.46942362", "0.46833995", "0.46803933", "0.4674241", "0.466977", "0.4668919", "0.46683162", "0.4654249", "0.4652004", "0.46368316", "0.46351394", "0.4624772", "0.46130732", "0.461257", "0.4610404", "0.46100733", "0.46075672", "0.46062127", "0.46020007", "0.4593941", "0.45907038", "0.45757273", "0.45728663", "0.45594797", "0.4554952", "0.45528448", "0.4546996", "0.45397437", "0.45366117", "0.4528686", "0.4521875", "0.4517513" ]
0.6963208
0
Utility function that creates a dedicated directory for Post media. Arguments =========
def create_news_post_dir(instance, prepend=settings.MEDIA_ROOT): for _ in (post_media_dir,): _path = path.split(_(instance, "", prepend=prepend))[0] try: mkdir(_path) except FileExistsError: pass except FileNotFoundError: if instance.ministry: _ministry = instance.ministry elif instance.campaign: _campaign = instance.campaign _ministry = _campaign.ministry else: e = 'There was an unknown error finding a dir for %s' % instance.name raise AttributeError(e) # NOTE: this is infinitely recursive if `prepend` does not lead to correct directory create_news_post_dir(instance, prepend=prepend)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_media_dir(instance, filename, prepend=settings.MEDIA_ROOT):\n if instance.ministry:\n _ministry = instance.ministry\n elif instance.campaign:\n _ministry = instance.campaign.ministry\n else:\n e = 'There was an unknown error finding a dir for %s' % instance.title\n raise AttributeError(e)\n\n return path.join(generic_media_dir(_ministry, prepend=prepend),\n 'post_media', filename)", "def setup_local_storage(mod, media_type, media_id, id=None):\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n if mod == 'post':\n mod = 'posts'\n path = os.path.join(BASE_DIR, 'save', mod, str(media_id))\n if id:\n path = os.path.join(BASE_DIR, 'save', mod, str(id))\n name = media_type.lower()\n try:\n os.mkdir(path)\n except FileExistsError as e:\n timestamp = time.strftime('%Y%m%d-%H%M%S')\n name += f\"_{timestamp}\"\n except OSError as e:\n raise InvalidUsage(\"OSError in setup_local_storage. \", status_code=501, payload=e)\n filename = f\"{str(path)}/{name}\"\n return path, filename", "def make_directory(scripts):\n if not os.path.exists(os.path.join(os.path.dirname(__file__), 'Uploads')):\n os.makedirs(os.path.join(os.path.dirname(__file__), 'Uploads'))\n for script_object in scripts:\n if script_object.type is None:\n continue\n path = script_object.type.split('::')\n path = os.path.join(os.path.dirname(__file__), \"/\".join(path[:-1]))\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir(path):", "def _create_folders(self):\n if not os.path.exists(os.path.join(BASE_DIR, DIR)):\n os.mkdir(os.path.join(BASE_DIR, DIR))\n directory = os.path.join(BASE_DIR, DIR, self.title)\n if not os.path.exists(directory):\n os.mkdir(directory)\n return directory", "def mkdir(self, *args):\n p = self.join(*args)\n error.checked_call(os.mkdir, os.fspath(p))\n return p", "def create_media_path(custom_path=''):\n def generate_path(instance, filename):\n if hasattr((instance), 'name'):\n return os.path.join(\n custom_path,\n instance.name,\n filename\n )\n\n return os.path.join(\n custom_path,\n filename\n )\n\n return generate_path", "def _create_dir(filename):\n head = os.path.dirname(filename)\n if head != '' and not os.path.isdir(head):\n os.makedirs(head)", "def create_directory(tracking_id):\n upload_path = os.path.join(app.config['DRS_UPLOADS'], '{0}'.format(tracking_id))\n if not os.path.isdir(upload_path):\n os.mkdir(upload_path)", "def prepare_url(self, url, **kwargs):\n (self.base_path / url).mkdir(mode=kwargs.get(\"dir_mode\", 0o755), parents=True)", "def create_dir(cls, relpath):\r\n safe_mkdir(os.path.join(cls.build_root, relpath))", "def create_dir(dir_type, base_path):\n\n path = os.path.join(base_path, dir_type)\n if not os.path.exists(path):\n os.mkdir(path)\n print('Created directory {!r}'.format(path))\n else:\n print('Found directory {!r}'.format(path))\n\n\n if dir_type.find('figure') != -1:\n sc.settings.figdir = path\n scv.settings.figdir = path\n\n return path", "def prep_folder(args):\n if(args.save_folder[-1]!='/'):\n args.save_folder += '/'\n if(not os.path.isdir(args.save_folder)):\n os.mkdir(args.save_folder)", "def create_directory():\n try:\n if os.path.isdir(\"./imagesFromTweets\") != True:\n os.makedirs(\"./imagesFromTweets\")\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def mkDir(contentDirPath):\n\tif os.path.isdir(contentDirPath):\n\t\tprint \"Directory %s already exists.\" % contentDirPath\n\t\tclearLocalDir(contentDirPath)\n\t\treturn;\n\telse:\n\t\tos.mkdir(contentDirPath)\n\t\tprint \"Created directory 
%s.\" % contentDirPath", "def dirmaker(dirp):\n try:\n if not os.path.exists(dirp):\n os.makedirs(dirp)\n except:\n pass", "def create_gallery(jekyll_site_path, gallery_name):\n\n gallery_path = os.path.join(jekyll_site_path, 'images', 'galleries', gallery_name)\n\n if not os.path.exists(gallery_path):\n os.makedirs(gallery_path)\n\n print(f\"Created gallery path {gallery_path}\")\n\n return gallery_path", "def create_project_dir():\r\n with settings(warn_only=True):\r\n run('mkdir -p %s/packages' % (env.path,))\r\n run('mkdir %s/log' % (env.path,))\r\n run('mkdir -p %s/media/uploads' % (env.path,))\r\n run('mkdir -p %s/collected_static' % (env.path,))\r\n # change permissions for writable folder\r\n cmd = env.host_settings.get('make_folder_world_writeable','chown -R www-data:www-data')\r\n if cmd:\r\n run('%s %s/media' % (cmd, env.path))\r\n run('%s %s/collected_static' % (cmd, env.path))", "def directory(self) -> Path:\n (directory := Path(\"markdown\").resolve(strict=False)).mkdir(exist_ok=True, parents=True)\n return directory", "def make_dir(url):\n parts = url.strip('/').split('/')\n done = []\n for part in parts:\n path = os.path.join(STORAGE_PATH, '/'.join(done), part)\n if not os.path.exists(path):\n os.mkdir(path)\n done.append(part)", "def CreateDirs(self):\n# First, create a list of directories.\n dnames = []\n tags = ['', '_m', '_mf']\n for entry in self.info.keys():\n if self.info[entry]['type'] == 'epi':\n for tag in tags:\n fname = self.info[entry].get('imgfile%s' % tag, None)\n if fname is not None:\n dnames.append(os.path.dirname(fname))\n else:\n if self.info[entry].get('outdir',None) is not None:\n dnames.append(self.info[entry]['outdir'])\n\n# Create them if they don't already exist.\n for dname in dnames:\n if not os.path.exists(dname):\n self.MakeDir(dname)\n if self.verbose:\n print 'mkdir %s' % dname", "def create_directory(self):\n dirname = self.name+\"_distillates\"\n i = 1\n while True:\n try:\n mkdir(dirname)\n return dirname\n except OSError:\n dirname = self.name+\"_distillates_{0}\".format(i)\n i += 1", "def create_directory():\r\n\r\n # Create directory for all lyrics\r\n try:\r\n os.mkdir(markovDir)\r\n except FileExistsError:\r\n pass", "def create_files(save_dir, vid_name):\n file_name = vid_name.split('/')[-1].split('.')[0]\n if not os.path.isdir(os.path.join(save_dir, file_name)):\n os.makedirs(os.path.join(save_dir, file_name))\n return file_name", "def prepare_destination(self):\n self.movie_root_path = self.config.share_movie_root_path % (\n self.share_path, self.title)\n\n if os.path.isdir(self.movie_root_path):\n if self.capacity_reached():\n Logger.log(\n '[!] Capacity reached. Skipping adding movie %s.' 
% self.title)\n else:\n if not os.path.isdir(self.movie_root_path):\n Logger.log('[+] Adding Movie: %s' % self.title)\n os.mkdir(self.movie_root_path)", "def MakeDir(self, path: str) -> None:\n ...", "def createDir(self, dir_name):\n os.mkdir(os.path.join(self.user[\"Save\"], dir_name))", "def create_directory_structure():\n\n def ensure_directory(path):\n try:\n os.makedirs(path)\n\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n ensure_directory('./out/textures')\n ensure_directory('./out/data')", "def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def DefineAndCreateDirectory(targetDirectory):\n \n if not os.path.isdir(targetDirectory):\n os.makedirs(targetDirectory)\n \n # Make sure path ends with separator (//)\n if not targetDirectory.endswith(os.path.sep):\n targetDirectory += os.path.sep\n \n return targetDirectory", "def cmd_mkdir(self, msg_dict):\r\n dir = msg_dict[\"second_parameter\"]\r\n # dir_path = \"/home/%s/%s\" % (msg_dict[\"username\"],dir)\r\n dir_path = msg_dict[\"current_directory\"] + '/' + msg_dict[\"second_parameter\"]\r\n os.system(\"mkdir %s\" % dir_path)\r\n self.request.send(msg_dict[\"current_directory\"].encode())", "def _createDir(self, dirFullPath):\n os.makedirs(dirFullPath)", "def create_directory():\n global dirName\n dirName = 'Downloaded Files'\n global folder_path\n if os.path.isdir(dirName) == True:\n print(\"This folder already exists, path:\", os.path.abspath(dirName))\n else:\n os.mkdir(dirName)\n global folder_path\n folder_path = os.path.abspath(dirName)\n print(\"Directory \" , dirName , \" Created \")", "def create_dirs(self):\n for new_directory in [self.event_dir, self.event_dir / 'videos']:\n new_directory.mkdir(exist_ok=self.overwrite)\n logger.debug('Dir {} created', new_directory)", "def create_directories(path):\n directories = ['images', 'pdf', 'videos', 'audio', 'spreedsheet', 'text', 'scripts', 'docs', 'other']\n for directory in directories:\n create_directory(path, directory)", "def upload_dir(self):\n return os.path.join(settings.MEDIA_ROOT,self.upload_dir_rel())", "def create(self, basedir, outdir, name, prefix=None):", "def create_directory_structure(path_main):\n\n if not path_main.exists():\n path_main.mkdir(parents=True)", "def get_destination_path(default, project):\n # Select a shard or use default.\n media_shards = os.getenv('MEDIA_SHARDS')\n if media_shards is None:\n path = default\n else:\n path = f\"/{random.choice(media_shards.split(','))}\"\n # Make sure project path exists.\n project_path = os.path.join(path, str(project))\n os.makedirs(project_path, exist_ok=True)\n return path", "def safeCreateDir(relPath):\n if not os.path.isdir(relPath):\n os.mkdir(relPath)", "def prepDir(path=None):\n if path:\n if os.path.exists(path):\n return path\n else:\n os.makedirs(path)\n else:\n # Do something innocent when no path is provided\n path = tempfile.mkdtemp(prefix='XEPs_')\n print \"creating {} for output\".format(path)\n return path", "def make_directory(self):\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory)", "def make_mtrack_basedir(mtrack):\n if not BASEDIR_WRITEABLE:\n check_basedir_writeable()\n\n if not os.path.exists(mtrack.audio_path):\n os.mkdir(mtrack.audio_path)\n\n if not os.path.exists(mtrack._stem_dir_path):\n os.mkdir(mtrack._stem_dir_path)\n\n if not os.path.exists(mtrack._raw_dir_path):\n os.mkdir(mtrack._raw_dir_path)\n\n return True", "def create_dir(link_dir):\n if not 
os.path.exists(link_dir):\n os.makedirs(link_dir)", "def createFolder(self):\n self.destination = self.getPath() #Find the destination to create the folder\n try:\n os.makedirs(self.destination) #Try and make a folder\n except FileExistsError:\n pass #Otherwise continue if an error is encountered because the file exists already", "def make_directories():\n os.mkdir('principal_wings')\n os.mkdir('random_wings')", "def make_directory(base):\n \t\n i = 0\n while 1:\n try:\n if i == 0:\n dirname = base\n else:\n dirname = base + '_' + str(i)\n os.mkdir(dirname)\n break\n except OSError:\n if not os.path.isdir(dirname):\n raise\n i += 1\n pass \n return dirname", "def fresh_media_root(**kwargs):\n with TemporaryDirectory(**kwargs) as media_root:\n with override_settings(MEDIA_ROOT=media_root):\n yield", "def _create_dir(self, stream_name:str=None, version:int=None, user_id:str=None):\n storage_path = self._get_storage_path(stream_name=stream_name, version=version, user_id=user_id)\n if self.nosql_store == \"hdfs\":\n if not self.fs.exists(storage_path):\n self.fs.mkdir(storage_path)\n return storage_path\n elif self.nosql_store==\"filesystem\":\n if not os.path.exists(storage_path):\n self.fs.makedirs(storage_path)\n return storage_path\n return None", "def create_dir(dir_path,plot_type):\n\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUZWXYZ\"\n alphabet += alphabet.lower()\n alphabet += \"01234567890\"\n\n\n if dir_path==None or dir_path=='':\n dir_path=''\n random_dir_name=''.join([choice(alphabet) for i in range(10)])\n dir_path ='./'+plot_type+strftime(\"%Y_%m_%d_%H_%M_%S\")+random_dir_name+'/'\n\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n\n return dir_path", "def create_content_folder(output_path):\n config['Instance'] = {}\n config['Instance']['content_dir'] = output_path\n\n try:\n os.makedirs(output_path)\n except FileExistsError:\n pass", "def handle_directory(directory_path):\n # if directory has no trailing '/' then add it\n if directory_path[-1] != '/':\n directory_path += '/'\n # if directory doesn't exist then create it\n if not os.path.exists(directory_path):\n os.mkdir(directory_path)\n\n return directory_path", "def CreateDirectory(dir):\n if not os.path.exists(dir):\n os.makedirs(dir, 0777)", "def mkdir(self, path):\n try:\n postdata = codecs.encode(json.dumps({ 'dir': path }), 'utf-8')\n self._urlopen('/api/fileops/mkdir', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to create '{}'\".format(path))", "def mkd(self,dirname):\n try:\n self.ftp.mkd(dirname)\n except:\n print('Error creating remote folder:%s'%dirname)\n return 1\n\n return 0", "def make_path(self):\n folders = [\n f\"{self.save_path}{self.name}/json/\",\n f\"{self.save_path}{self.name}/images/\",\n ]\n if hasattr(self, \"masks\"):\n folders.append(f\"{self.save_path}{self.name}/masks/\")\n for folder in folders:\n if not os.path.exists(folder):\n os.makedirs(folder)", "def make_dir(dir_path):\n if os.path.isdir(dir_path) == False:\n os.mkdir(dir_path)", "def create_dir(self, domain):\n domain_dir = self.get_domaindir(domain)\n if not os.path.exists(domain_dir):\n os.makedirs(domain_dir)\n\n for dir in DOMAIN_DIRS:\n dir_ = domain_dir + \"/\" + dir\n if not os.path.exists(dir_):\n os.makedirs(dir_)", "def create_directory() -> None:\n slash_indexes = []\n for x in range(0, len(directory)):\n if directory[x] == \"/\" or directory[x] == \"\\\\\":\n slash_indexes.append(x)\n \n directories_to_create = []\n for x in range(0, len(slash_indexes)):\n if x == len(slash_indexes)-1:\n if 
os.path.isdir(directory[0:len(directory)]):\n existing_directory = directory[0:len(directory)]\n else:\n directories_to_create.append(directory[0:len(directory)])\n\n else: \n if os.path.isdir(directory[0:slash_indexes[x+1]]):\n existing_directory = directory[0:slash_indexes[x+1]]\n else:\n directories_to_create.append(directory[0:slash_indexes[x+1]])\n\n for _dir in directories_to_create:\n os.mkdir(_dir)", "def create_dir_for_file(f_path):\n d = os.path.dirname(f_path)\n if d and not os.path.exists(d):\n os.makedirs(d)", "def external_storage():\r\n\r\n folder_path = os.path.join('/media/pi/', args.storage_name, args.directory_name)\r\n if not os.path.exists(folder_path):\r\n os.makedirs(folder_path)\r\n return folder_path", "def make_experiment_directory(path='',config=None,default_dir='_runs'):\n directory = path\n if not path:\n timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S-%f')\n directory = os.path.join(default_dir,timestamp)\n directory = os.path.abspath(directory) \n if os.path.isdir(directory) and not config.override and not config.cloud:\n raise ValueError(\n 'directory already exists, use --override option: %s'\n % directory)\n elif os.path.isdir(directory) and not config.cloud: \n rmtree(directory)\n if not config.cloud: \n os.makedirs(directory)\n if config:\n config.wdir = directory \n return directory", "def mkdir(base, name):\n path = os.path.join(base, name)\n if not os.path.exists(path):\n os.makedirs(path)\n return path", "def mkdir(self, *args):\r\n if args:\r\n return self.join(*args).mkdir()\r\n else:\r\n self._svn('mkdir')\r\n return self", "def create_dir(_dir):\n if not os.path.exists(_dir):\n os.makedirs(_dir)", "def dirCreate(newFoldername):\r\n current_directory = os.getcwd()\r\n new_directory = os.path.join(current_directory,newFoldername)\r\n \r\n if not os.path.exists(new_directory):\r\n os.makedirs(new_directory)\r\n return new_directory", "def create_directory(dirname):\n\n if dirname and not os.path.exists(dirname):\n os.makedirs(dirname)", "def make_directories(self):\n os.makedirs(self.data_dir, exist_ok=True)\n os.makedirs(self.patches_dir, exist_ok=True)\n os.makedirs(self.raw_image_dir, exist_ok=True)\n os.makedirs(self.pro_image_dir, exist_ok=True)\n os.makedirs(self.results_dir, exist_ok=True)", "def write_upload_files(self, appstruct):\n \n # Create the directory if it does not exist\n final_dir = \"thumbnails/%s\" % slugify(appstruct[\"serial\"])\n if not os.path.exists(final_dir):\n log.info(\"Make directory: %s\", final_dir)\n os.makedirs(final_dir)\n\n final_file = \"%s/uploaded.pdf\" % final_dir\n file_pointer = appstruct[\"pdf_upload\"][\"fp\"]\n self.single_file_write(file_pointer, final_file)", "def create_directory(Name=None, ShortName=None, Password=None, Description=None, Size=None, VpcSettings=None):\n pass", "def _make_dirs(filepath, mode):\n parent = filepath.parent\n if \"w\" in mode and parent:\n os.makedirs(parent, exist_ok=True)", "def mkdir(dirName):\r\n raw = 'mockaroo_data/raw/' + dirName\r\n cln = 'mockaroo_data/cln/' + dirName\r\n call(['mkdir', raw])\r\n call(['mkdir', cln])\r\n\r\n return", "def createShotDirs(shotFolder, shotName, *args):\n cFuncs.createShotDirectories(shotFolder, shotName)", "def make_dir(name='results'):\n if os.path.isabs(name):\n output_path = name\n else:\n output_path = os.path.join(os.getcwd(), name)\n\n if ('.' 
not in output_path):\n directory = os.path.dirname(os.path.join(output_path, 'toto')) # doesn't work w/o 'toto'\n else :\n directory = os.path.dirname(output_path);\n\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n return output_path", "def createDirectories(self):\n # -- LOG\n thepath = os.path.dirname(self.settings.logfile)\n distutils.dir_util.mkpath(thepath)\n\n # -- SESSION \n thepath = self.settings.sessionpath\n distutils.dir_util.mkpath(thepath)\n\n # -- DATABASE\n thepath = self.settings.dbpath\n distutils.dir_util.mkpath(thepath)", "def create_dir(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)", "def create_folder(self):\n self.config.csv_path.mkdir(parents=True, exist_ok=True)\n self.config.images_path.mkdir(parents=True, exist_ok=True)", "def mkdir(self, remotefolder):\n raise NotImplementedError(\"Implement this method in child class\")", "def prepare_folder(path):\n if not os.path.isdir(path):\n os.makedirs(path)", "def get_media_directory():\n\treturn _paths[_MEDIA_DIRECTORY_KEY]", "def create_directory(self, directory: str) -> Dict:\n raise NotImplementedError", "def create_directories(self, app_label):\n for folder_name in [\"views\", \"urls\", \"templates/%s\" % app_label]:\n directory_path = \"%s/%s\" % (app_label, folder_name)\n if not os.path.exists(directory_path):\n os.makedirs(directory_path)", "def makeDirs(self, inPath):\n\n if not os.path.exists(inPath): os.mkdir(inPath)", "def _make_media_path(self, media_id, host, username=None, width=None, height=None, crop=None):\n\t\ttry:\n\t\t\tmedia_id = validation.media_id(media_id)\n\t\t\tvalidation.required(host, 'host')\n\t\t\tif username:\n\t\t\t\tusername = validation.username(username, 'username')\n\t\t\tif width:\n\t\t\t\twidth = validation.cast_integer(width, 'width')\n\t\t\t\theight = validation.cast_integer(height, 'height')\n\t\t\t\tcrop = validation.cast_boolean(crop, 'crop')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\t@stack\n\t\tdef make_path(void):\n\t\t\tx = media_id[0]\n\t\t\tyz = media_id[1:3]\n\t\t\tmedia_path = \"%s/%s/%s/%s\" % (self._cfg_media_root_dir, host, x, yz)\n\t\t\tif width:\n\t\t\t\tmedia_path += \"/renders/%sx%sx%s\" % (width, height, int(crop))\n\t\n\t\t\tif username:\n\t\t\t\tmedia_path += \"/%s\" % username\n\t\n\t\t\tmedia_path += \"/%s\" % media_id\n\t\n\t\t\treturn media_path\n\n\t\td = Deferred()\n\t\td.addCallback(make_path)\n\t\td.addCallback(lambda _: (0, _))\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\td.callback(0)\n\t\treturn d", "def album_directory_path(instance, filename):\n return 'gallery/album/{0}/{1}'.format(instance.album_name, filename)", "def make_directory(self,directory):\n\t\tif not os.path.exists(directory):\n\t\t\ttry:\n\t\t\t\tos.makedirs(directory)\n\t\t\t\treturn directory\n\t\t\texcept():\n\t\t\t\tprint('Could not create directory ', directory)\n\t\t\t\treturn None\n\t\telse:\n\t\t\treturn directory", "def make_source_dir():\n\n os.makedirs(files['source_dir'].rel)", "def make_gallery(post_name, image_list, config={'gallery_dir': 'galleries'}):\n gallery_name = make_gallery_name_from_post_name(post_name)\n gallery_path = get_gallery_path(gallery_name)\n output_path = os.path.join(gallery_path, \"index.md\")\n with open(output_path, \"w\") as fd:\n fd.write(make_gallery_index(gallery_name, image_list))\n\n copy_images(gallery_path, image_list)\n #make_thumbs\n #make_image_pages", "def createDirectory(self, 
summary_handle,directory,mode,role =\"\",summary_var_dict={}):\n if role:\n directory = directory + \"/\" + role\n \n tmp_var = \"mkdir -p %s%s%s\" %(directory,self,role)\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"true\":\n return\n\n self.pushMode(CLI_MODES.shell)\n if role:\n self.removePath(directory)\n\n logger.info (\"Directory is %s\" %directory)\n output = self.sendCmd(\"mkdir -p %s\" % directory)\n status = self.command_execution_status()\n if status == \"true\":\n summary_handle.write(\"mkdir -p %s,%s,%s,pass \\n\" %(directory,self,role))\n else:\n summary_handle.write(\"mkdir -p %s,%s,%s,fail \\n\" %(directory,self,role)) \n\n self.popMode()\n return output", "def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n if self.isdir(dst):\n full_dst = os.path.join(dst, os.path.basename(src)) if src else dst\n\n elif self.isfile(dst):\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing dirs\n dst_dir = self.dirname(dst)\n if dst_dir and self.create_file_dir and not self.isdir(dst_dir):\n self.mkdir(dst_dir, perm=perm, recursive=True)\n full_dst = dst\n\n return full_dst", "def fs_mkdir(self, dirname: str) -> None:\n self.exec_(\"import uos\\nuos.mkdir('%s')\" % dirname)", "def _prepare_directory(destination, connector_id):\n\n # Use the destination directory when provided\n if destination:\n if not os.path.exists(destination):\n # Create all sub-directories\n os.makedirs(destination)\n # Create a sub-directory in the current directory\n # when a destination isn't provided\n else:\n if not os.path.isdir(connector_id):\n os.mkdir(connector_id)\n destination = connector_id\n\n if os.path.isdir(destination):\n os.chdir(destination)\n else:\n error = 'Couldn\\'t download to the desination directory {}.'\n raise CLIError(error.format(destination))\n\n return os.getcwd()", "def mkdir_needed(d):\n dirs=[d['outdir']]\n dirs.append( get_sample_dir(d['outdir'],d['obj']) )\n for dr in dirs:\n if not os.path.exists(dr):\n os.makedirs(dr)", "def video_directory_path(instance, filename):\n return 'gallery/video/{0}/{1}'.format(instance.video_name, filename)", "def cmd_mkd(args):", "def createdatafolder(name):\n folder = os.path.join(pathtofolder(),name)\n os.makedirs(folder)\n pass", "def create_directory_for_specialist(spec_name):\n os.makedirs(root_location + \"specialists/\" + model_name(spec_name) + \"/\", exist_ok = True)", "def create_directory(self, directory):\n mgm, directory = self._safe_split_mgm(directory)\n logger.warning('Creating directory on SE: {0}'.format(self._join_mgm_lfn(mgm, directory)))\n cmd = [ 'xrdfs', mgm, 'mkdir', '-p', directory ]\n svj.core.utils.run_command(cmd)", "def _UploadFile(self, media_source, title, category):\n media_entry = gdata.GDataEntry()\n media_entry.title = atom.Title(text=title)\n media_entry.category.append(category)\n media_entry = self.Post(media_entry, '/feeds/documents/private/full',\n media_source = media_source,\n extra_headers = {'Slug' : media_source.file_name })\n\n return media_entry" ]
[ "0.7129349", "0.6599995", "0.65848666", "0.6278641", "0.6270782", "0.62637275", "0.621008", "0.6079701", "0.60781497", "0.6029266", "0.6018568", "0.6013729", "0.5994422", "0.5988125", "0.5982215", "0.59778285", "0.5950593", "0.59488165", "0.59399545", "0.59338474", "0.5907977", "0.58873296", "0.5882072", "0.58646166", "0.5859473", "0.58408856", "0.58262074", "0.5810219", "0.5789764", "0.5789764", "0.5788908", "0.5782401", "0.57809186", "0.5770615", "0.5765893", "0.575373", "0.57393867", "0.5731182", "0.5722723", "0.5721178", "0.571974", "0.570711", "0.57017773", "0.569901", "0.56806976", "0.5676483", "0.5669248", "0.56661516", "0.5658683", "0.56479055", "0.56423175", "0.56398684", "0.5639493", "0.5627241", "0.5623383", "0.5610224", "0.560657", "0.5605446", "0.55874693", "0.5585878", "0.55829203", "0.55755067", "0.55750597", "0.55678236", "0.5557937", "0.5548448", "0.55470014", "0.5546248", "0.55230904", "0.55113775", "0.5509401", "0.5506635", "0.5503918", "0.5499548", "0.5495557", "0.5494846", "0.549423", "0.5493004", "0.54874617", "0.54795223", "0.54734105", "0.54733497", "0.5472251", "0.5468412", "0.5468144", "0.5467742", "0.54588777", "0.5443491", "0.5437102", "0.54350924", "0.54340935", "0.5427602", "0.5424041", "0.54238147", "0.54235166", "0.54203343", "0.5419905", "0.5415662", "0.54148614", "0.5414697" ]
0.73629373
0
Roundtrip to check what we serialise is what we get back.
def test_serialises_and_deserialises_hs00_message_correctly_for_full_1d_data(self): original_hist = { "source": "some_source", "timestamp": 123456, "current_shape": [5], "dim_metadata": [ { "length": 5, "unit": "m", "label": "some_label", "bin_boundaries": np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]), } ], "last_metadata_timestamp": 123456, "data": np.array([1.0, 2.0, 3.0, 4.0, 5.0]), "errors": np.array([5.0, 4.0, 3.0, 2.0, 1.0]), "info": "info_string", } buf = serialise_hs00(original_hist) hist = deserialise_hs00(buf) assert hist["source"] == original_hist["source"] assert hist["timestamp"] == original_hist["timestamp"] assert hist["current_shape"] == original_hist["current_shape"] self._check_metadata_for_one_dimension( hist["dim_metadata"][0], original_hist["dim_metadata"][0] ) assert np.array_equal(hist["data"], original_hist["data"]) assert np.array_equal(hist["errors"], original_hist["errors"]) assert hist["info"] == original_hist["info"] assert ( hist["last_metadata_timestamp"] == original_hist["last_metadata_timestamp"] )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dumps(self):\n result = self.mapper.dumps(self.serialization)\n self.mapper.to_dict.assert_called_once_with(\"custom\")\n self.serialization.assert_called_once_with(\n self.mapper.to_dict.return_value\n )\n self.assertIs(result, self.serialization.return_value)", "def _serialise(self):\n # TODO (M Foley)\n pass", "def test_return_type(self):\n self.assertEqual(type(self.obj.to_json_string(self.valid_ld)), str)", "def test_dump_single(self):\n result = self.serializer.dump(self.schema_to_serialize)\n self.assertIsInstance(result, dict)", "def revert(self):\n if hasattr(self, '_init_data'):\n self.deserialize(self._init_data)\n return True\n return False", "def test_serialize(self):\n self.assert_raises(TypeError, self.instance.serialize, (1,))", "def test_serialize_no_metadata(self):\n pass # pragma: no cover", "def isJWS_unserialized_single(x):\n if isinstance(x, dict) \\\n and \"payload\" in x and \"signature\" in x \\\n and (\"protected\" in x or \"unprotected\" in x):\n try: \n if \"protected\" in x: \n json.loads(x[\"protected\"])\n return True\n except:\n return False\n else: \n return False", "def roundtrip(data):\r\n body = xmlrpclib.dumps(data)\r\n result = xmlrpclib.loads(body)[0]\r\n if result != data:\r\n print result", "def test_serialize(value, expected):\n assert json_dumps(value) == expected", "def assertDeserializeNonString(self):\r\n self.assertDeserializeEqual(None, None)\r\n self.assertDeserializeEqual(3.14, 3.14)\r\n self.assertDeserializeEqual(True, True)\r\n self.assertDeserializeEqual([10], [10])\r\n self.assertDeserializeEqual({}, {})\r\n self.assertDeserializeEqual([], [])\r\n self.assertDeserializeEqual(None, 'null')", "def test_serialize(self):\n self.assertEqual(self.scrapes.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article', 'authors': [{'lastname': u'Swain', 'firstname': u'Matt'}]},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article', 'authors': [{'lastname': u'Smith', 'firstname': u'John'}]},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article', 'authors': [{'lastname': u'Bond', 'firstname': u'James'}]}\n ])", "def test_serialize(self):\n self.assertEqual(self.scrapes.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article', 'authors': [{'lastname': u'Swain', 'firstname': u'Matt'}]},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article', 'authors': [{'lastname': u'Smith', 'firstname': u'John'}]},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article', 'authors': [{'lastname': u'Bond', 'firstname': u'James'}]}\n ])", "def test_serialize(self):\n r = self.RNA(\"ugagg\")\n assert dumps(r)", "def is_sedes(obj):\n return hasattr(obj, 'serialize') and hasattr(obj, 'deserialize')", "def isJWP_unserialized(x):\n if isinstance(x, dict) \\\n and \"payload\" in x and isinstance(x[\"unprotected\"], dict)\\\n and \"unprotected\" in x and \"signature\" not in x:\n return True\n else:\n return False", "def isJWE_unserialized(x):\n return isJWE_unserialized_single(x) or isJWE_unserialized_multi(x)", "def test_serialize_sinfo(self):\n self.assert_raises(RuntimeError, self.instance.serialize,\n self.testing_options['objects'][0],\n add_serializer_info=True)", "def isJWS_unserialized(x):\n return isJWS_unserialized_single(x) or isJWS_unserialized_multi(x)", "def test_dumps(self):\n data = \"something\"\n result = self.mapper.loads(self.deser_fn, data)\n self.mapper.from_dict.assert_called_once_with(\n self.deser_fn.return_value, \"custom\"\n 
)\n self.deser_fn.assert_called_once_with(data)\n self.assertIs(result, self.mapper.from_dict.return_value)", "def is_stringified(self) -> bool:\n return self._stringify", "def isJWE_unserialized_single(x):\n if isinstance(x, dict) \\\n and (\"unprotected\" in x or \"protected\" in x) \\\n and (\"ciphertext\" in x):\n try:\n if \"protected\" in x:\n json.loads(x[\"protected\"])\n return True\n except:\n return False\n else:\n return False", "def test_encode(self):\n result = json.loads(self.cls.objects.to_json())\n for index, item in enumerate(result):\n self.assertNotIn(\n \"to_json_exclude\", item,\n (\"to_json_exclude found at index {}\").format(index)\n )\n self.assertNotIn(\n \"json_exclude\", item,\n (\"json_exclude found at index {}\").format(index)\n )\n self.assertIn(\n \"from_json_exclude\", item,\n (\"from_json_exclude not found at index {}\").format(index)\n )\n self.assertIn(\n \"required\", item,\n (\"required not found at index {}\").format(index)\n )", "def _post_deserialize (self):\n pass", "def test_serialize_none(self):\n self.assertEqual(serialize(None), 'null')", "def isJOSE_unserialized(x):\n return isJWS_unserialized(x) or isJWE_unserialized(x) \\\n or isJWP_unserialized(x)", "def test_08(self):\n ret = Base.to_json_string(None)\n self.assertEqual(ret, \"[]\")", "def assertSerializeDeserialize(self, obj, version=None):\n tested_versions = [version] if version is not None else Version.supported\n for version in tested_versions:\n constructor = obj.__class__.from_json\n json_obj = obj.to_json(version)\n clone = constructor(json_obj)\n\n self.assertEqual(obj.__class__, clone.__class__)\n\n if isinstance(obj, dict):\n orig_dict = obj\n clone_dict = clone\n else:\n orig_dict = obj.__dict__\n clone_dict = clone.__dict__\n\n self.assertEqual(orig_dict, clone_dict)", "def test_quantitative_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def is_jsonable(self, input):\n try:\n json.dumps(input)\n return True\n except (TypeError, OverflowError):\n return False", "def test_encode_decode(self):\n assert self._test == pybinn.loads(pybinn.dumps(self._test))", "def test_valid_ld(self):\n self.assertEqual(self.obj.to_json_string(self.valid_ld),\n json.dumps(self.valid_ld))", "def assertDeserializeEqual(self, expected, arg):\r\n assert_equals(expected, deserialize_field(self.test_field(), arg))", "def test_dumps(self):\n schema = self.UnitTestingSchema()\n serializer = JSONSchema()\n self.assertIsInstance(serializer.dumps(schema), str)", "def test_serialization_deserialization(self):\n\n original_time = now()\n serialized_time = DatetimeMapper.forward(original_time)\n assert serialized_time == original_time.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n\n deserialized_time = DatetimeMapper.backward(serialized_time)\n assert original_time == deserialized_time\n\n deserialized_time = DatetimeMapper.backward(None)\n assert isinstance(deserialized_time, datetime)", "def _TestReadSerialized(self, serializer_object, json_dict):\n # We use json.dumps to make sure the dict does not serialize into\n # an invalid JSON string such as one that contains Python string prefixes\n # like b'' or u''.\n json_string = json.dumps(json_dict)\n unserialized_object = serializer_object.ReadSerialized(json_string)\n\n self.assertIsNotNone(unserialized_object)\n return unserialized_object", "def test_non_list_of_dicts_arg(self):\n self.assertEqual(self.obj.to_json_string(666), '666')", "def 
test_deserialize(self):\r\n\r\n # test that from_json produces no exceptions\r\n self.assertDeserializeEqual('10:20:30', '\"10:20:30\"')", "def check_round_trip(data: dict, logger: Logger) -> Optional[bytes]:\n try:\n as_json_text = json.dumps(data, default=encode_value).encode(\"utf-8\")\n except Exception as e:\n report_error(\"CumulusCI found an unusual datatype in your config:\", e, logger)\n return None\n try:\n test_load = load_config_from_json_or_pickle(as_json_text)\n assert _simplify_config(test_load) == _simplify_config(\n data\n ), f\"JSON did not round-trip-cleanly {test_load}, {data}\"\n except Exception as e: # pragma: no cover\n report_error(\"CumulusCI found a problem saving your config:\", e, logger)\n return None\n assert isinstance(as_json_text, bytes)\n return as_json_text", "def is_raw(self):\n return not self.has_structure", "def parse_round_trip(self):\n parsed = self.test_proto.parse()\n round_trip = avro.protocol.parse(str(parsed))\n self.assertEqual(parsed, round_trip)", "def test_serialize_and_deserialize_returns_unchanged_collection(\n self\n ) -> None:\n self.assertEqual(\n self.collection.to_dict(),\n collection_domain.Collection.deserialize(\n self.collection.serialize()).to_dict())", "def test_serialize(self, val):\n val_orig = FitVal(*val)\n\n ser = json.dumps(val_orig, cls=ExperimentEncoder)\n val_deser = json.loads(ser, cls=ExperimentDecoder)\n\n self.assertEqual(val_orig, val_deser)", "def recvcheck(self):\n\n data = self.recv()\n if self.serializer == 'json' and data and isinstance(data, dict):\n if '@error' in data:\n exception = getattr(builtins, data['@error'])\n if (isinstance(exception, type) and\n issubclass(exception, Exception)):\n raise exception(data['@message'])\n else:\n if isinstance(data, Exception):\n raise data\n return data", "def test_encoding_round_trip(cell):\n orig = copy.copy(cell.__dict__)\n cell._from_serializeable_dict(cell._to_serializeable_dict())\n round_trip = cell.__dict__\n for key in cell._allowed:\n if type(orig[key]) == np.ndarray or type(orig[key]) == list:\n assert all(orig[key] == round_trip[key])\n else:\n assert orig[key] == round_trip[key]", "def test_json(self):\n\t\tdecoded = json.loads(json.dumps(self.filter.to_js_obj()))\n\t\tself.assertIsNotNone(decoded, \"JSON conversion failed!\")\n\t\tself.assertEqual(self.filter.to_js_obj(), decoded, \"JSON conversion mismatch!\")", "def test_to_json(self):\n self.amenity_json = self.amenity.to_json()\n actual = 1\n try:\n serialized = json.dumps(self.amenity_json)\n except:\n actual = 0\n self.assertTrue(1 == actual)", "def serialize(self):\n pass", "def serialize(self, obj):\n pass", "def test_serialize_deserialize1(self):\n for command in Command:\n serialized = command.serialize()\n deserialized = Command.deserialize(serialized)\n self.assertTrue(deserialized is command)", "def convertAndAssertJSONEqual(self, data, expected_data, msg=None):\n\n super(SimpleTestCase, self).assertJSONEqual(json.dumps(data, cls=DjangoJSONEncoder), expected_data, msg)", "def test_repr(self, cls):\n inst = cls()\n # Exact values aren't a concern so long as neither direction\n # raises an exception.\n pkl = cloudpickle.dumps(inst)\n cloudpickle.loads(pkl)", "def test_can_deserialize_plain_object(self):\n handler = BaseRestHandler(mock.MagicMock(), mock.MagicMock())\n handler._write_buffer = []\n obj = SerializeMe()\n obj.key = \"value\"\n handler.write_object(obj)\n res = json.loads(handler._write_buffer[0])\n self.assertDictEqual(res, {\"key\": \"value\"})", "def jucify(self):\n to_ret = 
False\n # TODO: finish this\n return to_ret", "def can_be_pickled(x):\n try:\n s = BytesIO() \n pickle.dump(x, s) \n return True\n except:\n return False", "def test_serialization(self):\n serialized = self.Gs.as_dict()\n unserialized = BayesianNetwork.from_dict(serialized)\n\n self.assertDictEqual(serialized, unserialized.as_dict())", "def __getstate__(self):\n raise IOError(\"You tried to serialize something that should not\"\n \" be serialized.\")", "def is_picklable(obj):\n try:\n pickle.dumps(obj)\n\n except pickle.PicklingError:\n return False\n return True", "def test_time_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def serialize(self, data):", "def is_text_serializer(serializer):\n return isinstance(serializer.dumps({}), text_type)", "def test_return_value(self):\n self.assertEqual(self.r0.from_json_string(self.string), self.d)", "def test_quantitative_filter_deserialization(self):\n serializer = serializers.FilterSerializer(data=self.external_filter)\n self.assertTrue(serializer.is_valid())\n self.assertDictEqual(serializer.validated_data, self.internal_filter)", "def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy", "def serialize(self, data):\n if isinstance(data, str):\n return data\n\n if hasattr(data, \"read\"):\n return data.read()\n\n raise ValueError(\"Unable to handle input format: %s\" % type(data))", "def converted(self):\n if self.converters:\n return self.converters.converted(self)\n return False", "def _check_primitive(self) -> PossibleResult[T]:\n if self.constructor in _PRIMITIVES:\n if self.obj is UNDEFINED:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n if self.obj is None:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n if not isinstance(self.obj, self.constructor):\n if not self.convert_primitives:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n try:\n return self.constructor(self.obj) # type: ignore\n except (ValueError, TypeError) as error:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n ) from error\n return self.obj\n return NO_RESULT", "def testRoundtrip(self):\n key = createKey()\n data = {u'user': u'aliafshar', u'id': u'91821212'}\n token = dataToToken(key, data)\n self.assertEqual(data, tokenToData(key, token))", "def test_roundtrip(self):\n self.read_container = self.roundtripContainer()\n self.assertIsNotNone(str(self.container)) # added as a test to make sure printing works\n self.assertIsNotNone(str(self.read_container))\n # make sure we get a completely new object\n self.assertNotEqual(id(self.container), id(self.read_container))\n self.assertIs(self.read_nwbfile.objects[self.container.object_id], self.read_container)\n self.assertContainerEqual(self.read_container, self.container)", "def is_serializable(instance_or_class: Any) -> bool:\n return hasattr(instance_or_class, SERDE_SCOPE)", "def _deserialiseLightweight(self,unpickled):\n if self.sampleid != unpickled['sampleId']:\n raise RuntimeError('sampleids do not match: '+self.sampleid+' '+unpickled['sampleId'])\n if self.condition != unpickled['condition']:\n raise RuntimeError('conditions do not match: '+self.condition+' '+unpickled['condition'])\n if self.wellids != unpickled['wellIds']:\n raise RuntimeError('wellids do not match: 
'+self.wellids+' '+unpickled['wellIds'])\n if self._wellIndices != unpickled['wellIndices']:\n raise RuntimeError('wellIndices do not match: '+self._wellIndices+' '+unpickled['wellIndices'])\n self._activeWellIndices=unpickled['activeWellIndices']", "def test_serialize(state):\n assert len(state.players) == 2\n st_data = state.to_data()\n\n assert st_data, \"Expect that we would have some data!\"\n assert len(st_data[\"deck\"]) == 52\n assert len(st_data[\"discarded\"]) == 0\n # Render player subset properly\n assert len(st_data[\"players\"]) == 2\n assert len(st_data[\"players\"][0][\"hand\"]) == 0\n\n new_state = MockState.from_data(st_data)\n assert new_state.__class__ == MockState\n st_data_new = new_state.to_data()\n\n assert st_data == st_data_new", "def test_serialize(self):\n self.assertEqual(self.blogs.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article'},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article'},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article'}\n ])", "def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def _to_serialize(value):\n return value.serialize() if value is not None else None", "def serialize(self):", "def dump_payload(self, obj):\n return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs))", "def test_dict_serialization(self, molecule):\n serialized = molecule.to_dict()\n molecule_copy = Molecule.from_dict(serialized)\n assert molecule == molecule_copy", "def test_json_serialization(self, molecule):\n molecule_copy = Molecule.from_json(molecule.to_json())\n assert molecule_copy == molecule\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def test_to_plain_python_obj_mixed(test_input):\n # It's enough that we don't get an exception here\n output = r.to_plain_python_obj(test_input)\n # We should not get a json conversion error\n json.dumps(output)", "def test_serialize_a_recommendation(self):\n recommendation = Recommendation(product_id=1, recommendation_product_id=2, relationship=Type.UP_SELL)\n data = recommendation.serialize()\n self.assertNotEqual(data, None)\n self.assertIn(\"product_id\", data)\n self.assertEqual(data[\"product_id\"], recommendation.product_id)\n self.assertIn(\"recommendation_product_id\", data)\n self.assertEqual(data[\"recommendation_product_id\"], recommendation.recommendation_product_id)\n self.assertIn(\"relationship\", data)\n self.assertEqual(data[\"relationship\"], recommendation.relationship.name)", "def test_generated_protocol_serialisation(self):\n # create a message\n reply_message = {1: \"number one\", 2: \"number two\", 7: \"number seven\"}\n # message 1\n message = TwoPartyNegotiationMessage(\n message_id=1,\n dialogue_reference=(str(0), \"\"),\n target=0,\n performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,\n reply_message=reply_message,\n )\n\n # serialise the message\n encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)\n\n # deserialise the message\n decoded_message = TwoPartyNegotiationSerializer().decode(\n encoded_message_in_bytes\n )\n\n # Compare the original message with the serialised+deserialised message\n assert decoded_message.message_id == 
message.message_id\n assert decoded_message.dialogue_reference == message.dialogue_reference\n assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0]\n assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1]\n assert decoded_message.target == message.target\n assert decoded_message.performative == message.performative\n assert decoded_message.reply_message == message.reply_message", "def test_boolean_in_serializer() -> None:\n assert cv.custom_serializer(cv.boolean) == {\n \"type\": \"boolean\",\n }", "def pickling_check(instance):\n pkled_instance = pickle.loads(pickle.dumps(instance))\n equality_check(instance, pkled_instance)", "def json(self):\r\n try:\r\n import simplejson as json\r\n except ImportError:\r\n import json\r\n return Assert(json.loads(self.obj))", "def test_user_profile_serialization(self):\n\n # Construct a json representation of a UserProfile model\n user_profile_model_json = {}\n user_profile_model_json['id'] = 'testString'\n user_profile_model_json['iam_id'] = 'testString'\n user_profile_model_json['realm'] = 'testString'\n user_profile_model_json['user_id'] = 'testString'\n user_profile_model_json['firstname'] = 'testString'\n user_profile_model_json['lastname'] = 'testString'\n user_profile_model_json['state'] = 'testString'\n user_profile_model_json['email'] = 'testString'\n user_profile_model_json['phonenumber'] = 'testString'\n user_profile_model_json['altphonenumber'] = 'testString'\n user_profile_model_json['photo'] = 'testString'\n user_profile_model_json['account_id'] = 'testString'\n\n # Construct a model instance of UserProfile by calling from_dict on the json representation\n user_profile_model = UserProfile.from_dict(user_profile_model_json)\n assert user_profile_model != False\n\n # Construct a model instance of UserProfile by calling from_dict on the json representation\n user_profile_model_dict = UserProfile.from_dict(user_profile_model_json).__dict__\n user_profile_model2 = UserProfile(**user_profile_model_dict)\n\n # Verify the model instances are equivalent\n assert user_profile_model == user_profile_model2\n\n # Convert model instance back to dict and verify no loss of data\n user_profile_model_json2 = user_profile_model.to_dict()\n assert user_profile_model_json2 == user_profile_model_json", "def test_default_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = sy.serialize(obj, to_proto=True)\n\n assert sy.serialize(obj) == blob", "def _check_serialize(self, kwargs):\n for k in kwargs:\n if k in self.backend.TO_SERIALIZE:\n if isinstance(kwargs[k], dict):\n kwargs[k] = {j: self.backend.serialize(kwargs[k][j])\n for j in kwargs[k]}\n elif isinstance(kwargs[k], list):\n kwargs[k] = [self.backend.serialize(j)\n for j in kwargs[k]]\n else:\n raise TypeError('Your iterable should be a dict or a list')\n return kwargs", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n 
messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def test_none(self):\n self.assertEqual(self.obj.to_json_string(None), '[]')", "def serialize(self, obj):\n return obj", "def objDictConsistency(self, cls, hard, dis=False):\n orig = cls(\"OK\")\n # the hard way\n if hard:\n p = self.dumpWithPreobjects(None, orig.__dict__, orig, dis=dis)\n d, obj = self.pickler.loads(p)[-1]\n else:\n p = self.dumpWithPreobjects(None, orig, orig.__dict__, dis=dis)\n obj, d = self.pickler.loads(p)[-1]\n self.assertTrue(type(obj)is type(orig))\n self.assertTrue(type(obj.__dict__) is type(orig.__dict__)) # @IgnorePep8\n self.assertEquals(set(obj.__dict__.keys()), set(orig.__dict__.keys()))\n self.assertTrue(obj.__dict__ is d)\n self.assertTrue(obj.isOk() is True)", "def backcast(self) -> bool:\n return self.__backcast", "def serialize(self):\n raise NotImplemented()", "def test_raw_data(self):\n self.assertEqual(self.tester.raw_data, 1)", "def serialize(self, data):\n return data", "def test_base_case_json(self):\n json_data = '{\"a\": 1}'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a\" : 1}'))", "def test_snp_json_roundtrip(self):\n given = self.ntwk1\n actual = rf.from_json_string(rf.to_json_string(given))\n self.assertEqual(actual, given)\n self.assertEqual(actual.frequency, given.frequency)\n self.assertEqual(actual.name, given.name)\n self.assertEqual(actual.comments, given.comments)\n self.assertEqual(actual.z0.tolist(), given.z0.tolist())\n self.assertEqual(actual.port_names, given.port_names)\n self.assertEqual(actual.variables, given.variables)", "def test_to_json_string(self):\n self.assertEqual(Base.to_json_string(None), \"[]\")\n self.assertTrue(type(Base.to_json_string(None)) is str)\n self.assertEqual(Base.to_json_string([]), \"[]\")\n self.assertTrue(type(Base.to_json_string([])) is str)\n myDict = {'id': 4, 'width': 3, 'height': 4, 'x': 1, 'y': 3}\n myDict2 = {'id': 3, 'width': 6, 'height': 2, 'x': 1, 'y': 9}\n jsonized = Base.to_json_string([myDict, myDict2])\n self.assertTrue(type(jsonized) is str)\n myDict3 = json.loads(jsonized)\n self.assertEqual(myDict3, [myDict, myDict2])", "def round_trip(message):\n return bits.bits_to_message(bits.message_to_bits(message)) == message" ]
[ "0.65497476", "0.64250165", "0.6318902", "0.6110213", "0.6092596", "0.60524327", "0.6047912", "0.60349566", "0.59723496", "0.59635586", "0.59397215", "0.59294575", "0.59294575", "0.59081197", "0.58982784", "0.58918864", "0.58906686", "0.5878805", "0.58623", "0.5853447", "0.5826075", "0.58153296", "0.5812717", "0.58071756", "0.5794471", "0.5785449", "0.5782361", "0.5774637", "0.57684493", "0.5760834", "0.57446843", "0.5724752", "0.5709062", "0.569147", "0.5687801", "0.56571716", "0.56280816", "0.5620996", "0.5619737", "0.56180525", "0.56041485", "0.5598943", "0.5598287", "0.5595587", "0.55944103", "0.55884326", "0.55857265", "0.5569962", "0.55649996", "0.5556069", "0.5549877", "0.5548616", "0.5548238", "0.55424994", "0.55392677", "0.5510641", "0.5503435", "0.5500031", "0.54999846", "0.54997605", "0.5494271", "0.5493163", "0.5482728", "0.54794973", "0.54633415", "0.54618675", "0.5460477", "0.5458001", "0.54513127", "0.544926", "0.54448813", "0.5435727", "0.5431473", "0.54137164", "0.5409626", "0.5409365", "0.54088485", "0.54026324", "0.5399572", "0.539559", "0.5390935", "0.53819275", "0.5372337", "0.53666735", "0.5365913", "0.53636456", "0.53634423", "0.5358752", "0.5352307", "0.5352307", "0.5345392", "0.5338859", "0.5332617", "0.5327083", "0.5325902", "0.53254426", "0.5323825", "0.53235537", "0.53190833", "0.53104806", "0.53097904" ]
0.0
-1
Roundtrip to check what we serialise is what we get back.
def test_serialises_and_deserialises_hs00_message_correctly_for_minimal_1d_data( self, ): original_hist = { "timestamp": 123456, "current_shape": [5], "dim_metadata": [ { "length": 5, "unit": "m", "label": "some_label", "bin_boundaries": np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]), } ], "data": np.array([1.0, 2.0, 3.0, 4.0, 5.0]), } buf = serialise_hs00(original_hist) hist = deserialise_hs00(buf) assert hist["source"] == "" assert hist["timestamp"] == original_hist["timestamp"] assert hist["current_shape"] == original_hist["current_shape"] self._check_metadata_for_one_dimension( hist["dim_metadata"][0], original_hist["dim_metadata"][0] ) assert np.array_equal(hist["data"], original_hist["data"]) assert len(hist["errors"]) == 0 assert hist["info"] == ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dumps(self):\n result = self.mapper.dumps(self.serialization)\n self.mapper.to_dict.assert_called_once_with(\"custom\")\n self.serialization.assert_called_once_with(\n self.mapper.to_dict.return_value\n )\n self.assertIs(result, self.serialization.return_value)", "def _serialise(self):\n # TODO (M Foley)\n pass", "def test_return_type(self):\n self.assertEqual(type(self.obj.to_json_string(self.valid_ld)), str)", "def test_dump_single(self):\n result = self.serializer.dump(self.schema_to_serialize)\n self.assertIsInstance(result, dict)", "def revert(self):\n if hasattr(self, '_init_data'):\n self.deserialize(self._init_data)\n return True\n return False", "def test_serialize(self):\n self.assert_raises(TypeError, self.instance.serialize, (1,))", "def test_serialize_no_metadata(self):\n pass # pragma: no cover", "def isJWS_unserialized_single(x):\n if isinstance(x, dict) \\\n and \"payload\" in x and \"signature\" in x \\\n and (\"protected\" in x or \"unprotected\" in x):\n try: \n if \"protected\" in x: \n json.loads(x[\"protected\"])\n return True\n except:\n return False\n else: \n return False", "def roundtrip(data):\r\n body = xmlrpclib.dumps(data)\r\n result = xmlrpclib.loads(body)[0]\r\n if result != data:\r\n print result", "def test_serialize(value, expected):\n assert json_dumps(value) == expected", "def assertDeserializeNonString(self):\r\n self.assertDeserializeEqual(None, None)\r\n self.assertDeserializeEqual(3.14, 3.14)\r\n self.assertDeserializeEqual(True, True)\r\n self.assertDeserializeEqual([10], [10])\r\n self.assertDeserializeEqual({}, {})\r\n self.assertDeserializeEqual([], [])\r\n self.assertDeserializeEqual(None, 'null')", "def test_serialize(self):\n self.assertEqual(self.scrapes.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article', 'authors': [{'lastname': u'Swain', 'firstname': u'Matt'}]},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article', 'authors': [{'lastname': u'Smith', 'firstname': u'John'}]},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article', 'authors': [{'lastname': u'Bond', 'firstname': u'James'}]}\n ])", "def test_serialize(self):\n self.assertEqual(self.scrapes.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article', 'authors': [{'lastname': u'Swain', 'firstname': u'Matt'}]},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article', 'authors': [{'lastname': u'Smith', 'firstname': u'John'}]},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article', 'authors': [{'lastname': u'Bond', 'firstname': u'James'}]}\n ])", "def test_serialize(self):\n r = self.RNA(\"ugagg\")\n assert dumps(r)", "def is_sedes(obj):\n return hasattr(obj, 'serialize') and hasattr(obj, 'deserialize')", "def isJWP_unserialized(x):\n if isinstance(x, dict) \\\n and \"payload\" in x and isinstance(x[\"unprotected\"], dict)\\\n and \"unprotected\" in x and \"signature\" not in x:\n return True\n else:\n return False", "def isJWE_unserialized(x):\n return isJWE_unserialized_single(x) or isJWE_unserialized_multi(x)", "def test_serialize_sinfo(self):\n self.assert_raises(RuntimeError, self.instance.serialize,\n self.testing_options['objects'][0],\n add_serializer_info=True)", "def isJWS_unserialized(x):\n return isJWS_unserialized_single(x) or isJWS_unserialized_multi(x)", "def test_dumps(self):\n data = \"something\"\n result = self.mapper.loads(self.deser_fn, data)\n self.mapper.from_dict.assert_called_once_with(\n self.deser_fn.return_value, \"custom\"\n 
)\n self.deser_fn.assert_called_once_with(data)\n self.assertIs(result, self.mapper.from_dict.return_value)", "def is_stringified(self) -> bool:\n return self._stringify", "def isJWE_unserialized_single(x):\n if isinstance(x, dict) \\\n and (\"unprotected\" in x or \"protected\" in x) \\\n and (\"ciphertext\" in x):\n try:\n if \"protected\" in x:\n json.loads(x[\"protected\"])\n return True\n except:\n return False\n else:\n return False", "def test_encode(self):\n result = json.loads(self.cls.objects.to_json())\n for index, item in enumerate(result):\n self.assertNotIn(\n \"to_json_exclude\", item,\n (\"to_json_exclude found at index {}\").format(index)\n )\n self.assertNotIn(\n \"json_exclude\", item,\n (\"json_exclude found at index {}\").format(index)\n )\n self.assertIn(\n \"from_json_exclude\", item,\n (\"from_json_exclude not found at index {}\").format(index)\n )\n self.assertIn(\n \"required\", item,\n (\"required not found at index {}\").format(index)\n )", "def _post_deserialize (self):\n pass", "def test_serialize_none(self):\n self.assertEqual(serialize(None), 'null')", "def isJOSE_unserialized(x):\n return isJWS_unserialized(x) or isJWE_unserialized(x) \\\n or isJWP_unserialized(x)", "def test_08(self):\n ret = Base.to_json_string(None)\n self.assertEqual(ret, \"[]\")", "def assertSerializeDeserialize(self, obj, version=None):\n tested_versions = [version] if version is not None else Version.supported\n for version in tested_versions:\n constructor = obj.__class__.from_json\n json_obj = obj.to_json(version)\n clone = constructor(json_obj)\n\n self.assertEqual(obj.__class__, clone.__class__)\n\n if isinstance(obj, dict):\n orig_dict = obj\n clone_dict = clone\n else:\n orig_dict = obj.__dict__\n clone_dict = clone.__dict__\n\n self.assertEqual(orig_dict, clone_dict)", "def test_quantitative_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def is_jsonable(self, input):\n try:\n json.dumps(input)\n return True\n except (TypeError, OverflowError):\n return False", "def test_encode_decode(self):\n assert self._test == pybinn.loads(pybinn.dumps(self._test))", "def test_valid_ld(self):\n self.assertEqual(self.obj.to_json_string(self.valid_ld),\n json.dumps(self.valid_ld))", "def assertDeserializeEqual(self, expected, arg):\r\n assert_equals(expected, deserialize_field(self.test_field(), arg))", "def test_dumps(self):\n schema = self.UnitTestingSchema()\n serializer = JSONSchema()\n self.assertIsInstance(serializer.dumps(schema), str)", "def test_serialization_deserialization(self):\n\n original_time = now()\n serialized_time = DatetimeMapper.forward(original_time)\n assert serialized_time == original_time.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n\n deserialized_time = DatetimeMapper.backward(serialized_time)\n assert original_time == deserialized_time\n\n deserialized_time = DatetimeMapper.backward(None)\n assert isinstance(deserialized_time, datetime)", "def _TestReadSerialized(self, serializer_object, json_dict):\n # We use json.dumps to make sure the dict does not serialize into\n # an invalid JSON string such as one that contains Python string prefixes\n # like b'' or u''.\n json_string = json.dumps(json_dict)\n unserialized_object = serializer_object.ReadSerialized(json_string)\n\n self.assertIsNotNone(unserialized_object)\n return unserialized_object", "def test_non_list_of_dicts_arg(self):\n self.assertEqual(self.obj.to_json_string(666), '666')", "def 
test_deserialize(self):\r\n\r\n # test that from_json produces no exceptions\r\n self.assertDeserializeEqual('10:20:30', '\"10:20:30\"')", "def check_round_trip(data: dict, logger: Logger) -> Optional[bytes]:\n try:\n as_json_text = json.dumps(data, default=encode_value).encode(\"utf-8\")\n except Exception as e:\n report_error(\"CumulusCI found an unusual datatype in your config:\", e, logger)\n return None\n try:\n test_load = load_config_from_json_or_pickle(as_json_text)\n assert _simplify_config(test_load) == _simplify_config(\n data\n ), f\"JSON did not round-trip-cleanly {test_load}, {data}\"\n except Exception as e: # pragma: no cover\n report_error(\"CumulusCI found a problem saving your config:\", e, logger)\n return None\n assert isinstance(as_json_text, bytes)\n return as_json_text", "def is_raw(self):\n return not self.has_structure", "def parse_round_trip(self):\n parsed = self.test_proto.parse()\n round_trip = avro.protocol.parse(str(parsed))\n self.assertEqual(parsed, round_trip)", "def test_serialize_and_deserialize_returns_unchanged_collection(\n self\n ) -> None:\n self.assertEqual(\n self.collection.to_dict(),\n collection_domain.Collection.deserialize(\n self.collection.serialize()).to_dict())", "def test_serialize(self, val):\n val_orig = FitVal(*val)\n\n ser = json.dumps(val_orig, cls=ExperimentEncoder)\n val_deser = json.loads(ser, cls=ExperimentDecoder)\n\n self.assertEqual(val_orig, val_deser)", "def recvcheck(self):\n\n data = self.recv()\n if self.serializer == 'json' and data and isinstance(data, dict):\n if '@error' in data:\n exception = getattr(builtins, data['@error'])\n if (isinstance(exception, type) and\n issubclass(exception, Exception)):\n raise exception(data['@message'])\n else:\n if isinstance(data, Exception):\n raise data\n return data", "def test_encoding_round_trip(cell):\n orig = copy.copy(cell.__dict__)\n cell._from_serializeable_dict(cell._to_serializeable_dict())\n round_trip = cell.__dict__\n for key in cell._allowed:\n if type(orig[key]) == np.ndarray or type(orig[key]) == list:\n assert all(orig[key] == round_trip[key])\n else:\n assert orig[key] == round_trip[key]", "def test_json(self):\n\t\tdecoded = json.loads(json.dumps(self.filter.to_js_obj()))\n\t\tself.assertIsNotNone(decoded, \"JSON conversion failed!\")\n\t\tself.assertEqual(self.filter.to_js_obj(), decoded, \"JSON conversion mismatch!\")", "def test_to_json(self):\n self.amenity_json = self.amenity.to_json()\n actual = 1\n try:\n serialized = json.dumps(self.amenity_json)\n except:\n actual = 0\n self.assertTrue(1 == actual)", "def serialize(self):\n pass", "def serialize(self, obj):\n pass", "def test_serialize_deserialize1(self):\n for command in Command:\n serialized = command.serialize()\n deserialized = Command.deserialize(serialized)\n self.assertTrue(deserialized is command)", "def convertAndAssertJSONEqual(self, data, expected_data, msg=None):\n\n super(SimpleTestCase, self).assertJSONEqual(json.dumps(data, cls=DjangoJSONEncoder), expected_data, msg)", "def test_repr(self, cls):\n inst = cls()\n # Exact values aren't a concern so long as neither direction\n # raises an exception.\n pkl = cloudpickle.dumps(inst)\n cloudpickle.loads(pkl)", "def test_can_deserialize_plain_object(self):\n handler = BaseRestHandler(mock.MagicMock(), mock.MagicMock())\n handler._write_buffer = []\n obj = SerializeMe()\n obj.key = \"value\"\n handler.write_object(obj)\n res = json.loads(handler._write_buffer[0])\n self.assertDictEqual(res, {\"key\": \"value\"})", "def jucify(self):\n to_ret = 
False\n # TODO: finish this\n return to_ret", "def can_be_pickled(x):\n try:\n s = BytesIO() \n pickle.dump(x, s) \n return True\n except:\n return False", "def test_serialization(self):\n serialized = self.Gs.as_dict()\n unserialized = BayesianNetwork.from_dict(serialized)\n\n self.assertDictEqual(serialized, unserialized.as_dict())", "def __getstate__(self):\n raise IOError(\"You tried to serialize something that should not\"\n \" be serialized.\")", "def is_picklable(obj):\n try:\n pickle.dumps(obj)\n\n except pickle.PicklingError:\n return False\n return True", "def test_time_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def serialize(self, data):", "def is_text_serializer(serializer):\n return isinstance(serializer.dumps({}), text_type)", "def test_return_value(self):\n self.assertEqual(self.r0.from_json_string(self.string), self.d)", "def test_quantitative_filter_deserialization(self):\n serializer = serializers.FilterSerializer(data=self.external_filter)\n self.assertTrue(serializer.is_valid())\n self.assertDictEqual(serializer.validated_data, self.internal_filter)", "def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy", "def serialize(self, data):\n if isinstance(data, str):\n return data\n\n if hasattr(data, \"read\"):\n return data.read()\n\n raise ValueError(\"Unable to handle input format: %s\" % type(data))", "def converted(self):\n if self.converters:\n return self.converters.converted(self)\n return False", "def _check_primitive(self) -> PossibleResult[T]:\n if self.constructor in _PRIMITIVES:\n if self.obj is UNDEFINED:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n if self.obj is None:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n if not isinstance(self.obj, self.constructor):\n if not self.convert_primitives:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n try:\n return self.constructor(self.obj) # type: ignore\n except (ValueError, TypeError) as error:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n ) from error\n return self.obj\n return NO_RESULT", "def testRoundtrip(self):\n key = createKey()\n data = {u'user': u'aliafshar', u'id': u'91821212'}\n token = dataToToken(key, data)\n self.assertEqual(data, tokenToData(key, token))", "def test_roundtrip(self):\n self.read_container = self.roundtripContainer()\n self.assertIsNotNone(str(self.container)) # added as a test to make sure printing works\n self.assertIsNotNone(str(self.read_container))\n # make sure we get a completely new object\n self.assertNotEqual(id(self.container), id(self.read_container))\n self.assertIs(self.read_nwbfile.objects[self.container.object_id], self.read_container)\n self.assertContainerEqual(self.read_container, self.container)", "def is_serializable(instance_or_class: Any) -> bool:\n return hasattr(instance_or_class, SERDE_SCOPE)", "def _deserialiseLightweight(self,unpickled):\n if self.sampleid != unpickled['sampleId']:\n raise RuntimeError('sampleids do not match: '+self.sampleid+' '+unpickled['sampleId'])\n if self.condition != unpickled['condition']:\n raise RuntimeError('conditions do not match: '+self.condition+' '+unpickled['condition'])\n if self.wellids != unpickled['wellIds']:\n raise RuntimeError('wellids do not match: 
'+self.wellids+' '+unpickled['wellIds'])\n if self._wellIndices != unpickled['wellIndices']:\n raise RuntimeError('wellIndices do not match: '+self._wellIndices+' '+unpickled['wellIndices'])\n self._activeWellIndices=unpickled['activeWellIndices']", "def test_serialize(state):\n assert len(state.players) == 2\n st_data = state.to_data()\n\n assert st_data, \"Expect that we would have some data!\"\n assert len(st_data[\"deck\"]) == 52\n assert len(st_data[\"discarded\"]) == 0\n # Render player subset properly\n assert len(st_data[\"players\"]) == 2\n assert len(st_data[\"players\"][0][\"hand\"]) == 0\n\n new_state = MockState.from_data(st_data)\n assert new_state.__class__ == MockState\n st_data_new = new_state.to_data()\n\n assert st_data == st_data_new", "def test_serialize(self):\n self.assertEqual(self.blogs.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article'},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article'},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article'}\n ])", "def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def _to_serialize(value):\n return value.serialize() if value is not None else None", "def serialize(self):", "def dump_payload(self, obj):\n return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs))", "def test_dict_serialization(self, molecule):\n serialized = molecule.to_dict()\n molecule_copy = Molecule.from_dict(serialized)\n assert molecule == molecule_copy", "def test_json_serialization(self, molecule):\n molecule_copy = Molecule.from_json(molecule.to_json())\n assert molecule_copy == molecule\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def test_to_plain_python_obj_mixed(test_input):\n # It's enough that we don't get an exception here\n output = r.to_plain_python_obj(test_input)\n # We should not get a json conversion error\n json.dumps(output)", "def test_serialize_a_recommendation(self):\n recommendation = Recommendation(product_id=1, recommendation_product_id=2, relationship=Type.UP_SELL)\n data = recommendation.serialize()\n self.assertNotEqual(data, None)\n self.assertIn(\"product_id\", data)\n self.assertEqual(data[\"product_id\"], recommendation.product_id)\n self.assertIn(\"recommendation_product_id\", data)\n self.assertEqual(data[\"recommendation_product_id\"], recommendation.recommendation_product_id)\n self.assertIn(\"relationship\", data)\n self.assertEqual(data[\"relationship\"], recommendation.relationship.name)", "def test_generated_protocol_serialisation(self):\n # create a message\n reply_message = {1: \"number one\", 2: \"number two\", 7: \"number seven\"}\n # message 1\n message = TwoPartyNegotiationMessage(\n message_id=1,\n dialogue_reference=(str(0), \"\"),\n target=0,\n performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,\n reply_message=reply_message,\n )\n\n # serialise the message\n encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)\n\n # deserialise the message\n decoded_message = TwoPartyNegotiationSerializer().decode(\n encoded_message_in_bytes\n )\n\n # Compare the original message with the serialised+deserialised message\n assert decoded_message.message_id == 
message.message_id\n assert decoded_message.dialogue_reference == message.dialogue_reference\n assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0]\n assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1]\n assert decoded_message.target == message.target\n assert decoded_message.performative == message.performative\n assert decoded_message.reply_message == message.reply_message", "def test_boolean_in_serializer() -> None:\n assert cv.custom_serializer(cv.boolean) == {\n \"type\": \"boolean\",\n }", "def pickling_check(instance):\n pkled_instance = pickle.loads(pickle.dumps(instance))\n equality_check(instance, pkled_instance)", "def json(self):\r\n try:\r\n import simplejson as json\r\n except ImportError:\r\n import json\r\n return Assert(json.loads(self.obj))", "def test_user_profile_serialization(self):\n\n # Construct a json representation of a UserProfile model\n user_profile_model_json = {}\n user_profile_model_json['id'] = 'testString'\n user_profile_model_json['iam_id'] = 'testString'\n user_profile_model_json['realm'] = 'testString'\n user_profile_model_json['user_id'] = 'testString'\n user_profile_model_json['firstname'] = 'testString'\n user_profile_model_json['lastname'] = 'testString'\n user_profile_model_json['state'] = 'testString'\n user_profile_model_json['email'] = 'testString'\n user_profile_model_json['phonenumber'] = 'testString'\n user_profile_model_json['altphonenumber'] = 'testString'\n user_profile_model_json['photo'] = 'testString'\n user_profile_model_json['account_id'] = 'testString'\n\n # Construct a model instance of UserProfile by calling from_dict on the json representation\n user_profile_model = UserProfile.from_dict(user_profile_model_json)\n assert user_profile_model != False\n\n # Construct a model instance of UserProfile by calling from_dict on the json representation\n user_profile_model_dict = UserProfile.from_dict(user_profile_model_json).__dict__\n user_profile_model2 = UserProfile(**user_profile_model_dict)\n\n # Verify the model instances are equivalent\n assert user_profile_model == user_profile_model2\n\n # Convert model instance back to dict and verify no loss of data\n user_profile_model_json2 = user_profile_model.to_dict()\n assert user_profile_model_json2 == user_profile_model_json", "def test_default_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = sy.serialize(obj, to_proto=True)\n\n assert sy.serialize(obj) == blob", "def _check_serialize(self, kwargs):\n for k in kwargs:\n if k in self.backend.TO_SERIALIZE:\n if isinstance(kwargs[k], dict):\n kwargs[k] = {j: self.backend.serialize(kwargs[k][j])\n for j in kwargs[k]}\n elif isinstance(kwargs[k], list):\n kwargs[k] = [self.backend.serialize(j)\n for j in kwargs[k]]\n else:\n raise TypeError('Your iterable should be a dict or a list')\n return kwargs", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n 
messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def test_none(self):\n self.assertEqual(self.obj.to_json_string(None), '[]')", "def serialize(self, obj):\n return obj", "def objDictConsistency(self, cls, hard, dis=False):\n orig = cls(\"OK\")\n # the hard way\n if hard:\n p = self.dumpWithPreobjects(None, orig.__dict__, orig, dis=dis)\n d, obj = self.pickler.loads(p)[-1]\n else:\n p = self.dumpWithPreobjects(None, orig, orig.__dict__, dis=dis)\n obj, d = self.pickler.loads(p)[-1]\n self.assertTrue(type(obj)is type(orig))\n self.assertTrue(type(obj.__dict__) is type(orig.__dict__)) # @IgnorePep8\n self.assertEquals(set(obj.__dict__.keys()), set(orig.__dict__.keys()))\n self.assertTrue(obj.__dict__ is d)\n self.assertTrue(obj.isOk() is True)", "def backcast(self) -> bool:\n return self.__backcast", "def serialize(self):\n raise NotImplemented()", "def test_raw_data(self):\n self.assertEqual(self.tester.raw_data, 1)", "def serialize(self, data):\n return data", "def test_base_case_json(self):\n json_data = '{\"a\": 1}'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a\" : 1}'))", "def test_snp_json_roundtrip(self):\n given = self.ntwk1\n actual = rf.from_json_string(rf.to_json_string(given))\n self.assertEqual(actual, given)\n self.assertEqual(actual.frequency, given.frequency)\n self.assertEqual(actual.name, given.name)\n self.assertEqual(actual.comments, given.comments)\n self.assertEqual(actual.z0.tolist(), given.z0.tolist())\n self.assertEqual(actual.port_names, given.port_names)\n self.assertEqual(actual.variables, given.variables)", "def test_to_json_string(self):\n self.assertEqual(Base.to_json_string(None), \"[]\")\n self.assertTrue(type(Base.to_json_string(None)) is str)\n self.assertEqual(Base.to_json_string([]), \"[]\")\n self.assertTrue(type(Base.to_json_string([])) is str)\n myDict = {'id': 4, 'width': 3, 'height': 4, 'x': 1, 'y': 3}\n myDict2 = {'id': 3, 'width': 6, 'height': 2, 'x': 1, 'y': 9}\n jsonized = Base.to_json_string([myDict, myDict2])\n self.assertTrue(type(jsonized) is str)\n myDict3 = json.loads(jsonized)\n self.assertEqual(myDict3, [myDict, myDict2])", "def round_trip(message):\n return bits.bits_to_message(bits.message_to_bits(message)) == message" ]
[ "0.65497476", "0.64250165", "0.6318902", "0.6110213", "0.6092596", "0.60524327", "0.6047912", "0.60349566", "0.59723496", "0.59635586", "0.59397215", "0.59294575", "0.59294575", "0.59081197", "0.58982784", "0.58918864", "0.58906686", "0.5878805", "0.58623", "0.5853447", "0.5826075", "0.58153296", "0.5812717", "0.58071756", "0.5794471", "0.5785449", "0.5782361", "0.5774637", "0.57684493", "0.5760834", "0.57446843", "0.5724752", "0.5709062", "0.569147", "0.5687801", "0.56571716", "0.56280816", "0.5620996", "0.5619737", "0.56180525", "0.56041485", "0.5598943", "0.5598287", "0.5595587", "0.55944103", "0.55884326", "0.55857265", "0.5569962", "0.55649996", "0.5556069", "0.5549877", "0.5548616", "0.5548238", "0.55424994", "0.55392677", "0.5510641", "0.5503435", "0.5500031", "0.54999846", "0.54997605", "0.5494271", "0.5493163", "0.5482728", "0.54794973", "0.54633415", "0.54618675", "0.5460477", "0.5458001", "0.54513127", "0.544926", "0.54448813", "0.5435727", "0.5431473", "0.54137164", "0.5409626", "0.5409365", "0.54088485", "0.54026324", "0.5399572", "0.539559", "0.5390935", "0.53819275", "0.5372337", "0.53666735", "0.5365913", "0.53636456", "0.53634423", "0.5358752", "0.5352307", "0.5352307", "0.5345392", "0.5338859", "0.5332617", "0.5327083", "0.5325902", "0.53254426", "0.5323825", "0.53235537", "0.53190833", "0.53104806", "0.53097904" ]
0.0
-1
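For readers skimming these rows: below is a minimal, self-contained sketch of the serialise/deserialise round-trip that the positive documents in this block exercise. It is illustrative only and not part of the dataset rows. The package and import path (streaming_data_types.histogram_hs00) are assumptions not confirmed by this dump; the call pattern and the histogram dict layout simply mirror the test code in the record above.

# Minimal hs00 round-trip sketch (assumed import path; mirrors the record above).
import numpy as np

from streaming_data_types.histogram_hs00 import deserialise_hs00, serialise_hs00

# Histogram description in the same dict layout the record's test uses.
original_hist = {
    "timestamp": 123456,
    "current_shape": [3],
    "dim_metadata": [
        {
            "length": 3,
            "unit": "m",
            "label": "x",
            "bin_boundaries": np.array([0.0, 1.0, 2.0, 3.0]),
        }
    ],
    "data": np.array([1.0, 2.0, 3.0]),
}

buf = serialise_hs00(original_hist)      # dict -> FlatBuffers byte buffer
round_tripped = deserialise_hs00(buf)    # byte buffer -> dict-like histogram

# What we serialise is what we get back.
assert round_tripped["timestamp"] == original_hist["timestamp"]
assert round_tripped["current_shape"] == original_hist["current_shape"]
assert np.array_equal(round_tripped["data"], original_hist["data"])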
Roundtrip to check what we serialise is what we get back.
def test_serialises_and_deserialises_hs00_message_correctly_for_full_2d_data(self): original_hist = { "source": "some_source", "timestamp": 123456, "current_shape": [2, 5], "dim_metadata": [ { "length": 2, "unit": "b", "label": "y", "bin_boundaries": np.array([10.0, 11.0, 12.0]), }, { "length": 5, "unit": "m", "label": "x", "bin_boundaries": np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]), }, ], "last_metadata_timestamp": 123456, "data": np.array([[1.0, 2.0, 3.0, 4.0, 5.0], [6.0, 7.0, 8.0, 9.0, 10.0]]), "errors": np.array([[5.0, 4.0, 3.0, 2.0, 1.0], [10.0, 9.0, 8.0, 7.0, 6.0]]), "info": "info_string", } buf = serialise_hs00(original_hist) hist = deserialise_hs00(buf) assert hist["source"] == original_hist["source"] assert hist["timestamp"] == original_hist["timestamp"] assert hist["current_shape"] == original_hist["current_shape"] self._check_metadata_for_one_dimension( hist["dim_metadata"][0], original_hist["dim_metadata"][0] ) self._check_metadata_for_one_dimension( hist["dim_metadata"][1], original_hist["dim_metadata"][1] ) assert np.array_equal(hist["data"], original_hist["data"]) assert np.array_equal(hist["errors"], original_hist["errors"]) assert hist["info"] == original_hist["info"] assert ( hist["last_metadata_timestamp"] == original_hist["last_metadata_timestamp"] )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dumps(self):\n result = self.mapper.dumps(self.serialization)\n self.mapper.to_dict.assert_called_once_with(\"custom\")\n self.serialization.assert_called_once_with(\n self.mapper.to_dict.return_value\n )\n self.assertIs(result, self.serialization.return_value)", "def _serialise(self):\n # TODO (M Foley)\n pass", "def test_return_type(self):\n self.assertEqual(type(self.obj.to_json_string(self.valid_ld)), str)", "def test_dump_single(self):\n result = self.serializer.dump(self.schema_to_serialize)\n self.assertIsInstance(result, dict)", "def revert(self):\n if hasattr(self, '_init_data'):\n self.deserialize(self._init_data)\n return True\n return False", "def test_serialize(self):\n self.assert_raises(TypeError, self.instance.serialize, (1,))", "def test_serialize_no_metadata(self):\n pass # pragma: no cover", "def isJWS_unserialized_single(x):\n if isinstance(x, dict) \\\n and \"payload\" in x and \"signature\" in x \\\n and (\"protected\" in x or \"unprotected\" in x):\n try: \n if \"protected\" in x: \n json.loads(x[\"protected\"])\n return True\n except:\n return False\n else: \n return False", "def roundtrip(data):\r\n body = xmlrpclib.dumps(data)\r\n result = xmlrpclib.loads(body)[0]\r\n if result != data:\r\n print result", "def test_serialize(value, expected):\n assert json_dumps(value) == expected", "def assertDeserializeNonString(self):\r\n self.assertDeserializeEqual(None, None)\r\n self.assertDeserializeEqual(3.14, 3.14)\r\n self.assertDeserializeEqual(True, True)\r\n self.assertDeserializeEqual([10], [10])\r\n self.assertDeserializeEqual({}, {})\r\n self.assertDeserializeEqual([], [])\r\n self.assertDeserializeEqual(None, 'null')", "def test_serialize(self):\n self.assertEqual(self.scrapes.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article', 'authors': [{'lastname': u'Swain', 'firstname': u'Matt'}]},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article', 'authors': [{'lastname': u'Smith', 'firstname': u'John'}]},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article', 'authors': [{'lastname': u'Bond', 'firstname': u'James'}]}\n ])", "def test_serialize(self):\n self.assertEqual(self.scrapes.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article', 'authors': [{'lastname': u'Swain', 'firstname': u'Matt'}]},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article', 'authors': [{'lastname': u'Smith', 'firstname': u'John'}]},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article', 'authors': [{'lastname': u'Bond', 'firstname': u'James'}]}\n ])", "def test_serialize(self):\n r = self.RNA(\"ugagg\")\n assert dumps(r)", "def is_sedes(obj):\n return hasattr(obj, 'serialize') and hasattr(obj, 'deserialize')", "def isJWP_unserialized(x):\n if isinstance(x, dict) \\\n and \"payload\" in x and isinstance(x[\"unprotected\"], dict)\\\n and \"unprotected\" in x and \"signature\" not in x:\n return True\n else:\n return False", "def isJWE_unserialized(x):\n return isJWE_unserialized_single(x) or isJWE_unserialized_multi(x)", "def test_serialize_sinfo(self):\n self.assert_raises(RuntimeError, self.instance.serialize,\n self.testing_options['objects'][0],\n add_serializer_info=True)", "def isJWS_unserialized(x):\n return isJWS_unserialized_single(x) or isJWS_unserialized_multi(x)", "def test_dumps(self):\n data = \"something\"\n result = self.mapper.loads(self.deser_fn, data)\n self.mapper.from_dict.assert_called_once_with(\n self.deser_fn.return_value, \"custom\"\n 
)\n self.deser_fn.assert_called_once_with(data)\n self.assertIs(result, self.mapper.from_dict.return_value)", "def is_stringified(self) -> bool:\n return self._stringify", "def isJWE_unserialized_single(x):\n if isinstance(x, dict) \\\n and (\"unprotected\" in x or \"protected\" in x) \\\n and (\"ciphertext\" in x):\n try:\n if \"protected\" in x:\n json.loads(x[\"protected\"])\n return True\n except:\n return False\n else:\n return False", "def test_encode(self):\n result = json.loads(self.cls.objects.to_json())\n for index, item in enumerate(result):\n self.assertNotIn(\n \"to_json_exclude\", item,\n (\"to_json_exclude found at index {}\").format(index)\n )\n self.assertNotIn(\n \"json_exclude\", item,\n (\"json_exclude found at index {}\").format(index)\n )\n self.assertIn(\n \"from_json_exclude\", item,\n (\"from_json_exclude not found at index {}\").format(index)\n )\n self.assertIn(\n \"required\", item,\n (\"required not found at index {}\").format(index)\n )", "def _post_deserialize (self):\n pass", "def test_serialize_none(self):\n self.assertEqual(serialize(None), 'null')", "def isJOSE_unserialized(x):\n return isJWS_unserialized(x) or isJWE_unserialized(x) \\\n or isJWP_unserialized(x)", "def test_08(self):\n ret = Base.to_json_string(None)\n self.assertEqual(ret, \"[]\")", "def assertSerializeDeserialize(self, obj, version=None):\n tested_versions = [version] if version is not None else Version.supported\n for version in tested_versions:\n constructor = obj.__class__.from_json\n json_obj = obj.to_json(version)\n clone = constructor(json_obj)\n\n self.assertEqual(obj.__class__, clone.__class__)\n\n if isinstance(obj, dict):\n orig_dict = obj\n clone_dict = clone\n else:\n orig_dict = obj.__dict__\n clone_dict = clone.__dict__\n\n self.assertEqual(orig_dict, clone_dict)", "def test_quantitative_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def is_jsonable(self, input):\n try:\n json.dumps(input)\n return True\n except (TypeError, OverflowError):\n return False", "def test_encode_decode(self):\n assert self._test == pybinn.loads(pybinn.dumps(self._test))", "def test_valid_ld(self):\n self.assertEqual(self.obj.to_json_string(self.valid_ld),\n json.dumps(self.valid_ld))", "def assertDeserializeEqual(self, expected, arg):\r\n assert_equals(expected, deserialize_field(self.test_field(), arg))", "def test_dumps(self):\n schema = self.UnitTestingSchema()\n serializer = JSONSchema()\n self.assertIsInstance(serializer.dumps(schema), str)", "def test_serialization_deserialization(self):\n\n original_time = now()\n serialized_time = DatetimeMapper.forward(original_time)\n assert serialized_time == original_time.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n\n deserialized_time = DatetimeMapper.backward(serialized_time)\n assert original_time == deserialized_time\n\n deserialized_time = DatetimeMapper.backward(None)\n assert isinstance(deserialized_time, datetime)", "def _TestReadSerialized(self, serializer_object, json_dict):\n # We use json.dumps to make sure the dict does not serialize into\n # an invalid JSON string such as one that contains Python string prefixes\n # like b'' or u''.\n json_string = json.dumps(json_dict)\n unserialized_object = serializer_object.ReadSerialized(json_string)\n\n self.assertIsNotNone(unserialized_object)\n return unserialized_object", "def test_non_list_of_dicts_arg(self):\n self.assertEqual(self.obj.to_json_string(666), '666')", "def 
test_deserialize(self):\r\n\r\n # test that from_json produces no exceptions\r\n self.assertDeserializeEqual('10:20:30', '\"10:20:30\"')", "def check_round_trip(data: dict, logger: Logger) -> Optional[bytes]:\n try:\n as_json_text = json.dumps(data, default=encode_value).encode(\"utf-8\")\n except Exception as e:\n report_error(\"CumulusCI found an unusual datatype in your config:\", e, logger)\n return None\n try:\n test_load = load_config_from_json_or_pickle(as_json_text)\n assert _simplify_config(test_load) == _simplify_config(\n data\n ), f\"JSON did not round-trip-cleanly {test_load}, {data}\"\n except Exception as e: # pragma: no cover\n report_error(\"CumulusCI found a problem saving your config:\", e, logger)\n return None\n assert isinstance(as_json_text, bytes)\n return as_json_text", "def is_raw(self):\n return not self.has_structure", "def parse_round_trip(self):\n parsed = self.test_proto.parse()\n round_trip = avro.protocol.parse(str(parsed))\n self.assertEqual(parsed, round_trip)", "def test_serialize_and_deserialize_returns_unchanged_collection(\n self\n ) -> None:\n self.assertEqual(\n self.collection.to_dict(),\n collection_domain.Collection.deserialize(\n self.collection.serialize()).to_dict())", "def test_serialize(self, val):\n val_orig = FitVal(*val)\n\n ser = json.dumps(val_orig, cls=ExperimentEncoder)\n val_deser = json.loads(ser, cls=ExperimentDecoder)\n\n self.assertEqual(val_orig, val_deser)", "def recvcheck(self):\n\n data = self.recv()\n if self.serializer == 'json' and data and isinstance(data, dict):\n if '@error' in data:\n exception = getattr(builtins, data['@error'])\n if (isinstance(exception, type) and\n issubclass(exception, Exception)):\n raise exception(data['@message'])\n else:\n if isinstance(data, Exception):\n raise data\n return data", "def test_encoding_round_trip(cell):\n orig = copy.copy(cell.__dict__)\n cell._from_serializeable_dict(cell._to_serializeable_dict())\n round_trip = cell.__dict__\n for key in cell._allowed:\n if type(orig[key]) == np.ndarray or type(orig[key]) == list:\n assert all(orig[key] == round_trip[key])\n else:\n assert orig[key] == round_trip[key]", "def test_json(self):\n\t\tdecoded = json.loads(json.dumps(self.filter.to_js_obj()))\n\t\tself.assertIsNotNone(decoded, \"JSON conversion failed!\")\n\t\tself.assertEqual(self.filter.to_js_obj(), decoded, \"JSON conversion mismatch!\")", "def test_to_json(self):\n self.amenity_json = self.amenity.to_json()\n actual = 1\n try:\n serialized = json.dumps(self.amenity_json)\n except:\n actual = 0\n self.assertTrue(1 == actual)", "def serialize(self):\n pass", "def serialize(self, obj):\n pass", "def test_serialize_deserialize1(self):\n for command in Command:\n serialized = command.serialize()\n deserialized = Command.deserialize(serialized)\n self.assertTrue(deserialized is command)", "def convertAndAssertJSONEqual(self, data, expected_data, msg=None):\n\n super(SimpleTestCase, self).assertJSONEqual(json.dumps(data, cls=DjangoJSONEncoder), expected_data, msg)", "def test_repr(self, cls):\n inst = cls()\n # Exact values aren't a concern so long as neither direction\n # raises an exception.\n pkl = cloudpickle.dumps(inst)\n cloudpickle.loads(pkl)", "def test_can_deserialize_plain_object(self):\n handler = BaseRestHandler(mock.MagicMock(), mock.MagicMock())\n handler._write_buffer = []\n obj = SerializeMe()\n obj.key = \"value\"\n handler.write_object(obj)\n res = json.loads(handler._write_buffer[0])\n self.assertDictEqual(res, {\"key\": \"value\"})", "def jucify(self):\n to_ret = 
False\n # TODO: finish this\n return to_ret", "def can_be_pickled(x):\n try:\n s = BytesIO() \n pickle.dump(x, s) \n return True\n except:\n return False", "def test_serialization(self):\n serialized = self.Gs.as_dict()\n unserialized = BayesianNetwork.from_dict(serialized)\n\n self.assertDictEqual(serialized, unserialized.as_dict())", "def __getstate__(self):\n raise IOError(\"You tried to serialize something that should not\"\n \" be serialized.\")", "def is_picklable(obj):\n try:\n pickle.dumps(obj)\n\n except pickle.PicklingError:\n return False\n return True", "def test_time_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def serialize(self, data):", "def is_text_serializer(serializer):\n return isinstance(serializer.dumps({}), text_type)", "def test_return_value(self):\n self.assertEqual(self.r0.from_json_string(self.string), self.d)", "def test_quantitative_filter_deserialization(self):\n serializer = serializers.FilterSerializer(data=self.external_filter)\n self.assertTrue(serializer.is_valid())\n self.assertDictEqual(serializer.validated_data, self.internal_filter)", "def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy", "def serialize(self, data):\n if isinstance(data, str):\n return data\n\n if hasattr(data, \"read\"):\n return data.read()\n\n raise ValueError(\"Unable to handle input format: %s\" % type(data))", "def converted(self):\n if self.converters:\n return self.converters.converted(self)\n return False", "def _check_primitive(self) -> PossibleResult[T]:\n if self.constructor in _PRIMITIVES:\n if self.obj is UNDEFINED:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n if self.obj is None:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n if not isinstance(self.obj, self.constructor):\n if not self.convert_primitives:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n try:\n return self.constructor(self.obj) # type: ignore\n except (ValueError, TypeError) as error:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n ) from error\n return self.obj\n return NO_RESULT", "def testRoundtrip(self):\n key = createKey()\n data = {u'user': u'aliafshar', u'id': u'91821212'}\n token = dataToToken(key, data)\n self.assertEqual(data, tokenToData(key, token))", "def test_roundtrip(self):\n self.read_container = self.roundtripContainer()\n self.assertIsNotNone(str(self.container)) # added as a test to make sure printing works\n self.assertIsNotNone(str(self.read_container))\n # make sure we get a completely new object\n self.assertNotEqual(id(self.container), id(self.read_container))\n self.assertIs(self.read_nwbfile.objects[self.container.object_id], self.read_container)\n self.assertContainerEqual(self.read_container, self.container)", "def is_serializable(instance_or_class: Any) -> bool:\n return hasattr(instance_or_class, SERDE_SCOPE)", "def _deserialiseLightweight(self,unpickled):\n if self.sampleid != unpickled['sampleId']:\n raise RuntimeError('sampleids do not match: '+self.sampleid+' '+unpickled['sampleId'])\n if self.condition != unpickled['condition']:\n raise RuntimeError('conditions do not match: '+self.condition+' '+unpickled['condition'])\n if self.wellids != unpickled['wellIds']:\n raise RuntimeError('wellids do not match: 
'+self.wellids+' '+unpickled['wellIds'])\n if self._wellIndices != unpickled['wellIndices']:\n raise RuntimeError('wellIndices do not match: '+self._wellIndices+' '+unpickled['wellIndices'])\n self._activeWellIndices=unpickled['activeWellIndices']", "def test_serialize(state):\n assert len(state.players) == 2\n st_data = state.to_data()\n\n assert st_data, \"Expect that we would have some data!\"\n assert len(st_data[\"deck\"]) == 52\n assert len(st_data[\"discarded\"]) == 0\n # Render player subset properly\n assert len(st_data[\"players\"]) == 2\n assert len(st_data[\"players\"][0][\"hand\"]) == 0\n\n new_state = MockState.from_data(st_data)\n assert new_state.__class__ == MockState\n st_data_new = new_state.to_data()\n\n assert st_data == st_data_new", "def test_serialize(self):\n self.assertEqual(self.blogs.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article'},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article'},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article'}\n ])", "def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def _to_serialize(value):\n return value.serialize() if value is not None else None", "def serialize(self):", "def dump_payload(self, obj):\n return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs))", "def test_dict_serialization(self, molecule):\n serialized = molecule.to_dict()\n molecule_copy = Molecule.from_dict(serialized)\n assert molecule == molecule_copy", "def test_json_serialization(self, molecule):\n molecule_copy = Molecule.from_json(molecule.to_json())\n assert molecule_copy == molecule\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def test_to_plain_python_obj_mixed(test_input):\n # It's enough that we don't get an exception here\n output = r.to_plain_python_obj(test_input)\n # We should not get a json conversion error\n json.dumps(output)", "def test_serialize_a_recommendation(self):\n recommendation = Recommendation(product_id=1, recommendation_product_id=2, relationship=Type.UP_SELL)\n data = recommendation.serialize()\n self.assertNotEqual(data, None)\n self.assertIn(\"product_id\", data)\n self.assertEqual(data[\"product_id\"], recommendation.product_id)\n self.assertIn(\"recommendation_product_id\", data)\n self.assertEqual(data[\"recommendation_product_id\"], recommendation.recommendation_product_id)\n self.assertIn(\"relationship\", data)\n self.assertEqual(data[\"relationship\"], recommendation.relationship.name)", "def test_generated_protocol_serialisation(self):\n # create a message\n reply_message = {1: \"number one\", 2: \"number two\", 7: \"number seven\"}\n # message 1\n message = TwoPartyNegotiationMessage(\n message_id=1,\n dialogue_reference=(str(0), \"\"),\n target=0,\n performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,\n reply_message=reply_message,\n )\n\n # serialise the message\n encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)\n\n # deserialise the message\n decoded_message = TwoPartyNegotiationSerializer().decode(\n encoded_message_in_bytes\n )\n\n # Compare the original message with the serialised+deserialised message\n assert decoded_message.message_id == 
message.message_id\n assert decoded_message.dialogue_reference == message.dialogue_reference\n assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0]\n assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1]\n assert decoded_message.target == message.target\n assert decoded_message.performative == message.performative\n assert decoded_message.reply_message == message.reply_message", "def test_boolean_in_serializer() -> None:\n assert cv.custom_serializer(cv.boolean) == {\n \"type\": \"boolean\",\n }", "def pickling_check(instance):\n pkled_instance = pickle.loads(pickle.dumps(instance))\n equality_check(instance, pkled_instance)", "def json(self):\r\n try:\r\n import simplejson as json\r\n except ImportError:\r\n import json\r\n return Assert(json.loads(self.obj))", "def test_user_profile_serialization(self):\n\n # Construct a json representation of a UserProfile model\n user_profile_model_json = {}\n user_profile_model_json['id'] = 'testString'\n user_profile_model_json['iam_id'] = 'testString'\n user_profile_model_json['realm'] = 'testString'\n user_profile_model_json['user_id'] = 'testString'\n user_profile_model_json['firstname'] = 'testString'\n user_profile_model_json['lastname'] = 'testString'\n user_profile_model_json['state'] = 'testString'\n user_profile_model_json['email'] = 'testString'\n user_profile_model_json['phonenumber'] = 'testString'\n user_profile_model_json['altphonenumber'] = 'testString'\n user_profile_model_json['photo'] = 'testString'\n user_profile_model_json['account_id'] = 'testString'\n\n # Construct a model instance of UserProfile by calling from_dict on the json representation\n user_profile_model = UserProfile.from_dict(user_profile_model_json)\n assert user_profile_model != False\n\n # Construct a model instance of UserProfile by calling from_dict on the json representation\n user_profile_model_dict = UserProfile.from_dict(user_profile_model_json).__dict__\n user_profile_model2 = UserProfile(**user_profile_model_dict)\n\n # Verify the model instances are equivalent\n assert user_profile_model == user_profile_model2\n\n # Convert model instance back to dict and verify no loss of data\n user_profile_model_json2 = user_profile_model.to_dict()\n assert user_profile_model_json2 == user_profile_model_json", "def test_default_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = sy.serialize(obj, to_proto=True)\n\n assert sy.serialize(obj) == blob", "def _check_serialize(self, kwargs):\n for k in kwargs:\n if k in self.backend.TO_SERIALIZE:\n if isinstance(kwargs[k], dict):\n kwargs[k] = {j: self.backend.serialize(kwargs[k][j])\n for j in kwargs[k]}\n elif isinstance(kwargs[k], list):\n kwargs[k] = [self.backend.serialize(j)\n for j in kwargs[k]]\n else:\n raise TypeError('Your iterable should be a dict or a list')\n return kwargs", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n 
messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def test_none(self):\n self.assertEqual(self.obj.to_json_string(None), '[]')", "def serialize(self, obj):\n return obj", "def objDictConsistency(self, cls, hard, dis=False):\n orig = cls(\"OK\")\n # the hard way\n if hard:\n p = self.dumpWithPreobjects(None, orig.__dict__, orig, dis=dis)\n d, obj = self.pickler.loads(p)[-1]\n else:\n p = self.dumpWithPreobjects(None, orig, orig.__dict__, dis=dis)\n obj, d = self.pickler.loads(p)[-1]\n self.assertTrue(type(obj)is type(orig))\n self.assertTrue(type(obj.__dict__) is type(orig.__dict__)) # @IgnorePep8\n self.assertEquals(set(obj.__dict__.keys()), set(orig.__dict__.keys()))\n self.assertTrue(obj.__dict__ is d)\n self.assertTrue(obj.isOk() is True)", "def backcast(self) -> bool:\n return self.__backcast", "def serialize(self):\n raise NotImplemented()", "def test_raw_data(self):\n self.assertEqual(self.tester.raw_data, 1)", "def serialize(self, data):\n return data", "def test_base_case_json(self):\n json_data = '{\"a\": 1}'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a\" : 1}'))", "def test_snp_json_roundtrip(self):\n given = self.ntwk1\n actual = rf.from_json_string(rf.to_json_string(given))\n self.assertEqual(actual, given)\n self.assertEqual(actual.frequency, given.frequency)\n self.assertEqual(actual.name, given.name)\n self.assertEqual(actual.comments, given.comments)\n self.assertEqual(actual.z0.tolist(), given.z0.tolist())\n self.assertEqual(actual.port_names, given.port_names)\n self.assertEqual(actual.variables, given.variables)", "def test_to_json_string(self):\n self.assertEqual(Base.to_json_string(None), \"[]\")\n self.assertTrue(type(Base.to_json_string(None)) is str)\n self.assertEqual(Base.to_json_string([]), \"[]\")\n self.assertTrue(type(Base.to_json_string([])) is str)\n myDict = {'id': 4, 'width': 3, 'height': 4, 'x': 1, 'y': 3}\n myDict2 = {'id': 3, 'width': 6, 'height': 2, 'x': 1, 'y': 9}\n jsonized = Base.to_json_string([myDict, myDict2])\n self.assertTrue(type(jsonized) is str)\n myDict3 = json.loads(jsonized)\n self.assertEqual(myDict3, [myDict, myDict2])", "def round_trip(message):\n return bits.bits_to_message(bits.message_to_bits(message)) == message" ]
[ "0.65497476", "0.64250165", "0.6318902", "0.6110213", "0.6092596", "0.60524327", "0.6047912", "0.60349566", "0.59723496", "0.59635586", "0.59397215", "0.59294575", "0.59294575", "0.59081197", "0.58982784", "0.58918864", "0.58906686", "0.5878805", "0.58623", "0.5853447", "0.5826075", "0.58153296", "0.5812717", "0.58071756", "0.5794471", "0.5785449", "0.5782361", "0.5774637", "0.57684493", "0.5760834", "0.57446843", "0.5724752", "0.5709062", "0.569147", "0.5687801", "0.56571716", "0.56280816", "0.5620996", "0.5619737", "0.56180525", "0.56041485", "0.5598943", "0.5598287", "0.5595587", "0.55944103", "0.55884326", "0.55857265", "0.5569962", "0.55649996", "0.5556069", "0.5549877", "0.5548616", "0.5548238", "0.55424994", "0.55392677", "0.5510641", "0.5503435", "0.5500031", "0.54999846", "0.54997605", "0.5494271", "0.5493163", "0.5482728", "0.54794973", "0.54633415", "0.54618675", "0.5460477", "0.5458001", "0.54513127", "0.544926", "0.54448813", "0.5435727", "0.5431473", "0.54137164", "0.5409626", "0.5409365", "0.54088485", "0.54026324", "0.5399572", "0.539559", "0.5390935", "0.53819275", "0.5372337", "0.53666735", "0.5365913", "0.53636456", "0.53634423", "0.5358752", "0.5352307", "0.5352307", "0.5345392", "0.5338859", "0.5332617", "0.5327083", "0.5325902", "0.53254426", "0.5323825", "0.53235537", "0.53190833", "0.53104806", "0.53097904" ]
0.0
-1
Roundtrip to check what we serialise is what we get back.
def test_serialises_and_deserialises_hs00_message_correctly_for_int_array_data( self, ): original_hist = { "source": "some_source", "timestamp": 123456, "current_shape": [5], "dim_metadata": [ { "length": 5, "unit": "m", "label": "some_label", "bin_boundaries": np.array([0, 1, 2, 3, 4, 5]), } ], "last_metadata_timestamp": 123456, "data": np.array([1, 2, 3, 4, 5]), "errors": np.array([5, 4, 3, 2, 1]), "info": "info_string", } buf = serialise_hs00(original_hist) hist = deserialise_hs00(buf) assert hist["source"] == original_hist["source"] assert hist["timestamp"] == original_hist["timestamp"] assert hist["current_shape"] == original_hist["current_shape"] self._check_metadata_for_one_dimension( hist["dim_metadata"][0], original_hist["dim_metadata"][0] ) assert np.array_equal(hist["data"], original_hist["data"]) assert np.array_equal(hist["errors"], original_hist["errors"]) assert hist["info"] == original_hist["info"] assert ( hist["last_metadata_timestamp"] == original_hist["last_metadata_timestamp"] )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dumps(self):\n result = self.mapper.dumps(self.serialization)\n self.mapper.to_dict.assert_called_once_with(\"custom\")\n self.serialization.assert_called_once_with(\n self.mapper.to_dict.return_value\n )\n self.assertIs(result, self.serialization.return_value)", "def _serialise(self):\n # TODO (M Foley)\n pass", "def test_return_type(self):\n self.assertEqual(type(self.obj.to_json_string(self.valid_ld)), str)", "def test_dump_single(self):\n result = self.serializer.dump(self.schema_to_serialize)\n self.assertIsInstance(result, dict)", "def revert(self):\n if hasattr(self, '_init_data'):\n self.deserialize(self._init_data)\n return True\n return False", "def test_serialize(self):\n self.assert_raises(TypeError, self.instance.serialize, (1,))", "def test_serialize_no_metadata(self):\n pass # pragma: no cover", "def isJWS_unserialized_single(x):\n if isinstance(x, dict) \\\n and \"payload\" in x and \"signature\" in x \\\n and (\"protected\" in x or \"unprotected\" in x):\n try: \n if \"protected\" in x: \n json.loads(x[\"protected\"])\n return True\n except:\n return False\n else: \n return False", "def roundtrip(data):\r\n body = xmlrpclib.dumps(data)\r\n result = xmlrpclib.loads(body)[0]\r\n if result != data:\r\n print result", "def test_serialize(value, expected):\n assert json_dumps(value) == expected", "def assertDeserializeNonString(self):\r\n self.assertDeserializeEqual(None, None)\r\n self.assertDeserializeEqual(3.14, 3.14)\r\n self.assertDeserializeEqual(True, True)\r\n self.assertDeserializeEqual([10], [10])\r\n self.assertDeserializeEqual({}, {})\r\n self.assertDeserializeEqual([], [])\r\n self.assertDeserializeEqual(None, 'null')", "def test_serialize(self):\n self.assertEqual(self.scrapes.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article', 'authors': [{'lastname': u'Swain', 'firstname': u'Matt'}]},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article', 'authors': [{'lastname': u'Smith', 'firstname': u'John'}]},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article', 'authors': [{'lastname': u'Bond', 'firstname': u'James'}]}\n ])", "def test_serialize(self):\n self.assertEqual(self.scrapes.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article', 'authors': [{'lastname': u'Swain', 'firstname': u'Matt'}]},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article', 'authors': [{'lastname': u'Smith', 'firstname': u'John'}]},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article', 'authors': [{'lastname': u'Bond', 'firstname': u'James'}]}\n ])", "def test_serialize(self):\n r = self.RNA(\"ugagg\")\n assert dumps(r)", "def is_sedes(obj):\n return hasattr(obj, 'serialize') and hasattr(obj, 'deserialize')", "def isJWP_unserialized(x):\n if isinstance(x, dict) \\\n and \"payload\" in x and isinstance(x[\"unprotected\"], dict)\\\n and \"unprotected\" in x and \"signature\" not in x:\n return True\n else:\n return False", "def isJWE_unserialized(x):\n return isJWE_unserialized_single(x) or isJWE_unserialized_multi(x)", "def test_serialize_sinfo(self):\n self.assert_raises(RuntimeError, self.instance.serialize,\n self.testing_options['objects'][0],\n add_serializer_info=True)", "def isJWS_unserialized(x):\n return isJWS_unserialized_single(x) or isJWS_unserialized_multi(x)", "def test_dumps(self):\n data = \"something\"\n result = self.mapper.loads(self.deser_fn, data)\n self.mapper.from_dict.assert_called_once_with(\n self.deser_fn.return_value, \"custom\"\n 
)\n self.deser_fn.assert_called_once_with(data)\n self.assertIs(result, self.mapper.from_dict.return_value)", "def is_stringified(self) -> bool:\n return self._stringify", "def isJWE_unserialized_single(x):\n if isinstance(x, dict) \\\n and (\"unprotected\" in x or \"protected\" in x) \\\n and (\"ciphertext\" in x):\n try:\n if \"protected\" in x:\n json.loads(x[\"protected\"])\n return True\n except:\n return False\n else:\n return False", "def test_encode(self):\n result = json.loads(self.cls.objects.to_json())\n for index, item in enumerate(result):\n self.assertNotIn(\n \"to_json_exclude\", item,\n (\"to_json_exclude found at index {}\").format(index)\n )\n self.assertNotIn(\n \"json_exclude\", item,\n (\"json_exclude found at index {}\").format(index)\n )\n self.assertIn(\n \"from_json_exclude\", item,\n (\"from_json_exclude not found at index {}\").format(index)\n )\n self.assertIn(\n \"required\", item,\n (\"required not found at index {}\").format(index)\n )", "def _post_deserialize (self):\n pass", "def test_serialize_none(self):\n self.assertEqual(serialize(None), 'null')", "def isJOSE_unserialized(x):\n return isJWS_unserialized(x) or isJWE_unserialized(x) \\\n or isJWP_unserialized(x)", "def test_08(self):\n ret = Base.to_json_string(None)\n self.assertEqual(ret, \"[]\")", "def assertSerializeDeserialize(self, obj, version=None):\n tested_versions = [version] if version is not None else Version.supported\n for version in tested_versions:\n constructor = obj.__class__.from_json\n json_obj = obj.to_json(version)\n clone = constructor(json_obj)\n\n self.assertEqual(obj.__class__, clone.__class__)\n\n if isinstance(obj, dict):\n orig_dict = obj\n clone_dict = clone\n else:\n orig_dict = obj.__dict__\n clone_dict = clone.__dict__\n\n self.assertEqual(orig_dict, clone_dict)", "def test_quantitative_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def is_jsonable(self, input):\n try:\n json.dumps(input)\n return True\n except (TypeError, OverflowError):\n return False", "def test_encode_decode(self):\n assert self._test == pybinn.loads(pybinn.dumps(self._test))", "def test_valid_ld(self):\n self.assertEqual(self.obj.to_json_string(self.valid_ld),\n json.dumps(self.valid_ld))", "def assertDeserializeEqual(self, expected, arg):\r\n assert_equals(expected, deserialize_field(self.test_field(), arg))", "def test_dumps(self):\n schema = self.UnitTestingSchema()\n serializer = JSONSchema()\n self.assertIsInstance(serializer.dumps(schema), str)", "def test_serialization_deserialization(self):\n\n original_time = now()\n serialized_time = DatetimeMapper.forward(original_time)\n assert serialized_time == original_time.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n\n deserialized_time = DatetimeMapper.backward(serialized_time)\n assert original_time == deserialized_time\n\n deserialized_time = DatetimeMapper.backward(None)\n assert isinstance(deserialized_time, datetime)", "def _TestReadSerialized(self, serializer_object, json_dict):\n # We use json.dumps to make sure the dict does not serialize into\n # an invalid JSON string such as one that contains Python string prefixes\n # like b'' or u''.\n json_string = json.dumps(json_dict)\n unserialized_object = serializer_object.ReadSerialized(json_string)\n\n self.assertIsNotNone(unserialized_object)\n return unserialized_object", "def test_non_list_of_dicts_arg(self):\n self.assertEqual(self.obj.to_json_string(666), '666')", "def 
test_deserialize(self):\r\n\r\n # test that from_json produces no exceptions\r\n self.assertDeserializeEqual('10:20:30', '\"10:20:30\"')", "def check_round_trip(data: dict, logger: Logger) -> Optional[bytes]:\n try:\n as_json_text = json.dumps(data, default=encode_value).encode(\"utf-8\")\n except Exception as e:\n report_error(\"CumulusCI found an unusual datatype in your config:\", e, logger)\n return None\n try:\n test_load = load_config_from_json_or_pickle(as_json_text)\n assert _simplify_config(test_load) == _simplify_config(\n data\n ), f\"JSON did not round-trip-cleanly {test_load}, {data}\"\n except Exception as e: # pragma: no cover\n report_error(\"CumulusCI found a problem saving your config:\", e, logger)\n return None\n assert isinstance(as_json_text, bytes)\n return as_json_text", "def is_raw(self):\n return not self.has_structure", "def parse_round_trip(self):\n parsed = self.test_proto.parse()\n round_trip = avro.protocol.parse(str(parsed))\n self.assertEqual(parsed, round_trip)", "def test_serialize_and_deserialize_returns_unchanged_collection(\n self\n ) -> None:\n self.assertEqual(\n self.collection.to_dict(),\n collection_domain.Collection.deserialize(\n self.collection.serialize()).to_dict())", "def test_serialize(self, val):\n val_orig = FitVal(*val)\n\n ser = json.dumps(val_orig, cls=ExperimentEncoder)\n val_deser = json.loads(ser, cls=ExperimentDecoder)\n\n self.assertEqual(val_orig, val_deser)", "def recvcheck(self):\n\n data = self.recv()\n if self.serializer == 'json' and data and isinstance(data, dict):\n if '@error' in data:\n exception = getattr(builtins, data['@error'])\n if (isinstance(exception, type) and\n issubclass(exception, Exception)):\n raise exception(data['@message'])\n else:\n if isinstance(data, Exception):\n raise data\n return data", "def test_encoding_round_trip(cell):\n orig = copy.copy(cell.__dict__)\n cell._from_serializeable_dict(cell._to_serializeable_dict())\n round_trip = cell.__dict__\n for key in cell._allowed:\n if type(orig[key]) == np.ndarray or type(orig[key]) == list:\n assert all(orig[key] == round_trip[key])\n else:\n assert orig[key] == round_trip[key]", "def test_json(self):\n\t\tdecoded = json.loads(json.dumps(self.filter.to_js_obj()))\n\t\tself.assertIsNotNone(decoded, \"JSON conversion failed!\")\n\t\tself.assertEqual(self.filter.to_js_obj(), decoded, \"JSON conversion mismatch!\")", "def test_to_json(self):\n self.amenity_json = self.amenity.to_json()\n actual = 1\n try:\n serialized = json.dumps(self.amenity_json)\n except:\n actual = 0\n self.assertTrue(1 == actual)", "def serialize(self):\n pass", "def serialize(self, obj):\n pass", "def test_serialize_deserialize1(self):\n for command in Command:\n serialized = command.serialize()\n deserialized = Command.deserialize(serialized)\n self.assertTrue(deserialized is command)", "def convertAndAssertJSONEqual(self, data, expected_data, msg=None):\n\n super(SimpleTestCase, self).assertJSONEqual(json.dumps(data, cls=DjangoJSONEncoder), expected_data, msg)", "def test_repr(self, cls):\n inst = cls()\n # Exact values aren't a concern so long as neither direction\n # raises an exception.\n pkl = cloudpickle.dumps(inst)\n cloudpickle.loads(pkl)", "def test_can_deserialize_plain_object(self):\n handler = BaseRestHandler(mock.MagicMock(), mock.MagicMock())\n handler._write_buffer = []\n obj = SerializeMe()\n obj.key = \"value\"\n handler.write_object(obj)\n res = json.loads(handler._write_buffer[0])\n self.assertDictEqual(res, {\"key\": \"value\"})", "def jucify(self):\n to_ret = 
False\n # TODO: finish this\n return to_ret", "def can_be_pickled(x):\n try:\n s = BytesIO() \n pickle.dump(x, s) \n return True\n except:\n return False", "def test_serialization(self):\n serialized = self.Gs.as_dict()\n unserialized = BayesianNetwork.from_dict(serialized)\n\n self.assertDictEqual(serialized, unserialized.as_dict())", "def __getstate__(self):\n raise IOError(\"You tried to serialize something that should not\"\n \" be serialized.\")", "def is_picklable(obj):\n try:\n pickle.dumps(obj)\n\n except pickle.PicklingError:\n return False\n return True", "def test_time_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def serialize(self, data):", "def is_text_serializer(serializer):\n return isinstance(serializer.dumps({}), text_type)", "def test_return_value(self):\n self.assertEqual(self.r0.from_json_string(self.string), self.d)", "def test_quantitative_filter_deserialization(self):\n serializer = serializers.FilterSerializer(data=self.external_filter)\n self.assertTrue(serializer.is_valid())\n self.assertDictEqual(serializer.validated_data, self.internal_filter)", "def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy", "def serialize(self, data):\n if isinstance(data, str):\n return data\n\n if hasattr(data, \"read\"):\n return data.read()\n\n raise ValueError(\"Unable to handle input format: %s\" % type(data))", "def converted(self):\n if self.converters:\n return self.converters.converted(self)\n return False", "def _check_primitive(self) -> PossibleResult[T]:\n if self.constructor in _PRIMITIVES:\n if self.obj is UNDEFINED:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n if self.obj is None:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n if not isinstance(self.obj, self.constructor):\n if not self.convert_primitives:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n try:\n return self.constructor(self.obj) # type: ignore\n except (ValueError, TypeError) as error:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n ) from error\n return self.obj\n return NO_RESULT", "def testRoundtrip(self):\n key = createKey()\n data = {u'user': u'aliafshar', u'id': u'91821212'}\n token = dataToToken(key, data)\n self.assertEqual(data, tokenToData(key, token))", "def test_roundtrip(self):\n self.read_container = self.roundtripContainer()\n self.assertIsNotNone(str(self.container)) # added as a test to make sure printing works\n self.assertIsNotNone(str(self.read_container))\n # make sure we get a completely new object\n self.assertNotEqual(id(self.container), id(self.read_container))\n self.assertIs(self.read_nwbfile.objects[self.container.object_id], self.read_container)\n self.assertContainerEqual(self.read_container, self.container)", "def is_serializable(instance_or_class: Any) -> bool:\n return hasattr(instance_or_class, SERDE_SCOPE)", "def _deserialiseLightweight(self,unpickled):\n if self.sampleid != unpickled['sampleId']:\n raise RuntimeError('sampleids do not match: '+self.sampleid+' '+unpickled['sampleId'])\n if self.condition != unpickled['condition']:\n raise RuntimeError('conditions do not match: '+self.condition+' '+unpickled['condition'])\n if self.wellids != unpickled['wellIds']:\n raise RuntimeError('wellids do not match: 
'+self.wellids+' '+unpickled['wellIds'])\n if self._wellIndices != unpickled['wellIndices']:\n raise RuntimeError('wellIndices do not match: '+self._wellIndices+' '+unpickled['wellIndices'])\n self._activeWellIndices=unpickled['activeWellIndices']", "def test_serialize(state):\n assert len(state.players) == 2\n st_data = state.to_data()\n\n assert st_data, \"Expect that we would have some data!\"\n assert len(st_data[\"deck\"]) == 52\n assert len(st_data[\"discarded\"]) == 0\n # Render player subset properly\n assert len(st_data[\"players\"]) == 2\n assert len(st_data[\"players\"][0][\"hand\"]) == 0\n\n new_state = MockState.from_data(st_data)\n assert new_state.__class__ == MockState\n st_data_new = new_state.to_data()\n\n assert st_data == st_data_new", "def test_serialize(self):\n self.assertEqual(self.blogs.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article'},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article'},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article'}\n ])", "def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def _to_serialize(value):\n return value.serialize() if value is not None else None", "def serialize(self):", "def dump_payload(self, obj):\n return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs))", "def test_dict_serialization(self, molecule):\n serialized = molecule.to_dict()\n molecule_copy = Molecule.from_dict(serialized)\n assert molecule == molecule_copy", "def test_json_serialization(self, molecule):\n molecule_copy = Molecule.from_json(molecule.to_json())\n assert molecule_copy == molecule\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def test_to_plain_python_obj_mixed(test_input):\n # It's enough that we don't get an exception here\n output = r.to_plain_python_obj(test_input)\n # We should not get a json conversion error\n json.dumps(output)", "def test_serialize_a_recommendation(self):\n recommendation = Recommendation(product_id=1, recommendation_product_id=2, relationship=Type.UP_SELL)\n data = recommendation.serialize()\n self.assertNotEqual(data, None)\n self.assertIn(\"product_id\", data)\n self.assertEqual(data[\"product_id\"], recommendation.product_id)\n self.assertIn(\"recommendation_product_id\", data)\n self.assertEqual(data[\"recommendation_product_id\"], recommendation.recommendation_product_id)\n self.assertIn(\"relationship\", data)\n self.assertEqual(data[\"relationship\"], recommendation.relationship.name)", "def test_generated_protocol_serialisation(self):\n # create a message\n reply_message = {1: \"number one\", 2: \"number two\", 7: \"number seven\"}\n # message 1\n message = TwoPartyNegotiationMessage(\n message_id=1,\n dialogue_reference=(str(0), \"\"),\n target=0,\n performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,\n reply_message=reply_message,\n )\n\n # serialise the message\n encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)\n\n # deserialise the message\n decoded_message = TwoPartyNegotiationSerializer().decode(\n encoded_message_in_bytes\n )\n\n # Compare the original message with the serialised+deserialised message\n assert decoded_message.message_id == 
message.message_id\n assert decoded_message.dialogue_reference == message.dialogue_reference\n assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0]\n assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1]\n assert decoded_message.target == message.target\n assert decoded_message.performative == message.performative\n assert decoded_message.reply_message == message.reply_message", "def test_boolean_in_serializer() -> None:\n assert cv.custom_serializer(cv.boolean) == {\n \"type\": \"boolean\",\n }", "def pickling_check(instance):\n pkled_instance = pickle.loads(pickle.dumps(instance))\n equality_check(instance, pkled_instance)", "def json(self):\r\n try:\r\n import simplejson as json\r\n except ImportError:\r\n import json\r\n return Assert(json.loads(self.obj))", "def test_user_profile_serialization(self):\n\n # Construct a json representation of a UserProfile model\n user_profile_model_json = {}\n user_profile_model_json['id'] = 'testString'\n user_profile_model_json['iam_id'] = 'testString'\n user_profile_model_json['realm'] = 'testString'\n user_profile_model_json['user_id'] = 'testString'\n user_profile_model_json['firstname'] = 'testString'\n user_profile_model_json['lastname'] = 'testString'\n user_profile_model_json['state'] = 'testString'\n user_profile_model_json['email'] = 'testString'\n user_profile_model_json['phonenumber'] = 'testString'\n user_profile_model_json['altphonenumber'] = 'testString'\n user_profile_model_json['photo'] = 'testString'\n user_profile_model_json['account_id'] = 'testString'\n\n # Construct a model instance of UserProfile by calling from_dict on the json representation\n user_profile_model = UserProfile.from_dict(user_profile_model_json)\n assert user_profile_model != False\n\n # Construct a model instance of UserProfile by calling from_dict on the json representation\n user_profile_model_dict = UserProfile.from_dict(user_profile_model_json).__dict__\n user_profile_model2 = UserProfile(**user_profile_model_dict)\n\n # Verify the model instances are equivalent\n assert user_profile_model == user_profile_model2\n\n # Convert model instance back to dict and verify no loss of data\n user_profile_model_json2 = user_profile_model.to_dict()\n assert user_profile_model_json2 == user_profile_model_json", "def test_default_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = sy.serialize(obj, to_proto=True)\n\n assert sy.serialize(obj) == blob", "def _check_serialize(self, kwargs):\n for k in kwargs:\n if k in self.backend.TO_SERIALIZE:\n if isinstance(kwargs[k], dict):\n kwargs[k] = {j: self.backend.serialize(kwargs[k][j])\n for j in kwargs[k]}\n elif isinstance(kwargs[k], list):\n kwargs[k] = [self.backend.serialize(j)\n for j in kwargs[k]]\n else:\n raise TypeError('Your iterable should be a dict or a list')\n return kwargs", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n 
messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def test_none(self):\n self.assertEqual(self.obj.to_json_string(None), '[]')", "def serialize(self, obj):\n return obj", "def objDictConsistency(self, cls, hard, dis=False):\n orig = cls(\"OK\")\n # the hard way\n if hard:\n p = self.dumpWithPreobjects(None, orig.__dict__, orig, dis=dis)\n d, obj = self.pickler.loads(p)[-1]\n else:\n p = self.dumpWithPreobjects(None, orig, orig.__dict__, dis=dis)\n obj, d = self.pickler.loads(p)[-1]\n self.assertTrue(type(obj)is type(orig))\n self.assertTrue(type(obj.__dict__) is type(orig.__dict__)) # @IgnorePep8\n self.assertEquals(set(obj.__dict__.keys()), set(orig.__dict__.keys()))\n self.assertTrue(obj.__dict__ is d)\n self.assertTrue(obj.isOk() is True)", "def backcast(self) -> bool:\n return self.__backcast", "def serialize(self):\n raise NotImplemented()", "def test_raw_data(self):\n self.assertEqual(self.tester.raw_data, 1)", "def serialize(self, data):\n return data", "def test_base_case_json(self):\n json_data = '{\"a\": 1}'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a\" : 1}'))", "def test_snp_json_roundtrip(self):\n given = self.ntwk1\n actual = rf.from_json_string(rf.to_json_string(given))\n self.assertEqual(actual, given)\n self.assertEqual(actual.frequency, given.frequency)\n self.assertEqual(actual.name, given.name)\n self.assertEqual(actual.comments, given.comments)\n self.assertEqual(actual.z0.tolist(), given.z0.tolist())\n self.assertEqual(actual.port_names, given.port_names)\n self.assertEqual(actual.variables, given.variables)", "def test_to_json_string(self):\n self.assertEqual(Base.to_json_string(None), \"[]\")\n self.assertTrue(type(Base.to_json_string(None)) is str)\n self.assertEqual(Base.to_json_string([]), \"[]\")\n self.assertTrue(type(Base.to_json_string([])) is str)\n myDict = {'id': 4, 'width': 3, 'height': 4, 'x': 1, 'y': 3}\n myDict2 = {'id': 3, 'width': 6, 'height': 2, 'x': 1, 'y': 9}\n jsonized = Base.to_json_string([myDict, myDict2])\n self.assertTrue(type(jsonized) is str)\n myDict3 = json.loads(jsonized)\n self.assertEqual(myDict3, [myDict, myDict2])", "def round_trip(message):\n return bits.bits_to_message(bits.message_to_bits(message)) == message" ]
[ "0.65497476", "0.64250165", "0.6318902", "0.6110213", "0.6092596", "0.60524327", "0.6047912", "0.60349566", "0.59723496", "0.59635586", "0.59397215", "0.59294575", "0.59294575", "0.59081197", "0.58982784", "0.58918864", "0.58906686", "0.5878805", "0.58623", "0.5853447", "0.5826075", "0.58153296", "0.5812717", "0.58071756", "0.5794471", "0.5785449", "0.5782361", "0.5774637", "0.57684493", "0.5760834", "0.57446843", "0.5724752", "0.5709062", "0.569147", "0.5687801", "0.56571716", "0.56280816", "0.5620996", "0.5619737", "0.56180525", "0.56041485", "0.5598943", "0.5598287", "0.5595587", "0.55944103", "0.55884326", "0.55857265", "0.5569962", "0.55649996", "0.5556069", "0.5549877", "0.5548616", "0.5548238", "0.55424994", "0.55392677", "0.5510641", "0.5503435", "0.5500031", "0.54999846", "0.54997605", "0.5494271", "0.5493163", "0.5482728", "0.54794973", "0.54633415", "0.54618675", "0.5460477", "0.5458001", "0.54513127", "0.544926", "0.54448813", "0.5435727", "0.5431473", "0.54137164", "0.5409626", "0.5409365", "0.54088485", "0.54026324", "0.5399572", "0.539559", "0.5390935", "0.53819275", "0.5372337", "0.53666735", "0.5365913", "0.53636456", "0.53634423", "0.5358752", "0.5352307", "0.5352307", "0.5345392", "0.5338859", "0.5332617", "0.5327083", "0.5325902", "0.53254426", "0.5323825", "0.53235537", "0.53190833", "0.53104806", "0.53097904" ]
0.0
-1
Roundtrip to check what we serialise is what we get back.
def test_serialises_and_deserialises_hs00_message_correctly_when_float_input_is_not_ndarray( self, ): original_hist = { "source": "some_source", "timestamp": 123456, "current_shape": [2, 5], "dim_metadata": [ { "length": 2, "unit": "b", "label": "y", "bin_boundaries": [10.0, 11.0, 12.0], }, { "length": 5, "unit": "m", "label": "x", "bin_boundaries": [0.0, 1.0, 2.0, 3.0, 4.0, 5.0], }, ], "last_metadata_timestamp": 123456, "data": [[1.0, 2.0, 3.0, 4.0, 5.0], [6.0, 7.0, 8.0, 9.0, 10.0]], "errors": [[5.0, 4.0, 3.0, 2.0, 1.0], [10.0, 9.0, 8.0, 7.0, 6.0]], "info": "info_string", } buf = serialise_hs00(original_hist) hist = deserialise_hs00(buf) assert hist["source"] == original_hist["source"] assert hist["timestamp"] == original_hist["timestamp"] assert hist["current_shape"] == original_hist["current_shape"] self._check_metadata_for_one_dimension( hist["dim_metadata"][0], original_hist["dim_metadata"][0] ) self._check_metadata_for_one_dimension( hist["dim_metadata"][1], original_hist["dim_metadata"][1] ) assert np.array_equal(hist["data"], original_hist["data"]) assert np.array_equal(hist["errors"], original_hist["errors"]) assert hist["info"] == original_hist["info"] assert ( hist["last_metadata_timestamp"] == original_hist["last_metadata_timestamp"] )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dumps(self):\n result = self.mapper.dumps(self.serialization)\n self.mapper.to_dict.assert_called_once_with(\"custom\")\n self.serialization.assert_called_once_with(\n self.mapper.to_dict.return_value\n )\n self.assertIs(result, self.serialization.return_value)", "def _serialise(self):\n # TODO (M Foley)\n pass", "def test_return_type(self):\n self.assertEqual(type(self.obj.to_json_string(self.valid_ld)), str)", "def test_dump_single(self):\n result = self.serializer.dump(self.schema_to_serialize)\n self.assertIsInstance(result, dict)", "def revert(self):\n if hasattr(self, '_init_data'):\n self.deserialize(self._init_data)\n return True\n return False", "def test_serialize(self):\n self.assert_raises(TypeError, self.instance.serialize, (1,))", "def test_serialize_no_metadata(self):\n pass # pragma: no cover", "def isJWS_unserialized_single(x):\n if isinstance(x, dict) \\\n and \"payload\" in x and \"signature\" in x \\\n and (\"protected\" in x or \"unprotected\" in x):\n try: \n if \"protected\" in x: \n json.loads(x[\"protected\"])\n return True\n except:\n return False\n else: \n return False", "def roundtrip(data):\r\n body = xmlrpclib.dumps(data)\r\n result = xmlrpclib.loads(body)[0]\r\n if result != data:\r\n print result", "def test_serialize(value, expected):\n assert json_dumps(value) == expected", "def assertDeserializeNonString(self):\r\n self.assertDeserializeEqual(None, None)\r\n self.assertDeserializeEqual(3.14, 3.14)\r\n self.assertDeserializeEqual(True, True)\r\n self.assertDeserializeEqual([10], [10])\r\n self.assertDeserializeEqual({}, {})\r\n self.assertDeserializeEqual([], [])\r\n self.assertDeserializeEqual(None, 'null')", "def test_serialize(self):\n self.assertEqual(self.scrapes.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article', 'authors': [{'lastname': u'Swain', 'firstname': u'Matt'}]},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article', 'authors': [{'lastname': u'Smith', 'firstname': u'John'}]},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article', 'authors': [{'lastname': u'Bond', 'firstname': u'James'}]}\n ])", "def test_serialize(self):\n self.assertEqual(self.scrapes.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article', 'authors': [{'lastname': u'Swain', 'firstname': u'Matt'}]},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article', 'authors': [{'lastname': u'Smith', 'firstname': u'John'}]},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article', 'authors': [{'lastname': u'Bond', 'firstname': u'James'}]}\n ])", "def test_serialize(self):\n r = self.RNA(\"ugagg\")\n assert dumps(r)", "def is_sedes(obj):\n return hasattr(obj, 'serialize') and hasattr(obj, 'deserialize')", "def isJWP_unserialized(x):\n if isinstance(x, dict) \\\n and \"payload\" in x and isinstance(x[\"unprotected\"], dict)\\\n and \"unprotected\" in x and \"signature\" not in x:\n return True\n else:\n return False", "def isJWE_unserialized(x):\n return isJWE_unserialized_single(x) or isJWE_unserialized_multi(x)", "def test_serialize_sinfo(self):\n self.assert_raises(RuntimeError, self.instance.serialize,\n self.testing_options['objects'][0],\n add_serializer_info=True)", "def isJWS_unserialized(x):\n return isJWS_unserialized_single(x) or isJWS_unserialized_multi(x)", "def test_dumps(self):\n data = \"something\"\n result = self.mapper.loads(self.deser_fn, data)\n self.mapper.from_dict.assert_called_once_with(\n self.deser_fn.return_value, \"custom\"\n 
)\n self.deser_fn.assert_called_once_with(data)\n self.assertIs(result, self.mapper.from_dict.return_value)", "def is_stringified(self) -> bool:\n return self._stringify", "def isJWE_unserialized_single(x):\n if isinstance(x, dict) \\\n and (\"unprotected\" in x or \"protected\" in x) \\\n and (\"ciphertext\" in x):\n try:\n if \"protected\" in x:\n json.loads(x[\"protected\"])\n return True\n except:\n return False\n else:\n return False", "def test_encode(self):\n result = json.loads(self.cls.objects.to_json())\n for index, item in enumerate(result):\n self.assertNotIn(\n \"to_json_exclude\", item,\n (\"to_json_exclude found at index {}\").format(index)\n )\n self.assertNotIn(\n \"json_exclude\", item,\n (\"json_exclude found at index {}\").format(index)\n )\n self.assertIn(\n \"from_json_exclude\", item,\n (\"from_json_exclude not found at index {}\").format(index)\n )\n self.assertIn(\n \"required\", item,\n (\"required not found at index {}\").format(index)\n )", "def _post_deserialize (self):\n pass", "def test_serialize_none(self):\n self.assertEqual(serialize(None), 'null')", "def isJOSE_unserialized(x):\n return isJWS_unserialized(x) or isJWE_unserialized(x) \\\n or isJWP_unserialized(x)", "def test_08(self):\n ret = Base.to_json_string(None)\n self.assertEqual(ret, \"[]\")", "def assertSerializeDeserialize(self, obj, version=None):\n tested_versions = [version] if version is not None else Version.supported\n for version in tested_versions:\n constructor = obj.__class__.from_json\n json_obj = obj.to_json(version)\n clone = constructor(json_obj)\n\n self.assertEqual(obj.__class__, clone.__class__)\n\n if isinstance(obj, dict):\n orig_dict = obj\n clone_dict = clone\n else:\n orig_dict = obj.__dict__\n clone_dict = clone.__dict__\n\n self.assertEqual(orig_dict, clone_dict)", "def test_quantitative_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def is_jsonable(self, input):\n try:\n json.dumps(input)\n return True\n except (TypeError, OverflowError):\n return False", "def test_encode_decode(self):\n assert self._test == pybinn.loads(pybinn.dumps(self._test))", "def test_valid_ld(self):\n self.assertEqual(self.obj.to_json_string(self.valid_ld),\n json.dumps(self.valid_ld))", "def assertDeserializeEqual(self, expected, arg):\r\n assert_equals(expected, deserialize_field(self.test_field(), arg))", "def test_dumps(self):\n schema = self.UnitTestingSchema()\n serializer = JSONSchema()\n self.assertIsInstance(serializer.dumps(schema), str)", "def test_serialization_deserialization(self):\n\n original_time = now()\n serialized_time = DatetimeMapper.forward(original_time)\n assert serialized_time == original_time.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n\n deserialized_time = DatetimeMapper.backward(serialized_time)\n assert original_time == deserialized_time\n\n deserialized_time = DatetimeMapper.backward(None)\n assert isinstance(deserialized_time, datetime)", "def _TestReadSerialized(self, serializer_object, json_dict):\n # We use json.dumps to make sure the dict does not serialize into\n # an invalid JSON string such as one that contains Python string prefixes\n # like b'' or u''.\n json_string = json.dumps(json_dict)\n unserialized_object = serializer_object.ReadSerialized(json_string)\n\n self.assertIsNotNone(unserialized_object)\n return unserialized_object", "def test_non_list_of_dicts_arg(self):\n self.assertEqual(self.obj.to_json_string(666), '666')", "def 
test_deserialize(self):\r\n\r\n # test that from_json produces no exceptions\r\n self.assertDeserializeEqual('10:20:30', '\"10:20:30\"')", "def check_round_trip(data: dict, logger: Logger) -> Optional[bytes]:\n try:\n as_json_text = json.dumps(data, default=encode_value).encode(\"utf-8\")\n except Exception as e:\n report_error(\"CumulusCI found an unusual datatype in your config:\", e, logger)\n return None\n try:\n test_load = load_config_from_json_or_pickle(as_json_text)\n assert _simplify_config(test_load) == _simplify_config(\n data\n ), f\"JSON did not round-trip-cleanly {test_load}, {data}\"\n except Exception as e: # pragma: no cover\n report_error(\"CumulusCI found a problem saving your config:\", e, logger)\n return None\n assert isinstance(as_json_text, bytes)\n return as_json_text", "def is_raw(self):\n return not self.has_structure", "def parse_round_trip(self):\n parsed = self.test_proto.parse()\n round_trip = avro.protocol.parse(str(parsed))\n self.assertEqual(parsed, round_trip)", "def test_serialize_and_deserialize_returns_unchanged_collection(\n self\n ) -> None:\n self.assertEqual(\n self.collection.to_dict(),\n collection_domain.Collection.deserialize(\n self.collection.serialize()).to_dict())", "def test_serialize(self, val):\n val_orig = FitVal(*val)\n\n ser = json.dumps(val_orig, cls=ExperimentEncoder)\n val_deser = json.loads(ser, cls=ExperimentDecoder)\n\n self.assertEqual(val_orig, val_deser)", "def recvcheck(self):\n\n data = self.recv()\n if self.serializer == 'json' and data and isinstance(data, dict):\n if '@error' in data:\n exception = getattr(builtins, data['@error'])\n if (isinstance(exception, type) and\n issubclass(exception, Exception)):\n raise exception(data['@message'])\n else:\n if isinstance(data, Exception):\n raise data\n return data", "def test_encoding_round_trip(cell):\n orig = copy.copy(cell.__dict__)\n cell._from_serializeable_dict(cell._to_serializeable_dict())\n round_trip = cell.__dict__\n for key in cell._allowed:\n if type(orig[key]) == np.ndarray or type(orig[key]) == list:\n assert all(orig[key] == round_trip[key])\n else:\n assert orig[key] == round_trip[key]", "def test_json(self):\n\t\tdecoded = json.loads(json.dumps(self.filter.to_js_obj()))\n\t\tself.assertIsNotNone(decoded, \"JSON conversion failed!\")\n\t\tself.assertEqual(self.filter.to_js_obj(), decoded, \"JSON conversion mismatch!\")", "def test_to_json(self):\n self.amenity_json = self.amenity.to_json()\n actual = 1\n try:\n serialized = json.dumps(self.amenity_json)\n except:\n actual = 0\n self.assertTrue(1 == actual)", "def serialize(self):\n pass", "def serialize(self, obj):\n pass", "def test_serialize_deserialize1(self):\n for command in Command:\n serialized = command.serialize()\n deserialized = Command.deserialize(serialized)\n self.assertTrue(deserialized is command)", "def convertAndAssertJSONEqual(self, data, expected_data, msg=None):\n\n super(SimpleTestCase, self).assertJSONEqual(json.dumps(data, cls=DjangoJSONEncoder), expected_data, msg)", "def test_repr(self, cls):\n inst = cls()\n # Exact values aren't a concern so long as neither direction\n # raises an exception.\n pkl = cloudpickle.dumps(inst)\n cloudpickle.loads(pkl)", "def test_can_deserialize_plain_object(self):\n handler = BaseRestHandler(mock.MagicMock(), mock.MagicMock())\n handler._write_buffer = []\n obj = SerializeMe()\n obj.key = \"value\"\n handler.write_object(obj)\n res = json.loads(handler._write_buffer[0])\n self.assertDictEqual(res, {\"key\": \"value\"})", "def jucify(self):\n to_ret = 
False\n # TODO: finish this\n return to_ret", "def can_be_pickled(x):\n try:\n s = BytesIO() \n pickle.dump(x, s) \n return True\n except:\n return False", "def test_serialization(self):\n serialized = self.Gs.as_dict()\n unserialized = BayesianNetwork.from_dict(serialized)\n\n self.assertDictEqual(serialized, unserialized.as_dict())", "def __getstate__(self):\n raise IOError(\"You tried to serialize something that should not\"\n \" be serialized.\")", "def is_picklable(obj):\n try:\n pickle.dumps(obj)\n\n except pickle.PicklingError:\n return False\n return True", "def test_time_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def serialize(self, data):", "def is_text_serializer(serializer):\n return isinstance(serializer.dumps({}), text_type)", "def test_return_value(self):\n self.assertEqual(self.r0.from_json_string(self.string), self.d)", "def test_quantitative_filter_deserialization(self):\n serializer = serializers.FilterSerializer(data=self.external_filter)\n self.assertTrue(serializer.is_valid())\n self.assertDictEqual(serializer.validated_data, self.internal_filter)", "def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy", "def serialize(self, data):\n if isinstance(data, str):\n return data\n\n if hasattr(data, \"read\"):\n return data.read()\n\n raise ValueError(\"Unable to handle input format: %s\" % type(data))", "def converted(self):\n if self.converters:\n return self.converters.converted(self)\n return False", "def _check_primitive(self) -> PossibleResult[T]:\n if self.constructor in _PRIMITIVES:\n if self.obj is UNDEFINED:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n if self.obj is None:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n if not isinstance(self.obj, self.constructor):\n if not self.convert_primitives:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n try:\n return self.constructor(self.obj) # type: ignore\n except (ValueError, TypeError) as error:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n ) from error\n return self.obj\n return NO_RESULT", "def testRoundtrip(self):\n key = createKey()\n data = {u'user': u'aliafshar', u'id': u'91821212'}\n token = dataToToken(key, data)\n self.assertEqual(data, tokenToData(key, token))", "def test_roundtrip(self):\n self.read_container = self.roundtripContainer()\n self.assertIsNotNone(str(self.container)) # added as a test to make sure printing works\n self.assertIsNotNone(str(self.read_container))\n # make sure we get a completely new object\n self.assertNotEqual(id(self.container), id(self.read_container))\n self.assertIs(self.read_nwbfile.objects[self.container.object_id], self.read_container)\n self.assertContainerEqual(self.read_container, self.container)", "def is_serializable(instance_or_class: Any) -> bool:\n return hasattr(instance_or_class, SERDE_SCOPE)", "def _deserialiseLightweight(self,unpickled):\n if self.sampleid != unpickled['sampleId']:\n raise RuntimeError('sampleids do not match: '+self.sampleid+' '+unpickled['sampleId'])\n if self.condition != unpickled['condition']:\n raise RuntimeError('conditions do not match: '+self.condition+' '+unpickled['condition'])\n if self.wellids != unpickled['wellIds']:\n raise RuntimeError('wellids do not match: 
'+self.wellids+' '+unpickled['wellIds'])\n if self._wellIndices != unpickled['wellIndices']:\n raise RuntimeError('wellIndices do not match: '+self._wellIndices+' '+unpickled['wellIndices'])\n self._activeWellIndices=unpickled['activeWellIndices']", "def test_serialize(state):\n assert len(state.players) == 2\n st_data = state.to_data()\n\n assert st_data, \"Expect that we would have some data!\"\n assert len(st_data[\"deck\"]) == 52\n assert len(st_data[\"discarded\"]) == 0\n # Render player subset properly\n assert len(st_data[\"players\"]) == 2\n assert len(st_data[\"players\"][0][\"hand\"]) == 0\n\n new_state = MockState.from_data(st_data)\n assert new_state.__class__ == MockState\n st_data_new = new_state.to_data()\n\n assert st_data == st_data_new", "def test_serialize(self):\n self.assertEqual(self.blogs.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article'},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article'},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article'}\n ])", "def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def _to_serialize(value):\n return value.serialize() if value is not None else None", "def serialize(self):", "def dump_payload(self, obj):\n return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs))", "def test_dict_serialization(self, molecule):\n serialized = molecule.to_dict()\n molecule_copy = Molecule.from_dict(serialized)\n assert molecule == molecule_copy", "def test_json_serialization(self, molecule):\n molecule_copy = Molecule.from_json(molecule.to_json())\n assert molecule_copy == molecule\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def test_to_plain_python_obj_mixed(test_input):\n # It's enough that we don't get an exception here\n output = r.to_plain_python_obj(test_input)\n # We should not get a json conversion error\n json.dumps(output)", "def test_serialize_a_recommendation(self):\n recommendation = Recommendation(product_id=1, recommendation_product_id=2, relationship=Type.UP_SELL)\n data = recommendation.serialize()\n self.assertNotEqual(data, None)\n self.assertIn(\"product_id\", data)\n self.assertEqual(data[\"product_id\"], recommendation.product_id)\n self.assertIn(\"recommendation_product_id\", data)\n self.assertEqual(data[\"recommendation_product_id\"], recommendation.recommendation_product_id)\n self.assertIn(\"relationship\", data)\n self.assertEqual(data[\"relationship\"], recommendation.relationship.name)", "def test_generated_protocol_serialisation(self):\n # create a message\n reply_message = {1: \"number one\", 2: \"number two\", 7: \"number seven\"}\n # message 1\n message = TwoPartyNegotiationMessage(\n message_id=1,\n dialogue_reference=(str(0), \"\"),\n target=0,\n performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,\n reply_message=reply_message,\n )\n\n # serialise the message\n encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)\n\n # deserialise the message\n decoded_message = TwoPartyNegotiationSerializer().decode(\n encoded_message_in_bytes\n )\n\n # Compare the original message with the serialised+deserialised message\n assert decoded_message.message_id == 
message.message_id\n assert decoded_message.dialogue_reference == message.dialogue_reference\n assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0]\n assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1]\n assert decoded_message.target == message.target\n assert decoded_message.performative == message.performative\n assert decoded_message.reply_message == message.reply_message", "def test_boolean_in_serializer() -> None:\n assert cv.custom_serializer(cv.boolean) == {\n \"type\": \"boolean\",\n }", "def pickling_check(instance):\n pkled_instance = pickle.loads(pickle.dumps(instance))\n equality_check(instance, pkled_instance)", "def json(self):\r\n try:\r\n import simplejson as json\r\n except ImportError:\r\n import json\r\n return Assert(json.loads(self.obj))", "def test_user_profile_serialization(self):\n\n # Construct a json representation of a UserProfile model\n user_profile_model_json = {}\n user_profile_model_json['id'] = 'testString'\n user_profile_model_json['iam_id'] = 'testString'\n user_profile_model_json['realm'] = 'testString'\n user_profile_model_json['user_id'] = 'testString'\n user_profile_model_json['firstname'] = 'testString'\n user_profile_model_json['lastname'] = 'testString'\n user_profile_model_json['state'] = 'testString'\n user_profile_model_json['email'] = 'testString'\n user_profile_model_json['phonenumber'] = 'testString'\n user_profile_model_json['altphonenumber'] = 'testString'\n user_profile_model_json['photo'] = 'testString'\n user_profile_model_json['account_id'] = 'testString'\n\n # Construct a model instance of UserProfile by calling from_dict on the json representation\n user_profile_model = UserProfile.from_dict(user_profile_model_json)\n assert user_profile_model != False\n\n # Construct a model instance of UserProfile by calling from_dict on the json representation\n user_profile_model_dict = UserProfile.from_dict(user_profile_model_json).__dict__\n user_profile_model2 = UserProfile(**user_profile_model_dict)\n\n # Verify the model instances are equivalent\n assert user_profile_model == user_profile_model2\n\n # Convert model instance back to dict and verify no loss of data\n user_profile_model_json2 = user_profile_model.to_dict()\n assert user_profile_model_json2 == user_profile_model_json", "def test_default_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = sy.serialize(obj, to_proto=True)\n\n assert sy.serialize(obj) == blob", "def _check_serialize(self, kwargs):\n for k in kwargs:\n if k in self.backend.TO_SERIALIZE:\n if isinstance(kwargs[k], dict):\n kwargs[k] = {j: self.backend.serialize(kwargs[k][j])\n for j in kwargs[k]}\n elif isinstance(kwargs[k], list):\n kwargs[k] = [self.backend.serialize(j)\n for j in kwargs[k]]\n else:\n raise TypeError('Your iterable should be a dict or a list')\n return kwargs", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n 
messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def test_none(self):\n self.assertEqual(self.obj.to_json_string(None), '[]')", "def serialize(self, obj):\n return obj", "def objDictConsistency(self, cls, hard, dis=False):\n orig = cls(\"OK\")\n # the hard way\n if hard:\n p = self.dumpWithPreobjects(None, orig.__dict__, orig, dis=dis)\n d, obj = self.pickler.loads(p)[-1]\n else:\n p = self.dumpWithPreobjects(None, orig, orig.__dict__, dis=dis)\n obj, d = self.pickler.loads(p)[-1]\n self.assertTrue(type(obj)is type(orig))\n self.assertTrue(type(obj.__dict__) is type(orig.__dict__)) # @IgnorePep8\n self.assertEquals(set(obj.__dict__.keys()), set(orig.__dict__.keys()))\n self.assertTrue(obj.__dict__ is d)\n self.assertTrue(obj.isOk() is True)", "def backcast(self) -> bool:\n return self.__backcast", "def serialize(self):\n raise NotImplemented()", "def test_raw_data(self):\n self.assertEqual(self.tester.raw_data, 1)", "def serialize(self, data):\n return data", "def test_base_case_json(self):\n json_data = '{\"a\": 1}'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a\" : 1}'))", "def test_snp_json_roundtrip(self):\n given = self.ntwk1\n actual = rf.from_json_string(rf.to_json_string(given))\n self.assertEqual(actual, given)\n self.assertEqual(actual.frequency, given.frequency)\n self.assertEqual(actual.name, given.name)\n self.assertEqual(actual.comments, given.comments)\n self.assertEqual(actual.z0.tolist(), given.z0.tolist())\n self.assertEqual(actual.port_names, given.port_names)\n self.assertEqual(actual.variables, given.variables)", "def test_to_json_string(self):\n self.assertEqual(Base.to_json_string(None), \"[]\")\n self.assertTrue(type(Base.to_json_string(None)) is str)\n self.assertEqual(Base.to_json_string([]), \"[]\")\n self.assertTrue(type(Base.to_json_string([])) is str)\n myDict = {'id': 4, 'width': 3, 'height': 4, 'x': 1, 'y': 3}\n myDict2 = {'id': 3, 'width': 6, 'height': 2, 'x': 1, 'y': 9}\n jsonized = Base.to_json_string([myDict, myDict2])\n self.assertTrue(type(jsonized) is str)\n myDict3 = json.loads(jsonized)\n self.assertEqual(myDict3, [myDict, myDict2])", "def round_trip(message):\n return bits.bits_to_message(bits.message_to_bits(message)) == message" ]
[ "0.65497476", "0.64250165", "0.6318902", "0.6110213", "0.6092596", "0.60524327", "0.6047912", "0.60349566", "0.59723496", "0.59635586", "0.59397215", "0.59294575", "0.59294575", "0.59081197", "0.58982784", "0.58918864", "0.58906686", "0.5878805", "0.58623", "0.5853447", "0.5826075", "0.58153296", "0.5812717", "0.58071756", "0.5794471", "0.5785449", "0.5782361", "0.5774637", "0.57684493", "0.5760834", "0.57446843", "0.5724752", "0.5709062", "0.569147", "0.5687801", "0.56571716", "0.56280816", "0.5620996", "0.5619737", "0.56180525", "0.56041485", "0.5598943", "0.5598287", "0.5595587", "0.55944103", "0.55884326", "0.55857265", "0.5569962", "0.55649996", "0.5556069", "0.5549877", "0.5548616", "0.5548238", "0.55424994", "0.55392677", "0.5510641", "0.5503435", "0.5500031", "0.54999846", "0.54997605", "0.5494271", "0.5493163", "0.5482728", "0.54794973", "0.54633415", "0.54618675", "0.5460477", "0.5458001", "0.54513127", "0.544926", "0.54448813", "0.5435727", "0.5431473", "0.54137164", "0.5409626", "0.5409365", "0.54088485", "0.54026324", "0.5399572", "0.539559", "0.5390935", "0.53819275", "0.5372337", "0.53666735", "0.5365913", "0.53636456", "0.53634423", "0.5358752", "0.5352307", "0.5352307", "0.5345392", "0.5338859", "0.5332617", "0.5327083", "0.5325902", "0.53254426", "0.5323825", "0.53235537", "0.53190833", "0.53104806", "0.53097904" ]
0.0
-1
Roundtrip to check that what we serialise is what we get back.
def test_serialises_and_deserialises_hs00_message_correctly_when_int_input_is_not_ndarray(
    self,
):
    original_hist = {
        "source": "some_source",
        "timestamp": 123456,
        "current_shape": [2, 5],
        "dim_metadata": [
            {
                "length": 2,
                "unit": "b",
                "label": "y",
                "bin_boundaries": [10, 11, 12],
            },
            {
                "length": 5,
                "unit": "m",
                "label": "x",
                "bin_boundaries": [0, 1, 2, 3, 4, 5],
            },
        ],
        "last_metadata_timestamp": 123456,
        "data": [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]],
        "errors": [[5, 4, 3, 2, 1], [10, 9, 8, 7, 6]],
        "info": "info_string",
    }
    buf = serialise_hs00(original_hist)
    hist = deserialise_hs00(buf)
    assert hist["source"] == original_hist["source"]
    assert hist["timestamp"] == original_hist["timestamp"]
    assert hist["current_shape"] == original_hist["current_shape"]
    self._check_metadata_for_one_dimension(
        hist["dim_metadata"][0], original_hist["dim_metadata"][0]
    )
    self._check_metadata_for_one_dimension(
        hist["dim_metadata"][1], original_hist["dim_metadata"][1]
    )
    assert np.array_equal(hist["data"], original_hist["data"])
    assert np.array_equal(hist["errors"], original_hist["errors"])
    assert hist["info"] == original_hist["info"]
    assert (
        hist["last_metadata_timestamp"]
        == original_hist["last_metadata_timestamp"]
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dumps(self):\n result = self.mapper.dumps(self.serialization)\n self.mapper.to_dict.assert_called_once_with(\"custom\")\n self.serialization.assert_called_once_with(\n self.mapper.to_dict.return_value\n )\n self.assertIs(result, self.serialization.return_value)", "def _serialise(self):\n # TODO (M Foley)\n pass", "def test_return_type(self):\n self.assertEqual(type(self.obj.to_json_string(self.valid_ld)), str)", "def test_dump_single(self):\n result = self.serializer.dump(self.schema_to_serialize)\n self.assertIsInstance(result, dict)", "def revert(self):\n if hasattr(self, '_init_data'):\n self.deserialize(self._init_data)\n return True\n return False", "def test_serialize(self):\n self.assert_raises(TypeError, self.instance.serialize, (1,))", "def test_serialize_no_metadata(self):\n pass # pragma: no cover", "def isJWS_unserialized_single(x):\n if isinstance(x, dict) \\\n and \"payload\" in x and \"signature\" in x \\\n and (\"protected\" in x or \"unprotected\" in x):\n try: \n if \"protected\" in x: \n json.loads(x[\"protected\"])\n return True\n except:\n return False\n else: \n return False", "def roundtrip(data):\r\n body = xmlrpclib.dumps(data)\r\n result = xmlrpclib.loads(body)[0]\r\n if result != data:\r\n print result", "def test_serialize(value, expected):\n assert json_dumps(value) == expected", "def assertDeserializeNonString(self):\r\n self.assertDeserializeEqual(None, None)\r\n self.assertDeserializeEqual(3.14, 3.14)\r\n self.assertDeserializeEqual(True, True)\r\n self.assertDeserializeEqual([10], [10])\r\n self.assertDeserializeEqual({}, {})\r\n self.assertDeserializeEqual([], [])\r\n self.assertDeserializeEqual(None, 'null')", "def test_serialize(self):\n self.assertEqual(self.scrapes.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article', 'authors': [{'lastname': u'Swain', 'firstname': u'Matt'}]},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article', 'authors': [{'lastname': u'Smith', 'firstname': u'John'}]},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article', 'authors': [{'lastname': u'Bond', 'firstname': u'James'}]}\n ])", "def test_serialize(self):\n self.assertEqual(self.scrapes.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article', 'authors': [{'lastname': u'Swain', 'firstname': u'Matt'}]},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article', 'authors': [{'lastname': u'Smith', 'firstname': u'John'}]},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article', 'authors': [{'lastname': u'Bond', 'firstname': u'James'}]}\n ])", "def test_serialize(self):\n r = self.RNA(\"ugagg\")\n assert dumps(r)", "def is_sedes(obj):\n return hasattr(obj, 'serialize') and hasattr(obj, 'deserialize')", "def isJWP_unserialized(x):\n if isinstance(x, dict) \\\n and \"payload\" in x and isinstance(x[\"unprotected\"], dict)\\\n and \"unprotected\" in x and \"signature\" not in x:\n return True\n else:\n return False", "def isJWE_unserialized(x):\n return isJWE_unserialized_single(x) or isJWE_unserialized_multi(x)", "def test_serialize_sinfo(self):\n self.assert_raises(RuntimeError, self.instance.serialize,\n self.testing_options['objects'][0],\n add_serializer_info=True)", "def isJWS_unserialized(x):\n return isJWS_unserialized_single(x) or isJWS_unserialized_multi(x)", "def test_dumps(self):\n data = \"something\"\n result = self.mapper.loads(self.deser_fn, data)\n self.mapper.from_dict.assert_called_once_with(\n self.deser_fn.return_value, \"custom\"\n 
)\n self.deser_fn.assert_called_once_with(data)\n self.assertIs(result, self.mapper.from_dict.return_value)", "def is_stringified(self) -> bool:\n return self._stringify", "def isJWE_unserialized_single(x):\n if isinstance(x, dict) \\\n and (\"unprotected\" in x or \"protected\" in x) \\\n and (\"ciphertext\" in x):\n try:\n if \"protected\" in x:\n json.loads(x[\"protected\"])\n return True\n except:\n return False\n else:\n return False", "def test_encode(self):\n result = json.loads(self.cls.objects.to_json())\n for index, item in enumerate(result):\n self.assertNotIn(\n \"to_json_exclude\", item,\n (\"to_json_exclude found at index {}\").format(index)\n )\n self.assertNotIn(\n \"json_exclude\", item,\n (\"json_exclude found at index {}\").format(index)\n )\n self.assertIn(\n \"from_json_exclude\", item,\n (\"from_json_exclude not found at index {}\").format(index)\n )\n self.assertIn(\n \"required\", item,\n (\"required not found at index {}\").format(index)\n )", "def _post_deserialize (self):\n pass", "def test_serialize_none(self):\n self.assertEqual(serialize(None), 'null')", "def isJOSE_unserialized(x):\n return isJWS_unserialized(x) or isJWE_unserialized(x) \\\n or isJWP_unserialized(x)", "def test_08(self):\n ret = Base.to_json_string(None)\n self.assertEqual(ret, \"[]\")", "def assertSerializeDeserialize(self, obj, version=None):\n tested_versions = [version] if version is not None else Version.supported\n for version in tested_versions:\n constructor = obj.__class__.from_json\n json_obj = obj.to_json(version)\n clone = constructor(json_obj)\n\n self.assertEqual(obj.__class__, clone.__class__)\n\n if isinstance(obj, dict):\n orig_dict = obj\n clone_dict = clone\n else:\n orig_dict = obj.__dict__\n clone_dict = clone.__dict__\n\n self.assertEqual(orig_dict, clone_dict)", "def test_quantitative_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def is_jsonable(self, input):\n try:\n json.dumps(input)\n return True\n except (TypeError, OverflowError):\n return False", "def test_encode_decode(self):\n assert self._test == pybinn.loads(pybinn.dumps(self._test))", "def test_valid_ld(self):\n self.assertEqual(self.obj.to_json_string(self.valid_ld),\n json.dumps(self.valid_ld))", "def assertDeserializeEqual(self, expected, arg):\r\n assert_equals(expected, deserialize_field(self.test_field(), arg))", "def test_dumps(self):\n schema = self.UnitTestingSchema()\n serializer = JSONSchema()\n self.assertIsInstance(serializer.dumps(schema), str)", "def test_serialization_deserialization(self):\n\n original_time = now()\n serialized_time = DatetimeMapper.forward(original_time)\n assert serialized_time == original_time.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n\n deserialized_time = DatetimeMapper.backward(serialized_time)\n assert original_time == deserialized_time\n\n deserialized_time = DatetimeMapper.backward(None)\n assert isinstance(deserialized_time, datetime)", "def _TestReadSerialized(self, serializer_object, json_dict):\n # We use json.dumps to make sure the dict does not serialize into\n # an invalid JSON string such as one that contains Python string prefixes\n # like b'' or u''.\n json_string = json.dumps(json_dict)\n unserialized_object = serializer_object.ReadSerialized(json_string)\n\n self.assertIsNotNone(unserialized_object)\n return unserialized_object", "def test_non_list_of_dicts_arg(self):\n self.assertEqual(self.obj.to_json_string(666), '666')", "def 
test_deserialize(self):\r\n\r\n # test that from_json produces no exceptions\r\n self.assertDeserializeEqual('10:20:30', '\"10:20:30\"')", "def check_round_trip(data: dict, logger: Logger) -> Optional[bytes]:\n try:\n as_json_text = json.dumps(data, default=encode_value).encode(\"utf-8\")\n except Exception as e:\n report_error(\"CumulusCI found an unusual datatype in your config:\", e, logger)\n return None\n try:\n test_load = load_config_from_json_or_pickle(as_json_text)\n assert _simplify_config(test_load) == _simplify_config(\n data\n ), f\"JSON did not round-trip-cleanly {test_load}, {data}\"\n except Exception as e: # pragma: no cover\n report_error(\"CumulusCI found a problem saving your config:\", e, logger)\n return None\n assert isinstance(as_json_text, bytes)\n return as_json_text", "def is_raw(self):\n return not self.has_structure", "def parse_round_trip(self):\n parsed = self.test_proto.parse()\n round_trip = avro.protocol.parse(str(parsed))\n self.assertEqual(parsed, round_trip)", "def test_serialize_and_deserialize_returns_unchanged_collection(\n self\n ) -> None:\n self.assertEqual(\n self.collection.to_dict(),\n collection_domain.Collection.deserialize(\n self.collection.serialize()).to_dict())", "def test_serialize(self, val):\n val_orig = FitVal(*val)\n\n ser = json.dumps(val_orig, cls=ExperimentEncoder)\n val_deser = json.loads(ser, cls=ExperimentDecoder)\n\n self.assertEqual(val_orig, val_deser)", "def recvcheck(self):\n\n data = self.recv()\n if self.serializer == 'json' and data and isinstance(data, dict):\n if '@error' in data:\n exception = getattr(builtins, data['@error'])\n if (isinstance(exception, type) and\n issubclass(exception, Exception)):\n raise exception(data['@message'])\n else:\n if isinstance(data, Exception):\n raise data\n return data", "def test_encoding_round_trip(cell):\n orig = copy.copy(cell.__dict__)\n cell._from_serializeable_dict(cell._to_serializeable_dict())\n round_trip = cell.__dict__\n for key in cell._allowed:\n if type(orig[key]) == np.ndarray or type(orig[key]) == list:\n assert all(orig[key] == round_trip[key])\n else:\n assert orig[key] == round_trip[key]", "def test_json(self):\n\t\tdecoded = json.loads(json.dumps(self.filter.to_js_obj()))\n\t\tself.assertIsNotNone(decoded, \"JSON conversion failed!\")\n\t\tself.assertEqual(self.filter.to_js_obj(), decoded, \"JSON conversion mismatch!\")", "def test_to_json(self):\n self.amenity_json = self.amenity.to_json()\n actual = 1\n try:\n serialized = json.dumps(self.amenity_json)\n except:\n actual = 0\n self.assertTrue(1 == actual)", "def serialize(self):\n pass", "def serialize(self, obj):\n pass", "def test_serialize_deserialize1(self):\n for command in Command:\n serialized = command.serialize()\n deserialized = Command.deserialize(serialized)\n self.assertTrue(deserialized is command)", "def convertAndAssertJSONEqual(self, data, expected_data, msg=None):\n\n super(SimpleTestCase, self).assertJSONEqual(json.dumps(data, cls=DjangoJSONEncoder), expected_data, msg)", "def test_repr(self, cls):\n inst = cls()\n # Exact values aren't a concern so long as neither direction\n # raises an exception.\n pkl = cloudpickle.dumps(inst)\n cloudpickle.loads(pkl)", "def test_can_deserialize_plain_object(self):\n handler = BaseRestHandler(mock.MagicMock(), mock.MagicMock())\n handler._write_buffer = []\n obj = SerializeMe()\n obj.key = \"value\"\n handler.write_object(obj)\n res = json.loads(handler._write_buffer[0])\n self.assertDictEqual(res, {\"key\": \"value\"})", "def jucify(self):\n to_ret = 
False\n # TODO: finish this\n return to_ret", "def can_be_pickled(x):\n try:\n s = BytesIO() \n pickle.dump(x, s) \n return True\n except:\n return False", "def test_serialization(self):\n serialized = self.Gs.as_dict()\n unserialized = BayesianNetwork.from_dict(serialized)\n\n self.assertDictEqual(serialized, unserialized.as_dict())", "def __getstate__(self):\n raise IOError(\"You tried to serialize something that should not\"\n \" be serialized.\")", "def is_picklable(obj):\n try:\n pickle.dumps(obj)\n\n except pickle.PicklingError:\n return False\n return True", "def test_time_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def serialize(self, data):", "def is_text_serializer(serializer):\n return isinstance(serializer.dumps({}), text_type)", "def test_return_value(self):\n self.assertEqual(self.r0.from_json_string(self.string), self.d)", "def test_quantitative_filter_deserialization(self):\n serializer = serializers.FilterSerializer(data=self.external_filter)\n self.assertTrue(serializer.is_valid())\n self.assertDictEqual(serializer.validated_data, self.internal_filter)", "def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy", "def serialize(self, data):\n if isinstance(data, str):\n return data\n\n if hasattr(data, \"read\"):\n return data.read()\n\n raise ValueError(\"Unable to handle input format: %s\" % type(data))", "def converted(self):\n if self.converters:\n return self.converters.converted(self)\n return False", "def _check_primitive(self) -> PossibleResult[T]:\n if self.constructor in _PRIMITIVES:\n if self.obj is UNDEFINED:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n if self.obj is None:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n if not isinstance(self.obj, self.constructor):\n if not self.convert_primitives:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n try:\n return self.constructor(self.obj) # type: ignore\n except (ValueError, TypeError) as error:\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n ) from error\n return self.obj\n return NO_RESULT", "def testRoundtrip(self):\n key = createKey()\n data = {u'user': u'aliafshar', u'id': u'91821212'}\n token = dataToToken(key, data)\n self.assertEqual(data, tokenToData(key, token))", "def test_roundtrip(self):\n self.read_container = self.roundtripContainer()\n self.assertIsNotNone(str(self.container)) # added as a test to make sure printing works\n self.assertIsNotNone(str(self.read_container))\n # make sure we get a completely new object\n self.assertNotEqual(id(self.container), id(self.read_container))\n self.assertIs(self.read_nwbfile.objects[self.container.object_id], self.read_container)\n self.assertContainerEqual(self.read_container, self.container)", "def is_serializable(instance_or_class: Any) -> bool:\n return hasattr(instance_or_class, SERDE_SCOPE)", "def _deserialiseLightweight(self,unpickled):\n if self.sampleid != unpickled['sampleId']:\n raise RuntimeError('sampleids do not match: '+self.sampleid+' '+unpickled['sampleId'])\n if self.condition != unpickled['condition']:\n raise RuntimeError('conditions do not match: '+self.condition+' '+unpickled['condition'])\n if self.wellids != unpickled['wellIds']:\n raise RuntimeError('wellids do not match: 
'+self.wellids+' '+unpickled['wellIds'])\n if self._wellIndices != unpickled['wellIndices']:\n raise RuntimeError('wellIndices do not match: '+self._wellIndices+' '+unpickled['wellIndices'])\n self._activeWellIndices=unpickled['activeWellIndices']", "def test_serialize(state):\n assert len(state.players) == 2\n st_data = state.to_data()\n\n assert st_data, \"Expect that we would have some data!\"\n assert len(st_data[\"deck\"]) == 52\n assert len(st_data[\"discarded\"]) == 0\n # Render player subset properly\n assert len(st_data[\"players\"]) == 2\n assert len(st_data[\"players\"][0][\"hand\"]) == 0\n\n new_state = MockState.from_data(st_data)\n assert new_state.__class__ == MockState\n st_data_new = new_state.to_data()\n\n assert st_data == st_data_new", "def test_serialize(self):\n self.assertEqual(self.blogs.serialize(), [\n {'content': [u'First para', u'Second para'], 'title': u'First article'},\n {'content': [u'Para 1', u'Para 2'], 'title': u'Second article'},\n {'content': [u'Thing one', u'Thing two'], 'title': u'Third article'}\n ])", "def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def _to_serialize(value):\n return value.serialize() if value is not None else None", "def serialize(self):", "def dump_payload(self, obj):\n return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs))", "def test_dict_serialization(self, molecule):\n serialized = molecule.to_dict()\n molecule_copy = Molecule.from_dict(serialized)\n assert molecule == molecule_copy", "def test_json_serialization(self, molecule):\n molecule_copy = Molecule.from_json(molecule.to_json())\n assert molecule_copy == molecule\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def test_to_plain_python_obj_mixed(test_input):\n # It's enough that we don't get an exception here\n output = r.to_plain_python_obj(test_input)\n # We should not get a json conversion error\n json.dumps(output)", "def test_serialize_a_recommendation(self):\n recommendation = Recommendation(product_id=1, recommendation_product_id=2, relationship=Type.UP_SELL)\n data = recommendation.serialize()\n self.assertNotEqual(data, None)\n self.assertIn(\"product_id\", data)\n self.assertEqual(data[\"product_id\"], recommendation.product_id)\n self.assertIn(\"recommendation_product_id\", data)\n self.assertEqual(data[\"recommendation_product_id\"], recommendation.recommendation_product_id)\n self.assertIn(\"relationship\", data)\n self.assertEqual(data[\"relationship\"], recommendation.relationship.name)", "def test_generated_protocol_serialisation(self):\n # create a message\n reply_message = {1: \"number one\", 2: \"number two\", 7: \"number seven\"}\n # message 1\n message = TwoPartyNegotiationMessage(\n message_id=1,\n dialogue_reference=(str(0), \"\"),\n target=0,\n performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,\n reply_message=reply_message,\n )\n\n # serialise the message\n encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)\n\n # deserialise the message\n decoded_message = TwoPartyNegotiationSerializer().decode(\n encoded_message_in_bytes\n )\n\n # Compare the original message with the serialised+deserialised message\n assert decoded_message.message_id == 
message.message_id\n assert decoded_message.dialogue_reference == message.dialogue_reference\n assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0]\n assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1]\n assert decoded_message.target == message.target\n assert decoded_message.performative == message.performative\n assert decoded_message.reply_message == message.reply_message", "def test_boolean_in_serializer() -> None:\n assert cv.custom_serializer(cv.boolean) == {\n \"type\": \"boolean\",\n }", "def pickling_check(instance):\n pkled_instance = pickle.loads(pickle.dumps(instance))\n equality_check(instance, pkled_instance)", "def json(self):\r\n try:\r\n import simplejson as json\r\n except ImportError:\r\n import json\r\n return Assert(json.loads(self.obj))", "def test_user_profile_serialization(self):\n\n # Construct a json representation of a UserProfile model\n user_profile_model_json = {}\n user_profile_model_json['id'] = 'testString'\n user_profile_model_json['iam_id'] = 'testString'\n user_profile_model_json['realm'] = 'testString'\n user_profile_model_json['user_id'] = 'testString'\n user_profile_model_json['firstname'] = 'testString'\n user_profile_model_json['lastname'] = 'testString'\n user_profile_model_json['state'] = 'testString'\n user_profile_model_json['email'] = 'testString'\n user_profile_model_json['phonenumber'] = 'testString'\n user_profile_model_json['altphonenumber'] = 'testString'\n user_profile_model_json['photo'] = 'testString'\n user_profile_model_json['account_id'] = 'testString'\n\n # Construct a model instance of UserProfile by calling from_dict on the json representation\n user_profile_model = UserProfile.from_dict(user_profile_model_json)\n assert user_profile_model != False\n\n # Construct a model instance of UserProfile by calling from_dict on the json representation\n user_profile_model_dict = UserProfile.from_dict(user_profile_model_json).__dict__\n user_profile_model2 = UserProfile(**user_profile_model_dict)\n\n # Verify the model instances are equivalent\n assert user_profile_model == user_profile_model2\n\n # Convert model instance back to dict and verify no loss of data\n user_profile_model_json2 = user_profile_model.to_dict()\n assert user_profile_model_json2 == user_profile_model_json", "def test_default_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = sy.serialize(obj, to_proto=True)\n\n assert sy.serialize(obj) == blob", "def _check_serialize(self, kwargs):\n for k in kwargs:\n if k in self.backend.TO_SERIALIZE:\n if isinstance(kwargs[k], dict):\n kwargs[k] = {j: self.backend.serialize(kwargs[k][j])\n for j in kwargs[k]}\n elif isinstance(kwargs[k], list):\n kwargs[k] = [self.backend.serialize(j)\n for j in kwargs[k]]\n else:\n raise TypeError('Your iterable should be a dict or a list')\n return kwargs", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n 
messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def test_none(self):\n self.assertEqual(self.obj.to_json_string(None), '[]')", "def serialize(self, obj):\n return obj", "def objDictConsistency(self, cls, hard, dis=False):\n orig = cls(\"OK\")\n # the hard way\n if hard:\n p = self.dumpWithPreobjects(None, orig.__dict__, orig, dis=dis)\n d, obj = self.pickler.loads(p)[-1]\n else:\n p = self.dumpWithPreobjects(None, orig, orig.__dict__, dis=dis)\n obj, d = self.pickler.loads(p)[-1]\n self.assertTrue(type(obj)is type(orig))\n self.assertTrue(type(obj.__dict__) is type(orig.__dict__)) # @IgnorePep8\n self.assertEquals(set(obj.__dict__.keys()), set(orig.__dict__.keys()))\n self.assertTrue(obj.__dict__ is d)\n self.assertTrue(obj.isOk() is True)", "def backcast(self) -> bool:\n return self.__backcast", "def serialize(self):\n raise NotImplemented()", "def test_raw_data(self):\n self.assertEqual(self.tester.raw_data, 1)", "def serialize(self, data):\n return data", "def test_base_case_json(self):\n json_data = '{\"a\": 1}'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a\" : 1}'))", "def test_snp_json_roundtrip(self):\n given = self.ntwk1\n actual = rf.from_json_string(rf.to_json_string(given))\n self.assertEqual(actual, given)\n self.assertEqual(actual.frequency, given.frequency)\n self.assertEqual(actual.name, given.name)\n self.assertEqual(actual.comments, given.comments)\n self.assertEqual(actual.z0.tolist(), given.z0.tolist())\n self.assertEqual(actual.port_names, given.port_names)\n self.assertEqual(actual.variables, given.variables)", "def test_to_json_string(self):\n self.assertEqual(Base.to_json_string(None), \"[]\")\n self.assertTrue(type(Base.to_json_string(None)) is str)\n self.assertEqual(Base.to_json_string([]), \"[]\")\n self.assertTrue(type(Base.to_json_string([])) is str)\n myDict = {'id': 4, 'width': 3, 'height': 4, 'x': 1, 'y': 3}\n myDict2 = {'id': 3, 'width': 6, 'height': 2, 'x': 1, 'y': 9}\n jsonized = Base.to_json_string([myDict, myDict2])\n self.assertTrue(type(jsonized) is str)\n myDict3 = json.loads(jsonized)\n self.assertEqual(myDict3, [myDict, myDict2])", "def round_trip(message):\n return bits.bits_to_message(bits.message_to_bits(message)) == message" ]
[ "0.65497476", "0.64250165", "0.6318902", "0.6110213", "0.6092596", "0.60524327", "0.6047912", "0.60349566", "0.59723496", "0.59635586", "0.59397215", "0.59294575", "0.59294575", "0.59081197", "0.58982784", "0.58918864", "0.58906686", "0.5878805", "0.58623", "0.5853447", "0.5826075", "0.58153296", "0.5812717", "0.58071756", "0.5794471", "0.5785449", "0.5782361", "0.5774637", "0.57684493", "0.5760834", "0.57446843", "0.5724752", "0.5709062", "0.569147", "0.5687801", "0.56571716", "0.56280816", "0.5620996", "0.5619737", "0.56180525", "0.56041485", "0.5598943", "0.5598287", "0.5595587", "0.55944103", "0.55884326", "0.55857265", "0.5569962", "0.55649996", "0.5556069", "0.5549877", "0.5548616", "0.5548238", "0.55424994", "0.55392677", "0.5510641", "0.5503435", "0.5500031", "0.54999846", "0.54997605", "0.5494271", "0.5493163", "0.5482728", "0.54794973", "0.54633415", "0.54618675", "0.5460477", "0.5458001", "0.54513127", "0.544926", "0.54448813", "0.5435727", "0.5431473", "0.54137164", "0.5409626", "0.5409365", "0.54088485", "0.54026324", "0.5399572", "0.539559", "0.5390935", "0.53819275", "0.5372337", "0.53666735", "0.5365913", "0.53636456", "0.53634423", "0.5358752", "0.5352307", "0.5352307", "0.5345392", "0.5338859", "0.5332617", "0.5327083", "0.5325902", "0.53254426", "0.5323825", "0.53235537", "0.53190833", "0.53104806", "0.53097904" ]
0.0
-1
In rare cases, pandas can produce broken datasets when writing to HDF5. This function can be used to delete them so they can be either downloaded again or discarded. USE WITH UTMOST CARE
def del_sensordata(self):
    organisation_id = '5af01e0210bac288dba249ad'
    animal_id = '5b6419ff36b96c52808951b1'
    with self.writefile() as file:
        del file[f'data/{organisation_id}/{animal_id}/sensordata']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_ds(self, dt):\n\n for k in self.datasets_keys:\n for F in self.datasets[k]:\n if F not in data[k].keys():\n continue \n max_date = data[k][F]['max_date'] \n \"\"\" Deleting unecessary ds \"\"\"\n if dt > max_date : # check max date and check if data is still loaded\n print(blue + 'Memory used before deleting : ' , process.memory_info().rss/1000000000 , cend) \n del data[k][F] \n print(\"*** Erasing dataset: \" , k , ' ' , F ) \n print(blue + 'Memory used after deleting : ' , process.memory_info().rss/1000000000 , cend) \n \n else:\n continue", "def _clean_up_temporary_files(dataset_dir):\n filename = _DATA_URL.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n tf.gfile.Remove(filepath)\n\n tmp_dir = os.path.join(dataset_dir, 'cifar-100-python')\n tf.gfile.DeleteRecursively(tmp_dir)", "def clear_data():\n for i in range(_MAX_NUM_TESTS):\n rand, ref = filename(i)\n if os.path.exists(rand):\n os.remove(rand)\n if os.path.exists(ref):\n os.remove(ref)", "def remove_group(self):\n try:\n with open_hdf5(self.file_name, mode=\"a\") as hdf_file:\n del hdf_file[self.h5_path]\n except KeyError:\n pass", "def __del__(self):\n self.h5file.close()", "def delete_dataset(dataset_path):\n force_rmtree(dataset_path)", "def _clean_up_temporary_files(dataset_dir):\n return", "def clean_chunk_files(dirpath):\n workdir = os.getcwd()\n os.chdir(dirpath)\n for filename in glob.glob(\"[0-9]*_[0-9]*_[0-9]*.hdf5\"):\n os.remove(filename)\n os.chdir(workdir)", "def delete_temp_dataset():\n\n bq.delete_dataset(temp_dataset_ref, delete_contents=True, not_found_ok=True)", "def close_and_delete_hdf5_handle(self):\n if self._hdf5_file is not None:\n self._hdf5_file.close()\n self._hdf5_file = None", "def teardown(self):\n super(TestCisPandasOutput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "def check_hdf5_files(database):\n\n logger.info(\" Checking dataset Integrity\")\n remove_file = []\n for fname in database:\n try:\n f = h5py.File(fname, 'r')\n mol_names = list(f.keys())\n if len(mol_names) == 0:\n warnings.warn(' -> %s is empty ' % fname)\n remove_file.append(fname)\n f.close()\n except BaseException:\n warnings.warn(' -> %s is corrputed ' % fname)\n remove_file.append(fname)\n\n for name in remove_file:\n database.remove(name)\n if remove_file:\n logger.info(f'\\t -> Empty or corrput databases are removed:\\n'\n f'{remove_file}')\n\n return database", "def clear_brain():\n\n if os.path.exists(os.path.abspath(\"papaya_data\")):\n shutil.rmtree(os.path.abspath(\"papaya_data\"))", "def cleanup(self):\n if self.cleanup_allowed:\n shutil.rmtree(self.out_dir)\n self.train_df, self.valid_df, self.test_df = None, None, None", "def close_hdf_file(self):\n\t\tself.h5f.close()", "def del_data(self, name):\n raise NotImplementedError('Do I want to delete data from a df?')", "def cleanup(self):\r\n for f in [i for d in self.data.values() for i in d[\"filenames\"]]:\r\n try:\r\n os.unlink(f)\r\n except Exception: pass\r\n self.Destroy()", "def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast 
set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... Done.')", "def disconnectAllHdfDBs() -> None:\n from armi.bookkeeping.db import Database3\n\n h5dbs = [db for db in gc.get_objects() if isinstance(db, Database3)]\n for db in h5dbs:\n db.close()", "def tearDown(self):\n\n self.h5file.close()\n self.h5file = None\n Path(self.h5fname).unlink() # comment this for debug only\n super().tearDown()", "def close_file(self, data_set):\n if hasattr(data_set, '_h5_base_group'):\n data_set._h5_base_group.close()\n # Removes reference to closed file\n del data_set._h5_base_group\n else:\n logging.warning(\n 'Cannot close file, data_set has no open hdf5 file')", "def cleanup():\n cat = CSVCatalog.CSVCatalog()\n cat.drop_table(\"people\", force_drop=True)\n cat.drop_table(\"batting\", force_drop=True)\n cat.drop_table(\"teams\", force_drop=True)", "def clean(args):\n with_dataset(args, Dataset._clean)", "def delete_datasets(self, base_url):\n response = requests.get(base_url + '/testdata')\n for index in range(len(response.json()['testdata'])):\n self.delete_dataset(base_url, response.json()['testdata'][index]['dataset'])", "def cleanup_file(name: str):\n if os.path.exists(name) and os.path.isfile(name): # h5\n os.remove(name)\n elif os.path.exists(name) and os.path.isdir(name): # tf\n shutil.rmtree(name)", "def _delete_cache_metadata(self, force_delete_file):\n if force_delete_file:\n self._delete_dirs_datasets_in_cache_dir_except_downloads()\n else:\n msg = 'All metadata files of all datasets will be lost if you proceed! 
' + \\\n 'Set both \\'force_delete_file=True\\' and \\'force_delete_metadata=True\\' ' + \\\n 'to proceed with the deletion of dbcollection.json and all metadata files.'\n warnings.warn(msg, UserWarning, stacklevel=2)", "def teardown(self):\n super(TestCisPandasInput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "def cleanup():\n cat = CSVCatalog.CSVCatalog()\n cat.drop_table(\"people\")\n cat.drop_table(\"batting\")\n cat.drop_table(\"teams\")", "def tearDown(cls):\n\n # cls.test_mmp_series_object.clean_out_data_seriesobj()\n # reusable data struct\n cls.test_mmp_series_object.clean_out_data_seriesobj()\n cls.test_dataset_testresults.clear()\n # reusable results file\n # os.remove(cls.temp_file_output_series.name)", "def delete_dataset(self, dataset):\n raise NotImplementedError('delete_dataset')", "def teardown_module(module):\n for datafile in datafiles:\n os.remove(datafile)", "def clean_data_fragments(self) -> None:\n read_path: Path = Path(os.environ[\"DATA_PATH\"]) / \"fragments\"\n try:\n shutil.rmtree(read_path / \"__MACOSX\")\n except FileNotFoundError:\n print('Folder \"__MACOSX\" already removed.')\n\n # delete non-binarized images\n frag_paths: list = list((read_path / \"image-data\").iterdir())\n frags_binarized: list = [fp for fp in frag_paths if \"binarized\" in fp.name]\n frags_delete: set = set(frag_paths).difference(set(frags_binarized))\n for frag in frags_delete:\n frag.unlink()\n frag_paths = frags_binarized\n for frag_path in frag_paths:\n # Python 3.8 hack, seems to be supported without str() on 3.9\n shutil.move(str(frag_path.resolve()), str(read_path.resolve()))\n\n (read_path / \"image-data\").rmdir() # delete empty folder", "def delete(dtype, name, rootdir=None):\n # type and the name\n # delete them\n num_deleted = 0\n for dataset in FreezableAPI.datasets(dtype,name,rootdir=rootdir,fullpath=True):\n # delete it\n shutil.rmtree(dataset)\n num_deleted += 1\n return num_deleted", "def delete_all():\n if os.path.exists(DATA_DIR):\n shutil.rmtree(DATA_DIR)", "async def delete_raw_data():\n await expire_directories(\".rewrite\", REWRITE_DAYS)\n await expire_directories(\"undelete\", UNDELETE_DAYS)\n\n cutoff = datetime.now(timezone.utc) - timedelta(days=DATA_LAKE_DAYS)\n # wraparound to previous month, just in case\n last_month = cutoff - timedelta(days=cutoff.day + 1)\n for day in (\n last_month,\n cutoff,\n ):\n await expire_directories(\n storage.iothub_data_dir + day.strftime(\"/%Y/%m\"), DATA_LAKE_DAYS,\n )", "def clean_dataset(dataset, pollsters, output_name):\n # No banned pollsters\n no_banned = pollsters[pollsters['Banned by 538'] == 'no'].Pollster\n # Interviews with no banned pollster\n dataset = dataset[dataset.pollster.isin(no_banned)]\n # Non tracked interviews\n dataset = dataset[dataset.tracking == False]\n # Reset index\n dataset = dataset.reset_index(drop=True)\n # Save clean data\n dataset.to_csv(\"./data/\" + output_name, index=False)\n # Return clean data\n return dataset", "def test_cleanup():\n os.remove(test_file[:-4] + \"_no_grave.h5m\")", "def delete_data_file(path):\n cprint(f\"### Function Name:-> {inspect.stack()[0][3]} ###\", 'yellow', 'on_grey', attrs=['bold'])\n try:\n os.remove(path)\n except FileNotFoundError:\n pass", "def _truncate_heart_data(session):\n session.execute('''DELETE FROM Heart''')\n logger.info(\"truncating Heart table\")", "def delete_intermediate_csvs(wk_dir):\n # Remove intermediate csv tables\n out_files = os.listdir(wk_dir)\n delete_keys = [\"int_metrics\",\"region_dims\"]\n 
delete_list = [f for f in out_files if any(x in f for x in delete_keys)]\n for f in delete_list:\n os.remove(f)", "def purge():\n all_hashes = read_all()\n used_hashes = read_used()\n\n for kind, hashes in used_hashes.items():\n to_remove = all_hashes[kind].difference(hashes)\n if kind == 'evs':\n delete_from_directory_by_hashes(EV_DIRECTORY, to_remove)\n elif kind == 'cache':\n delete_from_directory_by_hashes(CACHE_DIRECTORY, to_remove)\n elif kind == 'seeds':\n delete_from_directory_by_hashes(SEED_DIRECTORY, to_remove)\n\n reset_used()", "def tearDownClass(cls):\n # cleanup and close HDF5 file\n super().tearDownClass()\n cls.f.cleanup()", "def prune_data(self, ts):\n sql = \"delete from %s where dateTime < %d\" % (self.dbm.table_name, ts)\n self.dbm.getSql(sql)\n try:\n # sqlite databases need some help to stay small\n self.dbm.getSql('vacuum')\n except Exception as e:\n pass", "def __del__(self):\r\n train_data_sources = list(self._train_data.values())\r\n test_data_sources = list(self._test_data.values())\r\n all_data_sources = train_data_sources + test_data_sources\r\n for data_source in all_data_sources:\r\n data_source.cleanup()\r\n self._tester.__del__()", "def remove_dataset_dbgap_link(apps, schema_editor):\n SourceDataset = apps.get_model('trait_browser', 'SourceDataset')\n for dataset in SourceDataset.objects.all():\n dataset.dbgap_link = ''\n dataset.save()", "def clear_db():\n from example_data import ExampleDataLoader\n ExampleDataLoader.clean_db()", "def remove_data():\n # Removing the existing data\n col_answer_given.remove()\n col_answer_not_given.remove()\n col_q_not_given.remove()\n col_to_summarize.remove()", "def delLocalData(self):\n try:\n if len(self.localFilename): os.remove(self.localFilename)\n except Exception as e:\n pass", "def delete_dataset_without_original_url():\n logging.warning(\n \"*** deleting all netex files created by transport.data.gouv.fr ***\"\n )\n r = requests.get(\"https://transport.data.gouv.fr/api/datasets\")\n r.raise_for_status()\n datasets = r.json()\n\n print_resource = lambda r: f\"\\n\\t*[url = {r['url']} | extras = {r.get('extras')}]\"\n print_resources = lambda rs: [print_resource(r) for r in rs]\n\n for d in datasets:\n dataset_name = d[\"title\"]\n if d[\"type\"] != \"public-transit\":\n continue\n\n dataset_id = d[\"id\"]\n\n community_resources = _find_community_resources(dataset_id)\n logging.info(\"community ressources : %s\", print_resources(community_resources))\n old_community_resources = [\n r\n for r in community_resources\n if \"transport:original_resource_url\" not in r.get(\"extras\", {})\n ]\n if old_community_resources:\n logging.info(\n \"old community ressources : %s\",\n print_resources(old_community_resources),\n )\n _delete_community_resources(dataset_id, old_community_resources)\n logging.info(\"deleted community resource for the dataset %s\", dataset_id)", "def delete_frame_data(self, count):\n\n for metric, array in self._array_dict.items():\n self._array_dict[metric] = np.delete(array, count)", "def clean_outputs(self) -> None:\n\n def _delete_if_not_none(fn: Optional[str]) -> None:\n if fn is not None:\n Path(fn).unlink()\n\n _delete_if_not_none(self.config[\"LOG_FILE\"])\n\n for file_ in self.exporter.get_all_files():\n file_.unlink()", "def temp_fix():\n import os\n from dateutil.parser import parse\n from gather_data import read_df_from_file, get_dataframe_pickle_files\n\n df_pick_files = get_dataframe_pickle_files(df_pickle_dir='/Users/ken/Downloads')\n for f in df_pick_files:\n t1 = 
parse_date_from_filename(f)\n ee_stats = read_df_from_file(f)\n fname = '/Users/ken/Downloads/ee_stats_' + t1.strftime('%Y-%m-%d') + '.pkl'\n print 'saving to %s' % fname,\n with open(fname, 'wb') as fh:\n pickle.dump(ee_stats, fh, protocol=pickle.HIGHEST_PROTOCOL)\n print 'done'", "def delete_data(self, name):\n assert name, \"Must input a valid dataset name.\"\n try:\n self.data[\"dataset\"].pop(name)\n self.update_categories()\n self.write_data_cache(self.data)\n except KeyError:\n raise KeyError(\"The dataset \\'{}\\' does not exist in the cache.\".format(name))", "def clear_loaded_shapefiles(self):\n self.shapes = pd.DataFrame()", "def close(self):\n\t\tif self.is_open:\n\t\t\tself.hdf5file.close()\n\t\t\tself.is_open = False", "def free_finalizer(self, dataset: dict):\n # for gc being late\n if dataset:\n if dataset['vrtx']:\n dataset['vrtx'].release()\n if dataset['indx']:\n dataset['indx'].release()\n dataset.clear()", "def selenium_teardown():\n families_to_delete, visits_to_delete, responses_to_delete = [], [], []\n\n families_to_delete.extend(Family.objects.filter(study_id_number=59638))\n families_to_delete.extend(Family.objects.filter(study_id_number=83695))\n for f in families_to_delete:\n visits_to_delete.extend(f.visit_set.all())\n for v in visits_to_delete:\n responses_to_delete.extend(v.response_set.all())\n\n for r in responses_to_delete:\n r.delete()\n for v in visits_to_delete:\n v.delete()\n for f in families_to_delete:\n f.delete()", "def cleanup_database():\n with sqlite3.connect(DB_STRING) as con:\n con.execute(\"DROP TABLE data\")", "def delFiles(self):\r\n \r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n if os.path.exists(self.h5File): \r\n os.remove(self.h5File) \r\n logger.debug(\"{0:s} File {1:s} deleted.\".format(logStr,self.h5File)) \r\n except XmError:\r\n raise \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))", "def clean(notebook, execute):\n nb = select_notebook(notebook)\n nb.clean_assets(delete=execute)", "def delete_plots():\n return Plot.delete_plots()", "def close(self):\n path_lst = self.h5_path.split(\"/\")\n last = self.history[-1].strip()\n if len(last) > 0:\n hist_lst = last.split(\"/\")\n self.h5_path = \"/\".join(path_lst[: -len(hist_lst)])\n if len(self.h5_path.strip()) == 0:\n self.h5_path = \"/\"\n del self.history[-1]", "def delete_ffmlp_data():\n import shutil\n ffmlp_dir = \"%s/data/fnc-1/mlp_models/temp_models\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n if (os.path.exists(ffmlp_dir)):\n for the_file in os.listdir(ffmlp_dir):\n file_path = os.path.join(ffmlp_dir, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(e)", "def tearDownClass(self):\n remove('temp_mol_file.csv')", "def clean_filelist(fnlist):\n cntClean = 0\n for fn in fnlist:\n try:\n with h5py.File(fn,\n 'r+') as handle: # ref: https://docs.h5py.org/en/stable/high/file.html?highlight=h5py.File#h5py.File\n if args.groupName in list(handle.keys()): # clean if found any group named 'Analyses'\n del handle[args.groupName]\n cntClean += 1\n except: ## avoid corrupted fast5 files\n pass\n return 
cntClean", "def dataset(bf):\n ds = bf.create_dataset(\"test_dataset_{}\".format(uuid4()))\n\n yield ds\n\n bf._api.datasets.delete(ds)", "def wipe_test_data(db='default'):\n import ikwen_shavida.movies.models\n import ikwen_shavida.reporting.models\n import ikwen_shavida.sales.models\n import ikwen_shavida.shavida.models\n import ikwen.partnership.models\n import ikwen.core.models\n for name in ('Category', 'Trailer', 'Movie', 'SeriesEpisode', 'Series'):\n model = getattr(ikwen_shavida.movies.models, name)\n model.objects.using(db).all().delete()\n for name in ('StreamLogEntry', 'HistoryEntry'):\n model = getattr(ikwen_shavida.reporting.models, name)\n model.objects.using(db).all().delete()\n for name in ('SalesConfig', 'VODBundle', 'RetailBundle', 'VODPrepayment', 'RetailPrepayment', 'ContentUpdate', 'UnitPrepayment'):\n model = getattr(ikwen_shavida.sales.models, name)\n model.objects.using(db).all().delete()\n for name in ('OperatorProfile', 'Customer'):\n model = getattr(ikwen_shavida.shavida.models, name)\n model.objects.using(db).all().delete()\n for name in ('Application', 'Service', 'Config', 'ConsoleEventType', 'ConsoleEvent', 'Country', ):\n model = getattr(ikwen.core.models, name)\n model.objects.using(db).all().delete()\n for name in ('PartnerProfile', 'ApplicationRetailConfig'):\n model = getattr(ikwen.partnership.models, name)\n model.objects.using(db).all().delete()", "def pytest_sessionfinish(session, exitstatus):\n\n # dat files are created when using attachements\n print(\"\\n-------------------------\\nClean dpytest_*.dat files\")\n fileList = glob.glob('./dpytest_*.dat')\n for filePath in fileList:\n try:\n os.remove(filePath)\n except Exception:\n print(\"Error while deleting file : \", filePath)", "def shutdown(self):\n del self.model\n del self.train_dataset\n del self.test_dataset", "def __delitem__(self, key):\n if self.file_exists:\n try:\n with open_hdf5(self.file_name, mode=\"a\") as store:\n del store[self._get_h5_path(key)]\n except (AttributeError, KeyError):\n pass", "def UninstallPandasTools():\n global _originalSettings\n Chem.Mol.__ge__ = _originalSettings['Chem.Mol.__ge__']\n Chem.Mol.__str__ = _originalSettings['Chem.Mol.__str__']", "def data_clean_up(dict_of_dfs, outdir):\n # rvs_to_be_shared = id_mapping['SUBJECT_NUMBER'].tolist()\n for key, df in dict_of_dfs.items():\n for ind, subjnum in df['participant_id'].items():\n if subjnum in rvs_to_be_shared:\n onid = openneuro_id_lookup(subjnum)\n if onid:\n dict_of_dfs[key].at[ind, 'participant_id'] = '-'.join(['sub', onid])\n else:\n dict_of_dfs[key] = dict_of_dfs[key].drop(index=ind, axis=0)\n dict_of_dfs[key] = reorder_cols(dict_of_dfs[key])\n dict_of_dfs[key] = remove_blank_rows(dict_of_dfs[key])\n dict_of_dfs = remove_nans(dict_of_dfs)\n return dict_of_dfs", "def save_dataset(fname, dname, data):\n with h5py.File(fname, 'w') as w:\n try:\n dset = w.create_dataset(dname, (data.shape[0], data.shape[1]))\n dset[:] = data\n except:\n pass\n\n w.flush()", "def cleanup(self):\n\n # uninstall sourcedata\n if self.conversion.install_dataset_path.exists():\n # without the ChangeWorkingDir the command does not operate inside\n # of dataset_path\n with utils.ChangeWorkingDir(self.dataset_path):\n datalad.uninstall(\n path=self.conversion.install_dataset_name,\n dataset=self.dataset_path,\n recursive=True\n )\n\n # remove bids conversion\n bids_dir = self._get_bids_dir()\n if bids_dir.exists():\n self.log.info(\"Remove %s\", bids_dir)\n shutil.rmtree(bids_dir)", "def tearDown(self):\n\n for fname in 
self.fnames:\n FileSystem.unlink(fname)", "def clean_data():\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)", "def _clear_disk_cache(delete_tarball=False):\n\n base_dir = get_cachedir()\n\n if base_dir is None:\n print('No cache dir found, not deleting anything.')\n return\n\n data_dir = base_dir / 'data'\n pickle_files = data_dir.glob('*.pickle')\n\n for pickle_file in pickle_files:\n try:\n pickle_file.unlink()\n except OSError:\n print('Could not remove file \"{}\"'.format(pickle_file))\n\n if delete_tarball:\n tarball_path = base_dir / 'data.tar.bz2'\n try:\n tarball_path.unlink()\n except:\n print('Could not remove file \"{}\"'.format(tarball_path))", "def test_remove_existing_images(self):\n disk.merge_datasets(self.input_datasets,\n self.output_dataset, remove_existing_images=True)\n self.assertEqual(5, len(self.output_dataset.metadata()))\n\n overwritten_image_metadata = self.output_dataset.image_metadata(\n \"loc1\", \"src0\")\n self.assertEqual(\n {\"dataset\": 2}, overwritten_image_metadata[\"metadata\"])", "def clean_master():", "def clear_data():\n logger.info(\"Delete Structure instances\")\n Structure.objects.all().delete()\n logger.info(\"Delete StructureType instances\")\n StructureType.objects.all().delete()\n logger.info(\"Delete Industry instances\")\n Industry.objects.all().delete()\n logger.info(\"Delete Price instances\")\n PriceList.objects.all().delete()\n logger.info(\"Delete Stock instances\")\n Stock.objects.all().delete()\n logger.info(\"Delete News instances\")\n News.objects.all().delete()\n logger.info(\"Delete NewsImages instances\")\n NewsImage.objects.all().delete()\n logger.info(\"Delete News Sections instances\")\n NewsCategorySection.objects.all().delete()\n logger.info(\"Delete Analysis instances\")\n AnalysisOpinion.objects.all().delete()\n logger.info(\"Delete Analysis Images instances\")\n AnalysisImage.objects.all().delete()\n logger.info(\"Delete Analysis Sections instances\")\n AnalysisCategorySection.objects.all().delete()", "def removeRtree(self):\n try:\n os.remove(str(self.dim)+'d_index.data')\n os.remove(str(self.dim)+'d_index.index')\n print('Files removed')\n except:\n print('No such files')", "def purge_history(sc, table, history_table, keep_latest_n):\n\n if sys.platform != \"darwin\":\n # remove the corresponding s3 location - safety check that the location is a run_id location in particular buckets.\n # wants to make sure we are deleting s3 path with expected pattern.\n # Expected S3 path is in the format of - {some s3 bucket}/{some folder}/{**optional subfolders**}/{job name folder}/{folder with run id}/*.{file extension}\n\n path_regex=re.compile('s3://MyCompany[.a-z_-]*/[.a-z_-]*(/[.a-z_-]*)?/[a-z-_]*/run_id=\\d{8}_\\d{4}')\n path_regex_group = re.compile(r'^s3://(?P<bucket>.*?)/(?P<key>.*)')\n\n client = boto3.client('s3')\n s3 = boto3.resource('s3')\n s3_rm_path = []\n keys_in_parent = []\n keys_to_purge = []\n\n if history_table is not None:\n partitions = sc.sql(\"show partitions {hist_table_name}\".format(hist_table_name=history_table)).collect()\n\n # modifying this code as higher version of hive has key as 'partition', instead of 'result' . 
Hive 2.3.3-amzn-2\n # partitions = [_i.result for _i in partitions]\n\n partitions = [_i.asDict().values()[0] for _i in partitions]\n partitions.sort(reverse=True)\n\n if len(partitions) > keep_latest_n:\n partitions_to_purge = partitions[keep_latest_n:]\n\n for _i in range(len(partitions_to_purge)):\n partition_val = partitions_to_purge[_i].split('=')[1]\n df = sc.sql(\"describe formatted {hist_table_name} partition (run_id='{partition_val}')\".format(hist_table_name=history_table, partition_val=partition_val))\n\n s3_rm_path.append(df.where(df.col_name.startswith('Location')).select('data_type').collect()[0]['data_type'])\n\n # drop this partition from the table\n sc.sql(\"alter table {hist_table_name} drop if exists partition (run_id='{partition_val}')\".format(hist_table_name=history_table, partition_val=partition_val))\n\n else:\n # delete old s3 run_ids which will be there in the parent folder\n df = sc.sql(\"describe formatted {table_name}\".format(table_name=table))\n location = df.where(df.col_name.startswith('Location')).select('data_type').collect()[0]['data_type']\n m = re.match(path_regex_group, location).groupdict()\n bucket_name = m['bucket']\n parent_key = m['key'].split(\"=\")[0].replace(\"run_id\", \"\")\n response = client.list_objects_v2(Bucket=bucket_name, Prefix=parent_key, Delimiter=\"/\")\n list_of_keys = [i['Prefix'] for i in response['CommonPrefixes']]\n\n for i in list_of_keys:\n keys_in_parent.append(i.split(\"run_id=\")[1].replace(\"/\", \"\"))\n\n keys_in_parent.sort(reverse=True)\n\n if len(keys_in_parent) > keep_latest_n:\n keys_to_purge = keys_in_parent[keep_latest_n:]\n\n for _i in keys_to_purge:\n s3_rm_path.append(os.path.join(\"s3://\", bucket_name, parent_key, \"run_id=\"+_i))\n\n # remove the paths from s3\n for _i in s3_rm_path:\n if re.match(path_regex, _i):\n m = re.match(path_regex_group, _i).groupdict()\n bucket = s3.Bucket(m['bucket'])\n for obj in bucket.objects.filter(Prefix=m['key']):\n s3.Object(bucket.name, obj.key).delete()", "def remove_DateIndex(dataset):\n dataset.to_csv('normalized_Dataset.csv', mode='w', header=True)\n dataset = pd.read_csv(\"normalized_Dataset.csv\")\n dataset = dataset.drop('date', axis=1)\n return dataset", "def clean():\n try:\n os.unlink(options.coords + 'mirza_mrna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_expressions' + '.fa')\n except:\n pass", "def remove_data(data=None): #clear\n data = get_data(data)\n shutil.rmtree(data)", "def remove_stale_files(self) -> None:\n\n for db in self.dbnodes:\n db.remove_stale_dbnode_files()", "def clean_cache(dataset_id=None, # type: str\n # format='csv', # type: str\n platform_id=None, # type: str\n base_url=None, # type: str\n cache_root=None # type: Union[str, Path]\n ):\n if dataset_id is not None:\n # clean a specific dataset on a specific platform\n path_pattern = get_cached_dataset_entry(dataset_id, format=\"*\", platform_id=platform_id, base_url=base_url,\n cache_root=cache_root)\n for cached_file in glob(str(path_pattern.file_path)):\n print(\"[odsclient] Removing cached dataset entry for %r: %r\" % (dataset_id, cached_file))\n os.remove(cached_file)\n else:\n if cache_root is None:\n cache_root = CACHE_ROOT_FOLDER\n else:\n cache_root = str(cache_root)\n\n if platform_id is not None:\n p = platform_id\n elif base_url is not None:\n p = baseurl_to_id_str(base_url)\n else:\n p = None\n\n if p is None:\n # clean the whole cache\n print(\"[odsclient] Removing entire cache folder %r\" % 
cache_root)\n rmtree(cache_root, ignore_errors=True)\n else:\n # clean an entire platform cache\n path_to_delete = \"%s/%s/\" % (cache_root, p)\n print(\"[odsclient] Removing cache for platform %r: folder %r\" % (p, path_to_delete))\n rmtree(path_to_delete, ignore_errors=True)", "def delEvery():\n delMain()\n delFile()\n delPuls()\n delSat()\n delFreq()\n delTemp()\n delGly()\n delDlr()\n label['text'] = \"All json files have been deleted !\"", "def create_cleaned_dataset(PDBbind_dataset_path, general_set_PDBs_path, refined_set_PDBs_path, output_name, plot = False):\n \n # load dataset\n data = pd.read_csv(PDBbind_dataset_path)\n \n # check for NaNs in affinity data\n if data['-log(Kd/Ki)'].isnull().any() != False:\n print('There are NaNs present in affinity data!')\n \n # create list of PDB id's\n pdbid_list = list(data['PDB ID'])\n \n # remove affinity values that do not have structural data by searching PDBs\n missing = []\n for i in range(len(pdbid_list)):\n pdb = pdbid_list[i]\n if os.path.isdir(str(general_set_PDBs_path) + pdb)==False and os.path.isdir(str(refined_set_PDBs_path) + pdb)==False:\n missing.append(pdb)\n data = data[~np.in1d(data['PDB ID'], list(missing))]\n\n # distinguish PDB id's in general and refined sets\n general_dict = {}\n refined_dict = {}\n for i in range(len(pdbid_list)):\n pdb = pdbid_list[i]\n if os.path.isdir(str(general_set_PDBs_path) + pdb)==True:\n general_dict[pdb] = 'general'\n if os.path.isdir(str(refined_set_PDBs_path) + pdb)==True:\n refined_dict[pdb] = 'refined'\n \n # add 'set' column to data and fill with 'general' or 'refined'\n data['set'] = np.nan\n data.loc[np.in1d(data['PDB ID'], list(general_dict)), 'set'] = 'general'\n data.loc[np.in1d(data['PDB ID'], list(refined_dict)), 'set'] = 'refined'\n \n # write out csv of cleaned dataset\n data[['PDB ID', '-log(Kd/Ki)', 'set']].to_csv(output_name, index=False)\n \n # read in and view the cleaned dataset\n display(pd.read_csv(output_name))\n \n if plot == True:\n # plot affinity distributions for general and refined sets\n grid = sns.FacetGrid(data, row='set', row_order=['general', 'refined'],\n size=3, aspect=2)\n grid.map(sns.distplot, '-log(Kd/Ki)')\n else:\n return", "def test_34_save_ds(self, tempfile_h5):\n example = Example(groups=7, origins=5, )\n example.save_dataset_to_netcdf(tempfile_h5)", "def clean(self):\n\t\tself.archiver.closeFile()", "def delete_dataset(self, identifier):\n # Delete the dataset directory if it exists. 
Otherwise return False\n dataset_dir = self.get_dataset_dir(identifier)\n if not os.path.isdir(dataset_dir):\n return False\n shutil.rmtree(dataset_dir)\n return True", "def clean(dataset_path: str) -> str:\n def _remove_unused(text: str):\n clean_data = text.lower().strip()\n clean_data = re.sub(\n r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',\n \" \", clean_data)\n clean_data = re.sub(r\"<.*>\", \"\", clean_data)\n clean_data = re.sub(r\"@[a-zA-Z0-9_]+\", \"\", clean_data)\n clean_data = clean_data.replace(\"\\n\", \"\")\\\n .replace(\"#\", \"\")\n return clean_data\n\n dtypes = {\n \"id\": int,\n \"keyword\": str,\n \"location\": str,\n \"text\": str\n }\n\n if \"train\" in dataset_path:\n dtypes[\"target\"] = int\n\n new_path = _make_new_filepath(dataset_path, \"clean\")\n df = pd.read_csv(f\"/data/{dataset_path}\", index_col=\"id\", dtype=dtypes)\n df[\"text\"] = df[\"text\"].apply(_remove_unused)\n df.to_csv(f\"/data/{new_path}\")\n return new_path", "def close_raster_file(self):\n try:\n if self.dataset:\n del self.dataset\n self.dataset = None\n except AttributeError:\n pass", "def wipe(self):", "def wipe(self):", "def make_dataframes(folders, file_stem):\n\n print \"Making one big dataframe...\"\n df_orig = load_df(folders, file_stem, n_files=500)\n # df_orig = load_df(folders, \"output\")\n # df_orig = load_df(folders, \"output_ma1Lt11\")\n # df_orig = load_df(folders, \"output_good\")\n\n print len(df_orig.index), 'entries in dataframe'\n\n # Drop columns to save space\n drop_cols = [\n 'h1u', 'h1d', 'h1b', 'h1V', 'h1G', 'h1A',\n 'h2u', 'h2d', 'h2b', 'h2V', 'h2G', 'h2A',\n 'Brh3gg', 'Brh3tautau', 'Brh3bb', 'Brh3ww',\n 'Brh3zz', 'Brh3gammagamma', 'Brh3zgamma',\n 'Brh3h1h1', 'Brh3h2h2', 'Brh3h1h2',\n 'Brh3a1a1', 'Brh3a1z',\n # 'bsgamma', 'bsmumu', 'btaunu', 'delms', 'delmd']\n ]\n\n for col in drop_cols:\n if col in df_orig.columns.values:\n df_orig.drop(col, inplace=True, axis=1)\n print \"After dropping columns:\", df_orig.columns.values, len(df_orig.columns.values), \"columns\"\n\n # Remove any duplicate entries\n df_orig.drop_duplicates(inplace=True)\n\n # Load up the glu-glu cross sections for 13 TeV\n print \"Adding in cross-sections...\"\n # cs = pd.read_csv(\"parton_lumi_ratio.csv\")\n cs = pd.read_csv(\"YR3_cross_sections.csv\")\n masses = cs[\"MH [GeV]\"]\n mass_len = len(masses)\n xsec_ggf13 = cs[\"ggF 13TeV Cross Section [pb]\"]\n xsec_vbf13 = cs[\"VBF 13TeV Cross Section [pb]\"]\n # xsec_wh13 = cs[\"WH 13TeV Cross Section [pb]\"]\n # xsec_zh13 = cs[\"ZH 13TeV Cross Section [pb]\"]\n xsec_ggf8 = cs[\"ggF 8TeV Cross Section [pb]\"]\n xsec_vbf8 = cs[\"VBF 8TeV Cross Section [pb]\"]\n\n def find_closest_mass_ind(mass):\n pos = bisect_left(masses, mass)\n if pos == mass_len:\n return mass_len - 1\n return pos\n\n print 'Storing nearest-mass indices'\n df_orig['mass_ind_h1'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh1']), axis=1)\n df_orig['mass_ind_h2'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh2']), axis=1)\n df_orig['mass_ind_h3'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh3']), axis=1)\n\n # ALL XSEC STORED ARE CORRECTLY SCALED BY REDUCED COUPLING\n print \"Storing 13 TeV gg xsec\"\n df_orig[\"xsec_ggf13_h1\"] = df_orig['h1ggrc2'] * xsec_ggf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf13_h2\"] = df_orig['h2ggrc2'] * xsec_ggf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf13_h3\"] = df_orig['h3ggrc2'] * xsec_ggf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 13 TeV vbf 
xsec\"\n df_orig[\"xsec_vbf13_h1\"] = df_orig['h1vvrc2'] * xsec_vbf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf13_h2\"] = df_orig['h2vvrc2'] * xsec_vbf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf13_h3\"] = df_orig['h3vvrc2'] * xsec_vbf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV ggf xsec\"\n df_orig[\"xsec_ggf8_h1\"] = df_orig['h1ggrc2'] * xsec_ggf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf8_h2\"] = df_orig['h2ggrc2'] * xsec_ggf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf8_h3\"] = df_orig['h3ggrc2'] * xsec_ggf8[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV vbf xsec\"\n df_orig[\"xsec_vbf8_h1\"] = df_orig['h1vvrc2'] * xsec_vbf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf8_h2\"] = df_orig['h2vvrc2'] * xsec_vbf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf8_h3\"] = df_orig['h3vvrc2'] * xsec_vbf8[df_orig['mass_ind_h3']].values\n\n # Now add in individual channel xsec\n store_channel_xsec(df_orig)\n print df_orig.columns.values\n\n # Make some subsets here:\n print \"Making subsets...\"\n\n # Points passing all experimental constraints chosen\n df_pass_all = subset_pass_constraints(df_orig)\n # df_pass_all = None\n\n # subset with 2m_tau < ma1 < 10\n df_ma1Lt10 = None\n # df_ma1Lt10 = subset_var(df_pass_all, 3.554, 10.5, \"ma1\")\n\n mhmin, mhmax = 122.1, 128.1\n # subset with h1 as h_125\n # df_h1SM = subset_var(df_pass_all, mhmin, mhmax, \"mh1\")\n df_h1SM = None\n\n # subset with h2 as h_125\n # df_h2SM = subset_var(df_pass_all, mhmin, mhmax, \"mh2\")\n df_h2SM = None\n\n n_orig = len(df_orig.index)\n\n def percent_str(numerator, denominator):\n return \"%.3f %% \" % (100*numerator/float(denominator))\n\n print \"Running over\", n_orig, \"points\"\n if isinstance(df_pass_all, pd.DataFrame):\n n_pass_all = len(df_pass_all.index)\n print n_pass_all, \"points passing all constraints (= %s)\" % percent_str(n_pass_all, n_orig)\n # print len(df_ma1Lt10.index), \"of these have 2m_tau < ma1 < 10 GeV (= %s)\" % percent_str(len(df_ma1Lt10.index), n_pass_all)\n # print len(df_h1SM.index), \"points in the h1 = h(125) subset (= %s)\" % percent_str(len(df_h1SM.index), n_pass_all)\n # print len(df_h2SM.index), \"points in the h2 = h(125) subset (= %s)\" % percent_str(len(df_h2SM.index), n_pass_all)\n print \"\"\n\n return df_orig, df_pass_all, df_ma1Lt10, df_h1SM, df_h2SM", "def clean():\n clean_files()", "def delete_classifier_data(exp_id: str, job_id: str) -> None:\n filepath = '%s-classifier-data.pb.xz' % (job_id)\n fs = GcsFileSystem(feconf.ENTITY_TYPE_EXPLORATION, exp_id)\n if fs.isfile(filepath):\n fs.delete(filepath)", "def clear_data():\n dir_list = [\"generated/*\", \"pub/static/*\", \"var/cache/*\", \"var/page_cache/*\", \"var/view_preprocessed/*\", \"var/tmp/*\"]\n\n for item in dir_list:\n print(\"[ - ] Removing\", item, \"\\n\")\n subprocess.run([\"rm\", \"-rf\", item])" ]
[ "0.65211636", "0.65018326", "0.6463773", "0.6445838", "0.6362268", "0.63500494", "0.6349943", "0.63297856", "0.6250076", "0.6238", "0.62326133", "0.6211138", "0.61991334", "0.61900306", "0.61059207", "0.60499495", "0.6021087", "0.6016084", "0.5999302", "0.5968676", "0.5964416", "0.59298074", "0.58888656", "0.5877281", "0.58721775", "0.5865451", "0.5863317", "0.58229905", "0.58229697", "0.58197284", "0.58082336", "0.5801774", "0.5778328", "0.5761612", "0.5752276", "0.5751541", "0.5736541", "0.57195604", "0.57133794", "0.5706852", "0.56485486", "0.5640136", "0.56326544", "0.56177", "0.5617681", "0.56167805", "0.5616129", "0.5613392", "0.5605788", "0.5604268", "0.56041306", "0.5600489", "0.55953044", "0.559373", "0.5592756", "0.55898046", "0.55819905", "0.55782634", "0.55763155", "0.55712503", "0.55596286", "0.55595624", "0.55586743", "0.55508244", "0.5548324", "0.55341065", "0.55292153", "0.551601", "0.5515662", "0.5508604", "0.54923755", "0.54912597", "0.54907155", "0.54799944", "0.54721034", "0.54646415", "0.54618204", "0.5461009", "0.54585034", "0.5457738", "0.5454089", "0.54500175", "0.5447695", "0.5438294", "0.5434918", "0.5430643", "0.54228234", "0.54226905", "0.54181373", "0.541778", "0.5417367", "0.54107827", "0.5408594", "0.54055756", "0.540223", "0.540223", "0.53994703", "0.5395217", "0.5390551", "0.5387356" ]
0.57039887
40
Fixture for client object passed as an argument to the test functions
def client():
    client = Client()
    return client
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_for_client():", "def test_create_client(self):\n pass", "def test_client_create(self):\n pass", "def setUp(self):\n self.client = DummyClient()", "def test_client_retrieve(self):\n pass", "def setUp(self):\n self.client = Client()", "def setUp(self):\n self.client = Client()", "def setUp(self):\n self.client = Client()", "def setUp(self):\n self.client = Client()", "def test_get_client(self):\n pass", "def setUp(self):\n self.client = api.Client(config.get_config(), api.json_handler)", "def testclient():\n base_url = PARAMS.get(\"url\") + \"/v2\"\n client = Client(\n base_url=base_url,\n headers={\n \"Authorization\": f\"GenieKey {PARAMS.get('token')}\",\n }\n )\n return client", "def test_client():\n testing_client = app.app.test_client()\n testing_client.testing = True\n\n yield testing_client", "def setUp(self):\n self.c = Client(host=\"localhost\")", "def test_client_list(self):\n pass", "def client(test_app):\n yield test_app.test_client()", "def setUpFixture(self):\n pass", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def test_update_client(self):\n pass", "def test_test_client_model(self):\n pass", "def client(app):\n yield app.test_client()", "def client(app):\n yield app.test_client()", "def test_query_client_instantiated():\n client = ConfigureClients()\n assert client.query_client", "def setUp(self):\r\n super(SSLClientTest, self).setUp()\r\n self.client = Client()\r\n self.factory = RequestFactory()\r\n self.mock = Mock()", "def test_delete_client(self):\n pass", "def test_get_clients_succeeds_with_valid_client_id_in_params(\n valid_client_model, client, request_headers\n):\n res = client.get(clients_url(1), headers=request_headers)\n response = res.get_json()\n\n assert response['success']\n assert response['message'] == 'client fetched successfully'\n assert response['data']['username'] == 'Leroy Jenkins'\n assert res.status_code == 200", "def setUp(self):\n super().setUp()\n self.client = APIClient()", "def client():\n yield tests.example_server.app.test_client()", "def test_client_verification_create(self):\n pass", "def setUp(self):\n self.client = APIClient()", "def setUp(self):\n self.client = APIClient()", "def setUp(self):\n server = TestServer()\n servers = OrderedDict([(\"default\", server)])\n self.client = TurboTestClient(servers=servers)\n\n self.ref_a = RecipeReference.loads(\"liba/1.0@conan/stable\")\n self.client.create(self.ref_a, conanfile=GenConanfile())\n\n self.ref_b = RecipeReference.loads(\"libb/1.0@conan/stable\")\n self.client.create(self.ref_b, conanfile=GenConanfile().with_requirement(self.ref_a))\n\n self.ref_c = RecipeReference.loads(\"libc/1.0@conan/stable\")\n self.client.create(self.ref_c, conanfile=GenConanfile().with_requirement(self.ref_a))\n\n self.ref_d = RecipeReference.loads(\"libd/1.0@conan/stable\")\n self.client.create(self.ref_d, conanfile=GenConanfile().with_requirement(self.ref_b))\n\n self.ref_e = RecipeReference.loads(\"libe/1.0@conan/stable\")\n self.client.create(self.ref_e, conanfile=GenConanfile().with_requirement(self.ref_d))\n\n self.ref_f = RecipeReference.loads(\"libf/1.0@conan/stable\")\n conanfile = 
GenConanfile().with_requirement(self.ref_c).with_requirement(self.ref_d)\n self.client.create(self.ref_f, conanfile=conanfile)", "def client():\n from youtube_podcast_api.main import app\n from fastapi.testclient import TestClient\n yield TestClient(app)", "def setUp(self):\n\n self.client = get_client()\n self.fake = Faker()\n self.sim = Simulate()\n\n self.generate_authorizations(10)", "def test_client_create(self, mock_input, mock_pass):\n # Patch username and password.\n mock_input.return_value = \"user\"\n mock_pass.return_value = \"pass\"\n\n # Instantiate Agave object making reference to local mock server.\n local_uri = \"http://localhost:{port}/\".format(port=self.mock_server_port)\n ag = Agave(api_server=local_uri)\n\n # Create client.\n ag.clients_create(\"client-name\", \"some description\")\n\n assert ag.api_key == \"some api key\"\n assert ag.api_secret == \"some secret\"", "def test_auth_client_instantiated():\n client = ConfigureClients()\n assert client.auth_client", "def set_up_client():\n #creating new flask app and a test client\n app = create_app('test')\n client = app.test_client()\n\n #creating the application context and\n #allowing test functions to run by calling test client\n #and finally cleaning house\n ctx = app.app_context()\n ctx.push()\n yield client\n ctx.pop()", "def __init__(self, client):\n self.client = client", "def __init__(self, client):\n\n self.client = client", "def setUp(self):\n\n self.client = APIClient()", "def setUp(self):\n\n self.client = APIClient()", "def setUp(self):\n\n self.client = APIClient()", "def test_client_update(self):\n pass", "def client():\n return app.test_client()", "def test_list_clients(self):\n pass", "def test_client(self, client: Mock) -> None:\n database_connection()\n client.MongoClient.assert_called_once()", "def setUp(self):\n rand = ''.join(\n [random\n .choice(string.ascii_letters + string.digits) for n in range(16)])\n self.secret_key = 'sk_test_16c58271c29a007970de0353d8a47868df727cd0'\n self.random_ref = util.utf8(rand)\n self.test_email = '[email protected]'\n self.test_amount = 5000\n self.plan = 'Basic'\n self.client = TransactionResource(self.secret_key, self.random_ref)\n # self.client.initialize(util.utf8(self.test_amount),\n # util.utf8(self.test_email),\n # util.utf8(self.plan))", "def fixtures():", "def setUp(self):\n self.app = app\n self.client = self.app.test_client", "def setUp(self):\n self.client = Client()\n self.token = \"\"\n self.auth_headers = \"\"\n # mockup's\n self.customer_data = {\n \"name\": \"Create Test Case\",\n \"email\": \"[email protected]\",\n \"phone\": \"11967675454\" \n }\n self.product_data = {\n \"name\": \"Tooth paste\", \n \"description\": \"Protects the enamel\", \n \"image_link\": \"https://en.wikipedia.org/wiki/Toothpaste#/media/File:Toothbrush,_Toothpaste,_Dental_Care_(571741)_(cropped).jpg\", \n \"price\": .99\n }\n self.invoice_data = {\n \"customer_id\": 0,\n \"total_value\": 0.0,\n \"total_quantity\": 0.0,\n \"total_discount\": 0.0\n }\n self.invoice_item_data = {\n \"invoice_id\": 0,\n \"product_id\": 0,\n \"quantity\": 10,\n \"quote_price\": 9.99,\n \"discount_value\": 0.0\n }\n self.shoppingcart_data = {\n \"customer_id\": 0,\n \"product_id\": 0,\n \"quantity\": 0,\n \"discount_value\": 0.0,\n \"is_closed\": False\n }\n self.auth_user = {\n \"first_name\": \"Renato\",\n \"last_name\": \"Aloi\",\n \"username\": \"renato.aloi\",\n \"password\": \"123456\",\n \"email\": \"[email protected]\"\n }\n\n # trying to get a token\n try:\n # create user\n user = 
User.objects.create_user(\n self.auth_user[\"username\"],\n self.auth_user[\"email\"],\n self.auth_user[\"password\"]\n )\n user.first_name = self.auth_user[\"first_name\"]\n user.last_name = self.auth_user[\"last_name\"]\n user.save()\n response = self.client.post(\"/auth\", self.auth_user)\n if response.status_code == 200:\n self.token = response.data[\"token\"] if \"token\" in response.data else \"\"\n self.auth_headers = { \"HTTP_AUTHORIZATION\": \"Token {}\".format(self.token)}\n except Exception as e:\n print(str(e))", "def setUp(self):\r\n super(CLITestAuthKeystoneWithId, self).setUp()\r\n self.client = client.HTTPClient(user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)", "def client():\n from csuibot import app\n app.config['TESTING'] = True\n return app.test_client()", "def client():\n\n gcomics_scrape.APP.config['TESTING'] = True\n test_client = gcomics_scrape.APP.test_client()\n\n yield test_client", "def client(app):\n\n return app.test_client()", "def setUp(self):\n super(BaseTest, self).setUp()\n\n api_client_config = disk_manager_exercise_client.Configuration()\n api_client_config.host = self.ENDPOINT\n\n self.client = disk_manager_exercise_client.api.Disk-manager-exerciseApi(\n api_client=disk_manager_exercise_client.ApiClient(\n configuration=api_client_config\n )\n )", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def test_client_verification_retrieve(self):\n pass", "def setUp(self):\r\n super(CLITestAuthKeystoneWithIdandName, self).setUp()\r\n self.client = client.HTTPClient(username=USERNAME,\r\n user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n tenant_name=TENANT_NAME,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)", "def client():\n flask_app = create_app(\"testing\")\n test_client = flask_app.test_client()\n\n # create app context\n ctx = flask_app.app_context()\n ctx.push()\n\n yield test_client\n \"\"\"\n TESTS RUN HERE\n \"\"\"\n\n ctx.pop()", "def mock_client_fixture():\n with mock.patch(f\"{PROMETHEUS_PATH}.prometheus_client\") as client:\n counter_client = mock.MagicMock()\n client.Counter = mock.MagicMock(return_value=counter_client)\n setattr(counter_client, \"labels\", mock.MagicMock(return_value=mock.MagicMock()))\n yield counter_client", "def mock_client_fixture():\n with mock.patch(f\"{PROMETHEUS_PATH}.prometheus_client\") as client:\n counter_client = mock.MagicMock()\n client.Counter = mock.MagicMock(return_value=counter_client)\n setattr(counter_client, \"labels\", mock.MagicMock(return_value=mock.MagicMock()))\n yield counter_client", "def __init__(self, client):\n self._client = client", "def setup_method(self) -> None:\n self.client = Mock()", "def test_client(client):\n\n response = client.get('/hello')\n assert response.data == b'Hello World'\n assert response.status == '200 OK'\n assert db_wrapper.database.is_closed()", "def test_init(self, tmpdir):\n \n # Test #1: anonymous user\n client = RestClient(host=self.host, username='')\n assert client.host == self.host\n assert client.username is None\n assert client.cert is None\n assert client.verify is True\n\n # Test #2: authenticated user\n 
client = RestClient(host=self.host, username='user', password='pswd',\n verify=False)\n assert client.host == self.host\n assert client.username == 'user'\n assert client.cert is None\n assert client.verify is False\n\n # Test #3 missing cert file\n with raises(ValueError):\n client = RestClient(host=self.host, username='user', password='pswd',\n cert='certfile.pem')\n \n # Test #4 existing cert file\n cert = Path(tmpdir, 'certfile.pem')\n with open(cert, 'w') as f:\n f.write('This is certifiable!')\n client = RestClient(host=self.host, username='user', password='pswd',\n cert=cert)\n assert client.host == self.host\n assert client.username == 'user'\n assert client.cert == cert\n assert client.verify is True", "def setUp(self):\r\n super(CLITestAuthKeystone, self).setUp()\r\n self.mox = mox.Mox()\r\n self.client = client.HTTPClient(username=USERNAME,\r\n tenant_name=TENANT_NAME,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)\r\n self.addCleanup(self.mox.VerifyAll)\r\n self.addCleanup(self.mox.UnsetStubs)", "def client(flask_app):\n with flask_app.test_client() as client:\n yield client", "def client(self):\n app.testing = True\n client = app.test_client()\n\n with app.app_context():\n yield client", "def setUpClass(cls):\n\n cls.client = get_client()", "def setUpClass(cls):\n\n cls.client = get_client()", "def setUpClass(cls):\n\n cls.client = get_client()", "def setup_class(cls):\n\n # Create the test client\n cls.client = app.test_client()", "def setUp(self):\n\t\tself.conn = Client([\"127.0.0.1:11211\"], debug = 1)", "def setUp(self):\n self.test_data = self.read_data('test_data/clients.txt')", "def setup_class(cls):\n cls.client = APP.test_client()", "def setUp(self):\n self.client = mock.create_autospec(CQLClient)\n\n self.maas_client = mock.create_autospec(MaasClient)\n patcher = mock.patch('bobby.worker.MaasClient')\n self.addCleanup(patcher.stop)\n _MaasClient = patcher.start()\n _MaasClient.return_value = self.maas_client", "def init_client(self, client):\n self.client = client", "def setUp(self):\r\n super(CLITestNameorID, self).setUp()\r\n self.mox = mox.Mox()\r\n self.endurl = test_cli20.ENDURL\r\n self.client = client.Client(token=test_cli20.TOKEN,\r\n endpoint_url=self.endurl)\r\n self.addCleanup(self.mox.VerifyAll)\r\n self.addCleanup(self.mox.UnsetStubs)", "def test_mgmt_client_instantiated():\n client = ConfigureClients()\n assert client.mgmt_client", "def test_get_api_v1_client(self):\n\n client = get_api_v1_client()\n self.assertEqual(type(client), Client)", "def client_setup(self):\n self.client = Client()", "def create_client(self) -> None:\n pass", "def setUp(self):\n #creamos un cliente http\n self.client = Client()\n #creamos un usuario en la base de datos de prueba\n self.user = User.objects.create_user('super', '[email protected]', 'super')", "def setUp(self) -> None:\n self.client = APIClient()", "def test_setup_db_for_use_retruns_right_client(self):\n\n assert isinstance(self.test_client, InfluxDBClient) is True", "def setUp(self):\n self.client_socket = open_client_socket()", "def setUp(self):\n self.client_socket = open_client_socket()", "def setUp(self):\n # create our test user\n self.test_user1 = get_user_model().objects.create(**USER1_PARAMS)\n self.test_user2 = get_user_model().objects.create(**USER2_PARAMS)\n self.rogue_user = get_user_model().objects.create(**ROGUE_USER_PARAMS)\n self.test_admin = get_user_model().objects.create(**ADMIN_USER_PARAMS)\n site = Site.objects.get_current()\n self.test_blog = 
Blog.objects.create(site=site, owner=self.test_user1,\n **TEST_BLOG_PARAMS)\n self.test_category1 = Category.objects.create(\n blog=self.test_blog,\n **CAT1_PARAMS\n )\n self.client = Client()\n # self.post = Post.objects.create(\n # title=\"Test User 1 Post\",\n # body=\"This is some stuff.\\n\\nSome stuff, you know.\",\n # blog=self.test_blog,\n # author=self.test_user1.author\n # )\n # self.post.save()\n # enable remote access for test_user1\n self.test_user1.author.remote_access_enabled = True\n self.test_user1.author.save()\n\n # disable remote access for test_user2\n self.test_user2.author.remote_access_enabled = False\n self.test_user2.author.save()\n\n self.rogue_user.author.remote_access_enabled = True\n self.rogue_user.author.save()\n\n self.test_admin.author.remote_access_enabled = True\n self.test_admin.author.save()", "def __init__(self, client=None):\n self._client = client" ]
[ "0.754755", "0.74860317", "0.73783517", "0.7377558", "0.73457175", "0.73118144", "0.73118144", "0.73118144", "0.73118144", "0.73099387", "0.7090056", "0.6969668", "0.69263554", "0.69252616", "0.6855763", "0.67790884", "0.6763768", "0.66879356", "0.66879356", "0.66879356", "0.66879356", "0.66879356", "0.66871935", "0.66700125", "0.66320586", "0.66320586", "0.6620445", "0.6619108", "0.6595552", "0.657404", "0.65671366", "0.65582985", "0.65518063", "0.6539", "0.6539", "0.64958805", "0.64844424", "0.6472316", "0.6467648", "0.6448942", "0.64420515", "0.644079", "0.64374536", "0.64096534", "0.64096534", "0.64096534", "0.6407292", "0.64030117", "0.63957757", "0.6390917", "0.63611466", "0.6354151", "0.6332913", "0.6332071", "0.6319841", "0.6304861", "0.63043684", "0.62998074", "0.62992954", "0.62924063", "0.62924063", "0.62924063", "0.62924063", "0.62924063", "0.62924063", "0.62924063", "0.62924063", "0.62924063", "0.6288148", "0.62826324", "0.62622607", "0.62500477", "0.62500477", "0.62470746", "0.6245663", "0.6220356", "0.6217691", "0.62170297", "0.6215161", "0.6214367", "0.62045413", "0.62045413", "0.62045413", "0.6203933", "0.6201471", "0.619432", "0.6193479", "0.6189278", "0.61803573", "0.61742747", "0.6168315", "0.61633015", "0.614459", "0.61375254", "0.6131381", "0.61241364", "0.6121421", "0.6115733", "0.6115733", "0.61135817", "0.6099132" ]
0.0
-1
Fixture for user object passed as an argument to the user create view test function
def user():
    user = User.objects.create(name='Janek', surname='Kowalski', internal_id='PUHgjdJ',
                               is_administrator=True, is_payment_creator=True,
                               is_payment_approver=False, can_delete_payment=True)
    return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_user_creation(self):\r\n \r\n self.assertIsInstance(self.user, User)", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_user():\n user_data = {\n \"name\": \"Brad\",\n \"username\": \"brad345\",\n \"email\": \"[email protected]\",\n \"password\": \"facebook\",\n \"location\": {\n \"city\": \"Philadelphia\",\n \"state\": \"Pennsylvania\",\n \"country\": \"United States\"\n }\n }\n return UserFactory.create_user(user_data)", "def test_createUser_single(self):\n #TODO: this and other tests", "def test_good_user(self):\n user = self.datautils.create_user()\n self.request.user = user\n self.request.matchdict = {'user_id': int(user.id)}\n result = user_id_get_view(self.request)['d']\n expected = {\n 'id': user.id,\n 'username': user.username,\n 'created': user.created,\n 'email': user.email,\n }\n self.assertEqual(result, expected)", "def setUp(self):\n\n self.user = self.client.users.create({})", "def test_user_is_really_create():\n response = api_helper.get_user(user_name=pytest.test_user.name)\n assert response.status_code == 200\n response_json_data = response.json()[\"data\"]\n assert len(response_json_data) == 1\n check_user_data_in_response(response_json_data[0])\n pytest.test_user.id = response_json_data[0]['id']", "def test_create_user(self):\n user = User(\"Gideon Bamuleseyo\", \"[email protected]\", \"secret\")\n self.assertEqual(user.name, \"Gideon Bamuleseyo\")\n self.assertEqual(user.email, \"[email protected]\")\n self.assertEqual(user.password, \"secret\")", "def setUp(self):\n self.new_user = User(\"Juma\",\"12345\")", "def test_create_user(self):\n data = {\n 'username': 'foobar',\n 'email': '[email protected]',\n 'password': 'somepassword'\n }\n\n request = self.factory.post(self.create_url, data, format='json')\n view = UserViewSet.as_view({\"post\": \"create\"})\n response = view(request)\n self.assertEqual(User.objects.count(), 2)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['username'], data['username'])\n self.assertEqual(response.data['email'], data['email'])\n self.assertFalse('password' in response.data)", "def test_made_user(self, user=None):\n user_attr = dict(is_active=True, is_authenticated=True, is_anonymous=False, is_staff=False, is_superuser=False)\n attr_by_type = {\n 'anonymous': {'is_active': False, 'is_authenticated': False, 'is_anonymous': True},\n 'superuser': {'is_staff': True, 'is_superuser': True},\n 'staff': {'is_staff': True, 'is_superuser': False},\n 'user': {'is_staff': False, 'is_superuser': False},\n 'inactive': {'is_staff': False, 'is_superuser': False, 'is_active': False},\n }\n user_attr.update(attr_by_type.get(self.user_type, {}))\n lookup_type = {'anonymous': AnonymousUser, 'superuser': MockSuperUser, 'staff': MockStaffUser, 'user': MockUser}\n user_class = lookup_type.get(self.user_type, None)\n if not self.mock_users and not self.user_type == 'anonymous':\n user_class = get_user_model()\n user = user or self.user\n self.assertIsNotNone(getattr(self, 'user', None))\n self.assertIsNotNone(user_class)\n self.assertIsInstance(user, user_class)\n for key, value in user_attr.items():\n self.assertEqual(value, getattr(self.user, key, None))", "def setUp(self):\n self.new_user = User(\"Hamisi\",\"python\")", "def setUp(self):\n self.new_user = User('JosphatOtieno','jose@otis45')", "def test_create_user(self):\n data = {\n \"firstname\": \"John\",\n \"lastname\": \"Doe\",\n \"password\": \"supersecret\",\n 
\"password_repeat\": \"supersecret\",\n }\n res = self.post(url=\"/users\", data=data)\n self.assertEqual(res.status_code, 200)\n self.assertIn(b\"Created user.\", res.data)\n\n user = User.query.filter_by(id=6).first()\n self.assertTrue(user)\n self.assertEqual(user.firstname, \"John\")\n self.assertEqual(user.lastname, \"Doe\")\n self.assertFalse(user.is_verified)", "def test_user_instance(self):\n db.session.add(self.user)\n db.session.commit()\n\n user = User.query.filter_by(user_name = \"john_doe\").first()\n users = User.query.all()\n\n self.assertTrue(len(users) > 0)\n self.assertEqual(user.user_name, \"john_doe\")\n self.assertEqual(user.email, \"[email protected]\")\n self.assertEqual(user.profile_pic_path, \"app/static/images\")\n self.assertEqual(user.first_name, \"John\")\n self.assertEqual(user.last_name, \"Doe\")\n self.assertEqual(user.headline, \"Food Blogger\")\n self.assertEqual(user.bio, \"Mainly writes on Chinese cuisine\")", "def setUp(self):\n self.client = Client()\n #creamos un usuario en la db\n self.user = User.objects.create_user('super', '[email protected]', 'super')", "def setUp(self):\n \n self.new_user = User_prof(username = 'munga',bio = 'funny thing to say')", "def setUp(self):\n \n self.new_user = User_prof(username = 'munga',bio = 'funny thing to say')", "def test_user(self):\n u = self.d.user('example')\n self.assertEqual(u.username, 'example')\n self.assertEqual(u.name, 'Example Sampleman')", "def create_fake_data():\n User.create_fake_users()", "def test_user_instance(self):\n self.assertIsInstance(self.new_user, User)", "def setUp(self):\n self.user_1 = User()", "def test_add_user(self):\n pass", "def inner_test(user: models.User):\n pass", "def setUp(self):\n #creamos un cliente http\n self.client = Client()\n #creamos un usuario en la base de datos de prueba\n self.user = User.objects.create_user('super', '[email protected]', 'super')", "def setUp(self):\n\n self.user_1 = User.objects.create_user(\n first_name=\"John\",\n last_name=\"Kenedy\",\n username=\"johnny\",\n password=\"Phrase908\",\n email=\"[email protected]\",\n )\n self.user_2 = User.objects.create_user(\n first_name=\"Kent\",\n last_name=\"Philip\",\n username=\"kenty\",\n password=\"Phrase908\",\n email=\"[email protected]\",\n )", "def test_create_user_object():\n from .scripts.initializedb import create_user_object\n user_object = create_user_object(\"test\", \"test\", \"test\")\n assert isinstance(user_object, User)", "def setUp(self):\n self.new_user = User.objects.create_user(first_name='John', last_name='Doe', username='john_doe', email='[email protected]', bio='I am new here.', password='test_password', website='example.com', social_media={\n 'facebook':'Facebook link',\n 'Dribble': 'Dribble link',\n })", "def test_able_to_create_a_user():\n response = api_helper.create_user(pytest.test_user)\n assert response.status_code == 201\n check_user_data_in_response(response.json()[\"data\"])", "def test_create_user(self):\n first_name = \"b\"\n last_name = \"b\"\n username = \"b\"\n email = \"b\"\n password = \"b\"\n\n manager = UserManager()\n result = manager.create(first_name, last_name, username, email, password)\n self.assertTrue(result)\n\n user = User.objects.get(username=username)\n self.assertEqual(first_name, user.first_name)\n self.assertEqual(last_name, user.last_name)\n self.assertEqual(username, user.username)\n self.assertEqual(email, user.email)\n self.assertEqual(password, user.testdata.password)\n self.assertEqual(username, user.testdata.username)\n 
self.assertEqual(email, user.testdata.email)\n self.assertNotEqual(user.authtests, None)", "def test_create_user(self):\n user = User(email=\"[email protected]\", password=\"testpassword\")\n\n self.assertEqual(user.email, \"[email protected]\")\n self.assertNotEqual(user.password, \"testpassword\")\n self.assertFalse(user.confirmed)\n self.assertIsNone(user.confirmed_at)\n self.assertIsNotNone(user.created_at)\n self.assertIsNotNone(user.confirmation_token)", "def test_create(km_user_factory):\n models.Profile.objects.create(\n is_private=True, km_user=km_user_factory(), name=\"My Profile\"\n )", "def test_create(self):\n userValue = {'name': 'User Test 1',\n 'login': 'usertest1',\n 'user_profile_id': self.user_profile2.id,\n }\n Users = self.env['res.users']\n user_test = Users.create(userValue)\n newUser = self.env['res.users'].browse(user_test.id)\n self.assertEqual(userValue['name'], newUser['name'])", "def inner_test(param: models.User):\n self.assertEqual(param, user)", "def test_create_user(self):\n url = reverse('create_user')\n data = {\n 'first_name': 'Jimbo',\n 'email': '[email protected]',\n 'password': 'jimboland',\n 'postal_code': 'jimbo',\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(User.objects.get().first_name, 'Jimbo')", "def user():\n return UserFactory", "def test_known_user(self):\n self.sign_in()\n u = User.objects.create(first_name = \"David\",\n last_name = 'Smith',\n password='******',\n email='[email protected]',\n phone_number='012-345-6789')\n response = self.client.get(reverse('backend:user_details', args=(u.pk,)))\n self.assertEqual(response.status_code, 200)\n self.assertDictEqual(response.json(), u.json_detail())", "def setUp(self):\n # Se crea el Request factory pars simular peticiones\n self.factory = RequestFactory()\n # Se crea el User que realiza las peticiones\n self.user = User.objects.create_user(username='testuser', email='[email protected]', password='test')", "def setUp(self):\n\n self.new_user = User(\"Danlon\", \"Situma\", \"Dasi202\", \"passcode\")", "def setUp(self):\n self. 
user = User.objects.create_user(username='fredbob',\n first_name='Fred',\n last_name='Bob',\n email='[email protected]',\n password='foobar')", "def test_user(self):\n return True", "def test_user1_method1():\n assert u is not None, \"Could not create a new User object\"", "def test_user_factory(self):\n user = UserFactory(name='User')\n assert user.pk\n assert user.check_password('1234') # default password\n assert user.categories.exists() # default category is set", "def test_create_user_endpoint(self, **kwargs):\n first_name = kwargs.get('first_name', self.test_args[\"user_details\"][\"first_name\"])\n last_name = kwargs.get('last_name', self.test_args[\"user_details\"][\"last_name\"])\n password = kwargs.get('password', self.test_args[\"user_details\"][\"password\"])\n email = kwargs.get('email', Workflows.generate_new_email(suffix=self.global_config[\"email_id_suffix\"]))\n custom_data = {\"first_name\": first_name, \"last_name\": last_name, \"password\": password, \"email\": email}\n kwargs[\"data\"] = {\"user\": custom_data, \"client_id\": self.global_config[\"client_id\"],\n \"client_secret\": self.global_config[\"client_secret\"]}\n\n restapi = Rest(base_uri=self.global_config[\"base_url\"])\n response = restapi.post(**kwargs)\n\n if kwargs.get(\"return_response_obj\", False):\n return response\n\n print(\"Verify Response body\")\n assert json.loads(response.text)[\"message\"] == self.test_args[\"expected_result\"], \"Test Failed\"\n return None", "def test_set_user_field(self):\n pass", "def test_create_defined_user(self):\r\n self._auto_auth(\r\n username='robot', password='test',\r\n email='[email protected]', full_name=\"Robot Name\"\r\n )\r\n\r\n # Check that the user has the correct info\r\n user = User.objects.get(username='robot')\r\n self.assertEqual(user.username, 'robot')\r\n self.assertTrue(user.check_password('test'))\r\n self.assertEqual(user.email, '[email protected]')\r\n\r\n # Check that the user has a profile\r\n user_profile = UserProfile.objects.get(user=user)\r\n self.assertEqual(user_profile.name, \"Robot Name\")\r\n\r\n # By default, the user should not be global staff\r\n self.assertFalse(user.is_staff)", "def test_create(self, client):\n count = get_user_model().objects.count()\n data = factory.build(dict, FACTORY_CLASS=UserFactory)\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 302\n\n user = get_user_model().objects.last()\n assert user.username == data['username']\n assert user.birthday == datetime.datetime.strptime(data['birthday'], '%Y-%m-%d').date()\n assert get_user_model().objects.count() == count + 1", "def test_create_user_endpoint_creates_user(caplog):\n caplog.set_level('INFO')\n\n _request_create_user(SEED_USER_DATA)\n created_user = Advisor.objects.get(email=SEED_USER_DATA['email'])\n\n user_data_keys = SEED_USER_DATA.keys() - set(['token'])\n for key in user_data_keys:\n assert str(getattr(created_user, key)) == SEED_USER_DATA[key]\n\n user_info = [\n 'Creating a user: {',\n f' \"dit_team_id\": \"{SEED_USER_DATA[\"dit_team_id\"]}\",',\n f' \"email\": \"{SEED_USER_DATA[\"email\"]}\",',\n f' \"first_name\": \"{SEED_USER_DATA[\"first_name\"]}\",',\n f' \"last_name\": \"{SEED_USER_DATA[\"last_name\"]}\",',\n f' \"sso_email_user_id\": \"{SEED_USER_DATA[\"sso_email_user_id\"]}\"',\n '}',\n ]\n user_token = f'Created a token `{SEED_USER_DATA[\"token\"]}` for user {created_user.id}.'\n assert caplog.messages == [\n '\\n'.join(user_info),\n user_token,\n ]", "def setUp(self):\n self.client = 
Client()\n self.user = User.objects.create_user('testuser', '[email protected]', 'q2w3E$R%')", "def test_user_creation(self):\n self.assertTrue(User.objects.exists())", "def __init__(self, *args, **kwargs):\n super(UserModelUnitTest, self).__init__(*args, **kwargs)", "def setup_test_user(self):\n self.setup_test_tenant()\n self.test_user = rand_name('test_user_')\n self.test_password = rand_name('pass_')\n self.test_email = self.test_user + '@testmail.tm'\n resp, self.user = self.client.create_user(self.test_user,\n self.test_password,\n self.tenant['id'],\n self.test_email)\n self.users.append(self.user)", "def test_create_user(self):\n self.assertIsInstance(\n User.objects.create_user(username=\"username\", email=\"[email protected]\", password=\"password\"), User)", "def setUp(self):\r\n\r\n # Get the Flask test client\r\n self.client = app.test_client()\r\n app.config['TESTING'] = True\r\n\r\n # Connect to test database\r\n connect_to_db(app, \"postgresql:///test_db\")\r\n\r\n # Create tables and add sample data\r\n db.create_all()\r\n \r\n self.user = crud.create_user(email='[email protected]', password = 'K9#n*Hs73', fname = 'Mary', lname = 'Crews', job = 'Night Auditor',\r\n current_location = 'Florida', place_of_birth = 'Iowa', dob ='1977-11-03', isAdmin =False)", "def setUp(self):\n user = Users.query.first()", "def setUp(self):\n self.user = User.objects.create_user(username='Marry', email='[email protected]', password='secret')\n self.user.first_name = 'Marry'\n self.user.last_name = 'Tomson'\n self.user.save()", "def setUp(self):\n self.superuser = User.objects.create_superuser(\n 'admin',\n '[email protected]',\n 'StrongPassword123'\n )\n self.client.login(\n username='admin',\n password='StrongPassword123'\n )\n\n self.invalid_user = User.objects.create_user(\n 'user',\n '[email protected]',\n 'hello123'\n )\n\n User.objects.create_user(\n 'user_1',\n '[email protected]'\n 'user1password'\n )\n\n User.objects.create_user(\n 'user_2',\n '[email protected]'\n 'user2password'\n )\n\n User.objects.create_user(\n 'user_3',\n '[email protected]'\n 'user3password'\n )\n\n self.users = UserForAdminModelSerializer(User.objects.all(), many=True)", "def setUp(self):\n self.new_users = User(\"Zephon Makale\", \"1234xyz\") #Create User object", "def test_valid_serializer(self, db, api_factory):\r\n # data is valid \r\n user = User.objects.create_user(email='[email protected]', password='admin1600', user_type='D')\r\n request = api_factory.get(self.url)\r\n request.user = user\r\n serializer = DoctorDetailSerializer(data=self.data, context={\"request\": request})\r\n assert serializer.is_valid() == True", "def setUp(self):\n self.user1 = User.objects.create_user(username='jack', email='[email protected]', password='secret')\n self.user1.first_name = \"Jack\"\n self.user1.last_name = \"Smith\"\n self.user1.save()", "def test_user_instances(self):\n obj = User()\n self.assertIsInstance(obj, User)", "def test_user_view(self):\n with self.app.app_context():\n u = user(save=True)\n\n response = self.client.get('/user/%s' % u.slug)\n eq_(response.status_code, 200)\n\n response = self.client.get('/user/not-a-real-user')\n eq_(response.status_code, 404)", "def setUp(self):\n self.new_user = User(username='burens', password='12345')", "def test_signup_view(self, user_data, client: Client):\n response = client.post(\n reverse_lazy(\"users:signup\"),\n {\n \"username\": user_data.email,\n \"email\": user_data.email,\n \"password1\": user_data.password,\n \"password2\": user_data.password,\n },\n 
content_type=\"application/x-www-form-urlencoded\",\n )\n assert response.status_code == 200", "def test_instance(self):\n self.assertIsInstance(self.user_1, User)", "def test_create(self):\n \n name=\"mytest\"\n email=\"[email protected]\"\n \n #test user can be created successfully when given correct values\n user = users.create(Request(name, email)) \n self.assertIsInstance(user, User)\n self.assertEquals(user.name, name)\n self.assertEquals(user.email, email)\n \n #ensure that an error is raised when essential attributes are missed\n self.assertRaises(datastore_errors.BadValueError, users.create, None)", "def test_api_user_post(self):\n pass", "def setUp(self):\n self.new_user = User(username=\"Hey\")\n self.new_user.save()", "def setUp(self):\n self.new_user = User('Valentine', 'Robai', '0712345678', '[email protected]', 'vrobai',\n 'password')", "def test_create_user(self):\n \n new_user = {\"username\": \"beny1976\", \"vocab_count\": 0, \"name\": \"beny rood\", \"sex\": \"male\", \"dob\": \"18/10/1979\"}\n msg = app.create_user(predefined_user=new_user)\n self.assertTrue(msg != \"\")", "def test_check_user(self):\n self.new_user.save_user()\n test_user = User(\"Test\", \"user\", \"test\", \"walIas15\")\n test_user.save_user()\n test_user.check_user(\"test\", \"walIas15\")", "def test_users_post(self):\n pass", "def test_users_post(self):\n pass", "def test_create_user(profile_data):\n email = \"email@localhost\"\n username = \"username\"\n user = api.create_user(username, email, profile_data, {\"first_name\": \"Bob\"})\n\n assert isinstance(user, User)\n assert user.email == email\n assert user.username == username\n assert user.first_name == \"Bob\"\n\n if \"name\" in profile_data:\n assert user.profile.name == profile_data[\"name\"]\n else:\n assert user.profile.name is None", "def test_init(self):\n self.assertEqual(self.new_user.name,\"trinity\")\n self.assertEqual(self.new_user.email,\"[email protected]\")\n self.assertEqual(self.new_user.pin,\"123\")", "def setUp(self):\n db.drop_all()\n db.create_all()\n\n self.client = app.test_client()\n\n self.testuser = User.signup(\n username=\"testuser\",\n password=\"testuser\",\n email=\"[email protected]\",\n phone_number=\"662-996-3356\",\n image_url=None,\n )\n\n self.testuser_id = 8989\n self.testuser.id = self.testuser_id\n\n self.u1 = User.signup(\"abc\", \"password\", \"[email protected]\", None, None)\n self.u1_id = 778\n self.u1.id = self.u1_id\n self.u2 = User.signup(\"efg\", \"password\", \"[email protected]\", None, None)\n self.u2_id = 884\n self.u2.id = self.u2_id\n self.u3 = User.signup(\"hij\", \"password\", \"[email protected]\", None, None)\n self.u4 = User.signup(\"testing\", \"password\", \"[email protected]\", None, None)\n\n db.session.commit()", "def test_post_user(self):\n data = {\n \"name\": \"Francis\",\n \"email\": \"[email protected]\",\n \"password\": \"asdf\"\n }\n\n response = self.client.post(\"/api/users\",\n data=json.dumps(data),\n content_type=\"application/json\",\n headers=[(\"Accept\", \"application/json\")]\n )\n\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response.mimetype, \"application/json\")\n self.assertEqual(urlparse(response.headers.get(\"Location\")).path,\n \"/api/elections\")\n\n data = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(data[\"id\"], 1)\n self.assertEqual(data[\"name\"], \"Francis\")\n\n users = session.query(models.User).all()\n self.assertEqual(len(users), 1)\n\n user = users[0]\n self.assertEqual(user.name, \"Francis\")", "def 
test_resource_user_resource_add_user_post(self):\n pass", "def add_testuser(self):\n user = UserFactory.create()\n user.username = 'testuser'\n user.set_password('testuser')\n user.save()\n return user.profile", "def testCreateIsAllowed(self):\n self.users.create([(u'user', u'secret', u'User', u'[email protected]')])\n user = getUser(u'user')\n self.assertEqual(u'user', user.username)", "def testGetUser(self):\n UserAPI().create([(u'user', u'secret', u'User', u'[email protected]')])\n user = getUser(u'user')\n self.store.commit()\n with login(u'fluiddb', self.admin.objectID, self.transact) as session:\n result = yield self.facade.getUser(session, u'user')\n self.assertEqual(u'user', result.username)\n self.assertEqual(str(user.objectID), result.objectId)\n self.assertEqual(u'User', result.name)\n self.assertEqual(u'USER', result.role)", "def test_save_user(self):\n self.new_user.save_user()\n self.assertEqual(len(User.UserDetails), 1)", "def test_create_user_exists(self):\n payload = {\n \"user\": {\n \"email\": \"[email protected]\",\n \"password\": \"useruser111\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 'first_name': 'Test',\n 'last_name': 'JustUser'\n }\n sample_user(\n payload['user']['email'], payload['user']['password'],\n payload['user']['is_doctor'], payload['user']['is_hospital_admin']\n ),\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def setUp(self):\n User.users = {}\n self.app = User('[email protected]', 'admin', 'admin')\n # Set some default user data\n self.user_data = {\n 1: {\n 'email': '[email protected]',\n 'username': 'admin',\n 'password': 'admin' \n }\n \n }", "def setUp(self):\n\n User.query.delete()\n\n user = User.register(**TEST_USER_DATA)\n db.session.add(user)\n\n db.session.commit()\n\n self.user = user\n self.user_id = user.id", "def test_create_method(self):\n serializer = RegistrationSerializer(data=self.user_data)\n self.assertTrue(serializer.is_valid())\n new_user = serializer.save()\n self.assertIsInstance(new_user, User)", "def test_users_dictionary(self):\n new_user = self.app\n self.assertEqual(len(new_user.users), 0)\n new_user.create_user()\n self.assertIsInstance(new_user, User)\n self.assertEqual(len(new_user.users), 1)", "def setUp(self):\n self.new_users = User('Dennis', 'Kiplangat', 'kiplangat18')", "def setUp(self):\n\n # Create client\n self.client = Client()\n\n # Create the admin user\n self.admin_user = get_user_model().objects.create_superuser(\n email='[email protected]',\n password='adminTesting123'\n )\n\n # Login the admin user\n self.client.force_login(self.admin_user)\n\n # Create the reqular user\n self.user = get_user_model().objects.create_user(\n email='[email protected]',\n password='userTesting123',\n name='Test user full name'\n )", "def test_user_profile_view_constraint(self):\n another_user = AnotherUserFactory()\n params = {'pk': another_user.id}\n profile_response = self.client.get(reverse('api:users-detail', kwargs=params))\n self.assertTrue(profile_response.status_code == 200)\n user_data = profile_response.data\n self.assertFalse(bool(user_data.get('coins')))\n self.assertFalse(user_data.get('email') == self.user.email)\n self.assertFalse(user_data.get('username') == self.user.username)\n self.assertFalse(user_data.get('description') == self.user.description)\n self.assertFalse(user_data.get('gender') == self.user.gender)\n self.assertFalse(user_data.get('birth_date') == 
self.user.birth_date)", "def perform_create(self, serializer):\n # Capture the user information\n test_result = serializer.save()\n test_result.user = self.request.user\n test_result.save()", "def test_user_(self):\n obj = User()\n self.assertIsInstance(obj.email, str)\n self.assertIsInstance(obj.password, str)\n self.assertIsInstance(obj.first_name, str)\n self.assertIsInstance(obj.last_name, str)", "def setUp(self):\n\n self.user_1 = User.objects.create_user(\n username='testuser', password='12345',\n email='[email protected]'\n )\n\n # self.profile_1 = Profile.objects.create(user=self.user_1,\n # image='profile_default.jpg')", "def setUp(self):\n\n db.drop_all()\n db.create_all()\n\n u = User.signup(\"test1\", \"[email protected]\", \"password\", None)\n uid = 1234\n u.id = uid\n\n db.session.commit()\n\n self.u = User.query.get(uid)\n\n self.uid = uid\n\n # self.client = app.test_client()", "def setUp(self):\n user = UserFactory(username='mike', email='[email protected]')\n user.set_password('password')\n user.save()", "def test_create_user(self):\n url = reverse('rest_register')\n data = {\n 'email': '[email protected]',\n 'password1': 'notshortpassword',\n 'password2': 'notshortpassword'\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(User.objects.get().email, '[email protected]')", "def setUp(self):\n db.create_all()\n\n self.user = User(user_name=\"john_doe\", email=\"[email protected]\", password=\"password\", profile_pic_path=\"app/static/images\", first_name=\"John\", last_name=\"Doe\", headline=\"Food Blogger\", bio=\"Mainly writes on Chinese cuisine\")", "def test_user(self):\n user = User.objects.get(username=\"test01\")\n print(user.username)\n print(user.email)\n self.assertEqual(user.email,\"[email protected]\")\n # cat = Animal.objects.get(name=\"cat\")\n # self.assertEqual(lion.speak(), 'The lion says \"roar\"')\n # self.assertEqual(cat.speak(), 'The cat says \"meow\"')", "def test_api_user_get(self):\n pass" ]
[ "0.7581336", "0.7469687", "0.7469687", "0.7469687", "0.74045193", "0.7388603", "0.71562016", "0.7155055", "0.7099242", "0.69998574", "0.6995502", "0.6981704", "0.6978693", "0.6965282", "0.69559574", "0.6923249", "0.6908104", "0.6863743", "0.68313307", "0.68313307", "0.6827611", "0.6818544", "0.68165684", "0.68077576", "0.6807076", "0.67853117", "0.6782773", "0.67720747", "0.6772004", "0.6771604", "0.6757246", "0.6755387", "0.674393", "0.67438567", "0.6733519", "0.6713205", "0.66973096", "0.66926783", "0.6690025", "0.6684842", "0.6675022", "0.66599184", "0.6646792", "0.6639715", "0.6633866", "0.6630006", "0.6628959", "0.661461", "0.6614578", "0.6607339", "0.6605394", "0.66035503", "0.6595285", "0.659319", "0.659078", "0.6588896", "0.65796006", "0.6577623", "0.65713215", "0.6569648", "0.65679437", "0.65451264", "0.6544755", "0.6538498", "0.6537985", "0.65358055", "0.6533865", "0.6525588", "0.6520379", "0.65101385", "0.650909", "0.6508781", "0.65086186", "0.6495198", "0.6495198", "0.6476767", "0.64737755", "0.6471283", "0.64673686", "0.64625", "0.6462456", "0.64541113", "0.6453214", "0.6445933", "0.6441818", "0.6439583", "0.6439175", "0.6438746", "0.6438211", "0.6435324", "0.6434524", "0.6432895", "0.64301676", "0.64247715", "0.6421029", "0.6415225", "0.64125234", "0.6409645", "0.64072514", "0.64036065", "0.6397185" ]
0.0
-1
Fixture for company object passed as an argument to the company create view test function
def company():
    company = Company.objects.create(name='Tre G.M.B.H.', country='Germany')
    return company
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_website_companies_create(self):\n pass", "def test_create_company_1(self):\n company_data = {\n \"_id\": \"sbucks\",\n \"headquarters\": \"Seattle\",\n \"name\": \"Starbucks Inc.\",\n }\n\n resp = self.app.post('/companies', data=json.dumps(company_data),\n content_type='application/json')\n self.assertEqual(resp.status_code, HTTPStatus.CREATED)\n\n # cleanup\n del_resp = self.app.delete(f'/companies/{company_data[\"_id\"]}')\n self.assertEqual(del_resp.status_code, HTTPStatus.OK)", "def test_create_company_props_using_post(self):\n pass", "def test_website_companies_get_details(self):\n pass", "def test_create_company_2(self):\n company_data = {\n \"_id\": \"sbucks\",\n \"headquarters\": \"Seattle\",\n \"name\": \"Starbucks Inc.\",\n }\n\n resp = self.app.post('/companies', data=json.dumps(company_data),\n content_type='application/json')\n self.assertEqual(resp.status_code, HTTPStatus.CREATED)\n\n resp = self.app.post('/companies', data=json.dumps(company_data),\n content_type='application/json')\n self.assertEqual(resp.status_code, HTTPStatus.CONFLICT)\n\n # cleanup\n del_resp = self.app.delete(f'/companies/{company_data[\"_id\"]}')\n self.assertEqual(del_resp.status_code, HTTPStatus.OK)", "def test_create_company_3(self):\n companies_data = [\n {\n \"_id\": \"sbucks\",\n \"headquarters\": \"Seattle\",\n \"name\": \"Starbucks Inc.\",\n },\n {\n \"_id\": \"salesforce\",\n \"headquarters\": \"Toronto\",\n \"name\": \"Salesforce Inc.\",\n },\n ]\n\n resp = self.app.post('/companies', data=json.dumps(companies_data),\n content_type='application/json')\n self.assertEqual(resp.status_code, HTTPStatus.CREATED)\n\n # cleanup\n for company in companies_data:\n del_resp = self.app.delete(f'/companies/{company[\"_id\"]}')\n self.assertEqual(del_resp.status_code, HTTPStatus.OK)", "def setUp(self):\n self.admin = User.objects.get(username='admin')\n self.client = APIClient()\n self.client.force_authenticate(user=self.admin)\n self.data = {'name': 'testCompany', 'address': {\n 'address1': '123 fake st', 'address2': 'fake address 2',\n 'city': 'nowhere', 'state': 'IN', 'zip': '90210'}}\n self.url = reverse('Company-list')", "def test_company_patch(self):\n companyPK = Company.objects.get(name=self.admin.profile.company.name).pk\n url = reverse('Company-detail', kwargs={'pk': companyPK})\n data = {'name': 'NewTestCompany'}\n response = self.client.patch(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Company.objects.get(pk=companyPK).name,\n 'NewTestCompany')", "def test_website_companies_update(self):\n pass", "def test_company_put_permissions(self):\n companyPK = Company.objects.get(name=self.admin.profile.company.name).pk\n url = reverse('Company-detail', kwargs={'pk': companyPK + 1})\n data = {'name': 'NewTestCompany', 'address': {'address1': '123 fake st',\n 'address2': 'fake address 2',\n 'city': 'nowhere', 'state': 'IN', 'zip': '90210'}}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(Company.objects.get(pk=companyPK).name,\n 'NewTestCompany')", "def test_companies(self, setup_data):\n term = 'abc defg'\n\n url = reverse('api-v3:search:basic')\n response = self.api_client.get(\n url,\n data={\n 'term': term,\n 'entity': 'company',\n },\n )\n\n assert response.status_code == status.HTTP_200_OK\n assert response.data['count'] == 
2\n assert response.data['results'][0]['name'].startswith(term)\n assert [{'count': 2, 'entity': 'company'}] == response.data['aggregations']", "def test_get_all_companies(self):\n create_company()\n res = self.client.get(ALL_COMPANIES_LIST)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_all_companies(self, setup_data):\n url = reverse('api-v3:search:basic')\n response = self.api_client.get(\n url,\n data={\n 'term': '',\n 'entity': 'company',\n },\n )\n\n assert response.status_code == status.HTTP_200_OK\n assert response.data['count'] > 0", "def create_company(self):\n self.driver.get(f'{self.base_url}/company-register')\n\n # Fill the company name\n enter_random_string = WebDriverWait(self.driver, 20).until(expected_conditions.presence_of_element_located((By.NAME, 'companyName')))\n enter_random_string.send_keys(self.random_string)\n\n # Press \"Save and Continue\"\n self.driver.find_element_by_xpath('/html/body/div[1]/div/div[3]/div/div[2]/div/div[2]/div[2]/div[2]/div/button').click()\n\n # Wait for the page to load (5 seconds)\n sleep(5)", "def test_get_subscribe_company(self):\n company = create_company()\n user_company = create_usercompany(self.user, company)\n res = self.client.get(COMPANY_STOCK_LIST)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def setup_data(es_with_collector):\n country_uk = constants.Country.united_kingdom.value.id\n country_us = constants.Country.united_states.value.id\n uk_region = constants.UKRegion.south_east.value.id\n CompanyFactory(\n name='abc defg ltd',\n trading_names=['helm', 'nop'],\n address_1='1 Fake Lane',\n address_town='Downtown',\n address_country_id=country_uk,\n uk_region_id=uk_region,\n )\n CompanyFactory(\n name='abc defg us ltd',\n trading_names=['helm', 'nop', 'qrs'],\n address_1='1 Fake Lane',\n address_town='Downtown',\n address_country_id=country_us,\n registered_address_country_id=country_us,\n )\n es_with_collector.flush_and_refresh()", "def test_company_patch_permissions(self):\n companyPK = Company.objects.get(name=self.admin.profile.company.name).pk\n url = reverse('Company-detail', kwargs={'pk': companyPK + 1})\n data = {'name': 'NewTestCompany'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(Company.objects.get(pk=companyPK).name,\n 'NewTestCompany')", "def test_subscribe_company(self):\n company = create_company()\n res = self.client.post(get_companyview_url(company.pk))\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n user_company_count = UserCompany.objects.filter(\n user=self.user,\n companystock=company\n ).count()\n self.assertEqual(user_company_count, 1)", "def test_companies_company_id_push_get(self):\n pass", "def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()", "def test_object_contains_data(self):\n 
self.assertEqual(Company.objects.count(), 0)\n\n term = \"Foobar\"\n company = Company.objects.create(name=\"%s Plumbing\" % term)\n self.assertEqual(Company.objects.count(), 1)\n\n url = reverse(\"search\")\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n data = re.search(r\"formChoices\\: (\\{.*\\}),.*setFieldDescription\", str(response.content))\n field_dict = eval(data.group(1))\n self.assertEqual(set(field_dict.keys()), {\"fields\", \"operators\"})\n\n _company = re.search(r'\\<option value=\"(\\d+)\"\\>Company\\<\\/option\\>', str(response.content))\n company_model = _company.group(1)\n self.assertIsNotNone(company_model)\n\n self.assertIn(company_model, field_dict.get(\"fields\"))\n self.assertIn(company_model, field_dict.get(\"operators\"))\n\n name_field = next((x for x in field_dict[\"fields\"][company_model] if x[1] == \"Name\"))\n self.assertEqual(len(name_field), 3)\n field_id = name_field[0]\n\n self.assertIn(field_id, field_dict[\"operators\"][company_model])\n self.assertIn(\"contains\", field_dict[\"operators\"][company_model][field_id])\n operator = \"contains\"\n\n data = {\n \"form-TOTAL_FORMS\": 1,\n \"form-INITIAL_FORMS\": 0,\n \"form-MIN_NUM_FORMS\": 0,\n \"form-MAX_NUM_FORMS\": 10,\n \"model\": company_model,\n \"form-0-type\": \"and\",\n \"form-0-field\": field_id,\n \"form-0-operator\": operator,\n \"form-0-term\": \"foo\",\n \"form-0-end_term\": None,\n }\n url += \"?\" + urlencode(data)\n\n response = self.client.get(url)\n self.assertIn(\"<h2>1 Compan\", str(response.content))\n self.assertIn(company.name, str(response.content))", "def __init__(self, company_id):\n self.company_id = company_id", "def company(self, company):\n self._company = company", "def test_get_company_props_by_company_id_using_get(self):\n pass", "def fixture_fixture_business_details_example():\n test_example = BusinessDetails(\n business_problem=BUSINESS_PROBLEM,\n business_stakeholders=BUSINESS_STAKEHOLDERS,\n line_of_business=LINE_OF_BUSINESS,\n )\n return test_example", "def test_costcenter_permissions(self):\n self.data['company'] = User.objects.get(\n username='c2e1').profile.company.pk\n response = self.client.post(self.url, self.data, format='json')\n #cost center is created, but provided company is ignored.\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertNotEqual(CostCenter.objects.latest('pk').company,\n self.data['company'])", "def test_add_creates_a_new_version(self):\n assert Version.objects.count() == 0\n\n response = self.api_client.post(\n reverse('api-v4:company:collection'),\n data={\n 'name': 'Acme',\n 'trading_names': ['Trading name'],\n 'business_type': {'id': BusinessTypeConstant.company.value.id},\n 'sector': {'id': random_obj_for_model(Sector).id},\n 'address': {\n 'line_1': '75 Stramford Road',\n 'town': 'London',\n 'country': {\n 'id': Country.united_kingdom.value.id,\n },\n },\n 'uk_region': {'id': UKRegion.england.value.id},\n },\n )\n\n assert response.status_code == status.HTTP_201_CREATED\n response_data = response.json()\n assert response_data['name'] == 'Acme'\n assert response_data['trading_names'] == ['Trading name']\n\n company = Company.objects.get(pk=response_data['id'])\n\n # check version created\n assert Version.objects.get_for_object(company).count() == 1\n version = Version.objects.get_for_object(company).first()\n assert version.revision.user == self.user\n assert version.field_dict['name'] == 'Acme'\n assert version.field_dict['trading_names'] == ['Trading name']\n assert not 
any(set(version.field_dict) & set(EXCLUDED_BASE_MODEL_FIELDS))", "def fixture_candy():\n yield Person(name=\"Candy\", age=13, hobbies=[\"Gardening\"])", "def test_add_organization(self):\n pass", "def test_customer_create(self):\n self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])", "def test_client_tax_information_create(self):\n pass", "def test_create_customer(self):\n url = reverse('customers-list')\n data = {\n 'first_name': self.customer_first_name,\n 'last_name': self.customer_last_name,\n 'email': self.customer_email\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Customer.objects.count(), 1)\n self.assertEqual(Customer.objects.get().first_name, 'John')", "def company(self, company):\n\n self._company = company", "def company(self, company):\n\n self._company = company", "def test_create_account(self):\n url = reverse('hospital_list')\n data = {'name': 'DabApps','mobile': 846800258}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Hospital.objects.count(), 1)\n self.assertEqual(Hospital.objects.get().name, 'DabApps')\n self.assertEqual(Hospital.objects.get().mobile, 846800258)", "def test_create_bulk_academic(self):\n pass", "def test_ach_create_for_business(self):\n\n business = self.client.businesses.create({})\n\n self.ach_model[\"business_token\"] = business.token\n\n funding_source = self.client.funding_sources.ach.create(self.ach_model)\n\n self.verify[\"business_token\"] = business.token\n\n verify_ach_response_model(self, funding_source, self.verify)", "def setUp(self, **file_test_data):\n self._test_data = file_test_data\n self.create_tasks_response = create_company_tasks(assert_response=False, **file_test_data)\n self._id = self.create_tasks_response.data['content'][0]['id']", "def setUp(self):\n #Inheriting the base class functionality\n super(CreateTaskAPITestCase, self).setUp()\n # Create the org using serializer\n create_org_data = {\n 'name': 'Ecell NITRR Open Source',\n 'tagline': 'We love open source.'\n }\n serializer = CreateOrgSerializer(data=create_org_data)\n if serializer.is_valid():\n self.org = serializer.save()[0]", "def test_default_pricelist_with_company(self):\n company1_pricelist = self.env[\"product.pricelist\"].create({\n \"name\": \"company 1 pricelist\",\n \"currency_id\": self.currency.id,\n \"company_id\": self.company1.id,\n \"sequence\": 2,\n })\n\n # make sure this doesn't pick the company2 pricelist\n new_config = self.env[\"pos.config\"].create({\n \"name\": \"usd config\"\n })\n\n self.assertEqual(new_config.pricelist_id, company1_pricelist,\n \"POS config incorrectly has pricelist %s\" % new_config.pricelist_id.display_name)", "def test_edit_contact_list(self):\n c1 = ContactFactory(company_id=self.company.id)\n contact_list = ContactList.objects.first()\n data = ContactListSerializer(contact_list).data\n\n data['title'] = 'Nestle'\n data['contact_ids'] = [c1.id]\n\n url, parsed = self.prepare_urls('v1:contact_list-detail', subdomain=self.company.subdomain, kwargs={'pk':contact_list.id})\n \n response = self.client.put(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.put(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n url, 
parsed = self.prepare_urls('v1:contact_list-detail', subdomain=self.company.subdomain, kwargs={'pk':contact_list.id})\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(content['title'], 'Nestle')\n self.assertEqual(content['contacts'], [c1.id])", "def test_create_contact_list(self):\n c1 = ContactFactory(company_id=self.company.id)\n data = {\n 'title': 'ContactList1',\n 'contact_ids': [c1.id],\n }\n\n url, parsed = self.prepare_urls('v1:contact_list-list', subdomain=self.company.subdomain)\n \n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n content = json.loads(response.content)\n self.assertEqual(content['title'], 'ContactList1')\n self.assertEqual(content['contacts'], [c1.id])\n self.assertNotEqual(content['company_id'], None)\n self.assertNotEqual(content['owner'], None)\n\n url, parsed = self.prepare_urls('v1:contact_list-list', subdomain=self.company.subdomain)\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(self.contact_lists_count+1, len(content))", "def setUp(self):\n #Inheriting the base class functionality\n super(SearchOrgTestCase,self).setUp()\n # Create the org using serializer\n data_org = {\n \"name\":'Test_Org',\n \"tagline\":'testing is everything'\n }\n serializer = CreateOrgSerializer(data = data_org)\n if serializer.is_valid():\n self.org = serializer.save()[0]", "def test_create(self):\n pass", "def test_get_all_company_props_using_get(self):\n pass", "def test_create_contract_admin_page(self):\n # asserts that there aren't any properties in changelist view\n response = self.client.get('/admin/contracts/contract/')\n content = response.content\n self.assertNotIn('table', content)\n self.assertIn(\n '<a href=\"/admin/contracts/contract/add/\" class=\"addlink\">',\n content)\n\n # creates the contract\n payload = self.contract_one_data\n payload['tenant'] = payload['tenant'].id\n payload['property'] = payload['property'].id\n response = self.client.post(\n '/admin/contracts/contract/add/', payload, follow=True)\n self.assertEqual(response.status_code, 200)\n\n # checks it shows in listing\n response = self.client.get('/admin/contracts/contract/')\n content = response.content\n self.assertIn('table', content)\n self.assertIn(str(self.contract_one_data['rent']), content)", "def test_companies_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.companies(-1)", "def test_update_company_props_using_put(self):\n pass", "def setUpFixture(self):\n pass", "def test_create_team(self):\n pass", "def test_delete_company_props_using_delete(self):\n pass", "def test_create_account(self):\n url = reverse('portal-list')\n data = {'brandID': 5, 'status' : 'Enabled'}\n response = self.client.post(url, data, format='json')\n\n #response = self.client.get(url)\n #print response\n #response = self.client.get('/v1/portal/1/')\n #print response\n #self.assertEqual(response.data[\"ud\"], {'id': 1, 'brandID': 4})\n self.assertEqual(response.data[\"brandID\"], 5)\n\n \"\"\"\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Account.objects.count(), 1)\n self.assertEqual(Account.objects.get().name, 'DabApps')\n \"\"\"", "def 
setUp(self):\n Contact.objects.create(name='contact1', email='[email protected]', phone='1263636', address=\"address 1\")\n Contact.objects.create(name='contact2', email='[email protected]')", "def fixture_andy():\n yield Person(name=\"Andy\", age=12, hobbies=[\"Star Wars\", \"Bicycles\"])", "def test_contractors_created(self):\n # Currently, there is just 1 Organization in the database, the org_existing\n OrganizationFactory(name='Existing Organization')\n self.assertEqual(Organization.objects.count(), 1)\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n # The CSV file mentions two contractors, 1 for the project_ouessant1, and\n # 2 for the project_ouessant2\n contractor1 = Organization.objects.get(name='Contractor One')\n contractor2 = Organization.objects.get(name='Contractor Two')\n self.assertEqual(set(project_ouessant1.contractors.all()), set([contractor1]))\n self.assertEqual(\n set(project_ouessant2.contractors.all()),\n set([contractor1, contractor2])\n )\n self.assertEqual(project_liaoning.contractors.count(), 0)", "def test_update_creates_a_new_version(self):\n company = CompanyFactory(name='Foo ltd.')\n\n assert Version.objects.get_for_object(company).count() == 0\n\n response = self.api_client.patch(\n reverse('api-v4:company:item', kwargs={'pk': company.pk}),\n data={'name': 'Acme'},\n )\n\n assert response.status_code == status.HTTP_200_OK\n assert response.json()['name'] == 'Acme'\n\n # check version created\n assert Version.objects.get_for_object(company).count() == 1\n version = Version.objects.get_for_object(company).first()\n assert version.revision.user == self.user\n assert version.field_dict['name'] == 'Acme'", "def test_create_customer(self):\n create_customer_url = reverse(\"customer_list\")\n\n customer_info = {\"first_name\": \"Denny\", \"last_name\": \"Wayne\"}\n\n response = self.client.post(\n create_customer_url, data=customer_info, format=\"json\"\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Customer.objects.count(), 5)\n self.assertEqual(Customer.objects.get(pk=5).first_name, \"Denny\")\n self.assertEqual(Customer.objects.get(pk=5).last_name, \"Wayne\")", "def test_13_company_1_address(self):\n with mock_api(company_1_address):\n import_record(self.session, 'magento.res.partner',\n self.backend_id, '9999256')\n cr, uid = self.cr, self.uid\n partner_ids = self.model.search(cr, uid,\n [('magento_id', '=', '9999256'),\n ('backend_id', '=', self.backend_id)])\n self.assertEqual(len(partner_ids), 1)\n partner = self.model.browse(cr, uid, partner_ids[0])\n # Company of the billing address\n self.assertEqual(partner.name, 'Marechal')\n self.assertEqual(partner.type, 'default')\n # all addresses as contacts\n self.assertEqual(len(partner.child_ids), 1)\n self.assertEqual(len(partner.magento_bind_ids), 1)\n self.assertEqual(len(partner.magento_address_bind_ids), 0)\n self.assertEqual(partner.child_ids[0].type, 'invoice',\n msg=\"The billing address should be of \"\n \"type 'invoice'\")", "def test_companies_company_id_data_bank_accounts_account_id_get(self):\n pass", "def create(self, validated_data):\n admins = Group.objects.create(\n name=validated_data['name'] + ' Admins')\n accountants = Group.objects.create(\n name=validated_data['name'] + ' Accountants')\n validated_data['accountants'] = accountants\n validated_data['admins'] = admins\n 
company = super(CompanySerializer, self).create(validated_data)\n company.save()\n return company", "def setUp(self):\n self.staff = get_user_model().objects.create_doctor(\n email='[email protected]',\n password='testpass@4',\n username='tempuser4'\n )\n self.staff.is_staff = True\n self.staff.save()\n self.staff.refresh_from_db()\n\n self.client = APIClient()\n self.client.force_authenticate(self.staff)\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality1'\n )", "def setUp(self):\n\n self.caffe = Caffe.objects.create(\n name='kafo',\n city='Gliwice',\n street='Wieczorka',\n house_number='14',\n postal_code='44-100'\n )\n self.filtry = Caffe.objects.create(\n name='filtry',\n city='Warszawa',\n street='Filry',\n house_number='14',\n postal_code='44-100'\n )\n\n self.kate = Employee.objects.create(\n username='KateT',\n first_name='Kate',\n last_name='Tempest',\n telephone_number='12345678',\n email='[email protected]',\n favorite_coffee='flat white',\n caffe=self.caffe\n )\n\n self.cash_report = CashReport.objects.create(\n creator=self.kate,\n caffe=self.caffe,\n cash_before_shift=2000,\n cash_after_shift=3000,\n card_payments=500,\n amount_due=1900\n )\n\n Company.objects.create(name='GoodCake', caffe=self.caffe)\n Company.objects.create(name='Tesco', caffe=self.caffe)\n\n Expense.objects.create(\n name='Cakes',\n company=Company.objects.get(name='GoodCake'),\n caffe=self.caffe\n )\n\n Expense.objects.create(\n name='Supply',\n company=Company.objects.get(name='Tesco'),\n caffe=self.caffe\n )\n\n FullExpense.objects.create(\n expense=Expense.objects.get(name='Cakes'),\n amount=50,\n cash_report=CashReport.objects.first(),\n caffe=self.caffe\n )\n\n FullExpense.objects.create(\n expense=Expense.objects.get(name='Supply'),\n amount=500,\n cash_report=CashReport.objects.first(),\n caffe=self.caffe\n )", "def test_home_view_two_object(self):\n self.create_obj()\n UserData.objects.create(\n first_name=\"aaaaaa\",\n last_name=\"aaaaa\",\n date_of_birth='1998-02-23',\n bio=\"aaa\",\n email=\"[email protected]\",\n jabber=\"aaaaa\",\n skype=\"aaaaa\",\n other_contacts=\"aaaaa\"\n )\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(UserData.objects.first(), response.context['data'])", "def test_the_view_render_Contact_instance(self):\n\n my_info = self.response.context_data['info']\n self.assertIsInstance(my_info, Contact)\n\n model_instance = Contact.objects.first()\n self.assertIn(model_instance.name, self.response.content)\n self.assertIn(model_instance.surname, self.response.content)\n self.assertIn(model_instance.email, self.response.content)\n self.assertIn(model_instance.bio, self.response.content)\n self.assertIn(model_instance.skype, self.response.content)\n self.assertIn(model_instance.contacts, self.response.content)", "def setUp(self):\n self.initial_year = 2020\n self.name = \"Testy McTesterson\"\n self.birth_date = datetime(2000, 2, 1) # 1 February 2000\n self.retirement_date = datetime(2065, 6, 26) # 26 June 2065\n self.gross_income = Money(100000) # $100000\n self.raise_rate = Decimal(1) # 100%\n self.tax_treatment = Tax(\n {self.initial_year: {\n Money(0): Decimal('0.1'),\n Money(1000): Decimal('0.2'),\n Money(100000): Decimal('0.3')}\n },\n inflation_adjust={\n year: Decimal(1 + (year - self.initial_year) / 16)\n for year in range(self.initial_year, self.initial_year + 100)\n },\n personal_deduction={self.initial_year: Money(100)},\n credit_rate={self.initial_year: Decimal('0.15')})\n self.spouse = 
Person(\n initial_year=self.initial_year,\n name=\"Spouse\",\n birth_date=1998,\n retirement_date=2063,\n gross_income=Money(50000),\n raise_rate=self.raise_rate,\n spouse=None,\n tax_treatment=self.tax_treatment)\n self.owner = Person(\n initial_year=self.initial_year,\n name=self.name,\n birth_date=self.birth_date,\n retirement_date=self.retirement_date,\n gross_income=self.gross_income,\n raise_rate=self.raise_rate,\n spouse=self.spouse,\n tax_treatment=self.tax_treatment)", "def test_create_virtual_account_beneficiary(self):\n pass", "def setUp(self):\n self.testUser = User.objects.get(username=\"c1e1\")\n self.client = APIClient()\n self.client.force_authenticate(user=self.testUser)\n self.data = {\n \"tracking\": 1234,\n \"mail_class\": \"12\",\n \"return_address\": {\n \"address1\": \"1234\",\n \"address2\": \"1234\",\n \"city\": \"1234\",\n \"state\": \"12\",\n \"zip\": 1234\n },\n \"rate\": 1234,\n \"address\": {\n \"address1\": \"1234\",\n \"address2\": \"1234\",\n \"city\": \"1234\",\n \"state\": \"12\",\n \"zip\": 1234\n },\n \"cost_center\": CostCenter.objects.filter(company=\n self.testUser.profile.company.pk)[0].pk\n }\n self.url = reverse('MailPiece-list')", "def test_create_contact(self):\n \n url = reverse('contact-list')\n contact = self.get_dummy_contact()\n\n response = self.client.post(url, contact,\n format='json',\n HTTP_AUTHORIZATION=self.get_auth())\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Contact.objects.count(), 1)\n self.assertEqual(Contact.objects.get().email_address, contact['email_address'])", "def test_customer_creation():\n agent = AgentFactory()\n customer = CustomerFactory(agent=agent)\n assert agent == customer.agent\n\n customer.name = 'customer test name 1'\n customer.customer_type = 'hom'\n customer.save()\n assert customer.name == 'customer test name 1'\n\n customer.name = 'customer test name 2'\n customer.customer_type = 'oth'\n customer.save()\n assert customer.name == 'customer test name 2'", "def test_create_account_campaign(self, create):\n \"\"\"Campaigns should be created\"\"\"\n row = {'PROJ_NAME1': 'Argentina Fund', 'PROJ_NO': '789-CFD',\n 'SUMMARY': 'Some Sum'}\n sync.create_account(row, None)\n self.assertTrue(create.called)\n account, row, name, acc_type = create.call_args[0]\n self.assertEqual(account.name, 'Argentina Fund')\n self.assertEqual(account.code, '789-CFD')\n self.assertEqual(account.category, Account.COUNTRY)\n self.assertEqual(0, len(Account.objects.filter(pk=account.pk)))", "def fixture_pandy():\n yield Person(name=\"Pandy\", age=12, hobbies=[\"Fortnite\"])", "def test_get_business_new_corp(client):\n rv_cp = client.post('/api/v1/businesses/CP')\n rv_bc = client.post('/api/v1/businesses/BC')\n\n assert 200 == rv_cp.status_code\n assert 200 == rv_bc.status_code", "def __init__(self, name, **company_data):\n self.name = name\n self.__dict__.update(company_data)", "def test_get_organization(self):\n pass", "def test_get_context_data(self):\n # First test it with no data at all\n response = self.client.get(self.url)\n for key in self.context_keys:\n self.assertIn(key, response.context, f\"'{key}' should be in context of DetailedView\")\n\n # Now test with an Ailment but no Employees\n AilmentFactory()\n response = self.client.get(self.url)\n for key in self.context_keys:\n self.assertIn(key, response.context, f\"'{key}' should be in context of DetailedView\")\n\n # Test with a birthplace to see if it appears in top birthplaces\n place = PlaceFactory()\n 
EmployeeFactory(place_of_birth=place)\n response = self.client.get(self.url)\n self.assertIn(str(place), response.context, \"Top place of birth should be in context of DetailedView\")", "def test_perform_create(self):\n data = {\n 'name': 'Jane Joe',\n 'crm': 1234,\n 'email': '[email protected]',\n 'phone': '+55998754128'\n }\n response = self.unath_client.post(reverse('doctor-list'), data=data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n response = self.client.post(reverse('doctor-list'), data=data)\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_create_account(self):\n url = reverse('account:accounts')\n data = {'name': 'Test Account 1'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Account.objects.count(), 1)\n self.assertEqual(Account.objects.get().name, 'Test Account 1')", "def test_search_organizations_post(self):\n pass", "def setUp(self):\n self.tenant_root_domain = Tenant.objects.get_tenant_root_domain()\n self.site = Site.objects.create(\n name=f\"a.{self.tenant_root_domain}\",\n domain=f\"a.{self.tenant_root_domain}\")\n self.tenant = Tenant.objects.create(name=\"A\", site=self.site)\n self.domain = Domain.objects.create(domain=\"a.com\", tenant=self.tenant)\n\n self.other_site = Site.objects.create(\n name=f\"other.{self.tenant_root_domain}\",\n domain=f\"other.{self.tenant_root_domain}\"\n )\n self.other_tenant = Tenant.objects.create(\n name=\"Other\", site=self.other_site)\n self.other_domain = Domain.objects.create(\n domain=\"other.com\", tenant=self.other_tenant)\n\n self.marketing_page = Site.objects.create(\n name=\"Marketingpage\", domain=\"landingpage.com\")\n\n self.site_not_linked = Site.objects.create(\n name=f\"notlinked.{self.tenant_root_domain}\",\n domain=f\"notlinked.{self.tenant_root_domain}\")\n\n self.home_url = reverse(\"home\")\n self.secret_url = reverse(\"tenants:dashboard\")", "def test_add_product_view_for_authenticated_users(user_company, client):\n add_product_url = reverse('add-product')\n response = client.get(add_product_url)\n assert response.status_code == 200", "def test_str(self):\n company = CompanyFactory()\n self.assertEqual(str(company),\n f\"Company {company.name} in {company.city}\")", "def test_create_warranty(self):\n pass", "def test_load_fixture(caplog):\n caplog.set_level('INFO')\n\n with pytest.raises(Advisor.DoesNotExist):\n Advisor.objects.get(pk=ADVISER_FIXTURE['pk'])\n\n response = _request_load_fixture({'fixture': [ADVISER_FIXTURE]})\n\n assert response.status_code == status.HTTP_201_CREATED\n\n adviser = Advisor.objects.get(pk=ADVISER_FIXTURE['pk'])\n assert adviser.email == ADVISER_FIXTURE['fields']['email']\n assert adviser.first_name == ADVISER_FIXTURE['fields']['first_name']\n assert adviser.last_name == ADVISER_FIXTURE['fields']['last_name']\n assert str(adviser.dit_team_id) == ADVISER_FIXTURE['fields']['dit_team']\n\n fixture_info = [\n 'Loading fixture: [',\n ' {',\n ' \"fields\": {',\n f' \"dit_team\": \"{ADVISER_FIXTURE[\"fields\"][\"dit_team\"]}\",',\n f' \"email\": \"{ADVISER_FIXTURE[\"fields\"][\"email\"]}\",',\n f' \"first_name\": \"{ADVISER_FIXTURE[\"fields\"][\"first_name\"]}\",',\n f' \"last_name\": \"{ADVISER_FIXTURE[\"fields\"][\"last_name\"]}\"',\n ' },',\n ' \"model\": \"company.advisor\",',\n f' \"pk\": \"{ADVISER_FIXTURE[\"pk\"]}\"',\n ' }',\n ']',\n ]\n assert caplog.messages == ['\\n'.join(fixture_info)]", "def test_create(self):\n 
retreat = Retreat.objects.create(\n name=\"random_retreat\",\n details=\"This is a description of the retreat.\",\n seats=40,\n address_line1=\"123 random street\",\n postal_code=\"123 456\",\n state_province=\"Random state\",\n country=\"Random country\",\n timezone=\"America/Montreal\",\n price=3,\n start_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 15, 8)),\n end_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 17, 12)),\n min_day_refund=7,\n min_day_exchange=7,\n refund_rate=100,\n is_active=True,\n accessibility=True,\n form_url=\"example.com\",\n carpool_url='example2.com',\n review_url='example3.com',\n has_shared_rooms=True,\n room_type=Retreat.DOUBLE_OCCUPATION,\n toilet_gendered=True,\n )\n\n self.assertEqual(retreat.__str__(), \"random_retreat\")", "def test_name(self):\n\n self.assertEqual(self.bakery.name, \"bakery\")\n self.assertEqual(self.bakery.caffe, self.caffe)\n\n with self.assertRaises(Exception):\n Company.objects.create(name=\"bakery\", caffe=self.caffe)\n\n Company.objects.create(name=\"bakery\", caffe=self.filtry)", "def test_office_creation(self):\n url = '/api/v1/consultorios/'\n data = {\n \"hospital\": \"Angeles Roma\",\n \"office\": \"306\"\n }\n request = self.client.post(url, data)\n\n self.assertEqual(request.status_code, status.HTTP_201_CREATED)", "def setUp(self):\n self.testUser = User.objects.get(username=\"c1e1\")\n self.client = APIClient()\n self.client.force_authenticate(user=self.testUser)\n self.data = {'name': 'testCostCenter',\n 'address': {'address1': '123 fake st', 'address2': 'fake address 2',\n 'city': 'nowhere', 'state': 'IN', 'zip': '90210'}}\n self.url = reverse('CostCenter-list')", "def test_client_create_view(self):\r\n response = self.test_client.get(reverse('client_create'))\r\n self.assertEqual(response.status_code, 200)", "def test_create(self, mock_decorator):\n response = self.client.post(\n '/api/bce_institutions/0802145Y',\n content_type='application/json',\n headers={'Authorization': 'Bearer token'},\n data=json.dumps({\n 'is_institution': True\n }))\n self.assertEqual(response.status_code, 200)\n response_json = json.loads(response.data.decode('utf8'))\n self.assertEqual(\n response_json,\n {'institution': {'uai': '0802145Y', 'is_institution': True}}\n )\n self.assertEqual(BceInstitution.query.count(), 1)", "def setUp(self):\r\n self.course = CourseFactory.create(metadata={\"max_student_enrollments_allowed\": 1})\r\n\r\n self.about = ItemFactory.create(\r\n category=\"about\", parent_location=self.course.location,\r\n data=\"OOGIE BLOOGIE\", display_name=\"overview\"\r\n )", "def company(request):\n domain = request.GET.get(\"domain\")\n version = get_version_or_leave(request, \"company\", domain)\n\n if version == '1':\n\n return company_v1(request)\n\n else:\n\n api_access_logging(\n request,\n \"company\",\n domain,\n \"400\",\n \"4\",\n None\n )\n return Response(\n {\n \"error_code\": \"4\",\n \"detail\": errors_for_customers[\"4\"]\n },\n status=status.HTTP_400_BAD_REQUEST\n )", "def test_create(self):\n self.assertEqual(Routine.objects.count(), 2)\n payload = {\n 'name': 'Monday routine',\n }\n self.client.post('/routines/', data=payload)\n self.assertEqual(Routine.objects.count(), 3)", "def test_beneficiaries_create_callback_that_will_pass(self):\n post_body = {\n 'lastname': 'Doe',\n 'lastname2': '',\n 'middlename': '',\n 'firstname': 'Jane',\n 'nativename': '',\n 'nationality_country_iso_code': 'FRA',\n 'code': '',\n 'date_of_birth': '1970-07-01',\n 'country_of_birth_iso_code': 'FRA',\n 'gender': 'Male',\n 'address': 
'42 Rue des fleurs',\n 'postal_code': '75000',\n 'city': 'Paris',\n 'country_iso_code': 'FRA',\n 'msisdn': '1123131413',\n 'email': '[email protected]',\n 'id_type': 'PASSPORT',\n 'id_country_iso_code': '',\n 'id_number': '1123131413',\n 'occupation': 'Teacher',\n 'bank_accout_holder_name': '',\n 'province_state': ''\n }\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n url = reverse('beneficiary:beneficiaries-create')\n response = self.client.post(url, data=post_body, content_type='application/json')\n return self.assertTrue(response.status_code, 201)", "def setUp(self):\n self.client = APIClient()\n self.order_data = {\n \"customer\": {\n \"first_name\": \"Larosh\",\n \"last_name\": \"Tanbari\",\n \"address\": \"Coppistr\"\n },\n \"size\": \"BIG\"\n }\n self.response = self.client.post(\n reverse(\"get_all_or_create\"),\n data=self.order_data,\n format=\"json\"\n )", "def test_home_view_one_object(self):\n self.create_obj()\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('42 Coffee Cups Test Assignment', response.content)\n self.assertIn('Name', response.content)\n self.assertIn('Last name', response.content)\n self.assertIn('Date of birth', response.content)\n self.assertIn('bio', response.content)\n self.assertIn('Email', response.content)\n self.assertIn('Jabber', response.content)\n self.assertIn('Andrei', response.content)\n self.assertIn('Herasko', response.content)\n self.assertIn('Feb. 23, 1998', response.content)\n self.assertIn('[email protected]', response.content)\n self.assertIn('[email protected]', response.content)\n self.assertIn('ander2299', response.content)", "def test_create_view(self):\n supplement = SupplementFactory(user=self.user_1)\n time = get_utc_now()\n\n post_data = {\n \"supplement_uuid\": str(supplement.uuid),\n \"time\": time.isoformat(),\n \"quantity\": 5,\n }\n\n response = self.client_1.post(self.url, data=post_data)\n self.assertEqual(response.status_code, 200, response.data)\n\n data = response.data\n supplement_name = data[\"supplement\"][\"name\"]\n self.assertEqual(supplement.name, supplement_name)\n self.assertIsNotNone(data[\"display_name\"])", "def test_teams_create(self):\n pass", "def test_create_account_project(self, create):\n row = {'PROJ_NAME1': 'Some Proj', 'PROJ_NO': '121-212',\n 'SECTOR': 'IT'}\n sync.create_account(row, None)\n self.assertTrue(create.called)\n account, row, issue_map = create.call_args[0]\n self.assertEqual(account.name, 'Some Proj')\n self.assertEqual(account.code, '121-212')\n self.assertEqual(account.category, Account.PROJECT)\n self.assertEqual(0, len(Account.objects.filter(pk=account.pk)))", "def test_create_organization(self):\n self.test_login_user()\n url = reverse('MGA:create_organization')\n data = {'name': \"event\"}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def setUp(self):\n Beneficiary.objects.create(id=1, lastname='Doe', lastname2='', middlename='', firstname='Jane', nativename='',\n nationality_country_iso_code='FRA', code='', date_of_birth='1970-07-01',\n country_of_birth_iso_code='FRA', gender='Male', address='42 Rue des fleurs',\n postal_code='75000', city='Paris', country_iso_code='FRA', msisdn='1123131413',\n email='[email protected]', id_type='PASSPORT', id_country_iso_code='',\n id_number='1123131413', occupation='Teacher', bank_accout_holder_name='',\n province_state='')\n self.client = Client()" ]
[ "0.7572055", "0.6949927", "0.6910108", "0.6783742", "0.6782509", "0.6768939", "0.6648319", "0.64719594", "0.62653613", "0.62246877", "0.61053544", "0.60505855", "0.604088", "0.60324633", "0.5979618", "0.5958282", "0.59542584", "0.5907634", "0.58789223", "0.58328825", "0.58327824", "0.58036125", "0.57520914", "0.5742583", "0.57406455", "0.57193995", "0.5707857", "0.5695296", "0.5690856", "0.5662348", "0.5616009", "0.5603953", "0.55843186", "0.55843186", "0.55829394", "0.5542265", "0.5535739", "0.55228096", "0.552067", "0.5517729", "0.55023897", "0.54993176", "0.54974204", "0.54956603", "0.54902285", "0.54879504", "0.5477563", "0.545704", "0.5446027", "0.5441096", "0.5437628", "0.54284", "0.5427608", "0.54195195", "0.5416001", "0.54057264", "0.5395568", "0.53743625", "0.53686726", "0.5365954", "0.5358239", "0.5353564", "0.53496486", "0.5348752", "0.53427136", "0.53425986", "0.53399867", "0.5338131", "0.53357965", "0.5332353", "0.5331894", "0.53313136", "0.53304625", "0.5321829", "0.5314818", "0.5308265", "0.5305371", "0.52971476", "0.52968496", "0.5287282", "0.5286717", "0.5276434", "0.5274734", "0.52685916", "0.5263389", "0.5255811", "0.5253352", "0.52475065", "0.523882", "0.5234495", "0.52340895", "0.5232402", "0.5231601", "0.5230394", "0.5225594", "0.5217457", "0.52146083", "0.51999426", "0.51916075", "0.51903033" ]
0.662038
7
Fixture for admin object passed as an argument to the admin create view test function
def administrator():
    administrator = Administrator.objects.create(name='Michał',
                                                 surname='Paluch',
                                                 login='Udfsr43',
                                                 password='Password_3',
                                                 password_repeat='Password_3')
    return administrator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_admin(self):\n assert(admin)", "def test_an_admin_view(admin_client):\n response = admin_client.get('/admin/')\n assert status(response) == 'ok'", "def test_add_admin(self):\n self.test_create_user()\n self.test_create_organization()\n url = reverse('MGA:add_admin')\n data = {'admin id': 1, 'org_id': 1}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_create__admin_valid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n json_data = {\n 'email': '[email protected]',\n 'isAdmin': True, 'isSiteEditor': True}\n with test_app.test_request_context(self.request_path, json=json_data):\n actual_json = self.handler.do_post()\n self.assertEqual('[email protected]', actual_json['email'])\n self.assertTrue(actual_json['is_site_editor'])\n self.assertTrue(actual_json['is_admin'])\n\n new_appuser = user_models.AppUser.query(\n user_models.AppUser.email == '[email protected]').get()\n self.assertEqual('[email protected]', new_appuser.email)\n self.assertTrue(new_appuser.is_admin)\n\n # Clean up\n new_appuser.key.delete()", "def test_post_creation_admin(self):\n url = reverse('post-list')\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Test Title'\n body = 'Test Body'\n self.client.force_authenticate(user=self.superuser)\n response = self.client.post(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertIn(title, response.content)\n self.assertIn(body, response.content)\n self.assertIn(user_url, response.content)", "def test_get_create_post_as_admin_user(self):\n login = self.client.login(username='testuser_admin', password='password12345')\n\n if login:\n url = reverse('blogs:create')\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'create.html')\n self.client.logout()\n else:\n # TODO Make this dynamic rather than hard coded text string\n self.fail('Login Failed for testuser_staff')", "def test_post_create_post_as_admin_user(self):\n login = self.client.login(username='testuser_admin', password='password12345')\n\n if login:\n\n new_title = \"this is a new title for create\"\n new_content = \"this is new content for create by nicholas herriot\"\n new_published_date = datetime.date(2018, 3, 3)\n data = {'title': new_title, 'content': new_content, 'publish': new_published_date}\n\n url = reverse('blogs:create')\n response = self.client.post(url, data)\n\n # There should now be a new blog create with this title and content, so check that it does exist\n new_blog = Post.objects.get(title='this is a new title for create')\n\n self.assertEqual(new_blog.title, new_title)\n self.assertEqual(new_blog.content, new_content)\n # TODO Get the publish date check working correclty\n # self.assertEqual(new_blog.publish, new_published_date)\n\n self.assertEqual(response.status_code, 302)\n self.client.logout()\n else:\n # TODO Make this dynamic rather than hard coded text string\n self.fail('Login Failed for testuser_staff')", "def test_add_admin_to_org(self):\n pass", "def setUp(self):\n self.superuser = User.objects.create_superuser(\n 'admin',\n '[email protected]',\n 'StrongPassword123'\n )\n self.client.login(\n username='admin',\n password='StrongPassword123'\n )\n\n self.invalid_user = User.objects.create_user(\n 'user',\n '[email protected]',\n 'hello123'\n )\n\n User.objects.create_user(\n 'user_1',\n '[email 
protected]'\n 'user1password'\n )\n\n User.objects.create_user(\n 'user_2',\n '[email protected]'\n 'user2password'\n )\n\n User.objects.create_user(\n 'user_3',\n '[email protected]'\n 'user3password'\n )\n\n self.users = UserForAdminModelSerializer(User.objects.all(), many=True)", "def setUpFixture(self):\n pass", "def test_create_admin():\n os.environ[\"ADMIN_EMAIL\"] = \"[email protected]\"\n os.environ[\"ADMIN_PASSWORD\"] = \"password\"\n\n output = io.StringIO()\n call_command(\"createadmin\", stdout=output)\n\n assert get_user_model().objects.count() == 1\n\n admin = get_user_model().objects.get()\n\n assert admin.check_password(os.environ[\"ADMIN_PASSWORD\"])\n assert admin.first_name == \"Admin\"\n assert admin.last_name == \"User\"\n assert admin.is_staff\n assert admin.is_superuser\n assert admin.email_addresses.count() == 1\n\n email = admin.email_addresses.get()\n\n assert email.email == os.environ[\"ADMIN_EMAIL\"]\n assert email.is_primary\n assert email.is_verified", "def test_admin_json(self):\n\n self.run_admin_test(\"json\")", "def test_create_drink_created_by_admin(self):\n self.test_create_admin_user()\n self.test_create_seting_bar()\n user = UserBase.objects.get(username='admin')\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + user.token)\n url = reverse('drink-list')\n data = {\n 'name': 'Testing Drink',\n 'ingredients':'[{\"unit\":\"0\",\"ratio\":2,\"ingredient\":2001},{\"unit\":\"0\",\"ratio\":2,\"ingredient\":2001}]'\n }\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)", "def test_09_admin_users_as_admin(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data", "def test_households_in_admin_unit(self):", "def test_create(self):\n self.app\n pass", "def setUp(self):\n\n # Create client\n self.client = Client()\n\n # Create the admin user\n self.admin_user = get_user_model().objects.create_superuser(\n email='[email protected]',\n password='adminTesting123'\n )\n\n # Login the admin user\n self.client.force_login(self.admin_user)\n\n # Create the reqular user\n self.user = get_user_model().objects.create_user(\n email='[email protected]',\n password='userTesting123',\n name='Test user full name'\n )", "def test_create(self):\n pass", "def fixtures():", "def test_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def test_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def test_admin(client, admin, default_user):\n response = client.get(\"/user\")\n assert response.status_code == 401\n\n \"\"\"\n 2. Get all users with default user credentials\n \"\"\"\n response = client.get(\"/user\", headers=default_user.headers)\n assert response.status_code == 401\n\n \"\"\"\n 3. 
Get all users with admin credentials\n \"\"\"\n response = client.get(\"/user\", headers=admin.headers)\n assert response.status_code == 200\n assert b\"DefaultUser\" in response.data\n\n \"\"\"\n 4. Modify default user info with admin credentials\n \"\"\"\n response = client.put(\"/user/%s\" % default_user.id, json={\n \"phone_number\": \"063\",\n \"lastname\": \"Snippetko\"\n }, headers=admin.headers)\n assert response.status_code == 403\n\n \"\"\"\n 5. Create movie\n \"\"\"\n response = client.post('/movie', json={\n \"name\": \"Once upon time in Hollywood ...\",\n \"picture\": \"70-th.png\",\n \"info\": \"10-th film by Qwentin Tarantino\",\n \"actors\": \"Bred Pitt, Leonardo Di Caprio\",\n \"duration\": \"2:45:56\"\n }, headers=admin.headers)\n assert response.status_code == 201\n assert b'Once upon time in Hollywood ...' in response.data\n assert b\"2:45:56\" in response.data\n\n # movie id (\"Once upon time in Hollywood\")\n movie_id = response.get_json()['Movie']['id']\n\n \"\"\"\n 6. Update movie\n \"\"\"\n response = client.put('/movie/%s' % movie_id, json={\n \"actors\": \"Bred Pitt, Leonardo Di Caprio, Margo Robbie\"\n }, headers=admin.headers)\n assert response.status_code == 200\n assert b\"Bred Pitt, Leonardo Di Caprio, Margo Robbie\" in response.data\n\n \"\"\"\n 7. Create movie schedule\n \"\"\"\n response = client.post('/schedule', json={\n \"date\": \"14-09-2019\",\n \"time\": \"15:45:00\",\n \"movie_id\": movie_id\n }, headers=admin.headers)\n assert response.status_code == 201\n assert b\"14-09-2019\" in response.data\n assert b\"15:45:00\" in response.data\n\n # movie schedule id\n movie_schedule_id = response.get_json()[\"MovieSchedule\"]['id']\n\n \"\"\"\n 8. Update movie schedule\n \"\"\"\n response = client.put('/schedule/%s' % movie_schedule_id, json={\n \"time\": \"18:55:00\"\n }, headers=admin.headers)\n assert response.status_code == 200\n assert b\"18:55:00\" in response.data\n assert b\"14-09-2019\" in response.data\n\n \"\"\"\n 9. Delete movie(when there are movie schedule on it)\n \"\"\"\n response = client.delete('/movie/%s' % movie_id, headers=admin.headers)\n assert response.status_code == 409\n\n \"\"\"\n 10. Delete movie schedule\n \"\"\"\n response = client.delete('/schedule/%s' % movie_schedule_id,\n headers=admin.headers)\n assert response.status_code == 200\n\n \"\"\"\n 11. Deleting again\n \"\"\"\n response = client.delete('/schedule/%s' % movie_schedule_id,\n headers=admin.headers)\n assert response.status_code == 404\n\n \"\"\"\n 12. Get all movies\n \"\"\"\n response = client.get('/movie')\n assert response.status_code == 200\n assert b\"Bred Pitt, Leonardo Di Caprio, Margo Robbie\" in response.data\n assert b\"Once upon time in Hollywood ...\" in response.data\n\n \"\"\"\n 13. Delete movie\n \"\"\"\n response = client.delete('/movie/%s' % movie_id, headers=admin.headers)\n assert response.status_code == 200\n\n \"\"\"\n 13. 
Again getting all movies\n \"\"\"\n response = client.get('/movie')\n assert response.status_code == 200\n assert b\"Bred Pitt, Leonardo Di Caprio, Margo Robbie\" not in response.data\n assert b\"Once upon time in Hollywood ...\" not in response.data", "def test_admin_user(self):\n user = self.template_users['staff_user']\n self.client.login(email=user['email'], password=user['password'])\n\n # Admins can see everything\n response = self.client.get(reverse('api:log-list'))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], self.object_count)\n\n # Deletion should be possible\n response = self.client.post(reverse('api:log-erase'), {\n 'before': str(timezone.now()),\n 'max_severity': LogEntry.ERROR,\n })\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['deleted'], self.object_count)\n self.assertEqual(LogEntry.objects.count(), 0)", "def test_handle_create_as_admin(self, mock_uuid):\r\n mock_uuid.uuid4.return_value = \"1\"\r\n team = Team(\"GTID\", \"team-name\", \"name\")\r\n calling_user = User(user)\r\n calling_user.permissions_level = Permissions.admin\r\n self.mock_facade.retrieve.return_value = calling_user\r\n self.mock_facade.query.return_value = [team]\r\n project = Project(\"GTID\", [\"repo-link\"])\r\n project_attach = [project.get_attachment()]\r\n with self.app.app_context():\r\n resp, code = \\\r\n self.testcommand.handle(\"project create repo-link team-name\",\r\n user)\r\n expect = {'attachments': project_attach}\r\n self.assertDictEqual(resp, expect)\r\n self.assertEqual(code, 200)\r\n self.mock_facade.query.assert_called_once_with(Team,\r\n [(\"github_team_name\",\r\n \"team-name\")])\r\n self.mock_facade.store.assert_called_once_with(project)", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_collection_viewset_create_as_superuser(mocker, post_data, logged_in_apiclient):\n mocker.patch(\"ui.serializers.get_moira_client\")\n client, user = logged_in_apiclient\n user.is_superuser = True\n user.save()\n url = reverse(\"models-api:collection-list\")\n result = client.post(url, post_data, format=\"json\")\n assert result.status_code == status.HTTP_201_CREATED\n assert \"videos\" not in result.data", "def test_post_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n 
self.assertIn(title, response.content)\n self.assertIn(body, response.content)\n self.assertIn(user_url, response.content)", "def setUpTestData(cls):\n cls.test_resource = Resource(name='Test', slug='test', description='')\n cls.test_resource.full_clean()\n cls.test_resource.save()\n cls.test_faculty = Faculty(name='Test', slug='test')\n cls.test_faculty.full_clean()\n cls.test_faculty.save()\n cls.test_department = Department(name='Test', slug='test', faculty=cls.test_faculty)\n cls.test_department.full_clean()\n cls.test_department.save()\n cls.test_agreement = Agreement(title='test-one',\n slug='test-one',\n resource=cls.test_resource,\n body='body',\n redirect_url='https://example.com',\n redirect_text='example-redirect')\n cls.test_agreement.full_clean()\n cls.test_agreement.save()\n cls.test_user = get_user_model().objects.create_user(username='test',\n first_name='test',\n last_name='test',\n email='[email protected]',\n password='testtesttest')", "def test_data_admin_page(self):\n self.login(self.data_admin.user.username)\n self._go_to_data_admin_page()\n self.check_page_title(self.data_admin_config.get('PAGE_TITLE'))\n self.check_page_contains_ids(self.data_admin_config.get('ADMIN_LINKS'))", "def test_detail_views(self):\n obj = self.create_post(title='Some new title for new test')\n response = self.client.get(obj.get_absolute_url())\n # TODO You need to check that the description and title are present in the html returned from the server Dilshad\n self.assertEqual(response.status_code, 200)", "def test_create_user_page(self):\n\n # Get the admin url and send a GET request\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n # Assertions\n self.assertEqual(res.status_code, 200)", "def setUp(self):\n\n self.user = User.objects.create_user(username=USER_USERNAME, email=USER_EMAIL, password=USER_PWD)\n self.user.is_staff = True\n self.user.save()\n\n logged = self.client.login(username=USER_USERNAME, password=USER_PWD)\n self.assertEqual(logged, True)\n\n client = Client.objects.create(name='Fulano de Tal')\n specie = Specie.objects.create(name='Felina')\n breed = Breed.objects.create(specie=specie, name='Maine Coon')\n Animal.objects.create(owner=client, specie=specie, breed=breed, animal_name='Bidu', fur='l')", "def test_is_admin_user(self):\n admin = User.objects.get(email='[email protected]')\n self.assertEqual(admin.is_staff, True)", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def setUp(self):\n self.admin = User.objects.get(username='admin')\n self.client = APIClient()\n self.client.force_authenticate(user=self.admin)\n self.data = {'name': 'testCompany', 'address': {\n 'address1': '123 fake st', 'address2': 'fake address 2',\n 'city': 'nowhere', 'state': 'IN', 'zip': '90210'}}\n self.url = reverse('Company-list')", "def test_01_admin_index(self):\r\n self.register()\r\n res = self.app.get(\"/admin\", follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"There should be an index page for admin users and apps\"\r\n assert \"Settings\" in res.data, err_msg\r\n divs = ['featured-apps', 'users', 'categories', 'users-list']\r\n for div in divs:\r\n err_msg = \"There should be a button for managing %s\" % div\r\n assert dom.find(id=div) is not None, err_msg", "def setUp(self):\n\n super(VMTest, self).setUp()\n self.config_drive = None\n self.mock = mox.Mox()\n self.admin_context = context.RequestContext('admin', '',\n is_admin=True)", "def 
test_page_list_admin(self):\n user = self.get_superuser()\n title_1 = 'page'\n title_2 = 'inner'\n title_3 = 'page 3'\n page = create_page(title_1, 'page.html', 'en', published=True)\n page_2 = create_page(title_2, 'page.html', 'en', published=True, parent=page)\n page_3 = create_page(title_3, 'page.html', 'en', published=False)\n\n with self.login_user_context(user):\n url = reverse('api:page-list')\n response = self.client.get(url, format='json')\n self.assertEqual(len(response.data), 3)\n for page in response.data:\n self.assertIn(page.get('title'), {title_1, title_2, title_3})", "def setUp(self):\n super(IndexViewsTestCase, self).setUp()\n params = POST1_PARAMS.copy()\n params['blog'] = self.test_blog\n params['author'] = self.test_user1.author\n self.add_post(params)", "def test_lesson_list_admin(client, auth_user, init_database, add_data):\n response = client.post(url_for('root.index'),data=dict(email='[email protected]',password='password'))\n # try to get home\n response = client.get(url_for('lessons.list'))\n assert response.status_code == 200\n #assert 0\n assert b'Grade 1 ' in response.data #part of the table with lessons for grade 2\n assert b'Grade 2 ' in response.data #part of the table with lessons for grade 2\n assert b'Grade 6 ' in response.data #part of the table with lessons for grade 6", "def setUp(self):\n TCBase.setUp(self)\n\n # ---\n\n resp = self.request(\n self.client.post,\n '/admin/survey',\n {\n 'title': 'title',\n 'description': 'description',\n 'start_date': '2018-01-01',\n 'end_date': '2018-03-01',\n 'target': ujson.dumps([1, 3])\n },\n self.admin_access_token\n )\n\n survey_id = self.get_response_data(resp)['id']\n\n self.json_request(\n self.client.post,\n '/admin/survey/question',\n {\n 'survey_id': survey_id,\n 'questions': [\n {\n 'title': 'title',\n 'is_objective': False\n },\n {\n 'title': 'title',\n 'is_objective': False\n }\n ]\n },\n self.admin_access_token\n )", "def setUp(self):\n self.superuser = User.objects.create_superuser(\n 'admin',\n '[email protected]',\n 'StrongPassword123'\n )\n self.client.login(\n username='admin',\n password='StrongPassword123'\n )", "def setUp(self):\n self.superuser = User.objects.create_superuser(\n 'admin',\n '[email protected]',\n 'StrongPassword123'\n )\n self.client.login(\n username='admin',\n password='StrongPassword123'\n )", "def test_admin_list(self):\n response = self.client.get('/tests/dashboard/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, '<a href=\"/admin/auth/group/\">Group</a>', html=True)\n self.assertContains(response, '<a href=\"/admin/auth/user/\">User</a>', html=True)", "def test_collection_viewset_create_as_staff(mocker, post_data, logged_in_apiclient):\n mocker.patch(\"ui.serializers.get_moira_client\")\n client, user = logged_in_apiclient\n user.is_staff = True\n user.save()\n url = reverse(\"models-api:collection-list\")\n result = client.post(url, post_data, format=\"json\")\n assert result.status_code == status.HTTP_201_CREATED\n assert \"videos\" not in result.data\n\n # the creation should work also without a JSON request\n result = client.post(url, post_data)\n assert result.status_code == status.HTTP_201_CREATED\n assert \"videos\" not in result.data", "def setUp(self):\r\n super(SysadminBaseTestCase, self).setUp()\r\n self.user = UserFactory.create(username='test_user',\r\n email='[email protected]',\r\n password='foo')\r\n self.client = Client()", "def test_project_admin_views(self):\n \n 
self._check_project_admin_view(self.testproject,\"admin:index\")\n \n # check page add view \n self._check_project_admin_view(self.testproject,\"admin:comicmodels_page_add\")\n \n # check page edit view for first page in project\n firstpage = get_first_page(self.testproject) \n self._check_project_admin_view(self.testproject,\"admin:comicmodels_page_change\",args=[firstpage.pk])\n \n # check page history view for first page in project\n firstpage = get_first_page(self.testproject)\n self._check_project_admin_view(self.testproject,\"admin:comicmodels_page_history\",args=[firstpage.pk])\n \n # check overview of all pages\n self._check_project_admin_view(self.testproject,\"admin:comicmodels_page_changelist\")\n \n \n # Do the same for registration requests: check of standard views do not crash\n \n # Create some registrationrequests \n rr1 = RegistrationRequest.objects.create(user=self.participant,project=self.testproject)\n rr2 = RegistrationRequest.objects.create(user=self.participant,project=self.testproject,status=RegistrationRequest.REJECTED)\n rr3 = RegistrationRequest.objects.create(user=self.participant,project=self.testproject,status=RegistrationRequest.ACCEPTED)\n \n # Using root here because projectadmin cannot see objects created above. Don't know why but this is not tested here.\n self._check_project_admin_view(self.testproject,\"admin:comicmodels_registrationrequest_change\",args=[rr1.pk],user=self.root)\n \n self._check_project_admin_view(self.testproject,\"admin:comicmodels_registrationrequest_history\",args=[rr1.pk],user=self.root)\n \n self._check_project_admin_view(self.testproject,\"admin:comicmodels_registrationrequest_changelist\",user=self.root)\n \n # see if adding a page crashes the admin\n create_page_in_projectadmin(self.testproject,\"test_project_admin_page_add\")\n \n # Projectadminsite has the special feature that any 'comicsite' field in a form is automatically\n # set to the project this projectadmin is for. Test this by creating a\n # page without a project. 
\n create_page_in_projectadmin(self.testproject,\"test_project_admin_page_add_without_comicsite\",comicsite_for_page=None)\n \n # check that expected links are present in main admin page", "def test_admin_course_add_view(self):\n user = UserFactory(is_staff=True, is_superuser=True)\n self.client.login(username=user.username, password=\"password\")\n\n # Get the admin change view\n url = reverse(\"admin:courses_course_add\")\n self.client.get(url, follow=True)", "def test_admin_create_user(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Summer Love has been registered')\n self.assertEqual(resp.status_code, 201)", "def test_create_post(self):\n self.test_category = Category.objects.create(name='django')\n self.testuser1 = User.objects.create_superuser(\n username='test_user1', password='123456789')\n # self.testuser1.is_staff = True\n\n self.client.login(username=self.testuser1.username,\n password='123456789')\n\n data = {\"title\": \"new\", \"author\": 1,\n \"excerpt\": \"new\", \"content\": \"new\"}\n url = reverse('blog_api:listcreate')\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def admin(self, view):\n view.admin = True\n return view", "def test_get_post_list_admin(self):\n url = reverse('post-list')\n self.client.force_authenticate(user=self.superuser)\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_create_view(self):\n supplement = SupplementFactory(user=self.user_1)\n time = get_utc_now()\n\n post_data = {\n \"supplement_uuid\": str(supplement.uuid),\n \"time\": time.isoformat(),\n \"quantity\": 5,\n }\n\n response = self.client_1.post(self.url, data=post_data)\n self.assertEqual(response.status_code, 200, response.data)\n\n data = response.data\n supplement_name = data[\"supplement\"][\"name\"]\n self.assertEqual(supplement.name, supplement_name)\n self.assertIsNotNone(data[\"display_name\"])", "def test_create_contract_admin_page(self):\n # asserts that there aren't any properties in changelist view\n response = self.client.get('/admin/contracts/contract/')\n content = response.content\n self.assertNotIn('table', content)\n self.assertIn(\n '<a href=\"/admin/contracts/contract/add/\" class=\"addlink\">',\n content)\n\n # creates the contract\n payload = self.contract_one_data\n payload['tenant'] = payload['tenant'].id\n payload['property'] = payload['property'].id\n response = self.client.post(\n '/admin/contracts/contract/add/', payload, follow=True)\n self.assertEqual(response.status_code, 200)\n\n # checks it shows in listing\n response = self.client.get('/admin/contracts/contract/')\n content = response.content\n self.assertIn('table', content)\n self.assertIn(str(self.contract_one_data['rent']), content)", "def setUp(self):\n\n self.new_user = User(username=\"john\", email=\"[email protected]\", pass_secure=\"trial1\")\n\n self.new_blog = Blog(title=\"business blog\", description=\"asdfghjkl\")", "def test_admin_index(self):\n response = self.client.get('/admin/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 
\"Djrill\")", "def test_admin_event_admin_list(self):\n response = self.client.get(\"/admin/appointment/event/\")\n self.assertEqual(response.status_code, 200)", "def setUpClass(cls, user=''):\n super().setUpClass(first_admin)", "def setUp(self):\n self.tenant_root_domain = Tenant.objects.get_tenant_root_domain()\n self.site = Site.objects.create(\n name=f\"a.{self.tenant_root_domain}\",\n domain=f\"a.{self.tenant_root_domain}\")\n self.tenant = Tenant.objects.create(name=\"A\", site=self.site)\n self.domain = Domain.objects.create(domain=\"a.com\", tenant=self.tenant)\n\n self.other_site = Site.objects.create(\n name=f\"other.{self.tenant_root_domain}\",\n domain=f\"other.{self.tenant_root_domain}\"\n )\n self.other_tenant = Tenant.objects.create(\n name=\"Other\", site=self.other_site)\n self.other_domain = Domain.objects.create(\n domain=\"other.com\", tenant=self.other_tenant)\n\n self.marketing_page = Site.objects.create(\n name=\"Marketingpage\", domain=\"landingpage.com\")\n\n self.site_not_linked = Site.objects.create(\n name=f\"notlinked.{self.tenant_root_domain}\",\n domain=f\"notlinked.{self.tenant_root_domain}\")\n\n self.home_url = reverse(\"home\")\n self.secret_url = reverse(\"tenants:dashboard\")", "async def test_auth_admin_is_admin(app):\n # Admin user defined in MockPAMAuthenticator.\n name = 'admin'\n user = add_user(app.db, app, name=name, admin=False)\n assert user.admin is False\n cookies = await app.login_user(name)\n assert user.admin is True", "def setUp(self):\n super().setUp()\n Tenant.objects.get_or_create(schema_name=\"public\")", "def test_creating_supply_admin(self):\n request = self.factory.post(\n '/api/supplies/', {'name': '3d printer', 'state': 'good state', 'description': 'prints 3d objects'})\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n try:\n supply = Supply.objects.get(name='3d printer')\n self.assertEqual(supply.name, '3d printer')\n self.assertEqual(supply.state, 'good state')\n self.assertEqual(supply.description, 'prints 3d objects')\n except Supply.DoesNotExist:\n self.fail()", "def test_collection_viewset_detail_as_superuser(mocker, logged_in_apiclient):\n mocker.patch(\"ui.serializers.get_moira_client\")\n client, user = logged_in_apiclient\n user.is_superuser = True\n user.save()\n\n collection = CollectionFactory(owner=UserFactory())\n url = reverse(\"models-api:collection-detail\", kwargs={\"key\": collection.hexkey})\n result = client.get(url)\n assert result.status_code == status.HTTP_200_OK\n assert \"videos\" in result.data\n\n result = client.put(\n url,\n {\"title\": \"foo title\", \"owner\": user.id, \"view_lists\": [], \"admin_lists\": []},\n format=\"json\",\n )\n assert result.status_code == status.HTTP_200_OK\n assert result.data[\"title\"] == \"foo title\"\n\n # user can delete the collection\n result = client.delete(url)\n assert result.status_code == status.HTTP_204_NO_CONTENT", "def test_03_admin_featured_apps_as_admin(self):\r\n self.register()\r\n self.signin()\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Manage featured applications\" in res.data, res.data", "def test_create_collection(self):\n pass", "async def test_rendering_template_admin(\n hass: HomeAssistant, mock_api_client: TestClient, hass_admin_user: MockUser\n) -> None:\n hass_admin_user.groups = []\n resp = await mock_api_client.post(const.URL_API_TEMPLATE)\n assert resp.status == HTTPStatus.UNAUTHORIZED", 
"def test_delete_admin_from_org(self):\n pass", "def test_otoroshi_controllers_adminapi_templates_controller_template_spec(self):\n pass", "def testLoggedInCreator(self):\r\n\t\t\r\n\t\t# Create objects to be used for this test\r\n\t\tadmin = User.objects.get(username=\"admin\")\r\n\t\tpeebs = User.objects.get(username=\"peebs\")\r\n\t\tdr1 = DataRequest.objects.create(name=\"Important Data Needed!\", description=\"A very important piece of data\", slug=\"important-data-needed\", creator=admin)\r\n\t\tdr2 = DataRequest.objects.create(name=\"datarequest\", description=\"description\", slug=\"datarequest\", creator=peebs)\r\n\t\t\r\n\t\t# The changes to the data\r\n\t\tpost_data = {\r\n\t\t\t\"name\": \"datarequest2\",\r\n\t\t\t\"description\": \"description2\",\r\n\t\t}\r\n\t\t# log in as creator\r\n\t\tlogin = self.client.login(username='admin', password='admin')\r\n\t\tself.failUnless(login, 'Could not login')\r\n\t\t\r\n\t\t# Check that the slug will change\r\n\t\told_slug = dr1.slug\r\n\t\t\r\n\t\t# Edit the datarequest\r\n\t\tdr1_edit_url = reverse(\"epic.datarequests.views.edit_datarequest\", args=[], kwargs={'item_id':dr1.id})\r\n\t\tresponse = self.client.post(dr1_edit_url, post_data)\r\n\t\t\r\n\t\t# Grad the datarequest again since it has hopfully changed in the database\r\n\t\tdr1 = DataRequest.objects.get(pk=dr1.id)\r\n\t\t\r\n\t\t# Verify that the changes were made\r\n\t\tdr1_url = reverse(\"epic.datarequests.views.view_datarequest\", args=[], kwargs={'item_id':dr1.id})\r\n\t\tresponse = self.client.get(dr1_url)\r\n\t\tself.assertTrue(post_data['name'] in response.content)\r\n\t\tself.assertTrue(post_data['description'] in response.content)\r\n\t\tself.assertFalse('Important Data' in response.content)\r\n\t\tself.assertFalse('piece' in response.content)\r\n\t\t\r\n\t\tnew_slug = dr1.slug\r\n\t\tself.assertFalse(old_slug == new_slug)", "def testLoggedInCreator(self):\r\n\t\tadmin = User.objects.get(username=\"admin\")\r\n\t\tpeebs = User.objects.get(username=\"peebs\")\r\n\t\tdr1 = DataRequest.objects.create(name=\"Important Data Needed!\", description=\"A very important piece of data\", slug=\"important-data-needed\", creator=admin)\r\n\t\tdr2 = DataRequest.objects.create(name=\"datarequest\", description=\"description\", slug=\"datarequest\", creator=peebs)\r\n\t\t\r\n\t\t# login as the creator\r\n\t\tlogin = self.client.login(username='admin', password='admin')\r\n\t\tself.failUnless(login, 'Could not login')\r\n\t\t\r\n\t\t# Go to the edit page\r\n\t\tdr1_edit_url = reverse(\"epic.datarequests.views.edit_datarequest\", args=[], kwargs={'item_id':dr1.id})\r\n\t\tresponse = self.client.get(dr1_edit_url)\r\n\t\tself.assertEqual(response.status_code, 200)\r\n\r\n\t\t# Check that the correct stuff is on the page\r\n\t\tself.assertTrue(dr1.description in response.content)\r\n\t\tself.assertTrue(dr1.name in response.content)\r\n\t\tself.assertTrue('Edit Data Request' in response.content)", "def test_post_create_post_as_staff_user(self):\n login = self.client.login(username='testuser_staff', password='password12345')\n\n if login:\n\n new_title = \"this is a new title for create\"\n new_content = \"this is new content for create by nicholas herriot\"\n new_published_date = datetime.date(2018, 3, 3)\n data = {'title': new_title, 'content': new_content, 'publish': new_published_date}\n\n url = reverse('blogs:create')\n response = self.client.post(url, data)\n\n # There should now be a new blog create with this title and content, so check that it does exist\n new_blog = 
Post.objects.get(title='this is a new title for create')\n # print(\"*** New blog created as a slug of: {}\".format(new_blog.slug))\n # print(\"*** New blog created as a titel of: {}\".format(new_blog.title))\n # print(\"*** New blog created as content of: {}\".format(new_blog.content))\n # print(\"*** New blog created as a published date of: {}\".format(new_blog.publish))\n\n self.assertEqual(new_blog.title, new_title)\n self.assertEqual(new_blog.content, new_content)\n # TODO Get the publish date check working correctly\n # self.assertEqual(new_blog.publish, new_published_date)\n\n self.assertEqual(response.status_code, 302)\n self.client.logout()\n else:\n # TODO Make this dynamic rather than hard coded text string\n self.fail('Login Failed for testuser_staff')", "def test_admin_can_login_to_web_portal(admin):", "def setUp(self):\n self.test_faculty = Faculty(name='Test', slug='test')\n self.test_faculty.full_clean()\n self.test_faculty.save()\n self.test_department = Department(name='Test', slug='test', faculty=self.test_faculty)\n self.test_department.full_clean()\n self.test_department.save()\n self.test_resource = Resource(name='Test', slug='test', description='')\n self.test_resource.full_clean()\n self.test_resource.save()\n self.test_agreement = Agreement(title='test-one',\n slug='test-one',\n resource=self.test_resource,\n body='body',\n redirect_url='https://example.com',\n redirect_text='example-redirect')\n self.test_agreement.full_clean()\n self.test_agreement.save()", "def test_admin_xml(self):\n\n self.run_admin_test(\"xml\")", "def test_get_update_blog_post_as_admin_user(self):\n test_blog = Post.objects.get(title=\"test1\")\n login = self.client.login(username='testuser_admin', password='password12345')\n\n if login:\n url = reverse('blogs:updated', kwargs={'slug': test_blog.slug})\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'update.html')\n self.assertIn('test1', str(response.content))\n self.assertIn('this is content 1', str(response.content))\n self.client.logout()\n else:\n # TODO Make this dynamic rather than hard coded text string\n self.fail('Login Failed for testuser_admin')", "def setUp(self):\n response = self.client.post('/trainer/create/',\n {\"name\": \"Blue\",\n \"las_name\": \"Oak\"})\n self.trainer_id = response.json()[\"id\"]", "def setUpTestData(cls):\n cls.post = PostFactory(\n author__first_name='Peter',\n author__last_name='Mustermann',\n title='My test title',\n subtitle='A subtitle for the test post',\n views=10,\n last_viewed=(timezone.now() - datetime.timedelta(days=1)),\n is_active=True,\n activation_date=None\n )", "def test_post_options_admin(self):\n url = reverse('post-list')\n self.client.force_authenticate(user=self.superuser)\n response = self.client.options(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn('Post List', response.content)", "def setUp(self):\n self.superuser = User.objects.create_superuser(\n 'admin',\n '[email protected]',\n 'StrongPassword123'\n )", "def test_user_can_change_admin(self):\n self.assertTrue(self.story.user_can_change(self.admin_user))", "def test_admin_public(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_admin_event_admin_add(self):\n response = self.client.get(\"/admin/appointment/event/add/\")\n 
self.assertEqual(response.status_code, 200)", "def test_first_user_is_admin(self):\n user = User.objects.create(username='username', email='[email protected]')\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_superuser)\n user = User.objects.create(username='username2', email='[email protected]')\n self.assertFalse(user.is_staff)\n self.assertFalse(user.is_superuser)", "def setUp(self):\n # create our test user\n self.test_user1 = get_user_model().objects.create(**USER1_PARAMS)\n self.test_user2 = get_user_model().objects.create(**USER2_PARAMS)\n self.rogue_user = get_user_model().objects.create(**ROGUE_USER_PARAMS)\n self.test_admin = get_user_model().objects.create(**ADMIN_USER_PARAMS)\n site = Site.objects.get_current()\n self.test_blog = Blog.objects.create(site=site, owner=self.test_user1,\n **TEST_BLOG_PARAMS)\n self.test_category1 = Category.objects.create(\n blog=self.test_blog,\n **CAT1_PARAMS\n )\n self.client = Client()\n # self.post = Post.objects.create(\n # title=\"Test User 1 Post\",\n # body=\"This is some stuff.\\n\\nSome stuff, you know.\",\n # blog=self.test_blog,\n # author=self.test_user1.author\n # )\n # self.post.save()\n # enable remote access for test_user1\n self.test_user1.author.remote_access_enabled = True\n self.test_user1.author.save()\n\n # disable remote access for test_user2\n self.test_user2.author.remote_access_enabled = False\n self.test_user2.author.save()\n\n self.rogue_user.author.remote_access_enabled = True\n self.rogue_user.author.save()\n\n self.test_admin.author.remote_access_enabled = True\n self.test_admin.author.save()", "def test_get_context_data(self):\n self.view.object = self.obj\n context = self.view.get_context_data()\n self.assertIn(\"code\", context)\n self.assertIn(\"edit\", context)\n self.assertTrue(context[\"edit\"])", "def setUp(self):\n self.staff = get_user_model().objects.create_doctor(\n email='[email protected]',\n password='testpass@4',\n username='tempuser4'\n )\n self.staff.is_staff = True\n self.staff.save()\n self.staff.refresh_from_db()\n\n self.client = APIClient()\n self.client.force_authenticate(self.staff)\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality1'\n )", "def setUp(self):\r\n\r\n User.query.delete()\r\n Article.query.delete()\r\n db.session.commit()\r\n\r\n u = User(\r\n username=\"testuser\",\r\n password=\"HASHED_PASSWORD\",\r\n location=\"US-VA\"\r\n )\r\n\r\n db.session.add(u)\r\n db.session.commit()\r\n\r\n a = Article(\r\n path='testPath',\r\n url='https://wwww.testurl.com',\r\n location=\"US\",\r\n title=\"testTitle\",\r\n excerpt=\"testExcerpt\",\r\n image=\"https://www.testurl.com/testimg.jpg\",\r\n source=\"testSource\",\r\n published_date=\"2020-04-25 05:06:00\",\r\n saved_by=u.id\r\n )\r\n\r\n db.session.add(a)\r\n db.session.commit()\r\n\r\n self.client = app.test_client()\r\n app.config['TESTING'] = True\r\n app.config['WTF_CSRF_ENABLED'] = False", "def setUp(self):\n self.client = Client()\n #creamos un usuario en la db\n self.user = User.objects.create_user('super', '[email protected]', 'super')", "def setUp(self) -> None:\n self.serializer = EmployeeSerializer\n self.view = EmployeeDetailView()", "def test_get_create_post_as_staff_user(self):\n\n login = self.client.login(username='testuser_staff', password='password12345')\n\n if login:\n url = reverse('blogs:create')\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'create.html')\n self.client.logout()\n else:\n # TODO Make this 
dynamic rather than hard coded text string\n self.fail('Login Failed for testuser_staff')", "def setUpTestData(cls):\n super().setUpTestData()\n cls.staff_user = StaffFactory(course_key=cls.course.id, password=TEST_PASSWORD)", "def test_super_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def test_super_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def test_blog_add():", "def setUp(self):\n user = User.objects.create(username=\"nerd\")\n self.post_name = 'name1'\n self.post = Post(name=self.post_name, owner=user)", "def test_renders(self):\r\n self._login_admin()\r\n res = self.app.get('/admin/new')\r\n self.assertTrue(\r\n 'Add Bookmark' in res.body,\r\n \"Should see the add bookmark title\")", "def setUp(self):\n User.users = {}\n self.app = User('[email protected]', 'admin', 'admin')\n # Set some default user data\n self.user_data = {\n 1: {\n 'email': '[email protected]',\n 'username': 'admin',\n 'password': 'admin' \n }\n \n }" ]
[ "0.7291503", "0.6833036", "0.6821154", "0.6697566", "0.668051", "0.66122943", "0.64742976", "0.6402408", "0.6336394", "0.63025343", "0.6262725", "0.62581766", "0.62279063", "0.6192794", "0.61778784", "0.61756825", "0.61681473", "0.6146008", "0.6134811", "0.6131507", "0.60937697", "0.60937697", "0.6089611", "0.60758567", "0.6064063", "0.5968191", "0.5968191", "0.5968191", "0.5968191", "0.59581244", "0.5955738", "0.5949259", "0.5947666", "0.59419686", "0.5935675", "0.59308594", "0.591284", "0.59099996", "0.58849996", "0.58777875", "0.5876318", "0.5876173", "0.5874345", "0.5873346", "0.58520514", "0.5850327", "0.5850327", "0.5847958", "0.58476675", "0.584643", "0.5823994", "0.58168656", "0.58144104", "0.58103466", "0.5809255", "0.5800647", "0.57995045", "0.57888365", "0.5788521", "0.57768774", "0.5758363", "0.57570827", "0.574944", "0.5746828", "0.57447785", "0.5743275", "0.5742983", "0.5719794", "0.5715972", "0.57050896", "0.56979066", "0.56941116", "0.5686544", "0.5669393", "0.5660722", "0.56590086", "0.56584704", "0.56551075", "0.5650944", "0.56505346", "0.5646516", "0.564633", "0.56453127", "0.5640534", "0.5635351", "0.5630221", "0.5624642", "0.56223506", "0.5615701", "0.56153053", "0.5615238", "0.56031084", "0.5596753", "0.55960375", "0.5595705", "0.55939144", "0.55939144", "0.5571539", "0.5569598", "0.5568691", "0.55671555" ]
0.0
-1
Fixture for bank object passed as an argument to the admin bank view test function
def bank():
    bank = Bank.objects.create(name='Random Bank')
    return bank
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_client_bank_account_retrieve(self):\n pass", "def test_client_bank_accounts_list(self):\n pass", "def test_client_bank_account_create(self):\n pass", "def test_create_virtual_account_beneficiary(self):\n pass", "def test_edit_boat(self):\n pass", "def test_households_in_admin_unit(self):", "def test_get_virtual_account_beneficiary(self):\n pass", "def test_create_account(self):\n url = reverse('portal-list')\n data = {'brandID': 5, 'status' : 'Enabled'}\n response = self.client.post(url, data, format='json')\n\n #response = self.client.get(url)\n #print response\n #response = self.client.get('/v1/portal/1/')\n #print response\n #self.assertEqual(response.data[\"ud\"], {'id': 1, 'brandID': 4})\n self.assertEqual(response.data[\"brandID\"], 5)\n\n \"\"\"\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Account.objects.count(), 1)\n self.assertEqual(Account.objects.get().name, 'DabApps')\n \"\"\"", "def fixtures():", "def example_bank_account():\n \n return BankAccount(\"Test User\", 1000.0)", "def setUp(self):\n self.admin = User.objects.get(username='admin')\n self.client = APIClient()\n self.client.force_authenticate(user=self.admin)\n self.data = {'name': 'testCompany', 'address': {\n 'address1': '123 fake st', 'address2': 'fake address 2',\n 'city': 'nowhere', 'state': 'IN', 'zip': '90210'}}\n self.url = reverse('Company-list')", "def test_client_bank_account_update(self):\n pass", "def test_create_boat(self):\n pass", "def test_get_virtual_account_beneficiaries(self):\n pass", "def test_success_client_balance_view(self):\n data = dict(email='[email protected]', pin=8527)\n self.client.post(reverse('account_login'), data)\n response = self.client.get(reverse('account_balance', args=[self.acc.id]))\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(len(response.data), 2)\n self.assertEqual(response.data[0].get('balance'), 100)\n self.assertEqual(response.data[0].get('currency'), 980)\n\n self.assertEqual(response.data[1].get('balance'), 1200)\n self.assertEqual(response.data[1].get('currency'), 840)", "def test_client_bank_account_delete(self):\n pass", "def test_balance(self):\n\n self.assertEqual(self.cash_report.balance(), 150)", "def fixture_fixture_business_details_example():\n test_example = BusinessDetails(\n business_problem=BUSINESS_PROBLEM,\n business_stakeholders=BUSINESS_STAKEHOLDERS,\n line_of_business=LINE_OF_BUSINESS,\n )\n return test_example", "def test_show_bag(self):\n response = self.client.get('/shopping_bag/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'shopping_bag/bag.html')", "def setUpFixture(self):\n pass", "def setUp(self):\n Beneficiary.objects.create(id=1, lastname='Doe', lastname2='', middlename='', firstname='Jane', nativename='',\n nationality_country_iso_code='FRA', code='', date_of_birth='1970-07-01',\n country_of_birth_iso_code='FRA', gender='Male', address='42 Rue des fleurs',\n postal_code='75000', city='Paris', country_iso_code='FRA', msisdn='1123131413',\n email='[email protected]', id_type='PASSPORT', id_country_iso_code='',\n id_number='1123131413', occupation='Teacher', bank_accout_holder_name='',\n province_state='')\n self.client = Client()", "def test_create_warranty(self):\n pass", "def test_create_ban(self):\n pass", "def test_admin(self):\n assert(admin)", "def test_duo_account_list(self):\n pass", "def test_withdraw_amount_view(self):\n self.account.current_balance = 100000\n self.account.save()\n\n amount = random.randint(10, 100000)\n 
client.force_authenticate(user=self.account.user, token=self.token)\n url = reverse('customer_withdraw')\n request = client.post(url, {'amount': amount}, format='json')\n self.account.refresh_from_db()\n self.assertEqual(100000-amount, self.account.current_balance)", "def bank_account():\n return BankAccount()", "def test_load_fixture(caplog):\n caplog.set_level('INFO')\n\n with pytest.raises(Advisor.DoesNotExist):\n Advisor.objects.get(pk=ADVISER_FIXTURE['pk'])\n\n response = _request_load_fixture({'fixture': [ADVISER_FIXTURE]})\n\n assert response.status_code == status.HTTP_201_CREATED\n\n adviser = Advisor.objects.get(pk=ADVISER_FIXTURE['pk'])\n assert adviser.email == ADVISER_FIXTURE['fields']['email']\n assert adviser.first_name == ADVISER_FIXTURE['fields']['first_name']\n assert adviser.last_name == ADVISER_FIXTURE['fields']['last_name']\n assert str(adviser.dit_team_id) == ADVISER_FIXTURE['fields']['dit_team']\n\n fixture_info = [\n 'Loading fixture: [',\n ' {',\n ' \"fields\": {',\n f' \"dit_team\": \"{ADVISER_FIXTURE[\"fields\"][\"dit_team\"]}\",',\n f' \"email\": \"{ADVISER_FIXTURE[\"fields\"][\"email\"]}\",',\n f' \"first_name\": \"{ADVISER_FIXTURE[\"fields\"][\"first_name\"]}\",',\n f' \"last_name\": \"{ADVISER_FIXTURE[\"fields\"][\"last_name\"]}\"',\n ' },',\n ' \"model\": \"company.advisor\",',\n f' \"pk\": \"{ADVISER_FIXTURE[\"pk\"]}\"',\n ' }',\n ']',\n ]\n assert caplog.messages == ['\\n'.join(fixture_info)]", "def test_edit_view(self):\n self.client.post(reverse('misago:admin:users:bans:new'), data={\n 'check_type': '0',\n 'banned_value': 'Admin',\n })\n\n test_ban = Ban.objects.get(banned_value='admin')\n form_link = reverse('misago:admin:users:bans:edit', kwargs={'pk': test_ban.pk})\n\n response = self.client.post(form_link, data={\n 'check_type': '1',\n 'banned_value': '[email protected]',\n 'user_message': 'Lorem ipsum dolor met',\n 'staff_message': 'Sit amet elit',\n 'expires_on': '',\n })\n self.assertEqual(response.status_code, 302)\n\n response = self.client.get(reverse('misago:admin:users:bans:index'))\n response = self.client.get(response['location'])\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, '[email protected]')", "def test_get_boat(self):\n pass", "def test_deposit_amount_view(self):\n client.force_authenticate(user=self.account.user, token=self.token)\n url = reverse('customer_deposit')\n amount_1 = random.randint(10, 50000)\n amount_2 = random.randint(10, 50000)\n\n request_1 = client.post(url, {'amount': amount_1}, format='json')\n self.account.refresh_from_db()\n\n request_2 = client.post(url, {'amount': amount_2}, format='json')\n self.account.refresh_from_db()\n\n self.assertEqual(amount_1 + amount_2, self.account.current_balance)", "def test_get_virtual_accounts(self):\n pass", "def test_create_virtual_account(self):\n pass", "def test_duo_account_post(self):\n pass", "def test_home_view_two_object(self):\n self.create_obj()\n UserData.objects.create(\n first_name=\"aaaaaa\",\n last_name=\"aaaaa\",\n date_of_birth='1998-02-23',\n bio=\"aaa\",\n email=\"[email protected]\",\n jabber=\"aaaaa\",\n skype=\"aaaaa\",\n other_contacts=\"aaaaa\"\n )\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(UserData.objects.first(), response.context['data'])", "def load_test_account() -> BankAccount:\n budget_manager = BudgetCreator.load_test_budget_manager()\n return TroublemakerBankAccount('123123', 'HSBC', 1000, budget_manager)", "def test_get_ban(self):\n pass", "def 
test_billing_info(self):\n # Test get does not contain delete button\n response = self.client.get(reverse(\"billing_info\"))\n self.assertNotContains(\n response,\n '<a class=\"btn btn-danger\" href=\"/plan/billing/delete/\">Delete</a>',\n html=True,\n )\n\n # Test create\n parameters = {\n \"country\": \"GR\",\n \"tax_number\": \"GR104594676\",\n \"name\": \"bar\",\n \"street\": \"baz\",\n \"city\": \"bay\",\n \"zipcode\": \"bax\",\n }\n response = self.client.post(\n reverse(\"billing_info\") + \"?next=/plan/pricing/\", parameters\n )\n self.assertRedirects(\n response,\n \"/plan/pricing/\",\n status_code=302,\n target_status_code=200,\n fetch_redirect_response=True,\n )\n self.assertEqual(self.user.billinginfo.tax_number, \"EL104594676\")\n\n # Test get contains delete button\n response = self.client.get(reverse(\"billing_info\"))\n self.assertContains(\n response,\n '<a class=\"btn btn-danger\" href=\"/plan/billing/delete/\">Delete</a>',\n html=True,\n )\n\n # Test update\n del parameters[\"tax_number\"]\n parameters[\"name\"] = \"foo\"\n response = self.client.post(\n reverse(\"billing_info\") + \"?next=/plan/pricing/\", parameters\n )\n self.user.billinginfo.refresh_from_db()\n self.assertEqual(self.user.billinginfo.name, \"foo\")\n self.assertEqual(self.user.billinginfo.tax_number, \"\")\n\n # Test delete\n response = self.client.post(reverse(\"billing_info_delete\"))\n with self.assertRaises(BillingInfo.DoesNotExist):\n self.user.billinginfo.refresh_from_db()", "def setUpTestData(cls):\n User.objects.create_user('Claire', '[email protected]', '12345678')\n User.objects.create_user('Georgie', '[email protected]', '12345678')\n User.objects.create_user('Tristan', '[email protected]', '12345678')\n\n Expense.objects.create(\n date=date.today(),\n description=\"Test balance 1\",\n category=\"Food\",\n amount=20,\n converted_amount=20,\n currency=\"GBP\",\n who_for=\"Everyone\",\n who_paid=\"Georgie\"\n )\n Expense.objects.create(\n date=date.today(),\n description=\"Test balance 2\",\n category=\"Food\",\n amount=10,\n converted_amount=10,\n currency=\"GBP\",\n who_for=\"Everyone\",\n who_paid=\"Claire\"\n )\n Expense.objects.create(\n date=date.today(),\n description=\"Test balance 3\",\n category=\"Food\",\n amount=30,\n converted_amount=30,\n currency=\"GBP\",\n who_for=\"Everyone\",\n who_paid=\"Tristan\"\n )", "def test_get_virtual_account_by_id(self):\n pass", "def test_create_contract_admin_page(self):\n # asserts that there aren't any properties in changelist view\n response = self.client.get('/admin/contracts/contract/')\n content = response.content\n self.assertNotIn('table', content)\n self.assertIn(\n '<a href=\"/admin/contracts/contract/add/\" class=\"addlink\">',\n content)\n\n # creates the contract\n payload = self.contract_one_data\n payload['tenant'] = payload['tenant'].id\n payload['property'] = payload['property'].id\n response = self.client.post(\n '/admin/contracts/contract/add/', payload, follow=True)\n self.assertEqual(response.status_code, 200)\n\n # checks it shows in listing\n response = self.client.get('/admin/contracts/contract/')\n content = response.content\n self.assertIn('table', content)\n self.assertIn(str(self.contract_one_data['rent']), content)", "def test_duo_account_get(self):\n pass", "def test_create_manual_account02(self, client):\n user = UserFactory.get_user()\n institution = InstitutionFactory.get_manual_institution()\n account = Account.objects.create_manual_account(\n user.id, institution.id, '1111111', '')\n assert isinstance(account, 
Account)\n assert account.type_ds == Account.DEBT\n\n account = Account.objects.create_manual_account(\n user.id, institution.id, '2222222', 'some')\n assert Item.objects.count() == 1", "def test_create_drink_created_by_admin(self):\n self.test_create_admin_user()\n self.test_create_seting_bar()\n user = UserBase.objects.get(username='admin')\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + user.token)\n url = reverse('drink-list')\n data = {\n 'name': 'Testing Drink',\n 'ingredients':'[{\"unit\":\"0\",\"ratio\":2,\"ingredient\":2001},{\"unit\":\"0\",\"ratio\":2,\"ingredient\":2001}]'\n }\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_client_tax_information_create(self):\n pass", "def setUp(self):\n\n self.user = User.objects.create_user(username=USER_USERNAME, email=USER_EMAIL, password=USER_PWD)\n self.user.is_staff = True\n self.user.save()\n\n logged = self.client.login(username=USER_USERNAME, password=USER_PWD)\n self.assertEqual(logged, True)\n\n client = Client.objects.create(name='Fulano de Tal')\n specie = Specie.objects.create(name='Felina')\n breed = Breed.objects.create(specie=specie, name='Maine Coon')\n Animal.objects.create(owner=client, specie=specie, breed=breed, animal_name='Bidu', fur='l')", "def test_alien_data(self):", "def test_business_model(self):\n\n self.user.save()\n query_user = User.query.filter_by(email='[email protected]').first()\n\n business = Business('CosmasTech', 'Technology', 'Nairobi',\n 'AI is transforming human life', query_user.id)\n business.save()\n\n query_res = Business.query.filter_by(id=1).first()\n self.assertEqual(query_res.name, 'cosmastech')", "def test_get_insumo(self):", "def test_beneficiaries_retrieve_validate_content_that_will_pass(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n url = reverse('beneficiary:beneficiary-entity-by-id-retrieve', kwargs={'pk': 1})\n response = self.client.get(url)\n\n # serialize all model object data\n beneficiaries = Beneficiary.objects.get(pk=1)\n serializer = BeneficiarySerializer(beneficiaries, many=False)\n self.assertEqual(response.json(), serializer.data)\n self.assertEqual(response.status_code, 200)", "def bank(self):\n return self.random_element(self.banks)", "def test_create_virtual_account_transfer(self):\n pass", "def test_accounts(self):\n self.assertEqual(AccountsConfig.name, \"accounts\")", "def test_create_account(self):\n url = reverse('hospital_list')\n data = {'name': 'DabApps','mobile': 846800258}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Hospital.objects.count(), 1)\n self.assertEqual(Hospital.objects.get().name, 'DabApps')\n self.assertEqual(Hospital.objects.get().mobile, 846800258)", "def test_beneficiaries_create_callback_that_will_pass(self):\n post_body = {\n 'lastname': 'Doe',\n 'lastname2': '',\n 'middlename': '',\n 'firstname': 'Jane',\n 'nativename': '',\n 'nationality_country_iso_code': 'FRA',\n 'code': '',\n 'date_of_birth': '1970-07-01',\n 'country_of_birth_iso_code': 'FRA',\n 'gender': 'Male',\n 'address': '42 Rue des fleurs',\n 'postal_code': '75000',\n 'city': 'Paris',\n 'country_iso_code': 'FRA',\n 'msisdn': '1123131413',\n 'email': '[email protected]',\n 'id_type': 'PASSPORT',\n 'id_country_iso_code': '',\n 'id_number': '1123131413',\n 'occupation': 'Teacher',\n 'bank_accout_holder_name': '',\n 'province_state': ''\n }\n print('the 
test function name: {}'.format(sys._getframe().f_code.co_name))\n url = reverse('beneficiary:beneficiaries-create')\n response = self.client.post(url, data=post_body, content_type='application/json')\n return self.assertTrue(response.status_code, 201)", "def test_delete_boat(self):\n pass", "def test_wallets_post(self):\n pass", "def setUp(self):\n self.testUser = User.objects.get(username=\"c1e1\")\n self.client = APIClient()\n self.client.force_authenticate(user=self.testUser)\n self.data = {\n \"tracking\": 1234,\n \"mail_class\": \"12\",\n \"return_address\": {\n \"address1\": \"1234\",\n \"address2\": \"1234\",\n \"city\": \"1234\",\n \"state\": \"12\",\n \"zip\": 1234\n },\n \"rate\": 1234,\n \"address\": {\n \"address1\": \"1234\",\n \"address2\": \"1234\",\n \"city\": \"1234\",\n \"state\": \"12\",\n \"zip\": 1234\n },\n \"cost_center\": CostCenter.objects.filter(company=\n self.testUser.profile.company.pk)[0].pk\n }\n self.url = reverse('MailPiece-list')", "def test_accessible_borrow_list_for_student(self):\n client1 = APIClient()\n client1.login(username=self.students[0].username, password=\"salam*123\")\n client1.post(\"/borrows/\", data={\"book\": 1})\n client2 = APIClient()\n client2.login(username=self.students[1].username, password=\"salam*123\")\n client2.post(\"/borrows/\", data={\"book\": 2})\n response = client1.get(\"/borrows/\")\n self.assertEqual(response.json()[\"count\"], 1)\n borrow_id = response.json()[\"results\"][0][\"id\"]\n borrow = Borrow.objects.get(id=borrow_id)\n self.assertEqual(borrow.student, self.students[0])", "def setUp(self):\n self.testUser = User.objects.get(username=\"c1e1\")\n self.client = APIClient()\n self.client.force_authenticate(user=self.testUser)\n self.data = {'name': 'testCostCenter',\n 'address': {'address1': '123 fake st', 'address2': 'fake address 2',\n 'city': 'nowhere', 'state': 'IN', 'zip': '90210'}}\n self.url = reverse('CostCenter-list')", "def test_get_virtual_account_payments(self):\n pass", "def test_save(self):", "def test_save(self):", "def test_delete_view(self):\n self.client.post(reverse('misago:admin:users:bans:new'), data={\n 'check_type': '0',\n 'banned_value': 'TestBan',\n })\n\n test_ban = Ban.objects.get(banned_value='testban')\n\n response = self.client.post(reverse('misago:admin:users:bans:delete', kwargs={\n 'pk': test_ban.pk\n }))\n self.assertEqual(response.status_code, 302)\n\n response = self.client.get(reverse('misago:admin:users:bans:index'))\n self.client.get(response['location'])\n response = self.client.get(response['location'])\n\n self.assertEqual(response.status_code, 200)\n self.assertNotContains(response, test_ban.banned_value)", "def test_companies_company_id_data_bank_accounts_account_id_transactions_get(self):\n pass", "def test_individual_ACH(self):\n form_data = self.form_data()\n form_data['payment_type'] = 'DirectDebit'\n form = DonationPaymentForm(data=form_data)\n self.assertTrue(form.is_valid())", "def test_mock_datasource_accounts(self):\n self.assertIsInstance(self.test_data, UrjanetData)\n self.assertIsNotNone(self.test_data.accounts)\n self.assertEqual(len(self.test_data.accounts), 2)\n\n # This account is loaded because it has a meter\n account1 = self.test_data.accounts[0]\n self.assertIsInstance(account1, Account)\n self.assertEqual(account1.PK, 1)\n self.assertEqual(account1.UtilityProvider, \"test_provider\")\n self.assertEqual(account1.AccountNumber, \"acct_1\")\n self.assertEqual(account1.RawAccountNumber, \"raw_acct_1\")\n self.assertEqual(account1.SourceLink, 
\"test_link\")\n self.assertEqual(account1.StatementType, \"test_stmt_type\")\n self.assertEqual(account1.StatementDate, date(2016, 2, 5))\n self.assertEqual(account1.IntervalStart, date(2016, 1, 1))\n self.assertEqual(account1.IntervalEnd, date(2016, 2, 1))\n self.assertEqual(account1.TotalBillAmount, Decimal(100))\n self.assertEqual(account1.AmountDue, Decimal(200))\n self.assertEqual(account1.NewCharges, Decimal(300))\n self.assertEqual(account1.OutstandingBalance, Decimal(400))\n self.assertEqual(account1.PreviousBalance, Decimal(500))\n self.assertEqual(len(account1.floating_charges), 0)\n self.assertEqual(len(account1.meters), 1)\n\n # This account is loaded because it has floating charges\n account2 = self.test_data.accounts[1]\n self.assertIsInstance(account2, Account)\n self.assertEqual(account2.PK, 3)\n self.assertEqual(account2.UtilityProvider, \"test_provider\")\n self.assertEqual(account2.AccountNumber, \"acct_1\")\n self.assertEqual(account2.RawAccountNumber, \"raw_acct_1\")\n self.assertEqual(account2.SourceLink, \"test_link\")\n self.assertEqual(account2.StatementType, \"test_stmt_type\")\n self.assertEqual(account2.StatementDate, date(2016, 4, 5))\n self.assertEqual(account2.IntervalStart, date(2016, 3, 1))\n self.assertEqual(account2.IntervalEnd, date(2016, 4, 1))\n self.assertEqual(account2.TotalBillAmount, Decimal(102))\n self.assertEqual(account2.AmountDue, Decimal(202))\n self.assertEqual(account2.NewCharges, Decimal(302))\n self.assertEqual(account2.OutstandingBalance, Decimal(402))\n self.assertEqual(account2.PreviousBalance, Decimal(502))\n self.assertEqual(len(account2.floating_charges), 1)\n self.assertEqual(len(account2.meters), 0)", "def setUp(self):\n\n self.caffe = Caffe.objects.create(\n name='kafo',\n city='Gliwice',\n street='Wieczorka',\n house_number='14',\n postal_code='44-100'\n )\n self.filtry = Caffe.objects.create(\n name='filtry',\n city='Warszawa',\n street='Filry',\n house_number='14',\n postal_code='44-100'\n )\n\n self.kate = Employee.objects.create(\n username='KateT',\n first_name='Kate',\n last_name='Tempest',\n telephone_number='12345678',\n email='[email protected]',\n favorite_coffee='flat white',\n caffe=self.caffe\n )\n\n self.cash_report = CashReport.objects.create(\n creator=self.kate,\n caffe=self.caffe,\n cash_before_shift=2000,\n cash_after_shift=3000,\n card_payments=500,\n amount_due=1900\n )\n\n Company.objects.create(name='GoodCake', caffe=self.caffe)\n Company.objects.create(name='Tesco', caffe=self.caffe)\n\n Expense.objects.create(\n name='Cakes',\n company=Company.objects.get(name='GoodCake'),\n caffe=self.caffe\n )\n\n Expense.objects.create(\n name='Supply',\n company=Company.objects.get(name='Tesco'),\n caffe=self.caffe\n )\n\n FullExpense.objects.create(\n expense=Expense.objects.get(name='Cakes'),\n amount=50,\n cash_report=CashReport.objects.first(),\n caffe=self.caffe\n )\n\n FullExpense.objects.create(\n expense=Expense.objects.get(name='Supply'),\n amount=500,\n cash_report=CashReport.objects.first(),\n caffe=self.caffe\n )", "def fixture_candy():\n yield Person(name=\"Candy\", age=13, hobbies=[\"Gardening\"])", "def setUp(self):\n # a user with 2 charts\n user = User.objects.create_user(\n username = 'test_user1',\n email = '[email protected]',\n password = 'test_user1_test'\n )\n UserChart.objects.create(\n user_id = user,\n chart_type = 'bar',\n title = 'This is a test chart title 1',\n subtitle = 'This is a test chart subtitle 1'\n )\n UserChart.objects.create(\n user_id = user,\n chart_type = 'bar',\n title = 
'This is a test chart title 2',\n subtitle = 'This is a test chart subtitle 2'\n )\n\n # a user with 1 chart\n user = User.objects.create_user(\n username = 'test_user2',\n email = '[email protected]',\n password = 'test_user2_test'\n )\n UserChart.objects.create(\n user_id = user,\n chart_type = 'bar',\n title = 'This is a test chart title 3',\n subtitle = 'This is a test chart subtitle 3'\n )", "def setUp(self):\n # Setup dummy custmers\n Customer.objects.create(name=\"Mike Zinyoni\", phone=\"+263784528370\", email=\"[email protected]\", address=\"Stand #11 Lorraine Drive, Bluffhill Harare Zimbabwe\")\n Customer.objects.create(name=\"Josh Nyamulomo\", phone=\"+26356839021\", email=\"[email protected]\", address=\"Stand #5 Lorraine Drive, Bluffhill Harare Zimbabwe\")\n Customer.objects.create(name=\"Brian Mpofu\", phone=\"+26390839021\", email=\"[email protected]\", address=\"Stand #25 Lorraine Drive, Bluffhill Harare Zimbabwe\")\n # Setup dummy items\n Item.objects.create(name=\"Chicken thighs\", description=\"Chunky big chicken thighs from Irvines chickens\", price=4.99, unit=\"Kg\")\n Item.objects.create(name=\"Beef steak\", description=\"Premium quality beef steak from Caswell meats\", price=6.99, unit=\"Kg\")\n Item.objects.create(name=\"Kefalos Youghgut\", description=\"Healthy and tasty youghgut available in strawberry, banana and butter milk flavour\", price=5.21, unit=\"litre\")\n Item.objects.create(name=\"Eversharp pen\", description=\"Pens available in: blue , red, green and black ink\", price=0.99, unit=\"dozen\")\n Item.objects.create(name=\"Proton Bread\", description=\"Fresh 700g bread\", price=0.9, unit=\"loaf\")\n # Setup dummy Invoice along side the invoice line\n invoice_1 = Invoice(customer=Customer.objects.get(id=1),total=0)\n invoice_1.save()\n InvoiceLine.objects.create(invoice=invoice_1,item=Item.objects.get(id=1), quantity=2, amount=(Item.objects.get(id=1).price*2))\n InvoiceLine.objects.create(invoice=invoice_1,item=Item.objects.get(id=4), quantity=1, amount=(Item.objects.get(id=4).price*1))\n InvoiceLine.objects.create(invoice=invoice_1,item=Item.objects.get(id=3), quantity=6, amount=(Item.objects.get(id=3).price*6))\n invoice_1.total = sum(invoiceLine.amount for invoiceLine in invoice_1.invoiceLines.all())\n invoice_1.save()\n \n invoice_2 = Invoice(customer=Customer.objects.get(id=3),total=0)\n invoice_2.save()\n InvoiceLine.objects.create(invoice=invoice_2,item=Item.objects.get(id=5), quantity=12, amount=(Item.objects.get(id=5).price*12))\n InvoiceLine.objects.create(invoice=invoice_2,item=Item.objects.get(id=4), quantity=2, amount=(Item.objects.get(id=4).price*2))\n invoice_2.total = sum(invoiceLine.amount for invoiceLine in invoice_2.invoiceLines.all())\n invoice_2.save()\n \n invoice_3 = Invoice(customer=Customer.objects.get(id=2),total=0)\n invoice_3.save()\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=5), quantity=12, amount=(Item.objects.get(id=5).price*12))\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=4), quantity=2, amount=(Item.objects.get(id=4).price*2))\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=1), quantity=2, amount=(Item.objects.get(id=1).price*2))\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=4), quantity=1, amount=(Item.objects.get(id=4).price*1))\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=3), quantity=6, amount=(Item.objects.get(id=3).price*6))\n invoice_3.total = sum(invoiceLine.amount for invoiceLine in 
invoice_3.invoiceLines.all())\n invoice_3.save()\n\n invoice_4 = Invoice(customer=Customer.objects.get(id=1),total=0)\n invoice_4.save()\n InvoiceLine.objects.create(invoice=invoice_4,item=Item.objects.get(id=1), quantity=6, amount=(Item.objects.get(id=1).price*6))\n invoice_4.total = sum(invoiceLine.amount for invoiceLine in invoice_4.invoiceLines.all())\n invoice_4.save()", "def test_confirm_customization_details(self):\n pass", "def test_client_tax_information_retrieve(self):\n pass", "def test_get_bans(self):\n pass", "def bank(self, bank):\n\n self._bank = bank", "def test_api_response_data(self):", "def simple_banking_management_functional():\n create_user('private', **USERS['Andreas'])\n create_user('company', **USERS['carrot_inc'])\n\n result = search_private_user('Andreas', 'Gustafsson')\n result_2 = search_company_user('carrot')\n\n register_account('savings', USERS['Andreas']['id_nr'])\n register_account('salary', USERS['Andreas']['id_nr'])\n\n deposit('savings', 100, USERS['Andreas']['id_nr'])\n deposit('salary', 20, USERS['Andreas']['id_nr'])\n\n withdraw('savings', 50, USERS['Andreas']['id_nr'])\n withdraw('salary', 30, USERS['Andreas']['id_nr'])\n\n print(BANK[USERS['Andreas']['id_nr']])", "def test_seeded_data():\n assert Tweet.all()", "def fixture_andy():\n yield Person(name=\"Andy\", age=12, hobbies=[\"Star Wars\", \"Bicycles\"])", "def test_client_tax_information_list(self):\n pass", "def test_data_index(self):\n response = self.client.get(reverse('index'))\n contacts = About_me.objects.all()\n self.assertEqual(response.status_code, 200)\n contact = contacts[0]\n self.assertContains(response, contact.name, 1)\n self.assertContains(response, contact.surname, 1)\n self.assertContains(\n response,\n contact.birth_date.strftime('%B %d, %Y').replace('0', ''), 1\n )\n self.assertContains(response, contact.bio, 1)\n self.assertContains(response, contact.email, 1)\n self.assertContains(response, contact.jabber, 1)\n self.assertContains(response, contact.skype, 1)\n self.assertContains(response, contact.contacts, 1)", "def test_companies_company_id_data_bank_accounts_account_id_get(self):\n pass", "def test_owner(self):\n self.business_item_class.businesses_list = [{\"owner\": \"chairman\", \"business_name\":\"Maendeleo\", \"category\":\"Backaend\", \"location\":\"myhomecity\"},\n {\"owner\": \"chairmanwe\", \"business_name\":\"NshMaendeleo\", \"category\":\"Backaend\", \"location\":\"myhomecity\"}]\n user = \"chairman\"\n msg = self.business_item_class.getOwner(user)\n self.assertEqual(msg, [{\"owner\": \"chairman\", \"business_name\":\"Maendeleo\", \"category\":\"Backaend\", \"location\":\"myhomecity\"}])", "def test_inactive_account(self):", "def test_create(self, mock_decorator):\n response = self.client.post(\n '/api/bce_institutions/0802145Y',\n content_type='application/json',\n headers={'Authorization': 'Bearer token'},\n data=json.dumps({\n 'is_institution': True\n }))\n self.assertEqual(response.status_code, 200)\n response_json = json.loads(response.data.decode('utf8'))\n self.assertEqual(\n response_json,\n {'institution': {'uai': '0802145Y', 'is_institution': True}}\n )\n self.assertEqual(BceInstitution.query.count(), 1)", "def setUp(self):\n self.database = Mock()\n self.database.reports_overviews.find_one.return_value = dict(_id=\"id\")", "def test_beneficiaries_retrieve_that_will_pass(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n url = reverse('beneficiary:beneficiary-entity-by-id-retrieve', kwargs={'pk': 1})\n response = 
self.client.get(url)\n self.assertEqual(response.status_code, 200)", "def test_create(self):\n pass", "def test_wallets_get(self):\n pass", "def test_ran_out_book_for_borrow(self):\n book = Book.objects.get(pk=1)\n self.assertEqual(book.copies, 1)\n client1 = APIClient()\n client1.login(username=self.students[0].username, password=\"salam*123\")\n response = client1.post(\"/borrows/\", data={\"book\": book.id})\n self.assertEqual(response.status_code, 201)\n client2 = APIClient()\n client2.login(username=self.students[1].username, password=\"salam*123\")\n response = client2.post(\"/borrows/\", data={\"book\": book.id})\n self.assertEqual(response.status_code, 400)", "def test_init(self):\n self.assertEqual(self.new_account.account_name, \"Instagram\")\n self.assertEqual(self.new_account.username, \"jLuseno161\")\n self.assertEqual(self.new_account.password, \"joy161\")", "def test_trade(self):\n pass", "def test_create_account(self):\n url = reverse('account:accounts')\n data = {'name': 'Test Account 1'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Account.objects.count(), 1)\n self.assertEqual(Account.objects.get().name, 'Test Account 1')", "def test_index(self):", "def test_fixtures(self):\n\n self.assertEqual(OrderType.objects.count(), 2,\n 'Incorrect order type count')\n self.assertEqual(Stock.objects.count(), 3,\n 'Incorrect stocks count')\n self.assertEqual(OrderStatus.objects.count(), 3,\n 'Incorrect statuses count')", "def test_client_bank_account_partial_update(self):\n pass", "def __init__(self, customer, bank, account, limit, bank_bal = 0):\n\n self._customer = customer\n self._bank = bank\n self._account = account\n self._limit = limit\n self._balance = bank_bal # To store customer spendings.", "def test_update_virtual_account_by_id(self):\n pass", "def test_bed(self):\n #TODO write bed tests", "def test_inmate_edit_view(self):\n self.test_inmate_validation() # cheap way to get an inmate\n inmate = models.Inmate.objects.all()[0]\n c = Client()\n response = c.get('/lemur/inmate/edit/' + str(inmate.pk), follow=True)\n self.assertEqual(response.status_code, 200)" ]
[ "0.68055016", "0.67185485", "0.65409213", "0.6355551", "0.6263593", "0.6235724", "0.6129963", "0.6102043", "0.6074391", "0.6055766", "0.6043796", "0.60177153", "0.5949179", "0.58791125", "0.58271164", "0.5814391", "0.5790518", "0.5788916", "0.5783457", "0.5749937", "0.57424164", "0.57399696", "0.5725626", "0.57009697", "0.5700514", "0.5685216", "0.56530917", "0.5649742", "0.56407994", "0.56237066", "0.56092644", "0.55978227", "0.5587545", "0.5584761", "0.55812234", "0.5564027", "0.55586964", "0.5555057", "0.554492", "0.55419123", "0.55349225", "0.5531896", "0.55314964", "0.5520929", "0.5507812", "0.5486015", "0.54802", "0.54765326", "0.54754215", "0.5475043", "0.54535854", "0.5452364", "0.542759", "0.5426123", "0.54150355", "0.54146856", "0.5413735", "0.5408577", "0.5401373", "0.5399629", "0.53994375", "0.5395711", "0.5395711", "0.53868675", "0.53866136", "0.53773385", "0.5377288", "0.5374891", "0.53732145", "0.53579473", "0.5356202", "0.5355435", "0.5354807", "0.5343197", "0.5341592", "0.5340088", "0.5331369", "0.5329975", "0.53294116", "0.5325188", "0.53191614", "0.5316043", "0.5310559", "0.5306214", "0.5303378", "0.53028715", "0.5302628", "0.52924955", "0.52913165", "0.5284359", "0.52807826", "0.5279699", "0.52770376", "0.5275701", "0.5260337", "0.52532846", "0.52502036", "0.5250006", "0.524769", "0.52470124" ]
0.6220651
6
Fixture for account object passed as an argument to the account create view test function
def account():
    bank_test = Bank.objects.create(name='R-Bank')
    company_test = Company.objects.create(name='Tre Belarus', country='Belarus')
    account = Account.objects.create(iban_number='TEEdddddddfs', swift_code='tertrefdsf', bank=bank_test, company=company_test)
    return account
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_account(self):\n url = reverse('account:accounts')\n data = {'name': 'Test Account 1'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Account.objects.count(), 1)\n self.assertEqual(Account.objects.get().name, 'Test Account 1')", "def test_account_view_good(flask_server, create_account):\n import json\n import requests\n\n data = create_account\n\n req = requests.post('{}/account/view'.format(API_URL), data=data)\n assert req.status_code == 200\n assert json.loads(req.content.decode('utf-8')) == [data['name'], data['code'], 0]", "def test_create_account(self):\n url = reverse('portal-list')\n data = {'brandID': 5, 'status' : 'Enabled'}\n response = self.client.post(url, data, format='json')\n\n #response = self.client.get(url)\n #print response\n #response = self.client.get('/v1/portal/1/')\n #print response\n #self.assertEqual(response.data[\"ud\"], {'id': 1, 'brandID': 4})\n self.assertEqual(response.data[\"brandID\"], 5)\n\n \"\"\"\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Account.objects.count(), 1)\n self.assertEqual(Account.objects.get().name, 'DabApps')\n \"\"\"", "def create_test_account(self):\n if not hasattr(self, \"headers\"):\n self.headers = {\"Content-Type\": \"application/json\"}\n self.account = {\n \"account_number\": \"11223344\",\n \"pin\": \"1234\",\n \"first_name\": \"John\",\n \"last_name\": \"Doe\",\n \"opening_balance\": 100000\n }\n with self.app.app_context():\n db.session.query(Account.account_number).filter_by(\n account_number=self.account[\"account_number\"]).delete()\n db.session.commit()\n self.client.post(\"/accounts/create\",\n data=json.dumps(self.account),\n headers=self.headers)", "def test_client_bank_account_create(self):\n pass", "def test_create_virtual_account(self):\n pass", "def test_create_account_campaign(self, create):\n \"\"\"Campaigns should be created\"\"\"\n row = {'PROJ_NAME1': 'Argentina Fund', 'PROJ_NO': '789-CFD',\n 'SUMMARY': 'Some Sum'}\n sync.create_account(row, None)\n self.assertTrue(create.called)\n account, row, name, acc_type = create.call_args[0]\n self.assertEqual(account.name, 'Argentina Fund')\n self.assertEqual(account.code, '789-CFD')\n self.assertEqual(account.category, Account.COUNTRY)\n self.assertEqual(0, len(Account.objects.filter(pk=account.pk)))", "def test_get_account(self):\n account = Account(self.client, \"[email protected]\", {})\n\n self.assertEqual(account.email, \"[email protected]\")\n self.assertEqual(account.state, \"PA\")\n self.assertEqual(account.city, \"Philadelphia\")\n self.assertEqual(account.phone, \"123-456-7890\")\n self.assertEqual(account.tax_id, \"\")\n self.assertEqual(account.balance, 0)\n self.assertEqual(account.company, \"Linode\")\n self.assertEqual(account.address_1, \"3rd & Arch St\")\n self.assertEqual(account.address_2, \"\")\n self.assertEqual(account.zip, \"19106\")\n self.assertEqual(account.first_name, \"Test\")\n self.assertEqual(account.last_name, \"Guy\")\n self.assertEqual(account.country, \"US\")\n self.assertIsNotNone(account.capabilities)\n self.assertIsNotNone(account.active_promotions)\n self.assertEqual(account.balance_uninvoiced, 145)\n self.assertEqual(account.billing_source, \"akamai\")\n self.assertEqual(account.euuid, \"E1AF5EEC-526F-487D-B317EBEB34C87D71\")", "def test_valid_account_create(self):\n ident_choice = UserIdentificationLabel.objects.get(slug=\"ident2\")\n form_data = {\n 'email': '[email 
protected]',\n 'organization_name': 'transhealth',\n 'password1': 'BEDrocks@123',\n 'password2': 'BEDrocks@123',\n 'first_name': 'BamBam',\n 'last_name': 'Rubble',\n 'identification_choice': str(ident_choice.pk),\n }\n response = self.client.post(self.url, form_data, follow=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'Please check your email')\n\n # verify username is lowercase\n User = get_user_model()\n u = User.objects.get(email=\"[email protected]\")\n self.assertEqual(u.username, \"[email protected]\")\n self.assertEqual(u.email, \"[email protected]\")\n\n # Ensure developer account does not have a crosswalk entry.\n self.assertEqual(Crosswalk.objects.filter(user=u).exists(), False)\n\n # verify user has identification label chosen\n exist = User.objects.filter(useridentificationlabel__users=u).filter(useridentificationlabel__slug='ident2').exists()\n self.assertEqual(exist, True)", "def test_create_account_project(self, create):\n row = {'PROJ_NAME1': 'Some Proj', 'PROJ_NO': '121-212',\n 'SECTOR': 'IT'}\n sync.create_account(row, None)\n self.assertTrue(create.called)\n account, row, issue_map = create.call_args[0]\n self.assertEqual(account.name, 'Some Proj')\n self.assertEqual(account.code, '121-212')\n self.assertEqual(account.category, Account.PROJECT)\n self.assertEqual(0, len(Account.objects.filter(pk=account.pk)))", "def test_create_virtual_account_beneficiary(self):\n pass", "def test_create_account(self):\n data = self.user_data.copy()\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data.get('message'), 'you have logged in successfully')", "def test_duo_account_post(self):\n pass", "def test_create(api: API):\n api.user.create.return_value = 123456\n account = Account(api, \"USERNAME\", \"PASSWORD\")\n api.user.create.assert_called_once()\n assert account.create()", "def test_create_manual_account02(self, client):\n user = UserFactory.get_user()\n institution = InstitutionFactory.get_manual_institution()\n account = Account.objects.create_manual_account(\n user.id, institution.id, '1111111', '')\n assert isinstance(account, Account)\n assert account.type_ds == Account.DEBT\n\n account = Account.objects.create_manual_account(\n user.id, institution.id, '2222222', 'some')\n assert Item.objects.count() == 1", "def test_create_accounts(client):\n request_test = client.post('/checks_and_balances/api/v1.0/new_account',\n json={\n 'name': 'Esteban',\n 'surname': 'Díaz',\n 'product': 'Awesome Account',\n 'balance': 10000.05\n })\n\n wanted_result = {'New Account': {'id': 3, 'name': 'Esteban', 'surname': 'Díaz', 'product': 'Awesome Account', 'balance': 10000.05}}\n\n json_data = request_test.get_json(force=True)\n\n assert json_data['New Account'] == wanted_result['New Account']", "def test_duo_account_get(self):\n pass", "def test_create_virtual_account_client(self):\n pass", "def test_create_virtual_account_transfer(self):\n pass", "def test_create_account_double(self, Campaign):\n row = {'PROJ_NAME1': 'China Fund', 'PROJ_NO': '777-CFD',\n 'SUMMARY': 'Some Sum', 'LOCATION': 'CHINA'}\n sync.create_account(row, None)\n row['PROJ_NO'] = '778-CFD'\n sync.create_account(row, None)\n account = Account.objects.get(code='777-CFD')\n self.assertEqual(account.name, 'China Fund')\n account.delete()\n account = Account.objects.get(code='778-CFD')\n self.assertEqual(account.name, 'China Fund (778-CFD)')\n account.delete()", "def 
test_duo_account_list(self):\n pass", "def test_accounts(self):\n self.assertEqual(AccountsConfig.name, \"accounts\")", "def test_init(self):\n self.assertEqual(self.new_account.account_name, \"Instagram\")\n self.assertEqual(self.new_account.username, \"jLuseno161\")\n self.assertEqual(self.new_account.password, \"joy161\")", "def test_03_account_index(self):\r\n # Without users\r\n with self.flask_app.app_context():\r\n res = self.app.get('/account/page/15', follow_redirects=True)\r\n assert res.status_code == 404, res.status_code\r\n\r\n self.create()\r\n res = self.app.get('/account', follow_redirects=True)\r\n assert res.status_code == 200, res.status_code\r\n err_msg = \"There should be a Community page\"\r\n assert \"Community\" in res.data, err_msg", "def test_create_account(self):\n url = reverse('hospital_list')\n data = {'name': 'DabApps','mobile': 846800258}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Hospital.objects.count(), 1)\n self.assertEqual(Hospital.objects.get().name, 'DabApps')\n self.assertEqual(Hospital.objects.get().mobile, 846800258)", "def test_client_bank_account_retrieve(self):\n pass", "def test_account_view_inexistent_account(flask_server, create_account):\n import requests\n\n data = create_account\n data['name'] += '123'\n\n req = requests.post('{}/account/view'.format(API_URL), data=data)\n assert req.content == b'No such account in database'\n assert req.status_code == 400", "def test_myaccount(self):\n create_user_object()\n self.client.login(username='testuser', password='testabc123')\n response = self.client.get(reverse('infinite:myaccount'))\n self.assertEqual(response.status_code, 200)", "def test_create_address(self): \n url = reverse('v1:addresses-list', args=[1])\n profile = ProfileFactory.create()\n \n address_data = {'profile' : str(profile.id),\n 'street_address' : 'test street',\n 'city' : 'test city',\n 'state' : 'test state', \n 'zip_code' : 'test zip',\n 'country' : 'test country',\n }\n\n # Check Anonymous User should return 403\n response = self.client.post(url, address_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n # Profile Owner User\n self.client.credentials(Authorization='Bearer ' + 'regularusertoken')\n response = self.client.post(url, address_data, format='json')\n response_data = response.json()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n \n self.assertIsNotNone(response_data[\"profile\"])\n self.assertEqual(ProfileAddress.objects.get(profile=response_data['profile']).street_address, 'test street')\n self.assertEqual(ProfileAddress.objects.get(profile=response_data['profile']).city, address_data['city'])\n self.assertEqual(len(ProfileAddress.objects.all()), 1)\n self.assertEqual(Profile.objects.get(pk=profile.id).id, uuid.UUID(response_data['profile']))", "def test_lookup_account(self):\n pass", "def test_create_account(self):\n\n main_page = pages.mainpage.MainPage(self.driver)\n main_page.click_sign_in_button()\n\n letters = string.ascii_lowercase\n email_address = ''.join(random.choice(letters) for i in range(15)) + '@testmail.mail'\n\n sign_in_page = pages.signinpage.SignInPage(self.driver)\n sign_in_page.enter_create_account_email_addres(email_address)\n sign_in_page.click_create_account_button() \n\n registration_page = pages.accountregistrationpage.AccountRegistrationPage(self.driver)\n registration_page.fill_in_account_registration_form()\n 
registration_page.click_regiser_button()\n\n self.assertEqual('My account - My Store', registration_page.get_page_title(), 'User is not in My account page')", "def example_bank_account():\n \n return BankAccount(\"Test User\", 1000.0)", "def test_valid_account_create_is_a_developer(self):\n ident_choice = UserIdentificationLabel.objects.get(slug=\"ident1\")\n form_data = {\n 'invitation_code': '1234',\n 'email': '[email protected]',\n 'organization_name': 'transhealth',\n 'password1': 'BEDrocks@123',\n 'password2': 'BEDrocks@123',\n 'first_name': 'Hank',\n 'last_name': 'Flinstone',\n 'identification_choice': str(ident_choice.pk),\n }\n self.client.post(self.url, form_data, follow=True)\n up = UserProfile.objects.get(user__email='[email protected]')\n self.assertEqual(up.user_type, 'DEV')", "def test_account_accepts_additional_params(\n mocker, api: API, first_name: str, last_name: str, email: str\n):\n mocker.patch.object(Account, \"does_exist\", return_value=False)\n start_refresh = mocker.patch.object(Account, \"start_refresh\")\n accept_all_terms = mocker.patch.object(Account, \"accept_all_terms\")\n\n Account(\n api,\n \"USERNAME\",\n \"PASSWORD\",\n first_name=first_name,\n last_name=last_name,\n email=email,\n create_owners=False,\n accept_terms=False,\n )\n\n api.user.create.assert_called_with(\n username=\"USERNAME\",\n password=\"PASSWORD\",\n first_name=first_name,\n last_name=last_name,\n email=email,\n )\n start_refresh.assert_not_called()\n accept_all_terms.assert_not_called()", "def test_get_account_from_state(self):\n state = State('test-state')\n account = Account('test-account')\n state.add_account(account)\n self.assertEqual(state.get_account('test-account'), account)", "def setUp(self):\n self.account_normaal = self.e2e_create_account('normaal', '[email protected]', 'Normaal')", "def test_create(self):\n pass", "def test_save_account(self):\n self.new_account.save_account() # add account to list\n self.assertEqual(len(Credential.credential_list),\n 1) # check length of list", "def test_update_account_page(self):\n with self.client:\n self.client.post(\n url_for('login'),\n data=dict(\n email='[email protected]',\n password='admin2016'\n ),\n follow_redirects=True\n )\n response = self.client.post(\n url_for('account'),\n data=dict(\n first_name=\"Admin\",\n last_name=\"One\",\n email=\"[email protected]\"\n ),\n follow_redirects=True\n )\n self.assertIn(b'Admin', response.data)\n self.assertEqual(response.status_code, 200)", "def create(self, account):\n model = models.load('Account', account)\n\n return self.client.create_account(model=model)", "def test_account_information(self):\r\n res = self.testapp.get(u'/api/v1/admin/account?api_key=' + API_KEY,\r\n status=200)\r\n\r\n # make sure we can decode the body\r\n user = json.loads(res.body)\r\n\r\n self.assertEqual(\r\n user['username'], 'admin',\r\n \"Should have a username of admin {0}\".format(user))\r\n\r\n self.assertTrue(\r\n 'password' not in user,\r\n 'Should not have a field password {0}'.format(user))\r\n self.assertTrue(\r\n '_password' not in user,\r\n 'Should not have a field password {0}'.format(user))\r\n self.assertTrue(\r\n 'api_key' not in user,\r\n 'Should not have a field password {0}'.format(user))\r\n self._check_cors_headers(res)", "def test_create_customer(self):\n url = reverse('customers-list')\n data = {\n 'first_name': self.customer_first_name,\n 'last_name': self.customer_last_name,\n 'email': self.customer_email\n }\n response = self.client.post(url, data, format='json')\n 
self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Customer.objects.count(), 1)\n self.assertEqual(Customer.objects.get().first_name, 'John')", "def create_account():\n\n return render_template('account.html')", "def test_dietitian_edit_account(self):\n\n data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"street-address\": \"33 Blue St\", \n \"city\": \"San Francisco\", \"state\": \"CA\", \"zipcode\": \"43223\"}\n\n result = self.client.post(\"/dietitian/1/account/edit\", data=data,\n follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"successfully updated\", result.data)", "def test_create_doctor(self):\n test_password = 'ooooooooooooooooooooooo'\n username = faker.first_name()\n data = {'username': username, 'email': faker.email(), 'password1': test_password, 'password2': test_password, 'is_doctor': True}\n response = self.client.post(self.url, data, format='json')\n # import pudb; pudb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Doctor.objects.count(), 1)\n # self.assertEqual(Account.objects.get().name, 'DabApps')", "def test_account_name(self):\n account = Account('test-account')\n self.assertEqual(account.name, 'test-account')", "def test_route_existing_account():\n\n account_repository = AccountRepositoryMock()\n get_account = GetAccountMock(account_repository)\n withdraw_account = WithdrawAccount(account_repository, get_account)\n withdraw_account_controller = WithdrawAccountController(withdraw_account)\n\n attributes = {\n \"type\": \"withdraw\",\n \"origin\": \"100\",\n \"amount\": faker.random_number(digits=3),\n }\n\n response = withdraw_account_controller.route(HttpRequest(body=attributes))\n\n assert get_account.account_id_param[\"account_id\"] == int(attributes[\"origin\"])\n assert account_repository.update_account_params[\"account_id\"] == int(\n attributes[\"origin\"]\n )\n\n assert (\n account_repository.update_account_params[\"balance\"]\n == get_account.account_balance_param[\"balance\"] - attributes[\"amount\"]\n )\n\n assert response.status_code == 201\n assert response.body", "def test_createUser_single(self):\n #TODO: this and other tests", "def test_get_virtual_account_by_id(self):\n pass", "def test_client_tax_information_create(self):\n pass", "def setUp(self):\n self.new_account = Credential(\"Instagram\", \"jLuseno161\", \"joy161\")", "def test_admin_can_create_a_employee(self):\n\n account_data = {\n \"username\": \"Mike\",\n \"email\": \"[email protected]\",\n \"password\": \"1234567\",\n \"confirm_password\": \"1234567\"\n }\n response = self.client.post(\n reverse('accounts:create-user'),\n account_data,\n format=\"json\")\n \"\"\"Test the api has bucket creation capability.\"\"\"\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(\"data\" in json.loads(response.content))", "def test_add_account(self):\n person1 = self.owner\n person2 = Person(\n self.initial_year, \"Spouse\", self.initial_year - 20,\n retirement_date=self.retirement_date,\n gross_income=Money(50000),\n spouse=person1, tax_treatment=self.tax_treatment)\n # Add an account and confirm that the Person passed as owner is\n # updated.\n account1 = Account(owner=person1)\n account2 = Account(owner=person1)\n self.assertEqual(person1.accounts, {account1, account2})\n self.assertEqual(person2.accounts, set())", "def test_get_virtual_accounts(self):\n pass", "def test_create_valid_alt(self):\n url = 
'/api/users/'\n username = str(uuid1())[:8]\n data = {\n 'email': '{}@dbca.wa.gov.au'.format(username),\n 'name': 'Doe, John',\n 'username': username,\n 'ad_dn': 'CN={},OU=Users,DC=domain'.format(username),\n 'expiry_date': datetime.now().isoformat(),\n 'active': True,\n 'ad_guid': str(uuid1()),\n 'given_name': 'John',\n 'surname': 'Doe',\n 'title': 'Content Creator',\n 'date_ad_updated': datetime.now().isoformat(),\n }\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertTrue(DepartmentUser.objects.filter(email=data['email']).exists())", "def test_mock_datasource_accounts(self):\n self.assertIsInstance(self.test_data, UrjanetData)\n self.assertIsNotNone(self.test_data.accounts)\n self.assertEqual(len(self.test_data.accounts), 2)\n\n # This account is loaded because it has a meter\n account1 = self.test_data.accounts[0]\n self.assertIsInstance(account1, Account)\n self.assertEqual(account1.PK, 1)\n self.assertEqual(account1.UtilityProvider, \"test_provider\")\n self.assertEqual(account1.AccountNumber, \"acct_1\")\n self.assertEqual(account1.RawAccountNumber, \"raw_acct_1\")\n self.assertEqual(account1.SourceLink, \"test_link\")\n self.assertEqual(account1.StatementType, \"test_stmt_type\")\n self.assertEqual(account1.StatementDate, date(2016, 2, 5))\n self.assertEqual(account1.IntervalStart, date(2016, 1, 1))\n self.assertEqual(account1.IntervalEnd, date(2016, 2, 1))\n self.assertEqual(account1.TotalBillAmount, Decimal(100))\n self.assertEqual(account1.AmountDue, Decimal(200))\n self.assertEqual(account1.NewCharges, Decimal(300))\n self.assertEqual(account1.OutstandingBalance, Decimal(400))\n self.assertEqual(account1.PreviousBalance, Decimal(500))\n self.assertEqual(len(account1.floating_charges), 0)\n self.assertEqual(len(account1.meters), 1)\n\n # This account is loaded because it has floating charges\n account2 = self.test_data.accounts[1]\n self.assertIsInstance(account2, Account)\n self.assertEqual(account2.PK, 3)\n self.assertEqual(account2.UtilityProvider, \"test_provider\")\n self.assertEqual(account2.AccountNumber, \"acct_1\")\n self.assertEqual(account2.RawAccountNumber, \"raw_acct_1\")\n self.assertEqual(account2.SourceLink, \"test_link\")\n self.assertEqual(account2.StatementType, \"test_stmt_type\")\n self.assertEqual(account2.StatementDate, date(2016, 4, 5))\n self.assertEqual(account2.IntervalStart, date(2016, 3, 1))\n self.assertEqual(account2.IntervalEnd, date(2016, 4, 1))\n self.assertEqual(account2.TotalBillAmount, Decimal(102))\n self.assertEqual(account2.AmountDue, Decimal(202))\n self.assertEqual(account2.NewCharges, Decimal(302))\n self.assertEqual(account2.OutstandingBalance, Decimal(402))\n self.assertEqual(account2.PreviousBalance, Decimal(502))\n self.assertEqual(len(account2.floating_charges), 1)\n self.assertEqual(len(account2.meters), 0)", "def setUp(self):\n\n self.user = self.client.users.create({})", "def test_showing_dietitian_account(self):\n\n result = self.client.get(\"/dietitian/1/account\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Account Details\", result.data)\n\n result = self.client.get(\"/dietitian/2/account\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"not authorized\", result.data)", "def test_patient_edit_account(self):\n\n data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \"email\": \"[email protected]\",\n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": 
\"CA\", \"zipcode\": \"43223\", \"phone\": \"8884445555\",\n \"birthdate\":\"1984-05-05\"}\n\n result = self.client.post(\"/patient/1/account/edit\", data=data,\n follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"successfully updated\", result.data)", "def test_client_bank_accounts_list(self):\n pass", "def test_get_account(client):\n request_test = client.post('/checks_and_balances/api/v1.0/account',\n json={'id': 0})\n json_data = request_test.get_json(force=True)\n\n wanted_result = {\n 'id': 0,\n 'name': u'David',\n 'surname': u'Vélez',\n 'product': u'Check Account',\n 'balance': 10.05\n }\n\n assert json_data == wanted_result", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_add_account_to_state(self):\n state = State('test-state')\n account = Account('test-account')\n state.add_account(account)\n self.assertEqual(state.accounts.get('test-account'), account)", "def setUp(self):\n self.testUser = User.objects.get(username=\"c1e1\")\n self.client = APIClient()\n self.client.force_authenticate(user=self.testUser)\n self.data = {\n \"tracking\": 1234,\n \"mail_class\": \"12\",\n \"return_address\": {\n \"address1\": \"1234\",\n \"address2\": \"1234\",\n \"city\": \"1234\",\n \"state\": \"12\",\n \"zip\": 1234\n },\n \"rate\": 1234,\n \"address\": {\n \"address1\": \"1234\",\n \"address2\": \"1234\",\n \"city\": \"1234\",\n \"state\": \"12\",\n \"zip\": 1234\n },\n \"cost_center\": CostCenter.objects.filter(company=\n self.testUser.profile.company.pk)[0].pk\n }\n self.url = reverse('MailPiece-list')", "def test_create_user(profile_data):\n email = \"email@localhost\"\n username = \"username\"\n user = api.create_user(username, email, profile_data, {\"first_name\": \"Bob\"})\n\n assert isinstance(user, User)\n assert user.email == email\n assert user.username == username\n assert user.first_name == \"Bob\"\n\n if \"name\" in profile_data:\n assert user.profile.name == profile_data[\"name\"]\n else:\n assert user.profile.name is None", "def test_home_view_two_object(self):\n self.create_obj()\n UserData.objects.create(\n first_name=\"aaaaaa\",\n last_name=\"aaaaa\",\n date_of_birth='1998-02-23',\n bio=\"aaa\",\n email=\"[email protected]\",\n jabber=\"aaaaa\",\n skype=\"aaaaa\",\n other_contacts=\"aaaaa\"\n )\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(UserData.objects.first(), response.context['data'])", "def test_create_virtual_account_pay_out(self):\n pass", "def test_create_user(self):\n url = reverse('create_user')\n data = {\n 'first_name': 'Jimbo',\n 'email': '[email protected]',\n 'password': 'jimboland',\n 'postal_code': 'jimbo',\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(User.objects.get().first_name, 'Jimbo')", "def test_create_user_endpoint(self, **kwargs):\n first_name = kwargs.get('first_name', self.test_args[\"user_details\"][\"first_name\"])\n last_name = kwargs.get('last_name', self.test_args[\"user_details\"][\"last_name\"])\n password = kwargs.get('password', self.test_args[\"user_details\"][\"password\"])\n email = kwargs.get('email', Workflows.generate_new_email(suffix=self.global_config[\"email_id_suffix\"]))\n custom_data = {\"first_name\": first_name, \"last_name\": last_name, \"password\": password, \"email\": email}\n kwargs[\"data\"] = {\"user\": 
custom_data, \"client_id\": self.global_config[\"client_id\"],\n \"client_secret\": self.global_config[\"client_secret\"]}\n\n restapi = Rest(base_uri=self.global_config[\"base_url\"])\n response = restapi.post(**kwargs)\n\n if kwargs.get(\"return_response_obj\", False):\n return response\n\n print(\"Verify Response body\")\n assert json.loads(response.text)[\"message\"] == self.test_args[\"expected_result\"], \"Test Failed\"\n return None", "def test_create_email_account(self):\n first = 'create_email'\n last = 'account_test'\n user_id = first + last\n email_addr = first + last + '@' + self.email_dom\n user = SpokeUser(self.org_name)\n user.create(email_addr, first, last)\n \n org = '%s=%s' % (self.org_attr, self.org_name)\n people = '%s=%s' % (self.container_attr, self.user_container)\n uid = '%s=%s' % (self.user_key, user_id)\n dn = '%s,%s,%s,%s' % (uid, people, org, self.base_dn)\n dn_info = {'objectClass': ['top', 'inetOrgPerson', self.user_class,\n self.imap_class, self.smtp_class],\n self.imap_enable: ['TRUE'],\n self.imap_mailbox: [user_id],\n self.imap_domain: [self.email_dom],\n self.imap_partition: [self.imap_partition_def],\n self.smtp_destination: [email_addr],\n self.smtp_enable: ['TRUE'],\n self.smtp_pri_address: [email_addr]\n }\n expected_result = [(dn, dn_info)] \n acc = SpokeEmailAccount(self.org_name, user_id)\n result = acc.create(email_addr)['data']\n self.assertEqual(result, expected_result)\n user.delete(first, last)", "def test_creating_new_dietitian(self):\n\n form_data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\"}\n\n dietitian_id = create_new_dietitian_account(form_data)\n\n self.assertEqual(2, dietitian_id)", "def test_customer_create(self):\n self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])", "def setUp(self):\n self.admin = User.objects.get(username='admin')\n self.client = APIClient()\n self.client.force_authenticate(user=self.admin)\n self.data = {'name': 'testCompany', 'address': {\n 'address1': '123 fake st', 'address2': 'fake address 2',\n 'city': 'nowhere', 'state': 'IN', 'zip': '90210'}}\n self.url = reverse('Company-list')", "def test_user_creation(self):\r\n \r\n self.assertIsInstance(self.user, User)", "def test_create_customer(self):\n create_customer_url = reverse(\"customer_list\")\n\n customer_info = {\"first_name\": \"Denny\", \"last_name\": \"Wayne\"}\n\n response = self.client.post(\n create_customer_url, data=customer_info, format=\"json\"\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Customer.objects.count(), 5)\n self.assertEqual(Customer.objects.get(pk=5).first_name, \"Denny\")\n self.assertEqual(Customer.objects.get(pk=5).last_name, \"Wayne\")", "def test_account_creation_good(flask_server):\n import requests\n\n req = requests.post('{}/account/create'.format(API_URL),\n data={'name': 'foo', 'password': 'bar', 'code': '123'})\n assert req.content == b'ok'\n assert req.status_code == 200", "def test_get_virtual_account_beneficiary(self):\n pass", "def test_valid_serializer(self, db, api_factory):\r\n # data is valid \r\n user = User.objects.create_user(email='[email protected]', password='admin1600', user_type='D')\r\n request = api_factory.get(self.url)\r\n request.user = user\r\n serializer = DoctorDetailSerializer(data=self.data, context={\"request\": request})\r\n assert 
serializer.is_valid() == True", "def test_create_valid(self):\n url = '/api/users/'\n username = str(uuid1())[:8]\n data = {\n 'EmailAddress': '{}@dbca.wa.gov.au'.format(username),\n 'DisplayName': 'Doe, John',\n 'SamAccountName': username,\n 'DistinguishedName': 'CN={},OU=Users,DC=domain'.format(username),\n 'AccountExpirationDate': datetime.now().isoformat(),\n 'Enabled': True,\n 'ObjectGUID': str(uuid1()),\n 'GivenName': 'John',\n 'Surname': 'Doe',\n 'Title': 'Content Creator',\n 'Modified': datetime.now().isoformat(),\n }\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n # A DepartmentUser with that email should now exist.\n self.assertTrue(DepartmentUser.objects.filter(email=data['EmailAddress']).exists())", "def test_create(self, client):\n count = get_user_model().objects.count()\n data = factory.build(dict, FACTORY_CLASS=UserFactory)\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 302\n\n user = get_user_model().objects.last()\n assert user.username == data['username']\n assert user.birthday == datetime.datetime.strptime(data['birthday'], '%Y-%m-%d').date()\n assert get_user_model().objects.count() == count + 1", "def load_test_account() -> BankAccount:\n budget_manager = BudgetCreator.load_test_budget_manager()\n return TroublemakerBankAccount('123123', 'HSBC', 1000, budget_manager)", "def test_create(km_user_factory):\n models.Profile.objects.create(\n is_private=True, km_user=km_user_factory(), name=\"My Profile\"\n )", "def test_create_person(self):\n user = User.objects.create(username='test_user')\n user.set_password('test123')\n user.save()\n self.client.login(username='test_user', password='test123')\n\n data = {\n 'first_name': 'Emilia',\n 'last_name': 'Clarke',\n 'aliases': 'Emi'\n }\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Person.objects.count(), 1)\n self.assertEqual(Person.objects.first().first_name, 'Emilia')", "def setUpTestData(cls):\n cls.test_resource = Resource(name='Test', slug='test', description='')\n cls.test_resource.full_clean()\n cls.test_resource.save()\n cls.test_faculty = Faculty(name='Test', slug='test')\n cls.test_faculty.full_clean()\n cls.test_faculty.save()\n cls.test_department = Department(name='Test', slug='test', faculty=cls.test_faculty)\n cls.test_department.full_clean()\n cls.test_department.save()\n cls.test_agreement = Agreement(title='test-one',\n slug='test-one',\n resource=cls.test_resource,\n body='body',\n redirect_url='https://example.com',\n redirect_text='example-redirect')\n cls.test_agreement.full_clean()\n cls.test_agreement.save()\n cls.test_user = get_user_model().objects.create_user(username='test',\n first_name='test',\n last_name='test',\n email='[email protected]',\n password='testtesttest')", "def test_api_create_atmuser(self):\n users_num = ATMUser.objects.count()\n\n atmuser = ATMUser.objects.get(card='0000000000000000') # get admin\n view = ATMUserViewSet.as_view({'post': 'create'})\n\n data = {'card': '7777777777777777', 'password': '7777', 'cash': 700}\n request = factory.post(reverse('atmuser-list'), data, format='json')\n\n force_authenticate(request, user=atmuser)\n response = view(request)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(ATMUser.objects.count(), users_num + 1)", "def test_create_account(self):\n response = 
self.client.post(\"http://localhost:8000/api/signup/\",\n data={\"username\": \"tests\", \"password\": \"TestTest\"})\n first_user = MyUser.objects.get()\n self.assertEqual(response.status_code, HTTP_201_CREATED)\n self.assertEqual(first_user.username, 'tests')\n response = self.client.post(\"http://localhost:8000/api/signup/\",\n data={\"username\": \"tests2\", \"password\": \"TestTest\"})\n self.assertEqual(response.status_code, HTTP_201_CREATED)\n self.assertTrue(MyUser.objects.filter(username=\"tests2\").exists())\n user = MyUser.objects.get(username=\"tests2\")\n response = self.client.put(f\"http://localhost:8000/api/users/{user.pk}/\", data={\"email\": \"[email protected]\"})\n # Not logged shouldnt change anything\n self.assertEqual(response.status_code, HTTP_401_UNAUTHORIZED)\n user.set_password(\"TestTest\")\n user.save()\n self.assertTrue(self.client.login(username=\"tests2\", password=\"TestTest\"))\n response = self.client.patch(f\"http://localhost:8000/api/users/{user.pk}/\", data={\"email\": \"[email protected]\"})\n # Logged, should change\n self.assertEqual(response.status_code, HTTP_200_OK)\n self.assertEqual(MyUser.objects.get(username=\"tests2\").email, \"[email protected]\")\n # Dont update others users\n response = self.client.patch(f\"http://localhost:8000/api/users/{first_user.pk}/\", data={\"email\": \"[email protected]\"})\n self.assertEqual(response.status_code, HTTP_400_BAD_REQUEST)", "def test_create_new_student_user(self):\n data = {\n 'username': 'John',\n 'email': '[email protected]',\n 'password': 'test123!',\n 'phone': '1234567890',\n 'first_name': 'Chuck',\n 'last_name': 'Norris',\n 'university': {\n 'name': \"random_university\"\n },\n 'academic_field': {'name': \"random_field\"},\n 'academic_level': {'name': \"random_level\"},\n 'gender': \"M\",\n 'birthdate': \"1999-11-11\",\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(json.loads(response.content)['phone'], '1234567890')\n\n user = User.objects.get(email=\"[email protected]\")\n activation_token = ActionToken.objects.filter(\n user=user,\n type='account_activation',\n )\n\n self.assertEqual(1, len(activation_token))", "def test_signup(self):\n res = self.client.get(\"/registration\")\n data = res.data.decode(\"utf-8\")\n assert res.status == \"200 OK\"\n assert \"Create Account\" in data", "def test_create_team(self):\n pass", "def bank_account():\n return BankAccount()", "def test_create_user(self):\n data = {\n 'username': 'foobar',\n 'email': '[email protected]',\n 'password': 'somepassword'\n }\n\n request = self.factory.post(self.create_url, data, format='json')\n view = UserViewSet.as_view({\"post\": \"create\"})\n response = view(request)\n self.assertEqual(User.objects.count(), 2)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['username'], data['username'])\n self.assertEqual(response.data['email'], data['email'])\n self.assertFalse('password' in response.data)", "def test_create_user_profile(self):\n user_profile_dict = {\n 'date_of_birth': datetime.datetime(1992, 10, 27),\n 'region': 'Serbia',\n 'bio': 'This is a test user account',\n 'user_type': models.UserProfile.UserType.REGULAR,\n 'phone': '065123'\n }\n\n user = get_user_model().objects.create_user(\n email=self.test_user_email,\n password=self.test_user_pass,\n name=self.test_user_name\n )\n\n user_profile = 
models.UserProfile.objects.filter(user_id=user).first()\n user_profile.date_of_birth = user_profile_dict['date_of_birth']\n user_profile.region = user_profile_dict['region']\n user_profile.bio = user_profile_dict['bio']\n user_profile.user_type = user_profile_dict['user_type']\n user_profile.phone = user_profile_dict['phone']\n user_profile.save()\n\n self.assertEqual(user_profile.date_of_birth,\n user_profile_dict['date_of_birth'])\n self.assertEqual(user_profile.region,\n user_profile_dict['region'])\n self.assertEqual(user_profile.bio, user_profile_dict['bio'])\n self.assertEqual(user_profile.user_type,\n user_profile_dict['user_type'])", "def test_signup_view(self, user_data, client: Client):\n response = client.post(\n reverse_lazy(\"users:signup\"),\n {\n \"username\": user_data.email,\n \"email\": user_data.email,\n \"password1\": user_data.password,\n \"password2\": user_data.password,\n },\n content_type=\"application/x-www-form-urlencoded\",\n )\n assert response.status_code == 200", "def test_inactive_account(self):", "def add_account(self, account):\n self.accounts[account.account_number] = account.json()\n # We should save in database the new account using self.di, but not now in order to get our tests passed", "def setUp(self):\n self.college = self.setup_college()\n self.university = self.setup_university()\n self.faculty = self.setup_faculty()\n self.url = reverse('account:user-register')\n self.login_url = reverse('account:user-login')\n self.education = EducationSerializer(self.setup_education()).data\n self.data = {\n \"username\": \"testUser\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Test\",\n \"last_name\": \"User\",\n \"password\": \"1234\",\n \"confirm_password\": \"1234\",\n \"profile\": {\n \"user\": 1,\n \"contact_number\": \"9860476499\",\n \"address\": \"kapan\",\n \"education\": self.education\n }\n }\n self.response = self.client.post(self.url, data=self.data, format='json')\n user = User.objects.get()\n user.is_active = True\n user.save()\n self.login_data = {\n \"email\": \"[email protected]\",\n \"password\": \"1234\"\n }", "def setUp(self):\n self.client = Client()\n #creamos un usuario en la db\n self.user = User.objects.create_user('super', '[email protected]', 'super')", "def test_create_account_sector(self):\n row = {'PROJ_NAME1': 'Renewal Fund', 'PROJ_NO': 'SPF-REN',\n 'LOCATION': 'D/OSP/GGM', 'SUMMARY': 'Some Sum',\n 'SECTOR': 'RENEW'}\n sync.create_account(row, None)\n account = Account.objects.get(code='SPF-REN')\n campaign = Campaign.objects.get(account=account)\n mapping = SectorMapping.objects.get(pk='RENEW')\n self.assertEqual(mapping.campaign, campaign)\n account.delete() # cascades" ]
[ "0.7160241", "0.69238716", "0.6886644", "0.68595505", "0.68389684", "0.681058", "0.6805492", "0.66778797", "0.66131806", "0.66090906", "0.6590591", "0.6572082", "0.6519173", "0.6509788", "0.6508603", "0.6420267", "0.6328433", "0.62413573", "0.62157965", "0.6204515", "0.619508", "0.6189853", "0.6188303", "0.6168797", "0.61651695", "0.61568826", "0.6155524", "0.6155039", "0.6118962", "0.6117101", "0.6079836", "0.6051736", "0.6039326", "0.6036465", "0.60339856", "0.6028046", "0.6014614", "0.6007986", "0.59594", "0.5951135", "0.59447473", "0.59410036", "0.58999014", "0.5873096", "0.58691657", "0.58667684", "0.58655393", "0.5863843", "0.58520854", "0.58423615", "0.5839784", "0.5831123", "0.58265126", "0.5807613", "0.5794784", "0.5784879", "0.57846403", "0.5778011", "0.57724816", "0.5761986", "0.5755168", "0.5751407", "0.5751407", "0.5751407", "0.57321143", "0.57160187", "0.5706231", "0.5704117", "0.57023454", "0.5701132", "0.5696848", "0.56906337", "0.5690237", "0.5688609", "0.56709874", "0.56705767", "0.566069", "0.5659791", "0.56576633", "0.56566525", "0.5656237", "0.5655634", "0.5643976", "0.56420934", "0.5634676", "0.5634496", "0.56302977", "0.5603955", "0.56022614", "0.5592248", "0.55882806", "0.55867743", "0.55843246", "0.55841297", "0.5580491", "0.55772734", "0.5569316", "0.55646974", "0.5563845", "0.5559437" ]
0.6343079
16
return a working WLAN(STA_IF) instance or None
def get_connection(ssid,password): # First check if there already is any connection: if wlan_sta.isconnected(): return wlan_sta connected = False try: # ESP connecting to WiFi takes time, wait a bit and try again: time.sleep(3) if wlan_sta.isconnected(): return wlan_sta # Search WiFis in range wlan_sta.active(True) connected = do_connect(ssid,password) except OSError as e: print("exception", str(e)) # start web server for connection manager: # if not connected: # connected = start() return wlan_sta if connected else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_wifi(self):\n return self._wifi", "def init_wlan_sta():\n\n print('WLAN: STA mode')\n wlan.init(mode=WLAN.STA)\n if not wlan.isconnected():\n wlan.connect(WLAN_SSID, auth=WLAN_AUTH, timeout=5000)\n while not wlan.isconnected():\n machine.idle() # save power while waiting", "def joinwifi():\n station = network.WLAN(network.STA_IF) # initiate a station mode\n\n if not station.isconnected():\n print('connecting to network:', ssid())\n station.active(True)\n station.connect(ssid(), password())\n \n\n while not station.isconnected():\n pass\n\n # deactivating access point mode\n ap = network.WLAN(network.AP_IF)\n ap.active(False)\n\n ip = station.ifconfig()[0]\n print('connected as:', ip)\n\n return ip", "def connect(self):\n # check if network is connected. If yes: return, finished\n # 2019-0801 changed: if self._wlan.isconnected():\n if self.isconnected:\n if USE_DEBUG:\n print('WLAN already connected')\n return self._wlan.ifconfig()\n\n # activate Wifi interface\n if self._wlan.active() is False:\n self._wlan.active(True)\n # scan available networks for the required one\n nets = self._wlan.scan()\n for net in nets:\n ssid = net[0]\n if ssid == bytearray(self._config['SSID']): # must use bytearray!\n if USE_DEBUG:\n print(\"Startup WiFi ...\" + self._config['SSID'])\n # specify if static or dynamic IP is requested\n # STATIC IP: an IP is given\n # DYNAMIC IP: None\n if self._config['STATIC_IP'] is not '':\n if USE_DEBUG:\n print('WifiManager::Static IP configuration')\n # configure network for static IP\n self._wlan.ifconfig((self._config['STATIC_IP'],\n self._config['MASKER'],\n self._config['GATEWAY_IP'],\n self._config['DNS']))\n\n # connect to SSID... either for STATIC or DYNAMIC IP\n self._wlan.connect(self._config['SSID'],\n self._config['PASSWRD'])\n while not self.isconnected:\n idle() # save power while waiting\n time.sleep_ms(100) # give it some time\n if USE_DEBUG:\n print(\"Network '{}' connection succeeded!\".format(ssid))\n break\n\n # check connection, if not succesfull: raise exception\n if not self._wlan.active():\n raise exception('Network {0} not found.'.format(ssid))\n\n # returns network configuration...\n # although 'myPy.local' should work on MacOS X (Bonjour)\n return self._wlan.ifconfig()", "def GetWirelessInterface(self):\n return str(self.wifi.wireless_interface)", "def SelectInterface(self, interface=None):\n # Check that we have an online WLAN interface.\n interfaces = self.GetInterfaces()\n\n # Ensure there are WLAN interfaces available.\n if not interfaces:\n raise WiFiError('No available WLAN interfaces.')\n\n # If a specific interface is specified, check that it exists.\n if interface:\n if interface not in interfaces:\n raise WiFiError('Specified interface %s not available' %\n interface)\n return interface\n\n # If no interface is specified, check the uniqueness.\n if len(interfaces) != 1:\n raise WiFiError(\n 'There are multiple interfaces. 
'\n 'Please specify one from: %r' % interfaces)\n return interfaces[0]", "def connect(self):\n self.sta_if = network.WLAN(network.STA_IF)\n self.sta_if.active(False)\n sleep(1)\n self.sta_if.active(True)\n\n dbg(\"Interface active\")\n if self.check_ap(self.ssid):\n # connect to access point\n if not self.sta_if.isconnected():\n dbg('connecting to AP...')\n self.sta_if.active(True)\n self.sta_if.connect(self.ssid, self.key)\n while not self.sta_if.isconnected():\n machine.idle()\n # Do we need a timeout here?\n dbg(self.sta_if.ifconfig())\n else:\n dbg(\"WLAN already connected\")\n dbg(self.sta_if.ifconfig())\n else:\n dbg(\"Target SSID not found.\")\n reset(\"Could not connect to network - target SSID is not availble.\", HARD)", "def _ValidateInterface(self, interface=None):\n if interface:\n return interface\n\n interfaces = self.GetInterfaces()\n if not interfaces:\n raise WiFiError('No available WLAN interfaces.')\n\n # Arbitrarily choose first interface.\n return interfaces[0]", "def connect_wifi(ssid='', pwd=''):\n wlan = network.WLAN(network.STA_IF)\n\n if wlan.isconnected(): # Let the user know if the device\n print(\"Debug:\\tAlready connected\") # was already connected\n\n if not wlan.active(): # Mark the network as active if it\n wlan.active(True) # is not already active\n\n wlan.connect(ssid,pwd)\n\n while not wlan.isconnected(): # Hang program execution until\n pass # the device is connected\n\n print(\"Debug:\\tConnection to %s successful\" % ssid)\n print(\"Debug:\\tLocal IP is %s, network.WLAN object returned.\" % wlan.ifconfig()[0])\n\n return wlan", "def DetectWirelessInterface(self):\n iface = self.wifi.DetectWirelessInterface()\n if iface:\n print 'Automatically detected wireless interface ' + iface\n else:\n print \"Couldn't detect a wireless interface.\"\n return str(iface)", "def get_wlans(config, sta_if):\n matches = []\n now = time()\n while time() < now + config['network']['timeout_seconds'] and not matches:\n print('wifi scanning')\n sta_if.active(True)\n foundNetworks = {n[0].decode(\"utf-8\"):n for n in sta_if.scan()}\n matches = [x for x in config['network']['wlans'] if x['essid'] in foundNetworks]\n if matches:\n for match in matches:\n print(\"found wlan %s\" % match['essid'])\n return matches", "def get_latest_wifi(self):\n data = self.collection.find({\"sensor_type\": \"wifi\"}, {\"_id\": False}).sort([\n (\"timestamp\", -1)]).limit(1)\n\n if self.is_empty(data):\n return None\n else:\n return data[0]", "def interface(self):\n if self._interface is None:\n expression = expressions.WPA_INTERFACE\n name = expressions.INTERFACE_NAME\n command = self.interface_list_command\n self._interface = self._match(expression,\n name,\n command)\n return self._interface", "def GetWirelessIP(self):\n ip = self.wifi.GetIP()\n return ip", "def connectToWifi(strip, start):\r\n wifi = network.WLAN(network.STA_IF)\r\n wifi.active(True)\r\n wifi.connect(SSID,PW)\r\n while not wifi.isconnected():\r\n # only flash the wifi connection wait signal if starting\r\n if start:\r\n ledFlash(strip, LED_COLOR_BLUE, 0.5)\r\n pass\r\n return wifi", "def connect():\n \n print(\"*****Starting connection*****\")\n \n ssid = id_key.network_id #hidden ssid\n key = id_key.network_key #hidden key\n \n station = network.WLAN(network.STA_IF)\n \n if station.isconnected() == True:\n print(\"*****Already connected*****\")\n return\n \n station.active(True)\n station.connect(ssid, key)\n \n while station.isconnected() == False:\n pass\n \n print(\"*****Connection successful*****\")\n 
print(station.ifconfig())", "def addWlan(self, station): \n phyInt.phy[station] = phyInt.totalPhy[self.currentPhy][3:]\n os.system(\"iw phy phy%s set netns %s\" % (phyInt.phy[station], station.pid)) \n wif = station.cmd(\"iwconfig 2>&1 | grep IEEE | awk '{print $1}'\").split(\"\\n\")\n wif.pop()\n for iface in wif:\n if iface[:4]==\"wlan\":\n try:\n self.nextWlan[str(station)] += 1\n except:\n self.nextWlan[str(station)] = 0\n netxWlan = self.nextWlan[str(station)] \n self.renameIface(station, netxWlan, iface)\n self.currentPhy+=1", "def using_network(ssid, password, antenna=0):\n import network\n sta_if = network.WLAN(network.STA_IF)\n if not sta_if.isconnected():\n print('connecting to network...')\n sta_if.active(True)\n sta_if.config(antenna=antenna) # select antenna, 0=chip, 1=external\n sta_if.connect(ssid, password)\n while not sta_if.isconnected():\n # Check the status\n status = sta_if.status()\n # Constants aren't implemented for PYBD as of MicroPython v1.13.\n # From: https://github.com/micropython/micropython/issues/4682\n # 'So \"is-connecting\" is defined as s.status() in (1, 2) and \"is-connected\" is defined as s.status() == 3.'\n #\n if status <= 0:\n # Error States?\n return False\n #if ((status == network.WLAN.STAT_IDLE) or (status == network.WLAN.STAT_WRONG_PASSWORD)\n # or (status == network.WLAN.STAT_NO_AP_FOUND) or (status == network.WLAN.STAT_CONNECT_FAIL)):\n # Problems so return\n # return False\n\n print('network config:', sta_if.ifconfig())\n return True", "def wchan(self):\n if hasattr(self, \"_wchan\"):\n return self._wchan\n else:\n return None", "def active(self) -> NetworkWirelessAP | None:\n return self._active", "def is_connected():\n sta_if = network.WLAN(network.STA_IF)\n return sta_if.isconnected()", "def destination_station(self) -> \"Station\" | None:\n if not self.destination_station_code:\n return None\n return self.rail.stations[self.destination_station_code]", "def _guess_lan_address():\n blacklist = [\"127.0.0.1\", \"0.0.0.0\", \"255.255.255.255\"]\n for interface in netifaces.interfaces():\n addresses = netifaces.ifaddresses(interface)\n for option in addresses.get(netifaces.AF_INET, []):\n if \"broadcast\" in option and \"addr\" in option and not option[\"addr\"] in blacklist:\n if __debug__: dprint(\"interface \", interface, \" address \", option[\"addr\"])\n return option[\"addr\"]\n #Exception for virtual machines/containers\n for interface in netifaces.interfaces():\n addresses = netifaces.ifaddresses(interface)\n for option in addresses.get(netifaces.AF_INET, []):\n if \"addr\" in option and not option[\"addr\"] in blacklist:\n if __debug__: dprint(\"interface \", interface, \" address \", option[\"addr\"])\n return option[\"addr\"]\n dprint(\"Unable to find our public interface!\", level=\"error\")\n return None", "def GetDefaultWiredNetwork(self):\n config = ConfigParser.ConfigParser()\n config.read(self.wired_conf)\n profileList = config.sections()\n for profile in profileList:\n if config.has_option(profile, \"default\"):\n if misc.to_bool(config.get(profile, \"default\")):\n return profile\n return None", "def createWIFIAccessPoint():\n ifname = config.get(\"interface\", \"wifi\")\n ipaddress = config.get(\"hotspot\", \"ip\")\n prefix = int(config.get(\"hotspot\", \"prefix\"))\n ssid = config.get(\"hotspot\", \"ssid\")\n password = config.get(\"hotspot\", \"password\")\n ################################\n s_wifi = dbus.Dictionary(\n {\n \"ssid\": dbus.ByteArray(ssid.encode(\"utf-8\")),\n \"mode\": \"ap\",\n })\n s_wsec = 
dbus.Dictionary(\n {\n \"key-mgmt\": \"wpa-psk\",\n \"psk\": password\n })\n s_con = dbus.Dictionary(\n {\"type\": \"802-11-wireless\",\n \"interface-name\":ifname ,\n \"uuid\": str(uuid.uuid4()),\n \"id\": ssid,\n \"autoconnect\":dbus.Boolean(True)\n })\n addr1 = dbus.Dictionary({\"address\": ipaddress, \"prefix\": dbus.UInt32(prefix)})\n dns = []\n s_ip4 = dbus.Dictionary(\n {\n \"address-data\": dbus.Array([addr1], signature=dbus.Signature(\"a{sv}\")),\n \"dns\": dbus.Array(dns, signature=dbus.Signature('u'), variant_level=1),\n \"method\": \"manual\",\n })\n s_ip6 = dbus.Dictionary({\"method\": \"ignore\"})\n con = dbus.Dictionary(\n {\n \"802-11-wireless\": s_wifi,\n \"802-11-wireless-security\":s_wsec,\n \"connection\": s_con,\n \"ipv4\": s_ip4,\n \"ipv6\": s_ip6\n })\n try:\n logging.info(\"Creating hotspot connection: {} - {}\".format(s_con[\"id\"], s_con[\"uuid\"]))\n ##########\n bus = dbus.SystemBus()\n proxy = bus.get_object(\n \"org.freedesktop.NetworkManager\", \"/org/freedesktop/NetworkManager/Settings\"\n )\n settings = dbus.Interface(proxy, \"org.freedesktop.NetworkManager.Settings\")\n connection = settings.AddConnection(con)\n logging.info(f\"Created access point connection {connection}\")\n except Exception as e:\n logging.error(\"Hotspot connection creation failed\")\n logging.error(e)", "def init_wlan_ap():\n\n print('WLAN: AP mode')\n wlan.init(mode=WLAN.AP,\n ssid='ttn-be-mapper',\n auth=(WLAN.WPA2, 'reppam-eb-ntt'),\n channel=7,\n antenna=WLAN.INT_ANT)", "def wifi_connect(self, vap: VirtualAPHostapd) -> bool:\n config_file_name = \"boardfarm_tmp.conf\"\n config_file_path = \"/tmp/{}\".format(config_file_name)\n\n # Create network configuration for SSID\n bssid = \"bssid={}\".format(vap.bssid)\n ssid = \"ssid=\\\"{}\\\"\".format(vap.get_ssid())\n key = \"psk=\\\"{}\\\"\".format(vap.get_psk())\n network_config = \"network={{\\n{}\\n{}\\n{}\\n}}\".format(bssid, ssid, key)\n # Clean up previous configuration\n self.sendline(\"rm -f \\\"{}\\\"\".format(config_file_path))\n self.expect(self.prompt)\n self.sendline(\"echo \\'{}\\' > \\\"{}\\\"\".format(network_config, config_file_path))\n self.expect(self.prompt)\n # Start wpa_supplicant with created configuration\n # Typical coommand on RPI: wpa_supplicant -B -c/tmp/temp.conf -iwlan0 -Dnl80211,wext\n self.sudo_sendline(\"wpa_supplicant -B -D{} -i{} -c{}\".format(\n self.driver_name, self.iface_wifi, config_file_path))\n self.expect(\"Successfully initialized wpa_supplicant\")\n return bool(self.match)", "def wifi_channel(self):\n return self._wifi_channel", "def GetIwconfig(self):\n return self.wifi.GetIwconfig()", "def get_GW():\n p = sr1(IP(dst=\"google.com\", ttl=0) / ICMP() / \"XXXXXXXXXXX\",verbose=0)\n return p.src", "def get_device(self):\n addr = self.address\n servers = [server for server in pyrax.cloudservers.list()\n if addr in server.networks.get(\"private\", \"\")]\n try:\n return servers[0]\n except IndexError:\n return None", "def nearest_weather_station(self) -> caimira.data.weather.WxStationRecordType:\n return caimira.data.weather.nearest_wx_station(\n longitude=self.location_longitude, latitude=self.location_latitude\n )", "def scapy_layers_dot11_Dot11_sta_bssid(self):\n\tif self.haslayer(Dot11ProbeReq) or self.hasflag('FCfield', 'to-DS'):\n\t\treturn self.addr2\n\telse:\n\t\treturn self.addr1", "def wpsConnect():\n \n SSID = \"none\"\n # scan networks on interface wlan0, to see some nice networks\n subprocess.check_output([\"wpa_cli\", \"-i\", \"wlan0\", \"scan\"]) \n sleep(1);\n \n #get and decode 
results\n wpa = subprocess.check_output([\"wpa_cli\", \"-i\", \"wlan0\", \"scan_results\"]).decode(\"UTF-8\")\n \n #parse response to get MAC address of router that has WPS-PBC state\n active_spot_reg = re.search(\"(([\\da-f]{2}:){5}[\\da-f]{2})(.*?)\\[WPS-PBC\\]\", wpa)\n \n #check if found any\n if not (active_spot_reg is None):\n if active_spot_reg.group(1):\n \n #connect via wps_pbc\n subprocess.check_output([\"wpa_cli\", \"-i\", \"wlan0\", \"wps_pbc\", active_spot_reg.group(1)])\n SSID = active_spot_reg.group(5)\n \n print(active_spot_reg.group(1) + \" \" + SSID)\n print(wpa)\n \n return(SSID)", "def fetchWANIP():\n logging.info(\"Trying to fetch WAN IP\")\n _wanIf = config.get(\"interface\", \"wan\")\n _wanip = None\n try:\n bus = dbus.SystemBus()\n proxy = bus.get_object(\"org.freedesktop.NetworkManager\", \"/org/freedesktop/NetworkManager\")\n manager = dbus.Interface(proxy, \"org.freedesktop.NetworkManager\")\n devices = manager.GetDevices()\n for device in devices:\n devProxy = bus.get_object(\"org.freedesktop.NetworkManager\", device)\n devConfIface = dbus.Interface(devProxy, \"org.freedesktop.DBus.Properties\")\n devConf = devConfIface.GetAll(\"org.freedesktop.NetworkManager.Device\")\n if devConf['Interface'] == _wanIf:\n actConProxy = bus.get_object(\"org.freedesktop.NetworkManager\", devConf[\"ActiveConnection\"])\n actConIface = dbus.Interface(actConProxy, \"org.freedesktop.DBus.Properties\")\n actConConf = actConIface.GetAll(\"org.freedesktop.NetworkManager.Connection.Active\")\n actConIP4Proxy = bus.get_object(\"org.freedesktop.NetworkManager\", actConConf['Ip4Config'])\n actConIP4Iface = dbus.Interface(actConIP4Proxy, \"org.freedesktop.DBus.Properties\")\n actConIP4Conf = actConIP4Iface.GetAll(\"org.freedesktop.NetworkManager.IP4Config\")\n _wanip = actConIP4Conf[\"AddressData\"][0][\"address\"]\n for dnsEntry in actConIP4Conf[\"NameserverData\"]:\n wandns.append(dnsEntry[\"address\"])\n logging.info(f\"WAN DNS server fetched for {_wanIf} - {dnsEntry['address']}\")\n logging.info(f\"WAN IP fetched for {_wanIf} - {_wanip}\")\n except Exception as e:\n logging.error(\"Trying to fetch WAN IP error\")\n logging.error(e)\n # return WAN IP\n return _wanip", "def wa(self, council: str = \"1\") -> WA:\n return WA(self, council)", "def get_defaultgw(self):\n return self.get_ipv4_defaultgw()", "def get_station(div=0, wd=0, abbrev=None, as_dataframe=False,\n input_file=None, output_file=None, suds_cache=None):\n # ensure div is a list of ints\n if type(div) is not list: div = [div]\n # ensure wd is a list\n if type(wd) is not list: wd = [wd]\n # ensure name is a homogeneous list of str if it exists\n if abbrev is not None and type(abbrev) is not list:\n abbrev = [abbrev]\n assert all(type(a) is str for a in abbrev)\n\n stations = []\n if input_file is None:\n # use the Co DWR SOAP service\n suds_client = _get_client(CODWR_WSDL_URL)\n\n # get the water division/districts\n dists = get_water_district(div, wd, as_dataframe=False, suds_cache=suds_cache)\n if dists is None:\n # no matching division/district(s)\n return None\n\n if abbrev is None:\n for d in dists:\n # get the stations for the division/district\n sites = suds_client.service.GetSMSTransmittingStations(d['div'], d['wd'])\n if sites is None:\n return None\n\n # get the parameters for the stations\n sparms = suds_client.service.GetSMSTransmittingStationVariables(d['div'], d['wd'])\n if sparms is None:\n # hmmm - we have stations but no parameters...\n raise ValueError(\"Service returned no parameters for transmitting 
station(s).\")\n\n # the SOAP service returns each parameter for a station as a\n # separate row <abbrev, parameter> which we will compact into\n # a dict {abbrev,[parameter,parameter,...]}\n params = {}\n for sp in sparms.StationVariables:\n spd = dict(sp)\n if spd['abbrev'] not in params:\n params[spd['abbrev']] = []\n params[spd['abbrev']].append(spd['variable'])\n\n # build up the complete station description (including the water\n # district name and parameters) and add it to the station list\n # to the station list\n for site in sites.Station:\n sited = dict(site)\n sited['waterDistrictName'] = d['waterDistrictName']\n sited['parameters'] = params[sited['abbrev']]\n stations.append(sited)\n\n else:\n for a in abbrev:\n site = suds_client.service.GetSMSTransmittingStations(0, 0, a)\n if site is not None:\n sited = dict(site)\n\n for d in dists:\n if d['div'] == sited['div'] and d['wd'] == sited['wd']:\n sited['waterDistrictName'] = d['waterDistrictName']\n break\n\n # retrieve the station parameters and attach them to the\n # station information\n sparms = suds_client.service.GetSMSTransmittingStationVariables(sited['div'],\n sited['wd'],\n sited['abbrev'])\n if sparms is None:\n # hmmm - we have stations but no parameters...\n raise ValueError(\"Service returned no parameters for station \"\n + sited['abbrev'])\n for sp in sparms.StationVariables:\n spd = dict(sp)\n if sited['parameters'] is None:\n sited['parameters'] = []\n assert(spd['abbrev'] == sited['abbrev'])\n sited['parameters'].append(spd['variable'])\n\n else:\n # retrieve the list of sites in the specified file\n print(\"Nothing yet\")\n\n if as_dataframe is True:\n stations = pd.DataFrame(stations)\n\n return stations if len(stations) > 0 else None", "def getlan():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n s.connect(('10.255.255.255', 1))\n lan = str(s.getsockname()[0])\n s.close()\n except socket.error:\n s.close()\n sys.exit('>> Unable to find LAN IP')\n\n return lan", "def get_wl(self, strict=True):\n ax, wl, ds, ss = self.get_response()\n if (ax is True) and (strict is True):\n wl = None\n return wl", "def select_wireless_endpoint_uuid(self):\n\n for device in self.meetingpoint.DeviceListGet():\n # is the status Available?\n if device.StatusGet() == DeviceStatus.Available:\n # yes, return the UUID\n return device.DeviceIdentifierGet()\n\n # No device found, return None\n return None", "def select_wireless_endpoint_uuid(self):\n from byteblowerll.byteblower import DeviceStatus_Available\n\n for device in self.meetingpoint.DeviceListGet():\n # is the status Available?\n if device.StatusGet() == DeviceStatus_Available:\n # yes, return the UUID\n return device.DeviceIdentifierGet()\n\n # No device found, return None\n return None", "def set_802_11_wireless(self, pardus_profile):\n if pardus_profile.connection_type == \"802-11-wireless\":\n return _802_11_Wireless(pardus_profile)\n else:\n return \"none\"", "def getMacAddress(self, wlanInterface):\n self.wlanInterface = wlanInterface\n self.storeMacAddress=[]\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', '%s'[:15]) % str(self.wlanInterface))\n self.storeMacAddress.append(''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1])\n return self.storeMacAddress", "def get_network_adapter() -> network.NetworkAdapter:\n if (ip := os.getenv('ref_ip')) is not None: # noqa: SIM112\n return network.get_adapter_containing_ip(ip)\n # get next available loopback adapter\n return next(adapter 
for adapter in network.get_adapters() if adapter.is_loopback)", "def wlanScan(self, iface):\n log.debug('WlanInterface wlanScan()')\n ret = WlanScan( self._handle, \n byref(iface.InterfaceGuid), \n None,None,None)\n if ret != ERROR_SUCCESS:\n raise WinError(ret)", "def connect() -> None:\n wlan = network.WLAN(network.STA_IF)\n wlan.active(True)\n\n if not wlan.isconnected():\n wlan.connect(config.WIFI_SSID, config.WIFI_PASSWORD)\n # Wait for connection.\n for _ in range(20):\n if wlan.isconnected():\n return\n utime.sleep(1)\n\n raise Exception('Could not connect to network')", "def _setup_wifi_ap(self):\n context = self._get_ap_context()\n try:\n check_output(['ifconfig', context['hostname']])\n logger.info('wifi ap {} already setup'.format(context['hostname']))\n return True\n except CalledProcessError:\n logger.info('Setting up virtual access point interface')\n call(['service', 'hostapd', 'stop'])\n call(['service', 'dnsmasq', 'stop'])\n\n self._write_system_template('/etc/dnsmasq.conf', 'access_point/dnsmasq.conf')\n self._write_system_template('/etc/hostapd/hostapd.conf', 'access_point/hostapd.conf', context)\n self._write_system_template('/etc/network/interfaces', 'access_point/interfaces.conf', context)\n self._write_system_template('/etc/default/hostapd', 'access_point/default_hostapd.conf', context)\n self._write_system_template('/etc/dhcpcd.conf', 'access_point/dhcpcd.conf', context)\n \n call(['systemctl', 'enable', 'hostapd', ])\n call(['systemctl', 'enable', 'dnsmasq', ])\n return True", "def wwn(self) -> SmartSsdWwn:\n return self._wwn", "def GetCurrentNetwork(self, iwconfig=None):\n current_network = str(self.wifi.GetCurrentNetwork(iwconfig))\n return current_network", "def checkWifi():\n try:\n subprocess.check_output(\"iwgetid\")\n return True\n except subprocess.CalledProcessError: # if not connected\n return False", "async def _async_wifi_entities_list(\n avm_wrapper: AvmWrapper, device_friendly_name: str\n) -> list[FritzBoxWifiSwitch]:\n _LOGGER.debug(\"Setting up %s switches\", SWITCH_TYPE_WIFINETWORK)\n\n #\n # https://avm.de/fileadmin/user_upload/Global/Service/Schnittstellen/wlanconfigSCPD.pdf\n #\n wifi_count = len(\n [\n s\n for s in avm_wrapper.connection.services\n if s.startswith(\"WLANConfiguration\")\n ]\n )\n _LOGGER.debug(\"WiFi networks count: %s\", wifi_count)\n networks: dict = {}\n for i in range(1, wifi_count + 1):\n network_info = await avm_wrapper.async_get_wlan_configuration(i)\n # Devices with 4 WLAN services, use the 2nd for internal communications\n if not (wifi_count == 4 and i == 2):\n networks[i] = {\n \"ssid\": network_info[\"NewSSID\"],\n \"bssid\": network_info[\"NewBSSID\"],\n \"standard\": network_info[\"NewStandard\"],\n \"enabled\": network_info[\"NewEnable\"],\n \"status\": network_info[\"NewStatus\"],\n }\n for i, network in networks.copy().items():\n networks[i][\"switch_name\"] = network[\"ssid\"]\n if (\n len(\n [\n j\n for j, n in networks.items()\n if slugify(n[\"ssid\"]) == slugify(network[\"ssid\"])\n ]\n )\n > 1\n ):\n networks[i][\"switch_name\"] += f\" ({WIFI_STANDARD[i]})\"\n\n _LOGGER.debug(\"WiFi networks list: %s\", networks)\n return [\n FritzBoxWifiSwitch(\n avm_wrapper, device_friendly_name, index, data[\"switch_name\"]\n )\n for index, data in networks.items()\n ]", "def get_aff_net(sta):\n pass", "def associate_wireless_device(self) -> Optional[str]:\n return pulumi.get(self, \"associate_wireless_device\")", "def _get_ws(self):\n try:\n create_connection\n except:\n from websocket import create_connection\n\n if 
self._ws is None:\n try:\n self._ws = create_connection(('wss://{}:8080/api/ws'.format(self._wshost)), timeout=10)\n\n payload = {\n 'action' : \"userOnline\",\n 'userAgent' : 'app',\n 'version' : 6,\n 'nonce' : gen_nonce(15),\n 'apkVesrion': \"1.8\",\n 'os' : 'ios',\n 'at' : self.get_bearer_token(),\n 'apikey' : self.get_user_apikey(),\n 'ts' : str(int(time.time())),\n 'model' : 'iPhone10,6',\n 'romVersion': '11.1.2',\n 'sequence' : str(time.time()).replace('.','')\n }\n\n self._ws.send(json.dumps(payload))\n wsresp = self._ws.recv()\n # _LOGGER.error(\"open socket: %s\", wsresp)\n\n except (socket.timeout, ConnectionRefusedError, ConnectionResetError):\n _LOGGER.error('failed to create the websocket')\n self._ws = None\n\n return self._ws", "def default_workstation(self):\n w, created = Workstation.objects.get_or_create(\n slug=settings.DEFAULT_WORKSTATION_SLUG\n )\n\n if created:\n w.title = settings.DEFAULT_WORKSTATION_SLUG\n w.save()\n\n return w", "def SetWirelessInterface(self, interface):\n print \"setting wireless interface %s\" % (str(interface))\n self.wifi.wireless_interface = noneToBlankString(interface)\n self.wired.wireless_interface = noneToBlankString(interface)\n config = ConfigParser.ConfigParser()\n config.read(self.app_conf)\n config.set(\"Settings\",\"wireless_interface\", interface)\n configfile = open(self.app_conf, \"w\")\n config.write(configfile)", "def get_wotd():\n\treturn wotd", "def ConnectWifi(SSID, pwd):\n import network\n sta_if = network.WLAN(network.STA_IF)\n if not sta_if.isconnected():\n DisplayMsg('Connecting to network...',8)\n sta_if.active(True)\n sta_if.connect(SSID, pwd)\n while not sta_if.isconnected():\n pass\n return True", "def start(self, bs, nextIface, ssid, mode, channel, \n country_code, auth_algs, wpa, wpa_key_mgmt, rsn_pairwise, wpa_passphrase):\n self.apName.append(bs)\n self.apSSID[str(bs)] = ssid\n self.apMode[str(bs)] = mode\n self.cmd = (\"echo \\'\")\n \"\"\"General Configurations\"\"\" \n self.cmd = self.cmd + (\"interface=%s\" % nextIface) # the interface used by the AP\n \"\"\"Not using at the moment\"\"\"\n self.cmd = self.cmd + (\"\\ndriver=nl80211\")\n if(ssid!=None):\n self.cmd = self.cmd + (\"\\nssid=%s\" % ssid) # the name of the AP\n if(mode==\"g\" or mode==\"n\"):\n self.cmd = self.cmd + (\"\\nhw_mode=g\") \n elif (mode==\"b\"):\n self.cmd = self.cmd + (\"\\nhw_mode=b\") \n elif (mode==\"a\"):\n self.cmd = self.cmd + (\"\\nhw_mode=a\")\n if(channel!=None):\n self.cmd = self.cmd + (\"\\nchannel=%s\" % channel) # the channel to use \n if(mode==\"ac\"):\n self.cmd = self.cmd + (\"\\nwme_enabled=1\") \n self.cmd = self.cmd + (\"\\nieee80211ac=1\")\n self.cmd = self.cmd + (\"\\nwme_enabled=1\") \n self.cmd = self.cmd + (\"\\nieee80211n=1\")\n if(mode==\"n\"):\n self.cmd = self.cmd + (\"\\nht_capab=[HT40+][SHORT-GI-40][DSSS_CCK-40]\")\n \n #Not used yet!\n if(country_code!=None):\n self.cmd = self.cmd + (\"\\ncountry_code=%s\" % country_code) # the country code\n if(auth_algs!=None):\n self.cmd = self.cmd + (\"\\nauth_algs=%s\" % auth_algs) # 1=wpa, 2=wep, 3=both\n if(wpa!=None):\n self.cmd = self.cmd + (\"\\nwpa=%s\" % wpa) # WPA2 only\n if(wpa_key_mgmt!=None):\n self.cmd = self.cmd + (\"\\nwpa_key_mgmt=%s\" % wpa_key_mgmt ) \n if(rsn_pairwise!=None):\n self.cmd = self.cmd + (\"\\nrsn_pairwise=%s\" % rsn_pairwise) \n if(wpa_passphrase!=None):\n self.cmd = self.cmd + (\"\\nwpa_passphrase=%s\" % wpa_passphrase) \n \n #elif(len(self.baseStationName)>self.countAP and len(self.baseStationName) != 1):\n # \"\"\"From AP2\"\"\"\n # 
self.cmd = self.apcommand\n #self.cmd = self.cmd + \"\\n\"\n # self.cmd = self.cmd + (\"\\nbss=%s\" % self.newapif[self.nextIface]) # the interface used by the AP\n # if(self.ssid!=None):\n # self.cmd = self.cmd + (\"\\nssid=%s\" % self.ssid ) # the name of the AP\n #self.cmd = self.cmd + (\"\\nssid=%s\" % self.ssid) # the name of the AP\n # if(self.auth_algs!=None):\n # self.cmd = self.cmd + (\"\\nauth_algs=%s\" % self.auth_algs) # 1=wpa, 2=wep, 3=both\n # if(self.wpa!=None):\n # self.cmd = self.cmd + (\"\\nwpa=%s\" % self.wpa) # WPA2 only\n # if(self.wpa_key_mgmt!=None):\n # self.cmd = self.cmd + (\"\\nwpa_key_mgmt=%s\" % self.wpa_key_mgmt ) \n # if(self.rsn_pairwise!=None):\n # self.cmd = self.cmd + (\"\\nrsn_pairwise=%s\" % self.rsn_pairwise) \n # if(self.wpa_passphrase!=None):\n # self.cmd = self.cmd + (\"\\nwpa_passphrase=%s\" % self.wpa_passphrase) \n # self.countAP = len(self.baseStationName)\n # self.apcommand = \"\"\n return self.cmd", "def network_interface(self): \n return self._network_interface", "def GetLastUsedWiredNetwork(self):\n config = ConfigParser.ConfigParser()\n config.read(self.wired_conf)\n profileList = config.sections()\n for profile in profileList:\n if config.has_option(profile,\"lastused\"):\n if misc.to_bool(config.get(profile,\"lastused\")):\n return profile\n return None", "def _find_device(self):\n for bus in usb.busses():\n for dev in bus.devices:\n if dev.idVendor == self.vendor_id and dev.idProduct == self.product_id:\n if self.device_id is None or dev.filename == self.device_id:\n log.info('found station on USB bus=%s device=%s' % (bus.dirname, dev.filename))\n return dev\n return None", "def _from_wei(self, wei):\n return self.server.fromWei(wei, 'ether')", "def utility(self):\r\n\r\n if \"controllerDatasetLayers\" in self._flc.properties and \\\r\n \"utilityNetworkLayerId\" in self._flc.properties.controllerDatasetLayers:\r\n from arcgis.features._utility import UtilityNetworkManager\r\n url = \"%s/UtilityNetworkServer\" % os.path.dirname(self._flc.url)\r\n return UtilityNetworkManager(url=url,\r\n version=self)\r\n return None", "def getMac(self):\n # Import netifaces here to prevent error importing this module in setup.py\n import netifaces\n interfaces = ['eth0', 'wlan0']\n try:\n interfaces.append(netifaces.gateways()['default'][netifaces.AF_INET][1])\n except:\n pass\n for interface in interfaces:\n try:\n return netifaces.ifaddresses(interface)[netifaces.AF_LINK][0]['addr']\n except ValueError:\n pass\n except:\n exception('Error getting MAC address')\n return None", "def instance():\n global inst\n try:\n inst\n except:\n inst = BNVMAPI(None)\n return inst", "def _get_state(self):\n fw_wp_en = (self._interface.get('fw_wp_en') == 'on')\n fw_wp = (self._interface.get('fw_wp') == 'on')\n if fw_wp_en:\n return self._STATE_FORCE_ON if fw_wp else self._STATE_FORCE_OFF\n else:\n return self._STATE_ON if fw_wp else self._STATE_OFF", "def IsWirelessUp(self):\n return self.wifi.IsUp()", "def infradevice(self):\n return self.broker.infradevice(**{\"DeviceRouteID\": self.DeviceRouteID})", "def get_netiface():\n ip = mu.get_ip()\n for interface in netifaces.interfaces():\n addrs = netifaces.ifaddresses(interface)\n if netifaces.AF_INET in addrs.keys():\n i_addr = addrs[netifaces.AF_INET][0]['addr']\n if i_addr == ip:\n return interface\n\n # Return None if no interface found\n return None", "def _get_interface(self):\n return self.__interface", "def _get_interface(self):\n return self.__interface", "def _get_interface(self):\n return self.__interface", 
"def _get_interface(self):\n return self.__interface", "def _get_interface(self):\n return self.__interface", "def _get_interface(self):\n return self.__interface", "def _get_interface(self):\n return self.__interface", "def _get_interface(self):\n return self.__interface", "def _get_interface(self):\n return self.__interface", "def _get_interface(self):\n return self.__interface", "def _get_interface(self):\n return self.__interface", "def _get_interface(self):\n return self.__interface", "def _get_interface(self):\n return self.__interface", "def _get_interface(self):\n return self.__interface", "def _get_interface(self):\n return self.__interface", "def getWhitebeet(self):\r\n if hasattr(self, \"whitebeet\"):\r\n return self.whitebeet\r\n else:\r\n return None", "def Connect(self):\n if self.ap.encrypted and not self.passkey:\n raise WiFiError('Require passkey to connect to encrypted network')\n self._DisconnectAP()\n\n # Create temporary directory.\n if self._user_tmp_dir:\n self._tmp_dir = self._user_tmp_dir\n else:\n self._tmp_dir_handle = self._device.temp.TempDirectory()\n self._tmp_dir = self._tmp_dir_handle.__enter__()\n\n # First, bring the device up. If it is already up, this will succeed\n # anyways.\n logging.debug('Bringing up ifconfig...')\n self._device.CheckCall(['ifconfig', self.interface, 'up'])\n\n # Authenticate to the server.\n auth_fns = {\n 'wep': self._AuthenticateWEP,\n 'wpa': self._AuthenticateWPA,\n 'wpa2': self._AuthenticateWPA}\n auth_process = auth_fns.get(\n self.ap.encryption_type, self._AuthenticateOpen)()\n next(auth_process)\n\n # Grab an IP address.\n dhcp_process = self._dhcp_fn(**self._dhcp_args)\n self.ip = next(dhcp_process)\n\n # Store for disconnection.\n self._auth_process = auth_process\n self._dhcp_process = dhcp_process", "def associate(self, sta, ssid): \n self.host = sta\n self.host.cmd(\"iw dev %s-wlan0 connect %s\" % (sta, ssid))\n self.confirmInfraAssociation(self.host)", "def getInstance():\n return net()", "def get_mac(self) -> str:\n self.sendline(\"iw {} info\".format(self.iface_dut))\n # We are looking for MAC definition of STA\n # wdev 0x1\n # addr 96:4e:c9:cc:7a:2c\n # type managed\n self.expect(\"addr (?P<mac>..:..:..:..:..:..)\\r\\n\\t(type|ssid)\")\n return self.match.group('mac')", "def get_switch(self):\n\n svc = \"urn:upnp-org:serviceId:SwitchPower1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n \n status = self.get_variable(svc, \"Status\")\n return status == 1", "def get_data_interface(nwbf, data_interface_name):\n for module_name in nwbf.processing:\n module = nwbf.processing[module_name]\n if data_interface_name in module.data_interfaces:\n return module.get(data_interface_name) \n return None", "def get_hwaddress(ifname):\n # It might be possible that more than one link-layer address is associated\n # with the interface. 
If this is the case, we simply return the first.\n # If there's no such interface or the interface has no MAC address,\n # we notify caller via ValueError.\n macdict = getmacaddrs()\n try:\n addr = macdict[ifname][0]\n except (IndexError, KeyError):\n raise ValueError(\"%s\" % ifname)\n return addr", "def current_worker():\n try:\n return worker_thread_data.worker\n except AttributeError:\n return None", "def ReadWirelessNetworkProfile(self, id):\n config = ConfigParser.ConfigParser()\n config.read(self.wireless_conf)\n cur_network = self.LastScan[id]\n essid_key = \"essid:\" + cur_network[\"essid\"]\n bssid_key = cur_network[\"bssid\"]\n if self.debug_mode:\n print bssid_key\n if config.has_section(essid_key)and \\\n misc.stringToNone(config.get(essid_key, 'use_settings_globally')):\n return self._read_wireless_profile(config, cur_network, \n essid_key)\n elif config.has_section(bssid_key):\n return self._read_wireless_profile(config, cur_network, bssid_key)\n else:\n cur_network[\"has_profile\"] = False\n return \"500: Profile Not Found\"", "def DetectPhyName(self, interface):\n output = self._device.CheckOutput(\n ['iw', 'dev', interface, 'info'], log=True)\n m = self._RE_WIPHY.search(output)\n return ('phy' + m.group(1)) if m else None", "def wfi() -> None:", "def get_workspace(self) -> Workspace:\n if self.ws:\n return self.ws\n self.ws = Workspace.from_config()\n return self.ws", "def GetCurrentInterface(self):\n return self.current_interface" ]
[ "0.68340015", "0.63972205", "0.616661", "0.5945816", "0.5928931", "0.5894898", "0.5855707", "0.58045965", "0.56598383", "0.5637595", "0.5623451", "0.5612626", "0.55965465", "0.5566321", "0.5533868", "0.5496373", "0.544632", "0.54028994", "0.5389177", "0.5349265", "0.53430164", "0.53034437", "0.52939045", "0.5263459", "0.5214871", "0.51879346", "0.51630616", "0.5155382", "0.51538885", "0.51451933", "0.5131504", "0.5121014", "0.5118689", "0.509792", "0.5094259", "0.50837076", "0.5047158", "0.5032503", "0.50246066", "0.5016984", "0.4999239", "0.49936152", "0.49928778", "0.49924582", "0.49880388", "0.49847254", "0.49812254", "0.49786255", "0.49701068", "0.496253", "0.49624294", "0.49563736", "0.4955054", "0.4951151", "0.49490258", "0.49395975", "0.49351162", "0.4924062", "0.4897824", "0.48794535", "0.48759136", "0.48694637", "0.4867822", "0.48597175", "0.48586258", "0.48557982", "0.48399073", "0.48361143", "0.48268598", "0.48211858", "0.48080543", "0.4799682", "0.4799682", "0.4799682", "0.4799682", "0.4799682", "0.4799682", "0.4799682", "0.4799682", "0.4799682", "0.4799682", "0.4799682", "0.4799682", "0.4799682", "0.4799682", "0.4799682", "0.47959793", "0.4785695", "0.4765598", "0.47617635", "0.4746485", "0.47354454", "0.4728736", "0.47064897", "0.4701384", "0.4701327", "0.46899554", "0.4687649", "0.4681062", "0.46763644" ]
0.61334884
3
Constructor for object DeleteRenewKeys that reads content from config.json
def __init__(self):
    # open json config file that reads in information
    config_path = open("config.json", "r")
    config_json = config_path.read()
    config_dict = json.loads(config_json)

    # assign object variables
    self.project_id = config_dict["project-id"]
    self.bucket_name = config_dict["bucket-name"]
    self.location_id = config_dict["key-location"]
    self.key_ring_id = config_dict["key-ring-id"]
    self.crypto_key_id = config_dict["crypto-key-id"]
    self.service_account_email = config_dict["service-account-email"]

    # close the file
    config_path.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n try:\n with open(os.path.expanduser(\"~/.dkeyrc\"), 'r') as f:\n self.__cfgdata = json.load(f)\n except Exception as e:\n print(\"Error: Unable to load config JSON at ~/.dkeyrc -- %s\" % (e))\n sys.exit(1)", "def __init__(self, key=None):\n\n self.key = key\n self.cryptor = None\n self.file_ext_targets = ['txt']", "def __init__(self):\n with open('config.json', encoding='UTF-8') as json_data_file:\n self.config = json.load(json_data_file)\n self._get_credential()\n self.file_tree = [{}] * 100", "def __init__(self, config_file_name=\"config.json\"):\n with open(config_file_name, \"r\") as config:\n f = dict(json.load(config))\n for key, value in f.items():\n setattr(self, key, value)", "def __init__(self):\n fd = open(\"conf/redis_config.json\", \"r\")\n tmp = fd.read()\n data = json.loads(tmp)\n self.database = redis.StrictRedis(\n host=data[\"host\"], \n port=data[\"port\"], \n password=None,\n decode_responses=True\n )\n self.key = data[\"key\"]", "def __init__(self, config):\n try:\n config['volume_id']\n config['access_key']\n config['secret_access_key']\n config['region']\n except KeyError, e:\n logging.error(repr(e))\n raise ImproperlyConfigured()\n\n if not config.has_key('keep'):\n config['keep'] = 5\n\n self.config = config", "def __init__(self, config_dict: dict):\n self._config = config_dict", "def _reinit(self):\n cfg_path = self[CONFIG_FILE_KEY] if CONFIG_FILE_KEY in self else None\n for attr in list(self.keys()):\n del self[attr]\n self.__init__(cfg=cfg_path)", "def __init__(self, config_file):\n with open(config_file, 'r') as file:\n self.config = json.load(file)\n self.set_config(self.config)", "def __init__(self,\n publicKey=None,\n jsonPath=None,\n title=None,\n fields=[],\n privateKey=None,\n deleteKey=None,\n baseUrl='http://data.sparkfun.com',\n encoder=encoders.plain_json):\n self.publicKey = publicKey\n self.privateKey = privateKey\n self.deleteKey = deleteKey\n self.title = title\n self._encoder = encoder\n if jsonPath:\n self._jsonKeys = json.load(open(jsonPath))\n else:\n self._baseUrl = baseUrl\n self._jsonKeys = {\n 'title': title,\n 'publicKey': publicKey,\n 'deleteKey': deleteKey,\n 'privateKey': privateKey,\n 'inputUrl': self._get_url_from_base('input'),\n 'outputUrl': self._get_url_from_base('output'),\n 'manageUrl': self._get_url_from_base('streams')\n }\n\n\n self._session = requests.Session()\n if not only_strings_in(fields):\n raise ValueError(\"String type expected for *fields\")\n\n self._fields = fields\n self._stats = None\n self._last_headers = None", "def __init__(self, path, number_keys=1):\n\n self.path = path\n self.keyring = []\n if os.path.exists(path):\n self.keyring = read_keys(path)\n else:\n for n in range(number_keys):\n key = generate_key(generate_random())\n self.keyring.append(key)\n write_keys(path, self.keyring)", "def initialize(self, keys: List[str]):", "def __init__(self, key_path = None):\n self.keyindex = 0\n# with open(key_path, \"r\") as file:\n# self.keylist = [key.strip() for key in file.readlines()]\n self.keylist = ['AIzaSyBXu79WmxVrUuEf02A5IXshNZVNL59IW40','AIzaSyCItjSKR_I3AXr3ilSlU0bQNfb6HA60nq4','AIzaSyAEFVBLRROF7HGE-eORrAxWeURbU1tZ998',\n 'AIzaSyCg3a9GEDsXpa-yzR6as9muOxnNQyBWSNA','AIzaSyCrzGl_nmnH68AuyhVuOwYtwGrwUh6HAwE','AIzaSyCZDq1wuhcidz2q0TB5erBEBss83jrxQyg','AIzaSyCXb6-7tB_MZDuIqaCrG3BHGuxdMvq3Wfs',\n 'AIzaSyBg1FBuBQl-xi2wJUFoLmg-AY60DmS-guo','AIzaSyCITQISwxpMchmEN2UMXoqwVkAWjetm0Oc','AIzaSyC8n_YsHpFjPLt4_7sES_PTYzsBFgn7K9k','AIzaSyBWUajFFwTyww0DHlZBnqEkMmaRVTEcrcY',\n 
'AIzaSyDMjp-z3DH_S2BRFCcfnRlE1T55qRE6lCQ','AIzaSyDouKf0AZOj4KNXaTAVQQi_QmpRpNHP03A','AIzaSyAwXggWXK_gDOmmelvVSx4Cztmid6bmCEA','AIzaSyAM1RAjCd_W8lVcb53pkh8LkBunowhSHTA',\n 'AIzaSyBuUYRPLmzrbfK7XTL9F3Q0FadHkhQ_-KA','AIzaSyDSEvf00-boovEMrhIv9vD__5tAjPLDoA8','AIzaSyCvqdi6-eYK9RkQmSXxdXT57AJvzVFY5k8','AIzaSyC9TDWLqhLyc4vzwOgs_C342WWHGF5-uf0',\n 'AIzaSyDzanvcDte2vjMSnpGEUaZd0DW7Xb-axoU','AIzaSyDEc0Ql28e2ePEOMoahnkNRBDQu15nTyBk','AIzaSyDYzkxN6ksoFL148uPyO-wH47G3OalEKag','AIzaSyB0sheZQR7f1KV4fBJi5NaN5jejsNujKSg', 'AIzaSyBCx_s4xAMBGG1ncMp20EykJMf4mGMdwLw','AIzaSyAKwTmYwSX2OMVlfUfBnr53kDfh1iNqj4I','AIzaSyBuG0L3dA5Y62nutoHB2Hhvk-OA6EJK3Fg',\n 'AIzaSyAcyo6AaTgnuzaW1TbjTrJChRdt-h1on2E','AIzaSyBcAiKz6BMl35rwV7BGT3DuztYQPtQUDJc','AIzaSyBLuKB_DU4NmGca6XGW5-BcYSxBB_2OmiA',\n 'AIzaSyDCWWX_Jd1iGzXaUHs7y_6_zpgejzGG-V0', 'AIzaSyBuUYRPLmzrbfK7XTL9F3Q0FadHkhQ_-KA','AIzaSyDCWWX_Jd1iGzXaUHs7y_6_zpgejzGG-V0','AIzaSyBHiE5t86GKVX-YuKrV-09flVtZ1R1s6mA',\n 'AIzaSyAaO1JqWWUkQ8ygQZZlSOuv4ZPvw1w5sJ4','AIzaSyAcyo6AaTgnuzaW1TbjTrJChRdt-h1on2E','AIzaSyAE9Z7Rot5-15x_nr1jeoORrSniUn5CQp0','AIzaSyBcAiKz6BMl35rwV7BGT3DuztYQPtQUDJc',\n 'AIzaSyBuG0L3dA5Y62nutoHB2Hhvk-OA6EJK3Fg','AIzaSyCWTP0LHJM9wbN_r1NtsmDWJJSDSYu8UVY','AIzaSyCavNozQXAmu43IQp5ksdpWGIsmdYNunhw','AIzaSyANi91i0WifIsS3mQ0dS-A9u7UftcgKw_g',\n 'AIzaSyD3WZOZ0m3vTVbInYKL-S9e5DdgpDOYEPU','AIzaSyDgqZ54-02e4ovkANlcQzwOwNDCShysCiM','AIzaSyBchj0Xv_PfZVKrWWzIznl1ydDzGITzLZA',\n 'AIzaSyD-c05sMNbULYOo9D8Gaw_Q16QfSTBwcEs','AIzaSyC9gRiOvIWlonL_pZHIdPxUhOjn6cOFMR8','AIzaSyD88ZrGnD-kJYu4y8ZclvtJE59NUl2QYw4','AIzaSyDMoOVtszehKuLfUnKAYXPmWGYollIhqko',\n 'AIzaSyCtNyBNJMIDl_cDUlXZraohg-2eU8hc8oQ','AIzaSyD0G9TkGLuZ0BfdpHqY6PRwvpJtb2WdRx0']\n #AIzaSyBHiE5t86GKVX-YuKrV-09flVtZ1R1s6mA\n #AIzaSyBtsUWrMb9wtn7yhx0-NcquACQ7E8o1VBo", "def __init__(self, json_key_path, logger=logging, chunk_size=_CHUNK_SIZE):\n assert chunk_size % _CHUNK_SIZE_MULTIPLE == 0, (\n 'chunk_size must be a multiple of %d B' % _CHUNK_SIZE_MULTIPLE)\n self.chunk_size = chunk_size\n\n self.logger = logger\n\n credentials = service_account.Credentials.from_service_account_file(\n json_key_path, scopes=(_GCS_SCOPE,))\n # Google Cloud Storage is depend on bucket instead of project, so we don't\n # need to put project name to arguments. 
However, this client is general\n # Google Cloud client, so the project can't be None; instead it can be an\n # empty string.\n self.client = storage.Client(project='', credentials=credentials)", "def reinit(self):\n self.keys = {}\n fh = open(self.path, \"w\")\n json.dump(self.keys, fh)\n fh.close()\n os.chmod(self.path, 0o600)", "def initialize(self):\n\n if not os.path.exists(CONFIG_Download):\n with io.open(CONFIG_Download, 'w') as fh:\n json.dump(DEFAULT_Download, fh, sort_keys=True, indent=4, separators=(\",\", \": \"))\n return()\n\n # Load all options.\n with io.open(CONFIG_Download, 'r') as fh:\n data = json.load(fh)\n \n for key, value in data.items():\n setattr(self, key, value)", "def from_dict(cls, dikt) -> \"DeleteImageResponseContent\":\n return util.deserialize_model(dikt, cls)", "def __init__(self, config_file_name=\"config.json\"):\n self.config_file_name = config_file_name\n self._config = self._open_config_file()", "def from_json_dict(d: Dict[str, Any]) -> \"HttpConfigurationFile\":\n self = HttpConfigurationFile()\n self.id = d.get(\"Id\")\n self.display_name = d.get(\"DisplayName\")\n self.connection_type = d.get(\"ConnectionType\")\n self.uri = d.get(\"Uri\")\n self.api_key = d.get(\"ApiKey\")\n self.cert_path = d.get(\"CertPath\")\n return self", "def from_dict(cls, _dict: Dict) -> 'GatewayChangeRequestGatewayClientGatewayDelete':\n args = {}\n if 'type' in _dict:\n args['type'] = _dict.get('type')\n else:\n raise ValueError('Required property \\'type\\' not present in GatewayChangeRequestGatewayClientGatewayDelete JSON')\n return cls(**args)", "def from_dict(cls, dikt) -> 'ComAdobeGraniteMaintenanceCrxImplRevisionCleanupTaskProperties':\n return util.deserialize_model(dikt, cls)", "def FromDict(raw_config, factory):\r\n c = Config()\r\n c.raw_config = raw_config\r\n c.base_directory = raw_config[\"base_directory\"]\r\n c.scratch_directory = raw_config[\"scratch_directory\"]\r\n\r\n sections = [\"resources\"]\r\n for section in sections:\r\n section_list = getattr(c, section)\r\n LoadSectionList(raw_config.get(section, []), section_list, factory)\r\n # Two-phase load.\r\n for section in sections:\r\n section_list = getattr(c, section)\r\n for resource in section_list:\r\n resource.Init(c)\r\n return c", "def test_delete_hyperflex_node_config_policy(self):\n pass", "def __init__(self, environment):\n with open('config.json') as f:\n self.config = eval(f.read())\n self.config = self.config[environment]", "def config():\n return {\n \"COMPONENT_NAME\": \"testing-deleter\",\n \"DEST_SITE\": \"NERSC\",\n \"DISK_BASE_PATH\": \"/path/to/rucio/rse/root\",\n \"HEARTBEAT_PATCH_RETRIES\": \"3\",\n \"HEARTBEAT_PATCH_TIMEOUT_SECONDS\": \"30\",\n \"HEARTBEAT_SLEEP_DURATION_SECONDS\": \"60\",\n \"INPUT_STATUS\": \"detached\",\n \"LTA_REST_TOKEN\": \"fake-lta-rest-token\",\n \"LTA_REST_URL\": \"http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/\",\n \"OUTPUT_STATUS\": \"source-deleted\",\n \"RUN_ONCE_AND_DIE\": \"False\",\n \"SOURCE_SITE\": \"WIPAC\",\n \"WORK_RETRIES\": \"3\",\n \"WORK_SLEEP_DURATION_SECONDS\": \"60\",\n \"WORK_TIMEOUT_SECONDS\": \"30\",\n }", "def post_config_root_delete(self, resource_id, resource_dict):\n pass", "def load_from_dict(\n cls, dictionary: Dict[str, Any], *, location: Path\n ) -> \"Configuration\":\n\n schema = {\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"required\": [\"destination\", \"namespace\", \"requirements\"],\n \"properties\": {\n \"destination\": {\"type\": \"string\"},\n \"namespace\": {\"type\": \"string\"},\n 
\"requirements\": {\"type\": \"string\"},\n \"protected-files\": {\"type\": \"array\", \"items\": {\"type\": \"string\"}},\n \"patches-dir\": {\"type\": \"string\"},\n \"transformations\": {\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"properties\": {\n \"substitute\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"required\": [\"match\", \"replace\"],\n \"properties\": {\n \"match\": {\"type\": \"string\"},\n \"replace\": {\"type\": \"string\"},\n },\n },\n },\n \"drop\": {\"type\": \"array\", \"items\": {\"type\": \"string\"}},\n },\n },\n \"license\": {\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"properties\": {\n \"directories\": {\n \"type\": \"object\",\n \"patternProperties\": {\"^.*$\": {\"type\": \"string\"}},\n },\n \"fallback-urls\": {\n \"type\": \"object\",\n \"patternProperties\": {\"^.*$\": {\"type\": \"string\"}},\n },\n },\n },\n \"typing-stubs\": {\n \"type\": \"object\",\n \"patternProperties\": {\n \"^.*$\": {\"type\": \"array\", \"items\": {\"type\": \"string\"}},\n },\n },\n },\n }\n\n try:\n validate(dictionary, schema)\n except ValidationError as e:\n raise ConfigurationError(str(e))\n\n def path_or_none(key: str) -> Optional[Path]:\n if key in dictionary:\n return Path(dictionary[key])\n return None\n\n return Configuration(\n base_directory=location,\n destination=Path(dictionary[\"destination\"]),\n namespace=dictionary[\"namespace\"],\n requirements=Path(dictionary[\"requirements\"]),\n protected_files=dictionary.get(\"protected-files\", []),\n patches_dir=path_or_none(\"patches-dir\"),\n substitute=dictionary.get(\"transformations\", {}).get(\"substitute\", {}),\n drop_paths=dictionary.get(\"transformations\", {}).get(\"drop\", []),\n license_fallback_urls=dictionary.get(\"license\", {}).get(\n \"fallback-urls\", {}\n ),\n license_directories=dictionary.get(\"license\", {}).get(\"directories\", {}),\n typing_stubs=dictionary.get(\"typing-stubs\", {}),\n )", "def __init__(self, **config):\n\n super().__init__(**config)\n\n for required in [\n \"CLIENT_ID\", \"CLIENT_SECRET\", \"USER_ID\"]:\n if not required in config:\n raise RuntimeError(\n \"Required key is missing from the config: {}\".format(\n required))\n\n self.client_id = config['CLIENT_ID']\n self.client_secret = config['CLIENT_SECRET']\n self.user_id = config['USER_ID']\n\n self._refresh_token = None\n self._access_token = None\n self._auto_refresh = config.get('auto_refresh', True)\n self._refresh_thread = None\n self._refresh_thread_running = False", "def configFromKeys(self, config, keys=[]):\n if not keys:\n return config\n elif len(keys) > 1:\n return self.configFromKeys(config[keys[0]], keys[1:])\n else:\n return config[keys[0]]", "def __init__(self, c_config, client):\n self.id = c_config['id']\n self.nodes = []\n self.client = client\n self.period = 20\n self.__init_load(c_config)", "def _delete(self, **kwargs):\n\n resource_name = self._get_resource_name(**kwargs)\n config = misc_utils.resolve_config(\n kwargs.pop('config', None),\n kwargs.pop('config_file', None),\n required=False\n )\n\n return self._make_request(\n uri='%s/%s' % (self._metadata['uri'], resource_name),\n method='DELETE',\n config=config\n )", "def _init_config_(self):\n self._config= {}", "def from_dict(cls, json_object):\n config = UniterConfig(vocab_size_or_config_json_file=-1)\n for key, value in json_object.items():\n config.__dict__[key] = value\n return config", "def test_create_delete_key(self):\n client = IPythonClient()\n 
dac = Context(client)\n # Create and push a key/value.\n key, value = dac._generate_key(), 'test'\n dac._push({key: value})\n # Delete the key.\n dac.delete_key(key)\n dac.close()\n client.close()", "def __init__(self, **options):\n\n super().__init__(**options)\n\n self._private_key = None\n self._public_key = None\n\n self._load_keys(**options)", "def __init__(self, config_path, normalize=False):\n self.config = {}\n _config_dict = {}\n self._config_path = Utils.expand_path(config_path)\n self.update = None\n self.normalize = normalize", "def __init__(self, config):\n self.config = self.default_config()\n for key in config:\n if config[key].endswith('*'):\n config[key] = config[key][:-1]\n self.config[key] = re.split('\\s*->\\s*', config[key])", "def test_delete_hyperflex_vcenter_config_policy(self):\n pass", "def __init__(self, config: Dict[str, Any]) -> None:\n self.config = config", "def __init__(self, name=None):\n self.name = name or \"default\"\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\".format(config_path))", "def main():\n print(\"Reading from config.json\")\n download_decrypt_store = DownloadDecryptStore()\n print(\"Downloading key from storage-bucket\")\n file_path = download_decrypt_store.download_key_from_blob()\n print(\"Decrypting downloaded file\")\n download_decrypt_store.decrypt_from_file(file_path)\n print(\"Completed\")", "def __init__(self, cfg: RAW_CFG, keyvals: Mapping[str, Any], *args, **kwargs) -> None:\n super().__init__(cfg, *args, **kwargs)\n\n for k in self.INDEX_KEYS:\n if k not in keyvals:\n raise ConfigException(f\"Key value {k} missing from keyvals: {keyvals!r}\")\n self.keyvals = keyvals", "def delete(self, ckey):\n obj = self\n keys = ckey.split('.')\n for key in keys:\n if key == keys[-1]:\n del obj[key]\n break\n if isinstance(obj, DotDict):\n obj = super(DotDict, obj).__getitem__(key)\n else:\n obj = obj.__getitem__(key)", "def delete_key_command(client: KeyVaultClient, args: dict[str, Any]) -> CommandResults:\n vault_name = args['vault_name']\n key_name = args['key_name']\n response = client.delete_key_request(vault_name, key_name)\n\n outputs = copy.deepcopy(response)\n outputs['deletedDate'] = convert_timestamp_to_readable_date(\n outputs['deletedDate'])\n outputs['scheduledPurgeDate'] = convert_timestamp_to_readable_date(\n outputs['scheduledPurgeDate'])\n\n readable_response = copy.deepcopy(outputs)\n readable_response['keyId'] = readable_response['key']['kid']\n\n outputs['attributes'] = convert_time_attributes_to_iso(outputs['attributes'])\n outputs[VAULT_NAME_CONTEXT_FIELD] = vault_name\n\n readable_output = tableToMarkdown(f'Delete {key_name}',\n readable_response,\n ['keyId', 'recoveryId', 'deletedDate',\n 'scheduledPurgeDate'],\n removeNull=True,\n headerTransform=pascalToSpace)\n command_results = CommandResults(\n outputs_prefix='AzureKeyVault.Key',\n outputs_key_field='recoveryId',\n outputs=outputs,\n raw_response=response,\n readable_output=readable_output,\n ignore_auto_extract=True\n )\n\n return command_results", "def __init__(self, file):\n self.__config = file\n with open(self.__config) as json_file:\n data = json.load(json_file)\n self.__data = data", "def __init__(self, ogc_config: OGCConfiguration, config_filename: Union[str, None],\n url_login: str, credentials: dict, catalog_name: str = CATALOG_FILENAME,\n 
token_prefix: Optional[str] = \"\", token_suffix: Optional[str] = \"\"):\n super().__init__(ogc_config, config_filename, catalog_name)\n\n self._publish_mutex = Lock()\n self._observation_cnt_mutex = Lock()\n\n self._sequences = []\n\n self._url_login = url_login\n self._credential = credentials\n self._token_prefix = token_prefix\n self._token_suffix = token_suffix\n self._cloud_token = \"\"\n self.update_cloud_token()\n\n if config_filename:\n config_file = util.load_from_file(config_filename)\n try:\n self._site_name = config_file[CLOUD_KEY][SITE_NAME_KEY]\n self._tenant_id = config_file[CLOUD_KEY][TENANT_ID_KEY]\n self._site_id = config_file[CLOUD_KEY][SITE_ID_KEY]\n except KeyError as ex:\n logging.critical('Missing parameter: \"'+str(ex)+'\" in configuration file.')\n sys.exit(ERROR_MISSING_PARAMETER)\n else:\n try:\n self._site_name = os.environ[SITE_NAME_KEY.upper()]\n self._tenant_id = os.environ[TENANT_ID_KEY.upper()]\n self._site_id = os.environ[SITE_ID_KEY.upper()]\n except KeyError as ex:\n logging.critical('Missing environmental variable: \"'+str(ex)+'\"')\n sys.exit(ERROR_MISSING_ENV_VARIABLE)", "def __init__(self, config: Union[str, Path, TextIOWrapper] = None):\n if not isinstance(config, TextIOWrapper):\n config = Path(config) if config else Path(self._DEFAULT_LOCATION)\n config = config.expanduser().absolute()\n with open(config, 'r') as fp:\n self._config = json.load(fp)\n else:\n self._config = json.load(config)\n self._store = self._config.get('credsStore', None)\n if self._store not in self._SUPPORTED_STORES:\n raise UnsupportedStore(f'Credential store \"{self._store}\" not supported')\n # TODO: Support the other methods besides secretservice when we can actually test with them\n self._cmd = ['docker-credential-secretservice', 'get']", "def __init__(self, config_obj, *args, **kwargs):\r\n self._config_obj = config_obj\r\n \r\n # Holds version info for the VersionId SOAP object.\r\n self._version_info = {'service_id': 'ship', 'major': '7', \r\n 'intermediate': '0', 'minor': '0'}\r\n self.DeletionControlType = None\r\n \"\"\"@ivar: Holds the DeletrionControlType WSDL object.\"\"\"\r\n self.TrackingId = None\r\n \"\"\"@ivar: Holds the TrackingId WSDL object.\"\"\"\r\n # Call the parent FedexBaseService class for basic setup work.\r\n super(FedexDeleteShipmentRequest, self).__init__(self._config_obj, \r\n 'ShipService_v7.wsdl',\r\n *args, **kwargs)", "def delete_key(self,\r\n dkey):\r\n\r\n\r\n if (input(queries.DELETE_CONF_BEG\r\n +dkey+queries.DELETE_CONF_END) in YESTERMS):\r\n\r\n if dkey in self.keys():\r\n\r\n for i_temp in self.get_all_indexes():\r\n if dkey in self.get_keys_from_note(i_temp):\r\n tempnote = self.get_note(i_temp).delete_keys({dkey})\r\n self.add_note(i_temp,note=tempnote)\r\n if self.get_keys_from_note(i_temp) == set():\r\n temp = self.get_keys_from_note(i_temp)\r\n temp.add(VOIDTERM)\r\n self.add_note(i_temp,\r\n keyset_only=temp)\r\n self.add_keys_tags(i_temp,\r\n {VOIDTERM})\r\n\r\n self.delete_keys_tags(i_temp, {dkey})", "def from_dict(cls, json_object):\r\n config = RobertaModelConfig(vocab_size_or_config_json_file=-1)\r\n for key, value in json_object.items():\r\n config.__dict__[key] = value\r\n return config", "def test_delete(self):\n mock = MagicMock(\n return_value={\"retcode\": 0, \"stderr\": \"error\", \"stdout\": \"salt\"}\n )\n with patch.dict(openstack_config.__salt__, {\"cmd.run_all\": mock}):\n self.assertEqual(\n openstack_config.delete(\"/etc/keystone/keys.conf\", \"sql\", \"connection\"),\n \"salt\",\n )\n\n mock = MagicMock(\n 
return_value={\"retcode\": 1, \"stderr\": \"error\", \"stdout\": \"salt\"}\n )\n with patch.dict(openstack_config.__salt__, {\"cmd.run_all\": mock}):\n self.assertRaises(\n CommandExecutionError,\n openstack_config.delete,\n \"/etc/key/keystone.conf\",\n \"sql\",\n \"connection\",\n )", "def test_delete_api_key(self):\n pass", "def __init__(self, filename):\n self._filename = filename\n fp = open(filename)\n self._contents = json.loads(fp.read())\n for key in self._contents.keys():\n #\n # Some .json keys begin with an @ sign, which represents ???.\n # The caller should not have to know which fields have @ signs\n # and which don't. For each key that begins with an @ sign,\n # create a secondary key consisting of the same string without\n # the @ sign, and having the same value.\n if re.search(\"^@\", key):\n secondaryKey = re.sub(\"^@\", \"\", key)\n self._contents[secondaryKey] = self._contents[key]\n self._dataFileName = re.sub(\".json\", \"\", self._filename)\n self._validate()", "def delete(cls, *keys):\n todelete = []\n namespace, kind, member = Schema.Get(cls)\n for key in keys:\n assert isinstance(key, str)\n todelete.append(Key(namespace, kind, key)) \n Lisa.delete(*todelete)", "def __init__(self):\n self._key = ''", "def from_dict(cls, dikt: dict) -> 'TokenConfig':\n return util.deserialize_model(dikt, cls)", "def __init__(self):\n self.keyingMethod=fileSize\n self.keyToFile=dict()", "def __init__(self, filename, unique_keys):\n self.filename = filename\n self.data = None\n self.unique_keys = sorted(list(set(unique_keys)))\n self._reload()", "def test_07_delete(self, mock_readall, mock_writeall, mock_shred,\n mock_config, mock_verks):\n self._init()\n udocker.Config = mock_config\n udocker.Config.tmpdir = \"/tmp\"\n mock_readall.return_value = self.credentials\n kstore = udocker.KeyStore(\"filename\")\n kstore.delete(self.url)\n mock_writeall.assert_called_once_with({})", "def initialize(self,check_params=True):\n\n if not os.path.exists(CONFIG_Correlation):\n with io.open(CONFIG_Correlation, 'w') as fh:\n json.dump(DEFAULT_Correlation, fh, sort_keys=True, indent=4, separators=(\",\", \": \"))\n return()\n\n # Load all options.\n with io.open(CONFIG_Correlation, 'r') as fh:\n data = json.load(fh)\n \n for key, value in data.items():\n setattr(self, key, value)\n\t\n if check_params:\n self.check_params()", "def __init__(self, config_path: str = \"config.json\"):\n # Change here if you want to relocate you config file\n self.config = {}\n self.load_configuration(config_path)\n self.app_name = self.config.get('app_name', self.APP_NAME)", "def __init__(self, key=None):\n self._current_dcc = key\n self._naming_spec = dict()\n self._default_config = config_defaults.DEFAULT_CONFIGURATION\n\n if type(self)._CURRENT_CONFIG == OrderedDict():\n self._set_CURRENT_CONFIG__(config_defaults.DEFAULT_CONFIGURATION)\n self._set_CURRENT_CONFIG__(self._load_file_config())\n self._set_CURRENT_CONFIG__(self._get_env_config(), clean=True)\n\n self.SaveConfig()", "def configure_deletes(conf):\n print()\n if conf.get('purge', None) is None:\n conf['purge'] = yes_no(\n 'Would you like the sync to be able to delete files between devices?', default=False)\n if conf['purge'] and conf.get('purge_limit') is None:\n conf['purge_limit'] = numeric_response(\n 'How long, in days, should deleted items still be monitored before being forgotten?', default=7)\n if conf['purge'] and conf.get('backup', None) is None:\n conf['backup'] = yes_no(\n 'Would you like to backup deleted files?', default=False)\n if 
conf['backup'] and conf.get('backup_path', None) is None:\n prompt = 'Provide a path for the backups'\n conf['backup_path'] = simple_response(prompt, default='DEFAULT')\n if conf['backup'] and conf.get('backup_limit', None) is None:\n prompt = 'How long, in days, would you like to keep backed up files? (-1 to never delete)'\n conf['backup_limit'] = numeric_response(prompt, default=7)\n return conf", "def pre_config_root_delete(self, resource_id):\n pass", "def test_delete_hyperflex_ucsm_config_policy(self):\n pass", "def __init__(self, config):\n self._config = config\n self.reload()", "def post_config_node_delete(self, resource_id, resource_dict):\n pass", "async def delete(self, key: str):", "def from_dict(cls, dikt) -> 'TokenEKey':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, json_object):\n config = BertConfig(vocab_size_or_config_json_file=-1)\n for key, value in json_object.items():\n config.__dict__[key] = value\n return config", "def from_dict(cls, json_object):\n config = BertConfig(vocab_size_or_config_json_file=-1)\n for key, value in json_object.items():\n config.__dict__[key] = value\n return config", "def from_dict(cls, json_object):\n config = BertConfig(vocab_size_or_config_json_file=-1)\n for key, value in json_object.items():\n config.__dict__[key] = value\n return config", "def __init__(self):\n self.charm_config = hookenv.config()\n self.kv = unitdata.kv()\n if not self.synapse_signing_key_file:\n self.synapse_signing_key_file = \"{}/{}.signing.key\".format(\n self.synapse_conf_dir, self.get_server_name()\n )", "def delete_account_key(configuration):\n os.remove(configuration.cm_key)", "def __init__(self, fileconf=None, client_token=None,\n cluster_url=None, cluster_unsafe=False, cluster_timeout=None,\n storage_url=None, storage_unsafe=False,\n retry_count=5, retry_wait=1.0,\n cluster_custom_certificate=None, storage_custom_certificate=None,\n sanitize_bucket_paths=True, show_bucket_warnings=True):\n self._version = \"qarnot-sdk-python/\" + __version__\n self._http = requests.session()\n self._retry_count = retry_count\n self._retry_wait = retry_wait\n self._sanitize_bucket_paths = sanitize_bucket_paths\n self._show_bucket_warnings = show_bucket_warnings\n if fileconf is not None:\n self.storage = None\n if isinstance(fileconf, dict):\n warnings.warn(\"Dict config should be replaced by constructor explicit arguments.\")\n self.cluster = None\n if fileconf.get('cluster_url'):\n self.cluster = fileconf.get('cluster_url')\n if fileconf.get('storage_url'):\n self.storage = fileconf.get('storage_url')\n auth = fileconf.get('client_auth')\n self.timeout: int = int(fileconf.get('cluster_timeout'))\n if fileconf.get('cluster_unsafe'):\n self._http.verify = False\n elif fileconf.get('cluster_custom_certificate'):\n self._http.verify = fileconf.get('cluster_custom_certificate')\n else:\n cfg = config.ConfigParser()\n with open(fileconf) as cfg_file:\n cfg.read_string(cfg_file.read())\n\n self.cluster = None\n if cfg.has_option('cluster', 'url'):\n self.cluster = cfg.get('cluster', 'url')\n if cfg.has_option('storage', 'url'):\n self.storage = cfg.get('storage', 'url')\n if cfg.has_option('client', 'token'):\n auth = cfg.get('client', 'token')\n elif cfg.has_option('client', 'auth'):\n warnings.warn('auth is deprecated, use token instead.')\n auth = cfg.get('client', 'auth')\n else:\n auth = None\n self.timeout = None\n if cfg.has_option('cluster', 'timeout'):\n self.timeout = cfg.getint('cluster', 'timeout')\n if cfg.has_option('cluster', 'unsafe') \\\n and 
cfg.getboolean('cluster', 'unsafe'):\n self._http.verify = False\n elif cfg.has_option('cluster', 'custom_certificate'):\n self._http.verify = cfg.get('cluster', 'custom_certificate')\n if cfg.has_option('storage', 'unsafe') \\\n and cfg.getboolean('storage', 'unsafe'):\n storage_unsafe = True\n if cfg.has_option('storage', 'custom_certificate'):\n storage_custom_certificate = cfg.get('storage', 'custom_certificate')\n else:\n self.cluster = cluster_url\n self.timeout = cluster_timeout\n self._http.verify = not cluster_unsafe\n if not cluster_unsafe and cluster_custom_certificate:\n self._http.verify = cluster_custom_certificate\n self.storage = storage_url\n auth = client_token\n\n if not self._http.verify:\n urllib3.disable_warnings()\n\n if self.cluster is None:\n self.cluster = os.getenv(\"QARNOT_CLUSTER_URL\")\n\n if self.storage is None:\n self.storage = os.getenv(\"QARNOT_STORAGE_URL\")\n\n if auth is None:\n auth = os.getenv(\"QARNOT_CLIENT_TOKEN\")\n\n if os.getenv(\"QARNOT_CLUSTER_UNSAFE\") is not None:\n self._http.verify = not os.getenv(\"QARNOT_CLUSTER_UNSAFE\") in [\"true\", \"True\", \"1\"]\n\n if os.getenv(\"QARNOT_CLUSTER_TIMEOUT\") is not None:\n self.timeout = int(os.getenv(\"QARNOT_CLUSTER_TIMEOUT\"))\n\n if auth is None:\n raise QarnotGenericException(\"Token is mandatory.\")\n self._http.headers.update({\"Authorization\": auth})\n\n self._http.headers.update({\"User-Agent\": self._version})\n\n if self.cluster is None:\n self.cluster = \"https://api.qarnot.com\"\n\n api_settings = self._get(get_url(\"settings\")).json()\n\n if self.storage is None:\n self.storage = api_settings.get(\"storage\", \"https://storage.qarnot.com\")\n\n if self.storage is None: # api_settings[\"storage\"] is None\n self._s3client = None\n self._s3resource = None\n return\n\n user = self.user_info\n session = boto3.session.Session()\n conf = botocore.config.Config(user_agent=self._version)\n\n should_verify_or_certificate_path = True\n if storage_unsafe:\n should_verify_or_certificate_path = not storage_unsafe\n elif storage_custom_certificate is not None:\n should_verify_or_certificate_path = storage_custom_certificate\n\n self._s3client = session.client(service_name='s3',\n aws_access_key_id=user.email,\n aws_secret_access_key=auth,\n verify=should_verify_or_certificate_path,\n endpoint_url=self.storage,\n config=conf)\n self._s3resource = session.resource(service_name='s3',\n aws_access_key_id=user.email,\n aws_secret_access_key=auth,\n verify=should_verify_or_certificate_path,\n endpoint_url=self.storage,\n config=conf)", "def test_delete_namespaced_build_config(self):\n pass", "def __init__(self, base='', *path_parts):\n self._config = {}\n self.path = join(base, *path_parts)\n\n if not isfile(self.path):\n raise ImproperlyConfigured('Not a file')\n\n with open(self.path, 'r') as secret_file:\n content = secret_file.read()\n\n for line in content.splitlines():\n if line and not line.startswith('#'):\n line_parts = line.split('=', 1)\n self._config[line_parts[0]] = line_parts[1]", "def delete(self, resource, keys, url_prefix, auth, session, send_opts):\n success = True\n exc = HTTPErrorList('At least one key-value update failed.')\n\n for key in keys:\n req = self.get_metadata_request(\n resource, 'DELETE', 'application/json', url_prefix, auth, key)\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n if resp.status_code == 204:\n continue\n err = (\n 'Delete failed for {}: {}, got HTTP response: ({}) - {}'\n .format(resource.name, key, resp.status_code, 
resp.text))\n exc.http_errors.append(HTTPError(err, request=req, response=resp))\n success = False\n\n if not success:\n raise exc", "def __init__(self, conf_file_location: str, template_dir: str, target_dir: str, hard_reset: bool):\n self.config: Config = yaml_loader.load(conf_file_location, Config)\n self.massage_config_file()\n self.config_dict: Dict = as_dict(self.config)\n self.template_dir = template_dir\n self.target_dir = target_dir\n self.hard_reset = hard_reset", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n force_delete = dictionary.get('forceDelete')\n id = dictionary.get('id')\n include_marked_for_removal = dictionary.get('includeMarkedForRemoval')\n retry = dictionary.get('retry')\n\n # Return an object of this model\n return cls(\n force_delete,\n id,\n include_marked_for_removal,\n retry\n)", "def from_dict(cls, dikt) -> 'CancelClaimRequest':\n return util.deserialize_model(dikt, cls)", "def test_delete_api_key_from_org(self):\n pass", "def __init__(self, config, json_files, generate_dict=False,\n mode=ModeType.EVAL):\n self.config = config\n self.logger = Logger(config)\n self._init_dict()\n self.sample_index = []\n self.sample_size = 0\n self.mode = mode\n\n self.files = json_files\n for i, json_file in enumerate(json_files):\n with open(json_file) as fin:\n self.sample_index.append([i, 0])\n while True:\n json_str = fin.readline()\n if not json_str:\n self.sample_index.pop()\n break\n self.sample_size += 1\n self.sample_index.append([i, fin.tell()])\n\n def _insert_vocab(files, _mode=InsertVocabMode.ALL):\n for _i, _json_file in enumerate(files):\n with open(_json_file) as _fin:\n for _json_str in _fin:\n try:\n self._insert_vocab(json.loads(_json_str), mode)\n except:\n print(_json_str)\n\n # Dict can be generated using:\n # json files or/and pretrained embedding\n if generate_dict:\n # Use train json files to generate dict\n # If generate_dict_using_json_files is true, then all vocab in train\n # will be used, else only part vocab will be used. e.g. 
label\n vocab_json_files = config.data.train_json_files\n mode = InsertVocabMode.LABEL\n if self.config.data.generate_dict_using_json_files:\n mode = InsertVocabMode.ALL\n self.logger.info(\"Use dataset to generate dict.\")\n _insert_vocab(vocab_json_files, mode)\n\n if self.config.data.generate_dict_using_all_json_files:\n vocab_json_files += self.config.data.validate_json_files + \\\n self.config.data.test_json_files\n _insert_vocab(vocab_json_files, InsertVocabMode.OTHER)\n\n if self.config.data.generate_dict_using_pretrained_embedding:\n self.logger.info(\"Use pretrained embedding to generate dict.\")\n self._load_pretrained_dict()\n self._print_dict_info()\n\n self._shrink_dict()\n self.logger.info(\"Shrink dict over.\")\n self._print_dict_info(True)\n self._save_dict()\n self._clear_dict()\n self._load_dict()", "def __init__(self, mods, key):\n self.mods = mods\n self.key = key", "def from_dict(cls, dikt) -> 'StartConfiguration':\n return util.deserialize_model(dikt, cls)", "def __init__(\n self,\n client: k8s.ApiClient,\n namespace: str,\n resource: dict,\n source_file: str,\n ) -> None:\n\n self.client = client\n self.namespace = namespace\n self.resource = resource\n self.source_file = source_file", "def pre_config_node_delete(self, resource_id):\n pass", "def revoke_from_key(self, authkey):\n certs = []\n try:\n clean_pem = OpenSSL.crypto.dump_privatekey(\n OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.load_privatekey(\n OpenSSL.crypto.FILETYPE_PEM, authkey.pem))\n except OpenSSL.crypto.Error as error:\n logger.debug(error, exc_info=True)\n raise errors.RevokerError(\n \"Invalid key file specified to revoke_from_key\")\n\n with open(self.list_path, \"rb\") as csvfile:\n csvreader = csv.reader(csvfile)\n for row in csvreader:\n # idx, cert, key\n # Add all keys that match to marked list\n # Note: The key can be different than the pub key found in the\n # certificate.\n _, b_k = self._row_to_backup(row)\n try:\n test_pem = OpenSSL.crypto.dump_privatekey(\n OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.load_privatekey(\n OpenSSL.crypto.FILETYPE_PEM, open(b_k).read()))\n except OpenSSL.crypto.Error as error:\n logger.debug(error, exc_info=True)\n # This should never happen given the assumptions of the\n # module. If it does, it is probably best to delete the\n # the offending key/cert. For now... 
just raise an exception\n raise errors.RevokerError(\"%s - backup file is corrupted.\")\n\n if clean_pem == test_pem:\n certs.append(\n Cert.fromrow(row, self.config.cert_key_backup))\n if certs:\n self._safe_revoke(certs)\n else:\n logger.info(\"No certificates using the authorized key were found.\")", "def __init__(__self__,\n resource_name: str,\n args: ObjectStorageKeyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self, keydata):\n if isinstance(keydata, basestring):\n keydata = json.loads(keydata)\n assert isinstance(keydata, dict), keydata\n self.dict = keydata", "def __init__(\n self, \n config: baasdatagw_models.Config,\n ):\n if UtilClient.is_unset(config):\n raise TeaException({\n 'code': 'ParameterMissing',\n 'message': \"'config' can not be unset\"\n })\n self._access_key_id = config.access_key_id\n self._access_key_secret = config.access_key_secret\n self._security_token = config.security_token\n self._endpoint = config.endpoint\n self._protocol = config.protocol\n self._user_agent = config.user_agent\n self._read_timeout = UtilClient.default_number(config.read_timeout, 20000)\n self._connect_timeout = UtilClient.default_number(config.connect_timeout, 20000)\n self._http_proxy = config.http_proxy\n self._https_proxy = config.https_proxy\n self._no_proxy = config.no_proxy\n self._socks_5proxy = config.socks_5proxy\n self._socks_5net_work = config.socks_5net_work\n self._max_idle_conns = UtilClient.default_number(config.max_idle_conns, 60000)\n self._max_idle_time_millis = UtilClient.default_number(config.max_idle_time_millis, 5)\n self._keep_alive_duration_millis = UtilClient.default_number(config.keep_alive_duration_millis, 5000)\n self._max_requests = UtilClient.default_number(config.max_requests, 100)\n self._max_requests_per_host = UtilClient.default_number(config.max_requests_per_host, 100)", "def FromDict(resource, raw_config):\r\n resource.name = raw_config[\"name\"]\r\n resource.raw_config = raw_config", "def __init__(self, config, opsdroid=None):\n super().__init__(config, opsdroid=opsdroid)\n self.config = config\n self.client = None\n self.host = self.config.get(\"host\", \"localhost\")\n self.port = self.config.get(\"port\", 6379)\n self.database = self.config.get(\"database\", 0)\n self.password = self.config.get(\"password\", None)\n self.reconnect = self.config.get(\"reconnect\", False)", "def __init__(self, basekey=\"\"):\n self.basekey = basekey", "def run(self):\n keys = self.admin_barbican.create_key()\n self.admin_barbican.orders_delete(keys.order_ref)", "def __initConfiguration(self):\n conf = configparser.ConfigParser()\n with open(self.configFile, \"r\") as f:\n conf.readfp(f)\n self.orgConf = conf\n # check additionalSection\n adSection = self.additionalSection\n if adSection in conf:\n adSection = conf[adSection]\n self.conf = {}\n for i in [self.CLIENT_ID, self.CLIENT_SECRET, self.AUTHZ_ENDPOINT,\n self.TOKEN_ENDPOINT, self.REDIRECT_URI, self.SCOPE]:\n if adSection != None and i in adSection:\n self.conf[i] = adSection[i]\n else:\n self.conf[i] = conf[\"DEFAULT\"][i]", "def __init__(self, storage_config: Dict[str, Any], storage_paths: List[str], local_dir: str):\n _ = storage_config\n self.local_dir = local_dir\n self.storage_paths = storage_paths\n self._file_records = {} # type: Dict[str, datetime.datetime]", "def __init__(self):\n self.config = {}", "def keys_from_config(cls, config, keys, filename):\n args = {}\n # Extract required arguments from configuration\n for arg in keys:\n if arg not in config:\n raise 
ValueError(\n f\"The key {arg!r} is not in the specified configuration\"\n f\" file {filename}\"\n )\n args[arg] = config[arg]\n\n return args", "def __init__(self, os_creds, keypair_settings):\n super(self.__class__, self).__init__(os_creds)\n\n self.keypair_settings = keypair_settings\n self.__delete_keys_on_clean = True\n\n # Attributes instantiated on create()\n self.__keypair = None" ]
[ "0.5615627", "0.53399825", "0.53382677", "0.5235249", "0.51820284", "0.5108353", "0.5022169", "0.5007471", "0.49882397", "0.49877107", "0.49747235", "0.49552137", "0.49455333", "0.49418396", "0.49205673", "0.49144816", "0.48787165", "0.48667872", "0.48570824", "0.48304826", "0.48157325", "0.4771355", "0.47699887", "0.47655568", "0.47552896", "0.47410834", "0.47330502", "0.4721344", "0.4715724", "0.47137156", "0.46981567", "0.46866468", "0.46795112", "0.46785203", "0.46759537", "0.46579877", "0.4653045", "0.4652375", "0.46521115", "0.46442083", "0.464122", "0.4637768", "0.46359414", "0.46340597", "0.46299973", "0.46256366", "0.46171463", "0.46166012", "0.46147805", "0.46068263", "0.45941773", "0.45852044", "0.45845357", "0.45831376", "0.45826042", "0.4576383", "0.45721236", "0.45710886", "0.4569504", "0.4567405", "0.4566749", "0.45646015", "0.45569468", "0.45546064", "0.45505744", "0.45482633", "0.4544378", "0.4537191", "0.4535223", "0.45310566", "0.45310566", "0.45310566", "0.4528885", "0.45237568", "0.45232004", "0.45215416", "0.4515507", "0.45114717", "0.45036906", "0.44999698", "0.4499129", "0.4496652", "0.44957498", "0.44940016", "0.44881698", "0.44824848", "0.4475953", "0.44733238", "0.4472038", "0.4468367", "0.44643047", "0.4462249", "0.44622007", "0.44594797", "0.44574517", "0.44549623", "0.4449773", "0.44482198", "0.44477734", "0.44447422" ]
0.52679664
3
Decrypts input ciphertext using a symmetric CryptoKey.
def decrypt_symmetric(self, ciphertext):
    from google.cloud import kms_v1

    # Creates an API client for the KMS API.
    client = kms_v1.KeyManagementServiceClient()

    # The resource name of the CryptoKey.
    name = client.crypto_key_path_path(self.project_id, self.location_id,
                                       self.key_ring_id, self.crypto_key_id)

    # Use the KMS API to decrypt the data.
    response = client.decrypt(name, ciphertext)

    return response.plaintext
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrypt_symmetric(secret_key, ciphertext, ttl=None):\n f = Fernet(secret_key)\n # fernet requires the ciphertext to be bytes, it will raise an exception\n # if it is a string\n return f.decrypt(bytes(ciphertext), ttl)", "def decrypt(priv_key, ciphertext):\n pk_encrypted_secret_key = ciphertext['pk_encrypted_secret_key']\n sym_encrypted_data = ciphertext['sym_encrypted_data']\n # TODO: secure delete\n secret_key = decrypt_pk(priv_key, pk_encrypted_secret_key)\n encoded_string = decrypt_symmetric(secret_key, sym_encrypted_data)\n return decode_data(encoded_string)", "def decrypt(self, ciphertext, key):\n iv = ciphertext[:AES.block_size]\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n plaintext = cipher.decrypt(ciphertext[AES.block_size:])\n return self.pkcs7_unpad(plaintext)", "def decrypt(key, ciphertext):\n data = fk(keyGen(key)[1], ip(ciphertext))\n return fp(fk(keyGen(key)[0], swapNibbles(data)))", "def decrypt(self, ciphertext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass", "def decrypt(private_key, ciphertext):\n if len(ciphertext) < 512 + 16:\n return None\n msg_header = ciphertext[:512]\n msg_iv = ciphertext[512:512+16]\n msg_body = ciphertext[512+16:]\n try:\n symmetric_key = PKCS1_OAEP.new(private_key).decrypt(msg_header)\n except ValueError:\n return None\n if len(symmetric_key) != 32:\n return None\n return AES.new(symmetric_key,\n mode=AES.MODE_CFB,\n IV=msg_iv).decrypt(msg_body)", "def decrypt(ciphertext: str, key: str) -> str:\n return encrypt(ciphertext, key)", "def decrypt(ciphertext, key):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\n\tif not isPython2():\n\t\tif isString(ciphertext):\n\t\t\tciphertext = ciphertext.encode(\"latin-1\")\n\t\tif isString(key):\n\t\t\tkey = key.encode(\"latin-1\")\n\t\t\n\tiv = ciphertext[:AES.block_size]\n\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\tplaintext = cipher.decrypt(ciphertext[AES.block_size:])\n\treturn plaintext", "def sym_dec(self, ciph, passphrase):\n (rfd, wfd) = xpipe()\n os.write(wfd, passphrase + '\\n')\n plain = xsystem([self.sslname, self.symmetric, '-d', '-pass',\n 'fd:' + str(rfd)], ciph)\n xclose(wfd)\n xclose(rfd)\n if not plain:\n warning('keymanagement: Unable to decrypt because %s does not exist\\n' %(self.sslname))\n return None\n\n return plain", "def decrypt(ciphertext):\n # AES decrypt\n iv = ciphertext[:16]\n ciphertext = ciphertext[16:]\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(aes.decrypt(ciphertext))", "def decrypt_block(self, ciphertext):\n assert len(ciphertext) == 16\n\n cipher_state = bytes2matrix(ciphertext)\n\n add_round_key(cipher_state, self._key_matrices[-1])\n inv_shift_rows(cipher_state)\n inv_sub_bytes(cipher_state)\n\n for i in range(self.n_rounds - 1, 0, -1):\n add_round_key(cipher_state, self._key_matrices[i])\n inv_mix_columns(cipher_state)\n inv_shift_rows(cipher_state)\n inv_sub_bytes(cipher_state)\n \n add_round_key(cipher_state, self._key_matrices[0])\n\n return matrix2bytes(cipher_state)", "def decrypt_cbc(key, ciphertext):\n\tmessage = ''\n\tfor i in range(0, len(ciphertext)/16 - 1):\n\t\tiv = ciphertext[i*16:(i+1)*16]\n\t\tinputblock = ciphertext[(i+1)*16:(i+2)*16]\n\t\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\t\tmessage +=cipher.decrypt(inputblock)\n\tif ord(message[-1]) <=16:\n\t\tmessage = message[:-ord(message[-1])]\n\treturn message", "def decrypt(cypher, priv_key):\n\n if not isinstance(priv_key, key.PrivateKey):\n raise TypeError(\"You must use the private key with 
decrypt\")\n\n return gluechops(cypher, priv_key.d, priv_key.n, decrypt_int)", "def decryptEncryptionKey(cipherString, key):\n\tencryptionType, iv, cipherText, mac = decodeCipherString(cipherString)\n\t# log.debug(\"mac:%s\", mac)\n\t# log.debug(\"iv:%s\", iv)\n\t# log.debug(\"ct:%s\", cipherText)\n\tassert mac is None\n\tif encryptionType != 0:\n\t\traise UnimplementedError(\"can not decrypt type:%s\" % encryptionType)\n\tcipher = cryptography.hazmat.primitives.ciphers.Cipher(\n\t algorithms.AES(key), modes.CBC(iv), backend=default_backend())\n\tdecryptor = cipher.decryptor()\n\tplainText = decryptor.update(cipherText) + decryptor.finalize()\n\t# log.debug(\"mackey before unpad:%s\", plainText[32:])\n\treturn plainText[:32], plainText[32:64]", "def _decrypt(data):\n cipher = AES.new(bytes(_AES_KEY), AES.MODE_CBC, bytes(_AES_IV))\n return cipher.decrypt(data)", "def decrypt(self, cypher):\n\n if self.crypt_private == \"\":\n raise ValueError(\"Error decrypting: No private encryption key found for {}\".format(self))\n\n key_private = RsaPrivateKey.Read(self.crypt_private)\n return key_private.Decrypt(cypher)", "def decrypt_ctr(key, ciphertext):\n\tmessage = ''\n\tiv = ciphertext[0:16]\n\tfor i in range(16, len(ciphertext), 16):\n\t\tinputblock = ciphertext[i:i+16]\n\t\tcipher = AES.new(key, AES.MODE_ECB)\n\t\txorkey = cipher.encrypt(long_to_bytes(bytes_to_long(iv)+(i/16-1)))\n\t\tif len(inputblock) == 16:\n\t\t\tmessage += strxor(inputblock, xorkey)\n\t\telse:\n\t\t\tmessage += strxor(inputblock, xorkey[:len(inputblock)])\n\treturn message", "def decrypt(self, ciphertext):\n return self._transform(ciphertext, self._backward)", "def aes_decrypt(encrypted_data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = cipher.decrypt(encrypted_data)\r\n return unpad(padded_data)", "def decrypt(ciphertext, key, iv):\n cipher = AES.new(key, AES.MODE_CFB, iv)\n msg = cipher.decrypt(ciphertext)\n return msg", "def asym_dec(self, ciph, keyfile):\n ciph = ciph.split('\\0')\n ciphkey_len = int(ciph[0])\n ciph = '\\0'.join(ciph[1:])\n ciphkey = ciph[:ciphkey_len]\n ciph = ciph[ciphkey_len:]\n\n passphrase = xsystem([self.sslname, 'rsautl', '-decrypt', '-inkey',\n keyfile], ciphkey)\n if not passphrase:\n warning('keymanagement: Unable to perform asymmetric decryption\\n')\n return None\n\n return self.sym_dec(ciph, passphrase)", "def Decrypt(self, input_bytes):\n data_bytes = input_bytes[keyczar.HEADER_SIZE:] # remove header\n if len(data_bytes) < self.block_size + util.HLEN: # IV + sig\n raise errors.ShortCiphertextError(len(data_bytes))\n\n iv_bytes = data_bytes[:self.block_size] # first block of bytes is the IV\n ciph_bytes = data_bytes[self.block_size:-util.HLEN]\n sig_bytes = data_bytes[-util.HLEN:] # last 20 bytes are sig\n if not self.hmac_key.Verify(input_bytes[:-util.HLEN], sig_bytes):\n raise errors.InvalidSignatureError()\n\n plain = AES.new(self.key_bytes, AES.MODE_CBC, iv_bytes).decrypt(ciph_bytes)\n return self.__UnPad(plain)", "def decrypt(data, key, iv):\n decryptor = AES.new(key, AES.MODE_CBC, iv=iv)\n return decryptor.decrypt(data)", "def rsa_decrypt(cypher, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. 
The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.decrypt)", "def decrypt(\r\n key: bytes,\r\n cipher_text: bytes,\r\n) -> str:\r\n block_size = 16\r\n iv = cipher_text[:block_size]\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n plain_text = cipher.decrypt(cipher_text[block_size:]).decode('utf-8')\r\n return _unpad(plain_text)", "def decrypt(algorithm, key, encrypted_data, associated_data):\n decryptor = Decryptor(algorithm, key, associated_data, encrypted_data.iv, encrypted_data.tag)\n return decryptor.update(encrypted_data.ciphertext) + decryptor.finalize()", "def decrypt():\n plaintext = \"\"\n i = 0\n while i < len(ciphertext):\n if i%2==1:\n try:\n plaintext += key[ ciphertext[i-1]+ciphertext[i] ]\n except KeyError:\n plaintext += ciphertext[i-1]+ciphertext[i]\n i += 1\n return plaintext", "def decrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n decrypted = aes.decrypt(text)\r\n return decrypted", "def decrypt(self, cypher):\n\n cypher = b64decode(cypher)\n key_private = RsaPrivateKey.Read(self.crypt_private)\n return key_private.Decrypt(cypher)", "def decrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_decrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())", "def decrypt(self, ciphertext: str) -> str:\n\n return self.run(ciphertext, Cryptography.DECRYPT)", "def decrypt(self):\n # Grab the initialization vector from the front of the cipher-text\n iv = self.ciphertext[:AES.block_size]\n # Create a new AES object in Cipher Block Chaining mode\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return cipher.decrypt(self.ciphertext)[AES.block_size:].rstrip().decode(\"utf-8\"), iv", "def AES_decrypt(ciphertext: bytes) -> Text:\n text = b64decode(ciphertext)\n cipher = AES.new(secret_key, mode, IV)\n return Padding.unpad(cipher.decrypt(text), bs).decode('utf-8')", "def decrypt_aes256(data, key, iv):\n decryptor = AES.new(key, AES.MODE_CBC, iv)\n return decryptor.decrypt(data)", "def decrypt_data(data, encryption_key):\n assert isinstance(data, str)\n obj = AES.new(encryption_key, AES.MODE_CBC, 'This is an IV456')\n bytes_data = bytes.fromhex(data)\n return Pad.unpad(obj.decrypt(bytes_data)).decode()", "def decrypt(self, message):\n #check validity of _private_key\n if self._private_key is None:\n raise Exception(\"invalid private key\")\n\n output = \"\"\n\n d = self._private_key[0]\n n = self._private_key[1]\n\n for i in xrange(len(ciphertext)):\n m = pow(ciphertext[i], d, n)\n output += int_to_string(m)\n return output", "def _decrypt(self, data, key):\n seed1 = key\n seed2 = 0xEEEEEEEE\n result = BytesIO()\n\n for i in range(len(data) // 4):\n seed2 += self.encryption_table[0x400 + (seed1 & 0xFF)]\n seed2 &= 0xFFFFFFFF\n value = struct.unpack(\"<I\", data[i*4:i*4+4])[0]\n value = (value ^ (seed1 + seed2)) & 0xFFFFFFFF\n\n seed1 = ((~seed1 << 0x15) + 0x11111111) | (seed1 >> 0x0B)\n seed1 &= 0xFFFFFFFF\n seed2 = value + seed2 + (seed2 << 5) + 3 & 0xFFFFFFFF\n\n result.write(struct.pack(\"<I\", value))\n\n return result.getvalue()", "def decrypt_data ( aes_key, data ) :\n decoded_data = decode_data( data )\n salt = decoded_data[ 0 : Crypto.Cipher.AES.block_size ]\n encrypted_data = decoded_data[ Crypto.Cipher.AES.block_size : ]\n cipher = Crypto.Cipher.AES.new( aes_key, Crypto.Cipher.AES.MODE_CFB, salt )\n decrypted_data = cipher.decrypt( 
encrypted_data )\n\n return decrypted_data", "def decrypt(key, cipher, use_custom=False):\n result = logic(key, cipher, use_custom)\n return array.array(\"B\", result)", "def cbc_decrypt(encrypted, key, iv):\n aes = AES.new(key, AES.MODE_CBC, iv)\n return strip_padding(aes.decrypt(base64.b64decode(encrypted)).decode())", "def decrypt_data_key(self, dataKeyCypher, token, userGroup):\n masterKey = self.retrieve_master_key(token=token, userGroup=userGroup)\n box = secret.SecretBox(masterKey)\n if isinstance(dataKeyCypher, str):\n dataKeyCypher = dataKeyCypher.encode('cp855')\n try:\n plainText = box.decrypt(dataKeyCypher).decode('utf-8')\n except Exception:\n raise UnableToDecryptException(\"Unable to verify cyphertext/key pair\")\n return plainText", "def xor_decrypt(ciphertext, key):\n\n\tdecrypted_char = ''\n\tdecrypted_str = ''\n\n\tfor char in ciphertext:\n\t\tdecrypted_char = chr(char ^ key)\n\t\tdecrypted_str += decrypted_char\n\n\treturn decrypted_str", "def decrypt(ciphertext: str) -> Iterable:\n return simplesubstitution.decrypt(KEY, ciphertext)", "def Decrypt(self, input_bytes):\n ciph_bytes = input_bytes[keyczar.HEADER_SIZE:]\n decrypted = self.key.decrypt(ciph_bytes)\n return self.__Decode(decrypted)", "def decrypt(cipherString, key, macKey, decode=True):\n\tencryptionType, iv, ct, mac = decodeCipherString(cipherString)\n\tif encryptionType != 2:\n\t\traise UnimplementedError(\"can not decrypt {} decryption method\".format(\n\t\t cipherString[0]))\n\tcmac = hmac.new(macKey, iv + ct, 'sha256').digest()\n\tif not macsEqual(mac, cmac):\n\t\tlog.debug(\"macsEqual error:%s:%s\", mac, cmac)\n\t\traise IOError(\"Invalid mac on decrypt\")\n\tcipher = cryptography.hazmat.primitives.ciphers.Cipher(\n\t algorithms.AES(key), modes.CBC(iv), backend=default_backend())\n\tdecryptor = cipher.decryptor()\n\tplainText = decryptor.update(ct) + decryptor.finalize()\n\tunpad = padding.PKCS7(128).unpadder()\n\tplainText = unpad.update(plainText) + unpad.finalize()\n\tif decode:\n\t\treturn plainText.decode('utf-8')\n\treturn plainText", "def decipher2(s, key): # s = message\n return decipher_raw2(s, key).rstrip(bytes('\\x00'.encode('utf-8')))", "def decrypt_data(self, encrypted_data):\n raise NotImplementedError", "def decrypt(ciphertext, key, iv, tag, associated_data=''):\n\n decryptor = Cipher(\n algorithms.AES(key), modes.GCM(iv, tag),\n backend=default_backend()).decryptor()\n\n decryptor.authenticate_additional_data(associated_data)\n\n return decryptor.update(ciphertext) + decryptor.finalize()", "def _decrypt(self, b, strip_padding=True):\n from cryptography.hazmat.primitives.ciphers \\\n import Cipher, algorithms, modes\n from cryptography.hazmat.backends import default_backend\n\n backend = default_backend()\n cypher = Cipher(\n algorithms.AES(self.__key), modes.CBC(self.__iv), backend=backend)\n decryptor = cypher.decryptor()\n result = decryptor.update(b) + decryptor.finalize()\n if strip_padding:\n result = result[:-result[-1]]\n return result", "def decrypt(self, key_file, input_file, output_file=None):\n data = self.__input_encrypted(input_file)\n iv = data[:AES.block_size]\n key = self.import_key(key_file)\n cipher = AES.new(key, AES.MODE_CBC, iv)\n\n data = self.__unpad(cipher.decrypt(data[AES.block_size:]))\n if output_file != None:\n with open(output_file, \"w\") as f:\n f.write(data)\n return data", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = 
CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def decrypt_pk(priv_key, ciphertext):\n try:\n plaintext = priv_key.decrypt(\n b64decode(ciphertext),\n padding.OAEP(\n mgf=padding.MGF1(algorithm=CryptoHash()),\n algorithm=CryptoHash(),\n label=None\n )\n )\n except UnsupportedAlgorithm as e:\n # a failure to dencrypt someone else's data is not typically a fatal\n # error, but in this particular case, the most likely cause of this\n # error is an old cryptography library\n logging.error(\"Fatal error: encryption hash {} unsupported, try upgrading to cryptography >= 1.4. Exception: {}\".format(\n CryptoHash, e))\n # re-raise the exception for the caller to handle\n raise e\n return plaintext", "def decrypt(self, encrypted):\n\n encrypted = base64.b64decode(encrypted)\n IV = encrypted[:self.BLOCK_SIZE]\n aes = AES.new(self.key, AES.MODE_CBC, IV)\n return self._unpad(aes.decrypt(encrypted[self.BLOCK_SIZE:]))", "def decrypt(self, data):", "def decrypt(self, key, value):\n key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]\n iv = value[:16]\n crypted = value[16:]\n cipher = AES.new(key,AES.MODE_CBC,iv)\n return self.pkcs5_unpad(cipher.decrypt(crypted))", "def decryptAESBlock(key, ct):\n\tif len(ct) != 16 and len(ct) != 32:\n\t\traise Exception(\"Ciphertext is not length 16 or 32\")\n\tcipher = AES.new(key, AES.MODE_ECB)\n\treturn cipher.decrypt(ct)", "def decrypt(data, key):\n data = six.ensure_binary(data)\n try:\n data = privy.peek(hidden=data, password=key)\n except ValueError:\n error = \"Unable to decrypt {cnt} bytes of data using key {k}, invalid key!\"\n error = error.format(cnt=len(data), k=key)\n raise exceptions.ModuleError(error)\n return six.ensure_text(data)", "def decrypt(self, key, encrypted):\n output = []\n padded_key = padd_key(key, encrypted)\n for i in range(len(encrypted)):\n dec_ascii = (ord(encrypted[i]) - ord(padded_key[i])) % 256\n output.append(chr(dec_ascii))\n return ''.join(output)", "def decrypt(key, cipher, plaintext):\n\n rsa = Rsa()\n\n try:\n k = TomlKeyFormatter().from_string(key.read())\n\n c = cipher.read()\n p = rsa.decrypt(c, k)\n\n plaintext.write(p)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except DecryptError:\n click.echo(\"ERROR: Key is wrong or message was badly padded before encryption\")", "def decrypt(self, input, key, iv) :\n pass", "def decipher(s, key): # s = message\n return decipher_raw(s, key).rstrip(bytes('\\x00'.encode('utf-8')))", "def decrypt_message(encrypted_message):", "def decrypt(ciphertext):\n base_decode = {'16': base64.b16decode,\n '32': base64.b32decode, '64': base64.b64decode}\n cleartext = ciphertext+''\n for i in range(encrypt_times):\n cleartext = base_decode[get_base(cleartext)](cleartext)\n return cleartext", "def decrypt(self, encrypted: str) -> str: # type: ignore\n passphrase = self.passphrase\n encrypted = base64.b64decode(encrypted) # type: ignore\n assert encrypted[0:8] == b\"Salted__\"\n salt = encrypted[8:16]\n key_iv = self.bytes_to_key(passphrase.encode(), salt, 32 + 16)\n key = key_iv[:32]\n iv = key_iv[32:]\n aes = AES.new(key, AES.MODE_CBC, iv)\n try:\n return self.unpad(aes.decrypt(encrypted[16:])).decode() # type: ignore\n except UnicodeDecodeError:\n raise ValueError(\"Wrong passphrase\")", "def 
decrypt_raw(self, key, data):\n iv = data[:AES.block_size]\n cipher = AES.new(key, AES.MODE_CBC, iv)\n data = cipher.decrypt(data[AES.block_size:])\n return self.__unpad(data)", "def decrypt(self, ciphertext, output=None):\n\n if self.decrypt not in self._next:\n raise TypeError(\"decrypt() cannot be called after encrypt()\")\n self._next = [self.decrypt]\n \n if output is None:\n plaintext = create_string_buffer(len(ciphertext))\n else:\n plaintext = output\n\n if not is_writeable_buffer(output):\n raise TypeError(\"output must be a bytearray or a writeable memoryview\")\n \n if len(ciphertext) != len(output):\n raise ValueError(\"output must have the same length as the input\"\n \" (%d bytes)\" % len(plaintext))\n\n\n result = raw_ctr_lib.CTR_decrypt(self._state.get(),\n c_uint8_ptr(ciphertext),\n c_uint8_ptr(plaintext),\n c_size_t(len(ciphertext)))\n if result:\n if result == 0x60002:\n raise OverflowError(\"The counter has wrapped around in\"\n \" CTR mode\")\n raise ValueError(\"Error %X while decrypting in CTR mode\" % result)\n \n if output is None:\n return get_raw_buffer(plaintext)\n else:\n return None", "def decrypt(key: str, encrypted: str) -> str:\n\n key_len = len(key)\n decrypted = ''\n\n # Go through the encrypted string in chunks the length of the key\n for i in range(0, len(encrypted), key_len):\n chunk = encrypted[i:i + key_len] # Pull out a chunk the size of the key\n\n # Apply the key to the chunk\n for j, c in enumerate(chunk):\n decrypted += chr(ord(key[j]) ^ ord(c))\n\n return decrypted", "def decrypt(self, ciphertext):\n text = []\n # ciphertext = ciphertext.upper()\n for char in ciphertext:\n try:\n key = math_utils.mult_mod_inv(self.a, len(self.characters)) * (self.characters.index(char) - self.b) % len(self.characters)\n # If character is not in set for cipher,\n # directly append it without transformation\n except ValueError:\n text.append(char)\n else:\n text.append(self.characters[key])\n return ''.join(text)", "def decrypt(self, message):\n return self._keypair.decrypt(message)", "def decrypt(self, path: Union[bytes, str], ciphertext: bytes) -> bytes:\n path = _to_bytes_or_null(path)\n plaintext = ffi.new(\"uint8_t **\")\n plaintext_size = ffi.new(\"size_t *\")\n ret = lib.Fapi_Decrypt(\n self._ctx, path, ciphertext, len(ciphertext), plaintext, plaintext_size\n )\n _chkrc(ret)\n return bytes(ffi.unpack(plaintext[0], plaintext_size[0]))", "def decrypt_ctr(self, ciphertext, iv):\n assert len(iv) == 16\n\n blocks = []\n nonce = iv\n for ciphertext_block in split_blocks(ciphertext):\n # CTR mode decrypt: ciphertext XOR decrypt(nonce)\n block = xor_bytes(ciphertext_block, self.decrypt_block(nonce))\n blocks.append(block)\n nonce = inc_bytes(nonce)\n\n return unpad(b''.join(blocks))", "def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'", "def decrypt(project_id, location_id, key_ring_id, crypto_key_id,\n ciphertext_file_name, plaintext_file_name):\n\n # Creates an API client for the KMS API.\n kms_client = googleapiclient.discovery.build('cloudkms', 'v1')\n\n # The resource name of the CryptoKey.\n name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(\n project_id, location_id, key_ring_id, crypto_key_id)\n\n # Read encrypted data from the input file.\n with io.open(ciphertext_file_name, 'rb') as ciphertext_file:\n ciphertext = ciphertext_file.read()\n\n # Use the KMS API to decrypt the data.\n crypto_keys = 
kms_client.projects().locations().keyRings().cryptoKeys()\n request = crypto_keys.decrypt(\n name=name,\n body={'ciphertext': base64.b64encode(ciphertext).decode('ascii')})\n response = request.execute()\n plaintext = base64.b64decode(response['plaintext'].encode('ascii'))\n\n # Write the decrypted data to a file.\n with io.open(plaintext_file_name, 'wb') as plaintext_file:\n plaintext_file.write(plaintext)\n\n print('Saved plaintext to {}.'.format(plaintext_file_name))", "def decrypt(data, key, iv, save_path=None):\n if isinstance(data, str):\n with open(data, 'rb') as f:\n data = f.read()\n pad_ch = '\\0'\n length = int(data[:16].rstrip(pad_ch.encode('utf-8')).decode('utf-8'))\n data = data[16:]\n key = _pad16(key)\n iv = _pad16(iv)\n cipher = AES.new(key, AES.MODE_CBC, iv)\n data = cipher.decrypt(data)\n data = data[:length]\n if save_path:\n with open(save_path, 'wb') as f:\n f.write(data)\n return data", "def decrypt(self, ciphertext):\n\n # Note that the state of the cipher is updated by each operation,\n # and the offset into the stream is implicit, which means that\n # it is almost always an error to use the encrypt and decrypt\n # methods of the same instance, so we do a simple check to ensure\n # that this isn't the case.\n #\n if self.prev_crypto_op and self.prev_crypto_op != self.decrypt:\n raise RuntimeError('Same instance used for encrypt/decrypt')\n self.prev_crypto_op = self.decrypt\n\n return self.rc4.update(ciphertext)", "def decrypt(key, input_token):\n try:\n target = decrypt_string(input_token.strip(), key=key)\n except InvalidToken:\n click.echo('Error: Token is invalid')\n sys.exit(1)\n\n click.echo('The decrypted result is: ', nl=False)\n click.echo(click.style(target, fg='blue'))", "def decrypt(cls, ciphertext_and_tag, aad, key, iv):", "def decrypt(data, private_key):\r\n\r\n # Retrieve session key, tag, ciphertext and nonce from file\r\n enc_session_key, nonce, tag, ciphertext = \\\r\n [ file_in.read(x) for x in (private_key.size_in_bytes(), 16, 16, -1) ]\r\n\r\n\r\n # Decrypt the session key\r\n session_key = cipher_rsa.decrypt(enc_session_key)\r\n\r\n # Decrypt the data with the AES session key\r\n cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)\r\n data = cipher_aes.decrypt_and_verify(ciphertext, tag)\r\n\r\n return data", "def heat_decrypt(value, encryption_key=None):\n encryption_key = get_valid_encryption_key(encryption_key)\n auth = base64.b64decode(value)\n iv = auth[:AES.block_size]\n cipher = AES.new(encryption_key, AES.MODE_CFB, iv)\n res = cipher.decrypt(auth[AES.block_size:])\n return res", "def decrypt_message(self, cipher):\n\t\tmessage = cipher ** self.private_key % self.hidden_primes_product\n\t\treturn message", "def dh_decrypt(priv, ciphertext):\n Group1,private, public = dh_get_key()#generate new DH pair for Bob\n iv=ciphertext[0]\n cipher=ciphertext[1]\n tag=ciphertext[2]\n pubA=ciphertext[3]\n \n #Bob derives shared secret key by multiplying his public key with Alice's private key\n shared2 = pubA.pt_mul(priv)#qA * dB\n print \"key from dec is\", shared2\n\n hashedKey=sha256(shared2.export()).digest()\n \n aes = Cipher(\"aes-128-gcm\")\n plain = aes.quick_gcm_dec(hashedKey[:16], iv, cipher, tag)#where to get IV and tag from ???\n \n return plain.encode(\"utf8\")", "def decrypt_kms_data(encrypted_data):\n if not AWS_REGION:\n return\n\n kms = boto3.client('kms', region_name=AWS_REGION)\n\n decrypted = kms.decrypt(CiphertextBlob=encrypted_data)\n\n if decrypted.get('KeyId'):\n # Decryption succeed\n decrypted_value = 
decrypted.get('Plaintext', '')\n if isinstance(decrypted_value, bytes):\n decrypted_value = decrypted_value.decode('utf-8')\n return decrypted_value", "def decrypt(self, input_u8):\n if self.__prev_key == self.__new_key:\n self.__randomize()\n key_map = {b:i for i, b in enumerate(self.cipher)}\n i = 0\n while i < len(input_u8):\n input_u8[i] = key_map[input_u8[i] ^ self.cipher[i%256]]\n i += 1\n return input_u8.decode(\"utf-8\")", "def decrypt(self,message, key):\n return self.translateMessage(message, key, \"decrypt\")", "def decrypt(path, key):\n key = load_key(key)\n\n if p.isdir(path):\n # encrypt a directory\n return decrypt_dir(path, key)\n # decrypt a file\n path = decrypt_file(path, key)\n # check if file contains suffix\n if \"-encrypted.zip\" in path:\n return decrypt_dir(path, key)\n return", "def _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password):\n\n decrypt_func = crypto_funcs[encryption_algorithm_info.encryption_cipher]\n\n # Modern, PKCS#5 PBES2-based encryption\n if encryption_algorithm_info.kdf == 'pbkdf2':\n\n if encryption_algorithm_info.encryption_cipher == 'rc5':\n raise ValueError(pretty_message(\n '''\n PBES2 encryption scheme utilizing RC5 encryption is not supported\n '''\n ))\n\n enc_key = pbkdf2(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.key_length\n )\n enc_iv = encryption_algorithm_info.encryption_iv\n\n plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)\n\n elif encryption_algorithm_info.kdf == 'pbkdf1':\n derived_output = pbkdf1(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.key_length + 8\n )\n enc_key = derived_output[0:8]\n enc_iv = derived_output[8:16]\n\n plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)\n\n elif encryption_algorithm_info.kdf == 'pkcs12_kdf':\n enc_key = pkcs12_kdf(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.key_length,\n 1 # ID 1 is for generating a key\n )\n\n # Since RC4 is a stream cipher, we don't use an IV\n if encryption_algorithm_info.encryption_cipher == 'rc4':\n plaintext = decrypt_func(enc_key, encrypted_content)\n\n else:\n enc_iv = pkcs12_kdf(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.encryption_block_size,\n 2 # ID 2 is for generating an IV\n )\n plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)\n\n return plaintext", "def decrypt(self, cipher):\n D = (((int_mapping(c) - k) % 26) for k, c in zip(cycle(self.key), cipher))\n return ''.join(char_mapping(n) for n in D)", "def decrypt_message(message: bytes, receiver_private_key: RsaKey) -> bytes:\n iv = message[:IV_LEN]\n enc_aes_key = message[IV_LEN:IV_LEN + receiver_private_key.size_in_bytes()] # Assume encryption has been done with same key size\n enc_message = message[IV_LEN + receiver_private_key.size_in_bytes():]\n\n cipher_rsa = PKCS1_OAEP.new(receiver_private_key)\n aes_key = cipher_rsa.decrypt(enc_aes_key)\n\n cipher_aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(cipher_aes.decrypt(enc_message), AES.block_size) # Padding have to be removed", "def _decrypt_data_key(self, encrypted_data_key, algorithm, encryption_context):\n # Wrapped 
EncryptedDataKey to deserialized EncryptedData\n encrypted_wrapped_key = aws_encryption_sdk.internal.formatting.deserialize.deserialize_wrapped_key(\n wrapping_algorithm=self.config.wrapping_key.wrapping_algorithm,\n wrapping_key_id=self.key_id,\n wrapped_encrypted_key=encrypted_data_key,\n )\n # EncryptedData to raw key string\n plaintext_data_key = self.config.wrapping_key.decrypt(\n encrypted_wrapped_data_key=encrypted_wrapped_key, encryption_context=encryption_context\n )\n # Raw key string to DataKey\n return DataKey(\n key_provider=encrypted_data_key.key_provider,\n data_key=plaintext_data_key,\n encrypted_data_key=encrypted_data_key.encrypted_data_key,\n )", "def decryptor(iv = os.urandom(16), key = os.urandom(32), bc = backend):\n\tcipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend = bc)\n\treturn iv, key, cipher.decryptor()", "def decrypt_message(self, encrypted_message):\n f = Fernet(bytes(self.key))\n decrypted_message = f.decrypt(encrypted_message)\n return decrypted_message", "def decrypt(ciphertext, key, verbose=False):\n Nb = 4\n Nk = int((len(key) * 4) / 32)\n Nr = Nk + 6\n w = key_expansion(text_to_bytes(key), Nb, Nr, Nk)\n state = text_to_matrix(ciphertext)\n\n print_round(0, 'iinput', matrix_to_text(state), verbose)\n add_round_key(state, w, Nr, Nb)\n print_round(0, 'ik_sch', get_round_key(\n w, Nr, Nb), verbose)\n\n for round in range(Nr-1, 0, -1):\n round_num = Nr-round\n\n print_round(round_num, 'istart', matrix_to_text(state), verbose)\n\n inv_shift_rows(state)\n print_round(round_num, 'is_row', matrix_to_text(state), verbose)\n\n inv_sub_bytes(state)\n print_round(round_num, 'is_box', matrix_to_text(state), verbose)\n\n print_round(round_num, 'ik_sch', get_round_key(w, round, Nb), verbose)\n add_round_key(state, w, round, Nb)\n\n print_round(round_num, 'ik_add', matrix_to_text(state), verbose)\n inv_mix_columns(state)\n\n print_round(Nr, 'istart', matrix_to_text(state), verbose)\n\n inv_shift_rows(state)\n print_round(Nr, 'is_row', matrix_to_text(state), verbose)\n\n inv_sub_bytes(state)\n print_round(Nr, 'is_box', matrix_to_text(state), verbose)\n\n print_round(Nr, 'ik_sch', get_round_key(w, 0, Nb), verbose)\n add_round_key(state, w, 0, Nb)\n\n print_round(Nr, 'ioutput', matrix_to_text(state), verbose)\n\n return matrix_to_text(state)", "def decrypt(self, phrase):\n keyword = input(\"What keyword is it encrypted with? 
\")\n plaintext = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n ciphertext = []\n decrypted = []\n for i in keyword.upper():\n if i not in ciphertext:\n ciphertext.append(i)\n for i in plaintext:\n if i not in ciphertext:\n ciphertext.append(i)\n\n key_dict = dict(zip(plaintext, ciphertext))\n\n for i in phrase.upper():\n if i == \" \":\n decrypted.append(\" \")\n else:\n for key, value in key_dict.items():\n if i == value:\n decrypted.append(key)\n\n return \"\".join(decrypted)", "def decrypt( raw, key, iv ):\n result = ''\n tmp_iv = iv \n ciphertext = pad(raw)\n\n for i in xrange(0, len(ciphertext) / BS):\n lower_bound = i * 16\n upper_bound = (i+1) * 16\n \n tmp = AES.new(key, AES.MODE_OFB, tmp_iv).decrypt( ciphertext[lower_bound:upper_bound] )\n tmp_iv = ciphertext[lower_bound:upper_bound]\n result += tmp\n\n return result", "def decrypt(pwd, data):\n\n ct = b64decode(data['ct'])\n salt = b64decode(data['salt'])\n tag_start = len(ct) - data['ts'] // 8\n tag = ct[tag_start:]\n ciphertext = ct[:tag_start]\n\n mode_class = getattr(modes, data['mode'].upper())\n algo_class = getattr(algorithms, data['cipher'].upper())\n\n kdf = _kdf(data['ks'], iters=data['iter'], salt=salt)[0]\n key = kdf.derive(bytes(pwd, \"utf-8\"))\n cipher = Cipher(\n algo_class(key),\n mode_class(\n b64decode(data['iv']),\n tag,\n min_tag_length=data['ts'] // 8\n ),\n backend=_BACKEND\n )\n\n dec = cipher.decryptor()\n return dec.update(ciphertext) + dec.finalize()", "def decrypt(text: str, key: str = None):\n if not text.isdecimal():\n raise ValueError(\"Encrypted text must contain only numbers.\")\n tmpres = []\n lkey = []\n if key is not None:\n lkey = list(key.encode(\"utf-8\"))\n i = 0\n counter = 0\n while i < len(text):\n l = int(text[i])\n tmp = text[i + 1:i + l + 1]\n i += l + 1\n if not tmp:\n break\n if lkey:\n c = int(tmp) - lkey[counter % len(lkey)]\n else:\n pm = 1 if tmp[0] == \"0\" else -1\n ri = int(tmp[1]) * pm\n c = int(tmp[2:]) - ri\n tmpres.append(c)\n counter += 1\n return bytes(tmpres).decode(\"utf8\")", "def decrypt(self, enc):\n\n enc = base64.b64decode(enc)\n iv = enc[:AES.block_size]\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8')", "def decrypt(self, input, iv) :\n pass", "def decrypt_message(encrypted_message):\r\n\r\n # conversion to bytes\r\n encrypted_message = bytes(encrypted_message, \"ascii\")\r\n\r\n # loading key\r\n key = load_key()\r\n\r\n # creating a fernet object\r\n f = Fernet(key)\r\n\r\n # decrypting the messsage\r\n decrypted_message = f.decrypt(encrypted_message)\r\n\r\n return decrypted_message.decode()" ]
[ "0.78190506", "0.76649165", "0.73512477", "0.7301102", "0.72868747", "0.7224076", "0.7158116", "0.7080847", "0.7073697", "0.70620877", "0.7024779", "0.70223695", "0.70039576", "0.7003598", "0.6956859", "0.6855221", "0.6831517", "0.68168175", "0.6813381", "0.679899", "0.6788728", "0.67878425", "0.6756865", "0.6752442", "0.6739608", "0.6719981", "0.6691214", "0.6678506", "0.66594785", "0.66182584", "0.66132504", "0.6607353", "0.66063964", "0.6603671", "0.65841115", "0.6542221", "0.6539751", "0.6534119", "0.6532965", "0.65320563", "0.6524521", "0.6511772", "0.6491341", "0.64805007", "0.64759415", "0.6445496", "0.64319265", "0.6427043", "0.6425292", "0.6398293", "0.6387526", "0.6387526", "0.63856655", "0.6370615", "0.6364649", "0.63607186", "0.6356446", "0.63557285", "0.63520575", "0.6336347", "0.63339955", "0.6319352", "0.63163847", "0.63155735", "0.63137394", "0.6312156", "0.6299927", "0.6294557", "0.62918305", "0.62869567", "0.62836784", "0.6278321", "0.62650764", "0.6254009", "0.62452596", "0.62258905", "0.6193308", "0.61921656", "0.61911064", "0.6175598", "0.61710525", "0.6168516", "0.6158199", "0.6147649", "0.61361694", "0.61159754", "0.61037695", "0.6096927", "0.608178", "0.6074527", "0.60701764", "0.60679096", "0.6065262", "0.6050375", "0.60467535", "0.60466623", "0.6046185", "0.6024708", "0.6021477", "0.60080266" ]
0.8391707
0
Method that decrypts a file using the decrypt_symmetric method and writes the output of this decryption to a file named gcp-key.json
def decrypt_from_file(self, file_path):\n        # open and decrypt byte file\n        f = open(file_path, "rb").read()\n        decrypted = self.decrypt_symmetric(f)\n        json_string = decrypted.decode("utf-8")\n\n        # write string to json file\n        destination_file_name = Path("downloaded-key/gcp-key.json")\n        destination_file_name.touch(exist_ok=True)  # creates file if it does not yet exist\n        destination_file_name.write_text(json_string)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrypt_symmetric(self, ciphertext):\n from google.cloud import kms_v1\n\n # Creates an API client for the KMS API.\n client = kms_v1.KeyManagementServiceClient()\n\n # The resource name of the CryptoKey.\n name = client.crypto_key_path_path(self.project_id, self.location_id, self.key_ring_id,\n self.crypto_key_id)\n # Use the KMS API to decrypt the data.\n response = client.decrypt(name, ciphertext)\n return response.plaintext", "def decrypt(self, filename):\n\t f = Fernet(self.key)\n\t with open(filename, \"rb\") as file:\n\t # read the encrypted data\n\t encrypted_data = file.read()\n\t # decrypt data\n\t decrypted_data = f.decrypt(encrypted_data)\n\t # write the original filename\n\t return decrypted_data", "def decrypt(project_id, location_id, key_ring_id, crypto_key_id,\n ciphertext_file_name, plaintext_file_name):\n\n # Creates an API client for the KMS API.\n kms_client = googleapiclient.discovery.build('cloudkms', 'v1')\n\n # The resource name of the CryptoKey.\n name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(\n project_id, location_id, key_ring_id, crypto_key_id)\n\n # Read encrypted data from the input file.\n with io.open(ciphertext_file_name, 'rb') as ciphertext_file:\n ciphertext = ciphertext_file.read()\n\n # Use the KMS API to decrypt the data.\n crypto_keys = kms_client.projects().locations().keyRings().cryptoKeys()\n request = crypto_keys.decrypt(\n name=name,\n body={'ciphertext': base64.b64encode(ciphertext).decode('ascii')})\n response = request.execute()\n plaintext = base64.b64decode(response['plaintext'].encode('ascii'))\n\n # Write the decrypted data to a file.\n with io.open(plaintext_file_name, 'wb') as plaintext_file:\n plaintext_file.write(plaintext)\n\n print('Saved plaintext to {}.'.format(plaintext_file_name))", "def _decrypt(self):\n self._outfile = os.path.join(self.dest, self.plain_file)\n self._infile = self.encrypted_file\n self._log.info(\"Decrypting file '%s' to '%s'\", self.encrypted_file, self._outfile)\n with open(self.encrypted_file, \"rb\") as enc_file:\n openssl(\n \"enc\",\n \"-aes-256-cbc\",\n \"-d\",\n \"-pass\",\n \"file:{secret}\".format(secret=self.secret.keyfile),\n _in=enc_file,\n _out=self._outfile,\n )\n self._log.info(\"File '%s' decrypted to '%s'\", self.encrypted_file, self._outfile)\n return True", "def _decrypt(self, src_filepath, dest_filepath):\r\n self.log.info(\"Decrypting file {0} to {1}.\".format(src_filepath, dest_filepath))\r\n\r\n gpg = gnupg.GPG(options=self.gpg_options)\r\n key_data = open(self.key_file, mode='rb').read()\r\n import_result = gpg.import_keys(key_data)\r\n self.log.info(\"Key import results: {0}\".format(import_result.results))\r\n\r\n with open(src_filepath, 'rb') as f:\r\n status = gpg.decrypt_file(f,\r\n passphrase=self._passphrase,\r\n output=dest_filepath)\r\n self.log.info(\"ok: {0}, status:{1}, stderr: {2}\".format(status.ok, status.status, status.stderr))\r\n\r\n if status.ok and self.remove_encrypted:\r\n os.remove(src_filepath)\r\n\r\n if not status.ok:\r\n raise AirflowException(\"Failed to decrypt file {0}: {1}\"\r\n .format(src_filepath, status.stderr))\r\n\r\n self.log.info(\"Completed file decryption.\")", "def decrypt_file(open_name:str, write_name:str, key:str):\n\n\n with open(write_name, \"wb\") as f:\n key = key.encode()\n for (i, part_of_picture) in enumerate(PictureSlices(open_name)):\n round_key = b\"%d%s randominfix %d\" % (i, key, i)\n decrypted_part = xor(part_of_picture, sha512(round_key).digest())\n f.write(decrypted_part)", "def asym_dec(self, ciph, 
keyfile):\n ciph = ciph.split('\\0')\n ciphkey_len = int(ciph[0])\n ciph = '\\0'.join(ciph[1:])\n ciphkey = ciph[:ciphkey_len]\n ciph = ciph[ciphkey_len:]\n\n passphrase = xsystem([self.sslname, 'rsautl', '-decrypt', '-inkey',\n keyfile], ciphkey)\n if not passphrase:\n warning('keymanagement: Unable to perform asymmetric decryption\\n')\n return None\n\n return self.sym_dec(ciph, passphrase)", "def decrypt_file(self, file_name, key):\n with open(file_name, 'rb') as fo:\n try:\n ciphertext = fo.read()\n except:\n print \"[-] Error opening file {0} for reading.\".format(file_name)\n return\n try:\n dec = self.decrypt(ciphertext, key)\n except:\n print \"[-] Decryption failed.\"\n return\n\n with open(file_name[:-4], 'wb') as fo:\n try:\n fo.write(dec)\n except:\n print \"[-] Error writing out file {0}\".format(file_name[:-4])\n return\n\n os.chmod(file_name[:-4], 0600)\n return file_name[:-4]", "def decrypt_file(filename, key):\n f = Fernet(key)\n with open(filename, \"rb\") as file:\n # read the encrypted data\n encrypted_data = file.read()\n # decrypt data\n decrypted_data = f.decrypt(encrypted_data)\n # delete file\n remove(filename)\n # generate new filename\n new_filename = generate_new_filename(filename, key, False)\n # write the encrypted file\n with open(new_filename, \"wb\") as file:\n print(\"Decrypted: \" + new_filename)\n file.write(decrypted_data)\n\n return new_filename", "def decrypt_file(self, input_file_name='', output_file_name=''):\n\n # Checking if input and output files selected right\n assert input_file_name and isfile(input_file_name), \"Input file wasn't selected!\"\n assert output_file_name, \"Output file wasn't selected!\"\n\n with open(output_file_name, 'wb') as output_file:\n # To iterate file as int values, I'm using generator\n input_file = self._open_file_longint(input_file_name)\n try:\n alpha = input_file.__next__()\n beta = input_file.__next__()\n except StopIteration:\n raise AssertionError(\"Input file is empty! 
Nothing to decrypt.\")\n\n x = self.keys['private']\n p = self.keys['public']['p']\n\n while alpha and beta:\n message_byte = bytes(chr((beta % p * (pow(alpha, (p - 1 - x), p))) % p), \"ascii\")\n output_file.write(message_byte)\n try:\n alpha = input_file.__next__()\n beta = input_file.__next__()\n except StopIteration:\n alpha = 0\n beta = 0\n return 1", "def decrypt_file(path_to_enc_file, target_ext):\n\t\tencrypted_string = EncryptDecrypt.file_to_string(path_to_enc_file)\n\t\tdecrypted_string = EncryptDecrypt.hex_to_ascii_string(encrypted_string)\n\t\tenc_file_name, _ = os.path.splitext(path_to_enc_file)\n\t\twith open(enc_file_name+\".\"+target_ext, \"w+\") as df:\n\t\t\tdf.write(decrypted_string)\n\t\t#os.remove(path_to_enc_file)", "def main():\n # file = None\n # for arg in sys.argv:\n # if \".txt\" in arg or \".py\" not in arg or \".log\" not in arg:\n # file = arg\n\n file = input(\"Enter a file: \")\n\n file_data = Cryptography()\n file_data.file = file\n\n crypt_type = input(\"Please enter 'E' to encrypt or 'D' to decrypt\\n>> \")\n file_data.crypt_type = crypt_type\n\n crypt_type = \"encrypt\" if crypt_type == 'E' else \"decrypt\"\n\n file_data.crypt_method = file_data.crypt_method\n\n key = input(\"Please enter a key for your data\\n>> \")\n file_data.key = key\n\n print(f\"crypt_method: {file_data.crypt_method}\")\n new_data = file_data.crypt_methods[file_data.crypt_method]()\n\n crypt_methods = defaultdict(str,\n {'C': \"Caesar\",\n 'M': \"Monoalphabetic\",\n 'P': \"Polyalphabetic\"})\n\n if DEBUG is False:\n crypt_method = crypt_methods[file_data.crypt_method]\n new_file_name = f\"{crypt_method}_{crypt_type.capitalize()}ed.txt\"\n logger.info(f\"{type(new_data)}: {new_data}\")\n Cryptography.write(new_file_name, new_data)\n print(f\"Your new {crypt_type}ed file has been created as \" +\n f\"{new_file_name}.\")", "def decrypt_file(self, key):\n k, iv, meta_mac = MegaCrypto.get_cipher_key(key)\n ctr = Crypto.Util.Counter.new(\n 128, initial_value=(\n (iv[0] << 32) + iv[1]) << 64)\n cipher = Crypto.Cipher.AES.new(\n MegaCrypto.a32_to_str(k),\n Crypto.Cipher.AES.MODE_CTR,\n counter=ctr)\n\n self.pyfile.setStatus(\"decrypting\")\n self.pyfile.setProgress(0)\n\n file_crypted = encode(self.last_download)\n file_decrypted = file_crypted.rsplit(self.FILE_SUFFIX)[0]\n\n try:\n f = open(file_crypted, \"rb\")\n df = open(file_decrypted, \"wb\")\n\n except IOError, e:\n self.fail(e.message)\n\n encrypted_size = os.path.getsize(file_crypted)\n\n checksum_activated = self.config.get(\n \"activated\", default=False, plugin=\"Checksum\")\n check_checksum = self.config.get(\n \"check_checksum\", default=True, plugin=\"Checksum\")\n\n cbc_mac = MegaCrypto.Checksum(\n key) if checksum_activated and check_checksum else None\n\n progress = 0\n for chunk_start, chunk_size in MegaCrypto.get_chunks(encrypted_size):\n buf = f.read(chunk_size)\n if not buf:\n break\n\n chunk = cipher.decrypt(buf)\n df.write(chunk)\n\n progress += chunk_size\n self.pyfile.setProgress(int((100.0 / encrypted_size) * progress))\n\n if checksum_activated and check_checksum:\n cbc_mac.update(chunk)\n\n self.pyfile.setProgress(100)\n\n f.close()\n df.close()\n\n self.log_info(_(\"File decrypted\"))\n os.remove(file_crypted)\n\n if checksum_activated and check_checksum:\n file_mac = cbc_mac.digest()\n if file_mac == meta_mac:\n self.log_info(_('File integrity of \"%s\" verified by CBC-MAC checksum (%s)') %\n (self.pyfile.name.rsplit(self.FILE_SUFFIX)[0], meta_mac))\n else:\n self.log_warning(_('CBC-MAC checksum for file 
\"%s\" does not match (%s != %s)') %\n (self.pyfile.name.rsplit(self.FILE_SUFFIX)[0], file_mac, meta_mac))\n self.checksum_failed(\n file_decrypted, _(\"Checksums do not match\"))\n\n self.last_download = decode(file_decrypted)", "def decrypt_text_file(self):\r\n\t\t#Ensures that the file has something that can be decrypted.\r\n\t\tfile_contains_message = True\r\n\t\twhile file_contains_message:\r\n\t\t\tfile_exists = True\r\n\t\t\t#Checks to see if the file exists.\r\n\t\t\twhile file_exists:\r\n\t\t\t\tself.text_file_name = input(\"Please enter the name of the text file you wish to decrypt in format |file_name.txt|.--> \")\r\n\t\t\t\tif \".txt\" in self.text_file_name:\r\n\t\t\t\t\tfile_exists = Doc_Control().check_for_file(self.text_file_name)\r\n\t\t\t\telse: \r\n\t\t\t\t\tcontinue\r\n\t\t\t#Decrypts message but verifys correct key before giving user their decrypted message.\r\n\t\t\twhile True: \r\n\t\t\t\tself.message = Doc_Control().open_file(self.text_file_name)\r\n\t\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\t\tfile_contains_message = False\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"Your file does not contain an encryptable message.\")\r\n\t\t\t\t\tbreak\r\n\t\tself.right_key = True\r\n\t\twhile self.right_key:\r\n\t\t\tself.setup_key_decrypt()\r\n\t\t\tself.my_code = Decryptor(self.message, self.key).transfer_decrypt()\r\n\t\t\tself.verify_decrypt_key()\r\n\t\tself.output_file = Doc_Control().assign_output_file()\r\n\t\toutput_file_obj = open(self.output_file, 'w')\r\n\t\toutput_file_obj.write(self.my_code)\r\n\t\toutput_file_obj.close()\t\t\r\n\t\tprint(\"\\nYour file has been decrypted.\")", "def decrypt_csv_file(encrypted_csv_path, decrypted_csv_path, password):\n csv_list = decrypt_csv(encrypted_csv_path, password)\n write_csv(csv_list, decrypted_csv_path)", "def decrypt(self, path):\n with open(path, \"rb\") as fileh:\n gpg = qpgpg.GPG()\n try:\n decrypted = gpg.decrypt_file(fileh)\n except qpgpg.GPG.DecryptionException:\n raise\n else:\n return decrypted", "def decrypt_file(filename, auth_tag, bytestring):\n\treceived_nonce = bytestring[:16]\n\tciphertext = bytestring[16:]\n\tcipher = AES.new(my_privaeskey, AES.MODE_GCM, received_nonce)\n\tplaintext = cipher.decrypt_and_verify(ciphertext, auth_tag)\n\tf = open(filename, 'w')\n\tf.write(plaintext.decode('ascii'))", "def decryptFile(files, key):\n\tfrom os.path import splitext\n\tfrom os import unlink\n\tfrom tarfile import open as openTar\n\t\n\tif isString(files):\n\t\tfiles = [files]\n\n\tfor filename in files:\n\t\tif splitext(filename)[1][1:].upper() == ENCRYPTED_EXTENSION:\n\t\t\twith open(filename, 'rb') as fo:\n\t\t\t\tcyphered = fo.read()\n\t\t\tcontent = BytesIO(decrypt(cyphered, key))\n\t\t\ttarFilename = splitext(filename)[0]+\".TAR\"\n\t\t\ttarCopy = open(tarFilename,\"wb\")\n\t\t\ttarCopy.write(content.getvalue())\n\t\t\ttarCopy.close()\n\t\t\twith openTar(fileobj=content, mode=\"r\") as fo:\n\t\t\t\tfo.extractall(splitext(filename)[0])\n\t\t\tunlink(tarFilename)\n\t\t\ttry:\n\t\t\t\tunlink(filename)\n\t\t\texcept:\n\t\t\t\tpass", "def decryptor(file_name, key):\n\twith open(file_name, 'rb') as dfile:\n\t\tciphertext = dfile.read()\n\t\tdec = decrypt(key, ciphertext)\n\t\tdfile.close()\n\t\tdtext = \"The encrypted file was opened by macupdate.py by the user: \"\n\t\tcreateLog(dtext, 'logs/macupdate.log')\n\t\treturn dec", "def do_android_decryption(self):\r\n self.aes_decryption_key = self.extract_aes_key()\r\n self.decrypt_device_file()\r\n # join is optimized and does not 
cause O(n^2) total memory copies.\r\n self.decrypted_file = b\"\\n\".join(self.good_lines)", "def decrypt(fileLocation):\n key = load_key()\n f = Fernet(key)\n with open(fileLocation, \"rb\") as file:\n # read the encrypted data\n encrypted_data = file.read()\n # decrypt data\n decrypted_data = f.decrypt(encrypted_data)\n # write the original file\n with open(fileLocation, \"wb\") as file:\n file.write(decrypted_data)", "def decrypt_file(file, key_str):\n encrypt_data = read_raw(file)\n key = fe.key_encode(key_str)\n iv = encrypt_data[:fe.AES.block_size]\n cipher = fe.create_cipher(key, iv)\n image = cipher.decrypt(encrypt_data[fe.AES.block_size:])\n return image", "def decrypt(self, input_file, output_file):\n self.key %= 26\n plaintext = \"\"\n with open(input_file) as encrypted_text:\n self.text = encrypted_text.read()\n for char in self.text:\n if char.isalpha():\n if 65 <= ord(char) <= 90: #char is between A and Z\n if ord(char) - self.key >= 65: #65 = ord('A')\n plaintext += chr(ord(char) - self.key)\n elif ord(char) - self.key < 65:\n plaintext += chr(ord(char) - self.key + 26)\n if 97 <= ord(char) <= 122:\n if ord(char) - self.key >= 97:\n plaintext += chr(ord(char) - self.key)\n elif ord(char) - self.key < 97:\n plaintext += chr(ord(char) - self.key + 26)\n else:\n plaintext += char\n\n decrypted_file = open(output_file, 'w')\n decrypted_file.write(plaintext)\n print \"Created file: ces-decrypted.txt\"", "def decrypt(outfile, keyfile):\n decrypted = \"\"\n for index, o in enumerate(outfile):\n mod = index % 7\n k = keyfile[mod]\n d = decrypt_char(o, k)\n d_ord = ord(d)\n d_hex = hex(d_ord)\n o_repr = repr(o)\n print(f\"{index:2d} {mod:2d} {o_repr: >7s} {k: >2s} {d: >2s} {d_ord:3d} {d_hex: >5s}\")\n decrypted += d\n return decrypted", "def crypt_file(self, file_path, encrypted=False):\n\n with open(file_path, 'rb+') as f:\n _data = f.read()\n\n if not encrypted:\n## print(f'File contents pre encryption: {_data}')\n data = self.cryptor.encrypt(_data)\n## print(f'File contents post encryption: {data}')\n else:\n data = self.cryptor.decrypt(_data)\n## print(f'File content post decryption: {data}')\n\n file=open(file_path,'wb')\n file.write(data)", "def __output_encrypted(self, data, key_len, filename, iv):\n with open(filename, \"w\") as f:\n f.write(START_HEADER + \"\\n\")\n\n key = \"Description\"\n val = \"Crypted file\"\n f.write(self.gen_key_val(key, val))\n\n key = \"Method\"\n val = \"AES\"\n f.write(self.gen_key_val(key, val))\n\n key = \"File name\"\n val = filename\n f.write(self.gen_key_val(key, val))\n\n key = \"IV\"\n val = binascii.hexlify(iv)\n f.write(self.gen_key_val(key, val))\n\n key = \"Data\"\n val = base64.b64encode(data)\n # val = data\n f.write(self.gen_key_val(key, val))\n\n f.write(END_HEADER + \"\\n\")", "def decrypt(path, key):\n key = load_key(key)\n\n if p.isdir(path):\n # encrypt a directory\n return decrypt_dir(path, key)\n # decrypt a file\n path = decrypt_file(path, key)\n # check if file contains suffix\n if \"-encrypted.zip\" in path:\n return decrypt_dir(path, key)\n return", "def encrypt_file(file, target_path, key):\n file_name = file.split('/')[-1] + '.enc'\n image = convert_content(file, key)\n write_raw(image, os.path.join(target_path, file_name))", "def decrypt_using_gpg(self, gpg_file, extract_target=None):\n if not os.path.isfile(f\"{gpg_file}.gpg\"):\n os.symlink(gpg_file, f\"{gpg_file}.gpg\")\n\n gpg_file_link = f\"{gpg_file}.gpg\"\n tar_fn = f\"{gpg_file}.tar.gz\"\n try:\n cmd = [\n \"gpg\",\n \"--verbose\",\n \"--batch\",\n \"--yes\",\n 
f\"--output={tar_fn}\",\n \"--pinentry-mode\",\n \"loopback\",\n f\"--passphrase-file={env.GPG_PASS_FILE}\",\n \"--decrypt\",\n gpg_file_link,\n ]\n run(cmd, suppress_stderr=True)\n log(f\"#> GPG decrypt {ok()}\")\n _remove(gpg_file)\n os.unlink(gpg_file_link)\n except Exception as e:\n print_tb(e)\n raise e\n # finally:\n # os.unlink(gpg_file_link)\n\n if extract_target:\n try:\n untar(tar_fn, extract_target)\n except Exception as e:\n raise Exception(\"Could not extract the given tar file\") from e\n finally:\n cmd = None\n _remove(f\"{extract_target}/.git\")\n _remove(tar_fn)", "def _write_encrypted_pem(self, passphrase, tmpfile):\n key = PKey()\n key.generate_key(TYPE_RSA, 1024)\n pem = dump_privatekey(FILETYPE_PEM, key, \"blowfish\", passphrase)\n with open(tmpfile, \"w\") as fObj:\n fObj.write(pem.decode(\"ascii\"))\n return tmpfile", "def decrypt_file(fname, key):\n decrypted_fname = \"{}.decrypted\".format(fname)\n ccipher = AESCipher(key=key)\n with open(fname, \"r\") as fd:\n data = ccipher.decrypt(fd.read())\n with open(decrypted_fname, \"w\") as fd_out:\n fd_out.write(data)\n fd_out.truncate()\n return decrypted_fname", "def sym_dec(self, ciph, passphrase):\n (rfd, wfd) = xpipe()\n os.write(wfd, passphrase + '\\n')\n plain = xsystem([self.sslname, self.symmetric, '-d', '-pass',\n 'fd:' + str(rfd)], ciph)\n xclose(wfd)\n xclose(rfd)\n if not plain:\n warning('keymanagement: Unable to decrypt because %s does not exist\\n' %(self.sslname))\n return None\n\n return plain", "def export_key(self, filename, aes_key):\n with open(filename, \"w\") as f:\n f.write(START_HEADER + \"\\n\")\n\n key = \"Description\"\n val = \"Secret key\"\n f.write(self.gen_key_val(key, val))\n\n key = \"Method\"\n val = \"AES\"\n f.write(self.gen_key_val(key, val))\n\n key = \"Key length\"\n val = str(self.convert_num_hex(len(aes_key)))\n f.write(self.gen_key_val(key, val))\n\n key = \"Secret key\"\n val = binascii.hexlify(aes_key)\n f.write(self.gen_key_val(key, val))\n\n f.write(END_HEADER + \"\\n\")", "def decrypt_data(self, master_pass, website, filename): \n\n if os.path.isfile(filename):\n try:\n with open(filename, 'r') as jdata:\n jfile = json.load(jdata)\n nonce = bytes.fromhex(jfile[website][\"nonce\"])\n password = bytes.fromhex(jfile[website][\"password\"])\n except KeyError:\n raise PasswordNotFound\n else:\n raise PasswordFileDoesNotExist\n # add extra characters and take first 16 to make sure key is right.\n formatted_master_pass = master_pass + \"================\"\n master_pass_encoded = formatted_master_pass[:16].encode(\"utf-8\")\n cipher = AES.new(master_pass_encoded, AES.MODE_EAX, nonce = nonce)\n plaintext_password = cipher.decrypt(password).decode(\"utf-8\")\n\n return plaintext_password", "def decrypt_file(self, file, crypted_file):\n pyAesCrypt.decryptFile(file, crypted_file, self.key, self.bufferSize)", "def decrypt(priv_key, ciphertext):\n pk_encrypted_secret_key = ciphertext['pk_encrypted_secret_key']\n sym_encrypted_data = ciphertext['sym_encrypted_data']\n # TODO: secure delete\n secret_key = decrypt_pk(priv_key, pk_encrypted_secret_key)\n encoded_string = decrypt_symmetric(secret_key, sym_encrypted_data)\n return decode_data(encoded_string)", "def main():\n print(\"Reading from config.json\")\n download_decrypt_store = DownloadDecryptStore()\n print(\"Downloading key from storage-bucket\")\n file_path = download_decrypt_store.download_key_from_blob()\n print(\"Decrypting downloaded file\")\n download_decrypt_store.decrypt_from_file(file_path)\n print(\"Completed\")", "def 
decrypt_0(self, inFile, outFile):\n in_ = open(inFile, \"r\")\n out = open(outFile, \"w\")\n self.decrypt(in_, out)\n in_.close()\n out.close()", "def decryptDir(cipFilename, key):\n\timport zipfile\n\tfrom os import remove\n\tdecryptFile(cipFilename, key)\n\tzipFilename = normalizePath(getDirectoryFilename(cipFilename) + \"\\\\\" + getName(cipFilename) + \".zip\")\n\tunzip = zipfile.ZipFile(zipFilename, 'r')\n\tunzip.extractall(getDirectoryFilename(cipFilename))\n\tunzip.close()\n\tremove(zipFilename)", "def do_ios_decryption(self):\r\n try:\r\n self.aes_decryption_key = self.extract_aes_key()\r\n except DecryptionKeyInvalidError:\r\n self.aes_decryption_key = self.get_backup_encryption_key()\r\n self.used_ios_decryption_key_cache = True\r\n \r\n self.decrypt_device_file()\r\n # join is optimized and does not cause O(n^2) total memory copies.\r\n self.decrypted_file = b\"\\n\".join(self.good_lines)", "def decrypt_to_file_object(data, key, iv, save_path=None):\n data = decrypt(data, key, iv, save_path)\n return io.BytesIO(data)", "def test_encrypt_symmetric(self):\n fake_passphrase = 'fake_passphrase'\n fake_input_file = 'fake-input-file'\n fake_output_dir = 'fake-output-dir'\n fake_output_extension = '.fake-extension'\n\n with patch('os.path.exists', return_value=True):\n with patch('iceit.crypto.gnupg.GPG') as mock_gpg:\n mock_gpg.return_value = mock_gpg\n with patch('iceit.crypto.open', mock_open(read_data=\"fake input file data\"), create=True) as mock_open_obj:\n encryptor = self.test_init()\n\n output_file_name = encryptor.encrypt_symmetric(\n passphrase=fake_passphrase, input_file=fake_input_file,\n output_dir=fake_output_dir, output_extension=fake_output_extension)\n\n self.assertEqual(1, mock_gpg.encrypt_file.call_count)\n\n # make sure the output file name is composed correctly\n (call_name, call_args, call_kwargs) = mock_gpg.encrypt_file.mock_calls[0]\n self.assertTrue(call_kwargs['symmetric'])\n file_name = call_kwargs['output']\n self.assertTrue(file_name.startswith(fake_output_dir))\n self.assertTrue(fake_input_file in file_name)\n self.assertTrue(file_name.endswith(fake_output_extension))\n\n self.assertEqual(file_name, output_file_name)", "def decrypt(self, key_file, input_file, output_file=None):\n data = self.__input_encrypted(input_file)\n iv = data[:AES.block_size]\n key = self.import_key(key_file)\n cipher = AES.new(key, AES.MODE_CBC, iv)\n\n data = self.__unpad(cipher.decrypt(data[AES.block_size:]))\n if output_file != None:\n with open(output_file, \"w\") as f:\n f.write(data)\n return data", "def decrypt_file(path, key):\n # if file extension does not end with encrypted file extension, skip\n if os.path.splitext(path)[1] != settings.ENCRYPTED_FILE_EXTENSION:\n return\n f = Fernet(key)\n # keep reading, decrypting and writting to file separate\n # incase decrypting fail file doesn't get truncated\n # read\n try:\n with open(path, \"rb\") as file:\n file_content = file.read()\n # decrypt\n text = f.decrypt(file_content)\n # write to file\n with open(path, \"wb\") as file:\n file.write(text)\n # remove encrypted file extension from file\n path_components = os.path.split(path)\n except PermissionError:\n # not enough permission, skip\n return\n except FileNotFoundError:\n # file is an alias, skip\n return\n os.rename(\n path,\n os.path.join(\n path_components[0], os.path.splitext(path_components[1])[0]\n )\n )", "def main(key_file: Optional[str]) -> None:\n # Generate a new 256-bit private key if no key is specified.\n if not key_file:\n customer_key_bytes = 
os.urandom(32)\n else:\n with open(key_file, \"rb\") as f:\n customer_key_bytes = f.read()\n\n google_public_key = get_google_public_cert_key()\n wrapped_rsa_key = wrap_rsa_key(google_public_key, customer_key_bytes)\n\n b64_key = base64.b64encode(customer_key_bytes).decode(\"utf-8\")\n\n print(f\"Base-64 encoded private key: {b64_key}\")\n print(f\"Wrapped RSA key: {wrapped_rsa_key.decode('utf-8')}\")", "def passwd_decryption(self):\n with open(self.key_path, 'rb') as input_key:\n for line in input_key:\n key = line\n with open(self.pass_path, 'rb') as input_password:\n for line in input_password:\n password = line\n cipher_suit = Fernet(key)\n plain_password = cipher_suit.decrypt(password)\n plain_password = bytes(plain_password).decode('utf-8')\n \n return plain_password", "def decrypt(self, data):", "def _get_encrypted_file(zipfile, pgp_key):\n rsa_pub, _ = pgpy.PGPKey.from_blob(pgp_key)\n message = pgpy.PGPMessage.new(zipfile, file=True)\n pgpfile = '{}.pgp'.format(zipfile)\n encrypted_message = rsa_pub.encrypt(message)\n with open(pgpfile, 'wb') as encrypted_file:\n encrypted_file.write(encrypted_message.__bytes__())\n return pgpfile", "def decrypt(self,timerPrinting=False):\n\n\t\tt = time.time() \n\t\tif self.extension in self.path:\n\t\t\twith open(self.path,'rb') as infile:\n\t\t\t\tfile_data = infile.read()\n\t\t\t# #Start Checking the Platform\n\t\t\t# if platform.system() == 'Windows':\n\t\t\t# \tself.path = self.path.split('\\\\')[-1]\n\t\t\t# elif platform.system() == 'Linux':\n\t\t\t# \tself.path = self.path.split('/')[-1]\n\t\t\t# # END Checking\n\t\t\t# print('Decryption of '+ self.path +\"...\")\n\t\t\t######################### Blowfish Decryption Algorithm ###############\n\t\t\tbs = Blowfish.block_size\n\t\t\trealData = base64.b64decode(file_data)[8:]\n\t\t\tiv = base64.b64decode(file_data)[:8]\n\t\t\tdecrypt = Blowfish.new(self.key, Blowfish.MODE_CBC, iv)\n\t\t\tself.decrypt = decrypt.decrypt(realData)\n\t\t\t########################### End Blowfish #########################\n\t\t\t#print('Writing in your file...')\n\t\t\tself.out = self.path.replace(self.extension,'')\n\t\t\tos.remove(self.path)\n\t\t\twith open(self.out,'wb') as outfile:\n\t\t\t\toutfile.write(self.decrypt)\n\t\t\tif timerPrinting:\n\t\t\t\tprint(\"Done in \",time.time() - t)\n\t\t\t\n\t\telse:\n\t\t\tprint('The File is Not Encrypted To Decrypted.')", "def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)", "def decrypt_symmetric(secret_key, ciphertext, ttl=None):\n f = Fernet(secret_key)\n # fernet requires the ciphertext to be bytes, it will raise an exception\n # if it is a string\n return f.decrypt(bytes(ciphertext), ttl)", "def decrypt(self, key, dir):\n self.encrypt(key, dir)", "def download_key_from_blob(self):\n source_blob_name = \"generated-keys/{}\".format(self.service_account_email)\n destination_name = self.service_account_email\n\n # generate destination folder and file if they do not yet exist\n Path(\"downloaded-key/\").mkdir(parents=True, exist_ok=True) # creates folder if not exists\n folder = Path(\"downloaded-key/\") # folder where all the newly generated keys go\n destination_file_name = folder / \"{}\".format(destination_name) # file named after service-account name\n destination_file_name.touch(exist_ok=True)\n\n # download the file and store it locally\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(self.bucket_name)\n blob = bucket.blob(source_blob_name)\n 
blob.download_to_filename(destination_file_name)\n\n # prints source and destination indicating successful download\n print('Encrypted key downloaded to -----> \\n {}.'.format(\n source_blob_name,\n destination_file_name))\n\n return destination_file_name", "def sign(file_name: str) -> None:\n print(\"Signing the file...\")\n file_name = os.path.join('data', file_name)\n\n file1 = open(\"data/key.txt\", \"r\")\n file2 = open(\"data/secret_key.txt\", \"r\")\n p = int(file1.readline().rstrip())\n q = int(file1.readline().rstrip())\n g = int(file1.readline().rstrip())\n h = int(file1.readline().rstrip())\n a = int(file2.readline().rstrip())\n\n loop = True\n while loop:\n r = random.randint(1, q - 1)\n c1 = square_multiply(g, r, p)\n c1 = c1 % q\n c2 = sha_hash(file_name) + (a * c1)\n rinverse = compute_inverse(r, q)\n c2 = (c2 * rinverse) % q\n\n if c1 != 0 and c2 != 0:\n loop = False\n\n print('hash = ', sha_hash(file_name))\n print('c1 = ', c1)\n print('c2 = ', c2)\n file = open(\"data/signature.txt\", \"w\")\n file.write(str(c1))\n file.write(\"\\n\")\n file.write(str(c2))\n print(\"cipher stored at signature.txt\")", "def save(self, filepath, client_data_list, new_passphrase=None):\n import struct\n\n plain_text = json.dumps(\n client_data_list, sort_keys=True, indent=4, separators=(',', ': '),\n cls=ClientDataEncoder)\n header = b''.join([\n struct.pack(\"!I\", self.__magic_number),\n struct.pack(\"!I\", self.__file_version),\n struct.pack(\"!I\", self.__key_stretches),\n struct.pack(\"!I\", self.__magic_number)])\n data = b''.join([\n header,\n bytes(plain_text, 'utf-8')])\n if new_passphrase is not None:\n self.__key = self._produce_key(new_passphrase)\n self.__iv = self._produce_iv(self.__key)\n cypher_text = self._encrypt(data)\n with open(filepath, 'wb') as f:\n f.write(header)\n f.write(cypher_text)", "def decrypt(path, key, default, output, url, token, vaultpath):\n if not key:\n key = getpass('Encryption key: ')\n\n path, file_type, file_mtime = get_file_type_and_mtime(path)\n data = get_config(path, file_type, default=False)\n data = decrypt_credentials(data, key)\n\n # Only merge the DEFAULT section after decrypting.\n if default:\n data = merge_default(data)\n\n if url:\n try:\n import hvac\n except:\n print('''\nTo use Hashicorp's Vault you must install the hvac package.\nTo install it try using the following command:\n\n pip install hvac\n''')\n exit(3)\n\n if not token:\n token = os.environ.get('VAULT_TOKEN', '')\n if not token:\n token = getpass('Vault token: ')\n \n client = hvac.Client(url=url, token=token)\n if not vaultpath:\n vaultpath = path\n\n if vaultpath[0] == '~':\n vaultpath = vaultpath[1:]\n if vaultpath[0] == '.':\n vaultpath = vaultpath[1:]\n if vaultpath[0] == '.':\n vaultpath = vaultpath[1:]\n if vaultpath[0] == '/':\n vaultpath = vaultpath[1:]\n\n data = merge_default(data)\n for heading in data:\n # kargs = { heading: json.dumps(data[heading]) }\n client.write(vaultpath + '/' + heading, **data[heading])\n\n else:\n\n if output:\n if output[0] == '.':\n output = output[1:]\n file_type = '.' 
+ output.lower()\n\n with open(path + file_type, 'w') as save_file:\n if file_type == '.json':\n json.dump(data, save_file, indent=2)\n\n elif file_type in {'.ini', '.conf'}:\n if default:\n default_section = 'DEFAULT'\n else:\n default_section = 'DEFAULT' + os.urandom(16).hex()\n config_ini = configparser.ConfigParser(\n dict_type=OrderedDict,\n default_section=default_section,\n interpolation=None)\n for heading in data:\n config_ini.add_section(heading)\n for item in data[heading]:\n config_ini.set(heading, item, data[heading][item])\n config_ini.write(save_file)\n\n else:\n write_yaml(save_file, data)", "def SaveToFile(self):\n\n if len(self.paris) == 0:\n with open(self.fileLoc, \"w\") as file:\n file.write(\" \")\n return\n\n data = \"\"\n for x in self.paris.iterkeys():\n data += \"%\" + x + \":\" + self.paris[x] + \"%\"\n \n data = self.Encrypt(data)\n\n with open(self.fileLoc, \"w\") as file:\n file.write(data)", "def decrypt(\n in_file: str,\n out_file: str,\n pass_phrase: str,\n salt: str = DEFAULT_HS_SALT,\n digest_algo=hashes.SHA256(),\n length=32,\n iterations=100000,\n encoding: str = 'utf-8') -> None:\n kdf = PBKDF2HMAC(\n algorithm=digest_algo,\n length=length,\n salt=salt.encode(encoding),\n iterations=iterations,\n backend=default_backend()\n )\n key = base64.urlsafe_b64encode(kdf.derive(pass_phrase.encode(encoding)))\n f = Fernet(key)\n check_argument(os.path.exists(in_file), \"Input file \\\"{}\\\" does not exist\", in_file)\n with open(in_file) as f_in_file:\n with open(out_file, 'w') as f_out_file:\n f_out_file.write(f.decrypt(f_in_file.read().encode(encoding)).decode(encoding))\n check_state(os.path.exists(out_file), \"Unable to decrypt file \\\"{}\\\"\", in_file)", "def get_client_secret(filename):\n with open(filename) as file:\n json_file = json.load(file)\n\n cyphertext = json_file['CiphertextBlob']\n blob = base64.b64decode(cyphertext)\n client = boto3.client('kms')\n secret = client.decrypt(CiphertextBlob=blob)['Plaintext']\n s = secret.decode('ascii')\n return json.loads(s)", "def encrypt_and_store_file(path_to_original_file):\n\t\toriginal_file_name, _ = os.path.splitext(path_to_original_file)\n\t\toutput_string = EncryptDecrypt.ascii_string_to_hex(EncryptDecrypt.file_to_string(path_to_original_file))\n\t\twith open(original_file_name+\".enc\", \"w+\") as save_file:\n\t\t\tsave_file.write(output_string)\n\t\tos.remove(path_to_original_file)", "def decode_encrypted_wallet(password: str, path=\"wallet.dat\"):\n db = DB()\n db.open(path, \"main\", DB_BTREE, DB_THREAD | DB_RDONLY)\n data = defaultdict(list)\n\n # iterate database\n for k, v in db.items():\n key_name = k[1:1+k[0]].decode()\n if key_name == 'ckey':\n # encryptedKey: [(py, encrypted_sk), ]\n data[key_name].append((k[6:6+33], v[1:1+96]))\n elif key_name == 'mkey':\n # masterKey: encrypted_key, salt, DerivationIterations\n data[key_name] = [v[1:1+48], v[50:50+8], int.from_bytes(v[4+58:4+58+8], 'little')]\n elif key_name == 'key':\n # normalKey:\n raise Exception('this wallet is not encrypted!')\n db.close()\n\n # decrypt\n cp = Pycrypto.set_key_from_passphrase(\n password.encode(), data['mkey'][1], data['mkey'][2])\n mk = cp.decrypt(data['mkey'][0]) # import masterKey as key\n cp.set_key(mk)\n for pk, encrypted_sk in data['ckey']:\n cp.set_iv(double_hash(pk)) # import doubleHashed pk as IV\n sk = cp.decrypt(encrypted_sk)\n if sk2pk(sk) != pk:\n raise Exception('wrong password! 
{} {}'.format(sk2pk(sk).hex(), pk.hex()))\n ck = hashlib.new('ripemd160', hashlib.sha256(pk).digest()).digest()\n yield sk, pk, ck", "def decrypt(ctx, input, output):\n gpg_key = _get_gpg_key(_get_pem(ctx().source), ctx().user, ctx().verbose)\n _run_gpg_with_key(gpg_key, [\n '--decrypt', '--recipient',\n ctx().user, '--trust-model', 'always', '--armor'\n ], input, output, ctx().verbose)", "def main(file, key, encrypt, host, username, password):\n ssh = SSHClient()\n ssh.load_system_host_keys()\n if password != '-':\n ssh.connect(host, username=username, password=password)\n else:\n ssh.connect(host, username=username)\n \n scp = SCPClient(ssh.get_transport())\n\n if encrypt:\n print(\"Encrypting... \", end=\"\")\n to_send = encrypt_file(file, key)\n print(\"Done.\")\n print(\"Sending to {}...\".format(host), end=\"\")\n scp.put(to_send)\n print(\"Done.\")\n else:\n print(decrypt_file(file, key))", "def load_key():\n return open(\"Secret.key\",\"rb\").read()", "def ExportToJson(self, filepath):\n accountinfo = json.dumps({DataConnection.accountName : self._accountName , \n DataConnection.accountKey : self._accountKey, \n DataConnection.accountKind : self._accountKind})\n encryptedinfo = Secrets._encryptContents(accountinfo)\n filehandle = open(filepath, 'wb')\n filehandle.write(encryptedinfo)\n print(\"Account info has been stored to '%s'\" % filepath)\n return True", "def write_key():\n key = fernet.Fernet.generate_key()\n keyfile = open(KEY_PATH,'wb')\n keyfile.write(key)\n keyfile.close()", "def generate_key():\n key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(key)", "def encrypt_file(filename, key):\n # init fermet\n f = Fernet(key)\n with open(filename, \"rb\") as file:\n # read all file data\n file_data = file.read()\n # encrypt data\n encrypted_data = f.encrypt(file_data)\n # delete file\n remove(filename)\n # generate new filename\n new_filename = generate_new_filename(filename, key, True)\n # write the encrypted file\n with open(new_filename, \"wb\") as file:\n print(\"Encrypted: \" + new_filename)\n file.write(encrypted_data)\n\n return new_filename", "def encryptFile(files, key, output = None):\n\tfrom os.path import commonprefix, split, normpath, join\n\tif isString(files):\n\t\tfiles = [files]\n\tcontent = tarList(files)\n\tcyphered = encrypt(content, key)\n\tif output == None:\n\t\toutput = join(split(normpath(commonprefix(files)))[0],getTimeString() + \".\" + ENCRYPTED_EXTENSION)\n\twith open(output, 'wb') as fo:\n\t\tfo.write(cyphered)\n\treturn output", "def decrypt(ciphertext: str, key: str) -> str:\n return encrypt(ciphertext, key)", "def test_encrypt_decrypt_file(tmp_path):\n encryption_material = SnowflakeFileEncryptionMaterial(\n query_stage_master_key=\"ztke8tIdVt1zmlQIZm0BMA==\",\n query_id=\"123873c7-3a66-40c4-ab89-e3722fbccce1\",\n smk_id=3112,\n )\n data = \"test data\"\n input_file = tmp_path / \"test_encrypt_decrypt_file\"\n encrypted_file = None\n decrypted_file = None\n try:\n with input_file.open(\"w\", encoding=UTF8) as fd:\n fd.write(data)\n\n (metadata, encrypted_file) = SnowflakeEncryptionUtil.encrypt_file(\n encryption_material, input_file\n )\n decrypted_file = SnowflakeEncryptionUtil.decrypt_file(\n metadata, encryption_material, encrypted_file\n )\n\n contents = \"\"\n with codecs.open(decrypted_file, \"r\", encoding=UTF8) as fd:\n for line in fd:\n contents += line\n assert data == contents, \"encrypted and decrypted contents\"\n finally:\n input_file.unlink()\n if encrypted_file:\n 
os.remove(encrypted_file)\n if decrypted_file:\n os.remove(decrypted_file)", "def test_encrypt_decrypt_file(tmp_path):\n encryption_material = SnowflakeFileEncryptionMaterial(\n query_stage_master_key=\"ztke8tIdVt1zmlQIZm0BMA==\",\n query_id=\"123873c7-3a66-40c4-ab89-e3722fbccce1\",\n smk_id=3112,\n )\n data = \"test data\"\n input_file = tmp_path / \"test_encrypt_decrypt_file\"\n encrypted_file = None\n decrypted_file = None\n try:\n with input_file.open(\"w\", encoding=UTF8) as fd:\n fd.write(data)\n\n (metadata, encrypted_file) = SnowflakeEncryptionUtil.encrypt_file(\n encryption_material, input_file\n )\n decrypted_file = SnowflakeEncryptionUtil.decrypt_file(\n metadata, encryption_material, encrypted_file\n )\n\n contents = \"\"\n with codecs.open(decrypted_file, \"r\", encoding=UTF8) as fd:\n for line in fd:\n contents += line\n assert data == contents, \"encrypted and decrypted contents\"\n finally:\n input_file.unlink()\n if encrypted_file:\n os.remove(encrypted_file)\n if decrypted_file:\n os.remove(decrypted_file)", "def decrypt_str(message):\n filename = f'/tmp/{get_temp_filename()}'\n filename_encrypted = f'{filename}.pem'\n filename_plain = f'{filename}.plain'\n pem_file = open(filename_encrypted, 'w')\n pem_file.write(message)\n pem_file.close()\n cmd = [\n \"openssl\",\n \"cms\",\n \"-decrypt\",\n \"-inform\", \"PEM\",\n \"-in\", filename_encrypted,\n \"-inkey\", server_key_files[\"key\"],\n \"-recip\", server_key_files[\"crt\"],\n \"-out\", filename_plain\n ]\n res_text = \"\"\n try:\n exec_cmd(cmd)\n with open(filename_plain, \"r\") as plain:\n res_text = plain.read()\n plain.close()\n os.unlink(filename_plain)\n except (OSError, subprocess.CalledProcessError) as err:\n logging.error(\"decrypt_str failed: %s\", err)\n finally:\n os.unlink(filename_encrypted)\n\n return res_text", "def decrypt_file(key=self.public, in_filename, out_filename=None, chunksize=24*1024):\n if not out_filename:\n out_filename = os.path.splitext(in_filename)[0]\n\n with open(in_filename, 'rb') as infile:\n origsize = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0]\n iv = infile.read(16)\n decryptor = AES.new(key, AES.MODE_CBC, iv)\n\n with open(out_filename, 'wb') as outfile:\n while True:\n chunk = infile.read(chunksize)\n if len(chunk) == 0:\n break\n outfile.write(decryptor.decrypt(chunk))\n\n outfile.truncate(origsize)", "def to_file(self, data, file, pubkey_id):\n must_close = False\n if isinstance(file, str):\n try:\n file = open(file, \"wb\")\n except PermissionError as e:\n raise GPG.EncryptionException(str(e))\n\n result = subprocess.run(\n [GPG.bin, \"--encrypt\", \"-r\", pubkey_id],\n input=data,\n stdout=file,\n stderr=subprocess.PIPE\n )\n if must_close:\n file.close()\n if result.returncode == 0:\n # It was successful\n return\n else:\n raise GPG.EncryptionException(result.stderr)", "def test_encrypt_decrypt(self):\n with open(self.file_path, \"rt\") as file:\n start_file = file.read()\n nonce1 = globals.generate_random_nonce()\n nonce2 = globals.generate_random_nonce()\n encrypted_file_path, additional_data = self.file_crypt.encrypt_file(\n self.file_path,\n nonce1,\n nonce2)\n file_decrypted = self.file_crypt.decrypt_file(\n file_path=encrypted_file_path,\n additional_data=additional_data)\n with open(file_decrypted, \"rt\") as file:\n end_file = file.read()\n self.assertEqual(start_file, end_file, \"Files differ!\")", "def compress_and_encrypt(files, password=None, pgp_key=''):\n if pgp_key:\n zipfile = _get_compressed_file(files)\n return _get_encrypted_file(zipfile, 
pgp_key)\n else:\n return _get_compressed_file(files, password)", "def write_cipher_text(self, data: bytes, out_file: BinaryIO, filename: str):\n\n cipher_text, tag, nonce, session_key = self.encrypt(data)\n session_key_file = f'{self.file_folder}/{self.user_id}_{filename}.bin'\n\n if not os.path.exists(session_key_file):\n with open(session_key_file, 'wb') as f:\n f.write(session_key)\n\n out_file.write(nonce)\n out_file.write(tag)\n out_file.write(cipher_text)", "def write_cipher_text(self, data: bytes, out_file: BinaryIO, filename: str):\n\n cipher_text, tag, nonce, session_key = self.encrypt(data)\n session_key_file = f'{self.file_folder}/{self.user_id}_{filename}.bin'\n\n if not os.path.exists(session_key_file):\n with open(session_key_file, 'wb') as f:\n f.write(session_key)\n\n out_file.write(nonce)\n out_file.write(tag)\n out_file.write(cipher_text)", "def _get_decryption_key(self, **options):\n\n return self._get_encryption_key(**options)", "def google_kms_encrypted_env_secret(secret_key: str) -> str:\n\n key_id = os.getenv('KEY_ID')\n ciphertext = os.getenv(secret_key)\n\n client = kms.KeyManagementServiceClient()\n # Call the API.\n decrypt_response = client.decrypt(\n request={'name': key_id, 'ciphertext': base64.b64decode(ciphertext)})\n return decrypt_response.plaintext.decode()", "def main():\n\n # performs crib dragging using initial values\n plaintext1, plaintext2 = crib_drag('', '', 0, 0)\n\n if plaintext1 is None or plaintext2 is None:\n print('No possible English decryption using the current dictionary')\n return\n\n # find the key and creates file with results\n plaintext1 = plaintext1[:CIPHER_LEN]\n plaintext2 = plaintext2[:CIPHER_LEN]\n key = find_key(plaintext1, plaintext2)\n\n with open('plaintext1.txt', 'w') as plain_file:\n plain_file.write(plaintext1)\n with open('plaintext2.txt', 'w') as plain_file:\n plain_file.write(plaintext2)\n with open('key.txt', 'wb') as plain_file:\n plain_file.write(key)", "def ReadFromFile(self):\n\n data = \"\"\n try:\n with open(self.fileLoc, \"r\") as file:\n data += file.read()\n except IOError:\n with open(self.fileLoc, \"w\") as file:\n file.write(\" \")\n return {}\n \n if len(data) == 0:\n return {}\n\n data = self.Decrypt(data)\n\n data = \"\".join(data.split())\n kvstrings = data.split(\"%\")\n kvstrings = filter(None, kvstrings)\n\n pairs = {}\n for x in kvstrings:\n kv = x.split(\":\")\n pairs[kv[0]] = kv[1]\n\n return pairs", "def _decrypt_pvtkey(self, pvtkey_file: str, passphrase: str) -> str:\n\n keydata: str = None\n if pvtkey_file:\n try:\n keydata = asyncssh.public_key.read_private_key(pvtkey_file,\n passphrase)\n except Exception as e:\n self.logger.error(\n f\"ERROR: Unable to read private key file {pvtkey_file}\"\n f\"for jump host due to {str(e)}\")\n\n return keydata", "def decrypt(self, cypher):\n\n cypher = b64decode(cypher)\n key_private = RsaPrivateKey.Read(self.crypt_private)\n return key_private.Decrypt(cypher)", "def __input_encrypted(self, filename):\n fields = self.input_file(filename)\n\n if (\"Description\" not in fields or \"Method\" not in fields or\n \"Data\" not in fields or \"IV\" not in fields or\n fields[\"Method\"] != \"AES\"):\n raise Exception(\"AES crypted file not formated correctly.\")\n\n data = fields[\"Data\"]\n iv = fields[\"IV\"]\n return binascii.unhexlify(iv) + base64.b64decode(data)", "def rsa_file_to_privatekey(filename):\r\n fileobject = file(filename,'r')\r\n privatekeystring = fileobject.read()\r\n fileobject.close()\r\n\r\n return rsa_string_to_privatekey(privatekeystring)", 
"def decrypt(key, ciphertext):\n data = fk(keyGen(key)[1], ip(ciphertext))\n return fp(fk(keyGen(key)[0], swapNibbles(data)))", "def decrypt_image(key, in_filename, out_filename):\n chunksize = 64 * 1024\n # out_filename = in_filename + \".dec\"\n\n with open(in_filename, 'rb') as infile:\n origsize = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0]\n iv = infile.read(16)\n decryptor = AES.new(key, AES.MODE_CBC, iv)\n\n with open(out_filename, 'wb') as outfile:\n while True:\n chunk = infile.read(chunksize)\n if len(chunk) == 0:\n break\n outfile.write(decryptor.decrypt(chunk))\n\n outfile.truncate(origsize)", "def testCryptFileRoundtrip(self):\n try:\n ok = True\n fp = self.__testFilePath\n cu = CryptUtils()\n ky = cu.newKey()\n self.__makeTestFile(fp)\n #\n hDOrg = cu.getFileHash(fp)\n fnEnc = fp + \".enc\"\n ok = cu.encryptFile(fp, fnEnc, ky)\n self.assertEqual(ok, True)\n fnRec = fnEnc + \".rec\"\n ok = cu.decryptFile(fnEnc, fnRec, ky)\n self.assertEqual(ok, True)\n hDRec = cu.getFileHash(fnRec)\n self.assertEqual(hDOrg[\"hashDigest\"], hDRec[\"hashDigest\"])\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def write_key(self):\n\t key = Fernet.generate_key()\n\t with open(\"key.key\", \"wb\") as key_file:\n\t key_file.write(key)", "def parse_secrets_file(self, path_to_file) -> dict:\n config = self.import_secrets_file(path_to_file)\n\n self.traverse_and_decrypt(config)\n\n return config", "def decrypt(self,password,indata):\n key = hashlib.sha256(password).digest()\n return decrypt_file(key,indata)", "def hack_text_file(self):\r\n\t\t#Ensures that the file has something that can be hacked.\r\n\t\tfile_contains_message = True\r\n\t\twhile file_contains_message:\r\n\t\t\tfile_exists = True\r\n\t\t\t#Checks to see if the file exists.\r\n\t\t\twhile file_exists:\r\n\t\t\t\tself.text_file_name = input(\"Please enter the name of the text file you wish to decrypt in format |file_name.txt|.--> \")\r\n\t\t\t\tif \".txt\" in self.text_file_name:\r\n\t\t\t\t\tfile_exists = Doc_Control().check_for_file(self.text_file_name)\r\n\t\t\t\telse: \r\n\t\t\t\t\tcontinue\r\n\t\t\t#Verifys file contains a message before before running through hack and giving user their list of decryption hits.\r\n\t\t\twhile True: \r\n\t\t\t\tself.message = Doc_Control().open_file(self.text_file_name)\r\n\t\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\t\tfile_contains_message = False\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"Your file does not contain a hackable message.\")\r\n\t\t\t\t\tbreak\t\t\t\r\n\t\tmax_key = len(self.message)\r\n\t\tself.i = 1\r\n\t\tpotential_hits = []\r\n\t\t#Runs through all potential keys. 
\r\n\t\tfor self.i in range(1, max_key):\r\n\t\t\tprint(f\"Trying key #{self.i} of {max_key} possible keys\")\t\t\t\r\n\t\t\tself.my_code = Decryptor(self.message, self.i).transfer_decrypt()\r\n\t\t\tself.hack_plausible = False\r\n\t\t\tself.verify_hack_key()\r\n\t\t\tif self.hack_plausible:\r\n\t\t\t\tpotential_hits.append(f\"Key #{self.i} yeilded {self.percent_english}% english words after decryption.\\n\" + \"\\t\" + self.my_code[:50])\r\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\r\n\t\tprint(\"Hacking results:\\n\")\r\n\t\tfor hit in potential_hits:\r\n\t\t\tprint(\"\\t\" + hit + \"|\\n\")", "def generate_keyfile(csrf_key, session_key):\n output = file_template.safe_substitute(dict(\n csrf_key=csrf_key, session_key=session_key\n ))\n if os.path.exists(file_name):\n if options.force is None:\n print \"Warning: secret_keys.py file exists. Use '-f' flag to force overwrite.\"\n else:\n write_file(output)\n else:\n write_file(output)", "def decrypt_csv(in_path, password, key_length=32):\n with open(in_path, 'rb') as in_file:\n password = str.encode(password)\n block_size = AES.block_size\n salt = in_file.read(block_size)#[len(b'Salted__'):]\n key, i_v = derive_key_and_iv(password, salt, key_length, block_size)\n cipher = AES.new(key, AES.MODE_CBC, i_v)\n next_chunk = b''\n finished = False\n csv_bytes = b''\n while not finished:\n chunk = next_chunk\n next_chunk = cipher.decrypt(in_file.read(1024 * block_size))\n if len(next_chunk) == 0:\n padding_length = chunk[-1]\n if padding_length < 1 or padding_length > block_size:\n raise ValueError(\"Password incorrect\")\n chunk = chunk[:-padding_length]\n finished = True\n csv_bytes += chunk\n try:\n csv_list = bytes_to_csv(csv_bytes)\n except:\n raise ValueError(\"Password incorrect\")\n return csv_list", "def generate_key(self):\n self.key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(self.key)", "def decrypt(self, cryptod, secret):\n try:\n # From json to python crypto dict\n data = base64.b64decode(\n bytes(cryptod['ciphervalue'], encoding=self.encoding))\n # Decrypt\n iv = base64.b64decode(bytes(cryptod['iv'], encoding=self.encoding))\n algorithm = self._algorithm(\n secret=secret, name=cryptod['algorithm'])\n cipher = Cipher(algorithm, modes.CBC(iv), backend=self.backend)\n decryptor = cipher.decryptor()\n data = decryptor.update(data) + decryptor.finalize()\n # Unpad\n unpadder = padding.PKCS7(cipher.algorithm.block_size).unpadder()\n data = unpadder.update(data) + unpadder.finalize()\n # Unzip\n data = str(gzip.decompress(data), encoding=self.encoding)\n cipher = None\n # json string\n except ValueError as ve:\n raise ValueError('Decrypt failure!') from ve\n try:\n data = json.loads(data)\n except ValueError as ve:\n raise ValueError('JSON formatting failure!') from ve\n return data", "def decrypt_file(self, in_filename, out_filename=None, chunksize=24*1024):\n\t\tif not out_filename:\n\t\t\t#file_name = in_filename.split('/')[-1]\n\t\t\tout_filename = os.path.splitext(in_filename)[0]\n\n\t\twith open(in_filename, 'rb') as infile:\n\t\t\torigsize = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0]\n\t\t\tiv = infile.read(16)\n\t\t\tdecryptor = AES.new(self.key, AES.MODE_CBC, iv)\n\n\t\t\twith open(out_filename, 'wb') as outfile:\n\t\t\t\twhile True:\n\t\t\t\t\tchunk = infile.read(chunksize)\n\t\t\t\t\tif len(chunk) == 0:\n\t\t\t\t\t\tbreak\n\t\t\t\t\t#print \"chunks\", 
decryptor.decrypt(chunk)\n\t\t\t\t\toutfile.write(decryptor.decrypt(chunk))\n\t\t\t\toutfile.truncate(origsize)\n\n\t\t\treturn out_filename\n\t\t#f = open(out_filename, 'r')\n\t\t#print f.read()", "def decryptor(infile: str, outfile: str, password: str, mode: str) -> int:\n\n dec = Decrypt(infile)\n\n if mode.upper() == 'AES':\n decrypted_data = dec.AES(password)\n elif mode.upper() == 'DES':\n decrypted_data = dec.DES(password)\n elif mode.upper() == 'SALSA20':\n decrypted_data = dec.Salsa20(password)\n else:\n return 2\n\n if not decrypted_data:\n cleanup(outfile)\n return 3\n\n if not outfile.endswith(dec.extension):\n outfile += dec.extension\n write_data(decrypted_data, outfile)\n return 0" ]
[ "0.64325106", "0.63213474", "0.622638", "0.61633515", "0.61598235", "0.6144704", "0.608841", "0.60840195", "0.6062278", "0.5996128", "0.5985062", "0.59499764", "0.59409845", "0.59369785", "0.5880555", "0.5879515", "0.5877775", "0.58735037", "0.5852493", "0.58381975", "0.58349544", "0.5832787", "0.58187425", "0.5814456", "0.58140635", "0.58129454", "0.5790948", "0.5751469", "0.5715876", "0.5712323", "0.57103556", "0.5706531", "0.56945425", "0.5679782", "0.5673808", "0.56637216", "0.56634766", "0.5651403", "0.56448495", "0.56246454", "0.5600333", "0.5598589", "0.55969286", "0.55812955", "0.55747354", "0.554528", "0.55398905", "0.5529448", "0.5513171", "0.55084646", "0.5486902", "0.54713213", "0.5466501", "0.5464208", "0.545665", "0.54353803", "0.5415958", "0.5383902", "0.53752184", "0.53697455", "0.53608555", "0.53594494", "0.53570336", "0.533442", "0.53307307", "0.53014594", "0.5297996", "0.5294497", "0.52938205", "0.5289477", "0.5263846", "0.5263846", "0.52559125", "0.5254674", "0.5250981", "0.5230822", "0.5216849", "0.52125317", "0.52125317", "0.52111965", "0.5209466", "0.5199281", "0.51929116", "0.51883894", "0.5180487", "0.5173309", "0.51725894", "0.5170496", "0.5159254", "0.5159099", "0.5155619", "0.51486224", "0.5147568", "0.5143199", "0.51382947", "0.5137792", "0.51370823", "0.5134478", "0.51333684", "0.5128722" ]
0.801987
0
Downloads key for configured service account and stores it in the folder generatedkey/
def download_key_from_blob(self): source_blob_name = "generated-keys/{}".format(self.service_account_email) destination_name = self.service_account_email # generate destination folder and file if they do not yet exist Path("downloaded-key/").mkdir(parents=True, exist_ok=True) # creates folder if not exists folder = Path("downloaded-key/") # folder where all the newly generated keys go destination_file_name = folder / "{}".format(destination_name) # file named after service-account name destination_file_name.touch(exist_ok=True) # download the file and store it locally storage_client = storage.Client() bucket = storage_client.get_bucket(self.bucket_name) blob = bucket.blob(source_blob_name) blob.download_to_filename(destination_file_name) # prints source and destination indicating successful download print('Encrypted key downloaded to -----> \n {}.'.format( source_blob_name, destination_file_name)) return destination_file_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_key():\n data = check_args(('cloudProvider', ))\n provider = jobs.init_provider(data, True)\n key = encrypt_key(provider.get_key(), data['username'])\n return make_response(keyName=provider.keyname, key=key)", "def generate_key():\r\n # generating key\r\n key = Fernet.generate_key()\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n # writing key in file\r\n with open(key_dir, \"wb\") as keyFile:\r\n keyFile.write(key)", "def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)", "def _keypath(self) -> pathlib.Path:\n home = pathlib.Path.home()\n keyfile = home / \".cmdc\" / \"apikey\"\n keyfile.parent.mkdir(parents=True, exist_ok=True)\n return keyfile", "def generate_key(self):\n self.key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(self.key)", "def generate_key():\n key = Fernet.generate_key()\n with open(\"pass.key\", \"wb\") as key_file:\n key_file.write(key)", "def setup_keys():\n if os.path.isfile(\"key.txt\"):\n message = \"Key already generated\"\n else:\n secret = secrets.token_urlsafe(64)\n message = \"Secret generated and saved in key.txt\"\n with open(\"key.txt\", \"w\") as fd:\n fd.write(secret)\n return json.dumps({'message': message})", "def util_generate_key(conf_file=None):\n keyname = DebRepo(**config(conf_file=conf_file)).generate_key()\n print(keyname)", "def generate_key():\n key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(key)", "def load_key():\n return open(\"Secret.key\",\"rb\").read()", "def private_key():\n return \"Toholampi summer festival 2017 has the most harcore rock bands\"", "def apikey(serv):\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n '{0}.key'.format(serv))\n key = open(path, \"r\").read().rstrip()\n return key", "def generate_key(domain_name):\n key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=default_backend()\n )\n\n #storing client's private key\n with open(domain_name + \".key\", \"wb\") as f:\n f.write(key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption(),\n ))\n\n return key", "def CreateKeyFile():\n keyfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'genrsa',\n '-out', keyfile,\n '2048'\n ]\n _RunCommand(cmd)\n return keyfile", "def create_key ():", "def private_key(self):", "def gen_tlsauth_key():\n cmd = ['/usr/sbin/openvpn', '--genkey', 'secret', 'ta.tmp']\n ret = subprocess.check_call(cmd)\n with open('ta.tmp') as key:\n key = key.read()\n os.remove('ta.tmp')\n return key", "def load_key():\n return open(\"secret.key\", \"rb\").read()", "def write_key(api_key, output_path, client_module=pyalveo):\n client = client_module.Client(api_key, API_URL, use_cache=False)\n outfile = open(output_path, 'w')\n outfile.write(api_key)\n outfile.close()", "def get_api_key ():\n PROJECT_PATH = os.path.abspath(os.path.dirname(__name__))\n key_file = open(PROJECT_PATH + \"/key_api.txt\", \"r\")\n return (key_file.read()).rstrip('\\n')", "def read_key():\n path = os.path.join(os.path.dirname(__file__), 'data')\n f = open(os.path.join(path, 'credential.txt'), 'r')\n key = f.read()\n f.close()\n return key", "def write_key():\n key = fernet.Fernet.generate_key()\n keyfile = open(KEY_PATH,'wb')\n keyfile.write(key)\n keyfile.close()", "def generate_key(self):\n key = 
rsa.generate_private_key(\n public_exponent=self.settings['key_public_exponent_size'],\n key_size=self.settings['key_size'],\n backend=default_backend()\n )\n return key", "def _set_keystore_path(self) -> None:\n response = self.single_call(\"hmy keys location\").strip()\n if not os.path.exists(response):\n os.mkdir(response)\n self.keystore_path = response", "def generate_secret_key(self, server_name: str) -> str:\n if self.config_in_use():\n raise BaseConfigInUseError()\n\n signing_key_path = join(self.config_dir, server_name + \".signing.key\")\n subprocess.run([\"generate_signing_key.py\", \"-o\", signing_key_path])\n with open(signing_key_path, \"r\") as f:\n return f.read()", "def _get_api_key():\n api_key_directory = os.getenv(\"KOKORO_GFILE_DIR\")\n api_key_file = os.path.join(api_key_directory, \"resultstore_api_key\")\n assert os.path.isfile(api_key_file), (\n \"Must add --api_key arg if not on \"\n \"Kokoro or Kokoro environment is not set up properly.\"\n )\n with open(api_key_file, \"r\") as f:\n return f.read().replace(\"\\n\", \"\")", "def get_access_key(self, keyfile):\n my_key = AccessKey.create_key_from_file(keyfile)\n my_key.store_keys()\n return my_key.key", "def download_data_key(self, name):\n temp_data_key = self._get_data_key(name)\n # File wasn't found on s3 so we return.\n if not temp_data_key:\n return\n\n output_file = \"/dev/shm/\" + name + \".tmp.key\"\n\n try:\n file = open(output_file, \"w\")\n except Exception as e:\n print \"[-] Error opening /dev/shm for writing.\"\n return\n\n file.write(temp_data_key)\n os.chmod(output_file, 0600)\n\n print \"[+] {0} data key saved to {1}\".format(name, output_file)", "def get_private_key(self):\n# _log.debug(\"get_private_key: node_name={}\".format(self.node_name))\n with open(os.path.join(self.runtime_dir, \"private\", \"private.key\"), 'rb') as f:\n return f.read()", "def getLocalKey(cmd, path):\n\n executeCmd(cmd)\n out = subprocess.Popen(\"cat\" + \" \" + path, shell=True,\n stdout=subprocess.PIPE)\n key = out.stdout.read().rstrip('\\n')\n logging.debug(\"Local key has been generated successfully : %s \", key)\n return key", "def get_skey_file(addresses_path, address_type, name):\n return get_address_key_file(addresses_path, address_type, 'signing_key', name)", "def adminGetUserKey(name):\n keys = hl.getUser(\"Name\",name)[\"Keys\"]\n #If on a production server, use actual path\n if os.path.isdir(keys_dir):\n filename = keys_dir + keys + '.ovpn' \n #if not os.path.exists(filename):\n # hl.zipUserKeys(keys) \n \n return send_file(filename, as_attachment=True)\n #Else use relative dev path\n else:\n return send_file('static\\\\Test_client1.zip', as_attachment=True)", "def create_keypair(address_type, addresses_path, address_prefix, name):\n vkey_file = get_vkey_file(addresses_path, address_prefix, name)\n skey_file = get_skey_file(addresses_path, address_prefix, name)\n\n if(path.exists(vkey_file)) :\n print(address_prefix, \"key pair already exists for\", name)\n return\n \n makedirs(path.dirname(vkey_file), mode=0o777, exist_ok=True)\n\n run_params = ['cardano-cli', address_type, 'key-gen', '--verification-key-file', vkey_file, '--signing-key-file', skey_file]\n subprocess_run(run_params, capture_output=False, text=True)\n return", "def intermediate():\n click.echo(STEP_PATH / \"secrets/intermediate_ca_key\")", "def generate_key():\n return get_token_generator().generate_token()", "def _get_key_path(self, key_name, serial):\n return '%s%s/%d_%s.key' % (self.ca_dir, PRIVATE_DIR_NAME, serial,\n key_name)", "def 
load_key():\n return open(\"pass.key\", \"rb\").read()", "def main(key_file: Optional[str]) -> None:\n # Generate a new 256-bit private key if no key is specified.\n if not key_file:\n customer_key_bytes = os.urandom(32)\n else:\n with open(key_file, \"rb\") as f:\n customer_key_bytes = f.read()\n\n google_public_key = get_google_public_cert_key()\n wrapped_rsa_key = wrap_rsa_key(google_public_key, customer_key_bytes)\n\n b64_key = base64.b64encode(customer_key_bytes).decode(\"utf-8\")\n\n print(f\"Base-64 encoded private key: {b64_key}\")\n print(f\"Wrapped RSA key: {wrapped_rsa_key.decode('utf-8')}\")", "def write_key(self):\n\t key = Fernet.generate_key()\n\t with open(\"key.key\", \"wb\") as key_file:\n\t key_file.write(key)", "def generate_key():\n key = crypto.Key.generate_key()\n click.echo('Private Key (len {}):: \\n{}'.format(\n len(key.get_privkey()),\n hexlify(key.get_privkey())))\n click.echo('Public Key (len {})::\\n{}'.format(\n len(key.get_pubkey()),\n hexlify(key.get_pubkey())))", "def _get_key_link(self, key_name):\n return '%s%s/%s.key' % (self.ca_dir, PRIVATE_DIR_NAME, key_name)", "def generate_key_image(\n self, output: OutputInfo, private_view_key: bytes, private_spend_key: bytes\n ) -> bytes:", "def build_key(key):\n return os.path.join(PREFIX, key)", "def upload_key():\n data = check_args(('cloudProvider', 'key'))\n provider = jobs.init_provider(data, True)\n key = decrypt_key(data['key'], data['username'])\n provider.save_key(key)\n return make_response()", "def get_service_acct_pem_file(args):\n # Now that we have the email\n with tempfile.NamedTemporaryFile() as ptwelve:\n with tempfile.NamedTemporaryFile() as pem:\n subprocess.check_call([\n 'gcloud', 'iam', 'service-accounts', 'keys', 'create',\n ptwelve.name,\n '--key-file-type=p12',\n '--project', args.project,\n '--iam-account', args.service_account,\n ])\n subprocess.check_call([\n 'openssl', 'pkcs12',\n '-in', ptwelve.name,\n '-out', pem.name,\n '-nodes',\n '-passin', 'pass:notasecret',\n ])\n yield pem.name", "def generate_key(self, **options):\n\n return security_utils_services.generate_rsa_key(**options)", "def reveal_seed():\n password = getpass.getpass('Password from keystore: ') # Prompt the user for a password of keystore file\n\n configuration = Configuration().load_configuration()\n api = get_api()\n\n try:\n wallet = api.get_private_key(configuration, password)\n click.echo('Account prv key: %s' % str(wallet.get_private_key().hex()))\n\n except InvalidPasswordException:\n click.echo('Incorrect password!')", "def setup_service_key():\n if get_var('AFS_AKIMPERSONATE'):\n keytab = get_var('KRB_AFS_KEYTAB')\n if keytab and not os.path.exists(keytab):\n cell = get_var('AFS_CELL')\n realm = get_var('KRB_REALM')\n enctype = get_var('KRB_AFS_ENCTYPE')\n _KeytabKeywords().create_service_keytab(keytab, cell, realm, enctype, akimpersonate=True)\n if get_var('AFS_KEY_FILE') == 'KeyFile':\n run_keyword(\"Create Key File\")\n elif get_var('AFS_KEY_FILE') == 'rxkad.keytab':\n run_keyword(\"Install rxkad-k5 Keytab\")\n elif get_var('AFS_KEY_FILE') == 'KeyFileExt':\n run_keyword(\"Create Extended Key File\", get_var('KRB_AFS_ENCTYPE'))\n else:\n raise AssertionError(\"Unsupported AFS_KEY_FILE! 
%s\" % (get_var('AFS_KEY_FILE')))", "def PRIVATE_RSA_KEYFILE_PATH() :\n return os.path.join( config.CONFIG_PATH(), \"%s-private.pem\" % RSA_KEYPAIR_PREFIX() )", "def get_key_data_filepath():\n global key_filepath, directory\n filename = 'key.csv'\n key_filepath = os.path.join(directory, filename)", "def generate_key():\n return get_random_bytes(KEY_SIZE)", "def test_create_digital_access_key(self):\n pass", "def save(self, save_dir):\n path = os.path.join(save_dir, self.name + \".pem\")\n with open(path, \"wb\") as f:\n f.write(self.public_key)", "def create_account_key(configuration):\n # Checking if the specified key file already exists\n if os.path.exists(configuration.cm_key):\n raise KeyAlreadyExistsError(\"Key file already exists\")\n else:\n logger.debug(\"The key file does not exist. All good.\")\n\n logger.info(\"Generating private key\")\n private_key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=4096,\n backend=default_backend()\n )\n\n logging.info('Saving private key to: %s', configuration.cm_key)\n\n # Saving private key to file - we must be careful with the permissions\n with os.fdopen(os.open(configuration.cm_key, os.O_WRONLY | os.O_CREAT, 0o440), 'w') as key_file:\n key_file.write(private_key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n ))", "def _GetCredentials():\n return service_account.Credentials.from_service_account_file(\n KEY_FILE, scopes=_SCOPES)", "def load_key():", "def _get_path_to_key_file():\n\n if 'private_key_path' not in ctx.node.properties:\n raise NonRecoverableError(\n 'Unable to get key file path, private_key_path not set.')\n\n return os.path.expanduser(ctx.node.properties['private_key_path'])", "def generate_access_key(self):\n\t\tfrom app import app\n\t\ts = JSONWebSignatureSerializer(app.config['SECRET_KEY'])\n\t\taccess_key = s.dumps({'username': self.username}) \n\t\tself.access_key = access_key", "def get(self, key_name: str, password: str = None) -> PrivateKey:\n pass", "def get_vkey_file(addresses_path, address_type, name):\n return get_address_key_file(addresses_path, address_type, 'verification_key', name)", "def load_key(self):\n\t return open(\"key.key\", \"rb\").read()", "def root():\n click.echo(STEP_PATH / \"secrets/root_ca_key\")", "def gen_key(app):\n\tos.system('lxc-attach -n %s -- ssh-keygen -t rsa -N \"\" -f key' % app)", "def install_secret_key(app, filename='secret_key'):\n filename = os.path.join(app.instance_path, filename)\n\n try:\n app.config['SECRET_KEY'] = open(filename, 'rb').read()\n except IOError:\n print('Error: No secret key. 
Create it with:')\n full_path = os.path.dirname(filename)\n if not os.path.isdir(full_path):\n print('mkdir -p {filename}'.format(filename=full_path))\n print('head -c 24 /dev/urandom > {filename}'.format(filename=filename))\n sys.exit(1)", "def generate_input_key(\n self, output: OutputInfo, private_view_key: bytes, private_spend_key: bytes\n ) -> bytes:", "def create_privatekey():\n \n # Generate the private key\n key_jwk = wallet.create_JWK()\n response_jwk = key_jwk.export(private_key=True, as_dict=True)\n\n return response_jwk", "def _save_keys(self) -> None:\n algorithm = self.algorithm_combobox.currentText()\n filename = AesKeyGenerator(algorithm).save_session_key()\n msg_success(f\"Created keys as {filename}\")", "def get_synapse_signing_key(self):\n if not path.exists(self.synapse_signing_key_file):\n key_id = \"a_\" + self.random_string(4)\n key_content = generate_signing_key(key_id)\n with open(self.synapse_signing_key_file, \"w+\") as key_file:\n write_signing_keys(key_file, (key_content,))\n return self.synapse_signing_key_file", "def create_private_key_temp_file(cls, file_suffix):\n tmp_file = tempfile.NamedTemporaryFile(mode='w+b', suffix=file_suffix)\n f = open(tmp_file.name, \"w+\")\n f.write(DSConfig.private_key())\n f.close()\n return tmp_file", "def public_key(self):\n keyfile = self._get_field('System', 'keyfile')\n return join(self.key_path, keyfile)", "def genKey(self, otherKey):\n self.sharedSecret = self.genSecret(self.privateKey, otherKey)\n #print(\"Shared secret:\")\n #print(self.sharedSecret)\n s = hashlib.sha256()\n s.update(bytes(str(self.sharedSecret).encode()))\n self.key = s.digest()", "def getKey(filename):\n try:\n fh = open(filename, 'rb')\n except IOError:\n logging.debug(\"getKey(): Creating new secret key.\")\n key = OpenSSL.rand.bytes(32)\n writeKeyToFile(key, filename)\n else:\n logging.debug(\"getKey(): Secret key file found. Loading...\")\n key = fh.read()\n fh.close()\n return key", "def private_key_path(self):\n if self._private_key_path is not None:\n return self._private_key_path\n\n location = self.settings.Location\n if location.AttachmentName:\n self._private_key_path = 'kdbx-attachment:///{}/{}'.format(\n self.entry.path, location.AttachmentName.text)\n return self._private_key_path\n else:\n self._private_key_path = location.FileName.text\n return self._private_key_path", "def keys(self) -> None:\r\n path = Path('./config/key')\r\n global key\r\n # If the file path does not exist, create one \r\n if not path.exists():\r\n os.makedirs(path)\r\n while True:\r\n # read key.key file\r\n try:\r\n file = open(path / 'key.key', 'rb')\r\n key = file.read()\r\n file.close\r\n # when key.key file does not exist. 
Create one\r\n except FileNotFoundError:\r\n key = Fernet.generate_key()\r\n file = open(path / 'key.key', 'wb')\r\n file.write(key)\r\n file.close()\r\n continue\r\n break", "def create_user_key_file(username: str):\n\n user: User = UserModel().get_user(username=username)\n user_key: Key = user.public_key\n\n public_key: bytes = user_key.public_key\n\n if not os.path.exists(\"./ssh_ca\"):\n os.mkdir(\"./ssh_ca\")\n\n with open(f\"./ssh_ca/{username}.pub\") as public_key_file:\n public_key_file.write(public_key.decode())", "def load_device_key(self, filename):\n pass", "def gen_keys_old(name):\n d = 'keys'\n if not os.path.isdir(d):\n os.mkdir(d)\n if not os.path.isfile('%s/%s.pem'%(d,name)):\n open('%s/%s.pem'%(d,name),'w').write(Crypto.PublicKey.RSA.generate(1024,os.urandom).exportKey('PEM'))", "def gen_api_key():\r\n m = hashlib.sha256()\r\n m.update(get_random_word(12))\r\n return unicode(m.hexdigest()[:12])", "def create_apikey(self, username, api_key):\r\n return 'ApiKey %s:%s' % (username, api_key)", "def generate_key(self):\n cmd = self.generate_key_cmd()\n self.show(cmd)\n if self.dryrun:\n return None\n s, _, _ = self.as_user(cmd)\n assert s == 0, ('failed to generate key', cmd)\n keyname = self.extract_key_name()\n return keyname", "def task_cache_vm_key():\n return {'actions': [(common.rm_rf, [ssh_key_path,\n authorized_keys_path]),\n (common.mkdir_p, [ssh_key_path.parent,\n authorized_keys_path.parent]),\n f'ssh-keygen -q -N \"\" -m PEM -C hat -f {ssh_key_path}',\n f'mv {ssh_key_path}.pub {authorized_keys_path}'],\n 'uptodate': [ssh_key_path.exists,\n authorized_keys_path.exists]}", "def key_path(self):\n keypath = self._get_field('System', 'keypath')\n localpath = \"/\".join(__file__.split('/')[:-1])\n return join(localpath, keypath)", "def _create_fernet_key(self) -> str:\n\n client = boto3.client(\"ssm\", endpoint_url=os.environ.get(\"AWS_ENDPOINT\"))\n\n try:\n response = client.get_parameter(Name=self.object_name, WithDecryption=True)\n return response[\"Parameter\"][\"Value\"]\n except client.exceptions.ParameterNotFound:\n return Fernet.generate_key().decode()", "def public_key(self):", "def gen_key():\n key = os.urandom(32) # 256 bit\n return base64.b64encode(key).rstrip('=') # strip off padding", "def temp_api_key(cloud):\n payload = {'name': 'pelion_e2e_dynamic_api_key'}\n r = cloud.account.create_api_key(payload, expected_status_code=201)\n resp = r.json()\n\n log.info('Created new developer api key for test case, id: {}'.format(resp['id']))\n\n yield resp\n\n log.info('Cleaning out the generated test case developer api key, id: {}'.format(resp['id']))\n cloud.account.delete_api_key(resp['id'], expected_status_code=204)", "def load_key():\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n try:\r\n return open(key_dir, \"rb\").read()\r\n except:\r\n return None", "def private_key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"private_key\")", "def private_key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"private_key\")", "def generate_private_key():\n\treturn binascii.hexlify(os.urandom(32)).decode('utf-8').upper()", "def get_or_create_key_name(self, gen_key=True):\n keyname = self.extract_key_name()\n if keyname:\n self.report(f'found keyname: {keyname}')\n elif gen_key:\n keyname = self.generate_key()\n self.report(f'generated key: {keyname}')\n else:\n print(f'gpg key for debrepo was not found for user {self.user}. 
'\n 'please use $0 generate_key, then try this command again')\n self.report('no keyname')\n keyname = None\n return keyname", "def generateKeyFile(n: int, e: int, typ: str, filename: str):\n print(\"Generating\", typ, \"key\")\n message = str(hex(n) + '\\n' + hex(e))\n message_bytes = message.encode('ascii')\n base64_bytes = base64.b64encode(message_bytes)\n \n key = str(base64_bytes.decode(\"ascii\")) # we decode to remove the wierd characters\n \n if typ == \"private\" :\n f = open(filename + \".priv\", \"w\")\n f.write(\"---begin \" + filename + \" private key---\\n\")\n f.write(key+'\\n')\n f.write(\"---end \" + filename + \" key---\")\n f.close()\n elif typ == \"public\" :\n f = open(filename + \".pub\", \"w\")\n f.write(\"---begin \" + filename + \" public key---\\n\")\n f.write(key+'\\n')\n f.write(\"---end \" + filename + \" key---\")\n f.close()\n else :\n print(\"wrong type\")\n return", "async def apikey_bing(self, ctx, key):\n settings = loadauth()\n settings['apikey'] = key\n saveauth(settings)\n return await self.bot.say(\"Bing API key saved.\")", "def load_or_create_client_key(key_file):\n # this is based on txacme.endpoint.load_or_create_client_key, but doesn't\n # hardcode the 'client.key' filename\n acme_key_file = FilePath(key_file)\n if acme_key_file.exists():\n logger.info(\"Loading ACME account key from '%s'\", acme_key_file)\n key = serialization.load_pem_private_key(\n acme_key_file.getContent(), password=None, backend=default_backend()\n )\n else:\n logger.info(\"Saving new ACME account key to '%s'\", acme_key_file)\n key = generate_private_key(\"rsa\")\n acme_key_file.setContent(\n key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption(),\n )\n )\n return JWKRSA(key=key)", "def _generateSecretKey():\n return f\"secret.{str(datetime.now())}\"", "def newKeyGenerate():\n generate()\n return '', 204", "def randomKeyFile(file_name):\n\twith open(file_name, \"w\") as kfile:\n\t\tkey = stringGen(256)\n\t\tkfile.write(key)\n\t\tkfile.close()", "def generate_key(self):\n\n self.key = Fernet.generate_key()\n self.cryptor = Fernet(self.key)", "def getRemoteKey(cmd, path, ip, user, passwd):\n\n sshToOtherClient(ip, user, passwd, cmd)\n showKeyCmd = 'cat %s' % (path)\n remote_key = sshToOtherClient(ip, user, passwd, showKeyCmd)\n logging.debug(\"Remote key for %s has been generated successfully : %s\",\n ip, remote_key)\n return remote_key", "def get_or_create_dmcrypt_key(\n _uuid,\n key_dir,\n ):\n path = os.path.join(key_dir, _uuid)\n\n # already have it?\n if os.path.exists(path):\n return path\n\n # make a new key\n try:\n if not os.path.exists(key_dir):\n os.makedirs(key_dir, stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR)\n with file('/dev/urandom', 'rb') as i:\n key = i.read(256)\n fd = os.open(path, os.O_WRONLY|os.O_CREAT,\n stat.S_IRUSR|stat.S_IWUSR)\n assert os.write(fd, key) == len(key)\n os.close(fd)\n return path\n except:\n raise Error('unable to read or create dm-crypt key', path)" ]
[ "0.7312609", "0.6830154", "0.6603424", "0.6562619", "0.6516955", "0.6498773", "0.6481392", "0.64763385", "0.64518285", "0.6347369", "0.6296295", "0.6269412", "0.62465113", "0.6237751", "0.61916953", "0.61603403", "0.6143181", "0.6117199", "0.61056596", "0.6093765", "0.6090465", "0.6089385", "0.60882056", "0.6084267", "0.60782576", "0.60771036", "0.6071095", "0.60572505", "0.6015782", "0.599508", "0.5978808", "0.59436375", "0.5940787", "0.5939642", "0.59052587", "0.589871", "0.5894338", "0.5865731", "0.5845822", "0.584547", "0.5841524", "0.5826917", "0.58193237", "0.5804642", "0.5791712", "0.5786319", "0.5784429", "0.5783573", "0.57745504", "0.5762617", "0.5760442", "0.5750464", "0.5746498", "0.5734234", "0.5731181", "0.5730284", "0.57297444", "0.5728288", "0.5705765", "0.5686954", "0.5683211", "0.56771183", "0.56768644", "0.5676562", "0.56760675", "0.5667979", "0.5663855", "0.5662629", "0.5662368", "0.56588894", "0.5656575", "0.56554043", "0.5630237", "0.56245536", "0.5612576", "0.5605049", "0.5597093", "0.55943394", "0.5571422", "0.5569988", "0.5561786", "0.5554072", "0.55473965", "0.5546681", "0.554592", "0.5533174", "0.55177224", "0.55104184", "0.55104184", "0.55070084", "0.5494401", "0.5484943", "0.5479097", "0.5476925", "0.54760766", "0.5473449", "0.5473177", "0.5473014", "0.547207", "0.54678506" ]
0.803482
0
main method that executes the workings of the script
def main(): print("Reading from config.json") download_decrypt_store = DownloadDecryptStore() print("Downloading key from storage-bucket") file_path = download_decrypt_store.download_key_from_blob() print("Decrypting downloaded file") download_decrypt_store.decrypt_from_file(file_path) print("Completed")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_script(self) -> None:\n main()", "def run():\n main()", "def main():\n run_program()", "def run_script(self):\n pass", "def main():\n pass", "def main():\n return", "def main():\n\n BASIC.run(PROGRAM)", "def main(self) -> None:\n pass", "def main():\n #print( \"main() entered...\" )\n initialize()\n getCmdOptions()\n printVersionInfo()\n\n rc = doWork()\n\n cleanUp()\n sys.exit(rc)", "def main():\n print(\"It works!!! ;-)\")\n ###TODO### do something with the various methods/functions of this file", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main(self):\r\n pass", "def main():\n\tpass", "def script(self):", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main():\n print(\"is Running!\")", "def main(): \n # Parse Arguments\n args = parse_arguments()\n\n # Print outdir\n print(\"Writing output to \" + args.outdir)\n\n # Print start statement\n print('Starting script for ' + args.file + ' at ' + str(datetime.datetime.now()), flush=True)\n\n # Put all the files in a function that will further handle the files as dataframe\n create_df(args.file, args.outdir)\n\n # Script is finished\n print('All done for ' + args.file + ' at ' + str(datetime.datetime.now()), flush=True)", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def main(self):", "def main():\n tng.api.runner()", "def main() -> None:\n return", "def main():\n\n pass", "def main():\n Main()", "def main() -> None:\n worker = Worker()\n worker.do_work()", "def main():\n args = parse_args()\n process_args(args)", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n challenge = Challenge()\n # Get the number of saved files on last execution\n last_saved = challenge.get_last_execution()\n # Get the total of products to save\n total_objects = len(challenge.get_products(\"product_groups.json\"))\n\n # While there are products to be saved\n while last_saved < total_objects:\n create_products()\n # Updates last_saved number\n last_saved = challenge.get_last_execution()\n\n logging.info(\"[INFO] Execution done with no errors!\")\n # Sends to runner a signal different from the crash signal\n # Indicates terminated execution\n os._exit(1)", "def main(*args):\r\n print(START_MESSAGE)\r\n print(\"Script Location:\", location)\r\n print(\"Arguments Passed:\", args)", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n print(\"clewsy CLEWs Model Building Script.\")\n 
print(\"When using clewsy please reference:\")\n print(\"T. Niet and A. Shivakumar (2020): clewsy: Script for building CLEWs models.\")\n main(sys.argv[1:])", "def main():\n ...", "def main():\r\n# Checking if argument was provided\r\n if len(sys.argv) <=1:\r\n print_usage()\r\n sys.exit(1)\r\n \r\n for arg in sys.argv:\r\n # Checking if help was called\r\n if arg == \"-h\" or arg == \"--help\":\r\n print_usage()\r\n sys.exit(1)\r\n \r\n # Checking for verbose mode \r\n if arg == \"-v\" or arg == \"--verbose\":\r\n global verbose_flag\r\n verbose_flag=1\r\n\r\n # Checking for input file\r\n if arg == \"-f\" or arg == \"--file\":\r\n global default_input_path\r\n global default_output_path\r\n default_input_path = sys.argv[2]\r\n default_output_path=default_input_path[:-4] + \"_results.txt\"\r\n\r\n #if arg == \"-u\" or arg == \"--url\":\r\n # input_url = sys.argv[2]\r\n\t \r\n if os.name == \"nt\":\r\n os.system('cls')\r\n else:\r\n os.system('clear')\r\n \r\n process_from_file()", "def main():\n\tcli = Cli()\n\tcli.run()", "def main():\n\n # Chdir into script directory so to properly resolve relative paths in configuration\n os.chdir(os.path.dirname(os.path.realpath(__file__)) + \"/\")\n\n # Disable proxy as we access localhost, both to avoid overhead and issues with proxy misconfiguration\n os.environ['NO_PROXY'] = '*'\n\n # Stop any GraphDB server that we previously started and is possibly still around due to script interruption/crash\n shell(f\"{cmd_graphdb} stopall\")\n\n # Generate synthetic traces, both for populating the repositories and for the {sf, sp, pf, pp} tests\n prepare_traces()\n \n # Generate central repositories (if needed)\n for size, approach in itertools.product(sizes, approaches):\n prepare_repository(size, approach)\n \n # Run experiments (if needed)\n for size, approach in itertools.product(sizes, approaches):\n run_experiments(size, approach)", "def run_main():\n main(sys.argv)", "def main():\n opt = parse_opts()\n run(opt)", "def main():\n opt = parse_opts()\n run(opt)", "def main() -> None:\n\n download_data_tools.initial_message()\n\n # S&P 500 companies, initial year and time step\n stocks: List[str] = download_data_tools.get_stocks(['all'])\n dates: List[str] = ['1992-01', '2012-12']\n time_step: str = '1d'\n\n # Basic folders\n download_data_tools.start_folders()\n\n # Run analysis\n # Download data\n portfolio_download_data(stocks, dates, time_step)\n\n print('Ay vamos!!!')", "def Run():\r\n pass", "def main():\n sys.exit(RBExt().run(sys.argv[1:]))", "def main(self):\n self.jamf_url = self.env.get(\"JSS_URL\")\n self.jamf_user = self.env.get(\"API_USERNAME\")\n self.jamf_password = self.env.get(\"API_PASSWORD\")\n self.script_path = self.env.get(\"script_path\")\n self.script_name = self.env.get(\"script_name\")\n self.script_category = self.env.get(\"script_category\")\n self.script_priority = self.env.get(\"script_priority\")\n self.osrequirements = self.env.get(\"osrequirements\")\n self.script_info = self.env.get(\"script_info\")\n self.script_notes = self.env.get(\"script_notes\")\n self.script_parameter4 = self.env.get(\"script_parameter4\")\n self.script_parameter5 = self.env.get(\"script_parameter5\")\n self.script_parameter6 = self.env.get(\"script_parameter6\")\n self.script_parameter7 = self.env.get(\"script_parameter7\")\n self.script_parameter8 = self.env.get(\"script_parameter8\")\n self.script_parameter9 = self.env.get(\"script_parameter9\")\n self.script_parameter10 = self.env.get(\"script_parameter10\")\n self.script_parameter11 = 
self.env.get(\"script_parameter11\")\n self.replace = self.env.get(\"replace_script\")\n self.sleep = self.env.get(\"sleep\")\n # handle setting replace in overrides\n if not self.replace or self.replace == \"False\":\n self.replace = False\n\n # clear any pre-existing summary result\n if \"jamfscriptuploader_summary_result\" in self.env:\n del self.env[\"jamfscriptuploader_summary_result\"]\n script_uploaded = False\n\n # obtain the relevant credentials\n token = self.handle_uapi_auth(self.jamf_url, self.jamf_user, self.jamf_password)\n\n # get the id for a category if supplied\n if self.script_category:\n self.output(\"Checking categories for {}\".format(self.script_category))\n\n # check for existing category - requires obj_name\n obj_type = \"category\"\n obj_name = self.script_category\n category_id = self.get_uapi_obj_id_from_name(\n self.jamf_url,\n obj_type,\n obj_name,\n token,\n )\n\n if not category_id:\n self.output(\"WARNING: Category not found!\")\n category_id = \"-1\"\n else:\n self.output(\n \"Category {} found: ID={}\".format(self.script_category, category_id)\n )\n else:\n self.script_category = \"\"\n category_id = \"-1\"\n\n # handle files with a relative path\n if not self.script_path.startswith(\"/\"):\n found_template = self.get_path_to_file(self.script_path)\n if found_template:\n self.script_path = found_template\n else:\n raise ProcessorError(f\"ERROR: Script file {self.script_path} not found\")\n\n # now start the process of uploading the object\n if not self.script_name:\n self.script_name = os.path.basename(self.script_path)\n\n # check for existing script\n self.output(\n \"Checking for existing '{}' on {}\".format(self.script_name, self.jamf_url)\n )\n self.output(\n \"Full path: {}\".format(self.script_path),\n verbose_level=2,\n )\n obj_type = \"script\"\n obj_name = self.script_name\n obj_id = self.get_uapi_obj_id_from_name(\n self.jamf_url,\n obj_type,\n obj_name,\n token,\n )\n\n if obj_id:\n self.output(\n \"Script '{}' already exists: ID {}\".format(self.script_name, obj_id)\n )\n if self.replace:\n self.output(\n \"Replacing existing script as 'replace_script' is set to {}\".format(\n self.replace\n ),\n verbose_level=1,\n )\n else:\n self.output(\n \"Not replacing existing script. 
Use replace_script='True' to enforce.\",\n verbose_level=1,\n )\n return\n\n # post the script\n self.upload_script(\n self.jamf_url,\n self.script_name,\n self.script_path,\n category_id,\n self.script_category,\n self.script_info,\n self.script_notes,\n self.script_priority,\n self.script_parameter4,\n self.script_parameter5,\n self.script_parameter6,\n self.script_parameter7,\n self.script_parameter8,\n self.script_parameter9,\n self.script_parameter10,\n self.script_parameter11,\n self.osrequirements,\n token,\n obj_id,\n )\n script_uploaded = True\n\n # output the summary\n self.env[\"script_name\"] = self.script_name\n self.env[\"script_uploaded\"] = script_uploaded\n if script_uploaded:\n self.env[\"jamfscriptuploader_summary_result\"] = {\n \"summary_text\": \"The following scripts were created or updated in Jamf Pro:\",\n \"report_fields\": [\n \"script\",\n \"path\",\n \"category\",\n \"priority\",\n \"os_req\",\n \"info\",\n \"notes\",\n \"P4\",\n \"P5\",\n \"P6\",\n \"P7\",\n \"P8\",\n \"P9\",\n \"P10\",\n \"P11\",\n ],\n \"data\": {\n \"script\": self.script_name,\n \"path\": self.script_path,\n \"category\": self.script_category,\n \"priority\": str(self.script_priority),\n \"info\": self.script_info,\n \"os_req\": self.osrequirements,\n \"notes\": self.script_notes,\n \"P4\": self.script_parameter4,\n \"P5\": self.script_parameter5,\n \"P6\": self.script_parameter6,\n \"P7\": self.script_parameter7,\n \"P8\": self.script_parameter8,\n \"P9\": self.script_parameter9,\n \"P10\": self.script_parameter10,\n \"P11\": self.script_parameter11,\n },\n }", "def run():\n\n call_args = sys.argv[1:]\n main(call_args)", "def main():\n greetings()\n run_jarvis()", "def main(self):\n try:\n self.parse_args()\n self.run()\n return 0\n except AnalysisBackendError as e:\n L.error(e)\n return 1", "def main():\n pass", "def _main():\n import argparse\n\n # Get command line arguments\n parser = argparse.ArgumentParser(description=\"This wrapper script will run \\\n a pickled Python function on \\\n some pickled retrieved data \\\n via 0MQ. You almost never \\\n want to run this yourself.\")\n parser.add_argument('home_address',\n help='IP address of submitting host.')\n parser.add_argument('module_dir',\n help='Directory that contains module containing pickled\\\n function. This will get added to PYTHONPATH \\\n temporarily.')\n args = parser.parse_args()\n\n # Make warnings from built-in warnings module get formatted more nicely\n logging.captureWarnings(True)\n logging.basicConfig(format=('%(asctime)s - %(name)s - %(levelname)s - ' +\n '%(message)s'), level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n logger.info(\"Appended {0} to PYTHONPATH\".format(args.module_dir))\n sys.path.insert(0, args.module_dir)\n\n logger.debug(\"Job ID: %i\\tHome address: %s\\tModule dir: %s\",\n os.environ['JOB_ID'],\n args.home_address, args.module_dir)\n\n # Process the database and get job started\n _run_job(os.environ['JOB_ID'], args.home_address)", "def main():\n return 0" ]
[ "0.77707267", "0.75500584", "0.75495726", "0.7460941", "0.73748064", "0.73399204", "0.7278216", "0.72609", "0.72570956", "0.72451305", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72448987", "0.72189707", "0.71731365", "0.71233696", "0.71157926", "0.71157926", "0.71157926", "0.71157926", "0.70954806", "0.70872545", "0.707239", "0.7031396", "0.70100623", "0.6989527", "0.6923501", "0.6917616", "0.6902497", "0.6883878", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68774277", "0.68718386", "0.68494195", "0.6845766", "0.6845766", "0.6845766", "0.6845766", "0.6845766", "0.6845766", "0.6845766", "0.6845766", "0.6832166", "0.68279356", "0.68259555", "0.680188", "0.67993104", "0.6798014", "0.6795608", "0.6795608", "0.67934585", "0.6790074", "0.67677253", "0.6755732", "0.6740314", "0.67355275", "0.67138976", "0.6699548", "0.6698046", "0.6688771" ]
0.0
-1
Calculate overlap among trajectories
def trajectory_overlap(gt_trajs, pred_traj):
    max_overlap = 0
    max_index = 0
    for t, gt_traj in enumerate(gt_trajs):
        s_viou = viou_sx(gt_traj['sub_traj'], gt_traj['duration'], pred_traj['sub_traj'], pred_traj['duration'])
        o_viou = viou_sx(gt_traj['obj_traj'], gt_traj['duration'], pred_traj['obj_traj'], pred_traj['duration'])
        so_viou = min(s_viou, o_viou)
        if so_viou > max_overlap:
            max_overlap = so_viou
            max_index = t
    return max_overlap, max_index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def overlap_cost(track_a, track_b):\n return 1 - overlap(track_a.bbox, track_b.bbox)", "def overlap_with(self, other):", "def poverlap(t1, t2, size1, size2):\n x0 = t1[0]\n y0 = t1[1]\n x1 = t1[0] + size1[0]\n y1 = t1[1] + size1[1]\n\n x2 = t2[0]\n y2 = t2[1]\n x3 = t2[0] + size2[0]\n y3 = t2[1] + size2[1]\n \n ol = max(0, min(x1, x3) - max(x0, x2)) * max(0, min(y1, y3) - max(y0, y2))\n\n return ol / float(2*(size2[0]*size2[1]) - ol)", "def findOverlap( columns, t, minOverlap ):\n for c in columns:\n c.setOverlap() # defaults to 0.0\n for s in c.getConnectedSynapses():\n c.setOverlap( c.getOverlap() + s.getSourcetInput( t ) )\n\n if c.getOverlap() < minOverlap:\n c.setOverlap()\n else:\n c.boostOverlap()", "def listOfOverlappingTTPairs():\n listOfHalfModules = listOfTTHalfModules()\n ttmap = TTModulesMap_instance\n pairs = []\n regions = {'A':1, 'B':2, 'C':3}\n print \"Overlapping TT half modules:\"\n for hm1 in listOfHalfModules:\n for hm2 in listOfHalfModules:\n # they must be different\n if hm1 == hm2: continue\n # they must be both on top or both on bottom\n if locateTTHalfModule(hm1)[3] != locateTTHalfModule(hm2)[3]: continue\n # they must be on the same layer\n if locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0]: continue\n # avoid duplicates\n if (hm1, hm2) in pairs: continue\n if (hm2, hm1) in pairs: continue\n # they must be contiguous:\n if (locateTTHalfModule(hm1)[1] == locateTTHalfModule(hm2)[1]):\n if (abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) == 1):\n pairs.append( (hm1, hm2) )\n else:\n num1 = locateTTHalfModule(hm1)[2]\n num2 = locateTTHalfModule(hm2)[2]\n max1 = ttmap.numberOfModules[locateTTHalfModule(hm1)[0]]['Region'+locateTTHalfModule(hm1)[1]] - 1\n max2 = ttmap.numberOfModules[locateTTHalfModule(hm2)[0]]['Region'+locateTTHalfModule(hm2)[1]] - 1\n nreg1 = regions[locateTTHalfModule(hm1)[1]]\n nreg2 = regions[locateTTHalfModule(hm2)[1]]\n if ( (num1==max1 and num2==0 and nreg2-nreg1==1) or (num2==max2 and num1==0 and nreg1-nreg2==1) ):\n pairs.append( (hm1, hm2) )\n print '\\t', hm1, hm2\n ## - same region\n #if ((abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) != 1)\n # and (locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])): continue\n ## - or neighbouring region\n #elif not ((locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])\n # and ( ( (ttmap.numberOfModules[locateTTHalfModule(hm1)[0]] == locateTTHalfModule(hm1)[2]+1 )\n # and (locateTTHalfModule(hm2)[2] == 0) )\n # or ( (ttmap.numberOfModules[locateTTHalfModule(hm2)[0]] == locateTTHalfModule(hm2)[2]+1 )\n # and (locateTTHalfModule(hm1)[2] == 0) ) ) ): continue\n ## append to list of pairs\n #pairs.append( (hm1, hm2) )\n print\n return pairs", "def getOverlap(self):\n return 0.5", "def overlap(table1, table2):\n out = np.zeros(np.size(table1, axis=0), dtype='bool')\n for i in range(np.size(table1, axis=0)):\n s1_s2 = table1[i, 0] < table2[:, 0] \n s1_e2 = table1[i, 0] <= table2[:, 1]\n e1_s2 = table1[i, 1] < table2[:, 0]\n e1_e2 = table1[i, 1] < table2[:, 1]\n # no overlap occurs when all four parameters above either == 0 or 1\n sum_params = np.sum(np.array([s1_s2, s1_e2, e1_s2, e1_e2]), axis=0)\n olap = (sum_params == 1) | (sum_params == 2) | (sum_params == 3)\n out[i] = np.any(olap)\n return out", "def overlap(t1start, t1end, t2start, t2end):\n\n return (t1start <= t2start <= t1end) or (t2start <= t1start <= t2end)", "def overlap(list1,list2):\n \n coord=[]\n for pos1 in list1:\n #print 'pos in list1 is', pos1\n coord.append(('S',int(pos1.split('-')[0]), 
'l1'))\n #print 'S is ', pos1.split('-')[0]\n coord.append(('E',int(pos1.split('-')[1]),'l1'))\n #print 'E is ', pos1.split('-')[1]\n #print coord \n for pos2 in list2:\n #print 'pos in list2 is', pos2\n coord.append(('S',int(pos2.split('-')[0]),'l2'))\n #print 'S is ', pos2.split('-')[0]\n coord.append(('E', int(pos2.split('-')[1]),'l2'))\n #print 'E is ', pos2.split('-')[1]\n #print coord\n \n coord.sort(key = lambda x : x[0], reverse = True)\n #print 'coord after first sort \\n', coord\n coord.sort(key = lambda x : x[1])\n #print 'coord after 2nd sort by number \\n', coord\n # PART 1: SEARCHES FOR OVERLAPS BETWEEN 2 HISTONE MARKS\n new_coord_list = [] #initialize new list to which to move all those that don't overlap\n #index = 0 #position in list \n spos=0 # start pos initialized \n ct=0\n ovl=[]\n for pos in coord:\n new_coord_list.append(pos)\n #print pos, 'doesn\\'t overlap'\n index = int(new_coord_list.index(pos)) \n if pos[0]=='S':\n ct+=1\n if ct==2:\n spos=pos[1]\n if pos[0]=='E':\n ct-=1\n if ct==1:\n if not spos==pos[1]:\n #print spos, '-', pos[1], 'overlap'\n ovl.append(('ovl', spos, pos[1])) # add to overlap vector the positions that overlap\n #print 'overlap found! :', [str(spos),str(pos[1]),'ovl']\n #print 'removing ', new_coord_list[index]\n del new_coord_list[index]\n #print 'removing', new_coord_list[index-1]\n del new_coord_list[index-1]\n \n # \n new_coord_list.sort(key = lambda x : x[0], reverse = True)\n start=0\n end = 0\n two_hist_away_from_cent_of_peak = 0\n two_hist_away_list = []\n for nc_pos in new_coord_list:\n if nc_pos[0]=='S':\n if (start<=two_hist_away_from_cent_of_peak) and (two_hist_away_from_cent_of_peak !=0) and (end!=0): \n #if center_of_peak <= two_hist_away_from_cent_of_peak and (two_hist_away_from_cent_of_peak !=0):\n two_hist_away_list.append('-'.join([str(start),str(end), 'tha']))\n start= nc_pos[1]\n if nc_pos[0]=='E':\n end = nc_pos[1]\n center_of_peak= (start+nc_pos[1])/2\n two_hist_away_from_cent_of_peak = center_of_peak + 300\n # print 'new_coord_list: ', new_coord_list\n return ovl, new_coord_list", "def pred_overlap(t, h):\n a_set = set(get_pred(t))\n b_set = set(get_pred(h))\n return len(a_set&b_set)/float(len(a_set|b_set))", "def overlap(t1, t2):\n t1 = dict(min=np.min(t1), max=np.max(t1))\n t2 = dict(min=np.min(t2), max=np.max(t2))\n for t in (t1, t2):\n t['dur'] = t['max'] - t['min']\n\n # Ensure t1 min < t2 min\n if t2['min'] < t1['min']:\n print('t2 starts earlier')\n t1, t2 = t2, t1\n \n # var names wrt t2\n min_inside = t2['min'] >= t1['min'] and t2['min'] <= t1['max']\n max_inside = t2['max'] <= t1['max']\n if min_inside and max_inside:\n # t2 completely contained by t1\n return (t2['min'], t2['max'])\n elif min_inside:\n # t2 partially contained by t1\n return (t2['min'], t1['max'])\n else:\n # no overlap\n return (None, None)", "def calculate_overlaps(drives, dist_tol, time_tol):\n \n for i1 in range(len(drives)-1):\n d1 = drives[i1]\n \n for i2 in range(i1+1, len(drives)):\n d2 = drives[i2]\n \n #stop trying if d1 ends more than time_tol before d2 starts\n #note that drives are chronologically ordered\n if d2.coords[0].time - d1.coords[-1].time > time_tol:\n break\n \n overlap = ol.compute_overlap(d1, d2, dist_tol, time_tol)\n if overlap:\n ol1 = ol.Overlap(d1, d2, overlap[0], overlap[1])\n d1.append_overlap(ol1)\n ol2 = ol.Overlap(d2, d1, overlap[2], overlap[3])\n d2.append_overlap(ol2)", "def overlap(x,y):\n if (x[0]<=y[-1] and x[-1]>y[0]) or (y[0]<=x[-1] and y[-1]>x[0]):\n return 1\n else: return 0", "def 
cal_overlaps(boxes1, boxes2):\n area1 = (boxes1[:, 0] - boxes1[:, 2]) * (boxes1[:, 1] - boxes1[:, 3]) # (Nsample, 1)\n area2 = (boxes2[:, 0] - boxes2[:, 2]) * (boxes2[:, 1] - boxes2[:, 3]) # (Msample, 1)\n\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) # (Nsample, Msample)\n\n # calculate the intersection of boxes1(anchor) and boxes2(GT box)\n for i in range(boxes1.shape[0]):\n overlaps[i][:] = cal_iou(boxes1[i], area1[i], boxes2, area2)\n\n return overlaps", "def test_compute_overlap(self):\n # box1 contained in box2\n box1 = ((1, 2), (1, 2), (1, 2))\n box2 = ((1, 3), (1, 3), (1, 3))\n mapping = {box1: [1, 2, 3, 4], box2: [1, 2, 3, 4, 5]}\n # box1 in box2, so complete overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box1, box2), 1)\n # 4/5 atoms in box2 in box1, so 80 % overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box2, box1), .8)", "def calc_overlap(self, start, stop):\n\n overlaps = []\n for s in self.map:\n e = self.map[s]\n if s >= start or s <= stop:\n # We found an overlap\n if e <= stop:\n overlaps.append({\"start\": s, \"stop\": e})\n else:\n overlaps.append({\"start\": s, \"stop\": stop})\n elif e >= start or e <= stop:\n if s >= start:\n overlaps.append({\"start\": s, \"stop\": e})\n else:\n overlaps.append({\"start\": start, \"stop\": e})\n return overlaps", "def get_overlap(self, transposon):\n return max(0, min(self.last-transposon.first,\n transposon.last-self.first,\n len(self), len(transposon)))", "def overlap(line1, line2):\n\tx1, x2 = line1\n\tx3, x4 = line2\n\tonLeft = min(x1, x2) <= min(x3, x4)\n\tif onLeft:\n\t\treturn max(max((x1, x2)) - min((x3, x4)), 0) > 0\n\treturn max(max((x3, x4)) - min((x1, x2)),0) > 0", "def overlap(component1, component2):\n if component1[0].start <= component2[0].stop and component2[0].start <= component1[0].stop:\n if component1[1].start <= component2[1].stop and component2[1].start <= component1[1].stop:\n return True\n return False", "def get_overlap(a, b):\n return max(0, min(a[1], b[1]) - max(a[0], b[0]))", "def count_overlap(self, time, other_object, other_time):\n ti = np.where(time == self.times)[0][0]\n ma = np.where(self.masks[ti].ravel() == 1)\n oti = np.where(other_time == other_object.times)[0]\n obj_coords = np.zeros(self.masks[ti].sum(), dtype=[('x', int), ('y', int)])\n other_obj_coords = np.zeros(other_object.masks[oti].sum(), dtype=[('x', int), ('y', int)])\n obj_coords['x'] = self.i[ti].ravel()[ma]\n obj_coords['y'] = self.j[ti].ravel()[ma]\n other_obj_coords['x'] = other_object.i[oti][other_object.masks[oti] == 1]\n other_obj_coords['y'] = other_object.j[oti][other_object.masks[oti] == 1]\n return float(np.intersect1d(obj_coords,\n other_obj_coords).size) / np.maximum(self.masks[ti].sum(),\n other_object.masks[oti].sum())", "def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps", "def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * 
(boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps", "def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps", "def overlap(path1, path2):\n DataL1 = BedTool(path1).sort()\n DataL2 = BedTool(path2).sort()\n overlap = DataL1.intersect(DataL2, wao=True)\n Overlap_df = overlap.to_dataframe()\n Strand1 = list(Overlap_df.iloc[:, 5])\n Strand2 = list(Overlap_df.iloc[:, 11])\n p_p, m_m, p_m, m_p, same_strand, opposite_strand, convergent, divergent = orientation(Strand1, Strand2)\n return p_p, m_m, p_m, m_p, same_strand, opposite_strand, convergent, divergent", "def compute_overlap(self, skymap1, skymap2, single_skymap1, single_skymap2):\n from ligo.skymap.postprocess.crossmatch import crossmatch\n from astropy.coordinates import SkyCoord\n ra, dec = self.get_ra_dec_from_skymap(single_skymap1)\n coord = SkyCoord(ra, dec, unit=\"rad\")\n result = crossmatch(skymap2, coord)\n searched_prob_1 = np.min([result.searched_prob, 1.0])\n ra, dec = self.get_ra_dec_from_skymap(single_skymap2)\n coord = SkyCoord(ra, dec, unit=\"rad\")\n result = crossmatch(skymap1, coord)\n searched_prob_2 = np.min([result.searched_prob, 1.0])\n return np.max([1-searched_prob_1, 1-searched_prob_2])", "def define_overlap_operations(self):\n self._d_i = lambda q:np.roll(q,-1,axis=-1) - q\n self._d_j = lambda q:np.roll(q,-1,axis=-2) - q", "def overlap(self, *args, type='bbox'):\n return self.phy2abs.overlap(*args, type=type)", "def check_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_overlaps - something went wrong... 
no data?\")\n pass", "def create_overlap_metric(anchor_boxes):\n y, x, h, w = np.transpose(anchor_boxes)\n ab_area = w * h\n y0 = y - h // 2\n x0 = x - w // 2\n y1 = y + h // 2\n x1 = x + w // 2\n\n def overlap(gt_boxes):\n overlaps = []\n for gt_box in gt_boxes:\n gt_y0, gt_x0, gt_y1, gt_x1 = gt_box\n int_y0 = np.maximum(gt_y0, y0)\n int_x0 = np.maximum(gt_x0, x0)\n int_y1 = np.minimum(gt_y1, y1)\n int_x1 = np.minimum(gt_x1, x1)\n int_area = np.maximum(0, int_x1 - int_x0) * np.maximum(0, int_y1 - int_y0)\n overlaps.append(int_area / ab_area)\n overlaps = np.transpose(overlaps)\n gt_indices = np.argmax(overlaps, axis=1)\n overlaps = np.squeeze(np.take_along_axis(overlaps, gt_indices[:, np.newaxis], axis=1))\n gt_boxes = np.take(gt_boxes, gt_indices, axis=0)\n return overlaps, gt_boxes\n return overlap", "def overlaps(a, b):\n\n dx = a.x - b.x\n dy = a.y - b.y\n try:\n radius = a.radius + b.radius\n except AttributeError:\n radius = getattr(a, 'radius', 0.5) + getattr(b, 'radius', 0.5)\n\n return dx * dx + dy * dy <= radius * radius", "def overlapSelf(Reframe,GTframe):\n x1 = Reframe[0]\n y1 = Reframe[1]\n width1 = Reframe[2] - Reframe[0]\n height1 = Reframe[3] - Reframe[1]\n\n x2 = GTframe[0]\n y2 = GTframe[1]\n width2 = GTframe[2] - GTframe[0]\n height2 = GTframe[3] - GTframe[1]\n\n endx = max(x1 + width1, x2 + width2)\n startx = min(x1, x2)\n width = width1 + width2 - (endx - startx)\n\n endy = max(y1 + height1, y2 + height2)\n starty = min(y1, y2)\n height = height1 + height2 - (endy - starty)\n\n if width <= 0 or height <= 0:\n ratio = 0\n else:\n Area = width * height\n Area1 = width1 * height1\n ratio = Area * 1. / Area1\n return ratio", "def overlaps(self, other):\n return _binary_op(arctern.ST_Overlaps, self, other).astype(bool, copy=False)", "def print_overlaps(gt_list, det_list):\n\n overlap_list = []\n high = 0\n for i_1, grt in enumerate(gt_list):\n for i_2, det in enumerate(det_list):\n overlap = overlap_between(grt, det)\n print(i_1, i_2, overlap)\n if overlap > high:\n high = overlap\n overlap_list.append(high)\n high = 0\n\n print(overlap_list)", "def overlap(self):\n if self._overlap is None:\n self.overlap = 0\n if self._overlap < -(self.offset + self.radius) / tan(self.theta):\n raise Exception('Invalid overlap. Minimum overlap = -(offset + radius) / tan(theta).')\n if self._overlap > self.stem:\n raise Exception('Invalid overlap. 
Maximum overlap = stem.')\n return self._overlap", "def unit_overlap(self, RTSummary , SystemSummary):\n\n RTSummary_words = self.nlpWrapper.stemmer(tokens=self.nlpWrapper.tokenize(RTSummary))\n SystemSummary_words = self.nlpWrapper.stemmer(tokens=self.nlpWrapper.tokenize(RTSummary))\n commonWords = set(RTSummary_words)&set(SystemSummary_words)\n commonWords = sorted(commonWords, key = lambda k : RTSummary_words.index(k))\n\n return (len(commonWords) / (len(RTSummary)+len(SystemSummary)-len(commonWords)))", "def main():\n line1 = Line(1, 5)\n line2 = Line(5, 8)\n print(LineUtil.is_overlap(line1, line2))", "def get_overlaps(file_name):\r\n\r\n place = {}\r\n size = {}\r\n sap = {}\r\n overlapping = []\r\n active_list = []\r\n max_width = 0\r\n\r\n with open(file_name + \".scl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if line.split()[0] == \"Sitespacing\":\r\n sitespacing = line.split()[2]\r\n if line.split()[0] == \"SubrowOrigin\":\r\n starting_x = line.split()[2]\r\n ending_x = int(starting_x) + int(sitespacing) * int(line.split()[5])\r\n if ending_x > max_width:\r\n max_width = ending_x\r\n\r\n divider = max_width // 10\r\n\r\n with open(file_name + \".nodes\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if len(line.split()) == 3:\r\n size[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n\r\n with open(file_name + \".pl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if line.split()[0] in size:\r\n place[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n sap_num = int(line.split()[1]) // divider\r\n if sap_num not in sap.keys():\r\n sap[sap_num] = []\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]), int(line.split()[2]),\r\n \"start\"])\r\n\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]),\r\n int(line.split()[2]) + int(size[line.split()[0]][1]), \"end\"])\r\n\r\n for lista in sap.values():\r\n lista.sort(key=lambda x: x[3])\r\n lista.sort(key=lambda x: x[4], reverse=True)\r\n for element in lista:\r\n if element[4] == \"start\":\r\n if len(active_list) == 0:\r\n active_list.append(element[0])\r\n else:\r\n for node in active_list:\r\n if int(place[node][0]) < int(place[element[0]][0]) + int(size[element[0]][0]) \\\r\n and int(place[node][0]) + int(size[node][0]) > int(place[element[0]][0]) \\\r\n and int(place[node][1]) < int(place[element[0]][1]) + int(size[element[0]][1]) \\\r\n and int(place[node][1]) + int(size[node][1]) > int(place[element[0]][1]):\r\n overlap = (node, element[0])\r\n overlapping.append(overlap)\r\n active_list.append(element[0])\r\n else:\r\n active_list.remove(element[0])\r\n return overlapping", "def _overlap(x1, w1, x2, w2):\r\n if x1+w1 < x2-w2: return False\r\n if x1-w1 > x2+w2: return False\r\n\r\n return True", "def overlaps(self, other):\n pass", "def overlaps(self, other): # -> bool:\n ...", "def overlap(self, other):\n\t\toverlap = self.contains(other.startX, other.startY) or \\\n\t\t\tself.contains(other.startX, other.endY) or \\\n\t\t\tself.contains(other.endX, other.startY) or \\\n\t\t\tself.contains(other.endX, other.endY)\n\n\t\tintersectY1 = self.startY <= other.startY <= self.endY and \\\n\t\t\tself.startY <= other.endY <= self.endY and \\\n\t\t\t(other.startX <= 
self.startX <= other.endX or \\\n\t\t\tother.startX <= self.endX <= other.endX)\n\n\t\tintersectY2 = other.startY <= self.startY <= other.endY and \\\n\t\t\t other.startY <= self.endY <= other.endY and \\\n\t\t\t (self.startX <= other.startX <= self.endX or \\\n\t\t\t self.startX <= other.endX <= self.endX)\n\n\t\tintersectY = intersectY1 or intersectY2\n\n\t\tintersectX1 = self.startX <= other.startX <= self.endY and \\\n\t\t\tself.startX <= other.endX <= self.endX and \\\n\t\t (other.startY <= self.startY <= other.endY or \\\n\t\t\tother.startY <= self.endY <= other.endY)\n\n\t\tintersectX2 = other.startX <= self.startX <= other.endX and \\\n\t\t\tother.startX <= self.endX <= other.endX and \\\n\t\t (self.startY <= other.startY <= self.endY or \\\n\t\t\tself.startY <= other.endY <= self.endY)\n\n\t\tintersectX = intersectX1 or intersectX2\n\n\t\treturn overlap or intersectX or intersectY", "def overlaps(*objs):\n return set.intersection(*(set(range(*extent(obj))) for obj in objs))", "def overlap_area(a, b):\n return min(a[2] - b[0], b[2] - a[0]) * min(a[3] - b[1], b[3] - a[1])", "def tOverlap(ts1, ts2, *args, **kwargs):\n idx_1in2 = tOverlapHalf(ts2, ts1, *args, **kwargs)\n idx_2in1 = tOverlapHalf(ts1, ts2, *args, **kwargs)\n if len(idx_2in1) == 0:\n idx_2in1 = None\n if len(idx_1in2) == 0:\n idx_1in2 = None\n return idx_1in2, idx_2in1", "def get_overlap_time(begin_at_infected, end_at_infected, begin_at_contact, end_at_contact):\n\n\tbegin_at_infected = begin_at_infected\n\tbegin_at_contact = begin_at_contact\n\tend_at_infected = end_at_infected\n\tend_at_contact = end_at_contact\n\treturn (min(end_at_infected, end_at_contact) - max(begin_at_infected, begin_at_contact))", "def _overlap(c1, c2, index='dice'):\n set1 = set(c1)\n set2 = set(c2)\n intersection_num = float(len(set1 & set2))\n try:\n if index == 'dice':\n total_num = len(set1 | set2) + intersection_num\n overlap = 2.0 * intersection_num / total_num\n elif index == 'percent':\n overlap = 1.0 * intersection_num / len(set1)\n else:\n raise Exception(\"Only support 'dice' and 'percent' as overlap indices at present.\")\n except ZeroDivisionError as e:\n print(e)\n overlap = np.nan\n return overlap", "def overlaps(x1, x2, y1, y2):\n\n return x1 <= y2 and y1 <= x2", "def location_overlap(example, df):\n\n def haversin(lat1, lon1, lat2, lon2):\n \"\"\"\n Finds haversin distance (distance along great circle) in miles between two points. Points are defined by latitude and longitude. 
Radius of the Earth is assumed to be halfway between radius at the equator and radius at the pole.\n \"\"\"\n r = 3956.545 # Radius of the Earth in miles\n\n # Conver to radians\n lat1 = np.pi/180*lat1\n lon1 = np.pi/180*lon1\n lat2 = np.pi/180*lat2\n lon2 = np.pi/180*lon2\n\n # Haversin formula\n d = 2*r*np.arcsin(np.sqrt(\\\n np.sin((lat2 - lat1)/2)**2 + \\\n np.cos(lat1) * np.cos(lat2)*\\\n np.sin((lon2 - lon1)/2)**2))\n return d\n\n def is_overlap(row, example):\n \"\"\"\n Find if two tutors overlap in location:\n Is the distance between the tutors less than the radius of the non-example tutor?\n \"\"\"\n d = haversin(row['lat'],row['lon'], example['lat'], example['lon'])\n if d<(row['zip_radius']):\n return True\n else:\n return False\n\n close_tuts = df[df.apply(lambda x: is_overlap(x, example), axis=1)]\n\n return close_tuts", "def BD_overlap(df_OTU):\n # min BD for each library\n func = lambda x: np.min(x['BD_mid'])\n BD_mins = df_OTU.apply_by_group(func,groups=['library'],inplace=False)\n # max BD for each library\n func = lambda x: np.max(x['BD_mid'])\n BD_maxs = df_OTU.apply_by_group(func,groups=['library'],inplace=False)\n \n # overlap: max of BD_mins, min of BD_maxs\n BD_overlap_min = np.max(BD_mins['values'].values)\n BD_overlap_max = np.min(BD_maxs['values'].values)\n \n return BD_overlap_min, BD_overlap_max", "def _calculate_area_overlap(self, wake_velocities, freestream_velocities, turbine):\n count = np.sum(freestream_velocities - wake_velocities <= 0.05)\n return (turbine.grid_point_count - count) / turbine.grid_point_count", "def agent_overlap(t_drs, h_drs, replacements):\n t_agents = get_agent(t_drs) \n h_agents = get_agent(h_drs)\n length = len(t_agents) + len(h_agents)\n if len(t_agents) is 0:\n return 0\n common = 0\n for agent in t_agents:\n if agent in h_agents:\n h_agents.pop(h_agents.index(agent))\n common =+ 1\n if common > 1:\n print(common)\n \n return len(h_agents)/len(t_agents) #seems to work better then real comparison\n '''\n else:\n for replacement in replacements:\n if get_agent(replacement[15]) == get_agent(replacement[16]):\n return 1\n '''", "def overlapping(y_true, y_pred, inds_inside):\n overlaps = overlap(y_pred, y_true[:, :4])\n\n argmax_overlaps_inds = keras.backend.argmax(overlaps, axis=1)\n\n gt_argmax_overlaps_inds = keras.backend.argmax(overlaps, axis=0)\n\n indices = keras.backend.stack(\n [\n tensorflow.range(keras.backend.shape(inds_inside)[0]),\n keras.backend.cast(argmax_overlaps_inds, tensorflow.int32)\n ]\n )\n\n indices = keras.backend.transpose(indices)\n\n max_overlaps = tensorflow.gather_nd(overlaps, indices)\n\n return argmax_overlaps_inds, max_overlaps, gt_argmax_overlaps_inds", "def overlap(a, b):\n return not(a[2]<=b[0] or a[3]<=b[1] or a[0]>=b[2] or a[1]>=b[3])", "def calcOverlap(intervals):\n bp = 0 \n for i in intervals:\n bp += sum([overlapCases(i, j) for j in intervals])\n return(bp)", "def measure_overlap(self, other):\n return np.dot(self.vector, other.vector)", "def test_overlap(self):\r\n rect1 = Rectangle(10, 20, 30, 40)\r\n rect2 = Rectangle(50, 60, 70, 80)\r\n\r\n # overlap should be commutative\r\n assert not rect1.overlap_with(rect2)\r\n assert not rect2.overlap_with(rect1)\r\n assert not Rectangle.overlap(rect1, rect2)\r\n assert not Rectangle.overlap(rect2, rect1)\r\n\r\n rect1 = Rectangle(-10, -20, 10, 60)\r\n rect2 = Rectangle(0, 50, 100, 200)\r\n assert rect1.overlap_with(rect2)\r\n assert rect2.overlap_with(rect1)\r\n assert Rectangle.overlap(rect1, rect2)\r\n assert Rectangle.overlap(rect2, rect1)\r\n\r\n 
# rectangles with only same boarder are not considered overlapped\r\n rect1 = Rectangle(-30, -10, -20, 0)\r\n rect2 = Rectangle(-20, -5, 30, 20)\r\n rect3 = Rectangle(-40, 0, 30, 20)\r\n assert not rect1.overlap_with(rect2)\r\n assert not rect1.overlap_with(rect3)\r\n assert not Rectangle.overlap(rect2, rect1)\r\n assert not Rectangle.overlap(rect3, rect1)", "def time_overlap(d1, d2):\n gt1, gt2, vt1, vt2 = parse_date(d1[\"t1\"]), parse_date(d1[\"t2\"]), parse_date(d2[\"t1\"]), parse_date(d2[\"t2\"])\n return (gt1 != vt2) and (vt1 != gt2) and (gt1 <= vt2) and (vt1 <= gt2)", "def find_overlap_range(x1, lenght1, x2, length2):\n\n\n highest_start_point = max(x1, x2)\n lowest_end_point = min(x1 + lenght1, x2 + length2)\n \n if highest_start_point >= lowest_end_point:\n return None\n \n overlap_length = lowest_end_point - highest_start_point\n \n return (highest_start_point, overlap_length)", "def can_overlap(self):\n return False", "def overlap_calib(t1, y1, t2, y2):\n tstart, tend = overlap(t1, t2)\n if tstart is None:\n raise Exception('No overlap between time series')\n y1 = y1[(t1 >= tstart) & (t1 <= tend)]\n y2 = y2[(t2 >= tstart) & (t2 <= tend)]\n C = y1.mean() / y2.mean()\n return C", "def detect_overlap_1d(first, first_length, second, second_length):\n first_end = first + first_length - 1\n second_end = second + second_length - 1\n return second_end >= first and first_end >= second", "def create_pos_overlap_metric(anchor_boxes):\n y, x, h, w = np.transpose(anchor_boxes)\n y0 = y - h // 2\n x0 = x - w // 2\n y1 = y + h // 2\n x1 = x + w // 2\n\n def pos_overlap(gt_boxes):\n pos_overlaps = []\n for gt_box in gt_boxes:\n gt_y0, gt_x0, gt_y1, gt_x1 = gt_box\n gt_area = (gt_x1 - gt_x0) * (gt_y1 - gt_y0)\n int_y0 = np.maximum(gt_y0, y0)\n int_x0 = np.maximum(gt_x0, x0)\n int_y1 = np.minimum(gt_y1, y1)\n int_x1 = np.minimum(gt_x1, x1)\n int_area = np.maximum(0, int_x1 - int_x0) * np.maximum(0, int_y1 - int_y0)\n pos_overlaps.append(int_area / gt_area)\n # Group by anchor boxes\n pos_overlaps = np.transpose(pos_overlaps)\n # Get max metric index\n gt_indices = np.argmax(pos_overlaps, axis=1)\n # Choose max metric\n pos_overlaps = np.squeeze(np.take_along_axis(pos_overlaps, gt_indices[:, np.newaxis], axis=1))\n # Take respective ground-truth boxes. 
No reason to return indices, at least in RPN\n gt_boxes = np.take(gt_boxes, gt_indices, axis=0)\n return pos_overlaps, gt_boxes\n return pos_overlap", "def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def get_overlap(self, other):\n return self.intersection_over_union(other)", "def overlap(start_idx1, end_idx1, start_idx2, end_idx2):\n head = min(end_idx1, end_idx2)\n tail = max(start_idx1, start_idx2)\n return head >= tail", "def do_overlap(ds,iterno,algo=\"FordRollett\",ignore=1,unit_weights=False,top=None,bottom=None,\n exact_angles=None,drop_frames='',drop_tubes = '', use_gains = [],do_sum=False,\n do_interp = False, dumpfile=None):\n import time\n from Reduction import overlap,interpolate\n # Get sensible values\n if top is None: top = ds.shape[1]-1\n if bottom is None: bottom = 0\n\n # Vertically integrate\n # Dimensions are step,vertical,tube\n\n b = ds[:,bottom:top,:].intg(axis=1).get_reduced()\n\n # Determine pixels per tube interval\n\n tube_pos = ds.axes[-1]\n if tube_pos.ndim == 2: #very old data, just take one slice\n tube_pos = tube_pos[0]\n tubesep = abs(tube_pos[0]-tube_pos[-1])/(len(tube_pos)-1)\n tube_steps = ds.axes[0]\n bin_size = abs(tube_steps[0]-tube_steps[-1])/(len(tube_steps)-1)\n pixel_step = int(round(tubesep/bin_size))\n bin_size = tubesep/pixel_step\n print '%f tube separation, %d steps before overlap, ideal binsize %f' % (tubesep,pixel_step,bin_size)\n dropped_frames = parse_ignore_spec(drop_frames)\n dropped_tubes = parse_ignore_spec(drop_tubes)\n\n # Drop frames from the end as far as we can\n\n for empty_no in range(b.shape[0]-1,0,-1):\n print \"Trying %d\" % empty_no\n if empty_no not in dropped_frames:\n break\n dropped_frames.remove(empty_no)\n print \"All frames after %d empty so dropped\" % empty_no\n b = b[:empty_no+1]\n\n # Do we need to add dummy missing frames?\n\n extra_steps = b.shape[0]%pixel_step\n if extra_steps > 0:\n start_drop = b.shape[0]\n # gumpy has no resize\n new_b = zeros([((b.shape[0]/pixel_step)+1)*pixel_step,b.shape[1]])\n new_b[:b.shape[0]] = b\n b = new_b\n extra_dropped_frames = range(start_drop,b.shape[0])\n print \"Filled out array from %d to %d with dummy frames\" % (start_drop,b.shape[0])\n dropped_frames |= set(extra_dropped_frames)\n else:\n extra_dropped_frames = []\n \n # Zero out dropped frames\n\n print 'Dropped frames: ' + `dropped_frames`\n b_zeroed = copy(b)\n\n # Make a simple array to work out which sectors are missing frames\n\n frame_check = array.ones(b.shape[0])\n\n # Zero out all matching steps\n\n all_zeroed = copy(b)\n region_starts = [a*pixel_step for a in range(b.shape[0]/pixel_step)]\n for frame_no in dropped_frames:\n b_zeroed[frame_no] = 0\n b_zeroed.var[frame_no] = 0\n dropped_step = frame_no%pixel_step\n ref_drop_steps = [r+dropped_step for r in region_starts]\n for drop_step in ref_drop_steps:\n frame_check[drop_step] = 0\n all_zeroed[drop_step] = 0\n all_zeroed.var[drop_step] = 0\n\n # Now drop out whole detectors\n\n for tube_no in dropped_tubes:\n b_zeroed[:,tube_no] = 0\n b_zeroed.var[:,tube_no] = 0\n all_zeroed[:,tube_no] = 0\n all_zeroed.var[:,tube_no] = 0\n\n # Interpolation. If requested, we first interpolate the data onto a regular angular grid,\n # which is the assumption underlying the regain calculation. 
However, as the deviations\n # from regularity are usually minor, this step can usually be skipped\n \n if do_interp:\n if exact_angles != None:\n h_correction = read_horizontal_corrections(exact_angles)\n else:\n h_correction = None\n \n all_zeroed = interpolate.interpolate(all_zeroed,dropped_frames,tube_steps,tube_steps[0],\n bin_size,len(tube_pos),h_correction=h_correction)\n b_zeroed = interpolate.interpolate(b_zeroed,dropped_frames,tube_steps,tube_steps[0],\n bin_size,len(tube_pos),h_correction=h_correction)\n\n \n c = all_zeroed.reshape([b.shape[0]/pixel_step,pixel_step,b.shape[-1]])\n frame_check = frame_check.reshape([b.shape[0]/pixel_step,pixel_step])\n frame_sum = frame_check.intg(axis=1)\n print `b.shape` + \"->\" + `c.shape`\n print 'Relative no of frames: ' + `frame_sum`\n\n # Output the starting data for external use\n\n if dumpfile is not None:\n dump_tube_intensities(dumpfile,raw=b_zeroed)\n if len(use_gains)==0: #we have to calculate them\n if c.shape[0] == 1: #can't be done, there is no overlap\n return None,None,None,None,None\n if do_sum:\n # sum the individual unoverlapped sections. Reshape is required as the\n # intg function removes the dimension\n d = c.intg(axis=1).reshape([c.shape[0],1,c.shape[2]]) #array of [rangeno,stepno,tubeno]\n # normalise by the number of frames in each section\n else:\n d = c #no op\n # Note gumpy can't do transposes of more than two axes at once\n e = d.transpose((2,0)) #array of [tubeno,stepno,section]\n e = e.transpose((1,2)) #array of [tubeno,section,stepno]\n print \"Data shape: \" + repr(e.shape)\n print \"Check shape: \" + repr(frame_sum.shape)\n # create the mask: any values of zero are assumed to be incorrect and masked out\n pixel_mask = array.ones_like(e[ignore:])\n for one_tube in range(len(e[ignore:])):\n if not e[ignore+one_tube].any(): #all zero\n pixel_mask[one_tube] = 0 #mask it out\n gain,dd,interim_result,residual_map,chisquared,oldesds,first_ave,weights = \\\n iterate_data(e[ignore:],iter_no=iterno,unit_weights=unit_weights,pixel_mask=pixel_mask)\n else: #we have been provided with gains\n gain = use_gains\n chisquared=0.0\n # calculate errors based on full dataset\n # First get a full model\n reshape_ds = b_zeroed.reshape([b.shape[0]/pixel_step,pixel_step,b.shape[-1]])\n start_ds = reshape_ds.transpose((2,0))[ignore:] #array of [tubeno,stepno,section]\n start_ds = start_ds.transpose((1,2))\n start_var = start_ds.var\n\n # Our new pixel mask has to have all of the steps in\n\n pixel_mask = array.ones_like(start_ds)\n for one_tube in range(len(start_ds)):\n if not start_ds[one_tube].any(): #all zero\n pixel_mask[one_tube] = 0 #mask it out\n\n # Normalise gains so that average is 1.0\n\n gain = gain*len(gain)/gain.sum()\n model,wd,model_var,esds = overlap.apply_gain(start_ds,1.0/start_var,gain,\n calc_var=True,bad_steps=dropped_frames,pixel_mask=pixel_mask)\n\n # model and model_var have shape tubeno*pixel_step + no_steps (see shift_tube_add_new)\n\n print 'Have full model and errors at %f' % time.clock()\n\n # step size could be less than pixel_step if we have a short non-overlap scan\n\n real_step = pixel_step\n if len(tube_steps)< pixel_step:\n real_step = len(tube_steps)\n # and we have to prune the output data too\n holeless_model = zeros([real_step*start_ds.shape[0]])\n holeless_var = zeros_like(holeless_model)\n for tube_set in range(start_ds.shape[0]):\n holeless_model[tube_set*real_step:(tube_set+1)*real_step]=model[tube_set*pixel_step:(tube_set+1)*pixel_step] \n 
holeless_var[tube_set*real_step:(tube_set+1)*real_step]=model_var[tube_set*pixel_step:(tube_set+1)*pixel_step] \n model = holeless_model\n model_var = holeless_var\n cs = Dataset(model)\n cs.var = model_var\n\n # Now build up the important information\n\n cs.title = ds.title\n cs.copy_cif_metadata(ds)\n\n # construct the axes\n\n if exact_angles is None or do_interp:\n axis = arange(len(model))\n new_axis = axis*bin_size + ds.axes[0][0] + ignore*pixel_step*bin_size\n if not do_interp:\n axis_string = \"\"\"Following application of gain correction, two theta values were recalculated assuming a step size of %8.3f \n and a tube separation of %8.3f starting at %f.\"\"\" % (bin_size,tubesep,ds.axes[0][0]+ignore*pixel_step*bin_size)\n else:\n axis_string = \"\"\"Gain correction was performed after interpolating observed values onto a\n regular angular grid with a step size of %8.3f and a tube separation of %8.3f starting at %f.\"\"\" % (bin_size,tubesep,ds.axes[0][0]+ignore*pixel_step*bin_size)\n else:\n new_axis = calculate_average_angles(tube_steps,exact_angles,pixel_step,tubesep,\n extra_dummy=extra_dropped_frames)\n # Remove ignored tubes\n \n new_axis = new_axis[ignore*real_step:]\n \n axis_string = \\\n \"\"\"Following application of gain correction, two theta values were recalculated using a tube separation of \n%8.3f and the recorded positions of the lowest angle tube, and then adding an average of the \nangular corrections for the tubes contributing to each two theta position.\"\"\" % (tubesep)\n cs.set_axes([new_axis],anames=['Two theta'],aunits=['Degrees'])\n print 'New axis goes from %f to %f in %d steps' % (new_axis[0],new_axis[-1],len(new_axis))\n print 'Total %d points in output data' % len(cs)\n # prepare info for CIF file\n import math\n detno = map(lambda a:\"%d\" % a,range(len(gain)))\n gain_as_strings = map(lambda a:\"%.4f\" % a,gain)\n gain_esd = [\"%.4f\" % a for a in esds]\n cs.harvest_metadata(\"CIF\").AddCifItem((\n ((\"_[local]_detector_number\",\"_[local]_refined_gain\",\"_[local]_refined_gain_esd\"),),\n ((detno,gain_as_strings,gain_esd),))\n )\n if len(use_gains)==0:\n info_string = \"After vertical integration between pixels %d and %d,\" % (bottom,top) + \\\n \"\"\" individual tube gains were iteratively refined using the Ford/Rollett algorithm (Acta Cryst. (1968) B24,293). \n Final gains are stored in the _[local]_refined_gain loop.\"\"\" + axis_string\n else:\n info_string = \"After vertical integration between pixels %d and %d,\" % (bottom,top) + \\\n \" individual tube gains were corrected based on a previous iterative refinement using the Ford/Rollett algorithm. 
The gains used\" + \\\n \"are stored in the _[local]_refined_gain loop.\" + axis_string\n cs.add_metadata(\"_pd_proc_info_data_reduction\",info_string,append=True)\n return cs,gain,esds,chisquared,c.shape[0]", "def get_overlap_metrics():\n return [DiceCoefficient(),\n JaccardCoefficient(),\n AreaUnderCurve(),\n CohenKappaMetric(),\n RandIndex(),\n AdjustedRandIndex(),\n InterclassCorrelation(),\n VolumeSimilarity(),\n MutualInformation()]", "def overlap(g, node_1, node_2):\n inter = len(set(nx.neighbors(g, node_1)).intersection(set(nx.neighbors(g, node_2))))\n return float(inter)", "def word_overlap_phi(t1, t2):\n overlap = set([w1 for w1 in t1.leaves() if w1 in t2.leaves()])\n return Counter(overlap)", "def GetOverlappingItems(self):\r\n\r\n area_bbox = self.area.GetBoundingBox()\r\n\r\n if hasattr(self.board, 'GetModules'):\r\n modules = self.board.GetModules()\r\n else:\r\n modules = self.board.GetFootprints()\r\n\r\n tracks = self.board.GetTracks()\r\n\r\n self.overlappings = []\r\n\r\n for zone in self.board.Zones():\r\n if zone.GetZoneName() != self.area.GetZoneName():\r\n if zone.GetBoundingBox().Intersects(area_bbox):\r\n self.overlappings.append(zone)\r\n\r\n for item in tracks:\r\n if (type(item) is pcbnew.PCB_VIA) and (item.GetBoundingBox().Intersects(area_bbox)):\r\n self.overlappings.append(item)\r\n if type(item) is pcbnew.PCB_TRACK:\r\n self.overlappings.append(item)\r\n\r\n for item in modules:\r\n if item.GetBoundingBox().Intersects(area_bbox):\r\n for pad in item.Pads():\r\n self.overlappings.append(pad)\r\n for zone in item.Zones():\r\n self.overlappings.append(zone)\r\n\r\n # TODO: change algorithm to 'If one of the candidate area's edges overlaps with target area declare candidate as overlapping'\r\n for i in range(0, self.board.GetAreaCount()):\r\n item = self.board.GetArea(i)\r\n if item.GetBoundingBox().Intersects(area_bbox):\r\n if item.GetNetname() != self.net:\r\n self.overlappings.append(item)", "def overlaps_graph(boxes1, boxes2):\n # 1. Tile boxes2 and repeate boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeate() so simulate it\n # using tf.tile() and tf.reshape.\n b1 = np.reshape(np.tile(np.expand_dims(boxes1, 1),\n [1, 1, np.shape(boxes2)[0]]), [-1, 4])\n b2 = np.tile(boxes2, [np.shape(boxes1)[0], 1])\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = np.split(b1, 4, axis=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = np.split(b2, 4, axis=1)\n y1 = np.maximum(b1_y1, b2_y1)\n x1 = np.maximum(b1_x1, b2_x1)\n y2 = np.minimum(b1_y2, b2_y2)\n x2 = np.minimum(b1_x2, b2_x2)\n intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area + b2_area - intersection\n # 4. 
Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = np.reshape(iou, [np.shape(boxes1)[0], np.shape(boxes2)[0]])\n return overlaps", "def crossing(self, *args):\n return self.overlap(*args, type='point')", "def compute_overlap(self, *skymaps):\n masked_skymaps = [self.mask_skymap(m, self.percent) for m in skymaps]\n joint_masked_skymaps = np.multiply(*masked_skymaps)\n return self.count_masked_pixel(joint_masked_skymaps)/np.amin([self.count_masked_pixel(m) for m in masked_skymaps])", "def compute_overlap_rate(box, boxes):\n # Calculate intersection areas\n\n x1 = np.maximum(box[0], boxes[:, 0])\n x2 = np.minimum(box[1], boxes[:, 1])\n intersection = np.maximum(x2 - x1, 0)\n boxes_area = boxes[:, 1] - boxes[:, 0]\n\n overlap = intersection/boxes_area\n\n return overlap", "def check_sim_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.sim_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.sim_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.sim_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.sim_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.sim_spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... no data?\")\n pass", "def calculate_overlap(self, r1, r2):\n\n # We know that reads that can be glued,\n # share at least half of their length.\n # Make sure one is not shorter than\n # the half of another.\n\n if len(r1) / 2 + len(r1) % 2 <= len(r2) \\\n and len(r2) / 2 + len(r2) % 2 <= len(r1):\n\n # prepare second halves for overlap pre-check\n\n tail1 = r1[len(r1) / 2:]\n tail2 = r2[len(r2) / 2:]\n \n # case 1: r1 contains r2 completely\n #\n # For example,\n #\n # ATCGCCGGAT\n # TCGCCGGA\n \n pos = r1.find(r2)\n if pos != -1:\n self.reads[r1].overlaps[r2] = pos + len(r2) - len(r1)\n \n # case 2: r2 contains r1 completely\n #\n # For example,\n #\n # TCGCCGGA\n # ATCGCCGGAT\n \n pos = r2.find(r1)\n if pos != -1:\n self.reads[r2].overlaps[r1] = pos + len(r1) - len(r2)\n \n # case 3: end of r1 overlaps with beginning of r2\n #\n # For example,\n #\n # ATCGCCGGAT\n # TCGCCGGATGC\n #\n # First check that at least half of r1 is in r2\n # If there is a match, calculate the expected length \n # of overlap and check if they indeed overlap.\n\n \n pos = r2.find(tail1)\n if pos != -1:\n overlap = pos + len(tail1)\n if r1[-overlap:] == r2[:overlap]:\n self.reads[r1].overlaps[r2] = len(r2) - overlap\n \n # case 4: end of r2 overlaps with beginning of r1\n #\n # For example,\n #\n # CGCCGGATCC\n # TCGCCGGAT\n #\n # First check that at least half of r2 is in r1\n # If there is a match, calculate the expected length \n # of overlap and check if they indeed overlap.\n \n pos = r1.find(tail2)\n if pos != -1: \n overlap = pos + len(tail2)\n if r2[-overlap:] == r1[:overlap]:\n self.reads[r2].overlaps[r1] = len(r1) - overlap", "def checkForOverlappingTasks(tasks, machines):\n for m in machines:\n compatibleTasks = []\n for t in tasks:\n if m == t.machine:\n 
compatibleTasks.append(t)\n slots = [] # time slot\n for ct in compatibleTasks:\n thisSlot = (ct.tBegin, ct.tEnd)\n if thisSlot not in slots:\n slots.append(thisSlot)\n # print(thisSlot)\n slots = sorted(slots)\n for s, slt in enumerate(slots[:-1]):\n for slt2 in slots[s+1:]:\n if slt[1] > slt2[0]:\n print(slt)\n print(slt2)\n return True\n return False", "def get_overlapping_indices(self):\n return self._get_atomic_overlaps()", "def findOverlapOrNearest(gs, ts, tree, start, end):\n #step 1, find overlaps\n rs = set()\n for i in range(start, end + 1):\n if i in gs:\n rs.add(gs[i])\n if len(rs) > 0:\n rs = list(rs)\n return rs, [0] * len(rs)\n #find the nearest one\n else:\n d, i = tree.query([(start + end) / 2], k=1)\n g = gs[ts[i][0]]\n #d = ts[i][0] - (start+end)/2\n d = int(d)\n return [g], [d]", "def temporal_intersection(track1, track2):\n\n b1, e1 = track1[[0, -1], 0]\n b2, e2 = track2[[0, -1], 0]\n\n b = max(b1, b2)\n e = min(e1, e2)\n\n inter = max(0, e - b + 1)\n\n return inter", "def rOverlap (x1, y1, w1, h1, x2, y2, w2, h2):\n if x1<=x2<=(x1+w1) or y1<=y2<=(y1+h1):\n return True\n elif x1<=(x2+w2)<=(x1+w1):\n return True\n else:\n return False", "def overlap(l1, l2):\n l1, l2 = list(l1), list(l2)\n l1.sort()\n l2.sort()\n lines = [l1, l2]\n lines.sort(key=lambda x: x[0])\n if lines[0][1] >= lines[1][0]:\n return True\n return False", "def overlap(self, a, b):\n return np.maximum(a, b)", "def _get_collisions(self):\n\n collisions = []\n for i in range(self.n_atoms):\n for j in range(i+1, self.n_atoms):\n if self._overlapping(self.atoms[i], self.atoms[j]):\n if not((i, j) in self.overlap):\n collisions.append((i, j))\n else:\n try:\n self.overlap.remove((i, j))\n except ValueError:\n pass\n\n for i, j in collisions:\n for entry in self.overlap:\n if i in entry or j in entry:\n self.overlap.remove(entry)\n\n self.overlap += collisions\n return collisions", "def overlaps(self, other):\n\n if self.ll.x >= other.ur.x:\n return False\n \n if self.ll.y >= other.ur.y:\n return False\n \n if self.ur.x <= other.ll.x:\n return False\n \n if self.ur.y <= other.ll.y:\n return False\n \n return True", "def overlap(p1: Tuple, p2: Tuple) -> bool:\n if (p2[1] - p1[0]) * (p2[0] - p1[1]) <= 0:\n return True\n else:\n return False", "def is_overlap(self, transposon):\n if self.first <= transposon.last <= self.last:\n return True\n elif self.first <= transposon.first <= self.last:\n return True\n else:\n return False", "def test_idx_overlap():\n # Base array\n arr = np.arange(10)\n\n # Test subset overlap\n idx = u.idx_overlap(arr, np.arange(5, 8))\n assert len(idx) == 3\n\n # Test complete overlap\n idx = u.idx_overlap(arr, np.arange(-5, 20))\n assert len(idx) == 8\n\n # Test partial right overlap\n idx = u.idx_overlap(arr, np.arange(5, 20))\n assert len(idx) == 4\n\n # Test partial left overlap\n idx = u.idx_overlap(arr, np.arange(-5, 5))\n assert len(idx) == 4\n\n # Test no overlap\n idx = u.idx_overlap(arr, np.arange(10, 20))\n assert len(idx) == 0", "def check_overlap(current, hit, overlap = 200):\n for prev in current:\n p_coords = prev[2:4]\n coords = hit[2:4]\n if get_overlap(coords, p_coords) >= overlap:\n return True\n return False", "def overlapping(x,y):\n for i in range(0,len(x)):\n for j in range(0,len(y)):\n if x[i] == y[j]:\n return True\n else:\n continue#reapet until finished all number in the list\n return False", "def check_recon_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'recon_spec'):\n for i, spectrum in enumerate(self.recon_spec):\n if 
verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.recon_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.recon_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.recon_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.recon_spec[spectrum]._add_to_overlapping_filters(filtername)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... no data?\")\n pass", "def span_overlap(a: Tuple[int, int], b: Tuple[int, int]) -> bool:\n return not (a[0] > b[1] or a[1] < b[0])", "def intersection(self, axis2):", "def compute_overlap(*skymaps):\n # Simply sum over all pixels\n # NOTE To avoid under/over-flow, add the log of pdfs then exponentiate\n _out = np.zeros_like(skymaps[0])\n for skymap in skymaps:\n _out += np.log(skymap)\n return np.nansum(np.exp(_out))", "def get_instance_overlap(kt_mod, kh_mod, kth_mod):\n kt = get_number_of_instances(kt_mod)\n kh = get_number_of_instances(kh_mod)\n kth = get_number_of_instances(kth_mod)\n if kh == 0 or kt == 0 or kth == 0:\n return 0\n else: \n return 1 - (kth - kt) / kh", "def _bbox_overlap(self, other):\n reg0 = self.bbox\n reg1 = other.bbox\n return (reg0[0] <= reg1[2] and reg1[0] <= reg0[2] and\n reg0[1] <= reg1[3] and reg1[1] <= reg0[3])", "def window_index(serieslength,windowsize,overlap):\r\n\r\n p1=0\r\n p2=p1 + windowsize\r\n pt1=[p1]\r\n pt2=[p2]\r\n while p2 < serieslength:\r\n p1 = p2 - overlap\r\n p2 = min((p1 + windowsize, serieslength))\r\n pt1.append(p1)\r\n pt2.append(p2)\r\n \r\n return pt1, pt2", "def get_synset_overlap(sentence_a, sentence_b):\n def synsets(word):\n sense_lemmas = []\n for pos in ('n'):#,'a'):\n for i in xrange(5):\n try:\n sense_lemmas += [lemma.name \n for lemma in wn.synset('{0}.{1}.0{2}'.format(word, pos, i)).lemmas]\n except WordNetError: \n pass\n return sense_lemmas\n\n a_set = set(lemma for word in sentence_a for lemma in synsets(word))\n b_set = set(lemma for word in sentence_b for lemma in synsets(word))\n score = len(a_set&b_set)/float(len(a_set|b_set))\n \n return score" ]
[ "0.70222414", "0.6725737", "0.66626245", "0.6478671", "0.6425438", "0.6360582", "0.63547766", "0.63308674", "0.63305753", "0.6300792", "0.6285758", "0.6278362", "0.6266364", "0.62542456", "0.62484795", "0.6247598", "0.614467", "0.613606", "0.61254627", "0.6118002", "0.6102571", "0.59964085", "0.59964085", "0.59964085", "0.5995658", "0.5987914", "0.59875274", "0.5986674", "0.5967984", "0.59564435", "0.5948885", "0.59356636", "0.59264344", "0.59151256", "0.5914234", "0.591176", "0.5896833", "0.58822143", "0.5880712", "0.5869183", "0.58612007", "0.5861163", "0.58604777", "0.58570886", "0.58526397", "0.5834781", "0.58226484", "0.5821577", "0.58096886", "0.57926613", "0.5789783", "0.57844675", "0.5780949", "0.5778546", "0.57760125", "0.5772912", "0.5763242", "0.57509536", "0.574569", "0.57413805", "0.5733827", "0.5733514", "0.5725694", "0.57255846", "0.5718151", "0.57114375", "0.57082283", "0.57007164", "0.56982684", "0.5688017", "0.567517", "0.56509274", "0.56486994", "0.5647286", "0.5645552", "0.5640002", "0.5635422", "0.56226265", "0.5610432", "0.5602314", "0.55825925", "0.5576832", "0.5574414", "0.5573818", "0.5569495", "0.5559269", "0.55503833", "0.5546776", "0.5536908", "0.5535931", "0.5526861", "0.5516717", "0.551109", "0.55071485", "0.55022436", "0.550178", "0.54979604", "0.5497912", "0.5497359", "0.54954135" ]
0.68951005
1
Encodes a list of strings to a single string.
def encode(self, strs):
    if strs == []:
        return "null"
    return chr(257).join(strs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode_as_str(list_to_encode, sep = \"|\"):\n return sep.join([str(x) for x in list_to_encode])", "def _encode_list(source: list) -> bytes:\n result_data = b\"l\"\n\n for item in source:\n result_data += encode(item)\n\n return result_data + b\"e\"", "def stringer(list):\n\tstring = \"\"\n\tfor x in list:\n\t\tstring = string + str(x)\n\treturn string", "def encode(self, strs):\r\n return ''.join(len(x).to_bytes(4, byteorder='big').decode() + x for x in strs)", "def listtostring(self, charlist):\n s = \"\"\n for char in charlist:\n s += char\n return s", "def encode_list(L):\n return \"&\".join([\"%s=%s\" % (index, element) for index, element in enumerate(L)])", "def encode_list(value: list, inner_encoder: typing.Callable) -> bytes:\n return encode_vector_of_t(list(map(inner_encoder, value)))", "def encode(self, strs):", "def encode(self, strs):", "def unicode_list_to_str(u_code_list): #This is just a function for me. Has nothing to do with flask or anything, okay?\n out_list = \"\"\n for item in u_code_list:\n out_list = out_list + str(item) + \"-\"\n return out_list.rstrip(\"-\") #removes the extra '-' (i.e 2-3-4-1-)", "def encode(self, strs):\n even = 0\n odd = 1\n rst = ''\n while even<len(strs) and odd<len(strs):\n rst += strs[odd]\n rst += ','\n rst += strs[even]\n odd += 2\n even +=2\n if even<len(strs):rst+=strs[even]\n elif odd<len(strs): rst+=strs[odd]\n return rst", "def encode(strs):\n res = ''\n for string in strs.split():\n res += str(len(string)) + \":\" + string\n return res", "def list_to_str(input_str):\r\n\r\n return \" \".join([str(val) for val in input_str])", "def list2string(a_list):\n\n the_string = ''\n for elem in a_list:\n the_string += str(elem)\n return the_string", "def _char_list_to_string(char_list):\n ret = \"\"\n for i in char_list:\n ret+=i\n return ret", "def list_to_str(\n l: list,\n c: str,\n ) -> str:\n\n s = c.join(map(str, l))\n\n return s", "def listToString(L):\r\n S = ''\r\n for x in L:\r\n S += str(x)\r\n return S", "def list_to_str(list_to_convert):\n return ' '.join(to_str(item) for item in list_to_convert)", "def list_string(join_list):\n joined_list = '[{}]'.format(join_list, join_list)\n return joined_list", "def list_to_string(in_list):\n if not in_list:\n return \"[]\"\n else:\n return \"\\n- \" + \"\\n- \".join(in_list)", "def convert_list_to_unicode_str(data):\n string = ''\n for i, val in enumerate(data):\n # string = string + unicode(unichr(int(val)))\n string = string + str(int(val))\n return string", "def encode(self, strs):\n s = \"\"\n for i in strs:\n s += str(len(i)) + \"#\" + i\n return s", "def encode_strings(o):\n\tif isinstance(o, list):\n\t\treturn [encode_strings(x) for x in o]\n\tif isinstance(o, dict):\n\t\treturn {k.encode('utf-8'): encode_strings(v) for k, v in o.items()}\n\tif isinstance(o, unicode):\n\t\treturn o.encode('utf-8')\n\treturn o", "def _convertListToString(self, list_of_objects):\n return (';').join(list_of_objects)", "def encode1(arr):\n rv = ''\n for item in arr:\n if rv != '':\n rv += '+,'\n for char in item:\n if char == '+':\n rv += '++'\n else:\n rv += char\n return rv", "def encode(self, strs):\n se = ''\n for s in strs:\n se += str(len(s)) + ':' + s\n return se", "def encode(self, strs):\n encoded_str = \"\"\n for s in strs:\n encoded_str += \"%0*x\" % (8, len(s)) + s\n return encoded_str", "def join_list(items: Iterable[str]) -> str:\n\n return ITEM_SEPARATOR.join(items)", "def list_to_str(value, encode=None):\n result = []\n for index, v in enumerate(value):\n if isinstance(v, dict):\n 
result.append(dict_to_str(v, encode))\n continue\n\n if isinstance(v, list):\n result.append(list_to_str(v, encode))\n continue\n\n if encode:\n result.append(encode(v))\n else:\n result.append(v)\n\n return result", "def encode_commands(command_list: List[str]) -> List[str]:\n return ['-'.join(x.split(' ')) for x in command_list]", "def list_to_str(a_list):\n new_str = \"\"\n for item in a_list:\n item = str(item).replace(\"\\'\", \"\\'\\'\")\n if new_str:\n new_str += \", '\" + item + \"'\"\n else:\n new_str = \"'\" + item + \"'\"\n return new_str", "def create_list_string(list_):\n return f\"[{' '.join(list_)}]\"", "def listToStringFormat(self, list) ->str:\n string = ''\n for element in list:\n string = string + str(element) + \"\\n\"\n return string", "def encode_int_list(L):\n return str(L).replace(\" \", \"\")", "def escape_list(l):\n return [_escape_harlowe_html(item) if isinstance(item, text_type) else str(item) for item in l]", "def _reg_encode_utf16_list(self, xlist):\n t = '' \n for x in xlist: \n t += self._reg_encode_utf16(x + u'\\u0000') # null term \n t += self._reg_encode_utf16(u'\\u0000') # end of list (double null) \n return t", "def str_transform_list(L):\n return [str(x) for x in L]", "def encode_queue(self, queue):\n return b\"\".join(queue)", "def get_list_as_str(list_to_convert):\n return \", \".join([\"'{}'\".format(list_item) for list_item in list_to_convert])", "def listToString(s):\n # initialize an empty string\n str1 = \"\"\n\n # traverse in the string\n for ele in s:\n str1 += ele\n\n # return string\n return str1", "def encode(blocks_args):\n block_strings = []\n for block in blocks_args:\n block_strings.append(BlockDecoder._encode_block_string(block))\n return block_strings", "def encode(blocks_args):\n block_strings = []\n for block in blocks_args:\n block_strings.append(BlockDecoder._encode_block_string(block))\n return block_strings", "def ListToStr(val):\n return ''.join(['%c' % c for c in val])", "def encrypt_byte_list_in_str(bytearray_list, public_encryption_key_obj):\n encrypted_str_list = []\n for bytearray_str in bytearray_list:\n message_text_enc = public_encryption_key_obj.encrypt(str(bytearray_str.decode(\"utf-8\")), 16)[0]\n encrypted_str_list.append(message_text_enc)\n encrypted_message_str = \"\".join(encrypted_str_list)\n return encrypted_message_str", "def encode_base32_from_list(list_of_int: List[int]) -> str:\n data = BytesIO()\n for i in list_of_int:\n buf = b\"\"\n while True:\n towrite = i & 0x7f\n i >>= 7\n if i:\n buf += bytes((towrite | 0x80,))\n else:\n buf += bytes((towrite,))\n break\n data.write(buf)\n data.seek(0)\n return b32encode(data.read()).decode().replace('=', '')", "def stringify(List):\n if List is None:\n return ''\n if not List:\n return ''\n\n return '-'.join(str(elem) for elem in List)", "def convert_int_encoded_cards_to_str_encoded(cards: List[int]) -> List[str]:\n return [card_strings[i] for i in cards]", "def encode(self, obj):\n # type: (List[List[Any]]) -> str\n raise NotImplementedError()", "def to_string(student_list):\n student_info = \"\"\n for student in student_list:\n student_info += f\"{str(student)}\\n\"\n return student_info", "def EncodePOSIXShellList(lst):\n\n encoded_arguments = []\n for argument in lst:\n encoded_arguments.append(EncodePOSIXShellArgument(argument))\n return ' '.join(encoded_arguments)", "def string_encoder(string: str) -> List[bytes]:\n return [w.encode(\"utf-8\") for w in string.split()]", "def _toStr(toList):\n\n names = [formataddr(i) for i in zip(*toList)]\n return ', 
'.join(names)", "def join_str(lst, new_line=False):\n if new_line:\n j_str = \"/n\".join([str(i) for i in lst])\n else:\n j_str = \"\".join([str(i) for i in lst])\n return j_str", "def list_to_str( L ):\n if len(L) == 0: return ''\n return L[0] + list_to_str( L[1:] )", "def list_to_str( L ):\n if len(L) == 0: return ''\n return L[0] + list_to_str( L[1:] )", "def encode(self, o):\n # This is for extremely simple cases and benchmarks.\n if isinstance(o, basestring):\n if isinstance(o, str):\n _encoding = self.encoding\n if (_encoding is not None\n and not (_encoding == 'utf-8')):\n o = o.decode(_encoding)\n if self.ensure_ascii:\n return encode_basestring_ascii(o)\n else:\n return encode_basestring(o)\n # This doesn't pass the iterator directly to ''.join() because the\n # exceptions aren't as detailed. The list call should be roughly\n # equivalent to the PySequence_Fast that ''.join() would do.\n chunks = self.iterencode(o, _one_shot=True)\n if not isinstance(chunks, (list, tuple)):\n chunks = list(chunks)\n return ''.join(chunks)", "def list_to_sentence(self, list):\n sentence = \"\\n\"\n for i in range(0, len(list)):\n if i == len(list) - 1:\n sentence += \"'\" + list[i] + \"'\"\n else:\n sentence += \"'\" + list[i] + \"'\\n\"\n return sentence", "def urlEncode(s):\n\treturn string.join(map(lambda c: _urlEncode[c], list(s)), '')", "def list_to_string(list):\n if len(list) == 1:\n string = '{}x1'.format(list[0])\n elif list[1:] == list[:-1]:\n string = '{}x{}'.format(list[1], len(list))\n else:\n string = ''\n for i in range(len(list) - 1):\n string += str(list[i]) + ','\n string += str(list[-1])\n return string", "def encoder(list_of_str, key):\n tokenized = self.tokenizer.encode_commands(list_of_str)\n hidden = self.tokenizer.tokenize(tokenized)\n hidden = hidden.permute(1, 0, 2).reshape(hidden.size(1), -1) # correct for bididrectional\n return hidden", "def bytearray_join(glue, list_of_barrays):\n res = list_of_barrays[0]\n for i in range(1,len(list_of_barrays)):\n res += glue + list_of_barrays[i]\n return res", "def intern_list(l):\n out = []\n for item in l:\n if isinstance(item, StringTypes):\n item = intern(item.encode(\"utf8\"))\n out.append(item)\n return out", "def convertListToString(list):\n return re.sub(r'[^\\w ]', '', str(list))", "def string_list(out, name, items):\n print(f\"const char* const {name}[] = {{\", file=out)\n for item in items:\n print(f\" \\\"{item}\\\",\", file=out)\n print(\" nullptr,\", file=out)\n print(\"};\", file=out)\n print(\"\", file=out)\n pass", "def encode_queue(self, queue):\n return \"[\" + \",\".join(queue) + \"]\"", "def encode(entries: list, separator_type_id=0xff) -> bytes:\n if not isinstance(entries, list) and not isinstance(entries, EntryList):\n raise ValueError('The parameter entries must be of type list')\n result = b''\n last_type_id = None\n for entry in entries:\n if not isinstance(entry, Entry):\n raise ValueError('The parameter entries must only contain elements of type tlv8.Entry')\n if entry.type_id == separator_type_id:\n raise ValueError('Separator type id {st} occurs with list of entries!'.format(st=separator_type_id))\n if last_type_id == entry.type_id:\n # must insert separator of two entries of the same type succeed one an other\n result += pack('<B', separator_type_id) + b'\\x00'\n result += entry.encode()\n last_type_id = entry.type_id\n return result", "def listToString(s):\n # initialize an empty string\n str1 = \"\"\n\n # traverse in the string\n for ele in s:\n try:\n str1 = str1 + \" \" + ele\n except:\n pass\n\n # 
return string\n return str1", "def join(self, iterable) -> String:\n pass", "def join_and_sanitize(list_):\n if isinstance(list_, str):\n return list_\n\n new_list = []\n for item in list_:\n if isinstance(item, str):\n new_list.append(item)\n elif isinstance(item, int):\n new_list.append(str(item))\n elif isinstance(item, float):\n new_list.append(str(item))\n elif isinstance(item, unicode):\n new_list.append(str(item))\n else:\n raise Exception('Invalid type when attempting to join and sanitize')\n\n return ' '.join(new_list)", "def get_keywords_as_string(keywords):\r\n if type(keywords) is list:\r\n keywords = ', '.join(keywords)\r\n if type(keywords) is str:\r\n final_keywords = keywords\r\n elif type(keywords) is unicode:\r\n final_keywords = keywords.encode('utf-8')\r\n elif keywords is None:\r\n final_keywords = \"\"\r\n else:\r\n raise TypeError(\"keywords argument must be a string or a list of strings; got a %s\" % type(keywords))\r\n return final_keywords", "def encode(self, o):\r\n # This is for extremely simple cases and benchmarks.\r\n if isinstance(o, binary_type):\r\n _encoding = self.encoding\r\n if (_encoding is not None and not (_encoding == 'utf-8')):\r\n o = o.decode(_encoding)\r\n if isinstance(o, string_types):\r\n if self.ensure_ascii:\r\n return encode_basestring_ascii(o)\r\n else:\r\n return encode_basestring(o)\r\n # This doesn't pass the iterator directly to ''.join() because the\r\n # exceptions aren't as detailed. The list call should be roughly\r\n # equivalent to the PySequence_Fast that ''.join() would do.\r\n chunks = self.iterencode(o, _one_shot=True)\r\n if not isinstance(chunks, (list, tuple)):\r\n chunks = list(chunks)\r\n if self.ensure_ascii:\r\n return ''.join(chunks)\r\n else:\r\n return u''.join(chunks)", "def encode(sourcelist,code):\n answer = \"\"\n for s in sourcelist:\n co = find(lambda p: p.name == s, code)\n if ( not co ):\n import sys\n print >> sys.stderr, \"Warning: symbol\",`s`,\"has no encoding!\"\n pass\n else:\n answer = answer + co.word\n pass\n return answer", "def convert_list_to_string(key, data, errors, context): # noqa\n value = data.get(key, None)\n\n if not value:\n return\n\n if not isinstance(value, list):\n return\n\n data[key] = '{' + ','.join(map(str, value)) + '}'", "def numList2String(l):\n\treturn ''.join(map(chr, l))", "def encode_map(value: list) -> bytes:\n raise NotImplementedError()", "def _valuelistToBytestring(valuelist, numberOfRegisters):\n MINVALUE = 0\n MAXVALUE = 65535\n\n _checkInt(numberOfRegisters, minvalue=1, description='number of registers')\n\n if not isinstance(valuelist, list):\n raise TypeError('The valuelist parameter must be a list. 
Given {0!r}.'.format(valuelist))\n\n for value in valuelist:\n _checkInt(value, minvalue=MINVALUE, maxvalue=MAXVALUE, description='elements in the input value list')\n\n _checkInt(len(valuelist), minvalue=numberOfRegisters, maxvalue=numberOfRegisters, \\\n description='length of the list')\n\n numberOfBytes = _NUMBER_OF_BYTES_PER_REGISTER * numberOfRegisters\n\n bytestring = ''\n for value in valuelist:\n bytestring += _numToTwoByteString(value, signed=False)\n\n assert len(bytestring) == numberOfBytes\n return bytestring", "def join(self, values):\n values = [self.unpack(v) for v in ensure_list(values)]\n return self.pack(values)", "def encode_strings(self):\n self.version = u2b_if_py2(self.version)\n self.short = u2b_if_py2(self.short)\n self.description = u2b_if_py2(self.description)\n self.destination = [u2b_if_py2(m) for m in self.destination]", "async def deliver_card(list_of_lists: List[List[str]]) -> str:\n\n final_string = []\n for sublist in list_of_lists:\n final_string.append('\\u200A'.join(sublist))\n\n # add blank emoji to first line to accommodate compact mode w/o resizing emojis\n return '<:blank:589560784485613570>\\n' + '\\n'.join(final_string)", "def encode(self, seq):", "def JoinList(LIST):\r\n if type(LIST) == list:\r\n out = ', '.join(LIST)\r\n elif type(LIST) == str:\r\n out = LIST\r\n return out", "def join(self, iterable):\n result = ANSIString(\"\")\n last_item = None\n for item in iterable:\n if last_item is not None:\n result += self._raw_string\n if not isinstance(item, ANSIString):\n item = ANSIString(item)\n result += item\n last_item = item\n return result", "def _encode_message_set(cls, messages):\n message_set = []\n for message in messages:\n encoded_message = KafkaProtocol._encode_message(message)\n message_set.append(struct.pack('>qi%ds' % len(encoded_message), 0,\n len(encoded_message),\n encoded_message))\n return b''.join(message_set)", "def print_string_list(string_list):\n\tfinal_string = \"\"\n\n\tfor string in string_list:\n\t\tfinal_string += string + \" \"\n\n\treturn final_string", "def _stringify(iterable: Iterable, joinable: str = \"\\n\") -> str:\n return joinable.join(json.dumps(doc, default=json_util.default) for doc in iterable)", "def l2s(l):\n return ''.join(l)", "def list_stringify(inlist):\n outlist = []\n for item in inlist:\n if not isinstance(item, list):\n if not isinstance(item, str):\n thisitem = str(item)\n else:\n thisitem = item\n else:\n thisitem = list_stringify(item)\n outlist.append(thisitem)\n return outlist", "def enc(elements):\n encoded = ''\n for key, dtype, value in elements:\n binary = enc_elem(dtype, value)\n encoded += struct.pack('>HBH', key, dtype, len(binary)) + binary\n return encoded", "def _encode(timestamps: List[int], encoder: Encoder, encoding: Encoding) -> Encoding:\n enc = encoder(timestamps[0])\n tss = [enc.encode(ts) for ts in timestamps[1:]]\n return encoding(timestamps[0], tss)", "def encode(self, o):\n # This is for extremely simple cases and benchmarks...\n if isinstance(o, basestring):\n if isinstance(o, str):\n _encoding = self.encoding\n if (_encoding is not None \n and not (_encoding == 'utf-8' and _need_utf8)):\n o = o.decode(_encoding)\n return encode_basestring_ascii(o)\n # This doesn't pass the iterator directly to ''.join() because it\n # sucks at reporting exceptions. 
It's going to do this internally\n # anyway because it uses PySequence_Fast or similar.\n chunks = list(self.iterencode(o))\n return ''.join(chunks)", "def join_list(jlist, joiner=', '):\n if len(jlist) == 0:\n jlist = '[]'\n else:\n jlist = joiner.join(jlist)\n return jlist", "def encode_vector_of_t(value: list):\n return encode_u32(len(value)) + bytes([i for j in value for i in j])", "def num_to_str(numList):\n\n\tresult = ''\n\tfor num in numList:\n\t\tresult += str(num)\n\treturn result", "def _list2str(self, data, delimiter=\",\", classify=lambda x: x):\n res = \"\"\n for i in range(len(data)):\n res += classify(data[i])\n if i != len(data) - 1:\n res += delimiter + \" \"\n return res", "def list_str(lis):\r\n as_str = \"\"\r\n for item in lis:\r\n as_str += \" \" + str(item) + \",\"\r\n return as_str[:-1]", "def listToStr(lst):\n return ','.join(lst)", "def list_join(the_list):\n return ' '.join(the_list)", "def process_list(a_list: list):\n\n return ', '.join(str(s) for s in a_list) if a_list else Presenter.DEFAULT", "def stringify(self, value):\n if isinstance(value, list):\n return \", \".join(value)\n else:\n return str(value)", "def base64_encode_array(inArray):\n return base64.b64encode(inArray)" ]
[ "0.7459969", "0.74273384", "0.7255417", "0.6790711", "0.67658377", "0.66986156", "0.6680612", "0.6643305", "0.6643305", "0.65740526", "0.6573746", "0.6484516", "0.6398631", "0.6318684", "0.63106364", "0.628163", "0.62610334", "0.62113565", "0.62003165", "0.6199526", "0.6192243", "0.6165363", "0.6148294", "0.61314744", "0.6131046", "0.61162215", "0.6098301", "0.60955316", "0.60897946", "0.60729337", "0.6063809", "0.6060903", "0.6019881", "0.6015104", "0.6014509", "0.6012305", "0.6008111", "0.5982801", "0.5969048", "0.59442574", "0.5938981", "0.5938981", "0.59149665", "0.58947814", "0.5890197", "0.587099", "0.5869343", "0.5839435", "0.5831696", "0.58064973", "0.5799336", "0.5798512", "0.57765913", "0.5767193", "0.5767193", "0.57495874", "0.5705027", "0.57032883", "0.5694359", "0.5688175", "0.5683872", "0.5680268", "0.56599253", "0.565083", "0.56290054", "0.56267005", "0.56206656", "0.55623466", "0.55507946", "0.5550293", "0.5544197", "0.5539542", "0.55391383", "0.55319643", "0.55262303", "0.5512621", "0.551143", "0.55107546", "0.5510385", "0.55024344", "0.550059", "0.5500227", "0.5500088", "0.5491247", "0.54883087", "0.5487295", "0.54827636", "0.5476229", "0.5470515", "0.5468355", "0.5464467", "0.54627466", "0.5456117", "0.54519844", "0.54519385", "0.54515576", "0.54463947", "0.5434909", "0.5432349", "0.5430452" ]
0.7258633
2
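Several of the negatives in the row above implement the same idea in different ways: serialize a list of strings into one string by prefixing each element with its length (e.g. str(len(s)) + ":" + s). A minimal, self-contained sketch of that length-prefix scheme, written purely as illustration and not taken from any single entry in the list:

def encode(strs):
    # length-prefix each element so the payload may contain any character
    return "".join(f"{len(s)}:{s}" for s in strs)

def decode(data):
    out, i = [], 0
    while i < len(data):
        j = data.index(":", i)            # end of the length field
        n = int(data[i:j])                # payload length
        out.append(data[j + 1:j + 1 + n])
        i = j + 1 + n
    return out

assert decode(encode(["a:b", "", "xyz"])) == ["a:b", "", "xyz"]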
Decodes a single string to a list of strings.
def decode (self, s):
    if s == "null":
        return []
    return s.split(chr(257))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode(self, s):\n lststr = s.split(',')\n if s=='': return []\n rst = []\n for i in range(len(lststr)):\n rst.append(lststr[i])\n return rst", "def parse_string_list(data):\n txt = data.decode()\n x = ast.literal_eval(txt)\n return x", "def _decode_list(data: BencodedString) -> list:\n result_list = []\n data.del_prefix(1)\n\n while True:\n if data.bytes:\n if data.bytes[0] != END_MARKER:\n result_list.append(_decode(data))\n else:\n data.del_prefix(1)\n break\n else:\n raise ValueError(\n \"Cannot decode a list, reached end of the bencoded string \"\n \"before the end marker was found. Most likely the bencoded \"\n \"string is incomplete or incorrect.\"\n )\n\n return result_list", "def _strings_to_list(one_or_more_strings):\n if isinstance(one_or_more_strings, str):\n return [one_or_more_strings]\n else:\n return list(one_or_more_strings)", "def decode(self, s):\n res = []\n i, j, length = 0, 0, len(s)\n while i < length:\n if s[j] == ':':\n num = int(s[i:j])\n res.append('' + s[j+1:j+1+num])\n i = j+1+num\n j = j+1+num\n else:\n j+=1\n return res", "def str2list(input):\n if isinstance(input, str):\n return [input]\n\n else:\n return input", "def string_to_list(string):\r\n al_dict = {'a':0, 'b':1, 'c':2, 'd':3, 'e':4, 'f':5, 'g':6, 'h':7, 'i':8, \r\n 'j':9, 'k':10, 'l':11, 'm':12, 'n':13, 'o':14, 'p':15, 'q':16, \r\n 'r':17, 's':18, 't':19, 'u':20, 'v':21, 'w':22, 'x':23, 'y':24, \r\n 'z':25}\r\n \r\n out = list(string)\r\n \r\n for i in range(len(string)):\r\n out[i] = al_dict[out[i]]\r\n \r\n return out", "def str2list(string):\n return [string[i:i + 2] for i in range(0, len(string), 2)]", "def string_to_list(string: str, sep: str):\n return string.strip(\"][\").split(sep)", "def string_list(s):\n\n if not isinstance(s, str):\n raise ValueError(f\"Not a string: {s!r}\")\n return [p for p in [part.strip() for part in s.split(\",\")] if p]", "def test_string_to_list_string(self):\n assert_equals(\n str_to_list('a, b, c'),\n ['a', 'b', 'c']\n )", "def _convert_str_to_list(cls, v: Union[List[str], str]) -> List[str]:\n if isinstance(v, str):\n return v.split(\",\")\n return v # cov: ignore", "def test_string_to_list_string(self):\n\n assert_equals(\n str_to_list('a, b, c'),\n ['a', 'b', 'c']\n )", "def decode(self, s):\n i = 0\n strs = []\n while i < len(s):\n l = int(s[i:i+8], 16)\n strs.append(s[i+8:i+8+l])\n i += 8+l\n return strs", "def from_str(cls, string):\n # If quotes are found, parse it as a Python string literal after adding\n # brackets around\n if '\"' in string or \"'\" in string:\n string = '[' + string + ']'\n l = ast.literal_eval(string)\n return [str(x) for x in l]\n # Otherwise, just split on commas\n else:\n return string.split(',')", "def explode(_string):\n if not _string or not isinstance(_string, str):\n return _string\n else:\n return list(_string)", "def convert_string_to_list(string_val):\n result_list = []\n\n list_string = string_val.split(',')\n for val in list_string:\n val = str(val.strip())\n val = val.replace(\"(\", \"\")\n val = val.replace(\")\", \"\")\n val = val.replace(\"L\", \"\")\n val = val.replace(\"[\", \"\")\n val = val.replace(\"]\", \"\")\n if val not in (\"\", \"None\"):\n result_list.append(int(val))\n\n return result_list", "def string_to_list(s):\n return list(filter(lambda x: x, s.strip().split(' ')))", "def decode_chain_list(in_bytes):\n bstrings = numpy.frombuffer(in_bytes, numpy.dtype('S' + str(mmtf.utils.constants.CHAIN_LEN)))\n return [s.decode(\"ascii\").strip(mmtf.utils.constants.NULL_BYTE) for s in bstrings]", "def 
cs_string_to_typed_list(cs_str: str, sep=\",\", type_conv_fcn=float):\n try:\n list_strings = cs_str.split(sep)\n if all(map(lambda s: s.strip() == '', cs_str.split(sep))):\n # we are getting a list of empty strings we return [] and do not print warning\n return []\n return list([type_conv_fcn(x) for x in list_strings])\n except:\n warnings.warn('Could not convert string {s} to a typed list'.format(s=cs_str))\n return []", "def __ui_convert_ids_string_to_list(string_of_ids):\n if string_of_ids == \"\":\n return []\n string_of_ids = string_of_ids.strip()\n string_of_ids = string_of_ids.replace(\",\", \" \")\n\n done = False\n while not done:\n if string_of_ids.find(\" \") == -1:\n done = True\n else:\n string_of_ids = string_of_ids.replace(\" \", \" \")\n list_of_ids = string_of_ids.split(\" \")\n for id_index in range(len(list_of_ids)):\n list_of_ids[id_index] = int(list_of_ids[id_index])\n return list_of_ids", "def format_string_to_list(self, avi_string):\n\n repls = ('[', ''), (']', ''), (\"'\", \"\")\n avi_string = reduce(lambda a, kv: a.replace(*kv), repls, avi_string)\n return avi_string.split(',')", "def decode_list(data):\n return_value = []\n for item in data:\n if isinstance(item, unicode):\n item = item.encode('utf-8')\n elif isinstance(item, list):\n item = decode_list(item)\n elif isinstance(item, dict):\n item = decode_dict(item)\n return_value.append(item)\n return return_value", "def test_string_to_list(self):\n self.assertEqual([1, 2, 3], string_to_list('1-3'))\n self.assertEqual([6, 7, 8, 9, 10], string_to_list('6-10'))", "def decode(string,root):\n ## split the string into a list\n ## then copy the elements of the list one by one.\n answer = []\n clist = list( string )\n ## start from root\n currentnode = root\n for c in clist:\n if ( c=='\\n' ): continue ## special case for newline characters\n assert ( c == '0' )or( c == '1')\n currentnode = currentnode[int(c)]\n if isinstance( currentnode , str ) :\n answer.append( currentnode )\n currentnode = root\n pass\n assert (currentnode == root) ## if this is not true then we have run out of characters and are half-way through a codeword\n return answer", "def decode1(s):\n rv = []\n idx = 0\n item = ''\n while True:\n try:\n if s[idx:idx+2] == '+,':\n rv.append(item)\n item = ''\n idx += 2\n elif s[idx:idx+2] == '++':\n item += '+'\n idx += 2\n else:\n item += s[idx]\n idx += 1\n except IndexError:\n rv.append(item)\n break\n return rv", "def decode_list(as_bytes: typing.List[int], inner_decoder: typing.Callable) -> list:\n raise NotImplementedError()", "def strToStrList(x):\n if type(x)==str:\n return x[2:-2].split(\"', '\")", "def string_to_list(value: str, intify: bool = False) -> Union[List[str], List[int]]:\n if not value:\n return [] # type: ignore[return-value]\n if value.startswith(\"[\") and value.endswith(\"]\"):\n value = value[1:-1]\n result = []\n for p in value.split(\",\"):\n p = p.strip()\n if p.startswith(\"'\") and p.endswith(\"'\"):\n p = p[1:-1]\n if p.startswith('\"') and p.endswith('\"'):\n p = p[1:-1]\n p = p.strip()\n if intify:\n p = int(p) # type: ignore[assignment]\n result.append(p)\n return result", "def _string_to_list(self, string):\n try:\n new_value = literal_eval(string)\n if isinstance(new_value, tuple):\n new_value = list(new_value)\n elif not isinstance(new_value, list):\n raise SyntaxError\n if not all(isinstance(i, int) for i in new_value):\n raise SyntaxError\n except (SyntaxError, ValueError):\n raise InvalidFieldValueError(\n f\"Value of field {self.field_nickname} must be a list of 
integers, e.g. [1, 2, 3, ...]\"\n )\n return new_value", "def strToList(S):\r\n if len(S) == 0: return []\r\n return [S[0]] + strToList(S[1:])", "def convert_string_to_list(key, data, errors, context): # noqa\n value = data.get(key, None)\n\n if not value:\n return\n\n if not isinstance(value, basestring):\n return\n\n if not value.startswith('{') or not value.endswith('}'):\n return\n\n value = value.replace('\"', '')\n data[key] = value[1:len(value)-1].split(',')", "def strToList(x):\n if type(x)==str:\n return x[2:-2].split(\"', '\")", "def decode_int_list(L):\n return [] if L == '[]' else [int(a) for a in L[1:-1].split(\",\")]", "def decode_objects(string, decode):\n\tints = json.loads(string)\n\treturn {decode(i) for i in ints}", "def to_list(value):\n if hasattr(value, '__iter__') and not isinstance(value, str):\n return list(value)\n return [value]", "def str2list(parser: Callable[[str], Any]) -> Callable[[str], List[Any]]:\n\n def _parse(string: str) -> List[Any]:\n return [parser(entry) for entry in string.split()]\n\n return _parse", "def decode_list(self, tokens: list) -> str:\r\n return NotImplementedError", "def decode(self, s):\n o = self._decoder.decode(s)\n return o", "def str_list_works(x):\n import ast\n x = ast.literal_eval(x)\n x = [n.strip() for n in x]\n return (x)", "def StrToList(val):\n return [ord(c) for c in val]", "def decode(self, s):", "def decode(self, s):", "def cdd_convert(string, field=self.field()):\n return [field(x) for x in string.split()]", "def buf_to_list(buf):\r\n buf_stripped = buf.raw.decode().rstrip('\\x00')\r\n# for ch in buf_stripped:\r\n# if (ch == '0') or (ch == '\\t') or (ch == '\\n'):\r\n# name = name.rstrip(',')\r\n# if len(name) > 0:\r\n# namelist.append(name)\r\n# name = ''\r\n# if ch == '\\000':\r\n# break\r\n# else:\r\n# name += ch\r\n#\r\n# return namelist\r\n return buf_stripped.split(', ')", "def stringInputToList(x):\n return list(filter(None, [y.strip() for y in x.split(',')]))", "def string_encoder(string: str) -> List[bytes]:\n return [w.encode(\"utf-8\") for w in string.split()]", "def split(self, string):\n if self.chars:\n return list(string)\n else:\n return string.split(' ')", "def _parseVec(self, str):\r\n\t\tvec = []\r\n\t\tsplt = str.split()\r\n\t\tfor i in range(0,len(splt)):\r\n\t\t\tvec.append(self._parseNumber(splt[i]))\r\n\t\treturn vec", "def decode(data: bytes) -> Iterable:\r\n decoder = Decoder(data)\r\n return decoder.decode()", "def decode_sequence(self, sequence=list) -> str:\n try:\n out = []\n for word in sequence:\n out.append(self.decode(word))\n return(out)\n except Exception as error:\n print(f\"Error: self.decode_sequence({sequence}) -> {error}\")", "def convert_unicode_field(string):\n values = []\n for codepoint in [s for s in string.split(DATA_FILE_CODEPOINT_SEPARATOR) if (s != DATA_FILE_VALUE_NOT_AVAILABLE) and (len(s) > 0)]:\n values.append(u\"\".join([hex_to_unichr(c) for c in codepoint.split(DATA_FILE_CODEPOINT_JOINER)]))\n return values", "def _bytestringToValuelist(bytestring, numberOfRegisters):\n _checkInt(numberOfRegisters, minvalue=1, description='number of registers')\n numberOfBytes = _NUMBER_OF_BYTES_PER_REGISTER * numberOfRegisters\n _checkString(bytestring, 'byte string', minlength=numberOfBytes, maxlength=numberOfBytes)\n\n values = []\n for i in range(numberOfRegisters):\n offset = _NUMBER_OF_BYTES_PER_REGISTER * i\n substring = bytestring[offset : offset + _NUMBER_OF_BYTES_PER_REGISTER]\n values.append(_twoByteStringToNum(substring))\n\n return values", "def zsplit(s: str) -> 
List[str]:\n s = s.strip(\"\\0\")\n if s:\n return s.split(\"\\0\")\n else:\n return []", "def from_input_to_list(inputted_string):\n\n created_list = [int(i) for i in inputted_string]\n\n return created_list", "def decode(string_list):\n assert isinstance(string_list, list)\n blocks_args = []\n for block_string in string_list:\n blocks_args.append(BlockDecoder._decode_block_string(block_string))\n return blocks_args", "def decode(string_list):\n assert isinstance(string_list, list)\n blocks_args = []\n for block_string in string_list:\n blocks_args.append(BlockDecoder._decode_block_string(block_string))\n return blocks_args", "def decode(self, s):\n i, str = 0, []\n while i < len(s):\n sharp = s.find(\"#\", i)\n l = int(s[i:sharp])\n str.append(s[sharp + 1:sharp + l + 1])\n i = sharp + l + 1\n return str", "def _to_list(self, file_str):\n data_list = file_str.split()\n return data_list", "def _parse_aux_codes(cls, aux_codes_str: str) -> list[str]:\n try:\n tokens = cls._auxiliary_codes.parseString(aux_codes_str)\n except pyparsing.ParseException as exc:\n raise ValueError(\n f\"Could not parse: {aux_codes_str!r}, error: {exc.msg},\"\n f\" error at char {exc.loc}\"\n ) from None\n return list(tokens)", "def decode(s):\n return ' '.join(decode_to_words(s))", "def strToList(x):\n if type(x)==str:\n return [int(i) for i in x[1:-1].split(\", \")]", "def parse(name: unicode) -> List[unicode]:\n ...", "def parse_list(string, dtype):\n # l = string.replace('[', '').replace(']', '').replace(' ', '').split(',')\n s = string.replace(' ', '') # remove all spaces first\n if s[0] == '[': # it's not only a single item\n s = s[1:-1] # remove [ and ] from start and end only\n else: # it's just a single item\n return dtype(s)\n if s[0] == '[': # it's a list of lists\n splitted = s.split('],')\n for i in range(len(splitted)-1):\n splitted[i] += ']' # splitting removed the closing bracket from all but the last item\n l = list(map(lambda x: parse_list(x, dtype), splitted))\n else:\n splitted = s.split(',')\n l = list(map(dtype, splitted))\n return l", "def separate_list_input(input_: str) -> List[str]:\n no_commas = input_.replace(\",\", \" \")\n # Each string is naturally unicode, this causes problems with M2Crypto SANs\n # TODO: check if above is still true when M2Crypto is gone ^\n return [str(string) for string in no_commas.split()]", "def decode_string(self, value):\r\n return value", "def decode_doomstring(byte_string):\n s = list()\n import sys\n if len(byte_string) > 0:\n for b in byte_string:\n if b == 0:\n break\n try:\n b = (b).to_bytes(1, 'little').decode('ascii')\n except Exception:\n # Encountered an invalid character, just ignore it\n continue\n s.append(b)\n return ''.join(s)\n else:\n return ''", "def decode_string(string):\n return unicode(string, 'utf-8')", "def to_list(x):\n import collections\n if not isinstance(x, collections.Iterable) or isinstance(x, str):\n x = [x]\n return x", "def decompress(self, value):\n if value is None:\n return [\"\"] * len(self.languages)\n return [value[language] for language in self.languages]", "def parse_str( s: str ) -> list:\n\n tree = ET.fromstring( s )\n if tree is None: return None\n return parse_tree( tree )", "def _providers_string_to_list(val):\n # Use a set to remove duplicates\n if type(val) == str:\n return list(set(val.replace(' ', '').split(',')))\n return list(set(val))", "def decode_sequence(sequence, encoding_to_item):\n return_sequence = []\n\n for i, itemset in enumerate(sequence):\n decoded_itemset = set()\n for item in itemset:\n 
decoded_itemset.add(encoding_to_item[item])\n return_sequence.append(decoded_itemset)\n return return_sequence", "def decoding_strings(data):\n if isinstance(data, str):\n data = data.replace(\"b'\", \"\")\n return data\n elif isinstance(data, bytes):\n return data.decode()\n else:\n return False", "def parse_list(value: str) -> list[str]:\n segments = _QUOTED_SEGMENT_RE.findall(value)\n for segment in segments:\n left, match, right = value.partition(segment)\n value = ''.join([left, match.replace(',', '\\000'), right])\n return [_dequote(x.strip()).replace('\\000', ',') for x in value.split(',')]", "def from_json_string(json_string):\n lis = []\n # not sure if empty means empty string or len(jstr) < 1\n if json_string is None or json_string == \"\":\n return lis\n return json.loads(json_string)", "def _clean_string(self, string):\n if string is None:\n return []\n str_list = string.strip().split(\",\")\n return [each.strip() for each in str_list]", "def str_to_list_json(s):\n s.replace(\"'\", '\"')\n return json.loads(s)", "def serialize_deserializedata(datastr):\r\n\r\n if type(datastr) != str:\r\n raise TypeError(\"Cannot deserialize non-string of type '\"+str(type(datastr))+\"'\")\r\n typeindicator = datastr[0]\r\n restofstring = datastr[1:]\r\n\r\n # this is essentially one huge case statement...\r\n\r\n # None\r\n if typeindicator == 'N':\r\n if restofstring != '':\r\n raise ValueError(\"Malformed None string '\"+restofstring+\"'\")\r\n return None\r\n\r\n # Boolean\r\n elif typeindicator == 'B':\r\n if restofstring == 'T':\r\n return True\r\n elif restofstring == 'F':\r\n return False\r\n raise ValueError(\"Malformed Boolean string '\"+restofstring+\"'\")\r\n\r\n # Integer / Long\r\n elif typeindicator == 'I':\r\n try:\r\n return int(restofstring) \r\n except ValueError:\r\n raise ValueError(\"Malformed Integer string '\"+restofstring+\"'\")\r\n\r\n\r\n # Float\r\n elif typeindicator == 'F':\r\n try:\r\n return float(restofstring) \r\n except ValueError:\r\n raise ValueError(\"Malformed Float string '\"+restofstring+\"'\")\r\n\r\n # Float\r\n elif typeindicator == 'C':\r\n try:\r\n return complex(restofstring) \r\n except ValueError:\r\n raise ValueError(\"Malformed Complex string '\"+restofstring+\"'\")\r\n\r\n\r\n\r\n # String\r\n elif typeindicator == 'S':\r\n return restofstring\r\n\r\n # List / Tuple / set / frozenset / dict\r\n elif typeindicator == 'L' or typeindicator == 'T' or typeindicator == 's' or typeindicator == 'f':\r\n # We'll split this and keep adding items to the list. 
At the end, we'll\r\n # convert it to the right type\r\n\r\n thislist = []\r\n\r\n data = restofstring\r\n # We'll use '0:' as our 'end separator'\r\n while data != '0:':\r\n lengthstr, restofdata = data.split(':', 1)\r\n length = int(lengthstr)\r\n\r\n # get this item, convert to a string, append to the list.\r\n thisitemdata = restofdata[:length]\r\n thisitem = serialize_deserializedata(thisitemdata)\r\n thislist.append(thisitem)\r\n\r\n # Now toss away the part we parsed.\r\n data = restofdata[length:]\r\n\r\n if typeindicator == 'L':\r\n return thislist\r\n elif typeindicator == 'T':\r\n return tuple(thislist)\r\n elif typeindicator == 's':\r\n return set(thislist)\r\n elif typeindicator == 'f':\r\n return frozenset(thislist)\r\n else:\r\n raise Exception(\"InternalError: not a known type after checking\")\r\n\r\n\r\n elif typeindicator == 'D':\r\n\r\n lengthstr, restofdata = restofstring.split(':', 1)\r\n length = int(lengthstr)\r\n\r\n # get this item, convert to a string, append to the list.\r\n keysdata = restofdata[:length]\r\n keys = serialize_deserializedata(keysdata)\r\n\r\n # The rest should be the values list.\r\n values = serialize_deserializedata(restofdata[length:])\r\n\r\n if type(keys) != list or type(values) != list or len(keys) != len(values):\r\n raise ValueError(\"Malformed Dict string '\"+restofstring+\"'\")\r\n \r\n thisdict = {}\r\n for position in xrange(len(keys)):\r\n thisdict[keys[position]] = values[position]\r\n \r\n return thisdict\r\n\r\n\r\n\r\n\r\n # Unknown!!!\r\n else:\r\n raise ValueError(\"Unknown typeindicator '\"+str(typeindicator)+\"' for data :\"+str(restofstring))", "def str_list(x):\n #import ast\n #x = ast.literal_eval(x)\n x = x.strip('][').split(', ')\n x1 = [n.strip('\\'') for n in x]\n return (x1)", "def to_seq (value):\n if not value:\n return []\n\n if isinstance (value, str):\n return [value]\n\n else:\n return value", "def _convert(input_string):\n return ''.join([l for l in input_string])", "def _deserialize(self, value, attr, data):\n if not isinstance(value, str):\n raise ValueError(\"Value must be a string\")\n return super(StringList, self)._deserialize(value.split(self.delimiter), attr, data)", "def split_sequence(self, sequence):\n\t\treturn [char for char in sequence]", "def _decode(data: BencodedString) -> Union[bytes, dict, int, list]:\n if not data.bytes:\n raise ValueError(\"Cannot decode an empty bencoded string.\")\n\n if data.bytes[0] == START_DICT:\n return _decode_dict(data)\n\n if data.bytes[0] == START_LIST:\n return _decode_list(data)\n\n if data.bytes[0] == START_INTEGER:\n return _decode_int(data)\n\n if chr(data.bytes[0]).isdigit():\n return _decode_bytes(data)\n\n raise ValueError(\n \"Cannot decode data, expected the first byte to be one of \"\n f\"'d', 'i', 'l' or a digit, got {chr(data.bytes[0])!r} instead.\"\n )", "def parse_string_to_listint(string):\n\tstring = string.split()\n\tlength = len(string)\n\tnewlist = []\n\tfirst = int (string[0])\n\tsecond = int (string[1])\n\tthird = int (string[2])\n\tforth = int (string[3])\n\tfifth = int (string[4])\n\tnewlist.append(first)\n\tnewlist.append(second)\n\tnewlist.append(third)\n\tnewlist.append(forth)\n\tnewlist.append(fifth)\n\treturn newlist", "def convert_str_2_int_list(s, d=\",\"):\n\n ss = s.split(d)\n\n temp = []\n\n for t in ss:\n temp.append( int(t) )\n\n return temp", "def _decode_result(self, result):\n if isinstance(result, list):\n return [self._decode_result(r) for r in result]\n elif isinstance(result, SimpleString):\n return result.value\n elif 
isinstance(result, SimpleError):\n return self._decode_error(result)\n else:\n return result", "def unquote(string, encoding='utf-8', errors='replace'):\n if isinstance(string, bytes):\n return unquote_to_bytes(string).decode(encoding, errors)\n if '%' not in string:\n string.split\n return string\n if encoding is None:\n encoding = 'utf-8'\n if errors is None:\n errors = 'replace'\n bits = _asciire.split(string)\n res = [bits[0]]\n append = res.append\n for i in range(1, len(bits), 2):\n append(unquote_to_bytes(bits[i]).decode(encoding, errors))\n append(bits[i + 1])\n return ''.join(res)", "def test_string_to_list_string_delimiter(self):\n assert_equals(\n str_to_list(' a | b | c ', delimiter='|'),\n ['a', 'b', 'c']\n )", "def from_json_string(json_string):\n new_list = []\n if json_string is None:\n return new_list\n else:\n return json.loads(json_string)", "def decode(self) -> Iterable:\r\n if self.data[0:1] not in (b'd', b'l'):\r\n return self.__wrap_with_tuple()\r\n return self.__parse()", "def from_json_string(json_string):\n\n the_list = []\n if json_string is not None and json_string != '':\n if type(json_string) != str:\n raise TypeError(\"json_string must be a string\")\n the_list = json.loads(json_string)\n return the_list", "def intern_list(l):\n out = []\n for item in l:\n if isinstance(item, StringTypes):\n item = intern(item.encode(\"utf8\"))\n out.append(item)\n return out", "def test_string_to_list_string_delimiter(self):\n\n assert_equals(\n str_to_list(' a | b | c ', delimiter='|'),\n ['a', 'b', 'c']\n )", "def _get_string_list(property_value):\n property_value = property_value.strip(b'\\x00').decode('utf-8')\n property_value = property_value.split('\\x00')\n return property_value, ''", "def blob_to_list(blob):\r\n splits = blob.split(blob_delimiter)\r\n items = []\r\n for item in splits:\r\n items.append(item.replace(blob_delimiter_replacement, blob_delimiter))\r\n return items", "def decode(self, encoded):\n decoded = []\n for codes in encoded:\n tmp = []\n for code in codes:\n try:\n word = self.vocab[code]\n tmp.append(word)\n except:\n tmp.append(self.unk_token)\n decoded.append(tmp)\n return decoded", "def convert_comma_separated_str_to_list(input_str: str, trim: bool = True) -> List[str]:\n comma_separated_str = input_str.strip() if trim else input_str\n if not comma_separated_str:\n return []\n\n result = []\n for part_str in comma_separated_str.split(\",\"):\n value = part_str\n if trim:\n value = value.strip()\n if not value:\n continue\n result.append(value)\n return result", "def from_json_string(json_string):\n\n l = []\n if json_string is None or len(json_string) <= 0:\n return l\n\n return json.loads(json_string)" ]
[ "0.7429849", "0.67665714", "0.6603133", "0.6484686", "0.64099807", "0.63982195", "0.63764083", "0.63426924", "0.63409954", "0.6335773", "0.62704915", "0.62629604", "0.6239383", "0.6189524", "0.6171236", "0.6159162", "0.6110307", "0.6100137", "0.6022035", "0.60078084", "0.59908146", "0.5987798", "0.5965413", "0.5963164", "0.59271646", "0.5915528", "0.5890541", "0.5882655", "0.588065", "0.5850877", "0.5848257", "0.5833426", "0.5812989", "0.58033067", "0.57873803", "0.5781864", "0.5778497", "0.5776298", "0.5745422", "0.5724607", "0.57064503", "0.5704728", "0.5704728", "0.5701095", "0.5689775", "0.56525767", "0.56336164", "0.56261355", "0.56260043", "0.5615872", "0.561512", "0.5613184", "0.560844", "0.56079453", "0.5604958", "0.5575554", "0.5575554", "0.55751586", "0.5567173", "0.55629545", "0.55575305", "0.5550387", "0.55466855", "0.55447227", "0.5536309", "0.5522478", "0.55192226", "0.5511603", "0.5503378", "0.549832", "0.54926705", "0.5492303", "0.5476053", "0.5467916", "0.5449892", "0.54462725", "0.5442703", "0.54381114", "0.5429448", "0.53783333", "0.5362456", "0.5358908", "0.53542554", "0.5351802", "0.53461033", "0.533964", "0.53289115", "0.53230125", "0.53193414", "0.5306858", "0.52942926", "0.52929175", "0.5289188", "0.52863264", "0.52818096", "0.5267094", "0.5244163", "0.52301127", "0.5225089", "0.5222917" ]
0.7317169
1
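The positive document in the row above splits on chr(257), a code point outside the ASCII range that is not expected to occur in the payload strings. A minimal round-trip sketch of that approach; the encode counterpart here is an assumption for illustration and is not taken from the dataset:

SEP = chr(257)  # sentinel separator, as used by the decode above

def encode(strs):
    # assumed counterpart: "null" marks an empty list, otherwise join on the sentinel
    return "null" if not strs else SEP.join(strs)

def decode(s):
    if s == "null":
        return []
    return s.split(SEP)

assert decode(encode([])) == []
assert decode(encode(["ab", "", "cd"])) == ["ab", "", "cd"]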
GameObjects by default don't have agency but they may still do things...
def update(self, dt): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def game_over(self):\n raise NotImplementedError(\"Abstract method\") # no mercy for stooges", "def object_detection(self):\r\n pass", "def playerdefeated(self):\n globalvalues.gameover_combat()", "def is_actor():\n return False", "def on_collision(self):", "def __init__(self):\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._client_type = \"\"\n self._creatures = []\n self._lost = False\n self._name = \"Anonymous\"\n self._opponent = None\n self._reason_lost = \"\"\n self._reason_won = \"\"\n self._time_remaining = 0\n self._total_health = 0\n self._won = False", "def make_eligible(self):\n pass", "def check_trying_using(self):\r\n if self.opportunity or 'key' in inventory:\r\n if self.rect.colliderect(player):\r\n music_acceptor.usingPortalSound()\r\n player.rect.x = random.randrange(75, WIDTH - 125)\r\n player.rect.y = random.randrange(25, HEIGHT - 100)", "def game_over(self):\n self.over = True", "def enough_players():\n return True", "def is_building_eye(self):\r\n pass", "def can_exist_outside_of_game(self):\n return True", "def can_exist_outside_of_game(self):\n return True", "def game_allowed(self, uid=0):\n return True", "def draw_all_objects():\n\tglobal fuel_available\n\n\tbackground_module.draw_bg(win)\n\tbackground_module.draw_snow(win)\n\tobstacles_module.draw_obstacles(win)\n\tcoins_module.draw_coins(win)\n\tforeground_module.draw_fg(win)\n\n\tfor spark_object in effects_module.Coin_spark_effects.coin_effects_list:\n\t\tspark_object.draw(win)\n\tfor hit_effect_object in effects_module.Hit_effects.hit_effects_list:\n\t\thit_effect_object.draw(win)\n\n\tif num_of_lives == 0:\n\t\tplayer_module.player.y += 1\n\t\tplayer_module.propeller.draw(win)\n\t\tplayer_module.player.draw(win)\n\telif won_bool:\n\t\tplayer_module.draw_player(win, True)\n\telse:\n\t\tplayer_module.draw_player(win)\n\t\t\n\tbird_module.draw_bird(win)\n\tdynamic_obstacle_giftbox.draw_gift(win)\n\tdynamic_obstacle_olaf.draw_olaf(win)\n\tdynamic_obstacle_santa.draw_santa(win)\n\tdisplay_module.display_lives(win, num_of_lives)\n\tdisplay_module.draw_minimap(win,frame_count)\n\n\tif start_fuel:\n\t\tfuel_available -= 1\n\tfuel_available = display_module.fuel_bar.draw_fuel_bar(win, fuel_available, start_fuel)\n\n\tdisplay_module.draw_fuel(win)\n\tcursor.draw(win)", "def _ensure_is_alive(self):\n if self._hit_points == 0:\n raise UnitIsDead('Unit is dead!')", "def event11512000():\n header(11512000)\n end_if_this_event_on()\n if_player_owns_good(0, GOOD.Lordvessel)\n flag.enable(11512000)", "def bomb_defused(event_var):\r\n debug.write(\"[SourceRPG] Handling bomb_defused\", 1)\r\n if isFairForTeam(event_var['es_userteam']) or not int(unfairAdvantage):\r\n if es.isbot(event_var['userid']) and not int(botsGetXpOnEvents):\r\n return\r\n player = players[event_var['userid']]\r\n player.addXp( int(bombDefuseXp) * player['level'], 'defusing the bomb' )\r\n debug.write(\"[SourceRPG] bomb_defused handled\", 1)", "def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()", "def battle_resting(self):\n pass", "def on_hit(self, game):\n raise NotImplementedError", "def event11512150():\n header(11512150, 1)\n ally, = define_args('i')\n if_event_flag_on(1, EVENT.DarkAnorLondo)\n if_entity_attacked_by(1, ally, CHR.Player)\n if_condition_true(0, 1)\n wait(1.0) # You have to attack them twice.\n if_entity_attacked_by(0, ally, CHR.Player)\n chr.set_team_type(ally, TeamType.hostile_ally)", 
"def isGameOver(self):\n pass", "async def check_game_over(self,\n spaceship: MapObject,\n max_x: int,\n max_y: int) -> NoReturn:\n\n for obj_id, obj in self._dynamic_objects.items():\n if not obj_id.startswith('rubbish'):\n continue\n if spaceship & obj:\n while True:\n draw_frame(self._canvas, max_x // 4, max_y // 2,\n self._all_frames['other']['game_over'])\n await sleep(0)", "def oops():\n global asteroids, gameStarted, bonus\n global lives, last_record\n global ship, isResume, resumeBling\n\n lives -= 1\n if lives <= 0:\n last_record = (score, playername)\n records.append(last_record)\n records.sort(lambda x, y: y - x, key = lambda tup: tup[0])\n record_timer.start()\n\n notch = score_to_level(score)\n if notch > hallOfHonor[playername]:\n hallOfHonor[playername] = notch\n\n gameStarted = False\n asteroids = set([])\n bonus = set([])\n cron_spawn.stop()\n\n soundtrack.rewind() # It's annoying!\n\n ship = Explosion([ship.pos[0]], [0], color_idx = 3)\n\n isResume = True\n resumeBling = True\n resume_timer.start()\n bling_timer.start()\n respawn_timer.start()", "def noCondition(self):\n result = Activatable(self.effects).canActivate(self.game)\n self.assertTrue(result, \"The Activatable should be activatable\")", "def clear_game_objects(self):\n self.game_objects = list() # [go for go in self.game_objects if go.dont_destroy]\n self.colliders = list()", "def eat_orbs(self):\n player_occupied_tile = self.arena[self.player.position[0]][self.player.position[1]]\n if player_occupied_tile == Tile.ORB:\n self.arena[self.player.position[0]][self.player.position[1]] = Tile.EMPTY\n self.player.length = self.player.length + 1\n self.spawn_orb()", "def just_died(self):\r\n self.dead = True", "def start_of_game(self):\n pass", "def run(self):\n pygame.init()\n pygame.display.set_caption(\"Genetic Game\")\n self.screen = pygame.display.set_mode((self.SCREEN_W, self.SCREEN_H), 0, 32)\n\n self.ominus_sprites = [OminusSprite(self.screen, o, PLAYERS_COLORS[o.id]) for o in self.model.get_players()]\n for o in self.ominus_sprites:\n self.agent_group.add(o)\n\n self.wall_sprites = [WallSprite(self.screen, w) for w in self.model.get_walls()]\n for w in self.wall_sprites:\n self.terrain_group.add(w)", "def untargeted(self):\n\t\tpass", "def disableGracePeriod(self):\n self.isInGraceInvulnerability=False\n self.setTexture(shipSpritePath)", "def ready(self):\r\n\t\t# Remove attract mode from mode queue - Necessary?\r\n\t\tself.game.modes.remove(self)\r\n\t\t# Initialize game\t\r\n\t\tself.game.start_game()\r\n\t\t# Add the first player\r\n\t\tself.game.add_player()\r\n #self.game.add_player()\r\n\t\t# Start the ball. 
This includes ejecting a ball from the trough.\r\n\t\tself.game.start_ball()", "def notify_game_over(self):\n self.is_game_over = True", "def removeIfDead(self):\n global HP, winColor, FPS, kills\n if self.health <= 0:\n if self.rank == \"firerate\":\n if P.boosts == 1:\n P.timer = 600\n else:\n P.boosts += 1\n\n if self.rank == \"healer\":\n if P.medkits == 1:\n HP = 100\n else:\n P.medkits += 1\n\n if self.rank == \"quadshot\":\n P.quadshot = True\n P.quadshottimer = 300\n FPS = 100\n\n if self.rank == \"helper\":\n if self.firsttime:\n self.image = pygame.transform.rotate(self.image, 180)\n self.firsttime = False\n self.y -= self.vel*3\n if self.y <= 0:\n del enemies[findPlace(self, enemies)]\n if yn(Frame, 3):\n projectiles.append(projectile(self.x+self.w+2, self.y+self.h//2, 8, yvel=0, r=True, l=False))\n projectiles.append(projectile(self.x-42, self.y+self.h//2, -8, yvel=0, r=False, l=True))\n else:\n del enemies[findPlace(self, enemies)]\n kills += 1", "def on_object(self, image, objects):\n for obj in objects:\n if self.is_object_recognition_appropriate(obj.name):\n self.say(\"I see a {}\".format(obj.name))", "def onEnemyEmpty(self):\n\t\tAI.onEnemyEmpty(self)\n\t\tCombat.onEnemyEmpty(self)", "def update(self, g):\n \n self.game = g\n \n #if the player is dead, KILL THEM\n if self.hp[0] <= 0 and self.dead == False:\n self.dead = True\n self.deadt = 0\n #clear debuffs\n\n if self.dead == True:\n self.deadt += g.deltaT / 1000.0\n if self.deadt > self.reviveTime: #recussitate after 30 seconds\n self.dead = False\n self.hp[0] = self.hp[1]\n return #if dead, ignore input and all other updates\n \n elif self.dead == False:\n self.hp[0] += self.regen * g.deltaT / 1000.0\n if self.hp[0] > self.hp[1]:\n self.hp[0] = self.hp[1]\n self.mana[0] += self.manaRegen * g.deltaT / 1000.0\n if self.mana[0] > self.mana[1]:\n self.mana[0] = self.mana[1]\n self.attackTimer += self.attackSpeedMultiplier * g.deltaT / 1000.0\n #check debuffs\n self.checkBurning()\n self.checkChilled()\n self.checkShocked()\n self.checkParalyzed()\n \n \n #AURA\n for skill in self.skill:\n if skill.skillKey == 0 and skill.active == True: #aura is on\n #take mana\n self.mana[0] -= float(skill.skillCost) * g.deltaT / 1000.0\n #damage all creeps in AoE\n r = 4 * 24 #the radius of the AoE, in pixels at zoom = 1.\n for creep in g.creeps:\n if ( (creep.rect.centerx - self.rect.centerx) ** 2 + (creep.rect.centery - self.rect.centery) ** 2 ) ** 0.5 < r:\n creep.take_damage( self.attack * 0.1 * g.deltaT / 1000.0, 2 ) #THIS SHOULD IGNORE ABSORBTION\n #apply debuffs, based on type\n if skill.skillAttr == 0: #fire\n creep.applyBurning()\n elif skill.skillAttr == 1: #frost\n creep.applyChilled()\n elif skill.skillAttr == 2: #lightning\n creep.applyShocked()\n \n #buff all players in AoE\n\n #AI\n if self.active == False and self.attackTimer >= self.attackDelay:\n self.do_ai()\n \n #collision detection\n self.collision = [False, False]\n #Needs to be floats to ensure the player doesn't get stuck in a wall (rounding errors cause this)\n self.futurex = self.x + self.speed * self.direction[0] * g.deltaT / 1000.0\n self.futurey = self.y + self.speed * self.direction[1] * g.deltaT / 1000.0\n \n #can't move outside the bounds of game area\n if self.futurex < 0 or self.futurex + self.rect.width > g.mapSize[0] * 24:\n #cannot move in x\n self.collision[0] = True\n if self.futurey < 0 or self.futurey + self.rect.height > g.mapSize[1] * 24:\n #cannot move in y\n self.collision[1] = True\n \n #tile collision\n for x in range( int(self.x / 24) - 1, 
int(self.x / 24) + 2):\n for y in range( int( (self.y + 8) / 24) - 1, int( (self.y + 8) / 24) + 2):\n if x > -1 and x < g.mapSize[0] and y > -1 and y < g.mapSize[1]:\n if g.tiles[x][y].blocking == True:\n #test if you would be in them (24 x 24 area, cut off head top)\n if self.futurex >= x * 24 and self.futurex <= x * 24 + 24 or \\\n self.futurex + 24 >= x * 24 and self.futurex + 24 <= x * 24 + 24:\n if self.futurey + 8 >= y * 24 and self.futurey + 8 <= y * 24 + 24 or \\\n self.futurey + 24 + 8 >= y * 24 and self.futurey + 24 + 8 <= y * 24 + 24:\n self.collision[0] = True\n self.collision[1] = True\n \n \n #move (or don't)\n if self.collision[0] == False:\n self.x += self.speed * self.direction[0] * g.deltaT / 1000.0\n self.rect.move_ip( (int)(self.x - self.rect.x), 0)\n if self.collision[1] == False:\n self.y += self.speed * self.direction[1] * g.deltaT / 1000.0\n self.rect.move_ip( 0, (int)(self.y - self.rect.y) )\n \n #parse direction\n if self.direction[0] == 1:\n self.frameDirection = 1\n elif self.direction[0] == -1:\n self.frameDirection = 3\n if self.direction[1] == 1:\n self.frameDirection = 0\n elif self.direction[1] == -1:\n self.frameDirection = 2\n \n #animate\n if self.direction != [0, 0]: #player is moving\n self.frameTimer += g.deltaT\n if self.frameTimer > self.frameDelay:\n self.frameTimer = 0\n self.frame += 1\n if self.frame > self.frameMax:\n self.frame = 0\n else: #player is idle\n self.frame = 0", "def is_active(self, physics):\n pass", "def game_is_over(self) -> models.Conclusion:\n raise NotImplementedError", "def yell():\n ground_description_int = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS]\n if ground_description_int != 12:\n printmessage(\"You yell, but nobody hears you.\", 5, CYAN, 1)\n else:\n printmessage(\"You have found the ranger, amd won the game!\", 5, GREEN, 3)\n die(\"ranger\")", "def __hit_paddle(self, g_object):\n return g_object == self.__paddle", "def process_collision(self, obj, target):\n if obj == \"small_hex\" and not self.smallhex.small_hex_flag:\n self.ship.velocity.x = -self.ship.velocity.x\n self.ship.velocity.y = -self.ship.velocity.y\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['small_hex_penalty'])\n self.gameevents.add(\"score-\", \"flight\", self.config['Score']['small_hex_penalty'])\n self.smallhex.small_hex_flag = True\n elif obj == \"shell\":\n #remove shell, target is index of shell in shell_list\n del self.shell_list[target]\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['shell_hit_penalty'])\n self.gameevents.add(\"score-\", \"fortress\", self.config['Score']['shell_hit_penalty'])\n self.ship.take_damage()\n if not self.ship.alive:\n self.gameevents.add(\"destroyed\", \"ship\", \"shell\")\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['ship_death_penalty'])\n self.gameevents.add(\"score-\", \"fortress\", self.config['Score']['ship_death_penalty'])\n self.ship.color = (255, 255, 0)\n elif self.config['Ship']['colored_damage']:\n g = 255 / self.ship.start_health * (self.ship.health - 1)\n self.ship.color = (255, g, 0)\n\n elif obj.startswith(\"missile_\"):\n #if missile hits fortress, need to check if it takes damage when mine is onscreen\n if target == \"fortress\" and (len(self.mine_list) == 0 or self.config['Fortress']['hit_fortress_while_mine']):\n if self.ship.shot_timer.elapsed() >= self.config['Fortress']['vlner_time']:\n self.gameevents.add(\"score+\", \"vlner\", 1)\n if self.ship.shot_timer.elapsed() < self.config['Fortress']['vlner_time'] and self.score.vlner >= 
self.config['Fortress']['vlner_threshold']:\n self.gameevents.add(\"destroyed\", \"fortress\")\n self.fortress.alive = False\n #r = choice([0,45,90,135,180,225,270,315])\n #if r:\n # self.explosion.rotate(r)\n self.fortress.reset_timer.reset()\n self.snd_explosion.play()\n self.gameevents.add(\"score+\", \"pnts\", self.config['Score']['destroy_fortress'])\n self.gameevents.add(\"score+\", \"fortress\", self.config['Score']['destroy_fortress'])\n self.score.vlner = 0\n self.destroyedFortresses += 1\n self.gameevents.add(\"reset\", \"VLNER\")\n #do we reset the mine timer?\n if self.config['Mine']['fortress_resets_mine']:\n self.mine_list.timer.reset()\n self.mine_list.flag = False\n elif self.ship.shot_timer.elapsed() < self.config['Fortress']['vlner_time'] and self.score.vlner < self.config['Fortress']['vlner_threshold']:\n self.gameevents.add(\"reset\", \"VLNER\")\n self.score.vlner = 0\n self.snd_vlner_reset.play()\n self.ship.shot_timer.reset()\n elif target.startswith(\"mine_\"):\n #deal with missile hitting mine\n #can the mine be hit?\n if len(self.mine_list) > 0:\n if self.mine_list[0].tagged == \"fail\":\n self.gameevents.add(\"collide\", \"fail_tagged_mine\")\n elif self.mine_list[0].tagged == \"disabled\":\n self.gameevents.add(\"collide\", \"disable_tagged_mine\")\n elif self.mine_list[0].tagged == \"untagged\":\n if self.score.iff in self.mine_list.foe_letters:\n self.mine_list[0].tagged = \"disable\"\n self.gameevents.add(\"collide\", \"untagged_foe_mine\")\n else:\n self.gameevents.add(\"collide\", \"friend_mine\")\n elif self.mine_list[0].tagged == \"tagged\" and self.score.iff in self.mine_list.foe_letters:\n self.gameevents.add(\"collide\", \"tagged_foe_mine\")\n elif obj.startswith(\"mine_\"):\n #mine hit the ship\n index = int(obj[-1])\n #check to see if mine is still alive, it is possible to shot and\n #collide with a mine at the same time, ties go to ship\n if index < len(self.mine_list):\n del self.mine_list[index]\n self.score.iff = ''\n self.score.intrvl = 0\n self.mine_list.flag = False\n self.mine_list.iff_flag = False\n self.mine_list.timer.reset()\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['mine_hit_penalty'])\n self.gameevents.add(\"score-\", \"mines\", self.config['Score']['mine_hit_penalty'])\n self.mine2 -= self.config['Score']['mine_hit_penalty']\n self.ship.take_damage()\n if not self.ship.alive:\n self.gameevents.add(\"destroyed\", \"ship\", \"mine\")\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['ship_death_penalty'])\n self.gameevents.add(\"score-\", \"mines\", self.config['Score']['ship_death_penalty'])\n self.mine2 -= self.config['Score']['ship_death_penalty']\n self.ship.color = (255, 255, 0)\n elif self.config['Ship']['colored_damage']:\n g = 255 / self.ship.start_health * (self.ship.health - 1)\n self.ship.color = (255, g, 0)\n elif obj == \"friend_mine\":\n #get rid of mine\n self.destroyedMines += 1\n self.mine_list.flag = False\n self.mine_list.iff_flag = False\n self.gameevents.add(\"score+\", \"mines\", self.config['Score']['energize_friend'])\n self.gameevents.add(\"score+\", \"pnts\", self.config['Score']['energize_friend'])\n #see how long mine has been alive. 
0-100 points if destroyed within 10 seconds\n self.gameevents.add(\"score+\", \"mines\", 100 - 10 * math.floor(self.mine_list.timer.elapsed() / 1000))\n self.gameevents.add(\"score+\", \"speed\", 100 - 10 * math.floor(self.mine_list.timer.elapsed() / 1000))\n #print self.mine_list.timer.elapsed()\n #print 100 - 10 * math.floor(self.mine_list.timer.elapsed()/1000)\n self.mine_list.timer.reset()\n self.mine2 += 50\n #amazingly, missile can hit the mine in the same frame as the mine hits the ship\n if len(self.mine_list) > 0:\n del self.mine_list[0]\n self.score.iff = ''\n self.score.intrvl = 0\n elif obj == \"tagged_foe_mine\":\n #get rid of mine\n self.destroyedMines += 1\n self.mine_list.flag = False\n self.mine_list.iff_flag = False\n self.gameevents.add(\"score+\", \"mines\", self.config['Score']['destroy_foe'])\n self.gameevents.add(\"score+\", \"pnts\", self.config['Score']['destroy_foe'])\n #see how long mine has been alive. 0-100 points if destroyed within 10 seconds\n self.gameevents.add(\"score+\", \"mines\", 100 - 10 * math.floor(self.mine_list.timer.elapsed() / 1000))\n self.gameevents.add(\"score+\", \"speed\", 100 - 10 * math.floor(self.mine_list.timer.elapsed() / 1000))\n self.mine_list.timer.reset()\n self.mine2 += 75\n if len(self.mine_list) > 0:\n del self.mine_list[0]\n self.score.iff = ''\n self.score.intrvl = 0", "def play_game(self):\n # need everyone to pass to move to next phase?\n self.deal_cards()\n self.plant_food()", "async def play_axe(game_state) -> None:\n if len(game_state.active_player.zombies) > 0:\n play_weapon(game_state, Supply.AXE)\n else:\n game_state.active_player.print(f'You cannot play {Supply.AXE.value} for nothing!')", "def default_update_idle(self, player, gravity = True):\n if gravity: self.gravityUpdate()\n if self.bounce_count > 0: \n self.bounce()\n elif self.onGround:\n self.changeAnimation('idle', self.direction_id)\n self.xvel = 0", "def setupCollisions(self) :", "def non_social_action(self):\n\n if not self.agent.done:\n if self.opponenet.cashed and self.opponenet.pumps >= self.agent.pumps:\n self.EV = self.opponenet.pumps + np.random.randint(1,5)\n\n self.action_gating()", "def is_hero(self):\n return True", "def ai(self):\n if not self.alive:\n return\n\n assert hasattr(self, 'net'), \"Bird has ai enabled but does not appear to have any agents\"\n\n self.birth_time += 1\n\n activation = self.net.activate(self.get_inputs())[0]\n\n # if activation > 1e3:\n # print('unusually high activation of', activation)\n\n if activation > 0.9:\n self.flap()", "def game_over(self):\n return self.lives() < 0", "def action_normal(self):\n obs = self.observation\n shoot = False\n eb = self.__class__.enemy_base\n \n ammopacks = filter(lambda x: x[2] == \"Ammo\", obs.objects)\n if ammopacks:\n self.updateAllAmmoSpots(ammopacks)\n # Walk to ammo\n if obs.ammo < SUFFICIENT_AMMO:\n self.goal = self.getClosestLocation(ammopacks)\n self.motivation = MOTIVATION_AMMO\n self.debugMsg(\"*> Recharge (%d,%d)\" % (self.goal[0],self.goal[1]))\n \n '''if (obs.ammo > 0 and obs.foes):\n self.goal = self.getClosestLocation(obs.foes)\n self.debugMsg(\"*> Go to enemy (%d,%d)\" % self.goal)\n # If the enemy is within range, shoot.\n if(point_dist(self.goal, obs.loc) < self.settings.max_range\n and not line_intersects_grid(obs.loc, self.goal, self.grid, self.settings.tilesize)):\n self.debugMsg(\"*> Shoot (%d,%d)\" % self.goal)\n #if self.goal not in obs.friends:\n self.motivation = MOTIVATION_SHOOT_TARGET\n shoot = True'''\n \n # Attack strategy 1\n #########################\n 
# 1) Shoot live enemies #\n #########################\n # Aim at the closest enemy outside the enemy base\n if obs.ammo > 0 and obs.foes:\n living = filter(lambda x: point_dist(x[0:2], eb) > ENEMY_BASE_RANGE, obs.foes)\n self.debugMsg(\"Living: %s\" % (living,))\n if living:\n self.debugMsg(1)\n self.goal = min(living, key=lambda x: point_dist(obs.loc, x[0:2]))[0:2]\n self.motivation = MOTIVATION_SHOOT_TARGET\n self.debugMsg(2)\n # Check if enemy in fire range\n if (\n point_dist(self.goal, obs.loc) < self.settings.max_range and\n not line_intersects_grid(\n obs.loc, \n self.goal, \n self.grid, \n self.settings.tilesize\n )\n ):\n self.debugMsg(3)\n self.debugMsg(\"*> Shoot (%d,%d)\" % self.goal)\n #return self.getActionTriple(True,None,0) ###?? SHOULD WE STOP MOVING WHEN WE SHOOT?\n return self.getActionTriple(True)\n else:\n self.debugMsg(4)\n return self.getActionTriple()\n self.debugMsg(5)\n \n # Walk to an enemy CP\n if self.goal is None and len(self.friendlyCPs) < 2:\n self.goal = self.getClosestLocation(self.getQuietEnemyCPs())\n if self.goal:\n self.debugMsg(\"Crowded location: %d\" % self.getCrowdedValue(self.goal))\n self.motivation = MOTIVATION_CAPTURE_CP\n self.debugMsg(\"*> Capture (%d,%d)\" % (self.goal[0],self.goal[1]))\n \n '''# If you can't think of anything to do\n # at least walk to a friendly control point\n if self.goal is None:\n self.goal = self.getClosestLocation(self.getQuietRestlessFriendlyCPs())\n if self.goal:\n self.motivation = MOTIVATION_GUARD_CP\n self.debugMsg(\"*> Guard (%d,%d)\" % (self.goal[0],self.goal[1]))'''\n \n if self.goal is None:\n self.goal = max(\n self.__class__.ammoSpots,\n key=lambda x: point_dist(x, obs.loc),\n )\n self.debugMsg(\"Going to ammospot far away (%d, %d)\" % (self.goal[0],self.goal[1]))\n self.motivation = MOTIVATION_STAY_PUT\n \n\n if self.goal:\n return self.getActionTriple(shoot)\n else:\n return self.getActionTriple(shoot)", "def engageEnemyRobots(self, targetRobot):\n # self.log(\"engaging enemys\")\n enemyEngaged = False\n if SPECS.UNITS[self.me.unit].ATTACK_RADIUS[0] <= targetRobot['distance'] <= SPECS.UNITS[self.me.unit].ATTACK_RADIUS[1]: \n enemyEngaged = True\n return enemyEngaged", "def on_use(self):\n assert self.can_use, 'Used an unuseable item!'", "def event_house_bust(self) -> None:\n print(f\"The house's hand contains {min(self.house.hand.value)}, they're bust\")\n self.event_player_wins()", "def event11512060():\n header(11512060, 1)\n chr.disable(CHR.CapriciousThrall)\n end_if_this_event_on()\n end_if_event_flag_on(EVENT.CapriciousThrallDead)\n\n if_event_flag_on(0, EVENT.CapriciousThrallActive)\n chr.disable(CHR.SilverKnightArcherNearThrall)\n\n if_event_flag_on(1, EVENT.CapriciousThrallActive)\n if_host(1)\n if_player_inside_region(1, REGION.CapriciousThrallTrigger)\n if_condition_true(0, 1)\n\n # Ambush.\n flag.enable(EVENT.ThrallAmbushOngoing) # Ambush is ongoing. 
Note this MUST be enabled before the flag below.\n flag.enable(11512060) # One-off ambush is done.\n flag.enable(11502003) # Thrall won't appear in Sen's.\n flag.enable(11502004) # Thrall won't appear in Sen's.\n obj.enable(1511974)\n sfx.create_map_sfx(1511975)\n obj.enable(1511976)\n sfx.create_map_sfx(1511977)\n obj.enable(1511978)\n sfx.create_map_sfx(1511979)\n chr.enable(CHR.CapriciousThrall)\n anim.force_animation(CHR.CapriciousThrall, ANIM.ThrallAmbushAttack)\n wait(0.5)\n sound.enable_map_sound(1513804)\n boss.enable_boss_health_bar(CHR.CapriciousThrall, TEXT.CapriciousThrallName)\n wait(100.0) # Battle timer.\n end_if_event_flag_on(11512061) # Already dead and handled.\n boss.disable_boss_health_bar(CHR.CapriciousThrall, TEXT.CapriciousThrallName)\n sound.play_sound_effect(CHR.CapriciousThrall, SoundType.s_sfx, 777777777) # For effect.\n wait(3.0) # so sound effect can build up and slightly mask the abrupt music stop\n sound.disable_map_sound(1513804)\n anim.force_animation(CHR.CapriciousThrall, ANIM.ThrallRetreat)\n wait(1.4)\n chr.disable(CHR.CapriciousThrall)\n obj.disable(1511974)\n sfx.delete_map_sfx(1511975)\n obj.disable(1511976)\n sfx.delete_map_sfx(1511977)\n obj.disable(1511978)\n sfx.delete_map_sfx(1511979)\n message.status_explanation(TEXT.ThrallHasFled)\n flag.enable(11512008) # Message won't appear when you come back.", "def current_venue_requires_player_greeting() -> bool:\n venue_instance = CommonLocationUtils.get_venue_of_current_lot()\n if venue_instance is None:\n return False\n return venue_instance.requires_visitation_rights", "def test_none_meet(self, initial_placement_fixture):\n assert len(ctx.cluster.influx_db.aggregate_performance()) == 0, \\\n \"Test should run on the basic model\"\n self.generic_function(above_objective=0)", "def is_not_used(self):\n pass", "def game_over(self):\n # TODO: Define the game over condition for Adventure.\n # use self.over to determine if the game if over\n return self.over", "def setup_game(self):", "def collect(self, player: Player):\n player.set_invincible(True)", "def event_m10_29_4000010():\r\n \"\"\"State 0,2: [DC] [Preset] Enemies start in conjunction with OBJ\"\"\"\r\n assert event_m10_29_x45(z1=10291010, z2=10290403, z3=10290404, z4=10290405, z5=10290406, z6=129020011)\r\n \"\"\"State 1: Finish\"\"\"\r\n EndMachine()\r\n Quit()", "def __hit_bricks(self, g_object):\n return type(g_object) == GRect and g_object != self.__paddle", "def event11510130():\n header(11510130, 1)\n\n skip_if_event_flag_off(3, EVENT.AnorLondoGwynWarp)\n chr.disable(CHR.DarkwraithInBossRoom)\n chr.disable(CHR.SilverKnightArcherNearBossFog)\n end()\n\n # Changes to make when Dark Anor Londo begins\n skip_if_event_flag_on(10 + 2 * len(Darkwraiths), EVENT.DarkAnorLondo)\n chr.disable(6640)\n chr.disable(6650)\n chr.disable(CHR.ChapelMimic)\n chr.disable(CHR.AbyssalPrinceJareel)\n for darkwraith in Darkwraiths:\n chr.disable(darkwraith)\n if_event_flag_on(0, EVENT.DarkAnorLondo)\n chr.enable(6640)\n chr.enable(6650)\n chr.enable(CHR.ChapelMimic)\n chr.enable(CHR.AbyssalPrinceJareel)\n chr.disable_ai(CHR.AbyssalPrinceJareel) # maybe redundant\n for darkwraith in Darkwraiths:\n chr.enable(darkwraith)\n\n # Skips to here if Dark Anor Londo has already started.\n # Disable chapel chest (replaced by Mimic).\n obj.disable(OBJ.ChapelChest)\n obj.disable_activation(OBJ.ChapelChest, -1)\n for enemy_id in DarkAnorLondoAllies:\n chr.set_team_type(enemy_id, TeamType.fighting_ally)\n # Move Palace archer.\n warp.short_warp(1510301, 'region', 
REGION.MoveArcherInDarkPalace, -1)\n for enemy_id in DarkAnorLondoDisabled:\n chr.disable(enemy_id)\n for painting_guardian_id in range(1510150, 1510159):\n # Disable Painting Guardians on the floor (except one getting killed).\n chr.disable(painting_guardian_id)\n skip_if_event_flag_on(1, 11510861) # Skip if Darkmoon Guardian is already dead.\n warp.warp(CHR.DarkmoonGuardian, 'region', 1512451, -1)\n end_if_event_flag_on(1034) # Stop here if Darkmoon Knightess is already dead.\n warp.warp(CHR.DarkmoonKnightess, 'region', 1512450, -1)\n chr.set_nest(CHR.DarkmoonKnightess, 1512450)\n chr.set_standby_animation_settings_to_default(CHR.DarkmoonKnightess)", "def attack(self, robot):\n pass", "def run_game():\n\n # Initialize game and create a screen object\n pygame.init()\n ai_settings = Settings()\n\n # Set screen width and height\n screen = pygame.display.set_mode((ai_settings.screen_width, ai_settings.screen_height), pygame.RESIZABLE) \n pygame.display.set_caption(\"CharLee MacDennis 2: Electric Bugaloo\")\n\n # Make the Play button\n play_button = Button(ai_settings, screen, \"Play\")\n\n # Make the Puase button\n pause_button = Button(ai_settings, screen, \"Paused\")\n\n # Create an instance to store game statistics\n stats = GameStats(ai_settings);\n\n # Set stats.high_score to be equal to universal high score\n stats.high_score = gf.read_high_score()\n\n # Create a scoreboard\n sb = Scoreboard(ai_settings, screen, stats)\n\n\n # Make a ship, a group of ship bullets\n ship = Ship(ai_settings, screen)\n ship_bullets = Group()\n \n # Create alien and group of alien bullets\n aliens = Group()\n alien_bullets = Group()\n\n # Create the fleet of aliens\n gf.create_fleet(ai_settings, screen, ship, aliens)\n\n # Create clock for FPS limit\n clock = pygame.time.Clock()\n\n # Start the main game loop\n while True:\n # 60 fps\n clock.tick(120)\n\n # Watch for keyboard and mouse events\n gf.check_events(ai_settings, screen, stats, sb, play_button, ship, aliens, ship_bullets, alien_bullets)\n\n if stats.game_active:\n\n # Update ship status\n ship.update()\n\n # Update all bullets on screen\n gf.update_bullets(ai_settings, screen, stats, sb, ship, aliens, ship_bullets, alien_bullets)\n\n # Update aliens status\n gf.update_aliens(ai_settings, stats, screen, sb, ship, aliens, ship_bullets, alien_bullets)\n\n # Draw and refresh the screen\n gf.update_screen(ai_settings, screen, stats, sb, ship, aliens, ship_bullets, alien_bullets, play_button)", "def _default_step_action(self, agents):\n try:\n super()._default_step_action(agents)\n except NotImplementedError:\n pass\n # get collisions\n collisions = self.traci_handler.simulation.getCollidingVehiclesIDList()\n logger.debug('Collisions: %s', pformat(collisions))\n for veh in collisions:\n self.collisions[veh] += 1\n # get subscriptions\n self.veh_subscriptions = self.traci_handler.vehicle.getAllSubscriptionResults()\n for veh, vals in self.veh_subscriptions.items():\n logger.debug('Subs: %s, %s', pformat(veh), pformat(vals))\n running = set()\n for agent in agents:\n if agent in self.veh_subscriptions:\n running.add(agent)\n if len(running) == 0:\n logger.info('All the agent left the simulation..')\n self.end_simulation()\n return True", "def noyable(self):\n return False", "def roomAction(gc, room_mob):\n gc['statuscontent']['MobAction'].setText(\"\")\n gamestate = 2\n # print(\"room_mob['category']:\", room_mob['category'])\n # if mob is a monster when you press enter the player attacks and get attacked back if the mob is still alive\n if 
room_mob.category == 'monster':\n gc['statuscontent']['MobAction'].setText(\"You attacked the {}\".format(room_mob.name))\n play('jab.ogg')\n damage = random.randint(1, gc['player'].getAttack())\n left = room_mob.hp - damage\n\n # if mob has any hp left set new hp to that mob and let the mob hit back at the player\n # else mob is dead (set mob hp to 0) and tell that room i doesn't have any mob any more (hasmob = False)\n if left > 0:\n room_mob.setHP(left)\n gc['statuscontent']['MobHP'].setText(\"HP: \" + str(room_mob.hp))\n dice = random.randint(1, 100)\n \n if dice <= room_mob.attacktrigger*100:\n time.sleep(0.5)\n play('chopp.ogg')\n hurtPlayer(gc['player'], room_mob.damage)\n else:\n time.sleep(0.5)\n play('missed_chopp.ogg')\n \n else:\n time.sleep(0.5)\n play('zombie_pain.ogg')\n room_mob.setHP(0)\n gc['board'].setNoMob(gc['player'].relcoords())\n gc['statuscontent']['MobHP'].setText(\"DEAD!\")\n gc['statuscontent']['MobAction'].setText(\"Monster dropped {}\".format(room_mob.getLootDescription()))\n gc['player'].addInventory(room_mob.getLoot())\n gamestate = 1\n elif room_mob.category == 'treasure':\n print(\"OPEN TREASURE\")\n time.sleep(0.5)\n play('open_chest.ogg')\n gc['board'].setNoMob(gc['player'].relcoords())\n gc['statuscontent']['MobAction'].setText(\"You got {}\".format(room_mob.getLootDescription()))\n gc['player'].addInventory(room_mob.getLoot())\n # elif room_mob['category'] == 'trap':\n # dice = random.randint(1, 100)\n # print(\"Nu kommer jag till en fälla\")\n # if dice <= room_mob.attacktrigger*100:\n # hurtPlayer(gc['statusbar'], gc['statuscontent'], gc['player'], room_mob.damage)\n # print(\"Jag blev visst skadad\")\n gc['statuscontent']['Attack'].setText(\"Atk {}\".format(gc['player'].getAttack()))\n gc['statuscontent']['Gold'].setText(\"Gold: {}\".format(gc['player'].getTreasure()))\n return gc, room_mob, gamestate", "def randomly_spawn_mothership(self) -> None:\n return", "def attack(self):\n if not self.attack_mode and not self.damage_mode and not self.death_mode:\n self.attack_group = choice(self.attack_groups)\n self.attack_mode = True\n self.cut_frame_update = 0", "def main(self):\n _age = info.getInfo(self)\n _flag = game.check_age(self, _age)\n if _flag == False:\n exit()\n game.wants_to_play(0)", "def __init__(self, numOfGames, muteOutput, randomAI, AIforHuman):\n self.numOfGames = numOfGames\n self.muteOutput = muteOutput\n self.maxTimeOut = 30 \n\n self.AIforHuman = AIforHuman\n self.gameRules = GameRules()\n self.AIPlayer = TicTacToeAgent()\n\n if randomAI:\n self.AIPlayer = randomAgent()\n else:\n self.AIPlayer = TicTacToeAgent()\n if AIforHuman:\n self.HumanAgent = randomAgent()\n else:\n self.HumanAgent = keyboardAgent()", "def game_play(self):", "def __init__(self):\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._acted = False\n self._dash_x = 0\n self._dash_y = 0\n self._energy = 0\n self._genarium = 0\n self._is_busy = False\n self._job = None\n self._legendarium = 0\n self._moves = 0\n self._mythicite = 0\n self._owner = None\n self._protector = None\n self._rarium = 0\n self._shield = 0\n self._x = 0\n self._y = 0", "def Ork(self):\n self.type = \"Ork\"\n self.image = pygame.image.load(\"Ork.gif\")\n self.cost = 2\n self.health = 40\n self.max_health = self.health\n self.base_damage = 3\n self.damagedice = (4,2)\n self.base_defense = 1\n self.defensedice = (3,1)\n self.color = GREEN1\n self.activate()", "def event11515492():\n header(11515492, 1)\n\n 
chr.disable(CHR.DarkOrnsteinGiant)\n chr.disable(CHR.DarkOrnsteinScion)\n chr.disable(CHR.DarkSmough)\n\n if_event_flag_on(1, EVENT.OrnsteinAndSmoughDead)\n if_event_flag_on(1, EVENT.DarkAnorLondo)\n if_event_flag_on(1, EVENT.JareelDead)\n if_event_flag_off(1, EVENT.DarkOrnsteinAndSmoughDead)\n if_event_flag_off(1, EVENT.AnorLondoGwynWarp)\n end_if_condition_false(1) # No other events use this flag, so safe to end here.\n\n chr.disable(CHR.DarkwraithInBossRoom) # Disable and kill Darkwraith in O&S room.\n chr.kill(CHR.DarkwraithInBossRoom, False)\n\n if_event_flag_off(2, EVENT.AnorLondoGwynWarp) # Just in case.\n if_player_inside_region(2, REGION.DarkOrnsteinAndSmoughTrigger)\n if_condition_true(0, 2)\n\n for fog_wall, fog_sfx in zip((1511990, 1511992, 1511988), (1511991, 1511993, 1511989)):\n obj.enable(fog_wall)\n sfx.create_map_sfx(fog_sfx)\n\n chr.enable(CHR.DarkSmough)\n anim.force_animation(CHR.DarkSmough, 3017) # fade-in charge.\n wait(4.0)\n boss.enable_boss_health_bar(CHR.DarkSmough, TEXT.DesecraterSmough)\n sound.enable_map_sound(1513800)\n wait(5.0)\n chr.enable(CHR.DarkOrnsteinGiant)\n anim.force_animation(CHR.DarkOrnsteinGiant, 4011)\n boss.enable_boss_health_bar_with_slot(CHR.DarkOrnsteinGiant, 1, TEXT.ForsakenKnightOrnstein)\n chr.ai_instruction(CHR.DarkOrnsteinGiant, 1, 0)\n run_event(11515495) # Smough in Phase 1.\n run_event(11515493) # Main/support swapping.\n run_event(11515496) # Monitor Phase 2 start.\n\n # Smough fade control.\n run_event_with_slot(11515480, 0, args=(11515470, 3010, 4000, 1.5), arg_types='iiif') # right to left\n run_event_with_slot(11515480, 1, args=(11515471, 3011, 4001, 1.5), arg_types='iiif') # left to right\n run_event_with_slot(11515480, 2, args=(11515472, 3012, 4002, 1.5), arg_types='iiif') # right to left\n run_event_with_slot(11515480, 3, args=(11515473, 3013, 4003, 1.5), arg_types='iiif') # overhead smash\n run_event_with_slot(11515480, 4, args=(11515474, 3014, 4004, 1.3), arg_types='iiif') # fast overhead smash\n run_event_with_slot(11515480, 5, args=(11515475, 3015, 4005, 2.0), arg_types='iiif') # left to right, more reach\n run_event_with_slot(11515480, 6, args=(11515476, 3019, 4009, 2.0), arg_types='iiif') # short charge\n run_event_with_slot(11515480, 7, args=(11515477, 3017, 4007, 3.0), arg_types='iiif') # long charge\n run_event_with_slot(11515480, 8, args=(11515478, 3018, 4008, 2.0), arg_types='iiif') # butt slam\n run_event_with_slot(11515480, 9, args=(11515479, 3016, 4006, 2.0), arg_types='iiif') # jumping hammer smash", "def won(self):\r\n return None", "def is_still_owner(self):\n raise tooz.NotImplemented", "def initialize_object_attributes(self, ai_game):\r\n self.ship_bullets = ai_game.ship_bullets\r\n self.alien_bullets = ai_game.alien_bullets\r\n self.aliens = ai_game.aliens\r\n self.explosions = ai_game.explosions\r\n self.stats = ai_game.stats\r\n self.sb = ai_game.sb\r\n self.ship = ai_game.ship", "def event11515392():\n header(11515392, 1)\n\n skip_if_event_flag_off(9, EVENT.OrnsteinAndSmoughDead)\n chr.disable(CHR.Ornstein)\n chr.disable(CHR.SuperOrnstein)\n chr.disable(CHR.Smough)\n chr.disable(CHR.SuperSmough)\n chr.kill(CHR.Ornstein, False)\n chr.kill(CHR.SuperOrnstein, False)\n chr.kill(CHR.Smough, False)\n chr.kill(CHR.SuperSmough, False)\n end()\n\n chr.disable(CHR.SuperOrnstein)\n chr.disable(CHR.SuperSmough)\n chr.disable_backread(CHR.SuperOrnstein)\n skip_if_event_flag_on(1, 11510000)\n chr.disable(CHR.Ornstein) # Disable default Ornstein for cutscene.\n chr.disable_ai(CHR.Ornstein)\n 
chr.disable_ai(CHR.Smough)\n if_event_flag_on(1, 11515393)\n if_player_inside_region(1, 1512990)\n if_condition_true(0, 1)\n\n skip_if_event_flag_on(8, 11510000)\n skip_if_multiplayer(2)\n cutscene.play_cutscene_to_player(150140, CutsceneType.skippable, CHR.Player)\n skip(1)\n cutscene.play_cutscene_to_player(150140, CutsceneType.unskippable, CHR.Player)\n wait_frames(1)\n chr.enable(CHR.Ornstein)\n chr.enable(CHR.Smough)\n flag.enable(11510000)\n\n chr.disable_ai(CHR.SilverKnightArcherNearBossFog)\n chr.enable_ai(CHR.Ornstein)\n chr.enable_ai(CHR.Smough)\n boss.enable_boss_health_bar_with_slot(CHR.Ornstein, 1, 5270)\n boss.enable_boss_health_bar_with_slot(CHR.Smough, 0, 2360)", "def trigger_violence(self):\n # First time offender get registered in the system and changes category into an Aggressor and a Victim\n if self.assaulted == 0:\n if self.stress > self.random.random():\n self.category = 'aggressor'\n self.assaulted += 1\n self.spouse.category = 'victim'\n self.spouse.got_attacked += 1\n\n # Second-time offender, checks to see if it is a recidivist.\n elif self.stress > self.random.random():\n self.assaulted += 1\n self.spouse.got_attacked += 1", "def robot_is_wanting_default(giver, object, receiver, ctxt) :\n if receiver==\"compliant robot\" :\n raise ActionHandled()", "def add_drawable(self, gameObject):\r\n if not self.sprite_group.has(gameObject):\r\n self.sprite_group.add(gameObject)", "def start_game(self):\n\n\t\tpass", "def __init__(self, name):\n self.in_wall = False", "def primers_are_useless(self):\r\n #TODO: send a message telling these primers can be taken out.\r\n for feature in self.gt_seq_region:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")\r\n for feature in self.pcr_product:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")\r\n for feature in self.forward_primer:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")\r\n for feature in self.reverse_primer:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")", "def tick_needs(self):\n # If not preoccupied, check needs and do stuff\n if not self.game.turns % 6:\n self.memories.tick_memories()\n self.mood_drain = 0\n for need in self.needs:\n # Mood is ticked last - dependent on others\n if need == \"mood\":\n continue\n setattr(self, need, max(getattr(self, f\"{need}_drain\") + getattr(self, need), 0))\n if getattr(self, need) == 0:\n self.mood_drain -= 1\n else:\n # If mood's not currently draining, increase mood equal to stats 75% filled\n if not self.mood_drain:\n positives = [x for x in self.needs if getattr(self, need) > 75]\n self.mood = min(len(positives) + self.mood, 100)\n\n # Draining mood based on unfufilled needs\n self.mood += self.mood_drain\n if self.work <= 0:\n self.mob_fired()\n\n if self.mood <= 0:\n self.mob_quits()\n\n if self.bladder <= 0:\n urine = game_objects[\"Urine\"]\n self.game.create_object(self.x, self.y, urine)\n self.bladder = self.max_bladder\n\n if self.bowels <= 0:\n poo = game_objects[\"Poo\"]\n self.game.create_object(self.x, self.y, poo)\n self.bowels = self.max_bowels", "async def game(self):\n pass", "def is_object_recognition_appropriate(self, name):\n if name not in self.objecttime or (time() - self.objecttime[name] > self.OBJECT_TIMEOUT):\n self.objecttime[name] = time()\n return True", "def 
__init__(self):\n\t\tself.playercolider()", "def __init__(self):\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._bottle = None\n self._cowboy = None\n self._furnishing = None\n self._has_hazard = False\n self._is_balcony = False\n self._tile_east = None\n self._tile_north = None\n self._tile_south = None\n self._tile_west = None\n self._x = 0\n self._y = 0\n self._young_gun = None", "def hook_gain_this_card(self, game, player):\n empties = sum(1 for st in game.cardpiles if game[st].is_empty())\n for _ in range(empties):\n player.gain_card(\"Gold\")", "def enableGracePeriod(self):\n self.isInGraceInvulnerability=True\n self.gracePeriodStartTime=time.time()\n self.setTexture(shipDamagedSpritePath)", "def event11512200():\n header(11512200, 1)\n\n skip_if_event_flag_on(7, EVENT.AnorLondoGwynWarp)\n chr.disable(CHR.Gwyn)\n chr.disable(CHR.GiantCrow)\n chr.disable(BlackKnights[0])\n chr.disable(BlackKnights[1])\n chr.disable(BlackKnights[2])\n chr.disable(BlackKnights[3])\n end()\n\n for boss_id in (CHR.Ornstein, CHR.SuperOrnstein, CHR.Smough, CHR.SuperSmough):\n chr.disable(boss_id)\n for knight in BlackKnights:\n chr.enable_immortality(knight)\n chr.disable_health_bar(knight)\n chr.disable(knight)\n\n anim.force_animation(CHR.Player, ANIM.SummonFadeIn)\n\n chr.enable_invincibility(CHR.GiantCrow)\n chr.disable_gravity(CHR.GiantCrow)\n chr.disable_collision(CHR.GiantCrow)\n chr.set_special_effect(CHR.Gwyn, 620) # add light to Gwyn\n chr.set_special_effect(CHR.Gwyn, 3170) # add lightning to Gwyn's weapon\n\n chr.disable_ai(CHR.Gwyn)\n anim.force_animation(CHR.Gwyn, 200, loop=True)\n wait(2.0)\n anim.force_animation(CHR.Gwyn, 200)\n chr.enable_ai(CHR.Gwyn)\n sound.enable_map_sound(1513805)\n boss.enable_boss_health_bar(CHR.Gwyn, 5375)\n flag.disable(EVENT.AnorLondoGwynWarp)\n\n flag.enable(11515360)\n\n if DEBUG.FAST_GWYN_KNIGHTS:\n wait(10.0)\n else:\n wait(135.0) # Time it takes for Soul of Cinder music to get to piano part.\n\n chr.ai_instruction(CHR.Gwyn, 1, 0)\n anim.force_animation(CHR.Gwyn, 3030)\n wait(2.1)\n if __REMASTERED:\n light.set_area_texture_parambank_slot_index(15, 2)\n else:\n light.set_area_texture_parambank_slot_index(15, 1)\n wait(3.0)\n chr.rotate_to_face_entity(CHR.Gwyn, CHR.Player)\n\n end_if_event_flag_on(11512201) # Gwyn already dead, no Black Knights.\n\n flag.enable(BlackKnightTurnFlags[0]) # Sword spawns first.\n run_event(11512202) # Black Knight spawn manager\n for slot, (knight, knight_active_flag) in enumerate(zip(BlackKnights, BlackKnightActiveFlags)):\n run_event_with_slot(11512210, slot, args=(knight, knight_active_flag)) # Death triggers", "def run_game_logic(self):\n pass", "def event11512050():\n header(11512050, 1)\n ally, = define_args('i')\n\n skip_if_this_event_slot_off(3)\n chr.disable(ally)\n chr.kill(ally, False)\n end()\n\n if_entity_dead(1, ally)\n if_event_flag_on(1, EVENT.DarkAnorLondo)\n if_event_flag_off(1, EVENT.JareelDead)\n if_condition_true(0, 1)\n end()", "def gameOver(self):\n\t\treturn self.lives == 0", "async def attacking_logic(self):\n if len(self.units(UnitTypeId.ZERGLING)) >= 6:\n for zergling in self.units(UnitTypeId.ZERGLING):\n self.do(zergling.attack(self.enemy_start_locations[0]))" ]
[ "0.63586646", "0.58945036", "0.5857092", "0.5510857", "0.54797775", "0.54752845", "0.54633653", "0.54481524", "0.5443868", "0.5428182", "0.5414342", "0.54023975", "0.54023975", "0.5400207", "0.5384979", "0.53770864", "0.53669137", "0.53261596", "0.53248054", "0.52967083", "0.52828264", "0.52818656", "0.52776915", "0.52708393", "0.5264435", "0.5250424", "0.52470714", "0.52465516", "0.52348465", "0.5233008", "0.52321476", "0.5223093", "0.52088195", "0.5205007", "0.520309", "0.51953", "0.51869905", "0.5182719", "0.51792276", "0.5174176", "0.51685303", "0.51610583", "0.5152602", "0.5143342", "0.5141725", "0.5140165", "0.513721", "0.513567", "0.51182413", "0.51146066", "0.51122963", "0.5104584", "0.5100495", "0.5100037", "0.50959104", "0.5089763", "0.50827736", "0.5081428", "0.50797987", "0.50782114", "0.5073376", "0.5072919", "0.5071879", "0.5062819", "0.5062797", "0.50515395", "0.50509256", "0.50476503", "0.5046082", "0.50406086", "0.5038508", "0.5036986", "0.5034074", "0.50304633", "0.5028371", "0.5015209", "0.5011041", "0.5009996", "0.5005021", "0.5002504", "0.50020915", "0.4997127", "0.4996429", "0.4995168", "0.4995161", "0.49918473", "0.4990101", "0.49863172", "0.49815074", "0.4981093", "0.49797425", "0.4978169", "0.49777207", "0.49744204", "0.4974332", "0.49739674", "0.49675712", "0.4967467", "0.4966042", "0.49639523", "0.49633607" ]
0.0
-1
sort list of objects randomly then update everything in this world
def update(self, dt):
    random.shuffle(self.gameObjects)
    for item in self.gameObjects:
        description = item.update(dt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dispatch_items_randomly(self, level):\n for item in self.list:\n item.position = Item.define_random_position(item, level)", "def populate_objects(self):\n if not self._random_object: # only populate the first object\n U.spawn_object(self.object_list[0], self.object_initial_position)\n else:\n rand_x = np.random.uniform(low=-0.35, high=0.35, size=(len(self.object_list),))\n rand_y = np.random.uniform(low=2.2, high=2.45, size=(len(self.object_list),))\n for idx, obj in enumerate(self.object_list):\n box_pos = Pose(position=Point(x=rand_x[idx],\n y=rand_y[idx],\n z=1.05))\n U.spawn_object(obj, box_pos)", "def _randomly_place_objects(self, urdfList):\n\n # Randomize positions of each object urdf.\n objectUids = []\n for urdf_name in urdfList:\n xpos = 0.35 + self._blockRandom * random.random()\n ypos = 0.28 + self._blockRandom * (random.random() - .5)\n angle = np.pi / 2 + self._blockRandom * np.pi * random.random()\n orn = p.getQuaternionFromEuler([0, 0, angle])\n urdf_path = os.path.join(self._urdfRoot, urdf_name)\n uid = p.loadURDF(urdf_path, [xpos, ypos, .05],\n [orn[0], orn[1], orn[2], orn[3]])\n objectUids.append(uid)\n # Let each object fall to the tray individual, to prevent object\n # intersection.\n for _ in range(300):\n p.stepSimulation()\n return objectUids", "def update(self):\n for object in reversed(self.addList):\n self.objects.append(object)\n self.addList.remove(object)\n\n for object in reversed(self.removeList):\n self.objects.remove(object)\n self.removeList.remove(object)\n\n self.objects = sorted(self.objects,key=priority)\n\n for object in self.objects:\n object.update()", "def randomize_herbs(self):\n random.shuffle(self.herbivores)", "def place_objects(self):\n placed_objects = []\n index = 0\n np.random.seed(300)\n # place objects by rejection sampling\n for _, obj_mjcf in self.mujoco_objects.items():\n horizontal_radius = obj_mjcf.get_horizontal_radius()\n bottom_offset = obj_mjcf.get_bottom_offset()\n success = False\n for _ in range(5000): # 5000 retries\n bin_x_half = self.bin_size[0] / 2 - horizontal_radius - 0.05\n bin_y_half = self.bin_size[1] / 2 - horizontal_radius - 0.05\n object_x = np.random.uniform(high=bin_x_half, low=-bin_x_half)\n object_y = np.random.uniform(high=bin_y_half, low=-bin_y_half)\n\n # make sure objects do not overlap\n object_xy = np.array([object_x, object_y, 0])\n pos = self.bin_offset - bottom_offset + object_xy\n location_valid = True\n for pos2, r in placed_objects:\n dist = np.linalg.norm(pos[:2] - pos2[:2], np.inf)\n if dist <= r + horizontal_radius:\n location_valid = False\n break\n\n # place the object\n if location_valid:\n # add object to the position\n placed_objects.append((pos, horizontal_radius))\n self.objects[index].set(\"pos\", array_to_string(pos))\n # random z-rotation\n quat = self.sample_quat()\n self.objects[index].set(\"quat\", array_to_string(quat))\n success = True\n print('object {} in pick place task: pos:{}, quat:{}'.format(index, pos, quat))\n break\n\n # raise error if all objects cannot be placed after maximum retries\n if not success:\n raise RandomizationError(\"Cannot place all objects in the bins\")\n index += 1", "def Shuffle(self):\r\n random.shuffle(self.cards_list)", "def test_shuffle(self):\n random.shuffle(self.liste)\n self.liste.sort()\n self.assertEqual(self.liste, list(range(10)))", "def test_shuffle(self):\n random.shuffle(self.liste)\n self.liste.sort()\n self.assertEqual(self.liste, list(range(10)))", "def totem_random():\n random_head()\n random_head()\n random_head()", "def 
detect_and_shuffle(self, *args):\n\n self._update_suspicion()\n self.remove_attackers()\n self.drop_buckets()\n buckets = self.get_buckets_to_sort()\n if len(buckets) > 0:\n self._reorder_buckets(buckets)\n self._sort_buckets(buckets)", "def put_items(self,*maplist):\n self.position_x = random.randint(0, (len(maplist) - 1))\n self.position_y = random.randint(1, (len(maplist[0]) - 2))\n\n while maplist[self.position_y][self.position_x] == \"x\":\n self.position_x = random.randint(0, (len(maplist) - 1))\n self.position_y = random.randint(1, (len(maplist[0]) - 2))", "def reindex_graphics(self):\n for obj in self.context.static_objects:\n self.canvas.children.remove(obj.widget.canvas)\n # fill _objects_z_index\n _objects_z_index = {}\n for obj in self.context.static_objects:\n y = obj.widget.pos[1]\n if not y in _objects_z_index:\n _objects_z_index[y] = []\n _objects_z_index[y].append(obj)\n _keys = _objects_z_index.keys()\n _keys.sort()\n _keys.reverse()\n for k in _keys:\n objs = _objects_z_index[k]\n for obj in objs:\n self.canvas.add(obj.widget.canvas)", "def randomize(self):\n if self.randomize_players is True:\n random.shuffle(self.player_field)", "def shuffle(self):\n self.logger.debug('Shuffling wallpaper queue')\n\n random.shuffle(self.wallpapers)\n self.index = 0", "def shuffle(self):\n for i in range(10):\n random.shuffle(self.set)", "def random_pos(self, ):\n self.pos_item['needle'] = self.shuffle_pos()\n self.pos_item['ether'] = self.shuffle_pos()\n self.pos_item['tube'] = self.shuffle_pos()", "def shuffle(self):\n\n args = list(self)\n random.shuffle(args)\n\n self.clear()\n super(DogeDeque, self).__init__(args)", "def generate_lists(self):\n scenelist = self.scenelist\n newbies = self.newbies\n claimlist = [ob for ob in self.claimlist if ob not in newbies]\n choices = self.valid_scene_choices\n num_scenes = self.NUM_SCENES - (len(claimlist) + len(scenelist))\n if num_scenes > 0:\n try:\n scenelist.extend(random.sample(choices, num_scenes))\n except ValueError:\n scenelist.extend(choices)\n scenelist = sorted(scenelist, key=lambda x: x.key.capitalize())\n self.caller.player_ob.db.random_scenelist = scenelist", "def organizeAndUpdate(self): \r\n for point in self.points:\r\n point.organize()\r\n point.update()", "def live(self):\n\t\t#random assignment of fittnes for now\n\t\tfor chrom in self.chromosomes:\n\t\t\tchrom.strength = random.random()\n\t\tself.chromosomes.sort(key=lambda chromosomes: chromosomes.strength, reverse = True)\n\n\t\tself.bestChromosomes = self.chromosomes[0:2]", "def Generate_Random( self ):\n print( 'Generating Random coordinates' )\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n self.Data.Stand[s].Tree[t].X = random.uniform( 0, 208.71 )\n self.Data.Stand[s].Tree[t].Y = random.uniform( 0, 208.71 )", "def update(entity_list):\n for entity in entity_list:\n # Only moveable entities should have the dest field\n if \"dest\" in entity:\n # If no destination, pick a new random one\n if entity[\"dest\"] == None:\n entity[\"dest\"] = random_pos(100, 100)\n \n # Move one step towards destination\n cpos = entity[\"position\"]\n dest = entity[\"dest\"]\n entity[\"position\"] = move(cpos, dest) \n\n # Clear destination if it has been reached\n if entity[\"dest\"] == entity[\"position\"]:\n entity[\"dest\"] = None", "def randomize_in_place(list1, list2, init=0):\n np.random.seed(seed=init)\n np.random.shuffle(list1)\n np.random.seed(seed=init)\n np.random.shuffle(list2)", 
"def main():\n\n import random\n print( \"*** Initializing new list ... done. \" )\n print( \"*** Filling in 20 random values ... done.\" )\n\n l = []\n\n for i in range( 20 ):\n l.append( random.randint( 0, 100 ))\n\n print( \" ### Unsorted list: \" )\n print( l )\n\n print( \"\\n*** Sorting the list with Bubble Sort ... done.\" )\n bubbleSort( l )\n\n print( \" ### Sorted list: \")\n print( l )", "def sort(self):\r\n print(f'robot is holding nothing ({self._item})')\r\n # print(l)\r\n SortingRobot.swap_item(self)\r\n SortingRobot.move_right(self)\r\n print(f'SECOND FOR LOOP Robot picked up item {self._item} and is now moving right')\r\n SortingRobot.set_light_off(self)\r\n for i in range(101):\r\n while SortingRobot.can_move_right(self) == True:\r\n SortingRobot.set_light_on(self) # Holding None\r\n print(f'WHILE LOOP robot is holding {self._item}')\r\n if SortingRobot.compare_item(self) == None:\r\n SortingRobot.swap_item(self)\r\n # SortingRobot.set_light_off(self) # Holding None\r\n SortingRobot.move_right(self)\r\n print(f'WHILE LOOP TOP NONE IF STATEMENT robot is holding {self._item}')\r\n if (SortingRobot.compare_item(self) == None) and (SortingRobot.can_move_left(self) == False): #\r\n break\r\n # print(l)\r\n if (SortingRobot.compare_item(self) == -1) and (SortingRobot.can_move_right(self) == True): # held item is lower than table\r\n print(f'item held is lower so swapping (current item = {self._item}) and moving right')\r\n SortingRobot.swap_item(self)\r\n SortingRobot.move_right(self)\r\n print(f'new item held is {self._item}')\r\n print(f'1curent position = {self._position}')\r\n # SortingRobot.move_right(self)\r\n # print(f'curent position = {self._position}')\r\n # SortingRobot.move_right(self)\r\n # print(f'curent position = {self._position}')\r\n # print(l[self._position])\r\n # print(l)\r\n if (SortingRobot.compare_item(self) == 1) and (SortingRobot.can_move_right(self) == False):\r\n SortingRobot.swap_item(self)\r\n print(f'2curent position = {self._position}')\r\n if SortingRobot.compare_item(self) == 0: # held item is equal to table\r\n print(f'3curent position = {self._position}')\r\n SortingRobot.move_right(self)\r\n SortingRobot.swap_item(self)\r\n pass\r\n if SortingRobot.compare_item(self) == 1 and (SortingRobot.can_move_right(self) == True): # held item is higher than table\r\n SortingRobot.move_right(self)\r\n print(f'4curent position = {self._position}')\r\n # if (SortingRobot.compare_item(self) == None) and (SortingRobot.light_is_on(self) == True): # either held or table is None\r\n # SortingRobot.swap_item(self)\r\n # SortingRobot.set_light_off(self)\r\n # print(f'5curent position = {self._position}')\r\n # if SortingRobot.can_move_right(self) == True:\r\n # SortingRobot.move_right(self)\r\n # print(f'FINAL WHILE LOOP robot hit end and is moving left one by one and swapping items '\r\n # f'one by one (current item = {self._item})')\r\n # print(f'6curent position = {self._position}')\r\n # # print(l)\r\n if SortingRobot.can_move_right(self) == False:\r\n print(f'7curent position = {self._position}')\r\n while SortingRobot.can_move_left(self) == True:\r\n # print('item held is lower but hit end of list so ')\r\n SortingRobot.move_left(self)\r\n SortingRobot.swap_item(self)\r\n print(f'8curent position = {self._position}')\r\n # print(f'Ending List = {l}')\r\n if (SortingRobot.compare_item(self) == -1) and (\r\n SortingRobot.can_move_right(self) == False): # held item is lower than table\r\n print(f'9curent position = {self._position}')\r\n while 
SortingRobot.can_move_left(self) == True:\r\n print('item held is lower but hit end of list so moving left one by one')\r\n SortingRobot.move_left(self)\r\n SortingRobot.swap_item(self)\r\n print(f'Robot at end of list; continues to hold item {self._item}; moving back to start now')\r\n print(f'10curent position = {self._position}')\r\n if SortingRobot.compare_item(self) == 1 and (\r\n SortingRobot.can_move_right(self) == False): # held item is higher than table\r\n print(f'11curent position = {self._position}')\r\n SortingRobot.swap_item(self)\r\n while SortingRobot.can_move_left(self) == True:\r\n # print('item held is lower but hit end of list so ')\r\n SortingRobot.swap_item(self)\r\n SortingRobot.move_left(self)\r\n print(f'12curent position = {self._position}')", "def shuffle(self):\n reorder(self.cards) #importing shuffle as reorder", "def sort(self):\n self.model_list.sort()\n for model in self.model_list:\n model.sort()", "def bogosort(to_sort):\n # Be sure to sort the list at each pass in the while loop to make it extra\n # inefficient!\n while sorted(to_sort) != to_sort:\n shuffle(to_sort)", "def randomize(self):\n self.size = randint(1,5)\n self.resource = randint(1,3)\n self.temperature = randint(20, 1000)\n self.gravity = randint(0, 10)\n for key in self.get_atmosphere().keys():\n setattr(self, key, randint(0, 5))\n for attribute_count in range(randint(0, 3)):\n pa = PlanetaryAttribute.objects.order_by('?')[0]\n self.attributes.add(pa)", "def reset_window(self):\n self.sorting = False\n self.sort_list = []\n self.window.delete('all')\n for i in range(100):\n random_height = randint(40,280)\n line_id = self.window.create_line(4*i+50, 20, 4*i+50, random_height)\n self.sort_list.append([random_height, line_id])\n self.window.update()", "def _sort(self):\n self.population.sort()\n self.population.reverse()", "def move_objects(self):\n\n def get_new_obj_pose(curr_pos, curr_quat):\n angular_disp = 0.0\n delta_alpha = np.random.uniform(-angular_disp, angular_disp)\n delta_rot = Quaternion(axis=(0.0, 0.0, 1.0), radians=delta_alpha)\n curr_quat = Quaternion(curr_quat)\n newquat = delta_rot * curr_quat\n\n pos_ok = False\n while not pos_ok:\n const_dist = True\n if const_dist:\n alpha = np.random.uniform(-np.pi, np.pi, 1)\n d = 0.25\n delta_pos = np.array([d * np.cos(alpha), d * np.sin(alpha), 0.])\n else:\n pos_disp = 0.1\n delta_pos = np.concatenate([np.random.uniform(-pos_disp, pos_disp, 2), np.zeros([1])])\n newpos = curr_pos + delta_pos\n lift_object = False\n if lift_object:\n newpos[2] = 0.15\n if np.any(newpos[:2] > high_bound[:2]) or np.any(newpos[:2] < low_bound[:2]):\n pos_ok = False\n else:\n pos_ok = True\n\n return newpos, newquat\n\n for i in range(self.num_objects):\n curr_pos = self.sim.data.qpos[self._n_joints + i * 7: self._n_joints + 3 + i * 7]\n curr_quat = self.sim.data.qpos[self._n_joints + 3 + i * 7: self._n_joints + 7 + i * 7]\n obji_xyz, obji_quat = get_new_obj_pose(curr_pos, curr_quat)\n self.sim.data.qpos[self._n_joints + i * 7: self._n_joints + 3 + i * 7] = obji_xyz\n self.sim.data.qpos[self._n_joints + 3 + i * 7: self._n_joints + 7 + i * 7] = obji_quat.elements\n\n sim_state = self.sim.get_state()\n # sim_state.qpos[:] = sim_state.qpos\n sim_state.qvel[:] = np.zeros_like(sim_state.qvel)\n self.sim.set_state(sim_state)\n self.sim.forward()", "def shuffle(self):\n x = len(self.org)\n result = self.org[:]\n var = x\n for i in range(x):\n id = random.randrange(0, var)\n result[id], result[var - 1] = result[var - 1], result[id]\n var -= 1\n\n return result", 
"def shuffle(self):\n self.__c_elem().melange()", "def _shuffle(self, reinit_indexes = False):\n print('Shuffling data...')\n # set seed for reproducibility\n #random.seed()\n # shuffle identities\n random.shuffle(self.identities)\n # shuffle images associated to each identity\n for identity in self.groundtruth_metadata.keys():\n random.shuffle(self.groundtruth_metadata[identity]['metadata'])\n if reinit_indexes:\n self.groundtruth_metadata[identity]['index'] = 0\n print('Finished shuffling data!')", "def move1(self):\n\n options = self.location.exits.keys()\n self.location.objects.remove(a)\n print('fred is moving..')\n self.location = self.location.exits[random.choice(list(options))]\n self.location.objects.append(a)", "def sorting_alg(self, my_list):\n for i in range(len(my_list)):\n for j in range(i+1, len(my_list)):\n if my_list[i] > my_list[j]:\n my_list[i], my_list[j] = my_list[j], my_list[i]\n #print(my_list)\n #sleep(1)\n return my_list", "def update(self):\n for p in self.persons:\n p.update(self.max_step)\n if p.state == State.INFECTED:\n for other in self.persons:\n if np.linalg.norm(p.position - other.position) < self.infection_radius and id(p) != id(other):\n if other.state == State.HEALTHY:\n other.state = random.choices(\n list(State), weights=[0.0, 1.0, 0, 0])[0]\n elif other.state == State.RECOVERED:\n other.state = random.choices(\n list(State), weights=[0, 0.1, 0.8, 0])[0]", "def shuffle(self):\n random_list = list(self.nums)\n shuffle(random_list)\n # Alternative would be to loop over every item and randomly shuffle it:\n # for i in xrange(len(self.now) - 1):\n # idx = random.randint(i,len(self.now) - 1)\n # self.now[i],self.now[idx] = self.now[idx],self.now[i]\n return random_list", "def elapseTime(self, gameState):\n newParticleList = []\n # Pretend each particle is a ghost, and set its position semi-randomly based on how\n # likely the ghost is to move to that position\n for particle in self.particles:\n newPosDist = self.getPositionDistribution(gameState, particle)\n newParticleList.append(util.sample(newPosDist))\n self.particles = newParticleList", "def run(self):\n self.model.sort(0)\n self.sort_object.task_complete.emit()", "def sort_random(i):\n return np.random.rand()", "def update_order():", "def update_order():", "def random_sample_objs(num_per_cat):\n\n obj_path_lists = load_object_lists(g_render_objs)\n obj_path_list = []\n\n for pathes in obj_path_lists:\n pathes = list(pathes)\n random.shuffle(pathes)\n if num_per_cat > len(pathes):\n num_per_cat = len(pathes)\n samples = random.sample(pathes, num_per_cat)\n obj_path_list += samples\n\n return obj_path_list", "def shuffle(self):\n random.shuffle(self.cards)", "def shuffle(self):\n random.shuffle(self.cards)", "def shuffle(self):\n random.shuffle(self.cards)", "def shuffle(self):\n random.shuffle(self.cards)", "def shuffle(self):\n random.shuffle(self.cards)", "def reshuffle(self):\n self.cards = []\n self.fill_standard_deck()\n self.shuffle()", "def sort(self):\n self.cards.sort()", "def sort(self):\n self.cards.sort()", "def shuffle(self):\n random.shuffle(self.get_cards())", "def place_items(self):\n for item in self.item_kit:\n coords = self.maze.random_coordinates()\n item(coords, self.scale)", "def connectRandom(self):\n shuffle(self.houses)\n for house in self.houses:\n house.connectRandomBattery(self.batteries, self)\n self.calculateCosts()", "def shuffle(self):\n import random\n random.shuffle(self.cards)", "def order(orderedObjects):\n for idx, obj in enumerate(orderedObjects):\n obj.position = 
idx\n obj.save()", "def randomize(self):\n #first take care of all parameters (from N(0,1))\n x = self._get_params_transformed()\n x = np.random.randn(x.size)\n self._set_params_transformed(x)\n #now draw from prior where possible\n x = self._get_params()\n [np.put(x,i,p.rvs(1)) for i,p in enumerate(self.priors) if not p is None]\n self._set_params(x)\n self._set_params_transformed(self._get_params_transformed())#makes sure all of the tied parameters get the same init (since there's only one prior object...)", "def update_position(self):\n new_position = []\n for i in range(self.num_literals):\n r = random()\n position_i = 1 if r < self.sigmoid(self.velocity[i]) else 0\n new_position.append(position_i)\n self.position = new_position", "def __shuffle_cards(self):\n random.shuffle(self.cards)", "def sort_my_hands(self):\n self.hands_list.sort(reverse=True)", "def test_sort():\n data = [\"filename_{}.py\".format(i) for i in range(200)]\n temp = data[:]\n random.shuffle(temp)\n assert data == sort(temp)", "def _do_update(self):\n sample = np.random.choice(self._seeds, 1, replace=False, p=self._seed_weights)[0]\n index = self._seeds.index(sample)\n new_seed = random.choice([neb for neb in self._graph.neighbors(sample)])\n self._edges.add((sample, new_seed))\n self._nodes.add(sample)\n self._nodes.add(new_seed)\n self._seeds[index] = new_seed", "def _order_observations(self):\n\n list_observations_y = zip(self.list_observations, self.list_y)\n list_observations_y = sorted(\n list_observations_y,\n key=lambda obs_y: np.linalg.norm(np.array(obs_y[0]))\n )\n self.list_observations = [obs for obs, y in list_observations_y]\n self.list_y = [y for obs, y in list_observations_y]", "def selection_sort(master, canvas, user_list):\n\n for i in range(len(user_list)):\n\n low = find_min(i, user_list)\n\n canvas.delete(user_list[i].object)\n canvas.delete(user_list[low].object)\n\n user_list[i], user_list[low] = user_list[low], user_list[i]\n\n user_list[i].iteration = i\n user_list[low].iteration = low\n\n user_list[i].object = canvas.create_rectangle(\n space + wi * user_list[i].iteration,\n le - user_list[i].value,\n space + wi * (user_list[i].iteration + 1),\n le, fill=\"blue\")\n user_list[low].object = canvas.create_rectangle(\n space + wi * user_list[low].iteration,\n le - user_list[low].value,\n space + wi * (user_list[low].iteration + 1),\n le, fill=\"blue\")\n\n time.sleep(stopper)\n\n master.update()", "def __init__(self,x1,y1,x2,y2,numBoids,obstacles):\n self.circleVel=math3d.VectorN(1,1)\n self.boid_list=[] #This is the list the boids are appended to, creating a flock of boids\n self.numBoids=numBoids\n self.obstacle_list=obstacles\n\n for i in range(0,self.numBoids):\n x=random.randint(x1,x2)\n y=random.randint(y1,y2)\n self.circlePos=math3d.VectorN(x,y)\n self.boid_list.append(Boid(self.circlePos,2,self.circleVel))", "def run_sort_home_by_score(self):\n self.homes = self.python_sort(self.homes)", "def sort_list(self,list_):\r\n list_.sort()", "def randomize_components(self):\n # For loop applied to components named (4, 5, 6)\n for i in range(4, 7):\n while True:\n rand_pos = randrange(len(self.tiles)+1)\n if self.tiles[rand_pos] == 0:\n self.tiles[rand_pos] = i\n break", "def reorder( self ):\n self.sorted.sort(self.compareFunction)", "def sort(self):\n # Base Case\n # If the robot has reached the end of the list and his light is off (no swaps have occurred),\n if self.can_move_right() == False and self.light_is_on() == False:\n return\n\n # Grab the first card\n self.swap_item()\n\n # While 
the robot is still able to move right,\n while self.can_move_right():\n\n # Move right\n self.move_right()\n\n # Compare the item in his hand to that in front of him\n # If the item in front of him is greater than what he is holding (-1), swap items\n if self.compare_item() == -1:\n # Swap the item\n self.swap_item()\n # Turn his light on to indicate that a swap has occured\n self.set_light_on()\n \n # Once the robot can no longer move right, he is at the end of the list and holding the largest value\n # Swap items\n self.swap_item()\n\n # Now the robot needs to traverse back to index 0, grabbing the smallest value as he goes\n # Follow the same logic as when he moved right with the largest value\n\n # If he hits a empty slot in the list, everything in front of it has been sorted\n # He doesn't need to sort anymore, he is holding the smallest value left to be sorted. \n # Put it in the blank spot and turn to move back in the other direction\n\n while self.compare_item() is not None:\n\n # Move left\n self.move_left()\n\n # Compare the item in his hand to that in front of him\n # If the item in front of him is less than what he is holding (1), swap items\n if self.compare_item() == 1:\n # Swap the item\n self.swap_item()\n # Turn his light on to indicate that a swap has occured\n self.set_light_on()\n \n # Once self.compare_item() is None, that means he is in front of a blank space\n # - everything to the left of the blank space has already been sorted\n # Deposit what he is holding\n self.swap_item()\n\n # Reset the light to the off position\n self.set_light_off()\n\n # Move one spot over to the right\n self.move_right()\n\n # Re-run the process all over again\n self.sort()", "def start_sorting(self):\n if self.sorting:\n return None\n self.sorting = True\n\n passes = 0\n while self.sorting:\n swap_done = False\n for i in range(len(self.sort_list)-passes-1):\n if not self.sorting:\n break\n if self.sort_list[i][0] > self.sort_list[i+1][0]:\n self.sort_list[i], self.sort_list[i+1] = self.sort_list[i+1], self.sort_list[i]\n self.window.coords(self.sort_list[i][1], 4*i+50, 20, 4*i+50, self.sort_list[i][0])\n self.window.coords(self.sort_list[i+1][1], 4*(i+1)+50, 20, 4*(i+1)+50, self.sort_list[i+1][0])\n self.window.itemconfig(self.sort_list[i][1], fill='red')\n self.window.itemconfig(self.sort_list[i+1][1], fill='red')\n swap_done = True\n self.window.update()\n self.window.itemconfig(self.sort_list[i][1], fill='black')\n self.window.itemconfig(self.sort_list[i+1][1], fill='black')\n self.window.update()\n passes += 1\n\n if not swap_done:\n self.sorting = False\n for line in self.sort_list:\n self.window.itemconfig(line[1], fill='green')\n else:\n self.window.itemconfig(self.sort_list[i+1][1], fill='blue')\n self.window.update()", "def eval_randoms(count):\n\t\tfor person in Simulation.community:\n\t\t\tSimulation.community[person].eval_random_strategy(count)", "def _move_randomly(self):\n a, b = randint(0, len(self.state) - 1), randint(0, len(self.state) - 1)\n wiz1, wiz2 = self.state[a], self.state[b]\n self._swap_wizards(wiz1, wiz2)", "def test_sorting():\n circles = [Circle(i) for i in range(10, 1, -1)] \n sorted_circles = sorted(circles, key=Circle.sort_key)\n assert circles != sorted_circles", "def pre_randomize(self):\n \n # Perform a phase callback if available. 
Note,\n # only trigger pre_randomize callbacks on composite\n # fields that are actually being used as random\n if self.is_used_rand and self.rand_if is not None:\n self.rand_if.do_pre_randomize()\n \n for f in self.field_l:\n f.pre_randomize()", "def test_shuffle(self):\n liste = list(range(10))\n random.shuffle(liste)\n liste.sort()\n # assertEqual qui prend deux arguments en paramètre et vérifie le test si les arguments sont identiques\n self.assertEqual(liste, list(range(10)))", "def generator(self, random, args):\r\n locations = [i for i in range(len(self.weights))]\r\n random.shuffle(locations)\r\n return locations", "def sort_cards(self):\n self.cards.sort(key=operator.attrgetter('persona', 'rank'))\n self.update_position()", "def sort_objects_from_viewworld(self, viewworld):\n opaque_objects = []\n transparent_objects = []\n centers = []\n for guid in self.objects:\n obj = self.objects[guid]\n if isinstance(obj, BufferObject):\n if obj.opacity * self.opacity < 1 and obj.bounding_box_center is not None:\n transparent_objects.append(obj)\n centers.append(transform_points_numpy([obj.bounding_box_center], obj.matrix)[0])\n else:\n opaque_objects.append(obj)\n if transparent_objects:\n centers = transform_points_numpy(centers, viewworld)\n transparent_objects = sorted(zip(transparent_objects, centers), key=lambda pair: pair[1][2])\n transparent_objects, _ = zip(*transparent_objects)\n return opaque_objects + list(transparent_objects)", "def generate(self):\n for i in range(4):\n random_first = randomize_first_box()\n self.randomize(random_first)\n for i in range(9):\n random_pos = randomize_position()\n self.randomize(random_pos)\n self.board.solve()", "def init_chromosomes(self):\n\n #Checks to see chromo_list has a size\n if not self.chromo_list: \n if self.config is None:\n print \"You need to load a config,\\\n before initalizing chromosomes\"\n\n else:\n #Initializes Empty List\n self.chromo_list = ((self.day_length*self.num_days)*\\\n (self.config.get_num_rooms()))*[None]\n \n #Seeds the random with system time\n random.seed() \n for classes in self.config.classes_list:\n print \"Placing classes\"\n print \"DURATION: \" + str(classes.duration)\n #Gets random spot for class\n rand = random.randint(0,len(self.chromo_list))\n \n #Used to place classes with long durations\n total_duration = 0\n temp_index = rand\n if not self.hash_map.has_key(classes):\n self.hash_map[classes] = temp_index\n \n #Places class in schedule\n while total_duration < classes.duration\\\n and temp_index < len(self.chromo_list):\n new_chromo = self.insert_chromosome(Chromosome(),\\\n temp_index)\n self.number_chromosomes += 1\n print \"Number of chromosomes: \" + str(self.number_chromosomes)\n #Checks to see if the class is already in the hashmap,\n #if not Class object is added with value being location\n #in list Only adds when the class starts\n #Assigns the class to the new chromosome\n new_chromo._class = classes\n self.calculate_fitness(new_chromo,temp_index)\n total_duration += 1\n temp_index += 1", "def finalize(self):\n self.set_as_sort1()", "def post_randomize(self):\n \n # Perform a phase callback if available\n if self.is_used_rand and self.rand_if is not None:\n self.rand_if.do_post_randomize()\n \n for f in self.field_l:\n f.post_randomize()", "def mutate(self):\n\n if len(self.genes) < 250:\n for g in self.genes:\n\n if MUTATION_CHANCE < random.random(): # random.random() gives float in [0,1)\n g.mutate()\n\n else:\n k = int(MUTATION_CHANCE*len(self.genes))\n for g in 
random.sample(self.genes,int(k)): #int(k)\n g.mutate()\n\n #To add random gene\n if ADD_GENE_CHANCE < random.random():\n self.genes.append(Gene(self.size)) #Call to Gene to add to genes list\n\n #To randomly remove genes\n\n if REM_GENE_CHANCE < random.random() and len(self.genes)>0:\n self.genes.remove(random.choice(self.genes))", "def mutate(self, number_of_mutations):\n self.mutated.clear()\n mutations = []\n for i in range(number_of_mutations+1):\n old_gene = random.choice(self.genes)\n while old_gene in mutations:\n old_gene = random.choice(self.genes)\n # print(self.max_time)\n old_gene.start_time = random.choice(range(self.max_time - old_gene.finish))\n self.mutated.append(self.genes.index(old_gene))", "def update(self):\n self.platform_list.update()\n self.enemy_list.update()", "def sort(self):\r\n\t\treturn sorted(self.sample)", "def sort(self):\n self.deckcards.sort()", "def shuffle(self):\n self.train_edges = np.random.permutation(self.train_edges)\n self.nodes = np.random.permutation(self.nodes)\n self.batch_num = 0", "def mutate(self):\n # swap two cities in the path\n first = random.randrange(len(self.path))\n second = random.choice([i for i in range(len(self.path)) if i != first])\n tmp = self.path[first]\n self.path[first] = self.path[second]\n self.path[second] = tmp", "def shuffle(lol, seed):\n for l in lol:\n random.seed(seed)\n random.shuffle(l)", "def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored", "def shuffle_question(self):\n r = random.SystemRandom()\n r.shuffle(self.question_list)", "def _shuffle(self):\n print \"Shuffled the bag\"\n # The 'random' library provides a really handy function we can\n # use called 'shuffle'. You provide 'shuffle' with a\n # 'sequence' (basically, a list) and the shuffle function\n # randomizes the placement of all items in the sequence\n # automatically. There is no return value from\n # \"random.shuffle\" because \"self.bag\" is modified in place.\n random.shuffle(self.bag)", "def shuffle(self, random_state=None): \n if random_state is None:\n random_state = self.random_state\n perm_ids = random_state.permutation(self.n_examples)\n self.u = self.u[perm_ids]\n self.v = self.v[perm_ids]\n self.rating = self.rating[perm_ids]", "def test_random_lst():\n from quick_sort import quick_sort\n lst_sort = sorted(rand_lst)\n assert quick_sort(rand_lst) == lst_sort", "def make_updates(self, x):\n global inc\n for stone in self.players[x].stones_reference:\n temp = Thread(target=stone.move_stone)\n temp.daemon = True\n temp.start()\n if not stone.is_alive:\n self.players[x].stones_reference.remove(stone)\n if self.num_players == 1:\n self.maps[0].control_music(self.players[0].min_x)\n\n rand_x = randrange(1, 100)\n rand_x_2 = randrange(1, 150)\n rand_x_3 = randrange(1, 75)\n if inc % rand_x == 0:\n self.maps[x].clouds[0].move_cloud()\n if inc % rand_x_2 == 0:\n self.maps[x].clouds[1].move_cloud()\n if inc % rand_x_3 == 0:\n self.maps[x].clouds[2].move_cloud()\n inc += 1" ]
[ "0.6626622", "0.6585694", "0.6483848", "0.6469369", "0.6035206", "0.6019736", "0.60169244", "0.5968468", "0.5968468", "0.5951621", "0.5919026", "0.59153605", "0.5901477", "0.5880968", "0.58774793", "0.5859918", "0.5817854", "0.58043724", "0.5801076", "0.5794008", "0.5786912", "0.57615674", "0.5758612", "0.57475036", "0.57418615", "0.57228833", "0.5717533", "0.5711719", "0.5704576", "0.5701705", "0.5677242", "0.566683", "0.566509", "0.5649081", "0.56480557", "0.5617821", "0.56135607", "0.5607445", "0.5597671", "0.55905527", "0.5589257", "0.55782896", "0.55748886", "0.5548621", "0.5548621", "0.5538088", "0.5532105", "0.5532105", "0.5532105", "0.5532105", "0.5532105", "0.5527228", "0.55115193", "0.55115193", "0.5510844", "0.5506308", "0.5502417", "0.5490936", "0.549042", "0.5489382", "0.5486671", "0.54853916", "0.5481351", "0.54780954", "0.54726815", "0.54638803", "0.54544616", "0.5453477", "0.544134", "0.5440153", "0.5430965", "0.5416272", "0.54118115", "0.54088885", "0.5404179", "0.5403302", "0.5393397", "0.53926295", "0.5383609", "0.5380372", "0.53739893", "0.53719974", "0.53708905", "0.53670233", "0.5364774", "0.53581583", "0.53554595", "0.5352786", "0.53461605", "0.53330946", "0.53324544", "0.53323627", "0.53302467", "0.5329374", "0.5328523", "0.5325965", "0.532425", "0.5320473", "0.5318197", "0.5296935" ]
0.6854891
0
add this to the world
def add_to_world(self, thing):
    thing.set_world_info(self.current_id, self)
    self.gameObjects.append(thing)
    self.current_id += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_world(self):\n raise NotImplementedError()", "def __init__(self, world):\n self.__init__(world, ArrayList())", "def update_world(self):\n pass", "def __init__(self, world, x, y, direction):\n self.ID = world.__register__(x, y, direction)", "def world(self):\n return World(self)", "def updateWorld(self):\n pass", "def create_the_world(cls):\n from muddery.server.mappings.element_set import ELEMENT\n world = ELEMENT(\"WORLD\")()\n world.setup_element(\"\")\n cls._world_data = world", "def world(self, value):\n self.worlds[self.world_index] = value", "def run_world(self):\n self.world_alive = True\n self.world_setable = False", "def __init__(self, wink, opp):\n super().__init__(wink, opp)\n opp.data[DOMAIN][\"entities\"][\"scene\"].append(self)", "def apply_to_world(self, world):\n # add the current obstacles\n for obstacle in self.current_obstacles:\n world.add_obstacle(obstacle)\n\n # program the robot supervisors\n for robot in world.robots:\n robot.supervisor.goal = self.current_goal[:]", "def world(self) -> World:\n return World(self)", "def save_world(self):\n pass", "def add(self):\n pass", "async def async_added_to_opp(self):\n self.opp.data[DOMAIN][\"entities\"][\"scene\"].append(self)", "def setup(self):\n build_world.start_level(self)", "def this_word(self):\n self.append = self.add_to_current_word", "def __init___0(self, world, list_):\n self.world = world\n self.list_ = list_", "def getWorld(self):\n return self.world", "def _setup_world(self, taskname):\n self.x0 = self._hyperparams[\"x0\"]\n self._world = [gym.make(taskname)\n for _ in range(self._hyperparams['conditions'])]", "def world(cls):\n try:\n return cls._world_data\n except AttributeError:\n cls.create_the_world()\n return cls._world_data", "def add(self):\n\n self.scene.projs.add(self)\n self.scene.all.add(self.scene.projs)\n self.rotate()", "def __init__(self):\n this = _sunpos.new_cLocation()\n try: self.this.append(this)\n except: self.this = this", "def __add__(self, this):\n return self.add(this)", "def build_world(self, width, height, entrance, agent, objects):\n env = WumpusEnvironment(width, height, entrance)\n if self.trace:\n agent = wumpus_environment.TraceAgent(agent)\n agent.register_environment(env)\n env.add_thing(agent, env.entrance)\n for (obj, loc) in objects:\n env.add_thing(obj, loc)\n print env.to_string()\n print self.objects \n return env", "def registeredBy(self, world):\n self.world = world\n self._determineSuffix()\n self.short = \"\"\n self.short = self.shortName(3)", "def add(self):\n self.inp.inputs.add(self)\n self.out.outputs.add(self)", "def append(self, this):\n return self.add(this)", "def add_site_to_context(self):\n g.current_site = self", "def introduce(self):\n print(f\"Hi, I am {self.name}!\")", "def create_scene(self):\n \n self.scene=soya.World()", "def add_word(self, word):\n word = self.map_word(word)\n super(InvariantLanguage, self).add_word(word)", "def start(self, world):\n self.sense(world)", "def __init__(self, game, world_file):\n self.game = game\n self.world_file = world_file\n self.floor_batch = game.floor_batch\n self.wall_batch = game.wall_batch\n self.lightmap = LightMap()\n self.tiles = {}\n self.load_world()\n self.load_tileset()\n self.player_light = self.lightmap.add_light(0,0,15)", "def add_body(self, planet):\n pass", "def add(self, *args):\n pass", "def add(self, *args):\n pass", "def Store(self,addThisThetaValue):\n self.addThisThetaValue = addThisThetaValue\n self.thetaAxis.append(self.addThisThetaValue)", "def init(self, world, mem):\n 
self.mem = mem\n self.world = world", "def __init__(self, lifespan=None):\n BaseWorld.__init__(self, lifespan)\n self.name = 'fruit'\n self.name_long = 'fruit selection world'\n print(\"Entering\", self.name_long)\n self.world_visualize_period = 1e6\n self.brain_visualize_period = 1e3\n\n # Break out the sensors into\n # 0: large?\n # 1: small?\n # 2: yellow?\n # 3: purple?\n # A sample sensor array would be\n # [1., 0., 1., 0.]\n # indicating a ripe peach.\n self.num_sensors = 4\n\n # Break out the actions into\n # 0: eat\n # 1: discard\n self.num_actions = 2\n self.actions = np.zeros(self.num_actions)\n self.reward = 0.\n\n # acted, eat, discard : boolean\n # These indicate whether the Becca chose to act on this\n # time step, and if it did, whether it chose to eat or discard\n # the fruit it was presented.\n self.acted = False\n self.eat = False\n self.discard = False\n\n # Grab a piece of fruit to get started.\n self.grab_fruit()", "def _addSite(self,site):\n self.sites.append(site)", "def add_thing(self, thing, location=None):\n if not isinstance(thing, Thing):\n thing = Agent(thing)\n if thing in self.things:\n print(\"Can't add the same thing twice\")\n else:\n thing.location = location if location is not None else self.default_location(thing)\n self.things.append(thing)\n if isinstance(thing, Agent):\n thing.performance = 0\n self.agents.append(thing)", "def add_thing(self, thing, location=None):\n if not isinstance(thing, Thing):\n thing = Agent(thing)\n if thing in self.things:\n print(\"Can't add the same thing twice\")\n else:\n thing.location = location if location is not None else self.default_location(thing)\n self.things.append(thing)\n if isinstance(thing, Agent):\n thing.performance = 0\n self.agents.append(thing)", "def set_up_world_ao(self):\n scene = self.set_as_active()\n new_world = bpy.context.blend_data.worlds.new('World of Wireframe')\n scene.world = new_world\n new_world.light_settings.use_ambient_occlusion = True\n new_world.light_settings.ao_factor = 0.3\n\n renderengine = scene.wirebomb.data_renderengine\n\n if renderengine == 'CYCLES':\n new_world.use_nodes = True\n new_world.node_tree.nodes[1].inputs[0].default_value = (1, 1, 1, 1)\n\n for node in new_world.node_tree.nodes:\n node.select = False\n \n elif renderengine == 'BLENDER_RENDER':\n new_world.horizon_color = (1, 1, 1)", "def to_world(self, x, y, **kwargs):", "def waypoint_add_global(self):\n pass", "def readd(self, d):\n\t\tworld = blazeworld.BlazeWorld()\n\t\tif \"gravityX\" in d:\n\t\t\tif type(d[\"gravityX\"]) == float or type(d[\"gravityX\"]) == int:\n\t\t\t\tworld.gravityX = float(d[\"gravityX\"])\n\t\t\telse:\n\t\t\t\tlog.error(\"Gravity X-component must be a number. Defaulting to 0\")\n\t\t\t\tworld.gravityX = 0.0\n\t\telse:\n\t\t\tlog.error(\"Gravity X-component not specified in file. Defaulting to 0\")\n\t\t\tworld.gravityX = 0.0\n\n\t\tif \"gravityY\" in d:\n\t\t\tif type(d[\"gravityY\"]) == float or type(d[\"gravityY\"]) == int:\n\t\t\t\tworld.gravityY = d[\"gravityY\"]\n\t\t\telse:\n\t\t\t\tlog.error(\"Gravity Y coordinate must be a number. Defaulting to -9.8\")\n\t\t\t\tworld.gravityY = -9.8\n\t\telse:\n\t\t\tlog.error(\"Gravity Y-component not specified in file. 
Defaulting to -9.8\")\n\t\t\tworld.gravityY = -9.8\n\n\t\tif \"shapes\" in d:\n\t\t\tfor shape in d[\"shapes\"]:\n\t\t\t\tworld.addShape(self.__parseShape(shape))\n\t\telse:\n\t\t\tlog.error(\"No shapes attribute specified in the file\")\n\n\t\treturn world", "def add_thing(self, thing, location = None):\n if not isinstance(thing, Thing):\n thing = Agent(thing)\n assert thing not in self.things, \"Don't add the same thing twice\"\n thing.location = location or self.default_location(thing)\n self.things.append(thing)\n if isinstance(thing, Agent):\n thing.performance = 0\n self.agents.append(thing)", "def init(self, world, mem):\n self.mem = mem\n self.client = world", "def init(self, world, mem):\n self.mem = mem\n self.client = world", "async def async_added_to_hass(self):\n await super().async_added_to_hass()\n self.coordinator.entities.append(self)", "def addLevel(self):\n pass", "def add(self, *args, **kwargs):\n return self.load(*args, **kwargs)", "def world(self):\n return self.stream_binding.world", "def _create_fleet(self):\n # make an alien\n alien = Alien(self)\n self.aliens.add(alien)", "def add(self, middleware):\n pass # pragma: no cover", "def __add__(self, sys):\n \n cl_sys = ClosedLoopSystem( sys , self )\n \n return cl_sys", "def __init__(self, size):\n self.world = [[None for y in range(size)] for x in range(size)]", "def __init__(self, *args, **kwargs):\n # init resource\n tk.Tk.__init__(self, *args, **kwargs)\n self.setup_members() # initialize class members\n self.setup_window() # root window\n self.setup_toolbar() # tool bar\n self.setup_canvas() # canvas to draw world\n\n # init world\n self.create_world()\n\n # make world alive\n self.after(5, lambda: self.life(5))", "def __init__(self, name, agent, all_locations):\n super().__init__(name)\n self.agent = agent\n self.world = agent.world\n self.all_locations = all_locations\n self.location_feat = get_location_key(agent)", "def __init__(self, *args):\n this = _libsbml.new_KineticLaw(*args)\n try: self.this.append(this)\n except: self.this = this", "def add_snowman(self):\n self.scenes[self.current_scene].add_object(Snowman())\n self.redraw()", "def to_add(self):\n pass", "def add(self, context):\n self._contexts.add(context)", "def render(self):\r\n \r\n # --------------------------------\r\n # Set world-level Panda properties\r\n # --------------------------------\r\n\r\n # Create Ambient Light 1\r\n ambientLight = AmbientLight( 'ambientLight_1' )\r\n ambientLight.setColor( Vec4( 0.2, 0.2, 0.2, 1 ) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, 50, 50)\r\n render.setLight(ambientLightNP)\r\n\r\n # Create Ambient Light 2\r\n ambientLight = AmbientLight( 'ambientLight_2' )\r\n ambientLight.setColor( Vec4(0.2, 0.2, 0.2, 1) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, -50, 50)\r\n render.setLight(ambientLightNP)\r\n# \r\n# # Directional light 01\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.8, 0.2, 0.2, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing backwards, towards the camera.\r\n# directionalLightNP.setHpr(180, 20, 0)\r\n# render.setLight(directionalLightNP)\r\n#\r\n# # Directional light 02\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.2, 0.2, 0.8, 1 ) )\r\n# directionalLightNP = render.attachNewNode( 
directionalLight.upcastToPandaNode() )\r\n# # This light is facing forwards, away from the camera.\r\n# directionalLightNP.setHpr(0, -20, 0)\r\n# render.setLight(directionalLightNP)\r\n\r\n #create a directional light\r\n #light = DirectionalLight('my dlight')\r\n\r\n #create a point light\r\n light = PointLight('plight')\r\n #light.setColor(VBase4(0.2, 0.2, 0.2, 1))\r\n\r\n #The following line doesn't work in Panda3D 1.7.0\r\n #lightPath = render.attachNewNode(light.upcastToPandaNode())\r\n\r\n lightPath = render.attachNewNode(light)\r\n lightPath.setPos( 10, 10, 10)\r\n\r\n #lightPath.lookAt(objPath)\r\n\r\n #illuminate all\r\n render.setLight(lightPath)\r\n #illuminate only objPath objects\r\n #objPath.setLight(lightPath)\r\n\r\n #self.SetMouseControls(objPath)\r\n #self.setKeyboardControls()\r\n \r\n taskMgr.add(self.mouseControlsTask, 'mouseControlsTask')\r\n #taskMgr.add(self.cameraMovementTask, 'cameraMovementTask') \r\n\r\n base.setBackgroundColor( .0, .0, .0 )\r\n\r\n #taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")\r\n #core.cmd.exeCommand(\"LoadEdge\", obj, file_name+self.WingedEdgeExtensions[0], file_name+self.WingedEdgeExtensions[1], file_name+self.WingedEdgeExtensions[2], file_name+self.WingedEdgeExtensions[3])\r\n #self.model = importer.loadFile(fileName)\r\n #if self.model is None:\r\n # print \"Unsupported file\"\r\n # return\r", "def add(self):\n\n db.session.add(self)\n db.session.commit()", "def add(self):\n\n db.session.add(self)\n db.session.commit()", "def __init__(self):\n this = _libsbml.new_SBMLWriter()\n try: self.this.append(this)\n except: self.this = this", "async def setheist(self, ctx):\r\n\r\n pass", "def addWorkspace(self, dryrun):\n pass", "def world(self):\n return self.worlds.get(self.world_index)", "def add_sphere(self):\n self.scenes[self.current_scene].add_object(Sphere())\n self.redraw()", "def build_and_add(self,*args):\n lattice = lattice_class.lattice(*args)\n self.lattices.append()\n self.meshfns.append(['in_memory'])", "def add(self):\n with managed_session() as session:\n session.add(self)\n session.flush()\n session.refresh(self)\n session.expunge(self)", "def register(self):\n raise NotImplementedError", "def register(self):\n raise NotImplementedError", "def __init__(self):\n self.__locusts__ = {}\n return", "def __add__(self, other):\n self.__dict__.update(other)\n return self", "def addToGameLevel(self, level, firstPosition):\n self.currentLevel = level\n self.x, self.y = firstPosition\n rectPosition = level.transformToScreenCoordinate(firstPosition)\n self.rect.midbottom = rectPosition\n if isinstance(self, UniqueObject):\n cblocals.object_registry.register(self)", "def __add__(self,_VariablesList):\n\n\t\t#Call the add method\n\t\tself.add(_VariablesList)\n\n\t\t#Return \n\t\treturn self", "def __init__(self):\n self.registry = {}", "def on_enter(self):\n # Add self to list of obstacles\n self.parent._obstacles.add(self)\n super().on_enter()", "def add(self, name, content):\n raise NotImplementedError", "def register(self):\n raise NotImplementedError(\"Should have implemented this\")", "def addObject(self,object):\n object.screen = self.screen\n object.parent = self\n self.addList.append(object)", "def _set(self, value):\n assert isinstance(self.world, pixel_worlds.ScreenBasedPixelWorld), \\\n 'Only ScreenBasedPixelWorlds can have the \"screen\" world attribute'\n\n super(ScreenWorldAttribute, self)._set(value)\n\n if self.world._populated:\n self.world.remove_objects(self.world.objects)\n\n objects, width, height = 
h.world.screen(self.world._screens[value], self.world._legend)\n\n self.world.create_objects(objects)\n else:\n # do nothing, assume the ScreenBasedPixelWorld constructor does all\n # the work\n pass", "def toworld(self, *args, **kwargs):\n return _coordsys.coordsys_toworld(self, *args, **kwargs)", "def __init__(self, crafty):\n self.__crafty = crafty\n #crafty.attr(self=self)", "def add(self, obj):\n raise NotImplementedError", "def __init__(self, name, location, health):\n super().__init__(name, location, health)", "def setup_members(self):\n ### cell\n self.cell_size = 8\n self.cell_row = 80\n self.cell_col = 100\n self.color_alive = \"black\"\n self.color_dead = \"white\"\n\n ### world\n self.init_modes = {} # read modes from json file\n self.init_world = {} # begining status\n self.world = {} # world's map\n # current status of world\n self.world_status = GOL(self.cell_row, self.cell_col)\n self.world_setable = True\n self.world_alive = False\n\n # widgets\n self.toolbar_height = 40\n self.world_size = [self.cell_size * self.cell_row,\n self.cell_size * self.cell_col]\n self.window_size = self.world_size\n self.window_size[0] += self.toolbar_height\n\n # resource\n self.saver_icon = \"save.gif\"\n self.run_icon = \"run.gif\"\n self.pause_icon = \"pause.gif\"\n self.stop_icon = \"stop.gif\"\n self.modes_file = \"gol.json\"\n self.modes_names = []", "def spawnWithStore(self, here, there):\n raise NotImplementedError(\"subclasses must implement the specifics\")", "def add_room(self, room, position):\n self.world[position[0]][position[1]] = room", "def _init_world(self):\n self.world.restricted_world = {\n 'not_road': [],\n 'cross_road': [],\n }\n for polygon in self._data_loader.data.get_polygons(0):\n polygon_name = polygon['label']\n polygon_points = polygon['points']\n if polygon_name in {'not_road', 'cross_road'}:\n self.world.restricted_world[polygon_name].append(geometry.Polygon(\n self._data_loader.convertIMG2PLAY(polygon_points)\n ))", "def add(obj):", "def addObject(self):\n\t\tsel = mc.ls( sl = True, typ = 'transform' )\n\t\tif sel:\n\t\t\tself.objects_lw.addItems( sel )", "def webAdd( self, web ):\n web.add( self )", "def add_head(self):\n self.scenes[self.current_scene].add_object(Head())\n self.redraw()", "def org(self):\r\n raise NotImplementedError()", "def add( self, chunk ):\n self.chunkSeq.append( chunk )\n chunk.web= weakref.ref(self)" ]
[ "0.6970694", "0.6964329", "0.6806117", "0.6598959", "0.64999914", "0.64821887", "0.64505976", "0.6299828", "0.6281762", "0.6269024", "0.62657404", "0.62023485", "0.61991674", "0.6084373", "0.5940955", "0.5922447", "0.58853203", "0.5820223", "0.5784347", "0.57677424", "0.57647425", "0.5750902", "0.57429063", "0.57279694", "0.5717122", "0.56737316", "0.564446", "0.560288", "0.5600272", "0.5594945", "0.5584398", "0.5546549", "0.55165803", "0.5516574", "0.5495703", "0.5491891", "0.5491891", "0.5477177", "0.54729337", "0.5454004", "0.5438009", "0.5437571", "0.5437571", "0.54312015", "0.5427395", "0.54222065", "0.5403589", "0.5379178", "0.5366396", "0.5366396", "0.5364921", "0.5324635", "0.5269782", "0.5263329", "0.5254378", "0.5245076", "0.52351743", "0.52295125", "0.5211272", "0.5209371", "0.5208457", "0.52067167", "0.5196732", "0.5196549", "0.51946187", "0.51898354", "0.51898354", "0.51697284", "0.51649857", "0.5164143", "0.5163539", "0.5160032", "0.51571727", "0.51540095", "0.5153782", "0.5153782", "0.5151473", "0.5151308", "0.514818", "0.51474303", "0.51468456", "0.51444745", "0.51276004", "0.5126739", "0.5118455", "0.5115578", "0.5107628", "0.51036483", "0.51020896", "0.51001567", "0.5099722", "0.5094636", "0.5091234", "0.50906396", "0.5089368", "0.5086512", "0.50854003", "0.50842696", "0.50842327", "0.50836325" ]
0.74709004
0
Merge data from another instance of this object.
def merge_stats(self, other):
    self[0] += other[0]
    self[1] += other[1]
    self[2] += other[2]
    self[3] = ((self[0] or self[1] or self[2]) and min(self[3], other[3]) or other[3])
    self[4] = max(self[4], other[3])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge(self, other):\n from .dataset import Dataset\n\n if other is None:\n return self.to_dataset()\n else:\n other_vars = getattr(other, 'variables', other)\n coords = merge_coords_without_align([self.variables, other_vars])\n return Dataset._from_vars_and_coord_names(coords, set(coords))", "def merge(self, other):\n for p in other:\n for key, val in p.items():\n self.contents[key] = val\n\n return self", "def _merge(self, other: dict):\n self._storage = dict_merge(self._storage, other)", "def update(self, other):\n _merge_dicts(self, other)", "def merge(self, obj):\n pass", "def copy_from_other(self, other):\n self.data = other.data\n self.url = other.url\n self.container_factory = other.container_factory", "def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)", "def _merge(self):\n raise NotImplementedError", "def merge(self, other):\n log.debug('Merging: %s and %s' % (self.serialize(), other.serialize()))\n for k in self.keys():\n for new_item in other[k]:\n if new_item not in self[k]:\n self[k].append(new_item)\n log.debug('Result: %s' % self.serialize())\n return self", "def merge(self, other):\n if other.n_points != self.n_points:\n raise ValueError(\n 'Deduplicator size mismatch: '\n f'{self.n_points} != {other.n_points}'\n )\n self.data_reduced.extend(other.data_reduced)\n self.data_kd.extend(other.data_kd)", "def merged_rep(self,other):\n raise NotImplementedError(\"Abstract method\")", "def merge(self, other):\r\n self._train_datas = np.concatenate(\r\n [self._train_datas, other._train_datas], 0)\r\n self._train_labels = np.concatenate(\r\n [self._train_labels, other._train_labels], 0)", "def combine(self, other):\n # Copy and merge\n ppt = PPT()\n ppt.contents = dict(self.contents)\n ppt.merge(other)\n return ppt", "def __finalize__(self, other, method=None, **kwargs):\n self = super().__finalize__(other, method=method, **kwargs)\n # merge operation: using metadata of the left object\n if method == \"merge\":\n for name in self._metadata:\n print(\"self\", name, self.au_columns, other.left.au_columns)\n object.__setattr__(self, name, getattr(other.left, name, None))\n # concat operation: using metadata of the first object\n elif method == \"concat\":\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other.objs[0], name, None))\n return self", "def mergeWith(self, other):\n assert not other.synthesised\n self.globals.update(other.globals)\n self.signals.update(other.signals)\n self.startsOfDataPaths.update(other.startsOfDataPaths)\n self.subUnits.update(other.subUnits)\n \n for s in other.signals:\n s.ctx = self", "def copyDataFrom (self, other):\n\n self.localTimeString=other.localTimeString\n self._myHasLocalTimeString=other._myHasLocalTimeString\n \n self.utcTimeString=other.utcTimeString\n self._myHasUtcTimeString=other._myHasUtcTimeString\n \n self.daylightSavingTime=other.daylightSavingTime\n self._myHasDaylightSavingTime=other._myHasDaylightSavingTime\n \n self.epoch=other.epoch\n self._myHasEpoch=other._myHasEpoch\n \n self.utcOffsetMinutes=other.utcOffsetMinutes\n self._myHasUtcOffsetMinutes=other._myHasUtcOffsetMinutes", "def merge(self, other):\n\n assert self.ins_addr 
== other.ins_addr\n assert self.type == other.type\n\n o = self.copy()\n o.targets |= other.targets\n\n return o", "def merge(self, other):\n self.isotxsMetadata = self.isotxsMetadata.merge(\n other.isotxsMetadata, self, other, \"ISOTXS\", AttributeError\n )\n self.gamisoMetadata = self.gamisoMetadata.merge(\n other.gamisoMetadata, self, other, \"GAMISO\", AttributeError\n )\n self.pmatrxMetadata = self.pmatrxMetadata.merge(\n other.pmatrxMetadata, self, other, \"PMATRX\", AttributeError\n )\n self.micros.merge(other.micros)\n self.gammaXS.merge(other.gammaXS)\n self.neutronHeating = _mergeAttributes(self, other, \"neutronHeating\")\n self.neutronDamage = _mergeAttributes(self, other, \"neutronDamage\")\n self.gammaHeating = _mergeAttributes(self, other, \"gammaHeating\")\n self.isotropicProduction = _mergeAttributes(self, other, \"isotropicProduction\")\n self.linearAnisotropicProduction = _mergeAttributes(\n self, other, \"linearAnisotropicProduction\"\n )\n # this is lazy, but should work, because the n-order wouldn't be set without the others being set first.\n self.nOrderProductionMatrix = (\n self.nOrderProductionMatrix or other.nOrderProductionMatrix\n )", "def __add__(self, other):\r\n # Make a defaultdict of defaultdicts, the latter of which returns\r\n # None when an key is not present\r\n merged_data = defaultdict(lambda: defaultdict(lambda: None))\r\n\r\n # We will keep track of all unique sample_ids and metadata headers\r\n # we have seen as we go\r\n all_sample_ids = set()\r\n all_headers = set()\r\n\r\n # add all values from self into the merged_data structure\r\n for sample_id, data in self._metadata.iteritems():\r\n all_sample_ids.add(sample_id)\r\n for header, value in data.iteritems():\r\n all_headers.add(header)\r\n merged_data[sample_id][header] = value\r\n\r\n # then add all data from other\r\n for sample_id, data in other._metadata.iteritems():\r\n all_sample_ids.add(sample_id)\r\n for header, value in data.iteritems():\r\n all_headers.add(header)\r\n # if the two mapping files have identical sample_ids and\r\n # metadata columns but have DIFFERENT values, raise a value\r\n # error\r\n if merged_data[sample_id][header] is not None and \\\r\n merged_data[sample_id][header] != value:\r\n raise ValueError(\"Different values provided for %s for \"\r\n \"sample %s in different mapping files.\"\r\n % (header, sample_id))\r\n else:\r\n merged_data[sample_id][header] = value\r\n\r\n # Now, convert what we have seen into a normal dict\r\n normal_dict = {}\r\n for sample_id in all_sample_ids:\r\n if sample_id not in normal_dict:\r\n normal_dict[sample_id] = {}\r\n\r\n for header in all_headers:\r\n normal_dict[sample_id][header] = \\\r\n merged_data[sample_id][header]\r\n\r\n # and create a MetadataMap object from it; concatenate comments\r\n return self.__class__(normal_dict, self.Comments + other.Comments)", "def copyDataFrom (self, other):\n\n self.outErrorPackets=other.outErrorPackets\n self._myHasOutErrorPackets=other._myHasOutErrorPackets\n \n self.inErrorPackets=other.inErrorPackets\n self._myHasInErrorPackets=other._myHasInErrorPackets\n \n self.inDiscardPackets=other.inDiscardPackets\n self._myHasInDiscardPackets=other._myHasInDiscardPackets\n \n self.outUnicastPackets=other.outUnicastPackets\n self._myHasOutUnicastPackets=other._myHasOutUnicastPackets\n \n self.inMulticastPackets=other.inMulticastPackets\n self._myHasInMulticastPackets=other._myHasInMulticastPackets\n \n self.outBroadcastPackets=other.outBroadcastPackets\n 
self._myHasOutBroadcastPackets=other._myHasOutBroadcastPackets\n \n self.inBroadcastPackets=other.inBroadcastPackets\n self._myHasInBroadcastPackets=other._myHasInBroadcastPackets\n \n self.outMulticastPackets=other.outMulticastPackets\n self._myHasOutMulticastPackets=other._myHasOutMulticastPackets\n \n self.inUnknownProtocolPackets=other.inUnknownProtocolPackets\n self._myHasInUnknownProtocolPackets=other._myHasInUnknownProtocolPackets\n \n self.outDiscardPackets=other.outDiscardPackets\n self._myHasOutDiscardPackets=other._myHasOutDiscardPackets\n \n self.inUnicastPackets=other.inUnicastPackets\n self._myHasInUnicastPackets=other._myHasInUnicastPackets\n \n self.outOctets=other.outOctets\n self._myHasOutOctets=other._myHasOutOctets\n \n self.inOctets=other.inOctets\n self._myHasInOctets=other._myHasInOctets", "def PassData(self, other):\n for this,that in zip(self.DataSet, other.DataSet):\n for assoc in [ArrayAssociation.POINT, ArrayAssociation.CELL, ArrayAssociation.ROW]:\n if this.HasAttributes(assoc) and that.HasAttributes(assoc):\n this.GetAttributes(assoc).PassData(that.GetAttributes(assoc))", "def merge(self, other):\n\n for child in other.children:\n self.add_deep_copy_of(child, merged=True)", "def add(self, other):\n if not isinstance(other, self.__class__):\n raise ValueError(\n f\"Argument (type {type(other)}) is not a {self.__class__} instance\"\n )\n if len(other.data):\n self.data = pd.concat([self.data, other.data], ignore_index=True)\n self.sort()", "def merge(self, other):\n self._moments = merge_pqc([self, other])._moments\n self._parameters = sp.symarray(self.parameter_symbol, len(self.symbols))\n if self.flatten_circuit:\n self.flatten()", "def merge(self, other: ProjectMeta) -> ProjectMeta:\n return self.clone(\n obj_classes=self._obj_classes.merge(other.obj_classes),\n tag_metas=self._tag_metas.merge(other._tag_metas),\n )", "def concat(self: TAvalancheDataset, other: TAvalancheDataset) -> TAvalancheDataset:\n return self.__class__([self, other])", "def _merge_raw(self, other):\n if other is None:\n variables = OrderedDict(self.variables)\n else:\n # don't align because we already called xarray.align\n variables = merge_coords_without_align(\n [self.variables, other.variables])\n return variables", "def merge(self, graph):\n # keep previous self.filename\n # copy data\n for x in graph.data:\n self.data.append(x)\n # copy headers, unless already exists (is so, info is lost)\n for key in graph.headers:\n if key not in self.headers:\n self.headers.update({key: graph.headers[key]})\n # copy graphInfo, unless already exists (is so, info is lost)\n for key in graph.graphInfo:\n if key not in self.graphInfo:\n self.graphInfo.update({key: graph.graphInfo[key]})\n # copy sampleInfo, unless already exists (is so, info is lost)\n for key in graph.sampleInfo:\n if key not in self.sampleInfo:\n self.sampleInfo.update({key: graph.sampleInfo[key]})", "def mergeWith(self, others):", "def extend(self, other_rollout):\n\n assert not self.is_terminal()\n assert all(k in other_rollout.fields for k in self.fields)\n for k, v in other_rollout.data.items():\n self.data[k].extend(v)\n self.last_r = other_rollout.last_r", "def __finalize__(self, other, method=None, **kwargs):\r\n # merge operation: using metadata of the left object\r\n if method == 'merge':\r\n for name in self._metadata:\r\n object.__setattr__(self, name, getattr(other.left, name, None))\r\n # concat operation: using metadata of the first object\r\n elif method == 'concat':\r\n for name in self._metadata:\r\n 
object.__setattr__(self, name, getattr(other.objs[0], name, None))\r\n else:\r\n for name in self._metadata:\r\n object.__setattr__(self, name, getattr(other, name, None))\r\n return self", "def merge_content(self, other):\n self.__content += other.__content", "def union(self, other):\n self.vertices.extend(other.vertices)\n self.edges.extend(other.edges)\n self.faces.extend(other.faces)\n return self", "def merge_other(self, other):\n assert(not other.isSet())\n with self.__cond:\n if self.__isset:\n other.set(self.__data)\n else:\n self.__merged.append(other)", "def get_merged_data(self):\n return self._combinedata", "def add_other_meta_data(self, other: _MetaData) -> None:\n\n for key in other._meta_data_dict.keys():\n self.add_data(key, other._meta_data_dict[key])", "def combine(self, other) -> None:\n assert self.id_ == other.id_\n assert self.type_ == other.type_\n self.count += other.count", "def merge(self, other: PerfData):\n self.total_samples += other.total_samples\n if self.total_time == 0.0:\n self.total_time = other.total_time\n self.compile_time = max(self.compile_time, other.compile_time)\n self.programming_time = max(\n self.programming_time, other.programming_time\n )\n if self.est_samples_per_sec == 0.0:\n self.est_samples_per_sec = other.est_samples_per_sec\n else:\n assert (\n self.est_samples_per_sec == other.est_samples_per_sec\n ), \"Expected all fabric-based performance estimates to be identical\"\n\n if self.total_time > 0:\n self.samples_per_sec = float(self.total_samples) / self.total_time\n else:\n self.samples_per_sec = 0.0", "def merge(self, other):\n merged = copy.deepcopy(self.__dict__())\n for k, v in other.__dict__():\n if k in merged and getattr(self, k):\n if isinstance(v, (string_types, bool)):\n pass\n else:\n list_of_stuff = merged.get(k, [])\n for entry in v:\n if entry not in list_of_stuff:\n list_of_stuff.append(entry)\n merged[k] = list_of_stuff\n else:\n merged[k] = v\n return CondaEnvironmentProvider(**merged)", "def __init__(self, v1, v2):\n mergedData = []\n list(map(mergedData.extend, list(zip_longest(v1, v2))))\n self.data = list(filter(lambda x: x is not None, mergedData))\n self.index = 0", "def combine_data(self, object, additional_data):\n object[\"ancestors\"] = additional_data[\"ancestors\"] if self.cartographer_client else []\n object[\"position\"] = additional_data.get(\"order\", 0) if additional_data else 0\n object = super(ResourceMerger, self).combine_data(object, additional_data)\n return combine_references(object)", "def merge(self, other):\n extras = other.difference(self)\n if len(extras) > 0:\n self.update(extras)\n self.reset()\n return True\n return False", "def merge(self, other):\n # todo: Using the return value None to denote the identity is a\n # bit dangerous, since a function with no explicit return statement\n # also returns None, which can lead to puzzling bugs. 
Maybe return\n # a special singleton Identity object instead?\n raise NotImplementedError", "def __add__(self, other):\n self.__dict__.update(other)\n return self", "def merge(self, other):\n self._segments.extend(other._segments)\n self._segments.sort()", "def merge_struct_arrays(self, data1, data2):\n data_final = np.concatenate((data1, data2))\n return data_final", "def copy(self):\n return self.update({})", "def merge(self, other_config):\n # Make a copy of the current attributes in the config object.\n config_options = copy.copy(self._user_provided_options)\n\n # Merge in the user provided options from the other config\n config_options.update(other_config._user_provided_options)\n\n # Return a new config object with the merged properties.\n return Config(**config_options)", "def __add__(self, other):\n merged_profile = super().__add__(other)\n\n # unstruct specific property merging\n merged_profile._empty_line_count = (\n self._empty_line_count + other._empty_line_count)\n merged_profile.memory_size = self.memory_size + other.memory_size\n samples = list(dict.fromkeys(self.sample + other.sample))\n merged_profile.sample = random.sample(list(samples),\n min(len(samples), 5))\n\n # merge profiles\n merged_profile._profile = self._profile + other._profile\n\n return merged_profile", "def update_inplace_from(self, other):\n self.__dict__ = other.__dict__.copy()", "def merge(self, other):\n self._mergeKeys(other)\n self._binaryOperationCheck(other)\n for id in self.clock.keys():\n print id\n self.clock[id] = max(self.clock[id], other.clock[id])", "def combine(self, existing):\n return self", "def _extend(self, other):\n for key, value in list(other.entries.items()):\n self._add_entry(key, value)", "def join(self, other, on):\n\t\t# check for correct join\n\t\tif not (on in self.headers or on in other.headers):\n\t\t\tprint \"Error: header '{0}' not found in both collections\".format(on)\n\t\t\treturn None\n\n\t\t# create new dataset\n\t\tjoined = Dataset()\n\t\t\n\t\t# fill new dataset with combined data\n\t\tmappedHeaders = joinHeaders(self, other, joined, on)\n\t\tmergeRows(self, other, joined, on, mappedHeaders)\n\t\tjoined.ensureFilled()\n\n\t\t# return newly created dataset\n\t\treturn joined", "def merge_two_dicts(self, x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_contextual(self, other):\n # TODO: This is currently dependent on our data model? 
Make more robust to schema changes\n # Currently we assume all lists at Compound level, with 1 further potential nested level of lists\n for k in self.keys():\n # print('key: %s' % k)\n for item in self[k]:\n # print('item: %s' % item)\n for other_item in other.get(k, []):\n # Skip text properties (don't merge names, labels, roles)\n if isinstance(other_item, six.text_type):\n continue\n for otherk in other_item.keys():\n if isinstance(other_item[otherk], list):\n if len(other_item[otherk]) > 0 and len(item[otherk]) > 0:\n other_nested_item = other_item[otherk][0]\n for othernestedk in other_nested_item.keys():\n for nested_item in item[otherk]:\n if not nested_item[othernestedk]:\n nested_item[othernestedk] = other_nested_item[othernestedk]\n elif not item[otherk]:\n item[otherk] = other_item[otherk]\n log.debug('Result: %s' % self.serialize())\n return self", "def __add__(self, other):\n mesh = deepcopy(self)\n mesh.MergeWith(other)\n return mesh", "def copy_(self, other):\n self.share.copy_(other.share)\n self.encoder = other.encoder", "def combine_dict(self, dict2):\n # iterate through smaller data set\n # base_set will be the larger set and is used for updating\n if len(self.content[\"values\"]) > len(dict2[\"values\"]):\n large_set = self.content[\"values\"]\n small_set = dict2[\"values\"]\n base_set = self.content\n else:\n small_set = self.content[\"values\"]\n large_set = dict2[\"values\"]\n base_set = dict2\n\n subset = {}\n for key in small_set.keys():\n # determine wether to compare keys\n if key in large_set:\n updated_l = large_set[key][\"updated_at\"]\n updated_s = small_set[key][\"updated_at\"]\n if updated_l == 'NULL':\n if updated_s != 'NULL':\n # update to not NULL set\n # if both updated_at are NULL, things\n # are ambiguos. We could defer to created_at\n # but for simplicity we will default to\n # the values in the larger set\n subset[key] = small_set[key]\n else:\n if updated_s == 'NULL':\n # update to not NULL set\n subset[key] = large_set[key]\n else:\n if updated_l > updated_s:\n subset[key] = large_set[key]\n else:\n subset[key] =small_set[key]\n else:\n subset[key] = small_set[key]\n base_set[\"values\"].update(subset)\n new_obj = BackupData()\n new_obj.load_from_dict(base_set)\n return new_obj", "def hallucinate_merge(self, other):\n res = CompleteVec(None,None,self.max_num_samples)\n res.needs_update = True\n return res", "def extend(self, other):\n if len(self.vertices[0]) != len(other.vertices[0]):\n raise ValueError(\"Rank mismatch ({0} != \"\n \"{1})\".format(self.vertices.shape[1],\n other.vertices.shape[1]))\n if self._geotype != other._geotype:\n raise TypeError(\"Geometry mismatch ({0} != \"\n \"{1})\".format(self._geotype, other._geotype))\n\n self.vertices = np.vstack([self.vertices, other.vertices])\n self._cache = {}\n return self", "def merge(self, *other):\n # Compute union of Fingerprints\n union = set().union(self, *other)\n # Create new fingerprint from union\n result = super(Fingerprint, type(self)).__new__(type(self), union)\n # Set n_flows to combination of self and other\n result.__setattr__('n_flows', self.n_flows + sum(o.n_flows for o in other))\n # Return result\n return result", "def merge_datasets(self, other):\r\n if isinstance(other, SpatialDataFrame) and \\\r\n other.geometry_type == self.geometry_type:\r\n return pd.concat(objs=[self, other], axis=0)\r\n elif isinstance(other, DataFrame):\r\n return pd.concat(objs=[self, other], axis=0)\r\n elif isinstance(other, Series):\r\n self['merged_datasets'] = other\r\n elif 
isinstance(other, SpatialDataFrame) and \\\r\n other.geometry_type != self.geometry_type:\r\n raise ValueError(\"Spatial DataFrames must have the same geometry type.\")\r\n else:\r\n raise ValueError(\"Merge datasets cannot merge types %s\" % type(other))", "def __add__ ( self, other, resample_opts=None ):\n result = ObservationStorage (datadir=self.datadir, \\\n resample_opts=resample_opts )\n if self.date[0] > other.date[0]:\n start_date = other.date[0]\n else:\n start_date = self.date[0]\n if self.date[-1] > other.date[-1]:\n end_date = other.date[-1]\n else:\n end_date = self.date[-1]\n \n delta = datetime.timedelta ( days=1 )\n this_date = start_date.date()\n end_date = end_date.date() + delta\n \n this_obs_dates = [ x.date() for x in self.date ]\n other_obs_dates = [ x.date() for x in other.date ]\n \n date = [] ; vza = [] ; vaa = [] ; sza = [] ; saa = []\n emulator = [] ; mask = [] ; data_pntr = [] ; spectral = []\n sensor = []\n \n while this_date < end_date:\n if this_date in this_obs_dates:\n iloc = this_obs_dates.index ( this_date )\n date.append ( self.date[iloc] )\n emulator.append ( self.emulator[iloc] )\n vza.append ( self.vza[iloc] )\n sza.append ( self.sza[iloc] )\n vaa.append ( self.vaa[iloc] )\n saa.append ( self.saa[iloc] )\n spectral.append ( self.spectral )\n mask.append ( ( self.get_mask, [iloc] ) )\n sensor.append ( self.sensor )\n \n data_pntr.append ( self._data_pntr[iloc] )\n if this_date in other_obs_dates:\n iloc = other_obs_dates.index ( this_date )\n date.append ( other.date[iloc] )\n emulator.append ( other.emulator[iloc] )\n vza.append ( other.vza[iloc] )\n sza.append ( other.sza[iloc] )\n vaa.append ( other.vaa[iloc] )\n saa.append ( other.saa[iloc] )\n spectral.append ( other.spectral )\n mask.append ( ( other.get_mask, [iloc] ) )\n sensor.append ( other.sensor )\n data_pntr.append ( other._data_pntr[iloc] )\n this_date += delta\n result.vza = vza\n result.vaa = vaa\n result.sza = sza \n result.saa = saa \n result.date = date\n result.spectral = spectral\n result.masks = mask\n result.sensor = sensor\n result.emulator = emulator\n result._data_pntr = data_pntr\n return result", "def Merge(self, other):\n\n # Logging just in case\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: before\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: deleted\", %s);'\n %(other.persistant['id'], \n sql.FormatSqlValue('details',\n repr(other.persistant))))\n\n # Fields which can be summed\n for f in ['plays', 'skips']:\n self.persistant[f] = (self.persistant.get(f, 0) +\n other.persistant.get(f, 0))\n\n # Date fields where we take the newest\n for f in ['last_played', 'last_skipped', 'last_action']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a > b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Date fields where we take the oldest\n for f in ['creation_time']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a < b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Fields where we only clobber ours if we don't have a value\n for f in ['artist', 'album', 'song']:\n if not self.persistant.has_key(f) or not 
self.persistant[f]:\n self.persistant[f] = other.persistant.get(f, None)\n\n # Sometimes the number is a placeholder\n if self.persistant.has_key('number') and self.persistant['number'] == -1:\n self.persistant['number'] = other.persistant.get('number', -1)\n if not self.persistant.has_key('number'):\n self.persistant['number'] = other.persistant.get('number', -1)\n\n # Update the id in the tags table\n tags = self.db.GetRows('select tag from tags where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: tags: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(tags))))\n\n try:\n self.db.ExecuteSql('update tags set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n except:\n # This can happen if the is already a matching tag for the first track\n pass\n\n # Update the id in the paths table\n paths = self.db.GetRows('select path from paths where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: paths: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(paths))))\n \n self.db.ExecuteSql('update paths set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: after\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('commit;')", "def concat(self, other):\n self.add_rules(other.cliques)\n self.prop_names.update(other.prop_names)", "def source(self, other):\n raise NotImplementedError", "def __add__(self, other):\n if not isinstance(other, RunTS):\n raise TypeError(f\"Cannot combine {type(other)} with RunTS.\")\n\n # combine into a data set use override to keep attrs from original\n combined_ds = xr.combine_by_coords(\n [self.dataset, other.dataset], combine_attrs=\"override\"\n )\n\n n_samples = (\n self.sample_rate\n * float(\n combined_ds.time.max().values - combined_ds.time.min().values\n )\n / 1e9\n ) + 1\n\n new_dt_index = make_dt_coordinates(\n combined_ds.time.min().values,\n self.sample_rate,\n n_samples,\n self.logger,\n )\n\n new_run = RunTS(\n run_metadata=self.run_metadata,\n station_metadata=self.station_metadata,\n survey_metadata=self.survey_metadata,\n )\n\n new_run.dataset = combined_ds.interp(\n time=new_dt_index, method=\"slinear\"\n )\n\n new_run.run_metadata.update_time_period()\n new_run.station_metadata.update_time_period()\n new_run.survey_metadata.update_time_period()\n new_run.filters = self.filters\n new_run.filters.update(other.filters)\n\n return new_run", "def __add__(self, other):\n if type(other) is not type(self):\n raise TypeError('`{}` and `{}` are not of the same profiler type.'.\n format(type(self).__name__, type(other).__name__))\n\n # error checks specific to its profiler\n self._add_error_checks(other)\n\n merged_profile = self.__class__(\n data=None, samples_per_update=self._samples_per_update,\n min_true_samples=self._min_true_samples, options=self.options\n )\n merged_profile.encoding = self.encoding\n if self.encoding != other.encoding:\n merged_profile.encoding = 'multiple files'\n\n merged_profile.file_type = self.file_type\n if self.file_type != 
other.file_type:\n merged_profile.file_type = 'multiple files'\n\n merged_profile.total_samples = self.total_samples + other.total_samples\n\n merged_profile.times = utils.add_nested_dictionaries(self.times,\n other.times)\n\n return merged_profile", "def copyAttributes(self, other, add_nxpars=False):\n import copy\n \n self.setTitle(other.getTitle())\n self.setDataSetType(other.getDataSetType())\n self.setAllAxisLabels(other.getAllAxisLabels())\n self.setAllAxisUnits(other.getAllAxisUnits())\n self.setYLabel(other.getYLabel())\n self.setYUnits(other.getYUnits())\n if len(self.attr_list.keys()) == 0:\n self.attr_list = copy.copy(other.attr_list)\n else:\n self.attr_list.instrument = copy.copy(other.attr_list.instrument)\n self.attr_list.sample = copy.copy(other.attr_list.sample)\n\n if add_nxpars:\n nxpar_keys = [item[0] for item in self.attr_list.iteritems() \\\n if isinstance(item[1], NxParameter)]\n\n for nxpar_key in nxpar_keys:\n self.attr_list[nxpar_key] += other.attr_list[nxpar_key]\n else:\n # Do nothing\n pass\n \n keys_to_get = [other_key for other_key in other.attr_list \\\n if other_key not in self.attr_list]\n \n for key_to_get in keys_to_get:\n self.attr_list[key_to_get] = \\\n copy.copy(other.attr_list[key_to_get])", "def updateFromContext(self, other):\n value = self.valueType.set(self.value, other.value)\n self.set(value)\n self.origins.extend(other.origins)", "def combine_data(self, object, additional_data):\n for k, v in additional_data.items():\n if isinstance(v, list):\n object[k] = object.get(k, []) + v\n else:\n object[k] = v\n for instance in object.get(\"instances\", []):\n if instance.get(\"sub_container\", {}).get(\"top_container\", {}).get(\"_resolved\"):\n del instance[\"sub_container\"][\"top_container\"][\"_resolved\"]\n object = super(ArchivalObjectMerger, self).combine_data(object, additional_data)\n return combine_references(object)", "def merge(self, other):\n\n if not self.can_merge(other):\n raise ValueError('These protocols can not be safely merged.')\n\n inputs_to_consider = self._find_inputs_to_merge()\n\n for input_path in inputs_to_consider:\n\n merge_behavior = getattr(type(self), input_path.property_name).merge_behavior\n\n if merge_behavior == MergeBehaviour.ExactlyEqual:\n continue\n\n if (isinstance(self.get_value(input_path), ProtocolPath) or\n isinstance(other.get_value(input_path), ProtocolPath)):\n\n continue\n\n if merge_behavior == InequalityMergeBehaviour.SmallestValue:\n value = min(self.get_value(input_path), other.get_value(input_path))\n elif merge_behavior == InequalityMergeBehaviour.LargestValue:\n value = max(self.get_value(input_path), other.get_value(input_path))\n else:\n raise NotImplementedError()\n\n self.set_value(input_path, value)\n\n return {}", "def merge_user_information(self, sid):\n pprint(self.extracted_information)\n for (field, value) in self.extracted_information.items():\n value = value[0] # TODO: should set data for everything in list but will do later\n self.data.set_data(sid, field, value[0])", "def __add__(self, other, inplace=False, **kwargs):\n output = super(HERAData, self).__add__(other, inplace=inplace, **kwargs)\n if inplace:\n output = self\n output._determine_blt_slicing()\n output._determine_pol_indexing()\n if not inplace:\n return output", "def _copy_data_from(self, original):\n raise NotImplementedError()", "def merge_results(self, other_processor):\n if not isinstance(other_processor, self.__class__):\n raise ValueError(f\"Can only extend with another \"\n f\"{self.__class__.__name__} 
instance.\")\n\n # Where there is overlap, there _should_ be agreement.\n self._evidence_counts.update(other_processor._evidence_counts)\n self._source_counts.update(other_processor._source_counts)\n self._belief_scores.update(other_processor._belief_scores)\n\n # Merge the statement JSONs.\n for k, sj in other_processor.__statement_jsons.items():\n if k not in self.__statement_jsons:\n self.__statement_jsons[k] = sj # This should be most of them\n else:\n # This should only happen rarely.\n for evj in sj['evidence']:\n self.__statement_jsons[k]['evidence'].append(evj)\n\n # Recompile the statements\n self._compile_results()\n return", "def merge(self, new_store):\n if new_store.name and len(new_store.name) > 0:\n self.name = new_store.name\n if new_store.address and len(new_store.address) > 0:\n self.address = new_store.address\n if new_store.city and len(new_store.city) > 0:\n self.city = new_store.city\n if new_store.state and len(new_store.state) > 0:\n self.state = new_store.state\n if new_store.zip and new_store.zip > 0:\n self.zipcode = new_store.zip\n if new_store.phone and new_store.phone > 0:\n self.phone = new_store.phone", "def __add__(self, other):\n train = copy.deepcopy(self.train)\n\n for img_path, pid, camid, dsetid in other.train:\n pid += self.num_train_pids\n camid += self.num_train_cams\n dsetid += self.num_datasets\n train.append((img_path, pid, camid, dsetid))\n\n ###################################\n # Note that\n # 1. set verbose=False to avoid unnecessary print\n # 2. set combineall=False because combineall would have been applied\n # if it was True for a specific dataset; setting it to True will\n # create new IDs that should have already been included\n ###################################\n if isinstance(train[0][0], str):\n return ImageDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False\n )\n else:\n return VideoDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False,\n seq_len=self.seq_len,\n sample_method=self.sample_method\n )", "def merge(self, other_btree):\n pass", "def update(self, other):\n self._start = other._start\n self._end = other._end\n self._nodes = {k: v.copy() for k,v in other._nodes.iteritems()}\n self._edges = {k: set(v) for k,v in other._edges.iteritems()}\n self._names = set(other._names)\n self.current = other.current", "def merge_with(self, other: \"Availability\") -> \"Availability\":\n\n if not isinstance(other, Availability):\n raise Exception(\"Please provide an Availability object.\")\n if not other.overlaps(self, strict=False):\n raise Exception(\"Only overlapping Availabilities can be merged.\")\n\n return Availability(\n start=min(self.start, other.start),\n end=max(self.end, other.end),\n event=getattr(self, \"event\", None),\n person=getattr(self, \"person\", None),\n room=getattr(self, \"room\", None),\n )", "def union(self, other: Catalog) -> Catalog:\n cat = self.copy()\n oth_cp = other.copy()\n\n for k in oth_cp.keys():\n for ver_id, version in oth_cp[k].versions.items():\n cat[k][ver_id] = version\n return cat", "def sync(self, other):\n pass # TODO", "def __add__(self, other):\n\n if not isinstance(other, Photons):\n raise ValueError('Can only add a Photons object to another Photons object.')\n\n # don't want to modify what is being added\n other = other.copy()\n\n # make column units consistent with self\n other.match_units(self)\n\n # add and /or update observation 
columns as necessary\n self.add_observations_column()\n other.add_observations_column()\n n_obs_self = len(self.obs_metadata)\n other['n'] += n_obs_self\n\n # re-reference times to the datum of self\n other.set_time_datum(self.time_datum)\n\n # stack the data tables\n photons = _tbl.vstack([self.photons, other.photons])\n\n # leave it to the user to deal with sorting and grouping and dealing with overlap as they see fit :)\n obs_metadata = self.obs_metadata + other.obs_metadata\n obs_times = list(self.obs_times) + list(other.obs_times)\n obs_bandpasses = list(self.obs_bandpasses) + list(other.obs_bandpasses)\n\n return Photons(photons=photons, obs_metadata=obs_metadata, time_datum=self.time_datum, obs_times=obs_times,\n obs_bandpasses=obs_bandpasses)", "def populate(self, fid1, fid2):\n self.input1 = json.load(fid1)\n self.input2 = json.load(fid2)", "def merge(self, other):\n\n if not self.can_merge(other):\n msg = 'Unable to merge \"{}\" with \"{}\" filters'.format(\n self.type, other.type)\n raise ValueError(msg)\n\n # Create deep copy of filter to return as merged filter\n merged_filter = copy.deepcopy(self)\n\n # Merge unique filter bins\n merged_bins = self.bins + other.bins\n\n # Sort energy bin edges\n if 'energy' in self.type:\n merged_bins = sorted(merged_bins)\n\n # Assign merged bins to merged filter\n merged_filter.bins = list(merged_bins)\n return merged_filter", "def merge(self, other: \"GraphSet\") -> None:\n if other.name != self.name:\n raise UnmergableGraphSetsException(\n f\"Unable to merge graph with name {other.name} into {self.name}\"\n )\n if other.version != self.version:\n raise UnmergableGraphSetsException(\n f\"Unable to merge graph with version {other.version} into {self.version}\"\n )\n self.start_time = min(self.start_time, other.start_time)\n self.end_time = max(self.end_time, other.end_time)\n self.resources += other.resources\n self._resolve_duplicates()\n self.errors += other.errors\n self.stats.merge(other.stats)", "def copy_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.copy_from(other.parent)\n self.isolated_names = copy.copy(other.isolated_names)\n self.modified = copy.copy(other.modified)\n self.read = copy.copy(other.read)\n self.deleted = copy.copy(other.deleted)\n self.bound = copy.copy(other.bound)\n self.annotations = copy.copy(other.annotations)\n self.params = copy.copy(other.params)", "def merge(self, object_class):\n other_oc = self.schema.get_object_class(object_class)\n self.required_attrs |= other_oc.required_attrs\n self.allowed_attrs |= other_oc.allowed_attrs", "def fill(self, other):\n if self.stream_id is None:\n self.stream_id = other.stream_id\n\n if self.type is None:\n self.type = other.type\n\n if self.length is None:\n self.length = other.length\n\n if self.timestamp is None:\n self.timestamp = other.timestamp\n\n assert self.stream_id is not None\n assert self.type is not None\n assert self.length is not None\n assert self.timestamp is not None\n assert self.object_id is not None", "def __iadd__(self, other):\n self.MergeWith(other)\n return self", "def copy(self):\n new_data_collection = DataCollection()\n for item in self.iteritems():\n new_data_collection.add_data(item)\n return new_data_collection", "def merge(self, other):\n if other is None:\n return\n if self.theta1 > other.theta1:\n self.theta1 = other.theta1\n self.p1 = other.p1\n if self.theta2 < other.theta2:\n self.theta2 = other.theta2\n self.p2 = other.p2", "def initFromOther(self, 
oOther):\n for sAttr in self.getDataAttributes():\n setattr(self, sAttr, getattr(oOther, sAttr));\n return self;", "def update(self, other: dict):\n for key in other:\n if key in self:\n self[key] = other[key]", "def join_data(self, base_data, join_data, base_field, join_fields):\n for data in base_data:\n extra = join_data[data[base_field]]\n for field in join_fields:\n data[field] = extra[field]\n \n return base_data", "def copy_with(self):\n return self.copy()", "def copy_values(self, another):\n\n # Copy all value, uncertainty, and source information from the other\n # ExoParameter object.\n if isinstance(another, ExoParameter):\n self.reference = another.reference\n self.uncertainty = another.uncertainty\n self.uncertainty_lower = another.uncertainty_lower\n self.uncertainty_upper = another.uncertainty_upper\n self.units = another.units\n self.url = another.url\n self.value = another.value\n else:\n raise TypeError(\"Cannot copy values from a non-ExoParameter obj!\")", "def extend(self, other, adapt_conf=True):\n # Check if category metadata match\n if (self.size() > 0) and (other.size() > 0):\n for attr in [\"is_cat_inclusive\", \"is_categorised\"]:\n a, b = getattr(self, attr), getattr(other, attr)\n if a != b:\n raise ConcatenationError(\n f\"Categorisation metadata is different for '{attr}': {a} != {b}\"\n )\n elif other.size() > 0:\n for attr in [\"is_cat_inclusive\", \"is_categorised\"]:\n setattr(self, attr, getattr(other, attr))\n if getattr(self, \"tstep_h\", None) is None:\n self.tstep_h = getattr(other, \"tstep_h\", None)\n else:\n if getattr(other, \"tstep_h\", None) is not None:\n if self.tstep_h != other.tstep_h:\n raise ConcatenationError(\n \"Extending by a TrackRun with different timestep is not allowed\"\n )\n if adapt_conf and other.conf is not None:\n if self.conf is None:\n self.conf = other.conf.copy()\n else:\n for field in self.conf._fields:\n if getattr(self.conf, field) != getattr(other.conf, field):\n setattr(self.conf, field, None)\n self.sources.extend(other.sources)\n\n new_data = pd.concat([self.data, other.data], sort=False)\n new_track_idx = new_data.index.get_level_values(0).to_series()\n new_track_idx = new_track_idx.ne(new_track_idx.shift()).cumsum() - 1\n\n mux = pd.MultiIndex.from_arrays(\n [new_track_idx, new_data.index.get_level_values(1)], names=new_data.index.names\n )\n self.data = new_data.set_index(mux)\n\n # Concatenate categories\n if (self.cats is not None) or (other.cats is not None):\n new_cats = pd.concat([self.cats, other.cats], sort=False).fillna(False)\n new_track_idx = new_cats.index.get_level_values(0).to_series()\n new_track_idx = new_track_idx.ne(new_track_idx.shift()).cumsum() - 1\n\n ix = pd.Index(new_track_idx, name=new_cats.index.name)\n self.cats = new_cats.set_index(ix)", "def merge(self, other: Schema) -> Schema:\n if duplicates := self.keys() & other.keys():\n raise IntegrityError(f'Duplicate column name(s): {duplicates}')\n return self.__class__({**self, **other})" ]
[ "0.69505", "0.694683", "0.6885921", "0.68476945", "0.68086684", "0.6734038", "0.67226946", "0.66790885", "0.66472447", "0.66369003", "0.6572359", "0.6542647", "0.6503335", "0.65015614", "0.6469766", "0.64362967", "0.64317465", "0.6385473", "0.6376822", "0.6375423", "0.63703424", "0.63361776", "0.631753", "0.62937146", "0.62908316", "0.62438846", "0.61748385", "0.61556965", "0.61476624", "0.61324286", "0.60887307", "0.607825", "0.60765857", "0.60704356", "0.6068133", "0.60656786", "0.6027501", "0.6009837", "0.6009332", "0.5985768", "0.59678525", "0.5943717", "0.5924525", "0.5924524", "0.59010845", "0.5876143", "0.584064", "0.5831239", "0.58201", "0.5816105", "0.58112156", "0.5809095", "0.5804941", "0.57959485", "0.5794767", "0.57855856", "0.5785261", "0.5774561", "0.57736063", "0.57714903", "0.576705", "0.575061", "0.57354474", "0.57290703", "0.57218975", "0.5712195", "0.5704418", "0.5698822", "0.5693745", "0.56931365", "0.56895113", "0.5687284", "0.56764215", "0.5674636", "0.56723565", "0.5657785", "0.5655855", "0.56472", "0.5636454", "0.5634588", "0.5628336", "0.5626043", "0.5623629", "0.56202084", "0.55988896", "0.55897945", "0.55770785", "0.5576054", "0.5574003", "0.5573844", "0.5573058", "0.5572744", "0.55714935", "0.556345", "0.55581564", "0.55573905", "0.5538598", "0.5538142", "0.5534426", "0.55301327", "0.5527462" ]
0.0
-1
Merge data from an apdex metric object.
def merge_apdex_metric(self, metric):
    self[0] += metric.satisfying
    self[1] += metric.tolerating
    self[2] += metric.frustrating

    self[3] = ((self[0] or self[1] or self[2]) and
               min(self[3], metric.apdex_t) or metric.apdex_t)
    self[4] = max(self[4], metric.apdex_t)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def mergeAggregatedCsvData(self, contexts, obj, aggData1, aggData2):\n return aggData1 + aggData2", "def _aggregate_log_values(self, source, dest):\n remove = []\n for key, item in source.items():\n if \"data\" not in item:\n # Assume it's a sub-group\n dest[key] = {}\n self._aggregate_log_values(item, dest[key])\n else:\n aggregator = self._get_aggregator_for_key(key, item['agg'])\n value = aggregator(item['data'])\n if item['precision'] is not None:\n value = round(value, item['precision'])\n dest[key] = value\n if item['scope'] == 'get':\n remove.append(key)\n for key in remove:\n del source[key]", "def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n for metric in self.metrics:\n metric.update(data)\n\n return self", "def _merge(acc: Dict[str, str], cur: Any) -> Dict[str, str]:\n parsed = _parse_feature(cur)\n acc[\"timestamp\"] = parsed[\"timestamp\"]\n acc[\"lat\"] = parsed[\"lat\"]\n acc[\"lon\"] = parsed[\"lon\"]\n key = parsed[\"property\"]\n val = parsed[\"value\"]\n\n acc[key] = val\n\n return acc", "def merge_metric_stats(self, snapshot):\n\n if not self.__settings:\n return\n\n for key, other in six.iteritems(snapshot.__stats_table):\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def get_metric(self, metric, existing_dict=None):\n metric_key, metric_type, metric_name, metric_help = metric\n metric_dict = {\n 'name': metric_name,\n 'type': metric_type,\n 'help': metric_help,\n 'values': OrderedDict()\n }\n values = self.r.hgetall(metric_key) # new values\n # print \"values: %r\" % values\n metric_dict['values'] = values\n\n if existing_dict:\n # we're updating a metric we've already seen\n # print \"existing dict: %r\" % existing_dict\n for value in values:\n # print \"checking value: %r\" % value\n # value = json.loads(value)\n if value in existing_dict['values']:\n if metric_type == 'counter' or metric_type == 'histogram':\n # Combine the values if it's a counter or histogram\n # TODO: sort histogram buckets\n # TODO: append _bucket to histogram bucket names\n existing_dict['values'][value] = float(\n values[value]) + float(\n existing_dict['values'][value])\n elif metric_type == 'gauge':\n # use the last value we see for a gauge - # TODO: is there a better way? 
we could average it\n existing_dict['values'][value] = float(values[value])\n else:\n existing_dict['values'][value] = float(values[value])\n metric_dict['values'] = existing_dict['values']\n\n if metric_type == 'histogram':\n # json decode all of the labels\n samples = [json.loads(x, object_pairs_hook=OrderedDict) for x in metric_dict['values']]\n # we need to sort the values by the bucket labeled \"le\"\n sorted_keys = sorted(samples, key=lambda b: b['le'])\n # and then we need to store the values again json encoded\n vals = metric_dict['values']\n metric_dict['values'] = OrderedDict()\n for k in sorted_keys:\n kn = json.dumps(k, sort_keys=True)\n metric_dict['values'][kn] = vals[kn]\n\n return metric_dict", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics", "def get_metric_data(config, metric_list, metric_grouping, start_time, end_time, collected_data_map):\n\n def format_data_entry(json_data_entry):\n metric_name = json_data_entry.get('metric')\n host_name = json_data_entry.get('tags', {}).get('host') or 'unknownHost'\n dps = json_data_entry.get('dps', {})\n metric_value = None\n header_field = normalize_key(metric_name) + \"[\" + host_name + \"]:\" + str(\n get_grouping_id(metric_name, metric_grouping))\n mtime = 0\n for stime, val in dps.items():\n if int(stime) > mtime:\n metric_value = val\n mtime = int(stime)\n\n epoch = mtime * 1000\n\n if epoch in 
collected_data_map:\n timestamp_value_map = collected_data_map[epoch]\n else:\n timestamp_value_map = {}\n\n timestamp_value_map[header_field] = str(metric_value)\n collected_data_map[epoch] = timestamp_value_map\n\n json_data = {\n \"token\": config['OPENTSDB_TOKEN'],\n \"start\": start_time,\n \"end\": end_time,\n \"queries\": map(lambda m: {\n \"aggregator\": \"avg\",\n \"downsample\": \"1m-avg\",\n \"metric\": m.encode('ascii')\n }, metric_list)\n }\n\n url = config[\"OPENTSDB_URL\"] + \"/api/query\"\n response = requests.post(url, data=json.dumps(json_data))\n if response.status_code == 200:\n rawdata_list = response.json()\n logger.debug(\"Get metric data from opentsdb: \" + str(len(rawdata_list)))\n\n # format metric and save to collected_data_map\n map(lambda d: format_data_entry(d), rawdata_list)", "def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset", "def summerize_adapter_metrics(parsed_metrics: Dict[int, dict]) -> Dict[Tuple[str, str], dict]:\n\n summarized_metrics = {}\n for lane in parsed_metrics:\n # Iterate over all samples in lane\n summarized_metrics[lane] = summarized_metrics.get(lane, {})\n for value in parsed_metrics[lane].values():\n sample_id = value.get(\"Sample_ID\")\n summarized_metrics[lane][sample_id] = summarized_metrics[lane].get(sample_id, value)\n summarized_metrics[lane][sample_id][\n \"R\" + value.get(\"ReadNumber\") + \"_SampleBases\"\n ] = value.get(\"SampleBases\")\n\n return summarized_metrics", "def _aggregate_metrics(metrics, aggfunc, base):\n return base.Struct(**_UNCOMPRESSED_METRICS)(\n left_side_bearing=aggfunc(_m.left_side_bearing for _m in metrics),\n right_side_bearing=aggfunc(_m.right_side_bearing for _m in metrics),\n character_width=aggfunc(_m.character_width for _m in metrics),\n character_ascent=aggfunc(_m.character_ascent for _m in metrics),\n character_descent=aggfunc(_m.character_descent for _m in metrics),\n character_attributes=0,\n )", "def merge_time_metric(self, metric):\n\n self.merge_raw_time_metric(metric.duration, metric.exclusive)", "def to_metric(self):\r\n if self.units != 'metric':\r\n self.units = 'metric'\r\n for statement in self.statements:\r\n statement.to_metric()\r\n for tool in iter(self.tools.values()):\r\n tool.to_metric()\r\n for primitive in self.primitives:\r\n primitive.to_metric()\r\n for hit in self.hits:\r\n hit.to_metric()", "def aggregate_data(mts, feature, target):\r\n set_dict = dict()\r\n set_dict['mt'] = mts\r\n set_dict['feature'] = feature\r\n set_dict['target'] = target\r\n \r\n return set_dict", "def compute_metrics(self, x, extra=None):\n if self.__metrics is None and extra is None:\n return None\n\n ret = {}\n if 
self.__metrics is not None:\n for m in self.__metrics:\n ret[m.name] = self._mdmetric(x, m)\n\n if extra is not None and extra.name not in ret:\n ret[extra.name] = self._mdmetric(x, extra)\n\n return ret", "def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)", "def mergeMetadata(self, obj, dom): \n self.update_semantics = 'merge'\n # create a metadata dict that has all the values from obj, overridden\n # by the current dom values.\n metadata = self.getModuleMetadata(obj, {})\n metadata.update(self.getMetadata(dom, METADATA_MAPPING))\n for oerdc_name, cnx_name in METADATA_MAPPING.items():\n if cnx_name in ['keywords',]:\n old_value = getattr(obj, cnx_name)\n if old_value:\n current_value = list(metadata.get(cnx_name, []))\n current_value.extend(old_value)\n metadata[cnx_name] = current_value\n if metadata:\n self.validate_metadata(metadata)\n metadata = self.fixEntities(metadata, ATTRIBUTES_TO_FIX)\n if ICollection.providedBy(obj):\n obj.collection_metadata(**metadata)\n elif IModule.providedBy(obj):\n obj.update_metadata(**metadata)\n self.updateRoles(obj, dom)\n obj.reindexObject(idxs=metadata.keys())", "def metric_data(self, normalizer=None):\n\n if not self.__settings:\n return []\n\n result = []\n normalized_stats = {}\n\n # Metric Renaming and Re-Aggregation. 
After applying the metric\n # renaming rules, the metrics are re-aggregated to collapse the\n # metrics with same names after the renaming.\n\n if self.__settings.debug.log_raw_metric_data:\n _logger.info('Raw metric data for harvest of %r is %r.',\n self.__settings.app_name,\n list(six.iteritems(self.__stats_table)))\n\n if normalizer is not None:\n for key, value in six.iteritems(self.__stats_table):\n key = (normalizer(key[0])[0], key[1])\n stats = normalized_stats.get(key)\n if stats is None:\n normalized_stats[key] = copy.copy(value)\n else:\n stats.merge_stats(value)\n else:\n normalized_stats = self.__stats_table\n\n if self.__settings.debug.log_normalized_metric_data:\n _logger.info('Normalized metric data for harvest of %r is %r.',\n self.__settings.app_name,\n list(six.iteritems(normalized_stats)))\n\n for key, value in six.iteritems(normalized_stats):\n key = dict(name=key[0], scope=key[1])\n result.append((key, value))\n\n return result", "def add_metrics(self, metric_dict: dict):\n self.metric_dict.update(metric_dict)", "def _create_metric_sum(a, b):\n metric_sum = GridSearchClassificationMetrics()\n metric_sum.accuracy = a.accuracy + b.accuracy\n metric_sum.precision = a.precision + b.precision\n metric_sum.f_measure = a.f_measure + b.f_measure\n metric_sum.recall = a.recall + b.recall\n metric_sum.confusion_matrix = a.confusion_matrix + b.confusion_matrix\n return metric_sum", "def record_apdex_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Note that because we are using a scope here of an empty string\n # we can potentially clash with an unscoped metric. Using None,\n # although it may help to keep them separate in the agent will\n # not make a difference to the data collector which treats None\n # as an empty string anyway.\n\n key = (metric.name, '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = ApdexStats(apdex_t=metric.apdex_t)\n self.__stats_table[key] = stats\n stats.merge_apdex_metric(metric)\n\n return key", "def merge(self, obj):\n pass", "def consolidate_other(self):\n record = self.db[self.args['cstats_table']].find_one({'type': 'client'})\n if not record:\n self.log.critical('Could not get the \"client\" key in the \"cstats_table\"')\n return\n for k in record.keys():\n if k in ['_id', 'type', 'stats']:\n continue\n self.other[k] = record[k]\n self.stats.update(record.get('stats', {}))", "def convert(self, data, *args, **kwargs):\n\n # all of this is still quite ugly and verrrry specific...\n json_data = {}\n for hit in data[\"hits\"][\"hits\"]:\n # pprint(hit)\n\n # get the PQ\n pq = hit.get(\"_source\", {}).get(\"metadata\", {}).get(\"PanDAQueue\", None)\n if not pq:\n continue\n\n # get the list of all benchmark results\n latest_list = (\n hit.get(\"inner_hits\", {})\n .get(\"most_recent\", {})\n .get(\"hits\", {})\n .get(\"hits\", [])\n )\n if len(latest_list) == 0:\n continue\n\n # get the average of the latest benchmark results.\n # Only results not older than 7d, and a maximum of 50 results (whichever value is hit first).\n # If we have no values more recent than 7d, simply use the last available one (that PQ is probably not online anymore anyway)\n values = []\n for d in latest_list:\n date = datetime.datetime.strptime(\n d.get(\"_source\", {}).get(\"timestamp\", \"\"), \"%Y-%m-%dT%H:%M:%SZ\"\n )\n two_days_ago = datetime.datetime.now() - datetime.timedelta(days=2)\n seven_days_ago = datetime.datetime.now() - datetime.timedelta(days=7)\n\n if date > two_days_ago:\n # we are within the last two days, so we take all 
the measurements we can get!\n values.append(d)\n elif (date < two_days_ago) and (date > seven_days_ago):\n # we are between 2 and 7 days ago, so take only values if we don't have 25 values already\n if len(values) < 30:\n values.append(d)\n elif date < seven_days_ago:\n # we are further away than 7 days, so take a maximum of 5 values from here if we don't have 5 yet\n if len(values) < 10:\n values.append(d)\n\n to_average = [\n i.get(\"_source\", {})\n .get(\"profiles\", {})\n .get(\"fastBmk\", {})\n .get(\"value\", 0.0)\n for i in values\n ]\n json_data[pq] = {\n \"avg_value\": float(sum(to_average)) / len(to_average),\n \"measurements\": len(to_average),\n }\n # print(len(to_average))\n\n return json_data", "def _get_metrics_to_collect(self, instance_key, additional_metrics):\n if instance_key not in self.metrics_to_collect_by_instance:\n self.metrics_to_collect_by_instance[instance_key] = \\\n self._build_metric_list_to_collect(additional_metrics)\n return self.metrics_to_collect_by_instance[instance_key]", "def add_datapoints(self, stats):\n # APCU Stats\n apcu_stats = stats.get('apcu_stats', dict())\n self.add_gauge_value('APCu Cache/Slots', 'slots',\n apcu_stats.get('nslots',\n apcu_stats.get('num_slots', 0)))\n self.add_gauge_value('APCu Cache/Entries', 'keys',\n apcu_stats.get('nentries',\n apcu_stats.get('num_entries', 0)))\n self.add_gauge_value('APCu Cache/Size', 'bytes',\n apcu_stats.get('mem_size', 0))\n self.add_gauge_value('APCu Cache/Expunges', 'keys',\n apcu_stats.get('nexpunges',\n apcu_stats.get('expunges', 0)))\n\n hits = apcu_stats.get('nhits', apcu_stats.get('num_hits', 0))\n misses = apcu_stats.get('nmisses', apcu_stats.get('num_misses', 0))\n total = hits + misses\n if total > 0:\n effectiveness = float(float(hits) / float(total)) * 100\n else:\n effectiveness = 0\n self.add_gauge_value('APCu Cache/Effectiveness', 'percent',\n effectiveness)\n\n self.add_derive_value('APCu Cache/Hits', 'keys', hits)\n self.add_derive_value('APCu Cache/Misses', 'keys', misses)\n self.add_derive_value('APCu Cache/Inserts', 'keys',\n apcu_stats.get('ninserts',\n apcu_stats.get('num_inserts',0)))", "def _aggregate_perf_data(perf_all_ordinals: List[str]):\n aggregate = {}\n\n pd = PerfData()\n for data in perf_all_ordinals:\n worker_pd = PerfData(**json.loads(data))\n if len(perf_all_ordinals) > 1:\n aggregate.setdefault(\"ordinals\", [])\n aggregate[\"ordinals\"].append(worker_pd.throughput_dict())\n\n pd.merge(worker_pd)\n\n aggregate.update(dataclasses.asdict(pd))\n return aggregate", "def unmarshal(self, metric):\n return {\n \"name\": metric.name,\n \"value\": metric.value,\n \"transform\": metric.transform,\n \"tags\": metric.tags,\n }", "def update_metric_dict(self, metric_dict, update_dict):\n for key in update_dict.keys():\n metric_dict[key].append(update_dict[key])\n return metric_dict", "def __add__(self, other):\r\n # Make a defaultdict of defaultdicts, the latter of which returns\r\n # None when an key is not present\r\n merged_data = defaultdict(lambda: defaultdict(lambda: None))\r\n\r\n # We will keep track of all unique sample_ids and metadata headers\r\n # we have seen as we go\r\n all_sample_ids = set()\r\n all_headers = set()\r\n\r\n # add all values from self into the merged_data structure\r\n for sample_id, data in self._metadata.iteritems():\r\n all_sample_ids.add(sample_id)\r\n for header, value in data.iteritems():\r\n all_headers.add(header)\r\n merged_data[sample_id][header] = value\r\n\r\n # then add all data from other\r\n for sample_id, data in 
other._metadata.iteritems():\r\n all_sample_ids.add(sample_id)\r\n for header, value in data.iteritems():\r\n all_headers.add(header)\r\n # if the two mapping files have identical sample_ids and\r\n # metadata columns but have DIFFERENT values, raise a value\r\n # error\r\n if merged_data[sample_id][header] is not None and \\\r\n merged_data[sample_id][header] != value:\r\n raise ValueError(\"Different values provided for %s for \"\r\n \"sample %s in different mapping files.\"\r\n % (header, sample_id))\r\n else:\r\n merged_data[sample_id][header] = value\r\n\r\n # Now, convert what we have seen into a normal dict\r\n normal_dict = {}\r\n for sample_id in all_sample_ids:\r\n if sample_id not in normal_dict:\r\n normal_dict[sample_id] = {}\r\n\r\n for header in all_headers:\r\n normal_dict[sample_id][header] = \\\r\n merged_data[sample_id][header]\r\n\r\n # and create a MetadataMap object from it; concatenate comments\r\n return self.__class__(normal_dict, self.Comments + other.Comments)", "def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)", "def _update_data(self):\n for attribute in [\"flow_rate\"]:\n self._data[attribute] = self._connection.measure", "def _gather_data(self):\n for data in self._collection:\n label = data.label\n label = disambiguate(label, self._data)\n self._data[label] = data", "def metrics_group():", "def merge(self, other):\n self.isotxsMetadata = self.isotxsMetadata.merge(\n other.isotxsMetadata, self, other, \"ISOTXS\", AttributeError\n )\n self.gamisoMetadata = self.gamisoMetadata.merge(\n other.gamisoMetadata, self, other, \"GAMISO\", AttributeError\n )\n self.pmatrxMetadata = self.pmatrxMetadata.merge(\n other.pmatrxMetadata, self, other, \"PMATRX\", AttributeError\n )\n self.micros.merge(other.micros)\n self.gammaXS.merge(other.gammaXS)\n self.neutronHeating = _mergeAttributes(self, other, \"neutronHeating\")\n self.neutronDamage = _mergeAttributes(self, other, \"neutronDamage\")\n self.gammaHeating = _mergeAttributes(self, other, \"gammaHeating\")\n self.isotropicProduction = _mergeAttributes(self, other, \"isotropicProduction\")\n self.linearAnisotropicProduction = _mergeAttributes(\n self, other, \"linearAnisotropicProduction\"\n )\n # this is lazy, but should work, because the n-order wouldn't be set without the others being set first.\n self.nOrderProductionMatrix = (\n self.nOrderProductionMatrix or other.nOrderProductionMatrix\n )", "def apply_metrics(x):\n d = {}\n d[\"custom_metric\"] = custom_metric(\n x[\"actuals\"], x[\"forecast\"], x[\"avg_vol\"].values[0]\n )\n d[\"uncertainty_metric\"] = uncertainty_metric(\n x[\"actuals\"], x[\"upper_bound\"], x[\"lower_bound\"], x[\"avg_vol\"].values[0]\n )\n\n return pd.Series(d, index=[\"custom_metric\", \"uncertainty_metric\"])", "def merge(self, other: PerfData):\n self.total_samples += other.total_samples\n if self.total_time == 0.0:\n self.total_time = other.total_time\n self.compile_time = max(self.compile_time, other.compile_time)\n self.programming_time = max(\n self.programming_time, other.programming_time\n )\n if self.est_samples_per_sec == 0.0:\n self.est_samples_per_sec = other.est_samples_per_sec\n else:\n assert (\n self.est_samples_per_sec == other.est_samples_per_sec\n ), 
\"Expected all fabric-based performance estimates to be identical\"\n\n if self.total_time > 0:\n self.samples_per_sec = float(self.total_samples) / self.total_time\n else:\n self.samples_per_sec = 0.0", "def init_data(self, obj):\n for col in self._category_aux:\n key_split = col.split(' ', 1)\n if len(key_split) > 1:\n key = key_split[1].replace(' ', '')\n minmax = key_split[0].lower()\n info_tuple = (key, minmax)\n if minmax != 'min' and minmax != 'max':\n info_tuple = (col.replace(' ', ''), '')\n else:\n info_tuple = (col.replace(' ', ''), '') \n self.__info_extract(obj, info_tuple[0], info_tuple[1], col)", "def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n self._logger.warning(\"Empty result from query : {0}\".format(query_spec))", "def compute_metrics(self):\n overall_ret = OrderedDict()\n for ap_iou_thresh in self.ap_iou_thresh:\n ret_dict = OrderedDict()\n rec, prec, ap = eval_det_multiprocessing(self.pred_map_cls, self.gt_map_cls, ovthresh=ap_iou_thresh)\n for key in sorted(ap.keys()):\n clsname = self.class2type_map[key] if self.class2type_map else str(key)\n ret_dict[\"%s Average Precision\" % (clsname)] = ap[key]\n ap_vals = np.array(list(ap.values()), dtype=np.float32)\n ap_vals[np.isnan(ap_vals)] = 0\n ret_dict[\"mAP\"] = ap_vals.mean()\n rec_list = []\n for key in sorted(ap.keys()):\n clsname = self.class2type_map[key] if self.class2type_map else str(key)\n try:\n ret_dict[\"%s Recall\" % (clsname)] = rec[key][-1]\n rec_list.append(rec[key][-1])\n except:\n ret_dict[\"%s Recall\" % (clsname)] = 0\n rec_list.append(0)\n ret_dict[\"AR\"] = np.mean(rec_list)\n overall_ret[ap_iou_thresh] = ret_dict\n return overall_ret", "def parse_metrics_file(self) -> Dict[int, dict]:\n LOG.info(\"Parsing Dragen demultiplexing adapter metrics file %s\", self.adapter_metrics_path)\n parsed_metrics = {}\n\n with self.adapter_metrics_path.open(\"r\") as metrics_file:\n metrics_reader = csv.DictReader(metrics_file)\n for row in metrics_reader:\n lane = int(row[\"Lane\"])\n read_number = row[\"ReadNumber\"]\n sample_id = row[\"Sample_ID\"]\n parsed_metrics[lane] = parsed_metrics.get(lane, {})\n parsed_metrics[lane][(read_number, sample_id)] = row\n\n return self.summerize_adapter_metrics(parsed_metrics=parsed_metrics)", "def merge(self, other):\n from .dataset import Dataset\n\n if other is None:\n return self.to_dataset()\n else:\n other_vars = getattr(other, 'variables', other)\n coords = merge_coords_without_align([self.variables, other_vars])\n return Dataset._from_vars_and_coord_names(coords, set(coords))", "def calculate_agrigate(self):\n self.total = 
0.0\n for rec in self.data:\n self.total = self.total + rec[\"value\"]\n\n self.agrigate_data = {\n \"site\": self.site,\n \"utc\": self.timestamp_utc,\n \"local\": self.timestamp_local,\n \"tag\": \"TOTAL\",\n \"value\": round(self.total, 3)}\n self.data.append(self.agrigate_data)", "def merge_stats(self, other):\n\n self[0] += other[0]\n self[1] += other[1]\n self[2] += other[2]\n\n self[3] = ((self[0] or self[1] or self[2]) and\n min(self[3], other[3]) or other[3])\n self[4] = max(self[4], other[3])", "def merge_csvs(metrics_csv_dims,metrics_csv_int,region,wk_dir):\n dict_dims = load_and_transpose_csv(metrics_csv_dims)\n dict_int = load_and_transpose_csv(metrics_csv_int)\n dict_dims.update(dict_int.copy())\n fname = os.path.join(wk_dir,region.replace(' ','_') + '_summary.csv')\n fields = [*dict_dims]\n models = ['Metric']+[*dict_dims[fields[0]]]\n\n with open(fname,'w') as f:\n writer = csv.DictWriter(f,fieldnames=models)\n writer.writeheader()\n for field in fields:\n row = {'Metric': field}\n row.update(dict_dims.get(field,{}))\n writer.writerow(row)\n\n data_desc = {\n os.path.basename(fname): {\n 'longname': os.path.basename(fname).split('.')[1].replace('_',' '),\n 'description': 'Parameters and metrics for ' + region + ' region'}}\n\n # add metadata to output.json\n asop.update_output_json('metrics', data_desc, wk_dir)", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def __info_extract(self, obj, key, minmax, col):\n for k, v in obj.items(): \n val = self.__get_value(v, key, minmax)\n self._data[col].append(val)\n in_k, in_v = list(v.items())[-1]\n while not isinstance(in_v, str): \n val = self.__get_value(in_v, key, minmax)\n 
self._data[col].append(val)\n in_k, in_v = list(in_v.items())[-1]", "def add_metrics(_dict):\n for key, itr in _dict.items():\n if key not in self.metric_cols:\n self.metric_cols.append(key)", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def __add__(self, other):\n new_measure = Measure()\n settings = [\"raw\", \"fil\"]\n\n for rf in settings:\n new_measure.hit1[rf] = (self.hit1[rf] + other.hit1[rf])\n new_measure.hit3[rf] = (self.hit3[rf] + other.hit3[rf])\n new_measure.hit10[rf] = (self.hit10[rf] + other.hit10[rf])\n new_measure.mrr[rf] = (self.mrr[rf] + other.mrr[rf])\n new_measure.mr[rf] = (self.mr[rf] + other.mr[rf])\n return new_measure", "def merge(self, other):\n if other.n_points != self.n_points:\n raise ValueError(\n 'Deduplicator size mismatch: '\n f'{self.n_points} != {other.n_points}'\n )\n self.data_reduced.extend(other.data_reduced)\n self.data_kd.extend(other.data_kd)", "def metric(self, metric_id):\r\n return Metric(self, metric_id)", "def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)", "def append_ipma_metadata(orig: dict, dest: dict):\n for key in [key for key in orig.keys() if key != 'data']:\n dest[key] = orig[key]", "def ana_merge_datas(datas):\n return {\n 'searches':ana_merge_searches(datas),\n 'senzory_map':ana_merge_senzory_map(datas)\n }", "def build_metrics_gauge_data(gauge_metrics):\n return [{'name': name, 'value': value} for name, value in iteritems(gauge_metrics)]", "def convert(report):\n M = []\n for row in report['data']['rows']:\n dimensions = row['dimensions']\n metrics = row['metrics'][0]['values']\n M.append(dimensions + metrics)\n return M", "def set_metrics(self):", "def aggregate_metrics(metrics):\n if len(metrics) == 1:\n return metrics[0]\n else:\n agg_metrics = metrics[0]\n for metric in agg_metrics.keys():\n vals = [x[metric] for x in metrics]\n agg_metrics[metric] = [np.mean(vals), np.std(vals)]\n return agg_metrics", "def process(self, method):\n process_dicts = []\n for d in self.data_dicts:\n dd = copy.deepcopy(d)\n for ap in self.aps:\n dd[ap] = method(d[ap])\n process_dicts.append(dict2str(dd))\n\n # print(process_dicts)\n # print(type(process_dicts[0]))\n return Dataset(process_dicts)", "def combine_data(self, object, additional_data):\n for k, v in additional_data.items():\n if isinstance(v, list):\n object[k] = object.get(k, []) + v\n else:\n object[k] = v\n for instance in object.get(\"instances\", []):\n if instance.get(\"sub_container\", {}).get(\"top_container\", {}).get(\"_resolved\"):\n del instance[\"sub_container\"][\"top_container\"][\"_resolved\"]\n object = super(ArchivalObjectMerger, self).combine_data(object, additional_data)\n return combine_references(object)", "def merge_data(self):\n\n merged_data = []\n for data in self.data:\n values_test = [self.get_ad_from_dict(ad) for ad in self.get_values_from_dict(data)]\n merged_ads_tmp = []\n for code, ads in self.group_by_code(values_test):\n for ad in ads:\n merged_ads_tmp.append(ad)\n\n if len(merged_ads_tmp) > 1:\n merged_ads = AdModel.merge_ads(self.get_first_key_from_dict(data),\n code, merged_ads_tmp)\n else:\n merged_ads = merged_ads_tmp\n 
merged_ads_tmp = []\n merged_data.append({self.get_first_key_from_dict(data): merged_ads})\n\n return merged_data", "def merge(cls, analyses):\r\n raise NotImplementedError()", "def update_metric(self, metric, value):\n if self.is_number(value):\n self.logger.debug(\"Collected raw metric: %s = %s\" % (metric, value))\n self.raw_metrics[metric] = value", "def calculate_metrics(self):\n self.data_stats = self.sqlContext.read.format(\"org.apache.spark.sql.cassandra\").options(table=self.cassandra_trip_table, keyspace=self.cassandra_keyspace).load()\n self.data_stats = self.data_stats.groupBy(['time_block','day','month','borough_name']).agg(func.avg('num_trips').alias('mean'))", "def evaluate(self, metric, resource):\n\n # Extract values from JSON path result\n self._values = ContextUtils.manage_values_from_json(metric.value)\n\n # Create new metric with extracted values\n new_metric = ContextUtils.replace_metric_value(metric,\n len(self._values))\n\n # Call parent evaluate method with new updated metric\n return super().evaluate(new_metric, resource)", "def get_min_or_max_values(metrics: dict, global_metrics: dict, fn2) -> dict:\n for ds_name in metrics:\n if ds_name not in global_metrics:\n global_metrics[ds_name] = {}\n\n feature_metrics = metrics[ds_name]\n for feature_name in feature_metrics:\n if feature_name not in global_metrics[ds_name]:\n global_metrics[ds_name][feature_name] = feature_metrics[feature_name]\n else:\n global_metrics[ds_name][feature_name] = fn2(\n global_metrics[ds_name][feature_name], feature_metrics[feature_name]\n )\n\n results = {}\n for ds_name in global_metrics:\n for feature_name in global_metrics[ds_name]:\n if feature_name not in results:\n results[feature_name] = global_metrics[ds_name][feature_name]\n else:\n results[feature_name] = fn2(results[feature_name], global_metrics[ds_name][feature_name])\n\n for ds_name in global_metrics:\n for feature_name in global_metrics[ds_name]:\n global_metrics[ds_name][feature_name] = results[feature_name]\n\n return global_metrics", "def test_add_tag_to_derived_metric(self):\n pass", "def add_metric_class(self, metric: NNSimpleMetric):\n if metric.name not in self.metrics:\n self.metrics[metric.name] = metric", "def calculate_dataset_metrics(self):\n pass", "def _merge(self, other: dict):\n self._storage = dict_merge(self._storage, other)", "def add_apple_data_to_activities(self):\n\n try:\n # apple data is loaded from csv rather than from json\n apple_data = self.load_apple_workouts()\n\n # filter out nike and strava data that has synced to apple, we are getting that from json source\n apple_data = apple_data[(apple_data.sourceName != \"Nike Run Club\") & (apple_data.sourceName != \"Strava\")]\n\n # set up 5 key metrics\n # note we're using enum values\n apple_data['source'] = ActivitySource.APPLE.value\n apple_data['activity_type'] = apple_data['workoutActivityType'].apply(lambda x: self.convert_apple_activity_type(x).value)\n apple_data['distance_in_km'] = apple_data['totalDistance']\n apple_data['duration_in_min'] = apple_data['duration']\n apple_data['start_timestamp'] = apple_data['startDate'].apply(lambda x: parse(x, tzinfos={\"America/Vancouver\"}))\n\n # filter out extraneous columns\n apple_data = apple_data.filter(self.data_frame_columns)\n self.all_activities = self.all_activities.append(apple_data, sort=True, ignore_index=True)\n\n logging.info(\"Done parsing Apple data.\")\n except Exception:\n logging.exception(\"Could not parse Apple data\")", "def compute_metrics(self, results: list) -> dict:", "def 
build_metrics(session, fill_id, population_id, properties_id, aggregations_id, label_lang):\n # query the metrics table\n build_metrics_start_time = time.time()\n metrics, metrics_columns = get_metrics(session, fill_id, population_id, properties_id, aggregations_id, label_lang)\n build_metrics_query_end_time = time.time()\n\n # make a nested dictionary represented the metrics\n metrics_response, represented_biases = build_gap_response(properties_id, metrics, metrics_columns, label_lang,\n session)\n build_metrics_grouping_end_time = time.time()\n\n # timing\n query_metrics_seconds_taken = build_metrics_query_end_time - build_metrics_start_time\n group_metrics_seconds_taken = build_metrics_grouping_end_time - build_metrics_query_end_time\n log.debug(f\"Querying metrics repsponse took {'%.3f' % query_metrics_seconds_taken} seconds\")\n log.debug(f\"Grouping metrics repsponse took {'%.3f' % group_metrics_seconds_taken} seconds\")\n return metrics_response, represented_biases", "def _resample_and_merge(ts, agg_dict):\n grouped = ts.group_serie(agg_dict['sampling'])\n existing = agg_dict.get('return')\n name = agg_dict.get(\"name\")\n resource = None if name is None else mock.Mock(id=str(uuid.uuid4()))\n metric = mock.Mock(id=str(uuid.uuid4()), name=name)\n agg_dict['return'] = (\n processor.MetricReference(metric, \"mean\", resource),\n carbonara.AggregatedTimeSerie.from_grouped_serie(\n grouped,\n carbonara.Aggregation(agg_dict['agg'],\n agg_dict['sampling'],\n None)))\n if existing:\n existing[2].merge(agg_dict['return'][2])\n agg_dict['return'] = existing", "def __value_from_metric(self, metric):\n if not metric:\n return self._EMPTY\n return self.__value_from_strings(str(metric.id), metric.metadata.as_json())", "def update_metric_dict_overall(self, metric_dict, update_dict, phase):\n for key in update_dict.keys():\n metric_dict[phase][key].append(update_dict[key])\n return metric_dict", "def combine_dict(self, dict2):\n # iterate through smaller data set\n # base_set will be the larger set and is used for updating\n if len(self.content[\"values\"]) > len(dict2[\"values\"]):\n large_set = self.content[\"values\"]\n small_set = dict2[\"values\"]\n base_set = self.content\n else:\n small_set = self.content[\"values\"]\n large_set = dict2[\"values\"]\n base_set = dict2\n\n subset = {}\n for key in small_set.keys():\n # determine wether to compare keys\n if key in large_set:\n updated_l = large_set[key][\"updated_at\"]\n updated_s = small_set[key][\"updated_at\"]\n if updated_l == 'NULL':\n if updated_s != 'NULL':\n # update to not NULL set\n # if both updated_at are NULL, things\n # are ambiguos. 
We could defer to created_at\n # but for simplicity we will default to\n # the values in the larger set\n subset[key] = small_set[key]\n else:\n if updated_s == 'NULL':\n # update to not NULL set\n subset[key] = large_set[key]\n else:\n if updated_l > updated_s:\n subset[key] = large_set[key]\n else:\n subset[key] =small_set[key]\n else:\n subset[key] = small_set[key]\n base_set[\"values\"].update(subset)\n new_obj = BackupData()\n new_obj.load_from_dict(base_set)\n return new_obj", "def merge(self, dataset):\n def merge_data(source, dest):\n for key, value in source.items():\n if isinstance(value, dict):\n merge_data(value, dest.setdefault(key, {}))\n else:\n dest[key] = value\n return dest\n\n merge_data(dataset.data, self._data)\n\n for h in dataset.task_history:\n if h not in self._task_history:\n self._task_history.append(h)", "def concatenate_data():", "def test_update_derived_metric(self):\n pass", "def _parse_query(self, inv_obj, query_results, monitored_metrics):\n datapoints = []\n timestamp = int(time.time()) * 1000\n try:\n result = query_results[0]\n for metric in result.value:\n key = metric.id.counterId\n metric_name = monitored_metrics[key].name\n metric_type = monitored_metrics[key].metric_type\n dimensions = self._get_dimensions(inv_obj, metric)\n value = metric.value[0]\n if monitored_metrics[key].units == 'percent':\n value /= 100.0\n dp = self.Datapoint(metric_name, metric_type, value, dimensions, timestamp)\n datapoints.append(dp)\n except Exception as e:\n self._logger.error(\"Error while parsing query results: {0} : {1}\".format(query_results, e))\n\n return datapoints", "def record_apdex_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_apdex_metric(metric)", "def compute_metrics(self):\n self.finalize_output_dict()\n self.metric_dict = {\n key: value(self.output_dict[\"labels\"], self.output_dict[\"pred_probs\"])\n for key, value in self.metric_fns.items()\n }", "def get_scan_data(self):\n self.metric_name = self.scan_file_dict['results'][0]['metric']\n data = {}\n data['metric_vals'] = []\n for result in self.scan_file_dict['results']:\n data['metric_vals'].append(result['metric_val'])\n for param_key in result['params'].keys():\n if not result['params'][param_key]['is_fixed']:\n if param_key not in data.keys():\n data[param_key] = {}\n data[param_key]['vals'] = []\n data[param_key]['units'] = \\\n result['params'][param_key]['prior']['units']\n data[param_key]['prior'] = \\\n result['params'][param_key]['prior']\n data[param_key]['vals'].append(\n result['params'][param_key]['value'][0]\n )\n\n if self.best_fit_dict is not None:\n best_fit_data = {}\n best_fit_data['metric_val'] = self.best_fit_dict['metric_val']\n for param_key in self.best_fit_dict['params'].keys():\n if not self.best_fit_dict['params'][param_key]['is_fixed']:\n best_fit_data[param_key] = {}\n best_fit_data[param_key]['val'] = \\\n self.best_fit_dict['params'][param_key]['value'][0]\n best_fit_data[param_key]['units'] = \\\n self.best_fit_dict['params'][param_key]['value'][1]\n # Make a list of shifted metrics based on this best fit point\n data['shifted_metric_vals'] = []\n for val in data['metric_vals']:\n data['shifted_metric_vals'].append(\n val-best_fit_data['metric_val']\n )\n else:\n best_fit_data = None\n\n if self.projection_dicts is not None:\n self.proj_bin_names = []\n self.proj_bin_edges = []\n self.proj_bin_cens = []\n self.proj_bin_units = []\n self.projection_data = []\n for projection_dict in self.projection_dicts:\n 
projection_data = {}\n proj_bin_cens, proj_bin_edges, \\\n proj_bin_names, proj_bin_units = \\\n self.get_scan_steps(scandict=projection_dict)\n if len(proj_bin_names) != 1:\n raise ValueError(\n \"Projection files should be 1D scans. \"\n \"Got %i.\"%len(proj_bin_names)\n )\n if proj_bin_names[0] not in self.all_bin_names:\n raise ValueError(\n \"Projection file was over %s which is \"\n \"not in the 2D scan over %s.\"%(\n proj_bin_names[0], self.all_bin_names)\n )\n else:\n self.proj_bin_names.append(proj_bin_names[0])\n self.proj_bin_edges.append(proj_bin_edges[0])\n self.proj_bin_cens.append(proj_bin_cens[0])\n self.proj_bin_units.append(proj_bin_units[0])\n projection_data['metric_vals'] = []\n for result in projection_dict['results']:\n projection_data['metric_vals'].append(result['metric_val'])\n for param_key in result['params'].keys():\n if not result['params'][param_key]['is_fixed']:\n if param_key not in projection_data.keys():\n projection_data[param_key] = {}\n projection_data[param_key]['vals'] = []\n projection_data[param_key]['units'] = \\\n result['params'][\n param_key]['prior']['units']\n projection_data[param_key]['prior'] = \\\n result['params'][param_key]['prior']\n projection_data[param_key]['vals'].append(\n result['params'][param_key]['value'][0]\n )\n if best_fit_data is not None:\n projection_data['shifted_metric_vals'] = []\n for val in projection_data['metric_vals']:\n projection_data['shifted_metric_vals'].append(\n val-best_fit_data['metric_val']\n )\n self.projection_data.append(projection_data)\n else:\n self.projection_data = None\n\n if self.contour_dicts is not None:\n for contour_dict in self.contour_dicts:\n if not sorted(self.all_bin_names) == \\\n sorted(contour_dict['vars']):\n special_vars = sorted(['sin2theta23', 'deltam32'])\n special_bins = sorted(['theta23', 'deltam31'])\n good_contour = \\\n (sorted(self.all_bin_names) == special_bins) and \\\n (sorted(contour_dict['vars']) == special_vars)\n else:\n good_contour = True\n if not good_contour:\n raise ValueError(\n \"Contour variables - %s - do not match \"\n \"the scan variables - %s.\"%(\n contour_dict['vars'], self.all_bin_names\n )\n )\n\n self.data = data\n self.best_fit_data = best_fit_data", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "def merge_user_information(self, sid):\n pprint(self.extracted_information)\n for (field, value) in self.extracted_information.items():\n value = value[0] # TODO: should set data for everything in list but will do later\n self.data.set_data(sid, field, value[0])", "def get_metrics(self) -> dict:\n return self.metric_dict", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def _create_metric_sum(a,b):\n metric_sum = GridSearchRegressionMetrics()\n metric_sum.explained_variance = a.explained_variance + b.explained_variance\n metric_sum.mean_absolute_error = a.mean_absolute_error + b.mean_absolute_error\n metric_sum.mean_squared_error = a.mean_squared_error + b.mean_squared_error\n metric_sum.r2 = a.r2 + b.r2\n metric_sum.root_mean_squared_error = a.root_mean_squared_error + b.root_mean_squared_error\n return metric_sum", "def calculate_metrics(metrics_data: List[Tuple[Metric, DataType]]) -> List[float]:\n pass", "def add_metric(self, metric):\n self.metrics.append(metric)\n self.estimate()", "def collect(self) -> Metric:\n ret = self.source()\n if ret is None:\n LOGGER.warning('Statistics are not available')\n return\n gauge = 
GaugeMetricFamily('wemo_device_state', 'Status of Wemo device', labels=['address', 'parameter'])\n gauge.add_metric([ret.address, 'today_kwh'], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'current_power_mW'], ret.current_power,\n timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_on_time'], ret.today_on_time, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'on_for'], ret.on_for, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_standby_time'], ret.today_standby_time,\n timestamp=ret.collection_time.timestamp())\n\n yield gauge\n\n counter = CounterMetricFamily('wemo_power_usage', 'Today power consumption', labels=['address'])\n counter.add_metric([ret.address], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n yield counter", "def _average_training_metrics(\n self, per_batch_metrics: List[Dict[str, Any]]\n ) -> List[Dict[str, Any]]:\n check.true(self.hvd_config.use, \"Can only average training metrics in multi-GPU training.\")\n metrics_timeseries = util._list_to_dict(per_batch_metrics)\n\n # combined_timeseries is: dict[metric_name] -> 2d-array.\n # A measurement is accessed via combined_timeseries[metric_name][process_idx][batch_idx].\n combined_timeseries, _ = self._combine_metrics_across_processes(\n metrics_timeseries, num_batches=len(per_batch_metrics)\n )\n\n # If the value for a metric is a single-element array, the averaging process will\n # change that into just the element. We record what metrics are single-element arrays\n # so we can wrap them in an array later (for perfect compatibility with non-averaging\n # codepath).\n array_metrics = []\n for metric_name in per_batch_metrics[0].keys():\n if isinstance(per_batch_metrics[0][metric_name], np.ndarray):\n array_metrics.append(metric_name)\n\n if self.is_chief:\n combined_timeseries_type = Dict[str, List[List[Any]]]\n combined_timeseries = cast(combined_timeseries_type, combined_timeseries)\n num_batches = len(per_batch_metrics)\n num_processes = hvd.size()\n averaged_metrics_timeseries = {} # type: Dict[str, List]\n\n for metric_name in combined_timeseries.keys():\n averaged_metrics_timeseries[metric_name] = []\n for batch_idx in range(num_batches):\n batch = [\n combined_timeseries[metric_name][process_idx][batch_idx]\n for process_idx in range(num_processes)\n ]\n\n np_batch = np.array(batch)\n batch_avg = np.mean(np_batch[np_batch != None]) # noqa: E711\n if metric_name in array_metrics:\n batch_avg = np.array(batch_avg)\n averaged_metrics_timeseries[metric_name].append(batch_avg)\n per_batch_metrics = util._dict_to_list(averaged_metrics_timeseries)\n return per_batch_metrics", "def compute_metrics(self):\n pass", "def merge_stats(self, other):\n\n self[1] += other[1]\n self[2] += other[2]\n self[3] = self[0] and min(self[3], other[3]) or other[3]\n self[4] = max(self[4], other[4])\n self[5] += other[5]\n\n # Must update the call count last as update of the\n # minimum call time is dependent on initial value.\n\n self[0] += other[0]" ]
[ "0.59747314", "0.59122944", "0.54553163", "0.53186303", "0.52896434", "0.52513325", "0.52353007", "0.5187257", "0.5186104", "0.51808596", "0.5174486", "0.5171695", "0.5140604", "0.51390433", "0.5108755", "0.5096023", "0.50914425", "0.50677156", "0.5052862", "0.50481683", "0.503186", "0.5021795", "0.50000453", "0.49747816", "0.495587", "0.4948683", "0.4938958", "0.49351326", "0.4929363", "0.49192527", "0.49183232", "0.4876292", "0.4865573", "0.48604962", "0.4843592", "0.48349634", "0.483348", "0.48315036", "0.48257452", "0.48198393", "0.48115933", "0.47810468", "0.47682956", "0.4766804", "0.47656247", "0.47567266", "0.47551787", "0.47504655", "0.4736554", "0.4736554", "0.47266537", "0.4725826", "0.47242", "0.472187", "0.47215882", "0.4715355", "0.4706922", "0.4706351", "0.46990454", "0.4692899", "0.46733603", "0.465965", "0.46471593", "0.4646864", "0.46464005", "0.4642953", "0.46426138", "0.46404624", "0.46305493", "0.46282506", "0.46204165", "0.46083638", "0.46014494", "0.46013054", "0.45971462", "0.45941687", "0.45870626", "0.4585472", "0.45795316", "0.45771873", "0.45722684", "0.45697534", "0.45677558", "0.45610508", "0.45560414", "0.45528987", "0.4549929", "0.45493788", "0.4544118", "0.4543771", "0.45377335", "0.45343736", "0.4531007", "0.4517487", "0.4509919", "0.4507441", "0.45039445", "0.44988737", "0.4493161", "0.44925797" ]
0.69163495
0
Merge data from another instance of this object.
def merge_stats(self, other):
    self[1] += other[1]
    self[2] += other[2]
    self[3] = self[0] and min(self[3], other[3]) or other[3]
    self[4] = max(self[4], other[4])
    self[5] += other[5]

    # Must update the call count last as update of the
    # minimum call time is dependent on initial value.

    self[0] += other[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge(self, other):\n from .dataset import Dataset\n\n if other is None:\n return self.to_dataset()\n else:\n other_vars = getattr(other, 'variables', other)\n coords = merge_coords_without_align([self.variables, other_vars])\n return Dataset._from_vars_and_coord_names(coords, set(coords))", "def merge(self, other):\n for p in other:\n for key, val in p.items():\n self.contents[key] = val\n\n return self", "def _merge(self, other: dict):\n self._storage = dict_merge(self._storage, other)", "def update(self, other):\n _merge_dicts(self, other)", "def merge(self, obj):\n pass", "def copy_from_other(self, other):\n self.data = other.data\n self.url = other.url\n self.container_factory = other.container_factory", "def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)", "def _merge(self):\n raise NotImplementedError", "def merge(self, other):\n log.debug('Merging: %s and %s' % (self.serialize(), other.serialize()))\n for k in self.keys():\n for new_item in other[k]:\n if new_item not in self[k]:\n self[k].append(new_item)\n log.debug('Result: %s' % self.serialize())\n return self", "def merge(self, other):\n if other.n_points != self.n_points:\n raise ValueError(\n 'Deduplicator size mismatch: '\n f'{self.n_points} != {other.n_points}'\n )\n self.data_reduced.extend(other.data_reduced)\n self.data_kd.extend(other.data_kd)", "def merged_rep(self,other):\n raise NotImplementedError(\"Abstract method\")", "def merge(self, other):\r\n self._train_datas = np.concatenate(\r\n [self._train_datas, other._train_datas], 0)\r\n self._train_labels = np.concatenate(\r\n [self._train_labels, other._train_labels], 0)", "def combine(self, other):\n # Copy and merge\n ppt = PPT()\n ppt.contents = dict(self.contents)\n ppt.merge(other)\n return ppt", "def __finalize__(self, other, method=None, **kwargs):\n self = super().__finalize__(other, method=method, **kwargs)\n # merge operation: using metadata of the left object\n if method == \"merge\":\n for name in self._metadata:\n print(\"self\", name, self.au_columns, other.left.au_columns)\n object.__setattr__(self, name, getattr(other.left, name, None))\n # concat operation: using metadata of the first object\n elif method == \"concat\":\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other.objs[0], name, None))\n return self", "def mergeWith(self, other):\n assert not other.synthesised\n self.globals.update(other.globals)\n self.signals.update(other.signals)\n self.startsOfDataPaths.update(other.startsOfDataPaths)\n self.subUnits.update(other.subUnits)\n \n for s in other.signals:\n s.ctx = self", "def copyDataFrom (self, other):\n\n self.localTimeString=other.localTimeString\n self._myHasLocalTimeString=other._myHasLocalTimeString\n \n self.utcTimeString=other.utcTimeString\n self._myHasUtcTimeString=other._myHasUtcTimeString\n \n self.daylightSavingTime=other.daylightSavingTime\n self._myHasDaylightSavingTime=other._myHasDaylightSavingTime\n \n self.epoch=other.epoch\n self._myHasEpoch=other._myHasEpoch\n \n self.utcOffsetMinutes=other.utcOffsetMinutes\n self._myHasUtcOffsetMinutes=other._myHasUtcOffsetMinutes", "def merge(self, other):\n\n assert self.ins_addr 
== other.ins_addr\n assert self.type == other.type\n\n o = self.copy()\n o.targets |= other.targets\n\n return o", "def merge(self, other):\n self.isotxsMetadata = self.isotxsMetadata.merge(\n other.isotxsMetadata, self, other, \"ISOTXS\", AttributeError\n )\n self.gamisoMetadata = self.gamisoMetadata.merge(\n other.gamisoMetadata, self, other, \"GAMISO\", AttributeError\n )\n self.pmatrxMetadata = self.pmatrxMetadata.merge(\n other.pmatrxMetadata, self, other, \"PMATRX\", AttributeError\n )\n self.micros.merge(other.micros)\n self.gammaXS.merge(other.gammaXS)\n self.neutronHeating = _mergeAttributes(self, other, \"neutronHeating\")\n self.neutronDamage = _mergeAttributes(self, other, \"neutronDamage\")\n self.gammaHeating = _mergeAttributes(self, other, \"gammaHeating\")\n self.isotropicProduction = _mergeAttributes(self, other, \"isotropicProduction\")\n self.linearAnisotropicProduction = _mergeAttributes(\n self, other, \"linearAnisotropicProduction\"\n )\n # this is lazy, but should work, because the n-order wouldn't be set without the others being set first.\n self.nOrderProductionMatrix = (\n self.nOrderProductionMatrix or other.nOrderProductionMatrix\n )", "def __add__(self, other):\r\n # Make a defaultdict of defaultdicts, the latter of which returns\r\n # None when an key is not present\r\n merged_data = defaultdict(lambda: defaultdict(lambda: None))\r\n\r\n # We will keep track of all unique sample_ids and metadata headers\r\n # we have seen as we go\r\n all_sample_ids = set()\r\n all_headers = set()\r\n\r\n # add all values from self into the merged_data structure\r\n for sample_id, data in self._metadata.iteritems():\r\n all_sample_ids.add(sample_id)\r\n for header, value in data.iteritems():\r\n all_headers.add(header)\r\n merged_data[sample_id][header] = value\r\n\r\n # then add all data from other\r\n for sample_id, data in other._metadata.iteritems():\r\n all_sample_ids.add(sample_id)\r\n for header, value in data.iteritems():\r\n all_headers.add(header)\r\n # if the two mapping files have identical sample_ids and\r\n # metadata columns but have DIFFERENT values, raise a value\r\n # error\r\n if merged_data[sample_id][header] is not None and \\\r\n merged_data[sample_id][header] != value:\r\n raise ValueError(\"Different values provided for %s for \"\r\n \"sample %s in different mapping files.\"\r\n % (header, sample_id))\r\n else:\r\n merged_data[sample_id][header] = value\r\n\r\n # Now, convert what we have seen into a normal dict\r\n normal_dict = {}\r\n for sample_id in all_sample_ids:\r\n if sample_id not in normal_dict:\r\n normal_dict[sample_id] = {}\r\n\r\n for header in all_headers:\r\n normal_dict[sample_id][header] = \\\r\n merged_data[sample_id][header]\r\n\r\n # and create a MetadataMap object from it; concatenate comments\r\n return self.__class__(normal_dict, self.Comments + other.Comments)", "def copyDataFrom (self, other):\n\n self.outErrorPackets=other.outErrorPackets\n self._myHasOutErrorPackets=other._myHasOutErrorPackets\n \n self.inErrorPackets=other.inErrorPackets\n self._myHasInErrorPackets=other._myHasInErrorPackets\n \n self.inDiscardPackets=other.inDiscardPackets\n self._myHasInDiscardPackets=other._myHasInDiscardPackets\n \n self.outUnicastPackets=other.outUnicastPackets\n self._myHasOutUnicastPackets=other._myHasOutUnicastPackets\n \n self.inMulticastPackets=other.inMulticastPackets\n self._myHasInMulticastPackets=other._myHasInMulticastPackets\n \n self.outBroadcastPackets=other.outBroadcastPackets\n 
self._myHasOutBroadcastPackets=other._myHasOutBroadcastPackets\n \n self.inBroadcastPackets=other.inBroadcastPackets\n self._myHasInBroadcastPackets=other._myHasInBroadcastPackets\n \n self.outMulticastPackets=other.outMulticastPackets\n self._myHasOutMulticastPackets=other._myHasOutMulticastPackets\n \n self.inUnknownProtocolPackets=other.inUnknownProtocolPackets\n self._myHasInUnknownProtocolPackets=other._myHasInUnknownProtocolPackets\n \n self.outDiscardPackets=other.outDiscardPackets\n self._myHasOutDiscardPackets=other._myHasOutDiscardPackets\n \n self.inUnicastPackets=other.inUnicastPackets\n self._myHasInUnicastPackets=other._myHasInUnicastPackets\n \n self.outOctets=other.outOctets\n self._myHasOutOctets=other._myHasOutOctets\n \n self.inOctets=other.inOctets\n self._myHasInOctets=other._myHasInOctets", "def PassData(self, other):\n for this,that in zip(self.DataSet, other.DataSet):\n for assoc in [ArrayAssociation.POINT, ArrayAssociation.CELL, ArrayAssociation.ROW]:\n if this.HasAttributes(assoc) and that.HasAttributes(assoc):\n this.GetAttributes(assoc).PassData(that.GetAttributes(assoc))", "def merge(self, other):\n\n for child in other.children:\n self.add_deep_copy_of(child, merged=True)", "def add(self, other):\n if not isinstance(other, self.__class__):\n raise ValueError(\n f\"Argument (type {type(other)}) is not a {self.__class__} instance\"\n )\n if len(other.data):\n self.data = pd.concat([self.data, other.data], ignore_index=True)\n self.sort()", "def merge(self, other):\n self._moments = merge_pqc([self, other])._moments\n self._parameters = sp.symarray(self.parameter_symbol, len(self.symbols))\n if self.flatten_circuit:\n self.flatten()", "def merge(self, other: ProjectMeta) -> ProjectMeta:\n return self.clone(\n obj_classes=self._obj_classes.merge(other.obj_classes),\n tag_metas=self._tag_metas.merge(other._tag_metas),\n )", "def concat(self: TAvalancheDataset, other: TAvalancheDataset) -> TAvalancheDataset:\n return self.__class__([self, other])", "def _merge_raw(self, other):\n if other is None:\n variables = OrderedDict(self.variables)\n else:\n # don't align because we already called xarray.align\n variables = merge_coords_without_align(\n [self.variables, other.variables])\n return variables", "def merge(self, graph):\n # keep previous self.filename\n # copy data\n for x in graph.data:\n self.data.append(x)\n # copy headers, unless already exists (is so, info is lost)\n for key in graph.headers:\n if key not in self.headers:\n self.headers.update({key: graph.headers[key]})\n # copy graphInfo, unless already exists (is so, info is lost)\n for key in graph.graphInfo:\n if key not in self.graphInfo:\n self.graphInfo.update({key: graph.graphInfo[key]})\n # copy sampleInfo, unless already exists (is so, info is lost)\n for key in graph.sampleInfo:\n if key not in self.sampleInfo:\n self.sampleInfo.update({key: graph.sampleInfo[key]})", "def mergeWith(self, others):", "def extend(self, other_rollout):\n\n assert not self.is_terminal()\n assert all(k in other_rollout.fields for k in self.fields)\n for k, v in other_rollout.data.items():\n self.data[k].extend(v)\n self.last_r = other_rollout.last_r", "def __finalize__(self, other, method=None, **kwargs):\r\n # merge operation: using metadata of the left object\r\n if method == 'merge':\r\n for name in self._metadata:\r\n object.__setattr__(self, name, getattr(other.left, name, None))\r\n # concat operation: using metadata of the first object\r\n elif method == 'concat':\r\n for name in self._metadata:\r\n 
object.__setattr__(self, name, getattr(other.objs[0], name, None))\r\n else:\r\n for name in self._metadata:\r\n object.__setattr__(self, name, getattr(other, name, None))\r\n return self", "def merge_content(self, other):\n self.__content += other.__content", "def union(self, other):\n self.vertices.extend(other.vertices)\n self.edges.extend(other.edges)\n self.faces.extend(other.faces)\n return self", "def merge_other(self, other):\n assert(not other.isSet())\n with self.__cond:\n if self.__isset:\n other.set(self.__data)\n else:\n self.__merged.append(other)", "def get_merged_data(self):\n return self._combinedata", "def add_other_meta_data(self, other: _MetaData) -> None:\n\n for key in other._meta_data_dict.keys():\n self.add_data(key, other._meta_data_dict[key])", "def combine(self, other) -> None:\n assert self.id_ == other.id_\n assert self.type_ == other.type_\n self.count += other.count", "def merge(self, other: PerfData):\n self.total_samples += other.total_samples\n if self.total_time == 0.0:\n self.total_time = other.total_time\n self.compile_time = max(self.compile_time, other.compile_time)\n self.programming_time = max(\n self.programming_time, other.programming_time\n )\n if self.est_samples_per_sec == 0.0:\n self.est_samples_per_sec = other.est_samples_per_sec\n else:\n assert (\n self.est_samples_per_sec == other.est_samples_per_sec\n ), \"Expected all fabric-based performance estimates to be identical\"\n\n if self.total_time > 0:\n self.samples_per_sec = float(self.total_samples) / self.total_time\n else:\n self.samples_per_sec = 0.0", "def merge(self, other):\n merged = copy.deepcopy(self.__dict__())\n for k, v in other.__dict__():\n if k in merged and getattr(self, k):\n if isinstance(v, (string_types, bool)):\n pass\n else:\n list_of_stuff = merged.get(k, [])\n for entry in v:\n if entry not in list_of_stuff:\n list_of_stuff.append(entry)\n merged[k] = list_of_stuff\n else:\n merged[k] = v\n return CondaEnvironmentProvider(**merged)", "def __init__(self, v1, v2):\n mergedData = []\n list(map(mergedData.extend, list(zip_longest(v1, v2))))\n self.data = list(filter(lambda x: x is not None, mergedData))\n self.index = 0", "def combine_data(self, object, additional_data):\n object[\"ancestors\"] = additional_data[\"ancestors\"] if self.cartographer_client else []\n object[\"position\"] = additional_data.get(\"order\", 0) if additional_data else 0\n object = super(ResourceMerger, self).combine_data(object, additional_data)\n return combine_references(object)", "def merge(self, other):\n extras = other.difference(self)\n if len(extras) > 0:\n self.update(extras)\n self.reset()\n return True\n return False", "def merge(self, other):\n # todo: Using the return value None to denote the identity is a\n # bit dangerous, since a function with no explicit return statement\n # also returns None, which can lead to puzzling bugs. 
Maybe return\n # a special singleton Identity object instead?\n raise NotImplementedError", "def __add__(self, other):\n self.__dict__.update(other)\n return self", "def merge(self, other):\n self._segments.extend(other._segments)\n self._segments.sort()", "def merge_struct_arrays(self, data1, data2):\n data_final = np.concatenate((data1, data2))\n return data_final", "def copy(self):\n return self.update({})", "def merge(self, other_config):\n # Make a copy of the current attributes in the config object.\n config_options = copy.copy(self._user_provided_options)\n\n # Merge in the user provided options from the other config\n config_options.update(other_config._user_provided_options)\n\n # Return a new config object with the merged properties.\n return Config(**config_options)", "def __add__(self, other):\n merged_profile = super().__add__(other)\n\n # unstruct specific property merging\n merged_profile._empty_line_count = (\n self._empty_line_count + other._empty_line_count)\n merged_profile.memory_size = self.memory_size + other.memory_size\n samples = list(dict.fromkeys(self.sample + other.sample))\n merged_profile.sample = random.sample(list(samples),\n min(len(samples), 5))\n\n # merge profiles\n merged_profile._profile = self._profile + other._profile\n\n return merged_profile", "def update_inplace_from(self, other):\n self.__dict__ = other.__dict__.copy()", "def merge(self, other):\n self._mergeKeys(other)\n self._binaryOperationCheck(other)\n for id in self.clock.keys():\n print id\n self.clock[id] = max(self.clock[id], other.clock[id])", "def combine(self, existing):\n return self", "def _extend(self, other):\n for key, value in list(other.entries.items()):\n self._add_entry(key, value)", "def join(self, other, on):\n\t\t# check for correct join\n\t\tif not (on in self.headers or on in other.headers):\n\t\t\tprint \"Error: header '{0}' not found in both collections\".format(on)\n\t\t\treturn None\n\n\t\t# create new dataset\n\t\tjoined = Dataset()\n\t\t\n\t\t# fill new dataset with combined data\n\t\tmappedHeaders = joinHeaders(self, other, joined, on)\n\t\tmergeRows(self, other, joined, on, mappedHeaders)\n\t\tjoined.ensureFilled()\n\n\t\t# return newly created dataset\n\t\treturn joined", "def merge_two_dicts(self, x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_contextual(self, other):\n # TODO: This is currently dependent on our data model? 
Make more robust to schema changes\n # Currently we assume all lists at Compound level, with 1 further potential nested level of lists\n for k in self.keys():\n # print('key: %s' % k)\n for item in self[k]:\n # print('item: %s' % item)\n for other_item in other.get(k, []):\n # Skip text properties (don't merge names, labels, roles)\n if isinstance(other_item, six.text_type):\n continue\n for otherk in other_item.keys():\n if isinstance(other_item[otherk], list):\n if len(other_item[otherk]) > 0 and len(item[otherk]) > 0:\n other_nested_item = other_item[otherk][0]\n for othernestedk in other_nested_item.keys():\n for nested_item in item[otherk]:\n if not nested_item[othernestedk]:\n nested_item[othernestedk] = other_nested_item[othernestedk]\n elif not item[otherk]:\n item[otherk] = other_item[otherk]\n log.debug('Result: %s' % self.serialize())\n return self", "def __add__(self, other):\n mesh = deepcopy(self)\n mesh.MergeWith(other)\n return mesh", "def copy_(self, other):\n self.share.copy_(other.share)\n self.encoder = other.encoder", "def combine_dict(self, dict2):\n # iterate through smaller data set\n # base_set will be the larger set and is used for updating\n if len(self.content[\"values\"]) > len(dict2[\"values\"]):\n large_set = self.content[\"values\"]\n small_set = dict2[\"values\"]\n base_set = self.content\n else:\n small_set = self.content[\"values\"]\n large_set = dict2[\"values\"]\n base_set = dict2\n\n subset = {}\n for key in small_set.keys():\n # determine wether to compare keys\n if key in large_set:\n updated_l = large_set[key][\"updated_at\"]\n updated_s = small_set[key][\"updated_at\"]\n if updated_l == 'NULL':\n if updated_s != 'NULL':\n # update to not NULL set\n # if both updated_at are NULL, things\n # are ambiguos. We could defer to created_at\n # but for simplicity we will default to\n # the values in the larger set\n subset[key] = small_set[key]\n else:\n if updated_s == 'NULL':\n # update to not NULL set\n subset[key] = large_set[key]\n else:\n if updated_l > updated_s:\n subset[key] = large_set[key]\n else:\n subset[key] =small_set[key]\n else:\n subset[key] = small_set[key]\n base_set[\"values\"].update(subset)\n new_obj = BackupData()\n new_obj.load_from_dict(base_set)\n return new_obj", "def hallucinate_merge(self, other):\n res = CompleteVec(None,None,self.max_num_samples)\n res.needs_update = True\n return res", "def extend(self, other):\n if len(self.vertices[0]) != len(other.vertices[0]):\n raise ValueError(\"Rank mismatch ({0} != \"\n \"{1})\".format(self.vertices.shape[1],\n other.vertices.shape[1]))\n if self._geotype != other._geotype:\n raise TypeError(\"Geometry mismatch ({0} != \"\n \"{1})\".format(self._geotype, other._geotype))\n\n self.vertices = np.vstack([self.vertices, other.vertices])\n self._cache = {}\n return self", "def merge(self, *other):\n # Compute union of Fingerprints\n union = set().union(self, *other)\n # Create new fingerprint from union\n result = super(Fingerprint, type(self)).__new__(type(self), union)\n # Set n_flows to combination of self and other\n result.__setattr__('n_flows', self.n_flows + sum(o.n_flows for o in other))\n # Return result\n return result", "def merge_datasets(self, other):\r\n if isinstance(other, SpatialDataFrame) and \\\r\n other.geometry_type == self.geometry_type:\r\n return pd.concat(objs=[self, other], axis=0)\r\n elif isinstance(other, DataFrame):\r\n return pd.concat(objs=[self, other], axis=0)\r\n elif isinstance(other, Series):\r\n self['merged_datasets'] = other\r\n elif 
isinstance(other, SpatialDataFrame) and \\\r\n other.geometry_type != self.geometry_type:\r\n raise ValueError(\"Spatial DataFrames must have the same geometry type.\")\r\n else:\r\n raise ValueError(\"Merge datasets cannot merge types %s\" % type(other))", "def __add__ ( self, other, resample_opts=None ):\n result = ObservationStorage (datadir=self.datadir, \\\n resample_opts=resample_opts )\n if self.date[0] > other.date[0]:\n start_date = other.date[0]\n else:\n start_date = self.date[0]\n if self.date[-1] > other.date[-1]:\n end_date = other.date[-1]\n else:\n end_date = self.date[-1]\n \n delta = datetime.timedelta ( days=1 )\n this_date = start_date.date()\n end_date = end_date.date() + delta\n \n this_obs_dates = [ x.date() for x in self.date ]\n other_obs_dates = [ x.date() for x in other.date ]\n \n date = [] ; vza = [] ; vaa = [] ; sza = [] ; saa = []\n emulator = [] ; mask = [] ; data_pntr = [] ; spectral = []\n sensor = []\n \n while this_date < end_date:\n if this_date in this_obs_dates:\n iloc = this_obs_dates.index ( this_date )\n date.append ( self.date[iloc] )\n emulator.append ( self.emulator[iloc] )\n vza.append ( self.vza[iloc] )\n sza.append ( self.sza[iloc] )\n vaa.append ( self.vaa[iloc] )\n saa.append ( self.saa[iloc] )\n spectral.append ( self.spectral )\n mask.append ( ( self.get_mask, [iloc] ) )\n sensor.append ( self.sensor )\n \n data_pntr.append ( self._data_pntr[iloc] )\n if this_date in other_obs_dates:\n iloc = other_obs_dates.index ( this_date )\n date.append ( other.date[iloc] )\n emulator.append ( other.emulator[iloc] )\n vza.append ( other.vza[iloc] )\n sza.append ( other.sza[iloc] )\n vaa.append ( other.vaa[iloc] )\n saa.append ( other.saa[iloc] )\n spectral.append ( other.spectral )\n mask.append ( ( other.get_mask, [iloc] ) )\n sensor.append ( other.sensor )\n data_pntr.append ( other._data_pntr[iloc] )\n this_date += delta\n result.vza = vza\n result.vaa = vaa\n result.sza = sza \n result.saa = saa \n result.date = date\n result.spectral = spectral\n result.masks = mask\n result.sensor = sensor\n result.emulator = emulator\n result._data_pntr = data_pntr\n return result", "def Merge(self, other):\n\n # Logging just in case\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: before\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: deleted\", %s);'\n %(other.persistant['id'], \n sql.FormatSqlValue('details',\n repr(other.persistant))))\n\n # Fields which can be summed\n for f in ['plays', 'skips']:\n self.persistant[f] = (self.persistant.get(f, 0) +\n other.persistant.get(f, 0))\n\n # Date fields where we take the newest\n for f in ['last_played', 'last_skipped', 'last_action']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a > b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Date fields where we take the oldest\n for f in ['creation_time']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a < b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Fields where we only clobber ours if we don't have a value\n for f in ['artist', 'album', 'song']:\n if not self.persistant.has_key(f) or not 
self.persistant[f]:\n self.persistant[f] = other.persistant.get(f, None)\n\n # Sometimes the number is a placeholder\n if self.persistant.has_key('number') and self.persistant['number'] == -1:\n self.persistant['number'] = other.persistant.get('number', -1)\n if not self.persistant.has_key('number'):\n self.persistant['number'] = other.persistant.get('number', -1)\n\n # Update the id in the tags table\n tags = self.db.GetRows('select tag from tags where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: tags: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(tags))))\n\n try:\n self.db.ExecuteSql('update tags set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n except:\n # This can happen if the is already a matching tag for the first track\n pass\n\n # Update the id in the paths table\n paths = self.db.GetRows('select path from paths where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: paths: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(paths))))\n \n self.db.ExecuteSql('update paths set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: after\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('commit;')", "def concat(self, other):\n self.add_rules(other.cliques)\n self.prop_names.update(other.prop_names)", "def source(self, other):\n raise NotImplementedError", "def __add__(self, other):\n if not isinstance(other, RunTS):\n raise TypeError(f\"Cannot combine {type(other)} with RunTS.\")\n\n # combine into a data set use override to keep attrs from original\n combined_ds = xr.combine_by_coords(\n [self.dataset, other.dataset], combine_attrs=\"override\"\n )\n\n n_samples = (\n self.sample_rate\n * float(\n combined_ds.time.max().values - combined_ds.time.min().values\n )\n / 1e9\n ) + 1\n\n new_dt_index = make_dt_coordinates(\n combined_ds.time.min().values,\n self.sample_rate,\n n_samples,\n self.logger,\n )\n\n new_run = RunTS(\n run_metadata=self.run_metadata,\n station_metadata=self.station_metadata,\n survey_metadata=self.survey_metadata,\n )\n\n new_run.dataset = combined_ds.interp(\n time=new_dt_index, method=\"slinear\"\n )\n\n new_run.run_metadata.update_time_period()\n new_run.station_metadata.update_time_period()\n new_run.survey_metadata.update_time_period()\n new_run.filters = self.filters\n new_run.filters.update(other.filters)\n\n return new_run", "def __add__(self, other):\n if type(other) is not type(self):\n raise TypeError('`{}` and `{}` are not of the same profiler type.'.\n format(type(self).__name__, type(other).__name__))\n\n # error checks specific to its profiler\n self._add_error_checks(other)\n\n merged_profile = self.__class__(\n data=None, samples_per_update=self._samples_per_update,\n min_true_samples=self._min_true_samples, options=self.options\n )\n merged_profile.encoding = self.encoding\n if self.encoding != other.encoding:\n merged_profile.encoding = 'multiple files'\n\n merged_profile.file_type = self.file_type\n if self.file_type != 
other.file_type:\n merged_profile.file_type = 'multiple files'\n\n merged_profile.total_samples = self.total_samples + other.total_samples\n\n merged_profile.times = utils.add_nested_dictionaries(self.times,\n other.times)\n\n return merged_profile", "def copyAttributes(self, other, add_nxpars=False):\n import copy\n \n self.setTitle(other.getTitle())\n self.setDataSetType(other.getDataSetType())\n self.setAllAxisLabels(other.getAllAxisLabels())\n self.setAllAxisUnits(other.getAllAxisUnits())\n self.setYLabel(other.getYLabel())\n self.setYUnits(other.getYUnits())\n if len(self.attr_list.keys()) == 0:\n self.attr_list = copy.copy(other.attr_list)\n else:\n self.attr_list.instrument = copy.copy(other.attr_list.instrument)\n self.attr_list.sample = copy.copy(other.attr_list.sample)\n\n if add_nxpars:\n nxpar_keys = [item[0] for item in self.attr_list.iteritems() \\\n if isinstance(item[1], NxParameter)]\n\n for nxpar_key in nxpar_keys:\n self.attr_list[nxpar_key] += other.attr_list[nxpar_key]\n else:\n # Do nothing\n pass\n \n keys_to_get = [other_key for other_key in other.attr_list \\\n if other_key not in self.attr_list]\n \n for key_to_get in keys_to_get:\n self.attr_list[key_to_get] = \\\n copy.copy(other.attr_list[key_to_get])", "def updateFromContext(self, other):\n value = self.valueType.set(self.value, other.value)\n self.set(value)\n self.origins.extend(other.origins)", "def combine_data(self, object, additional_data):\n for k, v in additional_data.items():\n if isinstance(v, list):\n object[k] = object.get(k, []) + v\n else:\n object[k] = v\n for instance in object.get(\"instances\", []):\n if instance.get(\"sub_container\", {}).get(\"top_container\", {}).get(\"_resolved\"):\n del instance[\"sub_container\"][\"top_container\"][\"_resolved\"]\n object = super(ArchivalObjectMerger, self).combine_data(object, additional_data)\n return combine_references(object)", "def merge(self, other):\n\n if not self.can_merge(other):\n raise ValueError('These protocols can not be safely merged.')\n\n inputs_to_consider = self._find_inputs_to_merge()\n\n for input_path in inputs_to_consider:\n\n merge_behavior = getattr(type(self), input_path.property_name).merge_behavior\n\n if merge_behavior == MergeBehaviour.ExactlyEqual:\n continue\n\n if (isinstance(self.get_value(input_path), ProtocolPath) or\n isinstance(other.get_value(input_path), ProtocolPath)):\n\n continue\n\n if merge_behavior == InequalityMergeBehaviour.SmallestValue:\n value = min(self.get_value(input_path), other.get_value(input_path))\n elif merge_behavior == InequalityMergeBehaviour.LargestValue:\n value = max(self.get_value(input_path), other.get_value(input_path))\n else:\n raise NotImplementedError()\n\n self.set_value(input_path, value)\n\n return {}", "def merge_user_information(self, sid):\n pprint(self.extracted_information)\n for (field, value) in self.extracted_information.items():\n value = value[0] # TODO: should set data for everything in list but will do later\n self.data.set_data(sid, field, value[0])", "def __add__(self, other, inplace=False, **kwargs):\n output = super(HERAData, self).__add__(other, inplace=inplace, **kwargs)\n if inplace:\n output = self\n output._determine_blt_slicing()\n output._determine_pol_indexing()\n if not inplace:\n return output", "def _copy_data_from(self, original):\n raise NotImplementedError()", "def merge_results(self, other_processor):\n if not isinstance(other_processor, self.__class__):\n raise ValueError(f\"Can only extend with another \"\n f\"{self.__class__.__name__} 
instance.\")\n\n # Where there is overlap, there _should_ be agreement.\n self._evidence_counts.update(other_processor._evidence_counts)\n self._source_counts.update(other_processor._source_counts)\n self._belief_scores.update(other_processor._belief_scores)\n\n # Merge the statement JSONs.\n for k, sj in other_processor.__statement_jsons.items():\n if k not in self.__statement_jsons:\n self.__statement_jsons[k] = sj # This should be most of them\n else:\n # This should only happen rarely.\n for evj in sj['evidence']:\n self.__statement_jsons[k]['evidence'].append(evj)\n\n # Recompile the statements\n self._compile_results()\n return", "def merge(self, new_store):\n if new_store.name and len(new_store.name) > 0:\n self.name = new_store.name\n if new_store.address and len(new_store.address) > 0:\n self.address = new_store.address\n if new_store.city and len(new_store.city) > 0:\n self.city = new_store.city\n if new_store.state and len(new_store.state) > 0:\n self.state = new_store.state\n if new_store.zip and new_store.zip > 0:\n self.zipcode = new_store.zip\n if new_store.phone and new_store.phone > 0:\n self.phone = new_store.phone", "def __add__(self, other):\n train = copy.deepcopy(self.train)\n\n for img_path, pid, camid, dsetid in other.train:\n pid += self.num_train_pids\n camid += self.num_train_cams\n dsetid += self.num_datasets\n train.append((img_path, pid, camid, dsetid))\n\n ###################################\n # Note that\n # 1. set verbose=False to avoid unnecessary print\n # 2. set combineall=False because combineall would have been applied\n # if it was True for a specific dataset; setting it to True will\n # create new IDs that should have already been included\n ###################################\n if isinstance(train[0][0], str):\n return ImageDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False\n )\n else:\n return VideoDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False,\n seq_len=self.seq_len,\n sample_method=self.sample_method\n )", "def merge(self, other_btree):\n pass", "def update(self, other):\n self._start = other._start\n self._end = other._end\n self._nodes = {k: v.copy() for k,v in other._nodes.iteritems()}\n self._edges = {k: set(v) for k,v in other._edges.iteritems()}\n self._names = set(other._names)\n self.current = other.current", "def merge_with(self, other: \"Availability\") -> \"Availability\":\n\n if not isinstance(other, Availability):\n raise Exception(\"Please provide an Availability object.\")\n if not other.overlaps(self, strict=False):\n raise Exception(\"Only overlapping Availabilities can be merged.\")\n\n return Availability(\n start=min(self.start, other.start),\n end=max(self.end, other.end),\n event=getattr(self, \"event\", None),\n person=getattr(self, \"person\", None),\n room=getattr(self, \"room\", None),\n )", "def union(self, other: Catalog) -> Catalog:\n cat = self.copy()\n oth_cp = other.copy()\n\n for k in oth_cp.keys():\n for ver_id, version in oth_cp[k].versions.items():\n cat[k][ver_id] = version\n return cat", "def sync(self, other):\n pass # TODO", "def __add__(self, other):\n\n if not isinstance(other, Photons):\n raise ValueError('Can only add a Photons object to another Photons object.')\n\n # don't want to modify what is being added\n other = other.copy()\n\n # make column units consistent with self\n other.match_units(self)\n\n # add and /or update observation 
columns as necessary\n self.add_observations_column()\n other.add_observations_column()\n n_obs_self = len(self.obs_metadata)\n other['n'] += n_obs_self\n\n # re-reference times to the datum of self\n other.set_time_datum(self.time_datum)\n\n # stack the data tables\n photons = _tbl.vstack([self.photons, other.photons])\n\n # leave it to the user to deal with sorting and grouping and dealing with overlap as they see fit :)\n obs_metadata = self.obs_metadata + other.obs_metadata\n obs_times = list(self.obs_times) + list(other.obs_times)\n obs_bandpasses = list(self.obs_bandpasses) + list(other.obs_bandpasses)\n\n return Photons(photons=photons, obs_metadata=obs_metadata, time_datum=self.time_datum, obs_times=obs_times,\n obs_bandpasses=obs_bandpasses)", "def populate(self, fid1, fid2):\n self.input1 = json.load(fid1)\n self.input2 = json.load(fid2)", "def merge(self, other):\n\n if not self.can_merge(other):\n msg = 'Unable to merge \"{}\" with \"{}\" filters'.format(\n self.type, other.type)\n raise ValueError(msg)\n\n # Create deep copy of filter to return as merged filter\n merged_filter = copy.deepcopy(self)\n\n # Merge unique filter bins\n merged_bins = self.bins + other.bins\n\n # Sort energy bin edges\n if 'energy' in self.type:\n merged_bins = sorted(merged_bins)\n\n # Assign merged bins to merged filter\n merged_filter.bins = list(merged_bins)\n return merged_filter", "def merge(self, other: \"GraphSet\") -> None:\n if other.name != self.name:\n raise UnmergableGraphSetsException(\n f\"Unable to merge graph with name {other.name} into {self.name}\"\n )\n if other.version != self.version:\n raise UnmergableGraphSetsException(\n f\"Unable to merge graph with version {other.version} into {self.version}\"\n )\n self.start_time = min(self.start_time, other.start_time)\n self.end_time = max(self.end_time, other.end_time)\n self.resources += other.resources\n self._resolve_duplicates()\n self.errors += other.errors\n self.stats.merge(other.stats)", "def copy_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.copy_from(other.parent)\n self.isolated_names = copy.copy(other.isolated_names)\n self.modified = copy.copy(other.modified)\n self.read = copy.copy(other.read)\n self.deleted = copy.copy(other.deleted)\n self.bound = copy.copy(other.bound)\n self.annotations = copy.copy(other.annotations)\n self.params = copy.copy(other.params)", "def merge(self, object_class):\n other_oc = self.schema.get_object_class(object_class)\n self.required_attrs |= other_oc.required_attrs\n self.allowed_attrs |= other_oc.allowed_attrs", "def fill(self, other):\n if self.stream_id is None:\n self.stream_id = other.stream_id\n\n if self.type is None:\n self.type = other.type\n\n if self.length is None:\n self.length = other.length\n\n if self.timestamp is None:\n self.timestamp = other.timestamp\n\n assert self.stream_id is not None\n assert self.type is not None\n assert self.length is not None\n assert self.timestamp is not None\n assert self.object_id is not None", "def __iadd__(self, other):\n self.MergeWith(other)\n return self", "def copy(self):\n new_data_collection = DataCollection()\n for item in self.iteritems():\n new_data_collection.add_data(item)\n return new_data_collection", "def merge(self, other):\n if other is None:\n return\n if self.theta1 > other.theta1:\n self.theta1 = other.theta1\n self.p1 = other.p1\n if self.theta2 < other.theta2:\n self.theta2 = other.theta2\n self.p2 = other.p2", "def initFromOther(self, 
oOther):\n for sAttr in self.getDataAttributes():\n setattr(self, sAttr, getattr(oOther, sAttr));\n return self;", "def update(self, other: dict):\n for key in other:\n if key in self:\n self[key] = other[key]", "def join_data(self, base_data, join_data, base_field, join_fields):\n for data in base_data:\n extra = join_data[data[base_field]]\n for field in join_fields:\n data[field] = extra[field]\n \n return base_data", "def copy_with(self):\n return self.copy()", "def copy_values(self, another):\n\n # Copy all value, uncertainty, and source information from the other\n # ExoParameter object.\n if isinstance(another, ExoParameter):\n self.reference = another.reference\n self.uncertainty = another.uncertainty\n self.uncertainty_lower = another.uncertainty_lower\n self.uncertainty_upper = another.uncertainty_upper\n self.units = another.units\n self.url = another.url\n self.value = another.value\n else:\n raise TypeError(\"Cannot copy values from a non-ExoParameter obj!\")", "def extend(self, other, adapt_conf=True):\n # Check if category metadata match\n if (self.size() > 0) and (other.size() > 0):\n for attr in [\"is_cat_inclusive\", \"is_categorised\"]:\n a, b = getattr(self, attr), getattr(other, attr)\n if a != b:\n raise ConcatenationError(\n f\"Categorisation metadata is different for '{attr}': {a} != {b}\"\n )\n elif other.size() > 0:\n for attr in [\"is_cat_inclusive\", \"is_categorised\"]:\n setattr(self, attr, getattr(other, attr))\n if getattr(self, \"tstep_h\", None) is None:\n self.tstep_h = getattr(other, \"tstep_h\", None)\n else:\n if getattr(other, \"tstep_h\", None) is not None:\n if self.tstep_h != other.tstep_h:\n raise ConcatenationError(\n \"Extending by a TrackRun with different timestep is not allowed\"\n )\n if adapt_conf and other.conf is not None:\n if self.conf is None:\n self.conf = other.conf.copy()\n else:\n for field in self.conf._fields:\n if getattr(self.conf, field) != getattr(other.conf, field):\n setattr(self.conf, field, None)\n self.sources.extend(other.sources)\n\n new_data = pd.concat([self.data, other.data], sort=False)\n new_track_idx = new_data.index.get_level_values(0).to_series()\n new_track_idx = new_track_idx.ne(new_track_idx.shift()).cumsum() - 1\n\n mux = pd.MultiIndex.from_arrays(\n [new_track_idx, new_data.index.get_level_values(1)], names=new_data.index.names\n )\n self.data = new_data.set_index(mux)\n\n # Concatenate categories\n if (self.cats is not None) or (other.cats is not None):\n new_cats = pd.concat([self.cats, other.cats], sort=False).fillna(False)\n new_track_idx = new_cats.index.get_level_values(0).to_series()\n new_track_idx = new_track_idx.ne(new_track_idx.shift()).cumsum() - 1\n\n ix = pd.Index(new_track_idx, name=new_cats.index.name)\n self.cats = new_cats.set_index(ix)", "def merge(self, other: Schema) -> Schema:\n if duplicates := self.keys() & other.keys():\n raise IntegrityError(f'Duplicate column name(s): {duplicates}')\n return self.__class__({**self, **other})" ]
[ "0.69505", "0.694683", "0.6885921", "0.68476945", "0.68086684", "0.6734038", "0.67226946", "0.66790885", "0.66472447", "0.66369003", "0.6572359", "0.6542647", "0.6503335", "0.65015614", "0.6469766", "0.64362967", "0.64317465", "0.6385473", "0.6376822", "0.6375423", "0.63703424", "0.63361776", "0.631753", "0.62937146", "0.62908316", "0.62438846", "0.61748385", "0.61556965", "0.61476624", "0.61324286", "0.60887307", "0.607825", "0.60765857", "0.60704356", "0.6068133", "0.60656786", "0.6027501", "0.6009837", "0.6009332", "0.5985768", "0.59678525", "0.5943717", "0.5924525", "0.5924524", "0.59010845", "0.5876143", "0.584064", "0.5831239", "0.58201", "0.5816105", "0.58112156", "0.5809095", "0.5804941", "0.57959485", "0.5794767", "0.57855856", "0.5785261", "0.5774561", "0.57736063", "0.57714903", "0.576705", "0.575061", "0.57354474", "0.57290703", "0.57218975", "0.5712195", "0.5704418", "0.5698822", "0.5693745", "0.56931365", "0.56895113", "0.5687284", "0.56764215", "0.5674636", "0.56723565", "0.5657785", "0.5655855", "0.56472", "0.5636454", "0.5634588", "0.5628336", "0.5626043", "0.5623629", "0.56202084", "0.55988896", "0.55897945", "0.55770785", "0.5576054", "0.5574003", "0.5573844", "0.5573058", "0.5572744", "0.55714935", "0.556345", "0.55581564", "0.55573905", "0.5538598", "0.5538142", "0.5534426", "0.55301327", "0.5527462" ]
0.0
-1
Merge data from a time metric object.
def merge_time_metric(self, metric):
    self.merge_raw_time_metric(metric.duration, metric.exclusive)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combine(cls, date_obj, time_obj):\n return cls(date_obj.year, date_obj.month, date_obj.day,\n time_obj.hour, time_obj.minute, time_obj.second,\n time_obj.nanosecond)", "def add_time(data, t):\n data['year'] = t.year\n data['month'] = t.month\n data['day'] = t.day\n data['hour'] = t.hour\n data['minute'] = t.minute\n data['second'] = t.second", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def _load_time(self):\n\n time_variables = ('time', 'Times', 'Itime', 'Itime2')\n got_time, missing_time = [], []\n for time in time_variables:\n # Since not all of the time_variables specified above are required, only try to load the data if they\n # exist. We'll raise an error if we don't find any of them though.\n if time in self.ds.variables:\n setattr(self.time, time, self.ds.variables[time][:])\n got_time.append(time)\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[time].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[time], attribute))\n setattr(self.atts, time, attributes)\n else:\n missing_time.append(time)\n\n if len(missing_time) == len(time_variables):\n warn('No time variables found in the netCDF.')\n else:\n if 'Times' in got_time:\n # Overwrite the existing Times array with a more sensibly shaped one.\n self.time.Times = np.asarray([''.join(t.astype(str)).strip() for t in self.time.Times])\n\n # Make whatever we got into datetime objects and use those to make everything else. 
Note: the `time' variable\n # is often the one with the lowest precision, so use the others preferentially over that.\n if 'Times' not in got_time:\n if 'time' in got_time:\n _dates = num2date(self.time.time, units=getattr(self.ds.variables['time'], 'units'))\n elif 'Itime' in got_time and 'Itime2' in got_time:\n _dates = num2date(self.time.Itime + self.time.Itime2 / 1000.0 / 60 / 60, units=getattr(self.ds.variables['Itime'], 'units'))\n try:\n self.time.Times = np.array([datetime.strftime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in _dates])\n except ValueError:\n self.time.Times = np.array([datetime.strftime(d, '%Y/%m/%d %H:%M:%S.%f') for d in _dates])\n # Add the relevant attribute for the Times variable.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Times', attributes)\n\n if 'time' not in got_time:\n if 'Times' in got_time:\n try:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y-%m-%dT%H:%M:%S.%f') for t in self.time.Times])\n except ValueError:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y/%m/%d %H:%M:%S.%f') for t in self.time.Times])\n elif 'Itime' in got_time and 'Itime2' in got_time:\n _dates = num2date(self.time.Itime + self.time.Itime2 / 1000.0 / 60 / 60, units=getattr(self.ds.variables['Itime'], 'units'))\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n self.time.time = date2num(_dates, units='days since 1858-11-17 00:00:00')\n # Add the relevant attributes for the time variable.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'days since 1858-11-17 00:00:00')\n setattr(attributes, 'long_name', 'time')\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'time', attributes)\n\n if 'Itime' not in got_time and 'Itime2' not in got_time:\n if 'Times' in got_time:\n try:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y-%m-%dT%H:%M:%S.%f') for t in self.time.Times])\n except ValueError:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y/%m/%d %H:%M:%S.%f') for t in self.time.Times])\n elif 'time' in got_time:\n _dates = num2date(self.time.time, units=getattr(self.ds.variables['time'], 'units'))\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n _datenum = date2num(_dates, units='days since 1858-11-17 00:00:00')\n self.time.Itime = np.floor(_datenum)\n self.time.Itime2 = (_datenum - np.floor(_datenum)) * 1000 * 60 * 60 # microseconds since midnight\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'days since 1858-11-17 00:00:00')\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Itime', attributes)\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'msec since 00:00:00')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Itime2', attributes)\n\n # Additional nice-to-have time representations.\n if 'Times' in got_time:\n try:\n self.time.datetime = np.array([datetime.strptime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in self.time.Times])\n except ValueError:\n self.time.datetime = np.array([datetime.strptime(d, '%Y/%m/%d %H:%M:%S.%f') for d in self.time.Times])\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'long_name', 'Python datetime.datetime')\n setattr(self.atts, 'datetime', 
attributes)\n else:\n self.time.datetime = _dates\n self.time.matlabtime = self.time.time + 678942.0 # convert to MATLAB-indexed times from Modified Julian Date.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'long_name', 'MATLAB datenum')\n setattr(self.atts, 'matlabtime', attributes)\n\n # Clip everything to the time indices if we've been given them. Update the time dimension too.\n if 'time' in self._dims:\n if all([isinstance(i, (datetime, str)) for i in self._dims['time']]):\n # Convert datetime dimensions to indices in the currently loaded data.\n self._dims['time'][0] = self.time_to_index(self._dims['time'][0])\n self._dims['time'][1] = self.time_to_index(self._dims['time'][1]) + 1 # make the indexing inclusive\n for time in self.obj_iter(self.time):\n setattr(self.time, time, getattr(self.time, time)[self._dims['time'][0]:self._dims['time'][1]])\n self.dims.time = len(self.time.time)", "def update_time(self):\n time_metrics = self._fetch_time_metrics_and_clear()\n self._logger.info('update_time. time_metrics = %s', build_metrics_times_data(time_metrics))", "def _get_time(self) -> None:\n self.data[\"time\"] = np.zeros(len(self.data[\"yyyymmdd\"]), dtype=object)\n \n for idx, (yyyymmdd, hhmmss) in enumerate(zip(self.data[\"yyyymmdd\"], self.data[\"hhmmss\"])):\n year, month, day = yyyymmdd.split(\"/\")\n hour, minute, second = hhmmss.split(\":\")\n self.data[\"time\"][idx] = datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n \n del self.data[\"yyyymmdd\"]\n del self.data[\"hhmmss\"]", "def _fetch_time_metrics_and_clear(self):\n with self._time_rlock:\n time_metrics = self._time_metrics\n self._time_metrics = defaultdict(LatencyTracker)\n\n return time_metrics", "def build_metrics_times_data(time_metrics):\n return [{'name': name, 'latencies': latencies.get_latencies()}\n for name, latencies in iteritems(time_metrics)]", "def combine(cls, date, time, tzinfo=True):\n if not isinstance(date, real_date):\n raise TypeError(\"date argument must be a date instance\")\n if not isinstance(time, real_time):\n raise TypeError(\"time argument must be a time instance\")\n if tzinfo is True:\n tzinfo = time.tzinfo\n return cls(\n date.year,\n date.month,\n date.day,\n time.hour,\n time.minute,\n time.second,\n time.microsecond,\n tzinfo,\n fold=time.fold,\n )", "def with_time(self):\n key = list(self.keys())[0]\n length = len(self[key])\n time_slices = self[key].time_slices\n\n if time_slices is None:\n raise FeatureError(\"FeatureCollection has no time reference.\")\n\n for i in range(length):\n res = {}\n for key, feature in self.items():\n res[key] = feature.data[feature.name][i]\n yield (time_slices[i], res)", "def merge(self, otr):\n self._duration = otr.get_start() - self.get_start()\n self._duration += otr.get_duration()\n self._line[3] = self._duration", "def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]", "def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def copy_and_append_time_dimension_to_netcdf_dataset(self,dataset_in,dataset_out):\n\n for dim_name,dim_obj in 
list(dataset_in.dimensions.items()):\n dataset_out.createDimension(dim_name,len(dim_obj)\n if not dim_obj.isunlimited() else None)\n dataset_out.createDimension('time',None)\n times = dataset_out.createVariable(\"time\",'f8',(\"time\",))\n times.units = \"years since 0001-01-01 00:00:00.0\"\n times.calendar = \"proleptic_gregorian\"\n times[0] = np.array([0.0])\n for var_name, var_obj in list(dataset_in.variables.items()):\n new_var = dataset_out.createVariable(var_name,var_obj.datatype,var_obj.dimensions\n if (len(var_obj.dimensions) <= 1\n or var_name == 'AREA') else\n [\"time\"] + list(var_obj.dimensions))\n if len(var_obj.dimensions) <= 1 or var_name == 'AREA':\n new_var[:] = var_obj[:]\n else:\n new_var[0,:] = var_obj[:]\n new_var.setncatts({attr_name: var_obj.getncattr(attr_name) for attr_name in var_obj.ncattrs()})", "def merge_metric_stats(self, snapshot):\n\n if not self.__settings:\n return\n\n for key, other in six.iteritems(snapshot.__stats_table):\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def merge(self, other: PerfData):\n self.total_samples += other.total_samples\n if self.total_time == 0.0:\n self.total_time = other.total_time\n self.compile_time = max(self.compile_time, other.compile_time)\n self.programming_time = max(\n self.programming_time, other.programming_time\n )\n if self.est_samples_per_sec == 0.0:\n self.est_samples_per_sec = other.est_samples_per_sec\n else:\n assert (\n self.est_samples_per_sec == other.est_samples_per_sec\n ), \"Expected all fabric-based performance estimates to be identical\"\n\n if self.total_time > 0:\n self.samples_per_sec = float(self.total_samples) / self.total_time\n else:\n self.samples_per_sec = 0.0", "def set_data(self, time, data):\n for diagnostic in self._diagnostics_list:\n out = diagnostic(time, data)", "def copyDataFrom (self, other):\n\n self.localTimeString=other.localTimeString\n self._myHasLocalTimeString=other._myHasLocalTimeString\n \n self.utcTimeString=other.utcTimeString\n self._myHasUtcTimeString=other._myHasUtcTimeString\n \n self.daylightSavingTime=other.daylightSavingTime\n self._myHasDaylightSavingTime=other._myHasDaylightSavingTime\n \n self.epoch=other.epoch\n self._myHasEpoch=other._myHasEpoch\n \n self.utcOffsetMinutes=other.utcOffsetMinutes\n self._myHasUtcOffsetMinutes=other._myHasUtcOffsetMinutes", "def with_time(self):\n if self.time_slices is None:\n raise FeatureError(\"Feature has no time reference.\")\n\n for i, datum in enumerate(self.data[self.name]):\n yield (self.time_slices[i], datum)", "def addData(self, other, time, index):\n\n xoffset = index[0]*other.xdim\n yoffset = index[1]*other.ydim \n zoffset = index[2]*other.zdim\n \n self.data [ time-self.time_range[0], zoffset:zoffset+other.zdim, yoffset:yoffset+other.ydim, xoffset:xoffset+other.xdim] = other.data [:,:,:]", "def add_time_point(self,time, mdv_instance):\n\n self.mdvtc[time] = mdv_instance", "def record_time_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Scope is forced to be empty string if None as\n # scope of None is reserved for apdex metrics.\n\n key = (metric.name, metric.scope or '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = TimeStats(call_count=1,\n total_call_time=metric.duration,\n total_exclusive_call_time=metric.exclusive,\n min_call_time=metric.duration,\n max_call_time=metric.duration,\n sum_of_squares=metric.duration ** 2)\n self.__stats_table[key] = stats\n else:\n 
stats.merge_time_metric(metric)\n\n return key", "def _fill_meas_result(self,meas,from_time,to_time,meas_data):\r\n input=self._pvsr.create_pvsr_object(\"GetMeasuredValuesInput\")\r\n input.ObjType = \"Measurement\"\r\n input.ObjId = meas.Id\r\n input.From = datetime.datetime.fromtimestamp(from_time)\r\n input.To = datetime.datetime.fromtimestamp(to_time)\r\n logging.info(\"Get values, eq: {0}, type: {1}, index: {2}, name: {3}, {4} -> {5}\".format(self._meas[\"equipment\"],meas.Type,meas.Index,meas.DescriptionToShow,input.From,input.To))\r\n meas_res=self._pvsr.getMeasuredValues(input)\r\n \r\n index2mplane_name={}\r\n multiply = None\r\n if \"first\" in self._meas[\"types\"][meas.Type]:\r\n index2mplane_name[0]=self._meas[\"types\"][meas.Type][\"first\"]\r\n if \"second\" in self._meas[\"types\"][meas.Type]:\r\n index2mplane_name[1]=self._meas[\"types\"][meas.Type][\"second\"]\r\n if \"multiply\" in self._meas[\"types\"][meas.Type]:\r\n multiply=int(self._meas[\"types\"][meas.Type][\"multiply\"])\r\n\r\n if hasattr(meas_res,\"D\"):\r\n for d in meas_res.D:\r\n if d.T not in meas_data:\r\n meas_data[d.T]={}\r\n for index,mplane_name in index2mplane_name.items():\r\n if index < len(d.V):\r\n if multiply is not None:\r\n d.V[index]*=multiply\r\n meas_data[d.T][mplane_name]=d.V[index]\r\n else:\r\n meas_data[d.T][mplane_name]=None", "def __add__ ( self, other, resample_opts=None ):\n result = ObservationStorage (datadir=self.datadir, \\\n resample_opts=resample_opts )\n if self.date[0] > other.date[0]:\n start_date = other.date[0]\n else:\n start_date = self.date[0]\n if self.date[-1] > other.date[-1]:\n end_date = other.date[-1]\n else:\n end_date = self.date[-1]\n \n delta = datetime.timedelta ( days=1 )\n this_date = start_date.date()\n end_date = end_date.date() + delta\n \n this_obs_dates = [ x.date() for x in self.date ]\n other_obs_dates = [ x.date() for x in other.date ]\n \n date = [] ; vza = [] ; vaa = [] ; sza = [] ; saa = []\n emulator = [] ; mask = [] ; data_pntr = [] ; spectral = []\n sensor = []\n \n while this_date < end_date:\n if this_date in this_obs_dates:\n iloc = this_obs_dates.index ( this_date )\n date.append ( self.date[iloc] )\n emulator.append ( self.emulator[iloc] )\n vza.append ( self.vza[iloc] )\n sza.append ( self.sza[iloc] )\n vaa.append ( self.vaa[iloc] )\n saa.append ( self.saa[iloc] )\n spectral.append ( self.spectral )\n mask.append ( ( self.get_mask, [iloc] ) )\n sensor.append ( self.sensor )\n \n data_pntr.append ( self._data_pntr[iloc] )\n if this_date in other_obs_dates:\n iloc = other_obs_dates.index ( this_date )\n date.append ( other.date[iloc] )\n emulator.append ( other.emulator[iloc] )\n vza.append ( other.vza[iloc] )\n sza.append ( other.sza[iloc] )\n vaa.append ( other.vaa[iloc] )\n saa.append ( other.saa[iloc] )\n spectral.append ( other.spectral )\n mask.append ( ( other.get_mask, [iloc] ) )\n sensor.append ( other.sensor )\n data_pntr.append ( other._data_pntr[iloc] )\n this_date += delta\n result.vza = vza\n result.vaa = vaa\n result.sza = sza \n result.saa = saa \n result.date = date\n result.spectral = spectral\n result.masks = mask\n result.sensor = sensor\n result.emulator = emulator\n result._data_pntr = data_pntr\n return result", "def time_stats(df):", "def get_metric_data(config, metric_list, metric_grouping, start_time, end_time, collected_data_map):\n\n def format_data_entry(json_data_entry):\n metric_name = json_data_entry.get('metric')\n host_name = json_data_entry.get('tags', {}).get('host') or 'unknownHost'\n dps = 
json_data_entry.get('dps', {})\n metric_value = None\n header_field = normalize_key(metric_name) + \"[\" + host_name + \"]:\" + str(\n get_grouping_id(metric_name, metric_grouping))\n mtime = 0\n for stime, val in dps.items():\n if int(stime) > mtime:\n metric_value = val\n mtime = int(stime)\n\n epoch = mtime * 1000\n\n if epoch in collected_data_map:\n timestamp_value_map = collected_data_map[epoch]\n else:\n timestamp_value_map = {}\n\n timestamp_value_map[header_field] = str(metric_value)\n collected_data_map[epoch] = timestamp_value_map\n\n json_data = {\n \"token\": config['OPENTSDB_TOKEN'],\n \"start\": start_time,\n \"end\": end_time,\n \"queries\": map(lambda m: {\n \"aggregator\": \"avg\",\n \"downsample\": \"1m-avg\",\n \"metric\": m.encode('ascii')\n }, metric_list)\n }\n\n url = config[\"OPENTSDB_URL\"] + \"/api/query\"\n response = requests.post(url, data=json.dumps(json_data))\n if response.status_code == 200:\n rawdata_list = response.json()\n logger.debug(\"Get metric data from opentsdb: \" + str(len(rawdata_list)))\n\n # format metric and save to collected_data_map\n map(lambda d: format_data_entry(d), rawdata_list)", "def merge(self, dataset):\n def merge_data(source, dest):\n for key, value in source.items():\n if isinstance(value, dict):\n merge_data(value, dest.setdefault(key, {}))\n else:\n dest[key] = value\n return dest\n\n merge_data(dataset.data, self._data)\n\n for h in dataset.task_history:\n if h not in self._task_history:\n self._task_history.append(h)", "def metrics_times(self, times_data):\n url = _METRICS_URL_TEMPLATE.format(base_url=self._events_api_url_base, endpoint='times')\n return self._post(url, times_data)", "def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n for metric in self.metrics:\n metric.update(data)\n\n return self", "def __add_segment(self, other, time):\n self.data_tmp.add_data(other, time)\n return self.data_tmp", "def merge_logs(self):\n ourlog = LogData()\n for l in self.data_set:\n ourlog.entries = ourlog.entries + l.entries\n ourlog.sort_time()\n self.finalized_data = ourlog", "def ConvertTime( self ) :\n \n # modules:\n import logging\n import datetime\n import netCDF4\n import numpy\n \n #\n # Original 'Time' units and description:\n #\n # title = \"Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC), 1 January 1970.\"\n # units = \"s\"\n #\n # Create new field 'Datetime' field with units:\n # units = \"Seconds since 1970-01-01 00:00'\n #\n # select:\n varid = self.swaths[self.component]['Geolocation Fields']['Time']\n # values:\n tvalues = varid['data']\n # extract description:\n long_name = varid['long_name'].decode('latin-1')\n # check ...\n key = 'Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC),'\n if long_name.startswith(key) :\n # remove leading description:\n time0 = long_name.replace(key,'').replace('.','').strip()\n # extract datetime object:\n t0 = datetime.datetime.strptime(time0,'%d %B %Y')\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n if 'mask' in dir(tvalues) :\n values1d = netCDF4.num2date( tvalues.data, var['units'] )\n else :\n values1d = netCDF4.num2date( tvalues , var['units'] )\n #endif\n # alternative:\n # \"Time at Start of Scan (s, TAI93)\"\n elif 'TAI' in long_name :\n # find start:\n i0 = long_name.index('TAI')\n # extract:\n year = 
int(long_name[i0+3:].replace(')',''))\n # convert to 4-digits if necessary:\n if year < 100 :\n if year > 50 :\n year = 1900 + year\n else :\n year = 2000 + year\n #endif\n #endif\n # reference time:\n t0 = datetime.datetime(year,1,1,0,0,0)\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n values1d = netCDF4.num2date( tvalues, var['units'] )\n else :\n self.logger.error( 'could not convert time units \"%s\"' % long_name )\n self.logger.error( 'first value : %f' % tvalues[0] )\n raise Exception\n #endif\n \n # expand to 2D:\n var['data'] = numpy.zeros( (self.ntime,self.np), values1d.dtype )\n for ip in range(self.np) :\n var['data'][:,ip] = values1d\n #endfor\n \n # set dim names:\n var['dimnames'] = ('time','pixel')\n \n # store:\n self.swaths[self.component]['Geolocation Fields']['Datetime'] = var", "def flush(self):\n with self._lock:\n batch = self._batch\n timestamps = self._timestamps\n\n items = []\n for identity, value in batch.items():\n metric = {}\n typ, name, tags = identity\n metric[\"name\"] = name\n if typ:\n metric[\"type\"] = typ\n else:\n metric[\"timestamp\"] = timestamps[identity]\n\n if tags:\n metric[\"attributes\"] = dict(tags)\n\n metric[\"value\"] = value\n items.append(metric)\n\n items = tuple(items)\n\n batch.clear()\n timestamps.clear()\n\n common = self._common.copy()\n common[\"timestamp\"] = self._interval_start\n now = int(time.time() * 1000.0)\n interval = now - self._interval_start\n common[\"interval.ms\"] = interval\n\n self._interval_start = now\n\n return items, common", "def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)", "def _set_time_bnds(in_dir, var):\n # This is a complicated expression, but necessary to keep local\n # variables below the limit, otherwise prospector complains.\n cubelist = iris.load(\n glob.glob(\n os.path.join(in_dir, var['file'].replace('c3s', 'c3s_regridded'))))\n\n # The purpose of the following loop is to remove any attributes\n # that differ between cubes (otherwise concatenation over time fails).\n # In addition, care is taken of the time coordinate, by adding the\n # time_coverage attributes as time_bnds to the time coordinate.\n for n_cube, _ in enumerate(cubelist):\n time_coverage_start = cubelist[n_cube].\\\n attributes.pop('time_coverage_start')\n time_coverage_end = cubelist[n_cube].\\\n attributes.pop('time_coverage_end')\n\n # Now put time_coverage_start/end as time_bnds\n # Convert time_coverage_xxxx to datetime\n bnd_a = datetime.strptime(time_coverage_start, \"%Y-%m-%dT%H:%M:%SZ\")\n bnd_b = datetime.strptime(time_coverage_end, \"%Y-%m-%dT%H:%M:%SZ\")\n\n # Put in shape for time_bnds\n time_bnds_datetime = [bnd_a, bnd_b]\n\n # Read dataset time unit and calendar from file\n dataset_time_unit = str(cubelist[n_cube].coord('time').units)\n dataset_time_calender = cubelist[n_cube].coord('time').units.calendar\n # Convert datetime\n time_bnds = cf_units.date2num(time_bnds_datetime, dataset_time_unit,\n dataset_time_calender)\n # Put them on the file\n cubelist[n_cube].coord('time').bounds = time_bnds\n\n return cubelist", "def fix_time_fields(self):\n time_fields = {\"Time 
of day\": lambda time: time.hour, \"Time of year (month)\": lambda time: time.month}\n for time_field in time_fields.keys():\n for i in range(self.df.shape[0]):\n value = self.df[time_field][i]\n if type(value) is datetime.time or type(value) is datetime.datetime:\n self.df[time_field].loc[i] = time_fields[time_field](value)", "def mergeAggregatedCsvData(self, contexts, obj, aggData1, aggData2):\n return aggData1 + aggData2", "def __init__(self):\n self.timeMap = defaultdict(list)", "def __init__(self):\n self.timeMap = defaultdict(list)", "def merge_toggl_time_entries(self, time_entries):\n tg = Toggl()\n d = {}\n for entry in time_entries:\n if entry.get('billable'):\n if entry.get('tags') and tg.BOOKED_TAG in entry['tags']:\n status = 'booked'\n else:\n status = 'not-booked'\n date = parser.parse(entry['start']).date()\n if not entry.get('pid'):\n self.log(\"Couldn't find associated project for entry: %s\" % (str(entry)))\n continue\n unique_id = str(entry['pid']) + str(date) + status\n if not entry.get('description'):\n entry['description'] = \"\"\n if d.get(unique_id):\n d[unique_id]['duration'] += entry['duration']\n d[unique_id]['merged_ids'].append(entry['id'])\n if d[unique_id].get('description'):\n if entry['description'].strip() not in d[unique_id]['description']:\n d[unique_id]['description'] += ' / ' + entry['description']\n else:\n d[unique_id]['description'] = entry['description']\n else:\n entry['merged_ids'] = [entry['id']]\n d[unique_id] = entry\n return d.values()", "def combine(date, time, tzinfo):\n return tzinfo.localize(datetime.datetime.combine(date, time))", "def set_time_series(data):\n t = pandas.Series(\n (data['TIME_StartTime'] -\n data['TIME_StartTime'].values[0]) / 1.0e6, name='t, sec')\n data = pandas.DataFrame(\n data.values,\n columns=data.columns, index=t)\n return data", "def _resample_and_merge(ts, agg_dict):\n grouped = ts.group_serie(agg_dict['sampling'])\n existing = agg_dict.get('return')\n name = agg_dict.get(\"name\")\n resource = None if name is None else mock.Mock(id=str(uuid.uuid4()))\n metric = mock.Mock(id=str(uuid.uuid4()), name=name)\n agg_dict['return'] = (\n processor.MetricReference(metric, \"mean\", resource),\n carbonara.AggregatedTimeSerie.from_grouped_serie(\n grouped,\n carbonara.Aggregation(agg_dict['agg'],\n agg_dict['sampling'],\n None)))\n if existing:\n existing[2].merge(agg_dict['return'][2])\n agg_dict['return'] = existing", "def time_metrics(self, stats, root, parent):\n\n product = self.product\n operation = self.operation or 'other'\n target = self.target\n\n # Determine the scoped metric\n\n statement_metric_name = 'Datastore/statement/%s/%s/%s' % (product,\n target, operation)\n\n operation_metric_name = 'Datastore/operation/%s/%s' % (product,\n operation)\n\n if target:\n scoped_metric_name = statement_metric_name\n else:\n scoped_metric_name = operation_metric_name\n\n yield TimeMetric(name=scoped_metric_name, scope=root.path,\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped rollup metrics\n\n yield TimeMetric(name='Datastore/all', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/all' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n if root.type == 'WebTransaction':\n yield TimeMetric(name='Datastore/allWeb', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allWeb' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n else:\n yield 
TimeMetric(name='Datastore/allOther', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allOther' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped operation metric\n\n yield TimeMetric(name=operation_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped statement metric\n\n if target:\n yield TimeMetric(name=statement_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped instance Metric\n\n if self.instance_hostname and self.port_path_or_id:\n\n instance_metric_name = 'Datastore/instance/%s/%s/%s' % (product,\n self.instance_hostname, self.port_path_or_id)\n\n yield TimeMetric(name=instance_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)", "def aggregated_second(self, data):\n if self.results:\n if self.results[-1][\"ts\"] >= data[\"ts\"]:\n raise AssertionError(\"TS sequence wrong: %s>=%s\" % (self.results[-1][\"ts\"], data[\"ts\"]))\n self.results.append(data)", "def _apply_all_time_reductions(self, full_ts, monthly_ts, eddy_ts):\n logging.info(self._print_verbose(\"Applying desired time-\"\n \"reduction methods.\"))\n # Determine which are regional, eddy, time-mean.\n reduc_specs = [r.split('.') for r in self.dtype_out_time]\n reduced = {}\n for reduc, specs in zip(self.dtype_out_time, reduc_specs):\n func = specs[-1]\n if 'eddy' in specs:\n data = eddy_ts\n elif 'time-mean' in specs:\n data = monthly_ts\n else:\n data = full_ts\n if 'reg' in specs:\n reduced.update({reduc: self.region_calcs(data, func)})\n else:\n reduced.update({reduc: self._time_reduce(data, func)})\n return OrderedDict(sorted(reduced.items(), key=lambda t: t[0]))", "def create_time_s(df, medidor, freq='15T'):\n dates_complete = pd.date_range('1/18/2013', '02/09/2014', freq='15T')\n # this dates take them from the file\n my_complete_series = pd.Series(dates_complete)\n frame1 = my_complete_series.to_frame()\n frame1.columns = ['key']\n merged = pd.merge(frame1, df, on='key', how='outer')\n merged = merged.sort('key')\n # fill the merged file with the number of the meter\n merged['medidor'].fillna(medidor, inplace=True)\n\n return merged", "def _write_time_cube(self, cube, key_list):\n data = cube.data[:]\n coords = cube.coord('time')[:]\n for t in range(0, data.shape[0]):\n value = round_variable(self.input_data.get_value(\n InputType.VARIABLE)[0], data[t])\n with iris.FUTURE.context(cell_datetime_objects=True):\n time_str = coords[t].cell(\n 0).point.strftime('%Y-%m-%d')\n try:\n self.data_dict[time_str].append(value)\n except KeyError:\n key_list.append(time_str)\n self.data_dict[time_str] = [value]", "def _get_aggregated_data(self, data_start, data_end, resolution, aggregation_type):\n from .timeseriesdata import TimeSeriesData\n\n # Multiple of resolution\n # We extract just the values_list here because doing it in a\n # separate statement results in django querying the database\n # twice...\n raw = TimeSeriesData.objects.filter(\n ts__gte=data_start,\n ts__lt=data_end,\n sensor=self,\n ).values_list(\"value\", \"ts\")\n\n if not raw:\n # This should raise above but for some reason it doesn't when using\n # values_list\n raise TimeSeriesData.DoesNotExist\n\n # How many samples we would expect if there was no missing data\n expected_samples = (data_end - data_start).total_seconds()/self.resolution\n\n if resolution is AGGREGATE_TO_ONE_VALUE:\n aggregation_factor = expected_samples\n else:\n # Already checked that this divides 
nicely\n # NOTE\n # should aggregation_factor ALWAYS be expected_samples?\n aggregation_factor = int(resolution//self.resolution)\n\n logger.debug(\"%s objects to aggregate\", len(raw))\n\n aggregation_engine = aggregation_implementations[settings.ZCONNECT_TS_AGGREGATION_ENGINE]\n\n logger.debug(\"Aggregating '%s' with %s, factor %s\",\n aggregation_type, settings.ZCONNECT_TS_AGGREGATION_ENGINE,\n aggregation_factor)\n\n data = aggregation_engine(\n raw,\n aggregation_type,\n aggregation_factor,\n expected_samples,\n data_start,\n data_end,\n self,\n )\n\n return data", "def _aggregate_log_values(self, source, dest):\n remove = []\n for key, item in source.items():\n if \"data\" not in item:\n # Assume it's a sub-group\n dest[key] = {}\n self._aggregate_log_values(item, dest[key])\n else:\n aggregator = self._get_aggregator_for_key(key, item['agg'])\n value = aggregator(item['data'])\n if item['precision'] is not None:\n value = round(value, item['precision'])\n dest[key] = value\n if item['scope'] == 'get':\n remove.append(key)\n for key in remove:\n del source[key]", "def test_fits_to_time_meta(self, table_types):\n t = table_types()\n t['a'] = Time(self.time, format='isot', scale='utc')\n t.meta['DATE'] = '1999-01-01T00:00:00'\n t.meta['MJD-OBS'] = 56670\n\n # Test for default write behaviour (full precision) and read it\n # back using native astropy objects; thus, ensure its round-trip\n t.write(self.temp('time.fits'), format='fits', overwrite=True)\n tm = table_types.read(self.temp('time.fits'), format='fits',\n astropy_native=True)\n\n # Test DATE\n assert isinstance(tm.meta['DATE'], Time)\n assert tm.meta['DATE'].value == t.meta['DATE'] + '(UTC)'\n assert tm.meta['DATE'].format == 'fits'\n # Default time scale according to the FITS standard is UTC\n assert tm.meta['DATE'].scale == 'utc'\n\n # Test MJD-xxx\n assert isinstance(tm.meta['MJD-OBS'], Time)\n assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS']\n assert tm.meta['MJD-OBS'].format == 'mjd'\n assert tm.meta['MJD-OBS'].scale == 'utc'\n\n # Explicitly specified Time Scale\n t.meta['TIMESYS'] = 'ET'\n\n t.write(self.temp('time.fits'), format='fits', overwrite=True)\n tm = table_types.read(self.temp('time.fits'), format='fits',\n astropy_native=True)\n\n # Test DATE\n assert isinstance(tm.meta['DATE'], Time)\n assert tm.meta['DATE'].value == t.meta['DATE'] + '(UTC)'\n assert tm.meta['DATE'].scale == 'utc'\n\n # Test MJD-xxx\n assert isinstance(tm.meta['MJD-OBS'], Time)\n assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS']\n assert tm.meta['MJD-OBS'].scale == FITS_DEPRECATED_SCALES[t.meta['TIMESYS']]\n\n # Test for conversion of time data to its value, as defined by its format\n t['a'].info.serialize_method['fits'] = 'formatted_value'\n t.write(self.temp('time.fits'), format='fits', overwrite=True)\n tm = table_types.read(self.temp('time.fits'), format='fits')\n\n # Test DATE\n assert not isinstance(tm.meta['DATE'], Time)\n assert tm.meta['DATE'] == t.meta['DATE']\n\n # Test MJD-xxx\n assert not isinstance(tm.meta['MJD-OBS'], Time)\n assert tm.meta['MJD-OBS'] == t.meta['MJD-OBS']\n\n assert (tm['a'] == t['a'].value).all()", "def add_time_delta(time_offset_value, date_time, dataset): \n \n if 'minutes' in time_offset:\n date_time_delta = [ timedelta(minutes = float(i) ) + time_offset_value for i in date_time ]\n elif 'hours' in time_offset:\n date_time_delta = [ timedelta(hours = float(i) ) + time_offset_value for i in date_time ] \n elif 'seconds' in time_offset: \n date_time_delta = [ timedelta(seconds = float(i) ) + 
time_offset_value for i in date_time ] \n \n \n '''\n if 'era' not in dataset:\n \n if 'minutes' in time_offset:\n date_time_delta = [ timedelta(minutes = float(i) ) + time_offset_value for i in date_time ]\n elif 'hours' in time_offset:\n date_time_delta = [ timedelta(hours = float(i) ) + time_offset_value for i in date_time ] \n elif 'seconds' in time_offset: \n date_time_delta = [ timedelta(seconds = float(i) ) + time_offset_value for i in date_time ] \n else:\n date_time = np.array( [ datetime.strptime(str(int(i)), '%Y%m%d%H') for i in date_time ] )# convert to datetime object \n ''' \n \n #else:\n # print('check if time is wrong !!!! (should never happen)')\n # sys.exit() \n #unique_dt = [i for i in [ time_offset_value + j for j in delta ] ] \n #unique_dt = [ i +0 ]\n date_time_delta = [ i.replace(minute=0, second=0) for i in date_time_delta ] \n \n return date_time_delta", "def record_time_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_time_metric(metric)", "def OPCtimetransform(data, to):\n \n remove_times = []\n outtimes = []\n times = {'ms':[],'SS':[],'MM':[],'HH':[]}\n\n for i in range(0, len(data)):\n times['HH'] = 0\n times['MM'] = 0\n times['SS'] = 0\n times['ms'] = 0\n\n item = data[i]\n \n try:\n if len(item.split('.')[1]) < 2:\n item += '0'\n except IndexError:\n item += '.00'\n if len(item) < 9:\n item = item.zfill(9)\n if int(item[:2]) > 23:\n item = '0' + item\n \n # remove items with extra zero (2319010.00 to 231910)\n if len(item) > 9:\n olditem = item\n newitem = item[:4] + item[5:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n item = newitem\n else:\n pass\n try:\n md = dt.datetime.strptime(item, \"%H%M%S.%f\")\n \n # round off items which exceed 59 minutes or 59 seconds \n # (i.e. 
146001 to 150001.)\n except ValueError:\n \n try:\n times['HH'] = int(item[0:2])\n times['MM'] = int(item[2:4])\n times['SS'] = int(item[4:6])\n times['ms'] = int(item[7:9])\n except ValueError:\n print(i, item)\n\n if times['SS'] > 59:\n times['MM'] += 1\n times['SS'] = 0\n if times['MM'] > 59:\n times['HH'] += 1\n times['MM'] = 0\n # discard items which exceed 23 hours\n if times['HH'] > 23:\n times['HH'] = 23\n print( ('resetting value %s')%(item) )\n \n\n md = dt.datetime(1900,1,1,times['HH'], times['MM'], times['SS']) \n\n \n outtimes.append( dt.datetime.strftime(md, to) )\n\n return outtimes", "def map_scan_time(time, nmap_store):\n nmap_store[\"start_time\"] = datetime.datetime.fromtimestamp(int(time.get('starttime')))\n nmap_store[\"stop_time\"] = datetime.datetime.fromtimestamp(int(time.get('endtime')))", "def __init__(self, data, t0=None, sampling_interval=None,\r\n sampling_rate=None, duration=None, time=None, time_unit='s',\r\n metadata=None):\r\n\r\n #If a UniformTime object was provided as input:\r\n if isinstance(time, UniformTime):\r\n c_fac = time._conversion_factor\r\n #If the user did not provide an alternative t0, get that from the\r\n #input:\r\n if t0 is None:\r\n t0 = time.t0\r\n #If the user did not provide an alternative sampling interval/rate:\r\n if sampling_interval is None and sampling_rate is None:\r\n sampling_interval = time.sampling_interval\r\n sampling_rate = time.sampling_rate\r\n #The duration can be read either from the length of the data, or\r\n #from the duration specified by the time-series:\r\n if duration is None:\r\n duration = time.duration\r\n length = time.shape[-1]\r\n #If changing the duration requires a change to the\r\n #sampling_rate, make sure that this was explicitely required by\r\n #the user - if the user did not explicitely set the\r\n #sampling_rate, or it is inconsistent, throw an error:\r\n data_len = np.array(data).shape[-1]\r\n\r\n if (length != data_len and\r\n sampling_rate != float(data_len * c_fac) / time.duration):\r\n e_s = \"Length of the data (%s) \" % str(len(data))\r\n e_s += \"specified sampling_rate (%s) \" % str(sampling_rate)\r\n e_s += \"do not match.\"\r\n raise ValueError(e_s)\r\n #If user does not provide a\r\n if time_unit is None:\r\n time_unit = time.time_unit\r\n\r\n else:\r\n ##If the input was not a UniformTime, we need to check that there\r\n ##is enough information in the input to generate the UniformTime\r\n ##array.\r\n\r\n #There are different valid combinations of inputs\r\n tspec = tuple(x is not None for x in\r\n [sampling_interval, sampling_rate, duration])\r\n\r\n tspec_arg_names = [\"sampling_interval\",\r\n \"sampling_rate\",\r\n \"duration\"]\r\n\r\n #The valid configurations\r\n valid_tspecs = [\r\n #interval, length:\r\n (True, False, False),\r\n #interval, duration:\r\n (True, False, True),\r\n #rate, length:\r\n (False, True, False),\r\n #rate, duration:\r\n (False, True, True),\r\n #length, duration:\r\n (False, False, True)\r\n ]\r\n\r\n if tspec not in valid_tspecs:\r\n raise ValueError(\"Invalid time specification. 
\\n\"\r\n \"You provided: %s\\n %s see docstring for more info.\" % (\r\n str_tspec(tspec, tspec_arg_names),\r\n str_valid_tspecs(valid_tspecs, tspec_arg_names)))\r\n\r\n # Make sure to grab the time unit from the inputs, if it is provided:\r\n if time_unit is None:\r\n # If you gave us a duration with time_unit attached\r\n if isinstance(duration, TimeInterface):\r\n time_unit = duration.time_unit\r\n # Otherwise, you might have given us a sampling_interval with a\r\n # time_unit attached:\r\n elif isinstance(sampling_interval, TimeInterface):\r\n time_unit = sampling_interval.time_unit\r\n\r\n # Calculate the sampling_interval or sampling_rate from each other and\r\n # assign t0, if it is not already assigned:\r\n if sampling_interval is None:\r\n if isinstance(sampling_rate, Frequency):\r\n c_f = time_unit_conversion[time_unit]\r\n sampling_interval = sampling_rate.to_period() / float(c_f)\r\n elif sampling_rate is None:\r\n data_len = np.asarray(data).shape[-1]\r\n sampling_interval = float(duration) / data_len\r\n sampling_rate = Frequency(1.0 / sampling_interval,\r\n time_unit=time_unit)\r\n else:\r\n c_f = time_unit_conversion[time_unit]\r\n sampling_rate = Frequency(sampling_rate, time_unit='s')\r\n sampling_interval = sampling_rate.to_period() / float(c_f)\r\n else:\r\n if sampling_rate is None: # Only if you didn't already 'inherit'\r\n # this property from another time object\r\n # above:\r\n if isinstance(sampling_interval, TimeInterface):\r\n c_f = time_unit_conversion[sampling_interval.time_unit]\r\n sampling_rate = Frequency(1.0 / (float(sampling_interval) /\r\n c_f),\r\n time_unit=sampling_interval.time_unit)\r\n else:\r\n sampling_rate = Frequency(1.0 / sampling_interval,\r\n time_unit=time_unit)\r\n\r\n #Calculate the duration, if that is not defined:\r\n if duration is None:\r\n duration = np.asarray(data).shape[-1] * sampling_interval\r\n\r\n if t0 is None:\r\n t0 = 0\r\n\r\n # Make sure to grab the time unit from the inputs, if it is provided:\r\n if time_unit is None:\r\n #If you gave us a duration with time_unit attached\r\n if isinstance(duration, TimeInterface):\r\n time_unit = duration.time_unit\r\n #Otherwise, you might have given us a sampling_interval with a\r\n #time_unit attached:\r\n elif isinstance(sampling_interval, TimeInterface):\r\n time_unit = sampling_interval.time_unit\r\n\r\n #Otherwise, you can still call the common constructor to get the real\r\n #object initialized, with time_unit set to None and that will generate\r\n #the object with time_unit set to 's':\r\n TimeSeriesBase.__init__(self, data, time_unit, metadata=metadata)\r\n\r\n self.time_unit = time_unit\r\n self.sampling_interval = TimeArray(sampling_interval,\r\n time_unit=self.time_unit)\r\n self.t0 = TimeArray(t0, time_unit=self.time_unit)\r\n self.sampling_rate = sampling_rate\r\n self.duration = TimeArray(duration, time_unit=self.time_unit)", "def get_dataframe_with_time_column(self):\n return pd.concat([self.get_time_dataframe(),\n self.get_x_dataframe_without_time_column(),\n self.get_y_dataframe_without_time_column()], axis=1)", "def readSrc_byTime(self):\n for msg in self.srcFile:\n msg = msg[0:-1] # remove \\n at the end of the string\n if '%%' in msg:\n self.srcHeader.append(msg)\n else:\n msg = msg.split()\n time = float(msg[0])\n meas = msg[1]\n sens = msg[2]\n valu = msg[3]\n if time not in self.srcData: # none from this time yet\n self.srcData[time] = {}\n if sens not in self.srcData[time]: # none at this time from this gSensor\n self.srcData[time][sens] = {}\n 
self.srcData[time][sens][meas] = valu # assume only one message per meas from sens at a time", "def update_data(self, data):\n start_time = data.index[-1].strftime(\"%Y-%m-%dT%H:%M:%S.000000Z\")\n temp_data = self.gather_data(start=start_time)\n temp_data = self._list_to_df(temp_data)\n if (len(temp_data) > 1):\n # temp_data[0] is the same as data[-1]\n out_data = data.append(temp_data[1:])\n return out_data", "def __set_time_data(self, tdata):\n assert tdata.shape[-1] == self._nt\n self._in_time = tdata\n self._in_freq = None", "def load_time_index(self, attrs, resolution=None):\n ts = time.time()\n logger.info('Rechunking time_index')\n with h5py.File(self._src_path, 'r') as f:\n time_index = f['time_index'][...]\n\n timezone = attrs['attrs'].get('timezone', None)\n if timezone is not None or resolution is not None:\n time_index = pd.to_datetime(time_index.astype(str))\n if timezone is not None:\n if time_index.tz is not None:\n time_index = time_index.tz_convert(timezone)\n else:\n time_index = time_index.tz_localize(timezone)\n\n if resolution is not None:\n resample = pd.date_range(time_index.min(), time_index.max(),\n freq=resolution)\n if len(resample) > len(time_index):\n msg = (\"Resolution ({}) must be > time_index resolution \"\n \"({})\".format(resolution, time_index.freq))\n logger.error(msg)\n raise RuntimeError(msg)\n\n self._time_slice = time_index.isin(resample)\n time_index = time_index[self.time_slice]\n\n time_index = time_index.astype(str)\n dtype = 'S{}'.format(len(time_index[0]))\n time_index = np.array(time_index, dtype=dtype)\n\n attrs['dtype'] = time_index.dtype\n\n ds = self.init_dset('time_index', time_index.shape, attrs)\n ds[...] = time_index\n logger.info('- time_index transfered')\n tt = (time.time() - ts) / 60\n logger.debug('\\t- {:.2f} minutes'.format(tt))", "def aggregate_data(mts, feature, target):\r\n set_dict = dict()\r\n set_dict['mt'] = mts\r\n set_dict['feature'] = feature\r\n set_dict['target'] = target\r\n \r\n return set_dict", "def at(self, time_slices):\n\n if self.base is not None:\n return self.base.at(time_slices)\n\n if isinstance(time_slices, TimeSlice):\n time_slices = [time_slices]\n\n # join the time slice values\n timed_data = pd.DataFrame(columns=self.data.columns)\n\n # make the new data\n for slice_t in time_slices:\n slice_index = (slice_t.time <= self.data.index) & (\n self.data.index < slice_t.time + slice_t.duration\n )\n timed_data.loc[slice_t.time] = self.aggregate(\n self.data[slice_index], axis=0\n )\n\n # return the new feature object\n return Feature(\n data=timed_data,\n aggregate=self.aggregate,\n base=self,\n time_slices=time_slices,\n )", "def __add__(self, other):\n if not isinstance(other, SMTimeSeries):\n raise TypeError(\"NotImplemented Error\")\n ts = self._fsm.get(self._id) + other._fsm.get(other._id)\n return SMTimeSeries(ts._time, ts._value, self._fsm)", "def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)", "def __init__(self, metricName, timeResolutions = (86400,)):\n self.metric = metricName\n self.timeResolutions = timeResolutions", "def set_timestamp(self, data):\n if \"hittime\" in data: # an absolute timestamp\n data[\"qt\"] = self.hittime(timestamp=data.pop(\"hittime\", None))\n if 
\"hitage\" in data: # a relative age (in seconds)\n data[\"qt\"] = self.hittime(age=data.pop(\"hitage\", None))", "def get_accumulated_data(self, topic, start_time, end_time, units):\n ignore_start_time = self._get_value('ignore_start_time', topic)\n ignore_end_time = self._get_value('ignore_end_time', topic)\n adjust_start_time = self._get_value('adjust_start_time', topic)\n adjust_end_time = self._get_value('adjust_end_time', topic)\n\n if ignore_start_time:\n self.logger.debug(\"Service ignoring start time.\")\n start_ts = self.peek_datetime(topic) - adjust_start_time\n else:\n start_ts = start_time - adjust_start_time\n\n if ignore_end_time:\n self.logger.debug(\"Service ignoring end time.\")\n end_ts = self.peek_last_datetime(topic) + adjust_end_time\n else:\n end_ts = end_time + adjust_end_time\n\n self.logger.debug(\"Service processing interval: %f %f\" %(start_ts, end_ts))\n accumulator = weewx.accum.Accum(weeutil.weeutil.TimeSpan(start_ts, end_ts))\n\n for data in self.get_data(topic, end_ts):\n if data:\n try:\n self.logger.debug(\"Service data to accumulate: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(data['dateTime']), to_sorted_string(data)))\n accumulator.addRecord(data)\n except weewx.accum.OutOfSpan:\n self.logger.info(\"Service ignoring record outside of interval %f %f %f %s\"\n %(start_ts, end_ts, data['dateTime'], (to_sorted_string(data))))\n else:\n break\n\n target_data = {}\n if not accumulator.isEmpty:\n aggregate_data = accumulator.getRecord()\n self.logger.debug(\"Service data prior to conversion is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(aggregate_data['dateTime']), to_sorted_string(aggregate_data)))\n target_data = weewx.units.to_std_system(aggregate_data, units)\n self.logger.debug(\"Service data after to conversion is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(target_data['dateTime']), to_sorted_string(target_data)))\n else:\n self.logger.debug(\"Dervice queue was empty\")\n\n # Force dateTime to packet's datetime so that the packet datetime is not updated to the MQTT datetime\n if ignore_end_time:\n target_data['dateTime'] = end_time\n\n return target_data", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def time(self):\n return self[self.time_columns]", "def time(self):\n return self[self.time_columns]", "def OPCtimetransformOld(data, to):\n outtimes = []\n \n times = {\n 'ms':[],\n 'SS':[],\n 'MM':[],\n 'HH':[]\n }\n for i in range(0, len(data)):\n item = data[i]\n try: \n times['HH'].append(int(item[0:2]))\n times['MM'].append(int(item[2:4]))\n times['SS'].append(int(item[4:6]))\n times['ms'].append(int(item[7:9]))\n except ValueError:\n # strange value 2319010.00 in 201129 file...\n olditem = item\n newitem = item[:4] + item[4+1:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n try:\n times['HH'].append(int(newitem[0:2]))\n times['MM'].append(int(newitem[2:4]))\n times['SS'].append(int(newitem[4:6]))\n times['ms'].append(int(newitem[7:9]))\n except ValueError:\n print(newitem)\n\n # OPC times go up to 60 minutes. 
This is corrected by moving one minute\n times['MM'] = [max(0,x-1) for x in times['MM']]\n times['SS'] = [max(0,x-1) for x in times['SS']]\n\n for i in range(0, len(data)):\n md = dt.datetime(1900,1,1,times['HH'][i], times['MM'][i], times['SS'][i]) \n outtimes.append( dt.datetime.strftime(md, to))\n\n return outtimes", "def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics", "def combine_date_time(time, date):\n datetime_time = get_time_datetime(time)\n return date.replace(hour=datetime_time.hour, minute=datetime_time.minute, second=0)", "def get_itime_section_data(date, time):\n return np.array([time, date.day, date.month, date.year, -2345, 1, 0, -2345, -2345, -2345, 0, 0, 0])", "def add_metrics_to_monitor_object(self, communication_time, metrics_array):\n for index in range(len(metrics_array)):\n if metrics_array[index] == '1':\n self.monitor_functions[index]()\n\n self.monitoring_object['node_id'] = self.object_id\n self.server_connection.send_packet(json.dumps(\n self.monitoring_object, indent=1))\n\n pid = Timer(int(communication_time),\n self.add_metrics_to_monitor_object,\n args=(communication_time, metrics_array,))\n pid.start()", "def conv_time_units(cube1,cube2):\n time_coord1=cube1.coord('time')\n time_units1=time_coord1.units\n #\n time_coord2=cube2.coord('time')\n time_units2=time_coord2.units\n #\n new_time_vals=[time_units2.date2num(time_units1.num2date(xx)) for xx in time_coord1.points]\n new_time_coord=iris.coords.DimCoord(new_time_vals,standard_name='time',units=time_units2)\n #\n coord_names=[dimc.standard_name for dimc in cube1.dim_coords]\n time_index=coord_names.index('time')\n cube1.remove_coord('time')\n cube1.add_dim_coord(new_time_coord,time_index)", "def from_object(obj):\n if isinstance(obj, PartialTime):\n return obj\n\n d = {}\n for attr in partialtime_attrs:\n try:\n d[attr] = getattr(obj, attr)\n continue\n 
except AttributeError:\n pass\n\n try:\n d[attr] = obj[attr]\n except (KeyError, TypeError):\n pass\n\n pt = PartialTime(**d)\n return pt", "def __init__(self, time, metadata):\n self.time = time\n self.metadata = metadata", "def convert_timestamp_to_object(data):\n for k, value in data.items():\n value_type = value.split(\"::\", 1)[0]\n if value_type == \"datetime\":\n timestamp = int(value.split(\"::\", 1)[1])\n value = datetime.fromtimestamp(timestamp)\n elif value_type == \"date\":\n timestamp = int(value.split(\"::\", 1)[1])\n value = date.fromtimestamp(timestamp)\n data[k] = value\n return data", "def get_data_at_time(self, time=None, tolerance=0.0):\n if time is None:\n # If time is not specified, assume we want the entire time\n # set. Skip all the overhead, don't create a new object, and\n # return self.\n return self\n is_iterable = _is_iterable(time)\n time_iter = iter(time) if is_iterable else (time,)\n indices = []\n # Allocate indices list dynamically to support a general iterator\n # for time. Not sure if this will ever matter...\n for t in time_iter:\n if t in self._time_idx_map:\n idx = self._time_idx_map[t]\n else:\n idx = find_nearest_index(self._time, t, tolerance=tolerance)\n if idx is None:\n raise RuntimeError(\n \"Time point %s is invalid within tolerance %s\" % (t, tolerance)\n )\n indices.append(idx)\n if not is_iterable:\n indices = indices[0]\n return self.get_data_at_time_indices(indices)", "def dict_time(self, workspace_unique_id=None, subset_unique_id=None, request=None):\n workspace_object = self._get_workspace_object(unique_id=workspace_unique_id) \n subset_object = workspace_object.get_subset_object(subset_unique_id) \n if not subset_object:\n self._logger.warning('Could not find subset object {}. Subset is probably not loaded.'.format(subset_unique_id))\n return {}\n\n data_filter_object = subset_object.get_data_filter_object('step_1')\n if request:\n data_filter_object.set_filter(filter_type='include_list', \n filter_name='MYEAR', \n data=request['year_range'])\n# data_filter_object.set_filter(filter_type='include_list', \n# filter_name='MONTH', \n# data=request['month_list'])\n \n else:\n year_list = sorted(map(int, data_filter_object.get_include_list_filter('MYEAR')))\n# month_list = sorted(map(int, data_filter_object.get_include_list_filter('MONTH')))\n \n return {\"year_range\": [year_list[0], year_list[-1]]}#, \"month_list\": month_list}", "def event_timex_analysis(event1, event2):\n tagged = tag(event1.text)\n base_time = event1.get_best_time()\n\n if base_time is not None:\n dt, trusted = base_time.to_datetime() # Get the datetime representation of the reference's best_time\n grounded_times = ground(tagged, dt) # Ground any timex tags to that time\n new_dates = [] # holds new dates constructed from the grounded datetimes\n\n for time in grounded_times:\n new_date = Date()\n if trusted['year']:\n new_date.year = time.year\n\n if trusted['month']:\n new_date.month = time.month\n\n if trusted['day']:\n new_date.day = time.day\n\n if trusted['hour']:\n new_date.hour = time.hour\n\n if trusted['minute']:\n new_date.minute = time.minute\n\n new_dates.append(new_date)\n\n if len(new_dates) == 0: # Nothing interesting found.\n return\n\n new_dates = sorted(new_dates, lambda x: x.precision(), reverse=True)\n best_date = new_dates[0]\n\n other_best_date = event2.get_best_time()\n if other_best_date is not None:\n if best_date.precision() > other_best_date.precision():\n event2.set_best_time(best_date)\n else:\n event2.set_best_time(best_date)", "def 
get_data_by_time(filename):\n with open(filename, 'r') as f_in:\n # set up csv reader object\n reader = csv.DictReader(f_in)\n result = {}\n result['n_week'] = [0] * 7\n result['d_week'] = [0] * 7\n result['cus_hour'] = [0] * 24\n result['sub_hour'] = [0] * 24\n for data in reader:\n duration = float(data['duration'])\n if data['day_of_week'] == 'Sunday':\n result['n_week'][0] += 1\n result['d_week'][0] += duration\n elif data['day_of_week'] == 'Monday':\n result['n_week'][1] += 1\n result['d_week'][1] += duration\n elif data['day_of_week'] == 'Tuesday':\n result['n_week'][2] += 1\n result['d_week'][2] += duration\n elif data['day_of_week'] == 'Wednesday':\n result['n_week'][3] += 1\n result['d_week'][3] += duration\n elif data['day_of_week'] == 'Thursday':\n result['n_week'][4] += 1\n result['d_week'][4] += duration\n elif data['day_of_week'] == 'Friday':\n result['n_week'][5] += 1\n result['d_week'][5] += duration\n else:\n result['n_week'][6] += 1\n result['d_week'][6] += duration\n\n hour = int(data['hour'])\n if data['user_type'] == 'Customer':\n result['cus_hour'][hour] += 1\n else:\n result['sub_hour'][hour] += 1\n return result", "def merge_logfiles(log1, log2):\n first_in_2 = log2['time'][0]\n keep_from_1 = log1['time'] < first_in_2\n for key in log1.keys():\n log1[key] = log1[key][keep_from_1]\n log1.timeseries_append(log2)\n return log1", "def test_aggregate_times(self):\n f1 = DarshanIngestedJobFile(\"job\")\n f2 = DarshanIngestedJobFile(\"job\")\n f1.read_time_start = 123456\n f2.read_time_start = 456789\n f1.read_time_end = 444\n f2.read_time_end = 555\n f1.write_time_start = 222\n f2.write_time_start = 111\n f1.write_time_end = 666\n f2.write_time_end = 777\n f1.aggregate(f2)\n self.assertEqual(f1.read_time_start, 123456)\n self.assertEqual(f1.read_time_end, 555)\n self.assertEqual(f1.write_time_start, 111)\n self.assertEqual(f1.write_time_end, 777)\n\n f1 = DarshanIngestedJobFile(\"job\")\n f2 = DarshanIngestedJobFile(\"job\")\n f1.read_time_start = 456789\n f2.read_time_start = 123456\n f1.read_time_end = 555\n f2.read_time_end = 444\n f1.write_time_start = 111\n f2.write_time_start = 222\n f1.write_time_end = 777\n f2.write_time_end = 666\n f1.aggregate(f2)\n self.assertEqual(f1.read_time_start, 123456)\n self.assertEqual(f1.read_time_end, 555)\n self.assertEqual(f1.write_time_start, 111)\n self.assertEqual(f1.write_time_end, 777)\n\n # One equals None\n f1 = DarshanIngestedJobFile(\"job\")\n f2 = DarshanIngestedJobFile(\"job\")\n f1.read_time_start = 123456\n f2.read_time_start = None\n f1.read_time_end = 555\n f2.read_time_end = None\n f1.write_time_start = None\n f2.write_time_start = 111\n f1.write_time_end = None\n f2.write_time_end = 666\n f1.aggregate(f2)\n self.assertEqual(f1.read_time_start, 123456)\n self.assertEqual(f1.read_time_end, 555)\n self.assertEqual(f1.write_time_start, 111)\n self.assertEqual(f1.write_time_end, 666)\n\n # The other equals None\n f1 = DarshanIngestedJobFile(\"job\")\n f2 = DarshanIngestedJobFile(\"job\")\n f1.read_time_start = None\n f2.read_time_start = 456789\n f1.read_time_end = None\n f2.read_time_end = 444\n f1.write_time_start = 222\n f2.write_time_start = None\n f1.write_time_end = 777\n f2.write_time_end = None\n f1.aggregate(f2)\n self.assertEqual(f1.read_time_start, 456789)\n self.assertEqual(f1.read_time_end, 444)\n self.assertEqual(f1.write_time_start, 222)\n self.assertEqual(f1.write_time_end, 777)\n\n # Both equal None\n f1 = DarshanIngestedJobFile(\"job\")\n f2 = DarshanIngestedJobFile(\"job\")\n f1.read_time_start = 
None\n f2.read_time_start = None\n f1.read_time_end = None\n f2.read_time_end = None\n f1.write_time_start = None\n f2.write_time_start = None\n f1.write_time_end = None\n f2.write_time_end = None\n f1.aggregate(f2)\n self.assertEqual(f1.read_time_start, None)\n self.assertEqual(f1.read_time_end, None)\n self.assertEqual(f1.write_time_start, None)\n self.assertEqual(f1.write_time_end, None)", "def test_ingest_with_datetime():\n schema = pa.schema([\n pa.field(\"foo\", pa.int64()),\n pa.field(\"bar\", pa.int64()),\n pa.field(\"baz\", pa.timestamp(\"ns\"))\n ])\n\n data = [{\"foo\": 1, \"bar\": 2, \"baz\": \"2018-01-01 01:02:03\"}, {\"foo\": 10, \"bar\": 20, \"baz\": \"2018-01-02 01:02:03\"}]\n\n converted_data = client.ingest_data(data, schema)\n timestamp_values = [pd.to_datetime(\"2018-01-01 01:02:03\"), pd.to_datetime(\"2018-01-02 01:02:03\")]\n assert converted_data.to_pydict() == {'foo': [1, 10], 'bar': [2, 20], 'baz': timestamp_values}", "def get_metrics(self, from_time=None, to_time=None, metrics=None, view=None):\n return self._get_resource_root().get_metrics(self._path() + '/metrics',\n from_time, to_time, metrics, view)", "def get_timepix_data_object(evt, src):\n o = evt.get(_psana.Timepix.DataV2, src)\n if o is not None: return o\n\n o = evt.get(_psana.Timepix.DataV1, src)\n if o is not None: return o\n\n return None", "def put(self, metric, values, timestamp=None):\n if timestamp is None:\n timestamp = time.time()\n now_date = datetime.datetime.fromtimestamp(timestamp)\n\n if self.last is None:\n self.last = timestamp\n return\n\n self.last = timestamp\n\n values = [str(d) for d in [now_date, timestamp]+values]\n\n with open(self.filename, \"at\") as df:\n df.write(\"{}\\n\".format(\",\".join(values)))", "def data_to_time_series(sorted_data):\n curr_char = None\n curr_geo = None\n results = []\n next_series = {}\n for obj in sorted_data:\n new_char = obj['characteristic.id'] != curr_char\n new_geo = obj['geography.id'] != curr_geo\n if new_char or new_geo:\n if curr_char is not None and curr_geo is not None:\n results.append(next_series)\n next_series = {\n 'characteristic.id': obj.pop('characteristic.id'),\n 'characteristic.label.id':\n obj.pop('characteristic.label.id'),\n 'geography.id': obj.pop('geography.id'),\n 'geography.label.id': obj.pop('geography.label.id'),\n 'country.id': obj.pop('country.id'),\n 'country.label.id': obj.pop('country.label.id'),\n 'values': [\n {\n 'survey.id': obj.pop('survey.id'),\n 'survey.label.id': obj.pop('survey.label.id'),\n 'survey.date': obj.pop('survey.date'),\n 'value': obj.pop('value'),\n }\n ]\n }\n curr_char = next_series['characteristic.id']\n curr_geo = next_series['geography.id']\n else:\n next_series['values'].append({\n 'survey.id': obj.pop('survey.id'),\n 'survey.label.id': obj.pop('survey.label.id'),\n 'survey.date': obj.pop('survey.date'),\n 'value': obj.pop('value'),\n })\n if next_series:\n results.append(next_series)\n return results", "def add_metrics_point(self, data_points: Dict[str, float], timestamp: float):\n for name, value in data_points.items():\n # Using in-sort to insert while maintaining sorted ordering.\n bisect.insort(a=self.data[name], x=TimeStampedValue(timestamp, value))", "def summarise_data(trip_in, station_data, trip_out):\n # generate dictionary of station - city mapping\n station_map = create_station_mapping(station_data)\n \n with open(trip_out, 'w') as f_out:\n # set up csv writer object \n out_colnames = ['duration', 'start_date', 'start_year',\n 'start_month', 'start_hour', 'weekday',\n 
'start_city', 'end_city', 'subscription_type'] \n trip_writer = csv.DictWriter(f_out, fieldnames = out_colnames)\n trip_writer.writeheader()\n \n for data_file in trip_in:\n with open(data_file, 'r') as f_in:\n # set up csv reader object\n trip_reader = csv.DictReader(f_in)\n\n # collect data from and process each row\n for row in trip_reader:\n new_point = {}\n \n # convert duration units from seconds to minutes\n ### Question 3a: Add a mathematical operation below ###\n ### to convert durations from seconds to minutes. ###\n new_point['duration'] = float(row['Duration'])/60\n \n # reformat datestrings into multiple columns\n ### Question 3b: Fill in the blanks below to generate ###\n ### the expected time values. ###\n trip_date = datetime.strptime(row['Start Date'], '%m/%d/%Y %H:%M')\n new_point['start_date'] = trip_date.strftime('%Y-%m-%d')\n new_point['start_year'] = trip_date.strftime('%Y') # or : trip_date.year\n new_point['start_month'] = trip_date.strftime('%m') # or : trip_date.month\n new_point['start_hour'] = trip_date.strftime('%H') # or : trip_date.hour\n new_point['weekday'] = trip_date.strftime('%a') # or : trip_date.weekday() OR trip_date.isoweekday()\n\n \n # remap start and end terminal with start and end city\n new_point['start_city'] = station_map[row['Start Terminal']]\n new_point['end_city'] = station_map[row['End Terminal']]\n # two different column names for subscribers depending on file\n if 'Subscription Type' in row:\n new_point['subscription_type'] = row['Subscription Type']\n else:\n new_point['subscription_type'] = row['Subscriber Type']\n\n # write the processed information to the output file.\n trip_writer.writerow(new_point)", "def merge(self, other):\n self._mergeKeys(other)\n self._binaryOperationCheck(other)\n for id in self.clock.keys():\n print id\n self.clock[id] = max(self.clock[id], other.clock[id])", "def _write_to_dataset(parser1, parser2, dset, rundate):\n\n data_all1 = parser1.as_dict()\n data_all2 = parser2.as_dict()\n if parser1.file_path == parser2.file_path:\n collection = [data_all1]\n else:\n collection = [data_all1, data_all2]\n\n # Meta information\n dset.meta[\"tech\"] = \"slr\"\n dset.meta.add(\"file\", parser1.file_path.stem, section=\"input\")\n dset.meta.add(\"file\", parser2.file_path.stem, section=\"input\")\n dset.meta.add(\"type\", config.tech.obs_format.str.upper(), section=\"input\")\n\n # Make new dict \"obs_data\" containing only data in relevant time interval:\n arc_length = config.tech.arc_length.float\n rundate_datetime = datetime(rundate.year, rundate.month, rundate.day)\n obs_data = dict()\n for data_all in collection:\n for i, x in enumerate(data_all[\"meta\"][\"obs_time\"]):\n if rundate_datetime <= x < rundate_datetime + timedelta(days=arc_length):\n for key in (\"meta\", \"obs\", \"obs_str\"):\n for field, val in data_all[key].items():\n obs_data.setdefault(key, dict()).setdefault(field, list()).append(val[i])\n\n data_all.pop(\"meta\")\n data_all.pop(\"obs\")\n data_all.pop(\"obs_str\")\n\n for key in data_all.keys():\n if key.startswith(\"met_\"):\n for key2, val in data_all[key].items():\n obs_data.setdefault(key, dict()).setdefault(key2, list()).append(val)\n elif key.startswith(\"satellite_\"):\n # TODO: Use this information in the future?\n continue\n elif key.startswith(\"station_\"):\n # TODO: Use this information in the future?\n continue\n else:\n log.fatal(f\"Unknown data type{key}\")\n\n obs_date = obs_data[\"meta\"][\"obs_date\"]\n time = [obs_date[i] + 
timedelta(seconds=obs_data[\"meta\"][\"obs_sec\"][i]) for i in range(0, len(obs_date))]\n dset.num_obs = len(obs_data[\"meta\"][\"obs_time\"])\n dset.add_time(\"time\", val=time, scale=\"utc\", fmt=\"datetime\")\n dset.add_text(val=obs_data[\"meta\"][\"station\"], name=\"station\")\n dset.add_text(val=obs_data[\"meta\"][\"satellite\"], name=\"satellite\")\n dset.add_float(val=obs_data[\"meta\"][\"bin_rms\"], unit=\"picoseconds\", name=\"bin_rms\")\n # Positions\n trf = apriori.get(\"trf\", time=dset.time)\n for station in dset.unique(\"station\"):\n trf_site = trf[station]\n station_pos = trf_site.pos.trs.val\n log.debug(f\"Station position for {station} ({trf_site.name}) is (x,y,z) = {station_pos.mean(axis=0)}\")\n domes = trf_site.meta[\"domes\"]\n obs_data[\"pos_\" + station] = station_pos\n obs_data[\"station-other_\" + station] = dict(domes=domes, cdp=station, site_id=station)\n dset.add_position(\n \"site_pos\",\n time=dset.time,\n system=\"trs\",\n val=np.array([obs_data[\"pos_\" + s][idx] for idx, s in enumerate(dset.station)]),\n )\n # Station data\n sta_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"station_\")])\n for field in sta_fields:\n dset.add_float(field, val=np.array([float(obs_data[\"station_\" + s][field]) for s in dset.station]))\n sta_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"station-other_\")])\n for field in sta_fields:\n dset.add_text(field, val=[obs_data[\"station-other_\" + s][field] for s in dset.station])\n\n # Station meta\n station_keys = sorted([k for k, v in obs_data.items() if k.startswith(\"station-other_\")])\n pos_keys = sorted([k for k, v in obs_data.items() if k.startswith(\"pos_\")])\n\n for sta_key, pos_key in zip(station_keys, pos_keys):\n sta_name = sta_key.replace(\"station-other_\", \"\")\n cdp = obs_data[sta_key][\"cdp\"]\n dset.meta.add(sta_name, \"site_id\", cdp)\n longitude, latitude, height, _ = sofa.iau_gc2gd(2, obs_data[pos_key][0, :]) # TODO: Reference ellipsoid\n dset.meta[\"station\"].setdefault(sta_name, {})[\"cdp\"] = cdp\n dset.meta[\"station\"].setdefault(sta_name, {})[\"site_id\"] = cdp\n dset.meta[\"station\"].setdefault(sta_name, {})[\"domes\"] = obs_data[sta_key][\"domes\"]\n dset.meta[\"station\"].setdefault(sta_name, {})[\"marker\"] = \" \"\n dset.meta[\"station\"].setdefault(sta_name, {})[\"description\"] = \" \"\n dset.meta[\"station\"].setdefault(sta_name, {})[\"longitude\"] = longitude\n dset.meta[\"station\"].setdefault(sta_name, {})[\"latitude\"] = latitude\n dset.meta[\"station\"].setdefault(sta_name, {})[\"height\"] = height\n\n # Satellite data\n sat_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"satellite_\")])\n for field in sat_fields:\n dset.add_float(field, val=np.array([float(obs_data[\"satellite_\" + s][field]) for s in dset.satellite]))\n\n # Observations\n # In the dataset, obs_time is seconds since rundate:\n v = [\n (obs_data[\"meta\"][\"obs_date\"][i] - rundate_datetime).total_seconds() + obs_data[\"meta\"][\"obs_sec\"][i]\n for i in range(0, dset.num_obs)\n ]\n\n obs_data[\"obs\"].pop(\"obs_time\")\n dset.add_float(\"obs_time\", val=v)\n for field, values in obs_data[\"obs\"].items():\n dset.add_float(field, val=np.array(values))\n\n for field, values in obs_data[\"obs_str\"].items():\n dset.add_text(field, val=values)\n\n return obs_data", "def _get_time(self):\n # get the current time in UTC (make sure we are timezone aware)\n now_utc = datetime.datetime.now(pytz.UTC)\n \n # convert to our local 
timezone\n timenow = now_utc.astimezone(self.timezone)\n \n # save the data to our data\n self.data['year'][0] = timenow.year\n self.data['month'][0] = timenow.month\n self.data['day'][0] = timenow.day\n self.data['hour'][0] = timenow.hour\n self.data['minute'][0] = timenow.minute\n self.data['second'][0] = timenow.second\n \n return", "def get_data_at_time_indices(self, indices):\n if _is_iterable(indices):\n # Raise error if indices not sorted?\n index_list = list(sorted(indices))\n time_list = [self._time[i] for i in indices]\n data = {\n cuid: [values[idx] for idx in index_list]\n for cuid, values in self._data.items()\n }\n time_set = self._orig_time_set\n return TimeSeriesData(data, time_list, time_set=time_set)\n else:\n # indices is a scalar\n return ScalarData(\n {cuid: values[indices] for cuid, values in self._data.items()}\n )", "def get_metric(self, metric, existing_dict=None):\n metric_key, metric_type, metric_name, metric_help = metric\n metric_dict = {\n 'name': metric_name,\n 'type': metric_type,\n 'help': metric_help,\n 'values': OrderedDict()\n }\n values = self.r.hgetall(metric_key) # new values\n # print \"values: %r\" % values\n metric_dict['values'] = values\n\n if existing_dict:\n # we're updating a metric we've already seen\n # print \"existing dict: %r\" % existing_dict\n for value in values:\n # print \"checking value: %r\" % value\n # value = json.loads(value)\n if value in existing_dict['values']:\n if metric_type == 'counter' or metric_type == 'histogram':\n # Combine the values if it's a counter or histogram\n # TODO: sort histogram buckets\n # TODO: append _bucket to histogram bucket names\n existing_dict['values'][value] = float(\n values[value]) + float(\n existing_dict['values'][value])\n elif metric_type == 'gauge':\n # use the last value we see for a gauge - # TODO: is there a better way? we could average it\n existing_dict['values'][value] = float(values[value])\n else:\n existing_dict['values'][value] = float(values[value])\n metric_dict['values'] = existing_dict['values']\n\n if metric_type == 'histogram':\n # json decode all of the labels\n samples = [json.loads(x, object_pairs_hook=OrderedDict) for x in metric_dict['values']]\n # we need to sort the values by the bucket labeled \"le\"\n sorted_keys = sorted(samples, key=lambda b: b['le'])\n # and then we need to store the values again json encoded\n vals = metric_dict['values']\n metric_dict['values'] = OrderedDict()\n for k in sorted_keys:\n kn = json.dumps(k, sort_keys=True)\n metric_dict['values'][kn] = vals[kn]\n\n return metric_dict", "def _create_profile_from_xarray(self, time=None):\n # Select one time from the larger dataset\n if not isinstance(time, type(None)):\n \n # User specified the time to use\n if 'time' in self.data.dims:\n \n # Check if time exists in dataset\n if time <= self.data.dims['time']:\n # Store selected time in self.ds\n self.ds = self.data.isel(time=time)\n \n else:\n # Time selection is out of range\n error_message = '\\nWarning!!! Time is outside of range'\n error_message += ' of xarray Dataset!\\nUsing latest'\n error_message += ' time in Dataset instead.'\n print(error_message)\n self.ds = self.data.isel(time=-1)\n \n else:\n # Unfortunately, dataset does not have times to select\n error_message = '\\nWarning!!! Time is not a coordinate'\n error_message += ' of the Dataset! 
\\nUsing the whole'\n error_message += ' Dataset instead.\\n'\n print(error_message)\n self.ds = self.data\n \n else:\n # Use the first time-step in the dataset\n if 'time' in self.data.dims:\n self.ds = self.data.isel(time=0)\n else:\n self.ds = self.data\n \n # Update all the units in the dataset to match standard units\n # used in TAMOC\n xr_convert_units(self.ds, self.ztsp[0])\n \n # Insert the pressure data if missing by integrating the density\n if self.ztsp[-1] not in self.ds:\n # Extract the depth and temperature data\n zs = self.ds.coords[self.ztsp[0]].values\n Ts = self.ds[self.ztsp[1]].values\n Ss = self.ds[self.ztsp[2]].values\n fs_loc = np.min(np.where(zs == np.min(zs)))\n \n if fs_loc > 0:\n fs_loc = -1\n # Compute the pressure for this density profile\n Ps = compute_pressure(zs, Ts, Ss, fs_loc)\n \n # Insert the computed pressure into the dataset\n self.ds[self.ztsp[-1]] = ((self.ztsp[0]), Ps)\n self.ds[self.ztsp[-1]].attrs['units'] = 'Pa'\n self.ztsp_units[-1] = 'Pa'\n \n # Remove any data not requested by user\n keep_names = self.ztsp + self.chem_names\n for name in self.ds.data_vars:\n if name not in keep_names:\n self.ds = self.ds.drop_vars([name])\n \n # Add the unit labels passed to the initializer if they are not\n # already in the dataset\n xr_check_units(self.ds, self.ztsp, self.ztsp_units)\n xr_check_units(self.ds, self.chem_names, self.chem_units)\n \n # Coarsen the data for interpolation\n if self.err > 0.:\n self.interp_ds = xr_coarsen_dataset(self.ds, self.ztsp[0], \n self.err)\n else:\n self.interp_ds = self.ds\n \n # Stablize the interpolation profile\n if 'pressure' in self.interp_ds and self.stabilize_profile:\n self.interp_ds = xr_stabilize_dataset(self.interp_ds,\n self.ztsp[0], self.ztsp)\n \n # Store the boundaries of the z-coordinate\n zs = self.interp_ds[self.ztsp[0]].values\n self.z_min = np.min(zs)\n self.z_max = np.max(zs)\n \n # Make sure the Profile.ds attribute points to the interp_ds\n self.ds = self.interp_ds\n \n # Build a hard-wired interpolator for speed\n self._build_interpolator()" ]
[ "0.6252038", "0.5991166", "0.5987996", "0.59735656", "0.5910383", "0.5889225", "0.58206046", "0.5819968", "0.5817195", "0.56706285", "0.5599506", "0.55690825", "0.5536235", "0.54553676", "0.5344387", "0.532456", "0.5291916", "0.52320737", "0.52283955", "0.5221072", "0.5202319", "0.51057315", "0.51032513", "0.51024926", "0.5096357", "0.50709116", "0.5053796", "0.5052287", "0.5047473", "0.5038343", "0.5020621", "0.50201076", "0.5019306", "0.5013213", "0.50015885", "0.4998639", "0.4987343", "0.4963805", "0.49601233", "0.49601233", "0.49541336", "0.49449113", "0.49426648", "0.49360776", "0.49355093", "0.4923132", "0.49094272", "0.49090707", "0.49051598", "0.48998162", "0.48990497", "0.48867604", "0.48591548", "0.48543996", "0.48505887", "0.48429546", "0.48358884", "0.48345086", "0.48340324", "0.48306152", "0.48289147", "0.48213038", "0.48212206", "0.48197323", "0.4812262", "0.4807759", "0.48070136", "0.47998378", "0.47983116", "0.47976512", "0.4780106", "0.4780106", "0.47710678", "0.47693458", "0.4764216", "0.4756323", "0.47448045", "0.4737494", "0.4735266", "0.4734199", "0.4731462", "0.4727344", "0.4713068", "0.47086775", "0.4695694", "0.46928918", "0.4691713", "0.46824408", "0.46756327", "0.46714", "0.4671115", "0.4669567", "0.46613064", "0.46565154", "0.46502957", "0.4639842", "0.46350425", "0.46338725", "0.46258548", "0.4622624" ]
0.69977385
0
Record a single value metric, merging the data with any data from prior value metrics with the same name.
def record_custom_metric(self, name, value):
    if isinstance(value, dict):
        if len(value) == 1 and 'count' in value:
            new_stats = CountStats(call_count=value['count'])
        else:
            new_stats = TimeStats(*c2t(**value))
    else:
        new_stats = TimeStats(1, value, value, value, value, value**2)

    stats = self.__stats_table.get(name)
    if stats is None:
        self.__stats_table[name] = new_stats
    else:
        stats.merge_stats(new_stats)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]", "def log_metric(self, name: str, value):\n self.metrics[name] = value\n\n self._sync_log_event()", "def write_metric(self, metric_name: str, metric_value: Union[float, int]):\n self._metrics.append(Metric(metric_name, metric_value))", "def record_custom_metric(self, name, value):\n key = (name, '')\n\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(key)\n if stats is None:\n self.__stats_table[key] = new_stats\n else:\n stats.merge_stats(new_stats)\n\n return key", "def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with self._lock:\n self._batch[identity] = value\n self._timestamps[identity] = int(time.time() * 1000.0)", "def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})", "def record_summary(self, name, value, tags=None):\n identity = self.create_identity(name, tags, \"summary\")\n with self._lock:\n if identity in self._batch:\n merged_value = self._batch[identity]\n merged_value[\"count\"] += 1\n merged_value[\"sum\"] += value\n merged_value[\"min\"] = min(value, merged_value[\"min\"])\n merged_value[\"max\"] = max(value, merged_value[\"max\"])\n else:\n value = {\"count\": 1, \"sum\": value, \"min\": value, \"max\": value}\n self._batch[identity] = value", "def log_metric(self, name, val):\n raise NotImplementedError", "def update_metric(self, metric, value):\n if self.is_number(value):\n self.logger.debug(\"Collected raw metric: %s = %s\" % (metric, value))\n self.raw_metrics[metric] = value", "def __push_metric(self, metric_name, value, timestamp):\n sock = self.__get_carbon_socket()\n _data = \"%s %d %d\\n\" % (metric_name, value, timestamp)\n LOGGER.debug(\"SEND: %s\", _data.replace(\"\\n\", \"\"))\n sock.send(_data.encode('utf-8'))", "def metric_recorded(self, record):\n if record.name in self.headers and self.current_row is not None:\n if record.name == \"learning_rate\" and not record.is_scalar:\n # record is a list of scalars\n value = \",\".join([f\"{lr:.4f}\" for lr in record.value])\n elif record.is_scalar and isinstance(record.value, int):\n value = str(record.value)\n else:\n assert record.is_scalar\n\n value = f\"{record.value:.4f}\"\n\n self.current_row[record.name] = value", "def dispatch_value(metric, value, type):\n log_verbose('Sending metric: %s=%s as type %s' % (metric, value,type))\n\n val = collectd.Values(plugin='redis_metrics')\n val.type = type\n val.type_instance = metric\n val.values = [value]\n val.dispatch()", "def log_metric(self, name, val, step):\n raise NotImplementedError", "def sum(self, key, value):\n self._metrics[key] += value", "def log_metric(key, value, step=None):\n mlflow.log_metric(key, value, step=step)", "def add_metric(self, metric_name, metric_value, login=False):\n if login:\n self._gc.login()\n\n try: \n if metric_name not in self._metric_dict:\n metric_index = len(self._metric_dict) + 2\n self._wks.update_cell(1, metric_index, metric_name)\n self._metric_dict[metric_name] = metric_index\n self.save_config()\n\n self._wks.update_cell(self.row_index, self._metric_dict[metric_name], metric_value)\n except Exception as ins:\n if 
not login:\n self.add_metric(metric_name, metric_value, login=True)\n else:\n return '\\n'.join([str(type(ins)), str(ins.args), str(ins)])\n return None", "def record_metrics(self, pid, metrics):\n\n for _metric, _metric_value in metrics.items():\n if not self.__metrics_history[pid].get(_metric):\n self.__metrics_history[pid][_metric] = []\n self.__metrics_history[pid][_metric].append(_metric_value)\n # only keep the last 2 running history for any metric\n self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][\n -2:\n ]", "def record_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, value in metrics:\n self.record_custom_metric(name, value)", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def _log_op_value(self, name: str, value: Any) -> None:\n summary_op, placeholder = self._get_log_op(name)\n sess = tf.get_default_session()\n result = sess.run(summary_op, {placeholder: value})\n self.summary_writer.add_summary(result, self.batches_seen)", "def put(self, metric, values, timestamp=None):\n if timestamp is None:\n timestamp = time.time()\n now_date = datetime.datetime.fromtimestamp(timestamp)\n\n if self.last is None:\n self.last = timestamp\n return\n\n self.last = timestamp\n\n values = [str(d) for d in [now_date, timestamp]+values]\n\n with open(self.filename, \"at\") as df:\n df.write(\"{}\\n\".format(\",\".join(values)))", "def publish_metric(name, value, type):\n t = time.time()\n m = json.dumps({'monitor':name, type:value, 'time':t})\n r = redis.StrictRedis(host='localhost', port=6379, db=0) \n r.lpush('sensor_readings',m)", "def save_metric(key, value, timestamp=None):\n\n from analytics_client.settings import _ANALYTICS_ENABLED\n\n if not _ANALYTICS_ENABLED:\n return None\n\n from analytics_client.tasks import store_metric\n\n # Set a timestamp if it is undefined\n _timestamp = timestamp\n if _timestamp is None:\n _timestamp = datetime.now()\n\n store_metric.delay(Metric(key=key, value=value, timestamp=_timestamp))", "def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)", "def log_other(self, name: str, value):\n self._other_metadata[name] = value\n\n self._sync_log_event()", "def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)", "def _record(self, metric_point: MetricPoint,\n measurement_map: MeasurementMap):\n metric_name = metric_point.metric_name\n tags = metric_point.tags\n\n metric = self._registry.get(metric_name)\n # Metrics should be always registered dynamically.\n assert metric\n\n tag_map = tag_map_module.TagMap()\n for key, value in tags.items():\n tag_key = tag_key_module.TagKey(key)\n tag_value = tag_value_module.TagValue(value)\n tag_map.insert(tag_key, tag_value)\n\n metric_value = metric_point.value\n measurement_map.measure_float_put(metric.measure, metric_value)\n # NOTE: When we record this metric, timestamp will be renewed.\n measurement_map.record(tag_map)", "def add_metric(self, metric):\n self.metrics.append(metric)\n self.estimate()", "def log(self, metric, value, source, 
timestamp=None):\n if timestamp is None:\n timestamp = datetime.now()\n\n sql = \"insert into measurement(metric, value, source, timestamp) values('{0}', {1}, '{2}', '{3}');\".format(\n metric, value, source, timestamp)\n\n self._execute_sql(sql)", "def get_metric(self, metric, existing_dict=None):\n metric_key, metric_type, metric_name, metric_help = metric\n metric_dict = {\n 'name': metric_name,\n 'type': metric_type,\n 'help': metric_help,\n 'values': OrderedDict()\n }\n values = self.r.hgetall(metric_key) # new values\n # print \"values: %r\" % values\n metric_dict['values'] = values\n\n if existing_dict:\n # we're updating a metric we've already seen\n # print \"existing dict: %r\" % existing_dict\n for value in values:\n # print \"checking value: %r\" % value\n # value = json.loads(value)\n if value in existing_dict['values']:\n if metric_type == 'counter' or metric_type == 'histogram':\n # Combine the values if it's a counter or histogram\n # TODO: sort histogram buckets\n # TODO: append _bucket to histogram bucket names\n existing_dict['values'][value] = float(\n values[value]) + float(\n existing_dict['values'][value])\n elif metric_type == 'gauge':\n # use the last value we see for a gauge - # TODO: is there a better way? we could average it\n existing_dict['values'][value] = float(values[value])\n else:\n existing_dict['values'][value] = float(values[value])\n metric_dict['values'] = existing_dict['values']\n\n if metric_type == 'histogram':\n # json decode all of the labels\n samples = [json.loads(x, object_pairs_hook=OrderedDict) for x in metric_dict['values']]\n # we need to sort the values by the bucket labeled \"le\"\n sorted_keys = sorted(samples, key=lambda b: b['le'])\n # and then we need to store the values again json encoded\n vals = metric_dict['values']\n metric_dict['values'] = OrderedDict()\n for k in sorted_keys:\n kn = json.dumps(k, sort_keys=True)\n metric_dict['values'][kn] = vals[kn]\n\n return metric_dict", "def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset", "def log_metadata(self, label, value):\n self.__metadata[label].append(value)", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def log_metric(name, values, tags={}):\n value_list = []\n for key in sorted(values.keys()):\n value = values[key]\n value_list.append(f\"{key}:{value:7.3f}\")\n values = \", \".join(value_list)\n tag_list = []\n for key, tag in tags.items():\n tag_list.append(f\"{key}:{tag}\")\n tags = \", \".join(tag_list)\n print(\"{name:30s} - {values} ({tags})\".format(name=name, values=values, tags=tags))", "def save_scalar(step, name, value, writer):\n summary = tf.Summary()\n summary_value = summary.value.add()\n summary_value.simple_value = float(value)\n summary_value.tag = name\n writer.add_summary(summary, step)", "def add_metric(self, metric_class, namespace, name, value=1.0, tags=None, interval=None):\n # type: (Type[Metric], str, str, float, 
MetricTagType, Optional[float]) -> None\n metric_id = Metric.get_id(name, namespace, tags, metric_class.metric_type)\n if metric_class is DistributionMetric:\n metrics_type_payload = TELEMETRY_TYPE_DISTRIBUTION\n else:\n metrics_type_payload = TELEMETRY_TYPE_GENERATE_METRICS\n\n with self._lock:\n existing_metric = self._metrics_data[metrics_type_payload][namespace].get(metric_id)\n if existing_metric:\n existing_metric.add_point(value)\n else:\n new_metric = metric_class(namespace, name, tags=tags, common=True, interval=interval)\n new_metric.add_point(value)\n self._metrics_data[metrics_type_payload][namespace][metric_id] = new_metric", "def add(self, val):\n key = self.get_key(val)\n self.store.add(key)\n\n # Keep track of summary stats\n self._count += 1\n self._sum += val\n if val < self._min:\n self._min = val\n if val > self._max:\n self._max = val", "def writeSummary(self, value, tag, summaryWriter, global_step):\n\n summary = tf.Summary()\n summary.value.add(tag=tag, simple_value=value)\n summaryWriter.add_summary(summary, global_step)", "def record_count(self, name, value, tags=None):\n identity = self.create_identity(name, tags, \"count\")\n with self._lock:\n self._batch[identity] = self._batch.get(identity, 0) + value", "def record_apdex_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Note that because we are using a scope here of an empty string\n # we can potentially clash with an unscoped metric. Using None,\n # although it may help to keep them separate in the agent will\n # not make a difference to the data collector which treats None\n # as an empty string anyway.\n\n key = (metric.name, '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = ApdexStats(apdex_t=metric.apdex_t)\n self.__stats_table[key] = stats\n stats.merge_apdex_metric(metric)\n\n return key", "def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def update(self, current_iter, *metrics, **named_metrics):\n\n # Same order as __init__() in python>=3.6\n if len(metrics) > 0:\n for key, metric in zip(self.metrics.keys(), metrics):\n self.metrics[key].append((current_iter, metric))\n \n # Random order with names\n elif len(named_metrics) > 0:\n for name, metric in named_metrics.item():\n self.metrics[name].append((metric))\n\n else:\n raise ValueError(\"No valid value to update losses\")", "def counter(self, metric_name, value=1):\n if self._send_sampled_event():\n counter = \"%s%s:%d|c|@%s\" % (self.metric_name_prepend, metric_name,\n value, self.statsd_sample_rate)\n self._send_events([counter])", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def set_measured_value(self, value_sig1, value_sig2):\n self.entry_measured_value_sig1.set(value_sig1)\n self.entry_measured_value_sig2.set(value_sig2)", "def add_key_value(self, key, value):\n key = self._metadata_map().get(key, key)\n if key in ['dateAdded', 'lastModified']:\n self._data[key] = self.util.any_to_datetime(value).strftime('%Y-%m-%dT%H:%M:%SZ')\n elif key == 'confidence':\n self._data[key] = int(value)\n elif key == 'rating':\n self._data[key] = float(value)\n elif key == 'unique_id':\n self._unique_id = quote(self.fully_decode_uri(value), safe='')\n else:\n self._data[key] = value", "def log_scalar(self, tag, value, step):\n\n summary = tf.Summary(\n 
value=[tf.Summary.Value(tag=tag, simple_value=value)])\n self.writer.add_summary(summary, step)\n self.writer.flush()", "def _append_value(self, stream, value):\n if FLAGS.timestamp:\n x_val = float(time.time())\n stream['x'].append(x_val)\n\n y_val = float(value)\n stream['y'].append(y_val)", "def _add_to_queue(key, value, step, time, run_id):\n met = Metric(key=key, value=value, timestamp=time, step=step)\n _metric_queue.append((run_id, met))\n if len(_metric_queue) > _MAX_METRIC_QUEUE_SIZE:\n _thread_pool.submit(_flush_queue)", "def sendMeasurement(self, metric, value, source, timestamp=None):\n sys.stdout.write('{0} {1} {2} {3}\\n'.format(metric, value, source, timestamp).decode('utf-8'))\n sys.stdout.flush()", "def append_to_list(self, metric_value_to_append):\n if type(metric_value_to_append)==MetricValue:\n self.__metric_value_list.append(metric_value_to_append)\n else:\n print(\"appended object must be a MetricValue, metric_value_to_append=\",metric_value_to_append)\n sys.exit() # stop entire program, because metric_value_to_append MUST be correct", "def set_aggregate_data(self, event_name, value, key=None):\n \n raise NotImplementedError()", "def update(self, step, metrics):\n self.steps_arr.append(step)\n for key, val in metrics.items():\n if isinstance(val, tf.Tensor):\n try:\n self.data[key].append(val.numpy())\n except KeyError:\n self.data[key] = [val.numpy()]", "def increment(self) -> None:\n self._increment_called = True\n self.append(deepcopy(self._base_metric))", "def push(self, value):\n self.values.append((time.time(), value))", "def log_metric(data_category, key, value):\n # always, just print in logs\n log(logging.INFO, data_category, \"AML Metric({}={})\".format(key, value))\n if data_category == DataCategory.ONLY_PUBLIC_DATA:\n # if public, ask azureml to record (if azureml attached)\n run = AmlRunWrapper()\n run.setup(attach=True)\n run.log(key, value)\n run.flush()", "def average(self, key, value):\n self._average_metrics[key] += value\n self._average_metrics_count[key] += 1", "def record_sensor_readings(data_dict, metric_list, output_file_name):\n sensor_vals = []\n # going though metric_list to keep order consistent\n for metric in metric_list:\n if metric in data_dict:\n sensor_vals.append(str(data_dict[metric]))\n else:\n # value not recorded properly\n sensor_vals.append(\"null\")\n vals = \",\".join(sensor_vals)\n\n # write to file\n # TODO: keep file open for duration of the drive to avoid re-opening it at each iteration\n with open(output_file_name, 'a') as file:\n file.write(vals+\"\\n\")", "def save_scalars(self, step, metrics):\n\n # Save\n with self.summary_writer.as_default():\n for name, value in metrics.items():\n tf.summary.scalar(name, value, step=step)", "def save_data(self, gauge_name, date_key, data):\n pass", "def append_to(self, key, value):\n self.get_data()[key].append(value.get_data())", "def send(self, name, value, dimensions=None, sample_rate=1):\n\n self._connection.report(metric=self.update_name(name),\n metric_type='s',\n value=value,\n dimensions=self.update_dimensions(dimensions),\n sample_rate=sample_rate)", "def gauge(self, gauge, value):\n if self.ignore_metrics:\n return\n\n with self._gauge_rlock:\n self._gauge_metrics[gauge] = value\n self._gauge_call_count += 1\n\n old_call_time = self._gauge_last_call_time\n self._gauge_last_call_time = arrow.utcnow().timestamp\n if (self._gauge_call_count == self._max_call_count > 0) or \\\n self._gauge_last_call_time - old_call_time > self._max_time_between_calls > 0:\n 
self._gauge_call_count = 0\n self.update_gauge()", "def __add_one_day_values__(self):\n values = self.values()\n for value in values:\n ls = []\n if value.label in self.values_dict:\n ls = self.values_dict[value.label]\n ls.append(value)\n else:\n ls = [value]\n self.values_dict[value.label] = ls", "def set_gauge_value(self, name: str, value: float | None, delta: bool, tags: Attributes):\n key: str = _generate_key_name(name, tags)\n new_value = value or DEFAULT_GAUGE_VALUE\n old_value = self.poke_gauge(name, tags)\n if delta:\n new_value += old_value\n # If delta is true, add the new value to the last reading otherwise overwrite it.\n self.map[key] = Observation(new_value, tags)", "def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def sensorValue(self, key=None, value=None, timestamp=None):\n\n try:\n self.logger.debug('logging trigger value for {0} with value {1}'.format(key, value))\n Sensor.get_by_id(key).add_value(value)\n except Exception as e:\n self.logger.warn('Something went wrong registering trigger value for {0}: {1}'.format(key, e))\n else:\n # lauch trigger checks\n self.logger.debug('posting sensordata to trigger processor')\n self.triggerqueue.put((\"sensor\", key, value))", "def add_value(self, value):\r\n self.resource_records.append(value)", "def mark_point(\n metric: str,\n value: float,\n result: Literal[\"SUM\", \"AVG\"] = \"SUM\",\n timestamp: Optional[float] = None,\n):\n now = int(time.time())\n current_minute_tstamp = timestamp or (now - (now % 60))\n key_name = f\"{Monitoring.ACC_PREFIX}_{current_minute_tstamp}_{metric}\"\n prefix = [\n metric,\n result,\n \"FLOAT\" if isinstance(value, float) else \"INT\",\n ]\n\n # create key and set expiry\n redis_client.set(key_name, \"|\".join(prefix), ex=120, nx=True)\n redis_client.append(key_name, f\"|{value}\")", "def _submit_gauge(self, metric_name, val, metric, custom_tags=None, hostname=None):\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.gauge('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)", "def state_metric(self, key: str, value: str, dimensions: Dict[str, str] = None):\n self._results_builder.add_absolute_result(PluginStateMetric(key=key, value=value, dimensions=dimensions,\n entity_selector=self.selector))", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def __setitem__(self, key, val):\n extract = lambda t: t.item() if type(t) is torch.Tensor else t\n\n if type(val) is dict:\n for k, v in val.items():\n self.log_scalar(k, extract(v), 'last')\n else:\n self.log_scalar(key, extract(val), 'last')", "def write_summary(value, tag, summary_writer, global_step):\n summary = tf.Summary()\n summary.value.add(tag=tag, simple_value=value)\n summary_writer.add_summary(summary, global_step)", "def lambda_metric(metric_name, value, timestamp=None, 
tags=None):\n tags = _tag_dd_lambda_layer(tags)\n if os.environ.get(\"DD_FLUSH_TO_LOG\", \"\").lower() == \"true\":\n logger.debug(\"Sending metric %s to Datadog via log forwarder\", metric_name)\n print(\n json.dumps(\n {\n \"m\": metric_name,\n \"v\": value,\n \"e\": timestamp or int(time.time()),\n \"t\": tags,\n }\n )\n )\n else:\n logger.debug(\"Sending metric %s to Datadog via lambda layer\", metric_name)\n lambda_stats.distribution(metric_name, value, timestamp=timestamp, tags=tags)", "def __write_value(self, group: h5py.Group, name: str, value: np.ndarray):\n try:\n normalized = normalize_attr_values(value)\n except Exception as ex:\n raise ValueError(f'Could normalize {type(value)}(key \"{name}\")') from ex\n\n if np.isscalar(normalized) or normalized.dtype == np.object_:\n group[name] = normalized\n else:\n self.__write_array(group, name, normalized)", "def record_time_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Scope is forced to be empty string if None as\n # scope of None is reserved for apdex metrics.\n\n key = (metric.name, metric.scope or '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = TimeStats(call_count=1,\n total_call_time=metric.duration,\n total_exclusive_call_time=metric.exclusive,\n min_call_time=metric.duration,\n max_call_time=metric.duration,\n sum_of_squares=metric.duration ** 2)\n self.__stats_table[key] = stats\n else:\n stats.merge_time_metric(metric)\n\n return key", "def add_value(self, value):\n if len(self.hist) < 2:\n BaseFilter.add_value(self, value)\n else:\n filtered_value = self.hist[-1] * self.alpha + value * (1.0 - self.alpha)\n BaseFilter.add_value(self, filtered_value)", "def submit_metric(self, metric_suffix, metric, scraper_config, gauge=True, monotonic_count=True):\n metric_name = scraper_config['namespace'] + metric_suffix\n for sample in metric.samples:\n # Explicit shallow copy of the instance tags\n _tags = list(scraper_config['custom_tags'])\n\n for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):\n _tags.append('{}:{}'.format(label_name, label_value))\n if gauge:\n # submit raw metric\n self.gauge(metric_name, sample[self.SAMPLE_VALUE], _tags)\n if monotonic_count:\n # submit rate metric\n self.monotonic_count(metric_name + '.count', sample[self.SAMPLE_VALUE], _tags)", "def AddSample(self, machine, timestamp, value):\n self.machine_data.setdefault(machine, list()).append([timestamp, value])\n if len(self.cluster_total) == 0 or timestamp > self.cluster_total[-1][0]:\n self.cluster_total.append([timestamp, 0])\n self.cluster_avg.append([timestamp, 0])\n self.cluster_total[-1][1] += value\n self.cluster_avg[-1][1] = self.cluster_total[-1][1] / float(len(self.machine_data))", "def add_metric(self, metric_fn):\n self._metrics.append(metric_fn)", "def send_metric(model_id, metric, value):\n host, port, namespace = get_metric_endpoint()\n\n metric_name = '%s.%s' % (namespace, get_metric_name(metric, model_id))\n message = \"%s %f %d\\n\" % (metric_name, float(value), int(time.time()))\n send_tcp(host, port, message)\n\n build_no = get_build_number()\n metric_name = '%s.%s' % (namespace, get_metric_name('build', model_id))\n message = \"%s %f %d\\n\" % (metric_name, build_no, int(time.time()))\n send_tcp(host, port, message)", "def process(self, key, value):\n if key not in self.counts:\n self.counts[key] = 0.0\n self.counts[key] += value", "def add(self, value):\n self._resolve_copies()\n self.data.append(value)", "def metrics(self):\n \n if self.mse.shape[0]>1:\n raise 
ValueError('Metrics can only handle single observations.')\n \n if self.N==1:\n pred = float('nan')\n err = float('nan')\n y_true = float('nan')\n else:\n pred = int(self._predictions[-1])\n err = self._mse[-1]\n y_true = int(self.label[0])\n \n is_outlier = {\"type\":\"GAUGE\",\"key\":\"is_outlier\",\"value\":pred}\n mse = {\"type\":\"GAUGE\",\"key\":\"mse\",\"value\":err}\n obs = {\"type\":\"GAUGE\",\"key\":\"observation\",\"value\":self.N - 1}\n threshold = {\"type\":\"GAUGE\",\"key\":\"threshold\",\"value\":self.threshold}\n \n label = {\"type\":\"GAUGE\",\"key\":\"label\",\"value\":y_true}\n \n accuracy_tot = {\"type\":\"GAUGE\",\"key\":\"accuracy_tot\",\"value\":self.metric[4]}\n precision_tot = {\"type\":\"GAUGE\",\"key\":\"precision_tot\",\"value\":self.metric[5]}\n recall_tot = {\"type\":\"GAUGE\",\"key\":\"recall_tot\",\"value\":self.metric[6]}\n f1_score_tot = {\"type\":\"GAUGE\",\"key\":\"f1_tot\",\"value\":self.metric[7]}\n f2_score_tot = {\"type\":\"GAUGE\",\"key\":\"f2_tot\",\"value\":self.metric[8]}\n \n accuracy_roll = {\"type\":\"GAUGE\",\"key\":\"accuracy_roll\",\"value\":self.metric[9]}\n precision_roll = {\"type\":\"GAUGE\",\"key\":\"precision_roll\",\"value\":self.metric[10]}\n recall_roll = {\"type\":\"GAUGE\",\"key\":\"recall_roll\",\"value\":self.metric[11]}\n f1_score_roll = {\"type\":\"GAUGE\",\"key\":\"f1_roll\",\"value\":self.metric[12]}\n f2_score_roll = {\"type\":\"GAUGE\",\"key\":\"f2_roll\",\"value\":self.metric[13]}\n \n true_negative = {\"type\":\"GAUGE\",\"key\":\"true_negative\",\"value\":self.metric[0]}\n false_positive = {\"type\":\"GAUGE\",\"key\":\"false_positive\",\"value\":self.metric[1]}\n false_negative = {\"type\":\"GAUGE\",\"key\":\"false_negative\",\"value\":self.metric[2]}\n true_positive = {\"type\":\"GAUGE\",\"key\":\"true_positive\",\"value\":self.metric[3]}\n \n nb_outliers_roll = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_roll\",\"value\":self.metric[14]}\n nb_labels_roll = {\"type\":\"GAUGE\",\"key\":\"nb_labels_roll\",\"value\":self.metric[15]}\n nb_outliers_tot = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_tot\",\"value\":self.metric[16]}\n nb_labels_tot = {\"type\":\"GAUGE\",\"key\":\"nb_labels_tot\",\"value\":self.metric[17]}\n \n return [is_outlier,mse,obs,threshold,label,\n accuracy_tot,precision_tot,recall_tot,f1_score_tot,f2_score_tot,\n accuracy_roll,precision_roll,recall_roll,f1_score_roll,f2_score_roll,\n true_negative,false_positive,false_negative,true_positive,\n nb_outliers_roll,nb_labels_roll,nb_outliers_tot,nb_labels_tot]", "def value(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Summary.Value]:", "def put_metric(cw_metric_name, statistic_value, config=None,\n cw_dimension_name=None, cw_namespace=None):\n\n try:\n if config:\n session = config.boto3_session()\n cw_dimension_name = config.cw_dimension_name\n cw_namespace = config.cw_namespace\n region = config.region\n else:\n session = Config.boto3_session()\n except:\n logger.exception(\"\")\n sys.exit(127)\n\n if not cw_dimension_name or not cw_metric_name:\n raise ValueError(\"You have to specify at least\\\n cw_dimension_name or config parameter\")\n\n cw = session.resource('cloudwatch', region_name=region)\n try:\n float(statistic_value)\n except ValueError:\n logger.error(\"Statistic value not convertible to float.\")\n return False\n\n try:\n if statistic_value == 0:\n statistic_value = 0.1\n\n cw.Metric(cw_namespace, cw_metric_name).put_data(\n MetricData=[\n {\n 'MetricName': cw_metric_name,\n 'Dimensions': [\n {\n 'Name': 
cw_dimension_name,\n 'Value': cw_metric_name\n }\n ],\n 'StatisticValues': {\n 'SampleCount': statistic_value,\n 'Sum': statistic_value,\n 'Minimum': statistic_value,\n 'Maximum': statistic_value\n },\n 'Unit': 'Count',\n 'StorageResolution': 1\n }\n ]\n )\n except:\n logger.exception(\"\")", "def record(self, config, value, time_ms):\n raise NotImplementedError", "def accumulate(self, value):\n inc_counter_op = smart_assign(self._counter, 1.0, assign_fn=tf.assign_add)\n acc_op = smart_assign(self._acc_var, value, assign_fn=tf.assign_add)\n return tf.group(inc_counter_op, acc_op)", "def _submit_rate(self, metric_name, val, metric, custom_tags=None, hostname=None):\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.rate('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)", "def add_metric(self, metric_name, aggregate=None):\n\n clean_metric = metric_name.lower().strip()\n\n if clean_metric.lower() not in METRICS:\n raise Exception(\"Metric named: \" + metric_name + \" is not a valid benchmark metric.\")\n self.metrics.add(clean_metric)\n\n if not aggregate:\n self.raw_metrics.add(clean_metric)\n elif aggregate.lower().strip() in AGGREGATES:\n # Add aggregate to this metric\n clean_aggregate = aggregate.lower().strip()\n current_aggregates = self.aggregated_metrics.get(clean_metric, list())\n current_aggregates.append(clean_aggregate)\n self.aggregated_metrics[clean_metric] = current_aggregates\n else:\n raise Exception(\"Aggregate function \" + aggregate + \" is not a legal aggregate function name\");\n\n return self;", "def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics", "def add(self, value, source=None, **params):\n\t\treturn self.connection.send_gauge_value(self.name, value, source, **params)", "def max(self, key, value):\n self._metrics[key] = max(value, 
self._metrics[key])", "def add_value_mean(cls, sensor, values, device_id):\n if values[device_id] is not None:\n if sensor == 't':\n cls.mean_t.append(int(values[device_id][sensor]))\n if sensor == 'l':\n cls.mean_l.append(int(values[device_id][sensor]))", "def write_value(message):\n tag_definition = WRITE_TABLE.get(message.topic.strip(base_topic))\n if tag_definition:\n string_value = message.payload.decode(\"utf-8\")\n value = tag_definition.convertion(string_value)\n _LOGGER.debug(\"write value %s : %s => address : %s = %s\",\n message.topic.strip(base_topic), string_value,\n tag_definition.address, value)\n if value is not None:\n instrument.write_registers(tag_definition.address, value)", "def set_metric(self, slug, value, category=None, expire=None, date=None):\n keys = self._build_keys(slug, date=date)\n\n # Add the slug to the set of metric slugs\n self.r.sadd(self._metric_slugs_key, slug)\n\n # Construct a dictionary of key/values for use with mset\n data = {}\n for k in keys:\n data[k] = value\n self.r.mset(data)\n\n # Add the category if applicable.\n if category:\n self._categorize(slug, category)\n\n # Expire the Metric in ``expire`` seconds if applicable.\n if expire:\n for k in keys:\n self.r.expire(k, expire)", "def MakeSummary(name, value):\n summary = tf.Summary()\n val = summary.value.add()\n val.tag = str(name)\n val.simple_value = float(value)\n return summary" ]
[ "0.72285455", "0.6880584", "0.6831178", "0.67528003", "0.64814395", "0.6404036", "0.6218233", "0.6194534", "0.6135706", "0.60753894", "0.6063926", "0.60352796", "0.59790224", "0.59553665", "0.59262705", "0.589197", "0.58506346", "0.5779258", "0.5770089", "0.5764042", "0.5720421", "0.56907314", "0.56858194", "0.5682319", "0.56533355", "0.56444114", "0.5638115", "0.56074387", "0.5605615", "0.559948", "0.5586559", "0.5528929", "0.549665", "0.549665", "0.54704165", "0.5463643", "0.5431214", "0.5424882", "0.5422048", "0.53972435", "0.53872067", "0.5386981", "0.5385592", "0.5352262", "0.53347474", "0.5324351", "0.53186417", "0.529756", "0.52820486", "0.5277467", "0.52565557", "0.5255323", "0.52535784", "0.52503085", "0.52291757", "0.52208036", "0.52050483", "0.519897", "0.5192651", "0.51911", "0.518752", "0.5186884", "0.51812845", "0.5176006", "0.5163804", "0.51494056", "0.51200783", "0.5112283", "0.51115865", "0.51039296", "0.50991046", "0.5094091", "0.5090609", "0.50704265", "0.5069496", "0.50605375", "0.50474226", "0.5038293", "0.5022302", "0.5021665", "0.50193506", "0.5018139", "0.501587", "0.50054455", "0.5004593", "0.49931762", "0.49888155", "0.4980746", "0.497567", "0.49655676", "0.4964267", "0.4956744", "0.49559963", "0.4947641", "0.49454054", "0.49355763", "0.49353456", "0.49333215", "0.49301913", "0.49291602" ]
0.69385666
1
Returns an iterator over the set of value metrics. Each item returned is a tuple consisting of the metric name and the accumulated stats for the metric.
def metrics(self):
    return six.iteritems(self.__stats_table)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n prefix = len(META_NS) + 2\n for key, value in self.stats.items():\n yield (key[prefix:-6], int(value))", "def get_val_iterator(self) -> Iterable[Batch]:\n if self._val_name not in self._datasets:\n raise ValueError(\"Val data not provided.\")\n return self.get_iterator(self._val_name)", "def itervaluerefs(self):\r\n return self.data.itervalues()", "def metrics(self) -> List[Metric]:\n return self._metrics", "def metrics(self):\n return self._metrics", "def metrics(self):\n return self._metrics", "def itervaluerefs(self):\n for value in self.itervalues():\n yield ref(value)", "def metrics(self):\n return self.__metrics", "def stat_values(self):\n return self._stat_values", "def metrics(self) -> list[dict[str, dict[str, float | int]]]:\n return self.performance[\"performances\"]", "def get_valued_metrics(self):\n return self._valued_metrics", "def get_metrics(self) -> Dict[str, base.Number]:\n return self._metrics", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def values(self):\n return iter(util.annotate(v) for _, v in self._pairs())", "def metrics(self):\n if not self.df:\n return []\n\n column_metric_strings = [col.split(self.sep)[0] for col in self.df.columns]\n\n metrics = set()\n for colstring in column_metric_strings:\n try:\n metrics.add(Metric(colstring))\n except ValueError:\n continue\n\n return sorted(list(set(metrics)))", "def iter_values(self):\n values = self.values\n if (values is not None):\n yield from values", "def values(self):\n\t\treturn iter(self.data)", "def metrics(self) -> typing.Optional[typing.List[\"BucketMetrics\"]]:\n return self._values.get('metrics')", "def metrics(self):\r\n if not hasattr(self, '_observable_metrics'):\r\n self._observable_metrics = Metrics()\r\n return self._observable_metrics", "def iterator(self):\n return self.ValueIterator()", "def values(self):\n for ts in self:\n yield self[ts]", "def metrics(self):\n\n data = requests.get(\n f\"http://{self.prometheus_host}:{self.prometheus_port}/metrics\"\n ).content.decode()\n lines = [line for line in data.split(\"\\n\") if not line.startswith(\"#\")]\n metrics = {}\n for line in lines:\n if not line:\n continue\n\n name, value = line.split(\" \")\n\n try:\n value = int(value) # type: ignore\n except ValueError:\n value = float(value) # type: ignore\n\n if \"{\" in name and \"}\" in name:\n base = name[: name.index(\"{\")]\n tags = name[name.index(\"{\") + 1 : -1]\n tags = [tag.split(\"=\") for tag in tags.split(\",\")]\n tags = [(key, val.replace('\"', \"\")) for key, val in tags]\n\n name = base + \"#\" + \",\".join(f\"{k}:{v}\" for k, v in sorted(tags))\n\n metrics[name] = value\n\n return metrics", "def iter_values(self):\n if self.contributes:\n for value in self.values:\n if isinstance(value, GroupingComponent):\n for x in value.iter_values():\n yield x\n else:\n yield value", "def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n 
results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n self._logger.warning(\"Empty result from query : {0}\".format(query_spec))", "def get_state(self, duration):\n metrics = []\n\n if duration:\n for count_key in self.kv_counts:\n metrics.append(\n MetricObject(\n count_key,\n self.kv_counts[count_key] / duration\n )\n )\n\n for time_key in self.kv_times:\n values = self.kv_times[time_key]['values']\n unit = self.kv_times[time_key]['unit']\n\n metrics.append(\n MetricObject(\n '.'.join([time_key, 'mean']),\n stats_helper.find_mean(values),\n unit\n )\n )\n\n metrics.append(\n MetricObject(\n '.'.join([time_key, 'median']),\n stats_helper.find_median(values),\n unit\n )\n )\n\n for pct in self.percentiles:\n metrics.append(\n MetricObject(\n '.'.join([time_key, \"%sth_percentile\" % pct]),\n stats_helper.find_percentile(values, int(pct)),\n unit\n )\n )\n\n return metrics", "def iteritems(self):\n\t\tfor attribute_name in dir(self):\n\t\t\tif self._valuable(attribute_name):\n\t\t\t\tyield (attribute_name, getattr(self, attribute_name))", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n\n if \"jobs\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"jobs\"]:\n for metric in metric_jobs():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGIN_SETTINGS[\"models\"]):\n yield metric\n\n # --------------------------------------------------------------\n # Extras Function defined in configuration.py or the Regristry\n # # --------------------------------------------------------------\n if \"extras\" in PLUGIN_SETTINGS:\n for metric in collect_extras_metric(PLUGIN_SETTINGS[\"extras\"]):\n yield metric\n\n for metric in collect_extras_metric(__REGISTRY__):\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_app_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def IterBuildStats(\n self) -> Generator[Tuple[str, str, BaseBuildStats], None, None]:\n return self.IterToValueType(BuildStats)", "def itervalues(self):\r\n return self.data.itervalues()", "def itervalues(self):\r\n return self.data.itervalues()", "def __iter__(self):\n for val in self.value:\n yield val", "def __get_metrics_list(self):\n metrics = metrics_calculator.MetricsCalculator(self.processor)\n metric_list = []\n # Populate the list\n for key in metrics.get_raw_metrics().keys():\n name = metrics.get_raw_metrics()[key][\"NAME\"]\n formula = metrics.get_raw_metrics()[key][\"FORMULA\"]\n description = metrics.get_raw_metrics()[key][\"DESCRIPTION\"]\n metric = Metric(name, formula, description)\n metric_list.append(metric)\n return metric_list", "def metrics(self):\r\n return Metrics(self)", "def list_metrics(self):\n pass", "def values(self):\n return [i.value for i in self.value]", "def _monitor_metrics(self):\n metrics = [\"loss\"]\n try:\n m = U.metrics_from_model(self.model)\n if isinstance(m, list):\n metrics.extend(m)\n except:\n pass\n if self.val_data is not None:\n for m in metrics[:]:\n metrics.append(\"val_%s\" % (m))\n return metrics", "def itervalues(self):\n return iter(kvp.value for kvp in self.keyvaluepair_set.all())", "def values(self):\n for key in 
self.metadb.values():\n yield key, self.datadb[key]", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def _get_val_metrics(\n self,\n ) -> Tuple[_Metrics, torch.Tensor, torch.Tensor, torch.Tensor]:\n # Turn off batch-norm updates\n self.model.eval()\n\n with torch.no_grad():\n metrics = _Metrics()\n\n for val_img, val_gt in tqdm(\n self.val_loader, desc=\"Validating\", leave=False\n ):\n val_img = val_img.to(self.device)\n val_gt = val_gt.to(self.device)\n\n with autocast(enabled=self.config.mixed_precision):\n val_pred = self.model(val_img)[0]\n metrics.class_loss += self.class_loss_fn(val_pred, val_gt)\n\n metrics.accuracy += self._get_acc(val_pred, val_gt)\n metrics.f1_score += self._get_f1(val_pred, val_gt)\n\n metrics.class_loss /= len(self.val_loader)\n metrics.accuracy /= len(self.val_loader)\n metrics.f1_score /= len(self.val_loader)\n\n return metrics, val_img, val_gt, torch.sigmoid(val_pred)", "def metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsMetricArgs']]]]:\n return pulumi.get(self, \"metrics\")", "def itervalues(self):\n for key in self:\n yield self[key]", "def iter_stats_graph(graph: tfgnn.GraphTensor) -> Iterator[Tuple[str,\n tfgnn.Field]]:\n\n for set_type, set_name, set_obj in tfgnn.iter_sets(graph):\n if set_type != tfgnn.CONTEXT:\n # Output a feature for the size of the set.\n key = f\"{set_type}/{set_name}/{tfgnn.SIZE_NAME}\"\n yield key, set_obj.sizes\n\n # Output the values for each feature.\n for feature_name, tensor in set_obj.features.items():\n if tensor.dtype == tf.string:\n continue\n key = f\"{set_type}/{set_name}/{feature_name}\"\n yield key, tensor", "def Values(self):\r\n\t\treturn self._get_attribute('values')", "def get_metrics(self):\n self.logger.debug(\"Fetching metrics.\")\n return self._api_query(\"metrics\")['metrics']", "def accum_val_ops(outputs, names, global_step, output_dir, metric_summary, N):\n outs = []\n if N >= 0:\n outputs = outputs[:N]\n for i in range(len(outputs[0])):\n scalar = np.array(map(lambda x: x[i], outputs))\n assert (scalar.ndim == 1)\n add_value_to_summary(metric_summary, names[i], np.mean(scalar),\n tag_str='{:>27s}: [{:s}]: %f'.format(names[i], ''))\n outs.append(np.mean(scalar))\n return outs", "def value_stats(values):\n stats = describe(values)\n mean = stats.mean\n std = np.sqrt(stats.variance)\n t_stat = t.ppf(1 - 0.025, len(values) - 1)\n dev = t_stat * (std / np.sqrt(len(values)))\n trim_mean_v = trim_mean(values, 0.25)\n upper_val = mean + dev\n lower_val = mean - dev\n\n return mean, trim_mean_v, std, upper_val, lower_val", "def metric_tests(self) -> Dict[str, FAIRResultEvaluationCriterium]:\n return self._metric_tests", "def value_iterator(self):\n return _osgAnimation.mapVertexInfluence_value_iterator(self)", "def metrics(self):\n self.metrics = []\n \n self.clients()\n\n if len(self.metrics) > 0:\n return self.metrics\n else:\n return []", "def get_values(self):\n return map(lambda x: x.value(),self)", "def list_metrics(self):\n results = []\n if self.r.exists(self.metrics_key):\n keys = self.r.smembers(self.metrics_key)\n for k in keys:\n # metric_key, metric_type, metric_name, metric_help = keys.split(\" \", 3)\n 
results.append(k.split(\" \", 3))\n return results", "def values(self):\n return ValueCollection()", "def get_measured_outputs_values(self):\n obsOut = numpy.zeros(self.get_num_measured_outputs())\n i = 0\n for o in self.outputs:\n if o.is_measured_output():\n obsOut[i] = o.read_value_in_fmu(self.fmu)\n i += 1\n return obsOut", "def metrics(self):\n \n if self.mse.shape[0]>1:\n raise ValueError('Metrics can only handle single observations.')\n \n if self.N==1:\n pred = float('nan')\n err = float('nan')\n y_true = float('nan')\n else:\n pred = int(self._predictions[-1])\n err = self._mse[-1]\n y_true = int(self.label[0])\n \n is_outlier = {\"type\":\"GAUGE\",\"key\":\"is_outlier\",\"value\":pred}\n mse = {\"type\":\"GAUGE\",\"key\":\"mse\",\"value\":err}\n obs = {\"type\":\"GAUGE\",\"key\":\"observation\",\"value\":self.N - 1}\n threshold = {\"type\":\"GAUGE\",\"key\":\"threshold\",\"value\":self.threshold}\n \n label = {\"type\":\"GAUGE\",\"key\":\"label\",\"value\":y_true}\n \n accuracy_tot = {\"type\":\"GAUGE\",\"key\":\"accuracy_tot\",\"value\":self.metric[4]}\n precision_tot = {\"type\":\"GAUGE\",\"key\":\"precision_tot\",\"value\":self.metric[5]}\n recall_tot = {\"type\":\"GAUGE\",\"key\":\"recall_tot\",\"value\":self.metric[6]}\n f1_score_tot = {\"type\":\"GAUGE\",\"key\":\"f1_tot\",\"value\":self.metric[7]}\n f2_score_tot = {\"type\":\"GAUGE\",\"key\":\"f2_tot\",\"value\":self.metric[8]}\n \n accuracy_roll = {\"type\":\"GAUGE\",\"key\":\"accuracy_roll\",\"value\":self.metric[9]}\n precision_roll = {\"type\":\"GAUGE\",\"key\":\"precision_roll\",\"value\":self.metric[10]}\n recall_roll = {\"type\":\"GAUGE\",\"key\":\"recall_roll\",\"value\":self.metric[11]}\n f1_score_roll = {\"type\":\"GAUGE\",\"key\":\"f1_roll\",\"value\":self.metric[12]}\n f2_score_roll = {\"type\":\"GAUGE\",\"key\":\"f2_roll\",\"value\":self.metric[13]}\n \n true_negative = {\"type\":\"GAUGE\",\"key\":\"true_negative\",\"value\":self.metric[0]}\n false_positive = {\"type\":\"GAUGE\",\"key\":\"false_positive\",\"value\":self.metric[1]}\n false_negative = {\"type\":\"GAUGE\",\"key\":\"false_negative\",\"value\":self.metric[2]}\n true_positive = {\"type\":\"GAUGE\",\"key\":\"true_positive\",\"value\":self.metric[3]}\n \n nb_outliers_roll = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_roll\",\"value\":self.metric[14]}\n nb_labels_roll = {\"type\":\"GAUGE\",\"key\":\"nb_labels_roll\",\"value\":self.metric[15]}\n nb_outliers_tot = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_tot\",\"value\":self.metric[16]}\n nb_labels_tot = {\"type\":\"GAUGE\",\"key\":\"nb_labels_tot\",\"value\":self.metric[17]}\n \n return [is_outlier,mse,obs,threshold,label,\n accuracy_tot,precision_tot,recall_tot,f1_score_tot,f2_score_tot,\n accuracy_roll,precision_roll,recall_roll,f1_score_roll,f2_score_roll,\n true_negative,false_positive,false_negative,true_positive,\n nb_outliers_roll,nb_labels_roll,nb_outliers_tot,nb_labels_tot]", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def itervalues(self):\n return DictValuesIterator(self)", "def iter_stats_schema(schema: tfgnn.GraphSchema) -> Iterator[Tuple[str, Any]]:\n\n for set_type, set_name, set_obj in tfgnn.iter_sets(schema):\n if set_type != tfgnn.CONTEXT:\n # Output a feature for the size of the 
set.\n key = f\"{set_type}/{set_name}/{tfgnn.SIZE_NAME}\"\n yield key, set_obj\n\n # Output the values for each feature.\n for feature_name, feature in set_obj.features.items():\n if tf.dtypes.as_dtype(feature.dtype) == tf.string:\n continue\n key = f\"{set_type}/{set_name}/{feature_name}\"\n yield key, feature", "def values(self):\n return iter(self._noise_objs.values())", "def metrics_group():", "def itervalues(self, multi=False):\n for k, v in self.iteritems(multi=multi):\n yield v", "def metric_points(self) -> typing.Iterable[utils.MetricPoint]:\n return [\n utils.MetricPoint(key, value, self.compute_bounds(key, assertion))\n for key, value, assertion in self.read_metrics_and_assertions()]", "def values(self):\n return [p.value for p in self]", "def iter(self, measures):\n if measures == \"\": \n for name in sorted(self.measures.keys()):\n yield name, self.measures[name][0], self.measures[name][1]\n else:\n for name, _ in measures:\n if name in self.measures:\n yield name, self.measures[name][0], self.measures[name][1]", "def get_metrics(self) -> dict:\n return self.metric_dict", "def metrics(self) -> list:\n my_metrics = [\n FramesMetric(\"frames\"),\n FPSMetric(\"fps\"),\n EpisodeRewardMetric('PMM:episode_rewards'),\n EpisodeRewardMetricQuantile('P09:episode_rewards', quantile=0.9),\n EpisodeRewardMetricQuantile('P01:episode_rewards', quantile=0.1),\n EpisodeLengthMetric(\"episode_length\")\n ]\n\n return my_metrics + self.algo.metrics() + self.env_roller.metrics()", "def itervalues(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n yield self[keys.next()]\n return make_iter()", "def list_definition(self):\n return self._get(path='metrics')", "def _getMetrics(self):\n metric = None\n if self.metrics is not None:\n metric = self.metrics(self._currentRecordIndex+1)\n elif self.metricValue is not None:\n metric = self.metricValue\n else:\n raise RuntimeError('No metrics or metric value specified for dummy model')\n\n return {self._optimizeKeyPattern:metric}", "def __iter__(self):\n for name, score in zip(self.class_names, self.class_scores):\n # Ensure that no numpy floats get returned (causing bad errors)\n yield str(name), float(score)", "def metrics(self) -> pulumi.Output['outputs.RuntimeMetricsResponse']:\n return pulumi.get(self, \"metrics\")", "def collect(self) -> Metric:\n ret = self.source()\n if ret is None:\n LOGGER.warning('Statistics are not available')\n return\n gauge = GaugeMetricFamily('wemo_device_state', 'Status of Wemo device', labels=['address', 'parameter'])\n gauge.add_metric([ret.address, 'today_kwh'], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'current_power_mW'], ret.current_power,\n timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_on_time'], ret.today_on_time, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'on_for'], ret.on_for, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_standby_time'], ret.today_standby_time,\n timestamp=ret.collection_time.timestamp())\n\n yield gauge\n\n counter = CounterMetricFamily('wemo_power_usage', 'Today power consumption', labels=['address'])\n counter.add_metric([ret.address], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n yield counter", "def calc_stat_values(self):", "def values(self):\n # Loop through all buckets\n # Collect all values in each bucket\n values_list = []\n\n for linked_list in self.buckets:\n for key_value_tuple in 
linked_list.items():\n values_list.append(key_value_tuple[1])\n\n return values_list", "def __iter__(self):\n for value in self.__dict__.values():\n yield value", "def values (self):\n return self._values", "def values (self):\n return self._values", "def values(self):\n if not self.__values:\n self.rank()\n return self.__values", "def calculate_metrics(metrics_data: List[Tuple[Metric, DataType]]) -> List[float]:\n pass", "def _calculate_stats(values, factor=1):\n result = {'min': min(values) * factor,\n 'max': max(values) * factor,\n 'sum': sum(values) * factor,\n 'mean': 0,\n 'stddev': 0}\n\n if values:\n mean = sum(values) / float(len(values))\n result['mean'] = factor * mean\n result['stddev'] = (\n factor * math.sqrt((1.0 / (len(values) - 1))\n * sum((x - mean) ** 2 for x in values)))\n\n return result", "def __iter__(self):\n\n for lit in self.fvals:\n yield lit", "def get_measurements(self):\n metrics = {}\n for key in self.fields.keys():\n metrics[key] = []\n # What's in output:\n # proc_pid date virt res shrd cpu mem power gpus_power\n while not self.queue.empty():\n data = self.queue.get().strip().split()\n for field in self.fields:\n tp = self.fields[field]['type']\n idx = self.fields[field]['index']\n count = self.fields[field]['count']\n if count == -1:\n metrics[field].append(ResourceMonitor.str_to_type(data[idx], tp))\n elif count == 0:\n metrics[field].append([ResourceMonitor.str_to_type(data[idx], tp)])\n else:\n metrics[field].append([\n ResourceMonitor.str_to_type(data[index], tp) for index in xrange(idx, idx+count)\n ])\n return metrics", "def mem(self) -> List[float]:\n return list(map(attrgetter(\"mem\"), self.stats))", "def getAllMetrics(self):\n result = self.getReportMetrics()\n result.update(self.getOptimizationMetrics())\n return result", "def values(self):\n return self._values", "def values(self):\n return self._values", "def values(self):\n return self._values", "def values(self):\n return self._values", "def getValues(self):\n return [ float(val.text()) for val in self.values ]", "def get_values(self, names):\n r = []\n for n in names:\n if n in self.raw_metrics:\n r.append(self.raw_metrics[n])\n else:\n return None\n return r", "def get_individual_performance(self):\n\n divs = self.page.find_all(\"span\", {\"class\":\"value\"})\n values = [div.text for div in divs]\n return values", "def getReportMetrics(self):\n return self.__unwrapResults().reportMetrics", "def get_all_metrics(self):\n up_time = self.uptime()\n down_time = self.downtime()\n customer_sla = self.sla()\n objective = self.slo()\n indicator = self.sli()\n avail_percentage = self.availability()\n mt_bf = self.mtbf(up_time)\n mt_tr = self.mttr(down_time)\n list_results = [up_time,down_time,customer_sla,objective,indicator,avail_percentage,mt_bf,mt_tr]\n return list_results", "def get_metrics(self) -> np.ndarray:\n dice = np.mean(self.dice_scores)\n iou = np.mean(self.iou_scores)\n sens = np.mean(self.sens_scores)\n spec = np.mean(self.spec_scores)\n accu = np.mean(self.accu_scores)\n return dice, iou, sens, spec, accu", "def values(self) -> Iterable[U]:\n return self._store.values()", "def measures(self):\n return self._measures", "def get_metrics(self, slug_list):\n # meh. 
I should have been consistent here, but I'm lazy, so support these\n # value names instead of granularity names, but respect the min/max\n # granularity settings.\n keys = ['seconds', 'minutes', 'hours', 'day', 'week', 'month', 'year']\n key_mapping = {gran: key for gran, key in zip(GRANULARITIES, keys)}\n keys = [key_mapping[gran] for gran in self._granularities()]\n\n results = []\n for slug in slug_list:\n metrics = self.r.mget(*self._build_keys(slug))\n if any(metrics): # Only if we have data.\n results.append((slug, dict(zip(keys, metrics))))\n return results", "def values(self):\n return self[\"values\"]", "def values(self):\n return self[\"values\"]", "def metrics(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'metrics')\r\n\r\n return http.Request('GET', url), parsers.parse_json" ]
[ "0.61560905", "0.6048202", "0.6047815", "0.6044677", "0.6018319", "0.6018319", "0.5974941", "0.59604144", "0.5956948", "0.595421", "0.59379584", "0.58816415", "0.58809036", "0.58738184", "0.5869844", "0.5865912", "0.58568335", "0.5852111", "0.5831421", "0.58237755", "0.5791648", "0.5778051", "0.5740947", "0.57382125", "0.5714218", "0.5701945", "0.5700453", "0.5693304", "0.56856513", "0.56856513", "0.56762177", "0.5675749", "0.56555957", "0.56294423", "0.5622164", "0.56116885", "0.5609909", "0.55912775", "0.558789", "0.558384", "0.5583596", "0.5563081", "0.5548897", "0.5541585", "0.5539016", "0.5530069", "0.5509059", "0.55033016", "0.5494307", "0.5479326", "0.54687285", "0.5465305", "0.54572546", "0.545691", "0.54413015", "0.5441201", "0.5432908", "0.54306203", "0.54301053", "0.5426836", "0.54177254", "0.5412346", "0.5405109", "0.5402679", "0.5377965", "0.5371441", "0.53704286", "0.5368904", "0.5368183", "0.5361434", "0.53534055", "0.53452945", "0.533364", "0.53194934", "0.5315312", "0.5311732", "0.5311732", "0.5309284", "0.5308626", "0.53054947", "0.53051144", "0.53031665", "0.52937937", "0.52867246", "0.5279201", "0.5279201", "0.5279201", "0.5279201", "0.5277432", "0.5275297", "0.52685773", "0.52653176", "0.52608883", "0.5251633", "0.5241734", "0.52398556", "0.5225903", "0.5217988", "0.5217988", "0.52114576" ]
0.66805625
0
Resets the accumulated statistics back to initial state for metric data.
def reset_metric_stats(self):

    self.__stats_table = {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()", "def reset_metric_stats(self):\n\n self.__stats_table = {}", "def stats_reset(self):\n self.stats.reset()", "def stats_reset(self):\n self.stats.reset()", "def reset(self) -> None:\n self.statistics = defaultdict(float)", "def reset(self) -> None:\n self.statistics = defaultdict(int)", "def reset(self):\n self.stats = {}", "def reset(self):\n self.num_inst = 0\n self.sum_metric = 0.0", "def reset_state(self):\n for name in self.metrics:\n self.metrics[name].reset_state()", "def reset_stats() -> None:\n STATS[\"cleaned\"] = 0\n STATS[\"null\"] = 0\n STATS[\"unknown\"] = 0", "def reset(self):\n self.start_times = {}\n self.stats = defaultdict(OnlineMeter) # float defaults to 0", "def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0", "def reset_metric_variables(self) -> None:\n with self._lock:\n self._reset_metric_variables()", "def clear_stats(self):\n self._stats = None", "def reset(self):\n self.accumulation = None", "def reset(self):\n self.accumulation = None", "def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0", "def reset_metrics(self):\n self.metrics['loss'] = 0.0\n self.metrics['num_tokens'] = 0\n self.metrics['correct_tokens'] = 0\n self.metrics['correct_pred'] = 0\n self.metrics['pred_count'] = 0", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self):\n self.sum = [0.] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.] 
* len(self.topk)", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._is_ddp = get_rank() > -1", "def reset(self):\n self._total_value = 0.0\n self._count = 0", "def reset_all(self) -> None:\n for metric in self:\n metric.reset()", "def reset(self):\n self.ref_value = 0.0\n self._average = 0.0\n self.num_samples = 0", "def reset(self):\n for i in range(0, len(self.__counts)):\n self.__counts[i] = 0\n self.__overflow = 0\n self.__total_count = 0\n self.__total_values = 0\n self.__min = None\n self.__max = None", "def reset(self):\n self._value_estimates[:] = self.prior\n self.action_attempts[:] = 0\n self.last_action = None\n self.t = 0", "def reset(self):\n self._accumulated_time.clear()\n self._hit_count.clear()", "def reset(self):\n super().reset()\n self.m_n = 1\n self.m_num_errors = 0\n self.m_d = 0\n self.m_lastd = 0\n self.m_mean = 0.0\n self.m_std_temp = 0.0\n self.m_m2s_max = 0.0\n self.estimation = 0.0", "def _reset(self):\n\n # Checking one attribute is enough, because they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.n_samples_seen_\n del self.mean_\n del self.var_", "def reset_turn_stats(self):\n\n # Set the attribute value to 0\n self._current_score = 0", "def reset() -> None:\n Stat._cache = SortedDict()", "def reset(self):\n\n self.scaler = None\n self.isFitted = False\n self.__create_scaler()", "def reset(self):\n self.reset_count += 1\n self._init_data()", "def reset(self):\n super().reset()\n self.sample_count = 1\n self.miss_prob = 1.0\n self.miss_std = 0.0\n self.miss_prob_sd_min = float(\"inf\")\n self.miss_prob_min = float(\"inf\")\n self.miss_sd_min = float(\"inf\")", "def reset(self):\n self.current_exposure = None\n self.scores = {}", "def reset(self):\n reset_system_health_series()", "def reset_average(self):\n self._total_time = 0\n self._average_time = 0\n self._calls = 0", "def reset(self):\n self.damage_dealt = 0\n self.kills = 0\n self.got_killed = False\n self.fitness = 0", "def reset(self):\n self.values.clear()\n\n self.on_reset()", "def reset(self):\n self.loss = 0\n self.cnt = 0", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.mean_\n del self.var_", "def clear(self):\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator.clear()\n self.global_ap_calculator.clear()\n self.num_examples = 0", "def reset_memory_statistics(sender, **kwargs): # pylint: disable=unused-argument\n MemoryUsageData.start_counting()", "def reset(self):\n self.m = normalize(self.m0)\n self.t = 0.0", "def reset(self, complete=False):\n self.sum = 0\n self.n = 0\n if complete:\n self.running_avg = []", "def reset(self):\n previous_solution_values = tf.constant(np.tile((self._action_lower_bound + self._action_upper_bound) / 2,\n [self._planning_horizon * self._num_agents, 1]), dtype=tf.float32)\n previous_solution_values = tf.reshape(previous_solution_values, [-1])\n solution_variance_values = tf.constant(\n np.tile(np.square(self._action_lower_bound - self._action_upper_bound) / 16,\n [self._planning_horizon * self._num_agents, 1]), dtype=tf.float32)\n solution_variance_values = tf.reshape(solution_variance_values, [-1])\n self._m.assign(previous_solution_values)\n self._sigma.assign(tf.math.sqrt(solution_variance_values))", "def clear(self):\r\n\r\n\t\tself.ITerm = 0.0\r\n\t\tself.DTerm = 0.0\r\n\t\tself.last_error = 
0.0\r\n\r\n\t\tself.state_history = []\r\n\t\tself.error_history = []\r\n\t\tself.output_history = []\r\n\t\tself.sample_times = []\r\n\t\t\r\n\t\tself.OutputValue = 0.0", "def reset(self):\n self.total_pulls = 0\n self.total_score = 0\n self.npulls = np.zeros(self.k)\n self.score = np.zeros(self.k)", "def reset(self):\n self.temp_data.clear()", "def _reset(self):\n self._values = {}", "def reset(self):\n self.acc_loss = 0\n self.norm_term = 0", "def reset(self):\n if self.monotonic_energy is not None:\n self.monotonic_energy.reset()\n if self.chunk_energy is not None:\n self.chunk_energy.reset()\n self.bd_L_prev = 0\n self.key_tail = None", "def reset(self):\n\n self.results = []\n self._plot()", "def reset(self):\n self.observation = None\n self.history.clear()\n for i in range(len(self.answers)):\n self.answers[i] = None\n self.reset_metrics()", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.score = None\n self.true = None\n self.meta = None", "def reset_data_recorder(self):\n\n self.t_values = []\n self.x_values = []\n self.tau_values = []", "def reset(self):\n self.data = {}\n self.is_bound = False\n self._errors = None", "def reset(self):\n self.error_p = 0.0\n self.error_i = 0.0\n self.error_d = 0.0\n self.errors = [ 0.0 ] * self.samples\n if callable(self.debug_callback):\n self.debug_callback(\"reset\")", "def reset_stats(self):\n self.ships_left = self.sett.ship_limit\n self.score = 0\n self.level = 1", "def reset(self):\n self.table[:, :] = 0\n self.counts[:] = 0\n self.names = []\n self.hashesperid.resize(0)\n self.dirty = True", "def reset(self):\r\n self.state = copy.copy(self.mu)", "def reset(self):\n self.reset_image_estimate()\n self.init_m_aux()\n self.reset_hessian_and_bias()\n self.reset_adadelta_variables()", "def reset(self):\n self.visited = False\n self.calculated = False\n self.past_value = self.value\n self.value = 0", "def reset(self):\n self.pred_classes.clear()\n self.gold_classes.clear()\n self.pred_probas.clear()\n self.gold_probas.clear()\n self.loss = 0\n self.nb_batches = 0\n self.prec_rec_f1 = None\n self.acc = None\n self.mcc = None", "def reset(self):\n self.data = self._defaults", "def reset(self):\n self.mode = 0\n self.graphs = [[], [], []]\n self.coefficients = []\n self.sample = []", "def reset_wm(self):\n\n self.plan = []\n self.hist = []", "def reset(self):\n\t\tself._initial = None\n\t\tself._start = None\n\t\tself._time = 0\n\t\tself._total = 0\n\t\treturn self", "def reset(self):\n self._proportional = 0\n self._integral = 0\n self._derivative = 0\n\n self._last_time = self._current_time()\n self._last_output = None\n self._last_input = None", "def reset(self):\n self.__sets = []\n self._computed = False", "def reset(self):\n self._stat = CardMeta()", "def reset(self):\n self._hist.reset()\n return self", "def reset(self):\n self._clusters = {}\n self._clusters_val = {}\n self._centroids = {}\n self.store()", "def reset_stats(self):\n print(\"Reseting stats\")\n self.player_lives = 
self.ai_stts.player_lives\n self.score = 0\n self.level = 1", "def reset(self):\n self.edges = None\n self.chi = None\n self.k = None\n self.n_bins = None\n self.classes = None\n self.n_params = None", "def clear_all_accumulators(self):\n self._require_state(\"INITIALIZING\")\n for mi,accum in self._accums.items():\n accum.clear()", "def reset(self):\n self.work_state = work_state[\"Measuring\"]\n self.report_mode = report_mode[\"Initiative\"]\n self.duty_cycle = 0\n self.logger.info(\"{}: sensor resetted.\".format(self.sensor_name))", "def reset(self):\n self.x_prev = np.zeros_like(self.mu)", "def reset(self):\n self.baseline = None\n self.cut = None\n self.manual_push = 0", "def reset(self):\n self.head_pos = 0\n self.left_expands = 0\n self.memory = np.zeros(\n (\n self.max_memory if self.fixed_size else 1,\n self.memory_unit_size\n )\n )\n self.previous_read = np.zeros(self.memory_unit_size)\n if self.history is not None:\n self.history = defaultdict(list)", "def __reset(self):\n\t\tself.__highest = -float('inf')\n\t\tself.__lowest = float('inf')\n\t\tself.__total = 0\n\t\tself.__steps = 0\n\t\tself.__cold_days = 0", "def reset(self):\n self.reserve.reset()\n self.revenue.reset()\n self.transfers.reset()\n self.missions.reset()\n self.debt.reset()\n self.genfund.reset()\n self.macro.reset(self.pop[self.start_yr],self.eco_first)\n self.summary = pd.DataFrame(index=self.names,columns=[t for t in range(self.start_yr,self.stop_yr)])\n self.year = self.start_yr", "def reset(self):\n self.values = None\n self.keys = None\n self.mask = None", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_", "def reset(self):\n self.train_loss.reset_states()\n self.train_accuracy.reset_states()\n self.val_loss.reset_states()\n self.val_accuracy.reset_states()\n self.train_mIoU.reset_states()\n self.val_mIoU.reset_states()", "def reset(self):\n # Attempt to reset data loader\n self.data_loader_iter = iter(self.data_loader)\n self.num_batches = 0\n\n # Make sure calibrator will check the cache again when reset.\n self.cache_contents = None" ]
[ "0.8448358", "0.84143066", "0.83299094", "0.83299094", "0.8270503", "0.8153123", "0.79735655", "0.7772603", "0.77270293", "0.77003264", "0.76665866", "0.7614711", "0.75709176", "0.7540221", "0.75036174", "0.75036174", "0.7490715", "0.7471787", "0.74488425", "0.74488425", "0.74488425", "0.7399214", "0.7371752", "0.7350859", "0.73405546", "0.73228246", "0.7312936", "0.72774065", "0.7229525", "0.71553284", "0.7075319", "0.706154", "0.7059099", "0.70363504", "0.702773", "0.70184267", "0.70123726", "0.70105654", "0.70089495", "0.6993825", "0.69876575", "0.6979957", "0.69488895", "0.69401044", "0.68768877", "0.686644", "0.6866367", "0.6860591", "0.68563175", "0.6852236", "0.68487674", "0.6847574", "0.68278927", "0.6826811", "0.6824579", "0.6819496", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6810124", "0.6801448", "0.6796431", "0.6786861", "0.6774378", "0.676991", "0.6768513", "0.6748076", "0.674789", "0.6747709", "0.6743537", "0.67379457", "0.67296934", "0.67281467", "0.6725452", "0.67183644", "0.67178744", "0.6709824", "0.6704403", "0.6679279", "0.6678569", "0.66781825", "0.6674983", "0.66681176", "0.6668089", "0.6666285", "0.66623515", "0.6658285", "0.66577923", "0.66570085", "0.66570085", "0.66526616", "0.665174" ]
0.83412653
2
Merge data from another instance of this object.
def merge_stats(self, other):
    self[1] += other[1]
    self[2] = self[0] and min(self[2], other[2]) or other[2]
    self[3] = max(self[3], other[3])
    if self[3] == other[3]:
        self[4] = other[4]

    # Must update the call count last as update of the
    # minimum call time is dependent on initial value.

    self[0] += other[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge(self, other):\n from .dataset import Dataset\n\n if other is None:\n return self.to_dataset()\n else:\n other_vars = getattr(other, 'variables', other)\n coords = merge_coords_without_align([self.variables, other_vars])\n return Dataset._from_vars_and_coord_names(coords, set(coords))", "def merge(self, other):\n for p in other:\n for key, val in p.items():\n self.contents[key] = val\n\n return self", "def _merge(self, other: dict):\n self._storage = dict_merge(self._storage, other)", "def update(self, other):\n _merge_dicts(self, other)", "def merge(self, obj):\n pass", "def copy_from_other(self, other):\n self.data = other.data\n self.url = other.url\n self.container_factory = other.container_factory", "def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)", "def _merge(self):\n raise NotImplementedError", "def merge(self, other):\n log.debug('Merging: %s and %s' % (self.serialize(), other.serialize()))\n for k in self.keys():\n for new_item in other[k]:\n if new_item not in self[k]:\n self[k].append(new_item)\n log.debug('Result: %s' % self.serialize())\n return self", "def merge(self, other):\n if other.n_points != self.n_points:\n raise ValueError(\n 'Deduplicator size mismatch: '\n f'{self.n_points} != {other.n_points}'\n )\n self.data_reduced.extend(other.data_reduced)\n self.data_kd.extend(other.data_kd)", "def merged_rep(self,other):\n raise NotImplementedError(\"Abstract method\")", "def merge(self, other):\r\n self._train_datas = np.concatenate(\r\n [self._train_datas, other._train_datas], 0)\r\n self._train_labels = np.concatenate(\r\n [self._train_labels, other._train_labels], 0)", "def combine(self, other):\n # Copy and merge\n ppt = PPT()\n ppt.contents = dict(self.contents)\n ppt.merge(other)\n return ppt", "def __finalize__(self, other, method=None, **kwargs):\n self = super().__finalize__(other, method=method, **kwargs)\n # merge operation: using metadata of the left object\n if method == \"merge\":\n for name in self._metadata:\n print(\"self\", name, self.au_columns, other.left.au_columns)\n object.__setattr__(self, name, getattr(other.left, name, None))\n # concat operation: using metadata of the first object\n elif method == \"concat\":\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other.objs[0], name, None))\n return self", "def mergeWith(self, other):\n assert not other.synthesised\n self.globals.update(other.globals)\n self.signals.update(other.signals)\n self.startsOfDataPaths.update(other.startsOfDataPaths)\n self.subUnits.update(other.subUnits)\n \n for s in other.signals:\n s.ctx = self", "def copyDataFrom (self, other):\n\n self.localTimeString=other.localTimeString\n self._myHasLocalTimeString=other._myHasLocalTimeString\n \n self.utcTimeString=other.utcTimeString\n self._myHasUtcTimeString=other._myHasUtcTimeString\n \n self.daylightSavingTime=other.daylightSavingTime\n self._myHasDaylightSavingTime=other._myHasDaylightSavingTime\n \n self.epoch=other.epoch\n self._myHasEpoch=other._myHasEpoch\n \n self.utcOffsetMinutes=other.utcOffsetMinutes\n self._myHasUtcOffsetMinutes=other._myHasUtcOffsetMinutes", "def merge(self, other):\n\n assert self.ins_addr 
== other.ins_addr\n assert self.type == other.type\n\n o = self.copy()\n o.targets |= other.targets\n\n return o", "def merge(self, other):\n self.isotxsMetadata = self.isotxsMetadata.merge(\n other.isotxsMetadata, self, other, \"ISOTXS\", AttributeError\n )\n self.gamisoMetadata = self.gamisoMetadata.merge(\n other.gamisoMetadata, self, other, \"GAMISO\", AttributeError\n )\n self.pmatrxMetadata = self.pmatrxMetadata.merge(\n other.pmatrxMetadata, self, other, \"PMATRX\", AttributeError\n )\n self.micros.merge(other.micros)\n self.gammaXS.merge(other.gammaXS)\n self.neutronHeating = _mergeAttributes(self, other, \"neutronHeating\")\n self.neutronDamage = _mergeAttributes(self, other, \"neutronDamage\")\n self.gammaHeating = _mergeAttributes(self, other, \"gammaHeating\")\n self.isotropicProduction = _mergeAttributes(self, other, \"isotropicProduction\")\n self.linearAnisotropicProduction = _mergeAttributes(\n self, other, \"linearAnisotropicProduction\"\n )\n # this is lazy, but should work, because the n-order wouldn't be set without the others being set first.\n self.nOrderProductionMatrix = (\n self.nOrderProductionMatrix or other.nOrderProductionMatrix\n )", "def __add__(self, other):\r\n # Make a defaultdict of defaultdicts, the latter of which returns\r\n # None when an key is not present\r\n merged_data = defaultdict(lambda: defaultdict(lambda: None))\r\n\r\n # We will keep track of all unique sample_ids and metadata headers\r\n # we have seen as we go\r\n all_sample_ids = set()\r\n all_headers = set()\r\n\r\n # add all values from self into the merged_data structure\r\n for sample_id, data in self._metadata.iteritems():\r\n all_sample_ids.add(sample_id)\r\n for header, value in data.iteritems():\r\n all_headers.add(header)\r\n merged_data[sample_id][header] = value\r\n\r\n # then add all data from other\r\n for sample_id, data in other._metadata.iteritems():\r\n all_sample_ids.add(sample_id)\r\n for header, value in data.iteritems():\r\n all_headers.add(header)\r\n # if the two mapping files have identical sample_ids and\r\n # metadata columns but have DIFFERENT values, raise a value\r\n # error\r\n if merged_data[sample_id][header] is not None and \\\r\n merged_data[sample_id][header] != value:\r\n raise ValueError(\"Different values provided for %s for \"\r\n \"sample %s in different mapping files.\"\r\n % (header, sample_id))\r\n else:\r\n merged_data[sample_id][header] = value\r\n\r\n # Now, convert what we have seen into a normal dict\r\n normal_dict = {}\r\n for sample_id in all_sample_ids:\r\n if sample_id not in normal_dict:\r\n normal_dict[sample_id] = {}\r\n\r\n for header in all_headers:\r\n normal_dict[sample_id][header] = \\\r\n merged_data[sample_id][header]\r\n\r\n # and create a MetadataMap object from it; concatenate comments\r\n return self.__class__(normal_dict, self.Comments + other.Comments)", "def copyDataFrom (self, other):\n\n self.outErrorPackets=other.outErrorPackets\n self._myHasOutErrorPackets=other._myHasOutErrorPackets\n \n self.inErrorPackets=other.inErrorPackets\n self._myHasInErrorPackets=other._myHasInErrorPackets\n \n self.inDiscardPackets=other.inDiscardPackets\n self._myHasInDiscardPackets=other._myHasInDiscardPackets\n \n self.outUnicastPackets=other.outUnicastPackets\n self._myHasOutUnicastPackets=other._myHasOutUnicastPackets\n \n self.inMulticastPackets=other.inMulticastPackets\n self._myHasInMulticastPackets=other._myHasInMulticastPackets\n \n self.outBroadcastPackets=other.outBroadcastPackets\n 
self._myHasOutBroadcastPackets=other._myHasOutBroadcastPackets\n \n self.inBroadcastPackets=other.inBroadcastPackets\n self._myHasInBroadcastPackets=other._myHasInBroadcastPackets\n \n self.outMulticastPackets=other.outMulticastPackets\n self._myHasOutMulticastPackets=other._myHasOutMulticastPackets\n \n self.inUnknownProtocolPackets=other.inUnknownProtocolPackets\n self._myHasInUnknownProtocolPackets=other._myHasInUnknownProtocolPackets\n \n self.outDiscardPackets=other.outDiscardPackets\n self._myHasOutDiscardPackets=other._myHasOutDiscardPackets\n \n self.inUnicastPackets=other.inUnicastPackets\n self._myHasInUnicastPackets=other._myHasInUnicastPackets\n \n self.outOctets=other.outOctets\n self._myHasOutOctets=other._myHasOutOctets\n \n self.inOctets=other.inOctets\n self._myHasInOctets=other._myHasInOctets", "def PassData(self, other):\n for this,that in zip(self.DataSet, other.DataSet):\n for assoc in [ArrayAssociation.POINT, ArrayAssociation.CELL, ArrayAssociation.ROW]:\n if this.HasAttributes(assoc) and that.HasAttributes(assoc):\n this.GetAttributes(assoc).PassData(that.GetAttributes(assoc))", "def merge(self, other):\n\n for child in other.children:\n self.add_deep_copy_of(child, merged=True)", "def add(self, other):\n if not isinstance(other, self.__class__):\n raise ValueError(\n f\"Argument (type {type(other)}) is not a {self.__class__} instance\"\n )\n if len(other.data):\n self.data = pd.concat([self.data, other.data], ignore_index=True)\n self.sort()", "def merge(self, other):\n self._moments = merge_pqc([self, other])._moments\n self._parameters = sp.symarray(self.parameter_symbol, len(self.symbols))\n if self.flatten_circuit:\n self.flatten()", "def merge(self, other: ProjectMeta) -> ProjectMeta:\n return self.clone(\n obj_classes=self._obj_classes.merge(other.obj_classes),\n tag_metas=self._tag_metas.merge(other._tag_metas),\n )", "def concat(self: TAvalancheDataset, other: TAvalancheDataset) -> TAvalancheDataset:\n return self.__class__([self, other])", "def _merge_raw(self, other):\n if other is None:\n variables = OrderedDict(self.variables)\n else:\n # don't align because we already called xarray.align\n variables = merge_coords_without_align(\n [self.variables, other.variables])\n return variables", "def merge(self, graph):\n # keep previous self.filename\n # copy data\n for x in graph.data:\n self.data.append(x)\n # copy headers, unless already exists (is so, info is lost)\n for key in graph.headers:\n if key not in self.headers:\n self.headers.update({key: graph.headers[key]})\n # copy graphInfo, unless already exists (is so, info is lost)\n for key in graph.graphInfo:\n if key not in self.graphInfo:\n self.graphInfo.update({key: graph.graphInfo[key]})\n # copy sampleInfo, unless already exists (is so, info is lost)\n for key in graph.sampleInfo:\n if key not in self.sampleInfo:\n self.sampleInfo.update({key: graph.sampleInfo[key]})", "def mergeWith(self, others):", "def extend(self, other_rollout):\n\n assert not self.is_terminal()\n assert all(k in other_rollout.fields for k in self.fields)\n for k, v in other_rollout.data.items():\n self.data[k].extend(v)\n self.last_r = other_rollout.last_r", "def __finalize__(self, other, method=None, **kwargs):\r\n # merge operation: using metadata of the left object\r\n if method == 'merge':\r\n for name in self._metadata:\r\n object.__setattr__(self, name, getattr(other.left, name, None))\r\n # concat operation: using metadata of the first object\r\n elif method == 'concat':\r\n for name in self._metadata:\r\n 
object.__setattr__(self, name, getattr(other.objs[0], name, None))\r\n else:\r\n for name in self._metadata:\r\n object.__setattr__(self, name, getattr(other, name, None))\r\n return self", "def merge_content(self, other):\n self.__content += other.__content", "def union(self, other):\n self.vertices.extend(other.vertices)\n self.edges.extend(other.edges)\n self.faces.extend(other.faces)\n return self", "def merge_other(self, other):\n assert(not other.isSet())\n with self.__cond:\n if self.__isset:\n other.set(self.__data)\n else:\n self.__merged.append(other)", "def get_merged_data(self):\n return self._combinedata", "def add_other_meta_data(self, other: _MetaData) -> None:\n\n for key in other._meta_data_dict.keys():\n self.add_data(key, other._meta_data_dict[key])", "def combine(self, other) -> None:\n assert self.id_ == other.id_\n assert self.type_ == other.type_\n self.count += other.count", "def merge(self, other: PerfData):\n self.total_samples += other.total_samples\n if self.total_time == 0.0:\n self.total_time = other.total_time\n self.compile_time = max(self.compile_time, other.compile_time)\n self.programming_time = max(\n self.programming_time, other.programming_time\n )\n if self.est_samples_per_sec == 0.0:\n self.est_samples_per_sec = other.est_samples_per_sec\n else:\n assert (\n self.est_samples_per_sec == other.est_samples_per_sec\n ), \"Expected all fabric-based performance estimates to be identical\"\n\n if self.total_time > 0:\n self.samples_per_sec = float(self.total_samples) / self.total_time\n else:\n self.samples_per_sec = 0.0", "def merge(self, other):\n merged = copy.deepcopy(self.__dict__())\n for k, v in other.__dict__():\n if k in merged and getattr(self, k):\n if isinstance(v, (string_types, bool)):\n pass\n else:\n list_of_stuff = merged.get(k, [])\n for entry in v:\n if entry not in list_of_stuff:\n list_of_stuff.append(entry)\n merged[k] = list_of_stuff\n else:\n merged[k] = v\n return CondaEnvironmentProvider(**merged)", "def __init__(self, v1, v2):\n mergedData = []\n list(map(mergedData.extend, list(zip_longest(v1, v2))))\n self.data = list(filter(lambda x: x is not None, mergedData))\n self.index = 0", "def combine_data(self, object, additional_data):\n object[\"ancestors\"] = additional_data[\"ancestors\"] if self.cartographer_client else []\n object[\"position\"] = additional_data.get(\"order\", 0) if additional_data else 0\n object = super(ResourceMerger, self).combine_data(object, additional_data)\n return combine_references(object)", "def merge(self, other):\n extras = other.difference(self)\n if len(extras) > 0:\n self.update(extras)\n self.reset()\n return True\n return False", "def merge(self, other):\n # todo: Using the return value None to denote the identity is a\n # bit dangerous, since a function with no explicit return statement\n # also returns None, which can lead to puzzling bugs. 
Maybe return\n # a special singleton Identity object instead?\n raise NotImplementedError", "def __add__(self, other):\n self.__dict__.update(other)\n return self", "def merge(self, other):\n self._segments.extend(other._segments)\n self._segments.sort()", "def merge_struct_arrays(self, data1, data2):\n data_final = np.concatenate((data1, data2))\n return data_final", "def copy(self):\n return self.update({})", "def merge(self, other_config):\n # Make a copy of the current attributes in the config object.\n config_options = copy.copy(self._user_provided_options)\n\n # Merge in the user provided options from the other config\n config_options.update(other_config._user_provided_options)\n\n # Return a new config object with the merged properties.\n return Config(**config_options)", "def __add__(self, other):\n merged_profile = super().__add__(other)\n\n # unstruct specific property merging\n merged_profile._empty_line_count = (\n self._empty_line_count + other._empty_line_count)\n merged_profile.memory_size = self.memory_size + other.memory_size\n samples = list(dict.fromkeys(self.sample + other.sample))\n merged_profile.sample = random.sample(list(samples),\n min(len(samples), 5))\n\n # merge profiles\n merged_profile._profile = self._profile + other._profile\n\n return merged_profile", "def update_inplace_from(self, other):\n self.__dict__ = other.__dict__.copy()", "def merge(self, other):\n self._mergeKeys(other)\n self._binaryOperationCheck(other)\n for id in self.clock.keys():\n print id\n self.clock[id] = max(self.clock[id], other.clock[id])", "def combine(self, existing):\n return self", "def _extend(self, other):\n for key, value in list(other.entries.items()):\n self._add_entry(key, value)", "def join(self, other, on):\n\t\t# check for correct join\n\t\tif not (on in self.headers or on in other.headers):\n\t\t\tprint \"Error: header '{0}' not found in both collections\".format(on)\n\t\t\treturn None\n\n\t\t# create new dataset\n\t\tjoined = Dataset()\n\t\t\n\t\t# fill new dataset with combined data\n\t\tmappedHeaders = joinHeaders(self, other, joined, on)\n\t\tmergeRows(self, other, joined, on, mappedHeaders)\n\t\tjoined.ensureFilled()\n\n\t\t# return newly created dataset\n\t\treturn joined", "def merge_two_dicts(self, x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_contextual(self, other):\n # TODO: This is currently dependent on our data model? 
Make more robust to schema changes\n # Currently we assume all lists at Compound level, with 1 further potential nested level of lists\n for k in self.keys():\n # print('key: %s' % k)\n for item in self[k]:\n # print('item: %s' % item)\n for other_item in other.get(k, []):\n # Skip text properties (don't merge names, labels, roles)\n if isinstance(other_item, six.text_type):\n continue\n for otherk in other_item.keys():\n if isinstance(other_item[otherk], list):\n if len(other_item[otherk]) > 0 and len(item[otherk]) > 0:\n other_nested_item = other_item[otherk][0]\n for othernestedk in other_nested_item.keys():\n for nested_item in item[otherk]:\n if not nested_item[othernestedk]:\n nested_item[othernestedk] = other_nested_item[othernestedk]\n elif not item[otherk]:\n item[otherk] = other_item[otherk]\n log.debug('Result: %s' % self.serialize())\n return self", "def __add__(self, other):\n mesh = deepcopy(self)\n mesh.MergeWith(other)\n return mesh", "def copy_(self, other):\n self.share.copy_(other.share)\n self.encoder = other.encoder", "def combine_dict(self, dict2):\n # iterate through smaller data set\n # base_set will be the larger set and is used for updating\n if len(self.content[\"values\"]) > len(dict2[\"values\"]):\n large_set = self.content[\"values\"]\n small_set = dict2[\"values\"]\n base_set = self.content\n else:\n small_set = self.content[\"values\"]\n large_set = dict2[\"values\"]\n base_set = dict2\n\n subset = {}\n for key in small_set.keys():\n # determine wether to compare keys\n if key in large_set:\n updated_l = large_set[key][\"updated_at\"]\n updated_s = small_set[key][\"updated_at\"]\n if updated_l == 'NULL':\n if updated_s != 'NULL':\n # update to not NULL set\n # if both updated_at are NULL, things\n # are ambiguos. We could defer to created_at\n # but for simplicity we will default to\n # the values in the larger set\n subset[key] = small_set[key]\n else:\n if updated_s == 'NULL':\n # update to not NULL set\n subset[key] = large_set[key]\n else:\n if updated_l > updated_s:\n subset[key] = large_set[key]\n else:\n subset[key] =small_set[key]\n else:\n subset[key] = small_set[key]\n base_set[\"values\"].update(subset)\n new_obj = BackupData()\n new_obj.load_from_dict(base_set)\n return new_obj", "def hallucinate_merge(self, other):\n res = CompleteVec(None,None,self.max_num_samples)\n res.needs_update = True\n return res", "def extend(self, other):\n if len(self.vertices[0]) != len(other.vertices[0]):\n raise ValueError(\"Rank mismatch ({0} != \"\n \"{1})\".format(self.vertices.shape[1],\n other.vertices.shape[1]))\n if self._geotype != other._geotype:\n raise TypeError(\"Geometry mismatch ({0} != \"\n \"{1})\".format(self._geotype, other._geotype))\n\n self.vertices = np.vstack([self.vertices, other.vertices])\n self._cache = {}\n return self", "def merge(self, *other):\n # Compute union of Fingerprints\n union = set().union(self, *other)\n # Create new fingerprint from union\n result = super(Fingerprint, type(self)).__new__(type(self), union)\n # Set n_flows to combination of self and other\n result.__setattr__('n_flows', self.n_flows + sum(o.n_flows for o in other))\n # Return result\n return result", "def merge_datasets(self, other):\r\n if isinstance(other, SpatialDataFrame) and \\\r\n other.geometry_type == self.geometry_type:\r\n return pd.concat(objs=[self, other], axis=0)\r\n elif isinstance(other, DataFrame):\r\n return pd.concat(objs=[self, other], axis=0)\r\n elif isinstance(other, Series):\r\n self['merged_datasets'] = other\r\n elif 
isinstance(other, SpatialDataFrame) and \\\r\n other.geometry_type != self.geometry_type:\r\n raise ValueError(\"Spatial DataFrames must have the same geometry type.\")\r\n else:\r\n raise ValueError(\"Merge datasets cannot merge types %s\" % type(other))", "def __add__ ( self, other, resample_opts=None ):\n result = ObservationStorage (datadir=self.datadir, \\\n resample_opts=resample_opts )\n if self.date[0] > other.date[0]:\n start_date = other.date[0]\n else:\n start_date = self.date[0]\n if self.date[-1] > other.date[-1]:\n end_date = other.date[-1]\n else:\n end_date = self.date[-1]\n \n delta = datetime.timedelta ( days=1 )\n this_date = start_date.date()\n end_date = end_date.date() + delta\n \n this_obs_dates = [ x.date() for x in self.date ]\n other_obs_dates = [ x.date() for x in other.date ]\n \n date = [] ; vza = [] ; vaa = [] ; sza = [] ; saa = []\n emulator = [] ; mask = [] ; data_pntr = [] ; spectral = []\n sensor = []\n \n while this_date < end_date:\n if this_date in this_obs_dates:\n iloc = this_obs_dates.index ( this_date )\n date.append ( self.date[iloc] )\n emulator.append ( self.emulator[iloc] )\n vza.append ( self.vza[iloc] )\n sza.append ( self.sza[iloc] )\n vaa.append ( self.vaa[iloc] )\n saa.append ( self.saa[iloc] )\n spectral.append ( self.spectral )\n mask.append ( ( self.get_mask, [iloc] ) )\n sensor.append ( self.sensor )\n \n data_pntr.append ( self._data_pntr[iloc] )\n if this_date in other_obs_dates:\n iloc = other_obs_dates.index ( this_date )\n date.append ( other.date[iloc] )\n emulator.append ( other.emulator[iloc] )\n vza.append ( other.vza[iloc] )\n sza.append ( other.sza[iloc] )\n vaa.append ( other.vaa[iloc] )\n saa.append ( other.saa[iloc] )\n spectral.append ( other.spectral )\n mask.append ( ( other.get_mask, [iloc] ) )\n sensor.append ( other.sensor )\n data_pntr.append ( other._data_pntr[iloc] )\n this_date += delta\n result.vza = vza\n result.vaa = vaa\n result.sza = sza \n result.saa = saa \n result.date = date\n result.spectral = spectral\n result.masks = mask\n result.sensor = sensor\n result.emulator = emulator\n result._data_pntr = data_pntr\n return result", "def Merge(self, other):\n\n # Logging just in case\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: before\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: deleted\", %s);'\n %(other.persistant['id'], \n sql.FormatSqlValue('details',\n repr(other.persistant))))\n\n # Fields which can be summed\n for f in ['plays', 'skips']:\n self.persistant[f] = (self.persistant.get(f, 0) +\n other.persistant.get(f, 0))\n\n # Date fields where we take the newest\n for f in ['last_played', 'last_skipped', 'last_action']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a > b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Date fields where we take the oldest\n for f in ['creation_time']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a < b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Fields where we only clobber ours if we don't have a value\n for f in ['artist', 'album', 'song']:\n if not self.persistant.has_key(f) or not 
self.persistant[f]:\n self.persistant[f] = other.persistant.get(f, None)\n\n # Sometimes the number is a placeholder\n if self.persistant.has_key('number') and self.persistant['number'] == -1:\n self.persistant['number'] = other.persistant.get('number', -1)\n if not self.persistant.has_key('number'):\n self.persistant['number'] = other.persistant.get('number', -1)\n\n # Update the id in the tags table\n tags = self.db.GetRows('select tag from tags where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: tags: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(tags))))\n\n try:\n self.db.ExecuteSql('update tags set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n except:\n # This can happen if the is already a matching tag for the first track\n pass\n\n # Update the id in the paths table\n paths = self.db.GetRows('select path from paths where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: paths: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(paths))))\n \n self.db.ExecuteSql('update paths set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: after\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('commit;')", "def concat(self, other):\n self.add_rules(other.cliques)\n self.prop_names.update(other.prop_names)", "def source(self, other):\n raise NotImplementedError", "def __add__(self, other):\n if not isinstance(other, RunTS):\n raise TypeError(f\"Cannot combine {type(other)} with RunTS.\")\n\n # combine into a data set use override to keep attrs from original\n combined_ds = xr.combine_by_coords(\n [self.dataset, other.dataset], combine_attrs=\"override\"\n )\n\n n_samples = (\n self.sample_rate\n * float(\n combined_ds.time.max().values - combined_ds.time.min().values\n )\n / 1e9\n ) + 1\n\n new_dt_index = make_dt_coordinates(\n combined_ds.time.min().values,\n self.sample_rate,\n n_samples,\n self.logger,\n )\n\n new_run = RunTS(\n run_metadata=self.run_metadata,\n station_metadata=self.station_metadata,\n survey_metadata=self.survey_metadata,\n )\n\n new_run.dataset = combined_ds.interp(\n time=new_dt_index, method=\"slinear\"\n )\n\n new_run.run_metadata.update_time_period()\n new_run.station_metadata.update_time_period()\n new_run.survey_metadata.update_time_period()\n new_run.filters = self.filters\n new_run.filters.update(other.filters)\n\n return new_run", "def __add__(self, other):\n if type(other) is not type(self):\n raise TypeError('`{}` and `{}` are not of the same profiler type.'.\n format(type(self).__name__, type(other).__name__))\n\n # error checks specific to its profiler\n self._add_error_checks(other)\n\n merged_profile = self.__class__(\n data=None, samples_per_update=self._samples_per_update,\n min_true_samples=self._min_true_samples, options=self.options\n )\n merged_profile.encoding = self.encoding\n if self.encoding != other.encoding:\n merged_profile.encoding = 'multiple files'\n\n merged_profile.file_type = self.file_type\n if self.file_type != 
other.file_type:\n merged_profile.file_type = 'multiple files'\n\n merged_profile.total_samples = self.total_samples + other.total_samples\n\n merged_profile.times = utils.add_nested_dictionaries(self.times,\n other.times)\n\n return merged_profile", "def copyAttributes(self, other, add_nxpars=False):\n import copy\n \n self.setTitle(other.getTitle())\n self.setDataSetType(other.getDataSetType())\n self.setAllAxisLabels(other.getAllAxisLabels())\n self.setAllAxisUnits(other.getAllAxisUnits())\n self.setYLabel(other.getYLabel())\n self.setYUnits(other.getYUnits())\n if len(self.attr_list.keys()) == 0:\n self.attr_list = copy.copy(other.attr_list)\n else:\n self.attr_list.instrument = copy.copy(other.attr_list.instrument)\n self.attr_list.sample = copy.copy(other.attr_list.sample)\n\n if add_nxpars:\n nxpar_keys = [item[0] for item in self.attr_list.iteritems() \\\n if isinstance(item[1], NxParameter)]\n\n for nxpar_key in nxpar_keys:\n self.attr_list[nxpar_key] += other.attr_list[nxpar_key]\n else:\n # Do nothing\n pass\n \n keys_to_get = [other_key for other_key in other.attr_list \\\n if other_key not in self.attr_list]\n \n for key_to_get in keys_to_get:\n self.attr_list[key_to_get] = \\\n copy.copy(other.attr_list[key_to_get])", "def updateFromContext(self, other):\n value = self.valueType.set(self.value, other.value)\n self.set(value)\n self.origins.extend(other.origins)", "def combine_data(self, object, additional_data):\n for k, v in additional_data.items():\n if isinstance(v, list):\n object[k] = object.get(k, []) + v\n else:\n object[k] = v\n for instance in object.get(\"instances\", []):\n if instance.get(\"sub_container\", {}).get(\"top_container\", {}).get(\"_resolved\"):\n del instance[\"sub_container\"][\"top_container\"][\"_resolved\"]\n object = super(ArchivalObjectMerger, self).combine_data(object, additional_data)\n return combine_references(object)", "def merge(self, other):\n\n if not self.can_merge(other):\n raise ValueError('These protocols can not be safely merged.')\n\n inputs_to_consider = self._find_inputs_to_merge()\n\n for input_path in inputs_to_consider:\n\n merge_behavior = getattr(type(self), input_path.property_name).merge_behavior\n\n if merge_behavior == MergeBehaviour.ExactlyEqual:\n continue\n\n if (isinstance(self.get_value(input_path), ProtocolPath) or\n isinstance(other.get_value(input_path), ProtocolPath)):\n\n continue\n\n if merge_behavior == InequalityMergeBehaviour.SmallestValue:\n value = min(self.get_value(input_path), other.get_value(input_path))\n elif merge_behavior == InequalityMergeBehaviour.LargestValue:\n value = max(self.get_value(input_path), other.get_value(input_path))\n else:\n raise NotImplementedError()\n\n self.set_value(input_path, value)\n\n return {}", "def merge_user_information(self, sid):\n pprint(self.extracted_information)\n for (field, value) in self.extracted_information.items():\n value = value[0] # TODO: should set data for everything in list but will do later\n self.data.set_data(sid, field, value[0])", "def __add__(self, other, inplace=False, **kwargs):\n output = super(HERAData, self).__add__(other, inplace=inplace, **kwargs)\n if inplace:\n output = self\n output._determine_blt_slicing()\n output._determine_pol_indexing()\n if not inplace:\n return output", "def _copy_data_from(self, original):\n raise NotImplementedError()", "def merge_results(self, other_processor):\n if not isinstance(other_processor, self.__class__):\n raise ValueError(f\"Can only extend with another \"\n f\"{self.__class__.__name__} 
instance.\")\n\n # Where there is overlap, there _should_ be agreement.\n self._evidence_counts.update(other_processor._evidence_counts)\n self._source_counts.update(other_processor._source_counts)\n self._belief_scores.update(other_processor._belief_scores)\n\n # Merge the statement JSONs.\n for k, sj in other_processor.__statement_jsons.items():\n if k not in self.__statement_jsons:\n self.__statement_jsons[k] = sj # This should be most of them\n else:\n # This should only happen rarely.\n for evj in sj['evidence']:\n self.__statement_jsons[k]['evidence'].append(evj)\n\n # Recompile the statements\n self._compile_results()\n return", "def merge(self, new_store):\n if new_store.name and len(new_store.name) > 0:\n self.name = new_store.name\n if new_store.address and len(new_store.address) > 0:\n self.address = new_store.address\n if new_store.city and len(new_store.city) > 0:\n self.city = new_store.city\n if new_store.state and len(new_store.state) > 0:\n self.state = new_store.state\n if new_store.zip and new_store.zip > 0:\n self.zipcode = new_store.zip\n if new_store.phone and new_store.phone > 0:\n self.phone = new_store.phone", "def __add__(self, other):\n train = copy.deepcopy(self.train)\n\n for img_path, pid, camid, dsetid in other.train:\n pid += self.num_train_pids\n camid += self.num_train_cams\n dsetid += self.num_datasets\n train.append((img_path, pid, camid, dsetid))\n\n ###################################\n # Note that\n # 1. set verbose=False to avoid unnecessary print\n # 2. set combineall=False because combineall would have been applied\n # if it was True for a specific dataset; setting it to True will\n # create new IDs that should have already been included\n ###################################\n if isinstance(train[0][0], str):\n return ImageDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False\n )\n else:\n return VideoDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False,\n seq_len=self.seq_len,\n sample_method=self.sample_method\n )", "def merge(self, other_btree):\n pass", "def update(self, other):\n self._start = other._start\n self._end = other._end\n self._nodes = {k: v.copy() for k,v in other._nodes.iteritems()}\n self._edges = {k: set(v) for k,v in other._edges.iteritems()}\n self._names = set(other._names)\n self.current = other.current", "def merge_with(self, other: \"Availability\") -> \"Availability\":\n\n if not isinstance(other, Availability):\n raise Exception(\"Please provide an Availability object.\")\n if not other.overlaps(self, strict=False):\n raise Exception(\"Only overlapping Availabilities can be merged.\")\n\n return Availability(\n start=min(self.start, other.start),\n end=max(self.end, other.end),\n event=getattr(self, \"event\", None),\n person=getattr(self, \"person\", None),\n room=getattr(self, \"room\", None),\n )", "def union(self, other: Catalog) -> Catalog:\n cat = self.copy()\n oth_cp = other.copy()\n\n for k in oth_cp.keys():\n for ver_id, version in oth_cp[k].versions.items():\n cat[k][ver_id] = version\n return cat", "def sync(self, other):\n pass # TODO", "def __add__(self, other):\n\n if not isinstance(other, Photons):\n raise ValueError('Can only add a Photons object to another Photons object.')\n\n # don't want to modify what is being added\n other = other.copy()\n\n # make column units consistent with self\n other.match_units(self)\n\n # add and /or update observation 
columns as necessary\n self.add_observations_column()\n other.add_observations_column()\n n_obs_self = len(self.obs_metadata)\n other['n'] += n_obs_self\n\n # re-reference times to the datum of self\n other.set_time_datum(self.time_datum)\n\n # stack the data tables\n photons = _tbl.vstack([self.photons, other.photons])\n\n # leave it to the user to deal with sorting and grouping and dealing with overlap as they see fit :)\n obs_metadata = self.obs_metadata + other.obs_metadata\n obs_times = list(self.obs_times) + list(other.obs_times)\n obs_bandpasses = list(self.obs_bandpasses) + list(other.obs_bandpasses)\n\n return Photons(photons=photons, obs_metadata=obs_metadata, time_datum=self.time_datum, obs_times=obs_times,\n obs_bandpasses=obs_bandpasses)", "def populate(self, fid1, fid2):\n self.input1 = json.load(fid1)\n self.input2 = json.load(fid2)", "def merge(self, other):\n\n if not self.can_merge(other):\n msg = 'Unable to merge \"{}\" with \"{}\" filters'.format(\n self.type, other.type)\n raise ValueError(msg)\n\n # Create deep copy of filter to return as merged filter\n merged_filter = copy.deepcopy(self)\n\n # Merge unique filter bins\n merged_bins = self.bins + other.bins\n\n # Sort energy bin edges\n if 'energy' in self.type:\n merged_bins = sorted(merged_bins)\n\n # Assign merged bins to merged filter\n merged_filter.bins = list(merged_bins)\n return merged_filter", "def merge(self, other: \"GraphSet\") -> None:\n if other.name != self.name:\n raise UnmergableGraphSetsException(\n f\"Unable to merge graph with name {other.name} into {self.name}\"\n )\n if other.version != self.version:\n raise UnmergableGraphSetsException(\n f\"Unable to merge graph with version {other.version} into {self.version}\"\n )\n self.start_time = min(self.start_time, other.start_time)\n self.end_time = max(self.end_time, other.end_time)\n self.resources += other.resources\n self._resolve_duplicates()\n self.errors += other.errors\n self.stats.merge(other.stats)", "def copy_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.copy_from(other.parent)\n self.isolated_names = copy.copy(other.isolated_names)\n self.modified = copy.copy(other.modified)\n self.read = copy.copy(other.read)\n self.deleted = copy.copy(other.deleted)\n self.bound = copy.copy(other.bound)\n self.annotations = copy.copy(other.annotations)\n self.params = copy.copy(other.params)", "def merge(self, object_class):\n other_oc = self.schema.get_object_class(object_class)\n self.required_attrs |= other_oc.required_attrs\n self.allowed_attrs |= other_oc.allowed_attrs", "def fill(self, other):\n if self.stream_id is None:\n self.stream_id = other.stream_id\n\n if self.type is None:\n self.type = other.type\n\n if self.length is None:\n self.length = other.length\n\n if self.timestamp is None:\n self.timestamp = other.timestamp\n\n assert self.stream_id is not None\n assert self.type is not None\n assert self.length is not None\n assert self.timestamp is not None\n assert self.object_id is not None", "def __iadd__(self, other):\n self.MergeWith(other)\n return self", "def copy(self):\n new_data_collection = DataCollection()\n for item in self.iteritems():\n new_data_collection.add_data(item)\n return new_data_collection", "def merge(self, other):\n if other is None:\n return\n if self.theta1 > other.theta1:\n self.theta1 = other.theta1\n self.p1 = other.p1\n if self.theta2 < other.theta2:\n self.theta2 = other.theta2\n self.p2 = other.p2", "def initFromOther(self, 
oOther):\n for sAttr in self.getDataAttributes():\n setattr(self, sAttr, getattr(oOther, sAttr));\n return self;", "def update(self, other: dict):\n for key in other:\n if key in self:\n self[key] = other[key]", "def join_data(self, base_data, join_data, base_field, join_fields):\n for data in base_data:\n extra = join_data[data[base_field]]\n for field in join_fields:\n data[field] = extra[field]\n \n return base_data", "def copy_with(self):\n return self.copy()", "def copy_values(self, another):\n\n # Copy all value, uncertainty, and source information from the other\n # ExoParameter object.\n if isinstance(another, ExoParameter):\n self.reference = another.reference\n self.uncertainty = another.uncertainty\n self.uncertainty_lower = another.uncertainty_lower\n self.uncertainty_upper = another.uncertainty_upper\n self.units = another.units\n self.url = another.url\n self.value = another.value\n else:\n raise TypeError(\"Cannot copy values from a non-ExoParameter obj!\")", "def extend(self, other, adapt_conf=True):\n # Check if category metadata match\n if (self.size() > 0) and (other.size() > 0):\n for attr in [\"is_cat_inclusive\", \"is_categorised\"]:\n a, b = getattr(self, attr), getattr(other, attr)\n if a != b:\n raise ConcatenationError(\n f\"Categorisation metadata is different for '{attr}': {a} != {b}\"\n )\n elif other.size() > 0:\n for attr in [\"is_cat_inclusive\", \"is_categorised\"]:\n setattr(self, attr, getattr(other, attr))\n if getattr(self, \"tstep_h\", None) is None:\n self.tstep_h = getattr(other, \"tstep_h\", None)\n else:\n if getattr(other, \"tstep_h\", None) is not None:\n if self.tstep_h != other.tstep_h:\n raise ConcatenationError(\n \"Extending by a TrackRun with different timestep is not allowed\"\n )\n if adapt_conf and other.conf is not None:\n if self.conf is None:\n self.conf = other.conf.copy()\n else:\n for field in self.conf._fields:\n if getattr(self.conf, field) != getattr(other.conf, field):\n setattr(self.conf, field, None)\n self.sources.extend(other.sources)\n\n new_data = pd.concat([self.data, other.data], sort=False)\n new_track_idx = new_data.index.get_level_values(0).to_series()\n new_track_idx = new_track_idx.ne(new_track_idx.shift()).cumsum() - 1\n\n mux = pd.MultiIndex.from_arrays(\n [new_track_idx, new_data.index.get_level_values(1)], names=new_data.index.names\n )\n self.data = new_data.set_index(mux)\n\n # Concatenate categories\n if (self.cats is not None) or (other.cats is not None):\n new_cats = pd.concat([self.cats, other.cats], sort=False).fillna(False)\n new_track_idx = new_cats.index.get_level_values(0).to_series()\n new_track_idx = new_track_idx.ne(new_track_idx.shift()).cumsum() - 1\n\n ix = pd.Index(new_track_idx, name=new_cats.index.name)\n self.cats = new_cats.set_index(ix)", "def merge(self, other: Schema) -> Schema:\n if duplicates := self.keys() & other.keys():\n raise IntegrityError(f'Duplicate column name(s): {duplicates}')\n return self.__class__({**self, **other})" ]
[ "0.69505", "0.694683", "0.6885921", "0.68476945", "0.68086684", "0.6734038", "0.67226946", "0.66790885", "0.66472447", "0.66369003", "0.6572359", "0.6542647", "0.6503335", "0.65015614", "0.6469766", "0.64362967", "0.64317465", "0.6385473", "0.6376822", "0.6375423", "0.63703424", "0.63361776", "0.631753", "0.62937146", "0.62908316", "0.62438846", "0.61748385", "0.61556965", "0.61476624", "0.61324286", "0.60887307", "0.607825", "0.60765857", "0.60704356", "0.6068133", "0.60656786", "0.6027501", "0.6009837", "0.6009332", "0.5985768", "0.59678525", "0.5943717", "0.5924525", "0.5924524", "0.59010845", "0.5876143", "0.584064", "0.5831239", "0.58201", "0.5816105", "0.58112156", "0.5809095", "0.5804941", "0.57959485", "0.5794767", "0.57855856", "0.5785261", "0.5774561", "0.57736063", "0.57714903", "0.576705", "0.575061", "0.57354474", "0.57290703", "0.57218975", "0.5712195", "0.5704418", "0.5698822", "0.5693745", "0.56931365", "0.56895113", "0.5687284", "0.56764215", "0.5674636", "0.56723565", "0.5657785", "0.5655855", "0.56472", "0.5636454", "0.5634588", "0.5628336", "0.5626043", "0.5623629", "0.56202084", "0.55988896", "0.55897945", "0.55770785", "0.5576054", "0.5574003", "0.5573844", "0.5573058", "0.5572744", "0.55714935", "0.556345", "0.55581564", "0.55573905", "0.5538598", "0.5538142", "0.5534426", "0.55301327", "0.5527462" ]
0.0
-1
Merge data from a slow sql node object.
def merge_slow_sql_node(self, node):
    duration = node.duration

    self[1] += duration
    self[2] = self[0] and min(self[2], duration) or duration
    self[3] = max(self[3], duration)
    if self[3] == duration:
        self[4] = node

    # Must update the call count last as update of the
    # minimum call time is dependent on initial value.

    self[0] += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def record_slow_sql_node(self, node):\n\n if not self.__settings:\n return\n\n key = node.identifier\n stats = self.__sql_stats_table.get(key)\n if stats is None:\n # Only record slow SQL if not already over the limit on\n # how many can be collected in the harvest period.\n\n settings = self.__settings\n maximum = settings.agent_limits.slow_sql_data\n if len(self.__sql_stats_table) < maximum:\n stats = SlowSqlStats()\n self.__sql_stats_table[key] = stats\n\n if stats:\n stats.merge_slow_sql_node(node)\n\n return key", "def concatenate_data():", "def query(self, qid):\r\n connection = self.getConnection()\r\n cursor = connection.cursor()\r\n lst = []\r\n\r\n #========================================================================\r\n # Preparation\r\n #========================================================================\r\n whereStatement, ranges, morPrep, insert, Levels, rangeTab = self.prepareQuery(qid)\r\n lst.append(round(morPrep, 6)) # preparation\r\n lst.append(round(insert, 6)) # insert ranges into table\r\n lst.append(ranges) #number of ranges\r\n lst.append(Levels) #depth of the tree\r\n\r\n #========================================================================\r\n # First approximation of query region\r\n #========================================================================\r\n\r\n if whereStatement is not '':\r\n if rangeTab is not None:\r\n query = \"SELECT \" + ora.getHintStatement(['USE_NL (t r)', ora.getParallelStringQuery(self.numProcesses)]) + \\\r\n\" \" + ', '.join(['t.'+ i for i in self.columnNames]) + \"\"\"\r\nFROM \"\"\" + self.iotTableName + \" t, \" + rangeTab + \"\"\" r \r\n\"\"\" + whereStatement\r\n\r\n else:\r\n query = \"SELECT \"+ ora.getHintStatement([ora.getParallelStringQuery(self.numProcesses)]) + ', '.join(self.columnNames) + \"\"\" \r\nFROM \"\"\" + self.iotTableName + \"\"\" \r\n\"\"\" + whereStatement\r\n\r\n start1 = time.time()\r\n ora.mogrifyExecute(cursor, query)\r\n result = cursor.fetchall()\r\n\r\n lst.append(round(time.time() - start1, 10)) # fetching\r\n \r\n if (self.integration == 'loose' and self.qtype.lower() != 'time') or self.integration == 'deep':\r\n qTable = self.queryTable + '_temp_' + qid\r\n else: \r\n qTable = self.queryTable + '_' + qid\r\n \r\n start1 = time.time()\r\n decoded = self.decodeSpaceTime(result)\r\n lst.append(round(time.time() - start1, 6)) #decoding\r\n\r\n start1 = time.time()\r\n res = self.storeQuery(qTable, self.queryColumns, decoded, True)\r\n lst.append(round(time.time() - start1, 6)) #storing\r\n if res != []:\r\n ptsInTemp = res\r\n lst.append(res) #approximate points\r\n else:\r\n ptsInTemp = 0\r\n \r\n #==================================================================\r\n # Secondary filtering of query region\r\n #==================================================================\r\n\r\n if (self.qtype.lower() == 'time' and self.integration == 'loose') or res == []:\r\n # no data returned or it is a time query in the loose integration\r\n lst.append(ptsInTemp) #approximate points\r\n lst.append(0) # point in polygon time\r\n return lst\r\n else:\r\n \r\n if self.integration.lower() == 'deep' and self.qtype.lower() == 'time':\r\n queryTab = self.iotTableName + '_res_' + str(qid)\r\n timeWhere = whereClause.addTimeCondition(getTime(self.granularity, self.start_date, self.end_date), 'TIME', self.timeType)\r\n zWhere = whereClause.addZCondition([self.ozmin, self.ozmax], 'Z')\r\n whereValue = whereClause.getWhereStatement([timeWhere, zWhere])\r\n \r\n \r\n if self.granularity == 'day':\r\n 
query = \"CREATE TABLE \" + queryTab + \"\"\"\r\n\"\"\" + ora.getTableSpaceString(self.tableSpace) + \"\"\"\r\nAS SELECT * \r\nFROM (\r\n SELECT \"\"\" + ora.getHintStatement([ora.getParallelStringQuery(self.numProcesses)]) + \\\r\n \"\"\" X, Y, Z, TO_DATE(TIME, 'yyyy/mm/dd') as TIME \r\n FROM \"\"\" + qTable +\"\"\"\r\n ) \r\n\"\"\" + whereValue\r\n else:\r\n query = \"CREATE TABLE \" + queryTab + \"\"\"\r\n\"\"\" + ora.getTableSpaceString(self.tableSpace) + \"\"\" \r\n AS SELECT \"\"\" + ora.getHintStatement([ora.getParallelStringQuery(self.numProcesses)]) + \\\r\n \"\"\" X, Y, Z, TIME \r\n FROM \"\"\"+ qTable + \"\"\"\" \r\n \"\"\" + whereValue\r\n \r\n start1 = time.time()\r\n cursor.execute(query)\r\n end = round(time.time() - start1, 2)\r\n\r\n ora.dropTable(cursor, qTable, False)\r\n final = ora.getNumPoints(connection, cursor, queryTab)\r\n lst.append(final) #final points\r\n lst.append(end) #point in polygon time\r\n return lst\r\n \r\n else:\r\n final, end = self.pointInPolygon(qTable, qid, True)\r\n \r\n lst.append(final) #final points\r\n lst.append(end) #point in polygon time\r\n return lst\r\n else:\r\n print 'No data returned'\r\n return [lst[0], lst[1], '-', '-', '-','-','-','-']", "def merge(self, obj, **kwargs):\r\n raise NotImplementedError\r\n # if type(obj) == StreamFork:\r\n # node = obj.node\r\n # else:\r\n # node = obj\r\n #\r\n # self.stream.append(node)\r\n #\r\n # merge = MergeNode(**kwargs)\r\n # self.stream.append(merge)\r\n # self.stream.connect()\r", "def merge(self, other_btree):\n pass", "def process_row_to_graph(s3_object_info, app_logger, app_config, start_timing):\n\n # Graph Server Connection Info\n graph_server_host = get_config_item(app_config, \"neo4j.host\")\n graph_server_user = get_config_item(app_config, \"neo4j.username\")\n graph_server_pwd = get_config_item(app_config, \"neo4j.password\")\n driver = GraphDatabase.driver(\"bolt://\" + graph_server_host, auth=basic_auth(graph_server_user, graph_server_pwd))\n\n object_key = get_config_item(app_config, 's3_info.object_base') + \\\n '/' + s3_object_info['camera_name'] + '/' + \\\n s3_object_info['date_string'] + '/' + \\\n s3_object_info['hour_string'] + '/' + \\\n s3_object_info['img_type'] + '/' + \\\n s3_object_info['just_file']\n\n date_info = parse_date_time_pacific(object_key)\n event_ts = s3_object_info['utc_ts']\n\n add_camera_node = 'MERGE(this_camera:Camera {camera_name: \"' + s3_object_info['camera_name'] + '\"})'\n if s3_object_info['img_type'] == 'snap':\n add_image_node = 'MERGE(this_image:Image {object_key: \"' + object_key + \\\n '\", timestamp: ' + str(event_ts) + '})'\n else:\n add_image_node = 'MERGE(this_image:Video {object_key: \"' + object_key + \\\n '\", timestamp: ' + str(event_ts) + '})'\n add_isodate_node = 'MERGE(this_isodate:ISODate {iso_date: \"' + date_info['isodate'] + '\"})'\n add_year_node = 'MERGE(this_year:Year {year_value: ' + date_info['year'] + '})'\n add_month_node = 'MERGE(this_month:Month {month_value: ' + date_info['month'] + '})'\n add_day_node = 'MERGE(this_day:Day {day_value: ' + date_info['day'] + '})'\n add_hour_node = 'MERGE(this_hour:Hour {hour_value: ' + date_info['hour'] + '})'\n add_size_node = 'MERGE(this_size:Size {size_in_bytes: ' + s3_object_info['size_in_bytes'] + '})'\n relate_image_to_camera = 'MERGE (this_camera)-[:HAS_IMAGE {timestamp: ' + str(event_ts) + '}]->(this_image)'\n relate_image_to_timestamp = 'MERGE (this_image)-[:HAS_TIMESTAMP]->(this_isodate)'\n relate_image_to_year = 'MERGE (this_image)-[:HAS_YEAR]->(this_year)'\n 
relate_image_to_month = 'MERGE (this_image)-[:HAS_MONTH]->(this_month)'\n relate_image_to_day = 'MERGE (this_image)-[:HAS_DAY]->(this_day)'\n relate_image_to_hour = 'MERGE (this_image)-[:HAS_HOUR]->(this_hour)'\n relate_image_to_size = 'MERGE (this_image)-[:HAS_SIZE]->(this_size)'\n\n full_query_list = add_camera_node + \" \" + \\\n add_image_node + \" \" + \\\n add_isodate_node + \" \" + \\\n add_year_node + \" \" + \\\n add_month_node + \" \" + \\\n add_day_node + \" \" + \\\n add_hour_node + \" \" + \\\n add_size_node + \" \" + \\\n relate_image_to_camera + \" \" + \\\n relate_image_to_timestamp + \" \" + \\\n relate_image_to_year + \" \" + \\\n relate_image_to_month + \" \" + \\\n relate_image_to_day + \" \" + \\\n relate_image_to_size + \" \" + \\\n relate_image_to_hour\n\n neo_session = driver.session()\n\n tx = neo_session.begin_transaction()\n\n tx.run(full_query_list)\n\n tx.commit()\n neo_session.close()\n total_time = time.time() - start_timing\n app_logger.info(\"S3 Object: {} information written to graph DB in {} seconds.\".format(object_key, total_time))\n return True", "def archive_ost_data(self, lmtdb):\n\n dataset_names = [\n 'datatargets/readbytes',\n 'datatargets/writebytes',\n 'fullness/bytes',\n 'fullness/bytestotal',\n 'fullness/inodes',\n 'fullness/inodestotal'\n ]\n\n self.init_datasets(dataset_names, lmtdb.ost_names)\n\n # Now query the OST_DATA table to get byte counts over the query time range\n results, columns = lmtdb.get_ost_data(self.query_start, self.query_end_plusplus)\n\n # Index the columns to speed up insertion of data\n col_map = {}\n try:\n for db_col in ['TIMESTAMP', 'OST_ID', 'READ_BYTES',\n 'WRITE_BYTES', 'KBYTES_USED', 'KBYTES_FREE',\n 'INODES_USED', 'INODES_FREE']:\n col_map[db_col] = columns.index(db_col)\n except ValueError:\n raise ValueError(\"LMT database schema does not match expectation\")\n\n # Loop through all the results of the timeseries query\n for row in results:\n if isstr(row[col_map['TIMESTAMP']]):\n # SQLite stores timestamps as a unicode string\n timestamp = datetime.datetime.strptime(row[col_map['TIMESTAMP']],\n \"%Y-%m-%d %H:%M:%S\")\n else:\n # MySQL timestamps are automatically converted to datetime.datetime\n timestamp = row[col_map['TIMESTAMP']]\n target_name = lmtdb.ost_id_map[row[col_map['OST_ID']]]\n for dataset_name in dataset_names:\n target_dbcol = self.config[dataset_name].get('column')\n if target_dbcol is not None:\n self[dataset_name].insert_element(\n timestamp,\n target_name,\n row[col_map[target_dbcol]])\n elif dataset_name == 'fullness/bytestotal':\n self[dataset_name].insert_element(\n timestamp,\n target_name,\n row[col_map['KBYTES_USED']] + row[col_map['KBYTES_FREE']])\n elif dataset_name == 'fullness/inodestotal':\n self[dataset_name].insert_element(\n timestamp,\n target_name,\n row[col_map['INODES_USED']] + row[col_map['INODES_FREE']])\n else:\n errmsg = \"%s in self.config but missing 'column' setting\" % dataset_name\n raise KeyError(errmsg)", "def _load_elastic(self, sqldata):\n from collections import defaultdict\n attributes = ResourceMetricsLoader.attr_fields.keys()\n records = defaultdict(lambda: defaultdict(int))\n for sd in sqldata:\n r = dict(sd.items())\n if r['ATTRIBUTE_NAME'] not in attributes:\n continue\n # Only store hostnames and not FQDN for resources\n r['RESOURCE_NAME'] = r['RESOURCE_NAME'].split('.')[0]\n\n (attr, val) = self._get_attr_val(r)\n records[r.get('RESOURCE_NAME'),r.get('TIME_STAMP')][attr] = val\n records[r.get('RESOURCE_NAME'),r.get('TIME_STAMP')]['INSERT_SEQ'] = 
r['INSERT_SEQ']\n\n # Construct docs from records\n inserts = [] \n for k, v in records.iteritems():\n body = { attr: val for attr, val in v.iteritems() } \n body['RESOURCE_NAME'], body['TIME_STAMP'] = k\n document = {\n \"_index\" : self._get_index_name(body['TIME_STAMP']),\n \"_type\" : 'default',\n \"_source\" : body\n }\n inserts.append(document)\n \n # Insert list of documents into elasticsearch\n self.logger.info(\"Loading chunk into elasticsearch\")\n status = helpers.bulk(self.es,\n inserts,\n self.chunk_size)\n self.logger.info(\"Finished loading chunk into elasticsearch\")\n\n # update sequence to last item in the results\n #self.seq = dict(results[-1].items())[self.id_field]\n self.seq = sqldata[-1][self.seq_field]\n \n return status", "def _load_elastic(self, sqldata):\n inserts = []\n for r in sqldata:\n body = self._preprocess(dict(r.items()))\n if not body:\n continue # Skip if preprocessing returns False\n index_name = self._get_index_name(body['TIME_STAMP'])\n document = {\n \"_index\" : index_name,\n \"_type\" : 'default', # Hardcoded - we only have 1 doctype\n \"_id\" : body[self.seq_field],\n \"_source\" : body\n }\n inserts.append(document)\n\n # update sequence to last item in the results\n self.seq = sqldata[-1][self.seq_field]\n \n # Insert list of documents into elasticsearch\n status = helpers.bulk(self.es, inserts, self.chunk_size)\n self.logger.info(\"Inserted %d chunks into %s\" % (self.chunk_size,\n index_name))\n return status", "def get_src_data(DbClass, date_from, date_to, columns_to_copy):\n session = get_db_session(\"src\")\n results = (\n session.query(DbClass)\n .filter(\n and_(\n DbClass.timestamp >= date_from,\n DbClass.timestamp <= date_to,\n )\n )\n .all()\n )\n all_results = []\n for r in results:\n result_dict = {}\n for col in columns_to_copy:\n result_dict[col] = getattr(r, col)\n all_results.append(result_dict)\n session_close(session)\n return all_results", "def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def _merge(self):\n raise NotImplementedError", "def _get_new_data(self):\n self.log.info(\"Get new query from db \")\n surveys = self.db.execute_pandas_query(self._get_query('surveys_query'))\n\n final_query = ''\n for index_s, survey_id in surveys.iterrows():\n questions = self.db.execute_pandas_query(self._get_query('questions_query').replace('@currentSurveyId', str(survey_id['SurveyId'])))\n query_in_progress = ''\n for index_q, question_id in questions.iterrows():\n if question_id['InSurvey'] == 0:\n query_in_progress = query_in_progress + self._get_query('query_template_for_null_column').replace('<QUESTION_ID>', 
str(question_id['QuestionId']))\n else:\n query_in_progress = query_in_progress + self._get_query('query_template_for_answer_column').replace('<QUESTION_ID>', str(question_id['QuestionId']))\n\n if index_q != questions.index[-1]:\n query_in_progress = query_in_progress + ' , '\n\n union_query_block = self._get_query('query_template_outer_union_query').replace('<DYNAMIC_QUESTION_ANSWERS>', query_in_progress)\n union_query_block = union_query_block.replace('<SURVEY_ID>', str(survey_id['SurveyId']))\n final_query = final_query + union_query_block\n if index_s != surveys.index[-1]:\n final_query = final_query + ' UNION '\n return final_query", "def update_data(self, data):\n start_time = data.index[-1].strftime(\"%Y-%m-%dT%H:%M:%S.000000Z\")\n temp_data = self.gather_data(start=start_time)\n temp_data = self._list_to_df(temp_data)\n if (len(temp_data) > 1):\n # temp_data[0] is the same as data[-1]\n out_data = data.append(temp_data[1:])\n return out_data", "def mergeNodes(new, t1, t2):\n \n if t1 and t2:\n new.val = t1.val + t2.val\n elif not t1:\n new.val = t2.val\n elif not t2:\n new.val = t1.val", "def merge(self, obj):\n pass", "def get_query_results(sql_query, raw_info, column_fields= []):\n answer= {}\n #try:\n if True:\n if not connect():\n answer['type']= \"message\"\n answer['data']= \"Connection Problem.\"\n return answer\n cursor.execute('set profiling = 1')\n cursor.execute(sql_query) #query execution\n \"\"\"\n if answer['type']== 'graph':\n data = {}\n for row in cursor.fetchall():\n key, value, date = row\n _data = data.get(key, {'date':[], 'values':[], 'all':[]})\n _data['date'].append(str(date))\n _data['values'].append(float(value))\n _data['all'].append([date, float(value)])\n data[key] = _data\n\n final_data = []\n for k, v in data.items():\n v['key'] = k\n sorted_data = sorted(v['all'], key=lambda x: x[0])\n date, values = [], []\n for _date, _value in sorted_data:\n date.append(str(_date))\n values.append(_value)\n v['date'] = date\n v['values'] = values\n v.pop('all')\n final_data.append(v)\n answer['data']= final_data\"\"\"\n #else: # answer['type']== 'text'\n if True:\n data= []\n for row in cursor.fetchall():\n temp= []\n for r in row:\n temp.append(str(r))\n data.append(temp)\n answer['type']= 'text'\n answer['data']= data\n answer['format']= []\n for col in column_fields:\n answer['format'].append({'field': col, 'type': ''})\n if not answer['data']:\n answer['type']= \"message\"\n answer['data']= \"No records found.\"\n if 'format' in answer.keys():\n del answer['format']\n\n cursor.execute('set profiling= 0')\n cursor.execute(\"SELECT query_id, SUM(duration) FROM information_schema.profiling GROUP BY query_id ORDER BY query_id DESC LIMIT 1\")\n raw_info['execution_time']= float(cursor.fetchone()[1])\n #except Exception as e:\n if False:\n print('problem executing query')\n print(e, 'line:', sys.exc_info()[-1].tb_lineno)\n answer['type']= \"message\"\n answer['data']= \"No records found.\"\n if 'format' in answer.keys():\n del answer['format']\n if request and request.session['query_stack']:\n request.session['query_stack']= {}\n #finally:\n if True:\n return answer", "def _from_db_object(nodegroup, db_nodegroup):\n for field in nodegroup.fields:\n nodegroup[field] = db_nodegroup[field]\n\n nodegroup.obj_reset_changes()\n return nodegroup", "def archive_oss_data(self, lmtdb):\n\n dataset_names = [\n 'dataservers/cpuload',\n 'dataservers/memused',\n ]\n\n self.init_datasets(dataset_names, lmtdb.oss_names)\n\n # Now query the OSS_DATA table to get byte counts over the 
query time range\n results, columns = lmtdb.get_oss_data(self.query_start, self.query_end_plusplus)\n\n # Index the columns to speed up insertion of data\n col_map = {}\n try:\n for db_col in ['TIMESTAMP', 'OSS_ID', 'PCT_CPU', 'PCT_MEMORY']:\n col_map[db_col] = columns.index(db_col)\n except ValueError:\n raise ValueError(\"LMT database schema does not match expectation\")\n\n # Loop through all the results of the timeseries query\n for row in results:\n if isstr(row[col_map['TIMESTAMP']]):\n # SQLite stores timestamps as a unicode string\n timestamp = datetime.datetime.strptime(row[col_map['TIMESTAMP']],\n \"%Y-%m-%d %H:%M:%S\")\n else:\n # MySQL timestamps are automatically converted to datetime.datetime\n timestamp = row[col_map['TIMESTAMP']]\n target_name = lmtdb.oss_id_map[row[col_map['OSS_ID']]]\n for dataset_name in dataset_names:\n target_dbcol = self.config[dataset_name].get('column')\n # target_dbcol=PCT_CPU, target_name=snx11025n022\n if target_dbcol is not None:\n self[dataset_name].insert_element(\n timestamp,\n target_name,\n row[col_map[target_dbcol]])\n else:\n errmsg = \"%s in self.config but missing 'column' setting\" % dataset_name\n raise KeyError(errmsg)", "def merge(self, graph):\n # keep previous self.filename\n # copy data\n for x in graph.data:\n self.data.append(x)\n # copy headers, unless already exists (is so, info is lost)\n for key in graph.headers:\n if key not in self.headers:\n self.headers.update({key: graph.headers[key]})\n # copy graphInfo, unless already exists (is so, info is lost)\n for key in graph.graphInfo:\n if key not in self.graphInfo:\n self.graphInfo.update({key: graph.graphInfo[key]})\n # copy sampleInfo, unless already exists (is so, info is lost)\n for key in graph.sampleInfo:\n if key not in self.sampleInfo:\n self.sampleInfo.update({key: graph.sampleInfo[key]})", "def _before_stockpyle_deserialize(self, obj):\n \n # only merge SA objects\n if _is_sqlalchemy_object(obj):\n self.__session.merge(obj, load=False)", "def merge(self):\n collapsable = self.findSameSubtrees()\n\n dummy = GraphiusNode(-1, None)\n for i, node in self.nodes.items():\n dummy.addNeighbor(node)\n\n # Perform the merge\n self.mergeHelper(dummy, collapsable)\n\n # Regenerate trees\n newNodes = self.dfs(dummy)\n assert(-1 not in newNodes)\n self.nodes = newNodes", "def merge(self, other):\n from .dataset import Dataset\n\n if other is None:\n return self.to_dataset()\n else:\n other_vars = getattr(other, 'variables', other)\n coords = merge_coords_without_align([self.variables, other_vars])\n return Dataset._from_vars_and_coord_names(coords, set(coords))", "def _merge_by_query(self, obj_dict):\n _res = self.__session.query(obj_dict[\"class\"]).filter_by(**obj_dict[\"query_dict\"]).first()\n\n if _res is None:\n self._add(obj_dict[\"instance\"])\n else:\n if hasattr(obj_dict[\"instance\"], 'attributes') and \\\n hasattr(obj_dict[\"instance\"], 'p_key'):\n for attr in obj_dict[\"instance\"].attributes:\n if attr not in obj_dict[\"instance\"].p_key:\n setattr(_res, attr, getattr(obj_dict[\"instance\"], attr))\n # updating the instance\n obj_dict[\"instance\"] = _res\n else:\n raise AttributeError(\"Class variable (attributes / p_key) not set for %s\" %\n (obj_dict[\"instance\"],))", "def _build_from_chunks(self, data_node):\n result = ''\n\n if not data_node:\n return ''\n\n master_data = data_node[0]\n result = \"{}{}\".format(result, self._decode(master_data['value']))\n # if data is not in chunks, then return the first node's value\n if 'tags' not in master_data or 
'chunks' not in master_data['tags']:\n return result\n\n # join the values in chunks\n last_chunk = int(master_data['tags']['chunks'])\n for chunk_id in range(1, last_chunk):\n slave_data = data_node[chunk_id]\n result = \"{}{}\".format(result, self._decode(slave_data['value']))\n return result", "def _gather_deep_data(self):\n\n cleaned_data_from_website = list()\n\n for i, search_result in self.data_from_website.iterrows():\n cleaned_data_from_website.append(self._deep_data(search_result.url))\n\n cleaned_data_from_website = pd.DataFrame(cleaned_data_from_website)\n if len(cleaned_data_from_website) == 0:\n cleaned_data_from_website['@id'] = '0'\n cleaned_data_from_website.set_index('@id', inplace=True)\n self.data_from_website = cleaned_data_from_website", "def merge_logs(self):\n ourlog = LogData()\n for l in self.data_set:\n ourlog.entries = ourlog.entries + l.entries\n ourlog.sort_time()\n self.finalized_data = ourlog", "def _to_redis(self):\n\n # OSM ways and nodes tables\n self._gdf_to_redis(self._bbid + \"_ways\", self._ways, geometry='geometry')\n self._df_to_redis(self._bbid + \"_nodes\", self._nodes)\n\n # graph to graph nodes and edges tables (storing only ids and edge lengths)\n gdf_nodes, gdf_edges = osmnx.utils_graph.graph_to_gdfs(self._graph, node_geometry=False,\n fill_edge_geometry=False)\n self._gdf_to_redis(self._bbid + \"_graph_nodes\", gdf_nodes[['id']]) # ['id', 'x', 'y'] to store coordinates\n self._gdf_to_redis(self._bbid + \"_graph_edges\", gdf_edges[['id', 'length', 'u', 'v', 'key']])", "def mergeAggregatedCsvData(self, contexts, obj, aggData1, aggData2):\n return aggData1 + aggData2", "def _transform_in_memory_row(data_to_send, row, target_stream_id):\n\n data_available = False\n\n try:\n row_id = row['id']\n asset_code = row['asset_code']\n timestamp = row['user_ts']\n sensor_data = row['reading']\n\n if _log_debug_level == 3:\n _logger.debug(\"stream ID : |{0}| sensor ID : |{1}| row ID : |{2}| \"\n .format(target_stream_id, asset_code, str(row_id)))\n\n # Prepares new data for the PICROMF\n new_data = [\n {\n \"containerid\": target_stream_id,\n \"values\": [\n {\n \"Time\": timestamp\n }\n ]\n }\n ]\n\n # Evaluates which data is available\n for data_key in sensor_data:\n try:\n new_data[0][\"values\"][0][data_key] = sensor_data[data_key]\n\n data_available = True\n except KeyError:\n pass\n\n if data_available:\n # note : append produces a not properly constructed OMF message\n data_to_send.extend(new_data)\n\n if _log_debug_level == 3:\n _logger.debug(\"in memory info |{0}| \".format(new_data))\n\n else:\n _message = plugin_common.MESSAGES_LIST[\"e000020\"]\n _logger.warning(_message)\n\n except Exception:\n _message = plugin_common.MESSAGES_LIST[\"e000022\"]\n\n _logger.error(_message)\n raise", "def __init__(self, database_url, file_path):\n\t\tself.url = database_url\n\t\tself.con = pcg2.connect(self.url)\n\t\tself.con.autocommit = True\n\t\tself.cur = self.con.cursor()\n\t\tself.existing_nodes = {} # stores objects that have already been found\n\t\tself.layers = 0 # used to return max_depth at the end of both algorithms\n\t\tself.pointers_to = {}\n\t\tself.pointed_to_by = {}\n\t\twith open(file_path) as file:\n\t\t\tfor line in file:\n\t\t\t\t(key, value) = line.split(\" -> \")\n\t\t\t\tif value.endswith(\"\\n\"):\n\t\t\t\t\tvalue = value[:-1]\n\t\t\t\ttry:\n\t\t\t\t\tbisect.insort(self.pointers_to[key], value) # alphabetizing generates more connections with \"relevant\" object types (accounts, adunits, etc.) 
that happen to come first alphabetically\n\t\t\t\t\t#self.pointers_to[key].append(value) # to add types in \"natural\" order\n\t\t\t\texcept:\n\t\t\t\t\tself.pointers_to[key] = [value]\n\t\t\t\ttry:\n\t\t\t\t\tbisect.insort(self.pointed_to_by[value], key)\n\t\t\t\t\t#self.pointed_to_by[value].append(key)\n\t\t\t\texcept:\n\t\t\t\t\tself.pointed_to_by[value] = [key]\n\t\tself.queries = {} # holds SQL queries\n\t\tself.root_logger= logging.getLogger()\n\t\tself.root_logger.setLevel(logging.DEBUG)\n\t\thandler = logging.FileHandler('runtime.log', 'w', 'utf-8')\n\t\thandler.setFormatter(logging.Formatter('%(name)s %(message)s'))\n\t\tsecond_handler = logging.StreamHandler(sys.stdout)\n\t\tsecond_handler.setLevel(logging.DEBUG)\n\t\tsecond_handler.setFormatter(logging.Formatter('%(name)s %(message)s'))\n\t\tself.root_logger.addHandler(handler)\n\t\tself.root_logger.addHandler(second_handler)", "def GetSqlData2(select,bycolumn=True):\n #connect to database and execute sql and retrieve data\n conn,cur = ConnectDb()\n cur.execute(select)\n fields = [d.name for d in cur.description]\n\n data = cur.fetchall()\n if len(data)==0:return None\n\n #print N.c_[fields,data[0]]\n\n if bycolumn:\n data = zip(*data)\n #print fields, len(data),len(data[0]),data[0][0] \n dic = {}\n while fields:\n field = fields.pop(0)\n \n #IF DATA IS GEOM OR GEOG\n if re.search('geog|geom',field,re.IGNORECASE):\n #print field, len(data),len(data[0]),data[0][0]\n geoms = data.pop(0)\n dic[field] = [ppygis.Geometry.read_ewkb(poly) for poly in geoms]\n if hasattr(dic[field][0], 'polygons'):\n #print dir()\n outerring = dic[field][0].polygons[0].rings.pop(0)\n dic['outer'] = [[point.x,point.y] for point in outerring.points]\n dic['inner'] = [[[point.x,point.y] for point in ring.points] for ring in dic[field][0].polygons[0].rings]\n #dic[field][0].polygons[0].rings[0].points]\n elif hasattr(dic[field][0], 'x'):\n dic['x'] = [item.x for item in dic[field]]\n dic['y'] = [item.y for item in dic[field]]\n else:dic[field] = N.array(data.pop(0))\n \n return dic\n else:\n lst = [] \n while data:\n dic = {}\n row = data.pop(0)\n \n for i,field in enumerate(fields):\n \n #IF DATA IS GEOM OR GEOG\n if re.search('geog|geom',field,re.IGNORECASE):\n #print 'here'\n dic[field] = ppygis.Geometry.read_ewkb(row[i])\n #if hasattr(dic[field], 'polygons'):\n outerring = dic[field].polygons[0].rings.pop(0)\n dic['outer'] = [[point.x,point.y] for point in outerring.points]\n dic['inner'] = [[[point.x,point.y] for point in ring.points] for ring in dic[field].polygons[0].rings]\n #elif hasattr(dic[field], 'x'):\n # dic['x'] = [item.x for item in dic[field]]\n # dic['y'] = [item.y for item in dic[field]]\n\n elif type(row[i]) == list or type(row[i]) == tuple:\n dic[field] = N.array(row[i])\n else:\n dic[field] = row[i]\n lst.append(dic)\n return lst", "def update_nodes_df(nodes: pandas.DataFrame) -> None:\n nodes_clean = nodes.copy(deep=True)\n # Ensure that all '' values are NaN, so that those rows can be easily removed with dropna()\n nodes_clean.replace('', numpy.nan, inplace=True)\n nodes_clean.dropna(axis=0, how='any', inplace=True)\n nodes_clean.drop_duplicates(keep='first', inplace=True, ignore_index=True)\n\n print('\\nCache used at start of function: ' + str(read_node.cache_info()) + '.')\n print('There are ' + str(len(nodes_clean)) + ' nodes, updating node: 0 ', end='')\n count = 0\n columns = nodes_clean.columns\n for row in nodes_clean.itertuples():\n count += 1\n if count % 250 == 0:\n print(count, ' ', end='', flush=True)\n if count % 10000 
== 0:\n print('\\n', end='', flush=True)\n\n node_properties = {}\n for prop_name in RICGRAPH_PROPERTIES_ADDITIONAL:\n for other_name in columns:\n if prop_name == other_name:\n node_properties[prop_name] = getattr(row, other_name)\n\n update_node(name=row.name, category=row.category, value=row.value,\n **node_properties)\n\n print(count, '\\n', end='', flush=True)\n print('Cache used at end of function: ' + str(read_node.cache_info()) + '.')\n return", "def merge_nodes(self, parent, child):\n parent.key += child.key\n parent.real = child.real\n parent.value = child.value\n parent.children = child.children", "def vmerge(self, dataset, on=None, left_on=None, right_on=None,\n row_id_name=None, left_id=None, right_id=None, row_ids=None,\n overwrite_text=False, from_set=None, uniquify_key=None,\n reset_index=True, inplace=True, verbose=True):\n datasets = [(self._meta, self._data)]\n merge_ds = [(ds._meta, ds._data) for ds in dataset]\n datasets.extend(merge_ds)\n merged_meta, merged_data = _vmerge(\n None, None, datasets, on=on, left_on=left_on,\n right_on=right_on, row_id_name=row_id_name, left_id=left_id,\n right_id=right_id, row_ids=row_ids, overwrite_text=overwrite_text,\n from_set=from_set, reset_index=reset_index, verbose=verbose)\n if inplace:\n self._data = merged_data\n self._meta = merged_meta\n if uniquify_key:\n self._make_unique_key(uniquify_key, row_id_name)\n return None\n else:\n new_dataset = self.clone()\n new_dataset._data = merged_data\n new_dataset._meta = merged_meta\n if uniquify_key:\n new_dataset._make_unique_key(uniquify_key, row_id_name)\n return new_dataset", "def seed_other_dataset(name: str, chunk_size: int, start=None, end=None):\n objects = []\n for chunk in pd.read_csv(name, chunksize=chunk_size, header=1):\n chunk_as_mat = chunk.to_numpy()\n chunk_start = datetime.datetime.strptime(str(chunk_as_mat[0][0]), \"%Y%m%d\")\n chunk_end = datetime.datetime.strptime(str(chunk_as_mat[-1][0]), \"%Y%m%d\")\n if start is not None and start > chunk_end:\n continue\n if end is not None and end < chunk_start:\n break\n # print(chunk.to_numpy())\n objects += insert_into_sql(chunk.to_numpy())\n return objects", "def naive(self):\n # SLOW algorithm ! 
A lot of COM calls are performed.\n recordset = win32com.client.Dispatch('ADODB.Recordset')\n if self.order_by:\n recordset.Open(\n unicode('SELECT * FROM [%s] ORDER BY %s' % (\n self.name, self.order_by)), self.document.connection, 0, 1)\n else:\n recordset.Open(\n unicode('SELECT * FROM [%s]' % self.name),\n self.document.connection, 0, 1)\n try:\n while not recordset.EOF:\n source = {}\n for field in recordset.Fields:\n source[self.encoding(field.Name)] = self.encoding(\n field.Value)\n yield source\n recordset.MoveNext()\n recordset.Close()\n del recordset\n except:\n # cannot use \"finally\" here because Python doesn't want\n # a \"yield\" statement inside a \"try...finally\" block.\n recordset.Close()\n del recordset\n raise", "def addAppRecordMerge (self, nodeList) :\n\t\tfor i in range(len(nodeList)) :\n\t\t\tnodeList[i].addAppData(\"id\",\"data \" + nodeList[i].instanceID , Node.ALL, Node.ALL)\n\t\t\tnodeList[i].serialize((Node.ALL, Node.ALL))", "def add_node(self, node: Node):\n prop_str = \",\\n\".join([\"n.%s = '%s'\" % (k, v) for k, v in node.data.items()])\n query = \"\"\"\n MERGE (n:%s {id: '%s'})\n SET %s\n \"\"\" % (\n node.labels,\n norm_id(node.db_ns, node.db_id),\n prop_str,\n )\n return self.create_tx(query)", "def _get_obj_and_data(self, key, write_merged=True):\r\n bucket = self.get_bucket()\r\n\r\n obj = bucket.get(key)\r\n data = [self._list_to_dict(o.get_data()) for o\r\n in obj.get_siblings()\r\n if o.get_data() is not None]\r\n\r\n obj_data = obj.get_data()\r\n if obj_data is not None:\r\n data.append(self._list_to_dict(obj_data))\r\n\r\n # if we have no data or only 1 sibling we can safetly return\r\n # it without merging\r\n if len(data) == 0:\r\n return obj, {}\r\n elif len(data) == 1:\r\n return obj, data[0]\r\n\r\n resolved_data = reduce(self._merge_two, data)\r\n # NOTE: is this really the only way to fix a conflict in the\r\n # python riak library?\r\n try:\r\n obj._vclock = obj.get_sibling(0).vclock()\r\n except IndexError:\r\n pass\r\n else:\r\n if write_merged:\r\n obj.set_data(self._dict_to_list(resolved_data)[:self.max_items])\r\n obj.store()\r\n\r\n return obj, resolved_data", "def add_data():\n neo = NeoData(\"neo4j://neo:7687\")\n data = neo.find_all()\n return data", "def slow_transaction_data(self):\n\n # XXX This method no longer appears to be used. 
Being replaced\n # by the transaction_trace_data() method.\n\n if not self.__settings:\n return []\n\n if not self.__slow_transaction:\n return []\n\n maximum = self.__settings.agent_limits.transaction_traces_nodes\n\n transaction_trace = self.__slow_transaction.transaction_trace(\n self, maximum)\n\n data = [transaction_trace,\n list(self.__slow_transaction.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n trace_data = [[root.start_time,\n root.end_time - root.start_time,\n self.__slow_transaction.path,\n self.__slow_transaction.request_uri,\n pack_data]]\n\n return trace_data", "def merge_node(**kwargs):\n\n if kwargs['TypeName'] is None:\n kwargs['TypeName'] = 'None'\n\n kwargs['Hash'] = get_node_hash(kwargs)\n\n if '(*)' in kwargs['TypeDefinition']:\n # The type contains a pointer to a function prototype\n # args['TypeDefinition'] = HRESULT (*)(IRpcChannelBuffer *, RPCOLEMESSAGE *, ULONG *)\n # args['TypeName'] = SendReceive\n index = kwargs['TypeDefinition'].find('(*)')\n TypeDefinition = kwargs['TypeDefinition'][:index + 2] + kwargs['TypeName'] \\\n + ')' + kwargs['TypeDefinition'][index + 3:]\n kwargs['TypeName'] = ';'\n kwargs['TypeDefinition'] = TypeDefinition\n\n if not nodes_cache.get(kwargs['Hash']):\n nodes_cache.update({kwargs['Hash']: (kwargs['TypeDefinition'], kwargs['TypeName'], kwargs['NodeLabel'])})\n if kwargs['StartNodeHash'] != kwargs['Hash']:\n relationships_cache.add(kwargs['StartNodeHash'] + \" \" + kwargs['Hash'] + \" \" + kwargs['RelationshipType'])\n\n return kwargs['Hash']", "def concat_and_sort(self):\n for link in self.to_concat:\n \n to_concat = self.to_concat[link]\n df = pd.concat(to_concat,axis=0)\n df=df.sort_values(by=['day','actualtime_arr_from'])\n for d in df['day'].unique():\n self.data[d][link] = {}\n temp = df[df['day']==d]\n \n for r in temp['routeid'].unique(): \n self.data[d][link][r] = temp[temp['routeid']==r][['actualtime_arr_from','actualtime_arr_to','routeid']].values \n del(temp)\n del(df)\n del(self.to_concat)", "def query(self, sql):\r\n\r\n result_sets = []\r\n self.messages = \"\"\r\n messages = \"\"\r\n\r\n # self.batched_query(sql)\r\n\r\n with self.conn.cursor() as cur:\r\n self.cur = cur\r\n\r\n self.request_cancel = False\r\n try:\r\n cur.execute(sql)\r\n except Exception as ex:\r\n self.cur = None\r\n self.messages = str(ex)\r\n return None\r\n while True:\r\n try:\r\n description = cur.description\r\n except:\r\n self.messages = \"Error reading description\"\r\n if self.metadata() is not None and description is not None:\r\n description = list(map(lambda c: c+(self._better_description(c),),description))\r\n # print(description)\r\n try:\r\n # data = cur.fetchmany(10000)\r\n # while True:\r\n # d = cur.fetchmany(10000)\r\n # if d is None or len(d) == 0:\r\n # break\r\n # if self.request_cancel:\r\n # cur.cancel()\r\n # self.request_cancel = False\r\n # data = data + d\r\n data = cur.fetchall()\r\n\r\n # select @var = 'tto' does not produce any resultset and raised an exception\r\n # we catch it and ignore it.\r\n # TODO: is there a better way to handle that?\r\n except 
pytds.ProgrammingError as ex:\r\n data = None\r\n if str(ex) == \"Previous statement didn't produce any results\":\r\n pass\r\n else:\r\n break\r\n \r\n except Exception as ex:\r\n data = None\r\n self.messages = str(ex) + \" Error while fetching data\"\r\n break\r\n\r\n\r\n\r\n if data is not None:\r\n result_sets.append(ResultSet(description, data))\r\n\r\n try:\r\n have_more_set = cur.nextset()\r\n except:\r\n self.messages = \"Error reading next set\"\r\n break\r\n if have_more_set is None or have_more_set is False:\r\n break\r\n\r\n try:\r\n for msg in cur.messages:\r\n messages = messages + str(msg[1]) + \"\\n\"\r\n except:\r\n self.messages = \"Error reading messages\"\r\n self.messages = messages + self.messages\r\n\r\n # print(\"End \",str(len(result_sets)))\r\n\r\n self.cur = None\r\n\r\n # if not result_sets:\r\n # return None\r\n return result_sets", "def update_data():\n etf_prices = get_prices(start=START_DATE, end=END_DATE)\n etf_returns = compute_returns(etf_prices)\n merged_etf_data = etf_prices.merge(etf_returns, right_index=True, left_index=True)\n indicators = compute_indicators(merged_etf_data) # this uses the \"ta\" lib, but it does not need\n # to be imported\n merged_etf_data = merged_etf_data.merge(indicators, right_index=True, left_index=True)\n vix_data = get_vix()\n data = merged_etf_data.merge(vix_data, right_index=True, left_index=True)\n data.to_csv('Data/database.csv')\n return", "def _finalize_data(self):\n\n if isinstance(self.node_data, np.ndarray): # SR workflow\n self.node_data = da.from_array(self.node_data)\n elif isinstance(self.node_data, list): # vr workflow\n struct_data = np.empty(len(self.node_data), dtype=self.data.dtype)\n datavals = np.array(self.node_data)\n for cnt, varname in enumerate(self.data.dtype.names):\n struct_data[varname] = datavals[:, cnt]\n self.node_data = da.from_array(struct_data)\n if isinstance(self.data, np.ndarray):\n self.data = da.from_array(self.data)", "def emit(self, column_names, data):", "def step030():\n logger.logMessage('Begin: get data from table')\n \n query = 'select tsa,time at time zone \\'utc\\' from weather_dupes ' + \\\n 'order by time;'\n \n pgConn = pg.connect(host=host,user=user,password=password,database=database) \n with pgConn:\n with pgConn.cursor() as c:\n c.execute(query)\n numrecs = 0\n with open(dbDumpFile,'w') as f:\n for row in c.fetchall():\n tsa = row[0]\n time= row[1].isoformat()\n f.write('{0:14d};{1:25s}\\n'.format(tsa,time))\n numrecs += 1\n if numrecs % 1000 == 0:\n logger.logMessage(level='DEBUG',message=\"{0:9d} rows dumped\".format(numrecs))\n logger.logMessage(\"Total rows: {0:d}\".format(numrecs))\n \n logger.logMessage('End : get data from table')", "def get_timeseries_data(self, table, datetime_start, datetime_end, timechunk=datetime.timedelta(hours=1)):\n table_schema = LMTDB_TABLES.get(table.upper())\n if table_schema is None:\n raise KeyError(\"Table '%s' is not valid\" % table)\n else:\n result_columns = ['TIMESTAMP'] + table_schema['columns']\n format_dict = {\n 'schema': ', '.join(result_columns).replace(\"TS_ID,\", \"TIMESTAMP_INFO.TS_ID,\"),\n 'table': table,\n }\n\n index0 = len(self.saved_results.get(table, {'rows': []})['rows'])\n chunk_start = datetime_start\n while chunk_start < datetime_end:\n if timechunk is None:\n chunk_end = datetime_end\n else:\n chunk_end = chunk_start + timechunk\n if chunk_end > datetime_end:\n chunk_end = datetime_end\n start_stamp = chunk_start.strftime(\"%Y-%m-%d %H:%M:%S\")\n end_stamp = chunk_end.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n 
query_str = \"\"\"SELECT\n %(schema)s\n FROM\n %(table)s\n INNER JOIN TIMESTAMP_INFO ON TIMESTAMP_INFO.TS_ID = %(table)s.TS_ID\n WHERE\n TIMESTAMP_INFO.TIMESTAMP >= %%(ps)s\n AND TIMESTAMP_INFO.TIMESTAMP < %%(ps)s\n \"\"\" % format_dict\n self.query(query_str, (start_stamp, end_stamp), table=table, table_schema=table_schema)\n if timechunk is not None:\n chunk_start += timechunk\n\n return self.saved_results[table]['rows'][index0:], result_columns", "def read_all_odbsql_stn_withfeedback(dataset, odbfile):\n columns, kinds, tdict = make_odb_header(odbfile, dataset) \n try: \n t=time.time() \n try:\n f=gzip.open(odbfile) \n except:\n print(odbfile, 'The zipped ODB file was not found !')\n return\n \n #d=['date@hdr','time@hdr','statid@hdr','vertco_reference_1@body','varno@body','reportype','andate','antime',\n # 'obsvalue@body','fg_depar@body','an_depar@body','biascorr@body','sonde_type@conv','collection_identifier@conv','source@hdr']\n \n # had to remove 'collection_identifier@conv' to make it work with 1, 3188, 1759, 1761 \n \n tdict['sensor@hdr']=numpy.float32\n tdict['ppcode@conv_body']=numpy.float32\n \n '''\n d=['date@hdr','time@hdr','statid@hdr','vertco_reference_1@body','varno@body','lon@hdr','lat@hdr','seqno@hdr',\n 'obsvalue@body','source@hdr' , 'vertco_type@body']\n \n if 'fg_depar@body' in columns: # creating the colkumns for era5fb \n d=d+['fg_depar@body','an_depar@body','biascorr@body','sonde_type@conv','reportype','andate','antime']\n '''\n \n# restrict feedback to certain columns \n #for c in columns:\n # if c not in d:\n # del tdict[c]\n \n #columns=d.copy()\n \n alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) #nrows=1000000)\n \n \"\"\" Case where erafb is not available \"\"\"\n if 'fg_depar@body' not in columns:\n alldict['fg_depar@body']=numpy.float32(numpy.NaN)\n alldict['an_depar@body']=numpy.float32(numpy.NaN)\n alldict['biascorr@body']=numpy.float32(numpy.NaN)\n alldict['sondetype@conv']=numpy.int32(-2147483648)\n alldict['reportype']=numpy.int32(-2147483648)\n \n #print(time.time()-t,sys.getsizeof(alldict)//1024//1024)\n idx=numpy.where(numpy.logical_or(alldict.reportype.values==16045,alldict.reportype.values==16068))[0]\n if len(idx)>0:\n \n #alldict.drop(index=alldict.index[idx],inplace=True)\n y=numpy.int64(alldict['date@hdr'].values)*1000000+alldict['time@hdr'].values\n x=numpy.unique(y)\n dropindex=[]\n for i in range(1,x.shape[0]):\n if x[i]-x[i-1]<60:\n idx=numpy.where(y==x[i-1])[0]\n if idx.shape[0]>0:\n dropindex.append(idx)\n else:\n print('empty index')\n if dropindex: \n dropindex = numpy.concatenate(dropindex).ravel()\n alldict.drop(index=alldict.index[dropindex],inplace=True)\n \n #print(time.time()-t) #,sys.getsizeof(alldict)//1024//1024)\n \n #idx=numpy.where(alldict.reportype.values==16045)[0]\n #if idx.shape[0]>0:\n #idy=numpy.where(numpy.logical_and(alldict.reportype.values!=16045,alldict.reportype.values!=16068))[0]\n #if idy.shape[0]>0:\n #idz=numpy.isin(alldict.andate.values[idy],alldict.andate.values[idx])\n #if numpy.sum(idz)>0:\n #alldict.drop(index=alldict.index[idy[idz]],inplace=True)\n \n #idx=numpy.where(alldict.reportype.values==16068)[0]\n #if idx.shape[0]>0:\n #idy=numpy.where(numpy.logical_and(alldict.reportype.values!=16045,alldict.reportype.values!=16068))[0]\n #if idy.shape[0]>0:\n #idz=numpy.isin(alldict.andate.values[idy],alldict.andate.values[idx])\n #if numpy.sum(idz)>0:\n #alldict.drop(index=alldict.index[idy[idz]],inplace=True)\n \n \n 
#print(time.time()-t,sys.getsizeof(alldict)//1024//1024)\n \n alldict['source_id'] = dataset.rjust(10)\n\n for c in alldict.columns:\n \n if type(alldict[c].iloc[0]) in [str,bytes]:\n l=alldict[c].shape[0]\n slen=len(alldict[c].values[0])\n alldict[c]=numpy.array(alldict.pop(c).values,dtype='S{}'.format(slen))\n #alldict[c]=numpy.string_(alldict[c])\n \n if type(alldict[c].iloc[0]) is numpy.int64:\n alldict[c]=numpy.int32(alldict[c])\n \n if type(alldict[c].iloc[0]) is numpy.float64:\n alldict[c]=numpy.float32(alldict[c])\n \n #print('after odb:',time.time()-t)\n \n except MemoryError:\n print('Reading ODB failed ! ' + odbfile)\n return alldict\n \n #print(odbfile,time.time()-t)#, sys.getsizeof(alldict))\n\n \n return alldict", "def join_data(self, base_data, join_data, base_field, join_fields):\n for data in base_data:\n extra = join_data[data[base_field]]\n for field in join_fields:\n data[field] = extra[field]\n \n return base_data", "def process(self, device, results, log):\n log.info('processing %s for device %s', self.name(), device.id)\n getdata, tabledata = results\n \n ltmnode_table = tabledata.get(\"ltmNodeAddrTable\")\n \n # Grab the second table and append it to the first\n status_table = tabledata.get(\"ltmNodeStatusTable\")\n for oid, data in status_table.items():\n for key, value in data.items():\n if key not in ltmnode_table[oid]:\n ltmnode_table[oid][key] = value\n \n maps = []\n rm = self.relMap()\n # Get the list of name patterns to search for\n node_name_filter = getattr(device, 'zF5BigipNodesNameFilter', None)\n log.debug(\"Picked up Filter List of: %s\" , node_name_filter)\n for oid, data in ltmnode_table.items():\n # log.debug(\"%s : %s\\n\", oid, data)\n #\n om = self.objectMap(data)\n binclude = True\n if node_name_filter != None and node_name_filter != \"\":\n # If there is a regex filter supplied, lets use it\n if re.search(node_name_filter, om.ltmNodeAddrScreenName) == None:\n binclude = False\n if binclude == True:\n # The value fetched is a packed hex representation of the IP\n # Try and unpack the address, and check if route_domains\n # are in use\n address, route_domain = unpack_address_to_string(oid, \n om.ltmNodeAddrAddr)\n if address != \"\":\n om.ltmNodeAddrAddr = address\n if route_domain != \"\":\n om.ltmNodeAddrRouteDomain = route_domain\n om.id = self.prepId(om.ltmNodeAddrAddr)\n om.snmpindex = oid\n\n om.ltmNodeAddrStatusEnabledState = \\\n enable_state_values[om.ltmNodeAddrStatusEnabledState]\n om.ltmNodeAddrStatusAvailState = \\\n avail_status_values[om.ltmNodeAddrStatusAvailState]\n rm.append(om)\n log.debug(rm)\n return [rm]", "def stream_ingest(df):\n global index\n\n i=0\n coords= []\n datum = collections.OrderedDict()\n for index, row in df.iterrows():\n datum[\"symbol\"]=str(df.iloc[index,0])\n datum[\"spot_price\"]=float(df.iloc[index,1])\n datum[\"option_type\"] = str(df.iloc[index, 4])\n datum[\"exposure\"] = str(df.iloc[index, 6])\n datum[\"strike_price\"] = float(df.iloc[index, 7])\n datum[\"maturity_y\"] = int(df.iloc[index, 8])\n datum[\"maturity_m\"] = int(df.iloc[index, 9])\n datum[\"maturity_d\"] = int(df.iloc[index, 10])\n datum[\"calendar\"] = str(df.iloc[index, 11])\n datum[\"day_count\"] = str(df.iloc[index, 12])\n datum[\"risk_free_rate\"] = float(df.iloc[index, 13])\n datum[\"dividend_rate\"] = float(df.iloc[index, 14])\n datum[\"calc_dt_y\"] = int(df.iloc[index, 15])\n datum[\"calc_dt_m\"] = int(df.iloc[index, 16])\n datum[\"calc_dt_d\"] = int(df.iloc[index, 17])\n datum[\"volatility\"] = float(df.iloc[index, 18])\n 
coords.append(h_db.encode_datum(my_type, datum))\n\n i= i + 1\n # Pump data in batches\n if i % DATA_PACK == 0:\n response = h_db.insert_records(\n table_name=NEW_TABLE,\n data=coords,\n list_encoding=ENCODING,\n options={})\n coords = []\n time.sleep(INGEST_FREQ)\n print(response)\n\n # Flush the last batch\n if i % DATA_PACK != 0:\n response = h_db.insert_records(\n table_name=NEW_TABLE,\n data=coords,\n list_encoding=ENCODING,\n options={})\n\n # 3 second delay to mimic real time ingest\n time.sleep(INGEST_FREQ)\n print(response)\n return coords", "def merge_nodes(self):\n\n\t\t\t#obtenemos los dos primeros nodos que equivalen a quienes tienen menor frecuencia\n\t\t\twhile(len(self.heap)>1):\n\t\t\t\tnode1 = heapq.heappop(self.heap)\n\t\t\t\tnode2 = heapq.heappop(self.heap)\n\n\t\t\t\tmerged = self.HeapNode(None, node1.freq + node2.freq)#creamos un nodo padre que va a contener los nodos anteriores a la derecha y izquierda\n\t\t\t\tmerged.left = node1\n\t\t\t\tmerged.right = node2\n\n\t\t\t\theapq.heappush(self.heap, merged)#agregamos este nodo al priority queue", "def load_data(connection, insert_sql, data):\n cur = connection.cursor()\n for d in data:\n cur.execute(insert_sql, d)\n connection.commit()", "def _exec1(self, sql):\n result = self._exec(sql)\n return [row[0] for row in result]", "def getDbase(self):\n for item in self.sqlData: # for every colummn name in the data\n self.sqdbase[item]=np.array(self.sqlData[item]) # add to the dictionary the clomunm name and the corresponding data\n \n self.sqlData['index'] = list(range(len(self.sqlData['time']))) # since we sometimes have even a column for index(pandas put it automatically) and sometimes not which will not be used for Stats we dropp it out\n self.sqdbase.pop('index') # we make sure that all dataFRames we are working with has inde column and the drop it\n return self.sqdbase", "def get_data(self, date_time):\n id_columns = ','.join([col for col in self.table_primary_keys if col not in ['EFFECTIVEDATE', 'VERSIONNO']])\n return_columns = ','.join(self.table_columns)\n with self.con:\n cur = self.con.cursor()\n cur.execute(\"DROP TABLE IF EXISTS temp;\")\n cur.execute(\"DROP TABLE IF EXISTS temp2;\")\n cur.execute(\"DROP TABLE IF EXISTS temp3;\")\n cur.execute(\"DROP TABLE IF EXISTS temp4;\")\n # Store just the unique sets of ids that came into effect before the the datetime in a temporary table.\n query = \"\"\"CREATE TEMPORARY TABLE temp AS \n SELECT * \n FROM {table} \n WHERE EFFECTIVEDATE <= '{datetime}';\"\"\"\n cur.execute(query.format(table=self.table_name, datetime=date_time))\n # For each unique set of ids and effective dates get the latest versionno and sore in temporary table.\n query = \"\"\"CREATE TEMPORARY TABLE temp2 AS\n SELECT {id}, EFFECTIVEDATE, MAX(VERSIONNO) AS VERSIONNO\n FROM temp\n GROUP BY {id}, EFFECTIVEDATE;\"\"\"\n cur.execute(query.format(id=id_columns))\n # For each unique set of ids get the record with the most recent effective date.\n query = \"\"\"CREATE TEMPORARY TABLE temp3 as\n SELECT {id}, VERSIONNO, max(EFFECTIVEDATE) as EFFECTIVEDATE\n FROM temp2\n GROUP BY {id};\"\"\"\n cur.execute(query.format(id=id_columns))\n # Inner join the original table to the set of most recent effective dates and version no.\n query = \"\"\"CREATE TEMPORARY TABLE temp4 AS\n SELECT * \n FROM {table} \n INNER JOIN temp3 \n USING ({id}, VERSIONNO, EFFECTIVEDATE);\"\"\"\n cur.execute(query.format(table=self.table_name, id=id_columns))\n # Inner join the most recent data with the interconnectors used in the actual 
interval of interest.\n query = \"\"\"SELECT {cols} FROM temp4 ;\"\"\"\n query = query.format(cols=return_columns)\n data = pd.read_sql_query(query, con=self.con)\n return data", "def _merge_results(self, res_to_node, res_to_end):\n path_to_node = res_to_node['path']\n path_to_end = res_to_end['path']\n\n # Contains distances from start to other nodes\n dist_from_start = res_to_node['dist']\n # Contains distances from end to other nodes\n dist_from_end = res_to_end['dist']\n\n to_node_contributions = res_to_node['contributions']\n to_end_contributions = res_to_end['contributions']\n\n # Remove any shared nodes from the concatenated path\n shared_node = None\n i = 0\n full_path_ele_gain = res_to_node['ele_gain'] + res_to_end['ele_gain']\n while(i < len(path_to_end) and len(path_to_node) > 0\n and path_to_node[-1] == path_to_end[i]):\n shared_node = path_to_node[-1]\n path_to_node.pop(-1)\n full_path_ele_gain -= to_node_contributions[shared_node]\n full_path_ele_gain -= to_end_contributions[shared_node]\n i += 1\n # Replace the final shared node (midpoint if the paths share no other nodes)\n path_to_node.append(shared_node)\n full_path_ele_gain += to_node_contributions[shared_node]\n full_path = path_to_node + path_to_end[i:]\n full_path_len = dist_from_start[shared_node] + dist_from_end[shared_node]\n\n return SearchResult(\n path=full_path,\n path_len= full_path_len,\n ele_gain=full_path_ele_gain\n )", "def get_node_merge_query(user):\n labels = \"user\"\n\n statement = ((\"MERGE (n {id: {node_id}}) \"\n \"ON MATCH SET n={props}, n :%s \"\n \"ON CREATE SET n={props}, n :%s \"\n \"RETURN (n)\") % (labels, labels))\n\n props = {\"node_id\": user[\"id\"], \"props\": user}\n\n return statement, props", "def archive_mds_data(self, lmtdb):\n\n dataset_names = [\n 'mdservers/cpuload',\n ]\n\n self.init_datasets(dataset_names, lmtdb.mds_names)\n\n # Now query the MDS_DATA table to get byte counts over the query time range\n results, columns = lmtdb.get_mds_data(self.query_start, self.query_end_plusplus)\n\n\n # Index the columns to speed up insertion of data\n col_map = {}\n try:\n for db_col in ['TIMESTAMP', 'MDS_ID', 'PCT_CPU']:\n col_map[db_col] = columns.index(db_col)\n except ValueError:\n raise ValueError(\"LMT database schema does not match expectation\")\n\n # Loop through all the results of the timeseries query\n for row in results:\n if isstr(row[col_map['TIMESTAMP']]):\n # SQLite stores timestamps as a unicode string\n timestamp = datetime.datetime.strptime(row[col_map['TIMESTAMP']],\n \"%Y-%m-%d %H:%M:%S\")\n else:\n # MySQL timestamps are automatically converted to datetime.datetime\n timestamp = row[col_map['TIMESTAMP']]\n target_name = lmtdb.mds_id_map[row[col_map['MDS_ID']]]\n for dataset_name in dataset_names:\n target_dbcol = self.config[dataset_name].get('column')\n # target_dbcol=PCT_CPU, target_name=snx11025n022\n if target_dbcol is not None:\n self[dataset_name].insert_element(\n timestamp,\n target_name,\n row[col_map[target_dbcol]])\n else:\n errmsg = \"%s in self.config but missing 'column' setting\" % dataset_name\n raise KeyError(errmsg)", "def combine_dict(self, dict2):\n # iterate through smaller data set\n # base_set will be the larger set and is used for updating\n if len(self.content[\"values\"]) > len(dict2[\"values\"]):\n large_set = self.content[\"values\"]\n small_set = dict2[\"values\"]\n base_set = self.content\n else:\n small_set = self.content[\"values\"]\n large_set = dict2[\"values\"]\n base_set = dict2\n\n subset = {}\n for key in small_set.keys():\n # 
determine wether to compare keys\n if key in large_set:\n updated_l = large_set[key][\"updated_at\"]\n updated_s = small_set[key][\"updated_at\"]\n if updated_l == 'NULL':\n if updated_s != 'NULL':\n # update to not NULL set\n # if both updated_at are NULL, things\n # are ambiguos. We could defer to created_at\n # but for simplicity we will default to\n # the values in the larger set\n subset[key] = small_set[key]\n else:\n if updated_s == 'NULL':\n # update to not NULL set\n subset[key] = large_set[key]\n else:\n if updated_l > updated_s:\n subset[key] = large_set[key]\n else:\n subset[key] =small_set[key]\n else:\n subset[key] = small_set[key]\n base_set[\"values\"].update(subset)\n new_obj = BackupData()\n new_obj.load_from_dict(base_set)\n return new_obj", "def merge_meta(df, col='demand'):\n meta_info = pd.read_csv(meta_path)\n df = df.merge(meta_info, left_on=\"node\", right_on=\"Node\",\n how='outer')\n return df.ix[df[col].dropna().index]", "def _process_nodes(self):\n # Sort the nodes by metanode type, then by id\n self.node_df = self.node_df.sort_values(['label', 'id']).reset_index(drop=True)\n # Get all the ids\n self.nodes = self.node_df['id']\n # Get mapping from the index to the node ID (one to many so need different one for each node type)\n self.index_to_nid = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.index_to_nid[group_name] = group['id'].reset_index(drop=True).to_dict()\n # Get the reverse mapping (many to one so don't need to separate based on type).\n self.nid_to_index = dict()\n for mapper in self.index_to_nid.values():\n for index, nid in mapper.items():\n self.nid_to_index[nid] = index\n # Finally, we need a mapper from id to node type\n self.id_to_metanode = self.node_df.set_index('id')['label'].to_dict()\n # And from node type to a list of ids\n self.metanode_to_ids = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.metanode_to_ids[group_name] = group['id'].tolist()\n # One more mapper of id to name\n self.nid_to_name = self.node_df.set_index('id')['name'].to_dict()", "def update_one_node_from_pbs_data(node, attr_dict):\n # put node under a subcluster if it does not have any yet\n if not node.subcluster:\n for id,node_regexp in SubCluster.objects.filter(server=node.server).values_list('id','node_regexp'):\n if re.match(node_regexp,node.name):\n node.subcluster_id = id\n node.save()\n break\n # fill node's np if it is not present\n if not node.np:\n node.np = attr_dict['np']\n node.save()\n\n new_states = []\n if attr_dict.has_key('state'):\n# node.state.clear()\n for statename in attr_dict['state'].split(','):\n #node.state.add(NodeState.objects.get(name=statename.strip()))\n new_states.append(NodeState.objects.get(name=statename.strip()))\n attr_dict['state'] = new_states\n\n\n new_properties = []\n if attr_dict.has_key('properties'):\n# node.properties.clear()\n for propertyname in attr_dict['properties'].split(','):\n np,created = NodeProperty.objects.get_or_create(name=propertyname.strip())\n if created:\n print(\"New property created: %s\" % propertyname)\n new_properties.append(np)\n# node.properties.add(np)\n attr_dict['properties'] = new_properties\n\n new_jobs = []\n if attr_dict.has_key('jobs'):\n slot_jobs = dict([tuple(j.strip().split('/')) for j in attr_dict['jobs'].split(',')])\n for slotstr, longjobid in slot_jobs.items():\n slot = int(slotstr)\n# js,created = getJobSlot(slot=slot,node=node)\n# if created:\n# logging.info(\"new jobslot will be created: slot: %d, node name: %s\" % (slot,name))\n 
jobid = int(longjobid.split('.')[0])\n new_jobs.append(jobid)\n \n# js.livejob,created = LiveJob.objects.get_or_create(jobid=jobid, server=node.server)\n# if created:\n# logging.info(\"new livejob created: %d\" % jobid)\n# js.save()\n attr_dict['jobs'] = new_jobs\n return attr_dict", "def load_data_sql(): \r\n conn = mysql.connect(**st.secrets[\"mysql\"])\r\n\r\n data = pd.read_sql('SELECT * FROM song_data', conn)\r\n lookup_table = pd.read_sql('SELECT * FROM lookup_table', conn)\r\n \r\n return data, lookup_table", "def _merge_table_data(self, first_page):\n table = self._table_defs.get(first_page * self.page_size)\n parsed_header = TDEF_HEADER.parse(table)\n data = table[parsed_header.header_end:]\n while parsed_header.next_page_ptr:\n table = self._table_defs.get(parsed_header.next_page_ptr * self.page_size)\n parsed_header = TDEF_HEADER.parse(table)\n data = data + table[parsed_header.header_end:]\n return data", "def merge_all_data(self):\n\n logging.info('***** Starting the merging process merge_all_data')\n\n \"\"\" All possible unique_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n date_times = np.array(date_times) \n\n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, = [] , [] , [] , [] , []\n best_ds_list = [] \n source_files = []\n station_configurations = []\n\n \"\"\" The items contained in the lists in the list below can be removed from the list when the record that was previously stored is removed. \"\"\"\n all_list = [all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, best_ds_list, source_files , station_configurations ] # holder of all the above lists\n all_list_name = ['all_combined_obs' , 'all_combined_head', 'all_combined_era5fb' , 'combined_indices' , 'combined_date_time' , 'best_ds_list', 'source_files' ] \n \n removed_record, kept_record = [], []\n \n \"\"\" Dictionary that will contain the merged file. 
\"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #dt_bestds_dic = {} # store the selected best dataset for each dt \n #date_times=date_times[0:30000]\n tot = len(date_times)\n tt=time.time()\n print('*** Merging ' , tot, ' records ***')\n \n early_datasets = True\n \n self.processed_dt = [] \n \n for dt, c in zip(date_times, range(tot) ): # loop over all the possible date_times \n\n if (c+1)%1000==0:\n print('Analize : ', str(c+1) , '/', str(tot) , ' ', dt , ' ',\n now(time.time()),'{:5.3f}'.format(time.time()-tt ))\n\n delete = self.delete_ds(dt) # check if there is a dataset to delete \n \n \"\"\" Finding if this record is the same as the previous one analyzed, according to the given time_shift \"\"\"\n if c == 0:\n is_same_record = False\n else:\n is_same_record = self.is_same_record( time_shift = self.hour_time_delta , dt = dt)\n \n \"\"\" Updating list of processed datetimes \"\"\"\n self.processed_dt.append(dt) # cannot put it before the check_timeshift or it will check itself \n\n \n cleaned_df_container = {} \n all_len = [] # will hold the length of all the obs_tabs \n \n for k in self.dataset_per_dt[dt].keys() : # checking the list of available datasets \n ''' {'era5_2': ['example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._1:82930.gz.nc', \n 'example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._82930.gz.nc']}\n ''' \n for F in self.dataset_per_dt[dt][k]: # checking the list of available files for the dataset\n \n if data[k][F][\"counter\"] %self.slice_size==0 or data[k][F][\"counter\"] == 0: # loading the data only at specific slices \n load = self.load_obstab_feedback_sliced(datetime=dt, dataset=k, file = F)\n \n data[k][F][\"counter\"] = data[k][F][\"counter\"] + 1 \n \n obs_tab, era5fb_tab = self.make_obstab_era5fb_dic(dataset = k , date_time = dt, File = F )\n\n if len(obs_tab['date_time'][:])==0: # go to next file if obs_tab is empty \n #print('ZERO length')\n continue \n\n all_len.append( len(obs_tab['date_time'][:] ) )\n \n if k not in cleaned_df_container.keys():\n cleaned_df_container[k] = {}\n\n cleaned_df_container[k][F] = {}\n cleaned_df_container[k][F]['obs_tab'] = obs_tab # cleaned dataframe \n cleaned_df_container[k][F]['era5fb_tab'] = era5fb_tab # cleaned dataframe \n \n \"\"\" Merging the different records found in the sifferent sources \"\"\"\n if bool(all_len): # skipping empty container dictionary. 
At this point I certainyl have one valid record \n best_ds, combined_obs_tab, combined_era5fb_tab, combined_head_tab, selected_file, best_file = self.combine_record(dt, container = cleaned_df_container)\n \n if is_same_record: # decide what to keep in case of same record\n temporary_previous = all_combined_obs[-1] # keep the temporary previous record \n\n if best_ds in ['era5_1','era5_2']: # best_ds from era5\n if best_ds_list[-1] not in ['era5_1','era5_2']: # remove previous non era5_1 or era5_2 record \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n\n elif best_ds_list[-1] in ['era5_1','era5_2']:\n if len(combined_obs_tab) <= len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab)\n continue # nothing to do, will keep the previous records -> go to next dt \n \n else: # case where both the current and previous are from era5_1 and era5_2, but the previous has smaller number of data \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # best_ds not from era5\n if best_ds_list[-1] in ['era5_1','era5_2']:\n #print('This best ds is ' , best_ds , ' but I will keep ' , best_ds_list[-1] )\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else:\n if len(combined_obs_tab) < len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue # nothing to do, will keep the previous records -> go to next dt \n \n elif len(combined_obs_tab) > len(all_combined_obs[-1] ): # remove previous, keep current \n for lista in all_list:\n lista.pop() \n #kept_record.append(combined_obs_tab) \n #removed_record.append(temporary_previous)\n \n elif len(combined_obs_tab) == len(all_combined_obs[-1] ): # prefer igra2, otherwise\n if best_ds == 'igra2':\n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # case where data source is not important, I keep the previous and do nothing \n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else: # not the same record, nothing special to do, keep both previous and current \n pass \n else:\n print(' Found an empty record / time shifted record ')\n continue\n \n\n \"\"\" Fill the best_ds list \"\"\"\n best_ds_list.append(best_ds)\n\n \"\"\" Storing the selected file for the source_configuration \"\"\"\n source_files.append(selected_file)\n \"\"\" Selecting the station_configuration \"\"\"\n station_configurations.append(self.data[best_ds][best_file]['station_configuration'] )\n \n \"\"\" Storing the combined era5fb, header and observations tables\"\"\"\n all_combined_era5fb.append(combined_era5fb_tab)\n all_combined_obs .append(combined_obs_tab)\n \n primary, name = self.data[best_ds][best_file]['station_configuration']['primary_id'][0] , self.data[best_ds][best_file]['station_configuration']['station_name'][0] \n #combined_head_tab['primary_station_id'] = [ primary ] * len( combined_head_tab ) \n #combined_head_tab['station_name'] = [ name ] * len( combined_head_tab ) \n \n combined_head_tab['primary_station_id'] = np.array( [primary] )\n combined_head_tab['station_name'] = np.array( [name] )\n \n all_combined_head .append(combined_head_tab)\n\n \"\"\" Dictionary to fill the best_ds for duplicates \"\"\"\n 
#dt_bestds_dic[dt] = {}\n #dt_bestds_dic[dt]['best_ds'] = best_ds\n #dt_bestds_dic[dt]['len'] = len(combined_obs_tab['date_time'])\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n combined_indices.append(len(combined_obs_tab['date_time'])) \n combined_date_time.append(dt)\n\n del cleaned_df_container \n \n \n \n #print(blue + 'Memory used after deleting the cleaned_df_container: ', process.memory_info().rss/1000000000 , cend)\n\n \"\"\" Removing remaining loaded df \"\"\"\n for k in self.datasets_keys:\n for F in self.datasets[k]:\n try:\n del data[k][F]['era5fb_tab']\n print('=== removed era5fb ' , k , F )\n except:\n pass\n try:\n del data[k][F]['observations_table']\n print('=== removed obstab ' , k , F ) \n except:\n pass\n \n \n \"\"\" Saving a numpy dictionary \"\"\"\n print(\" === Saving the numpy dictionary of removed and kept records +++ \")\n #dic_records = { 'kept' : kept_record , 'removed': removed_record }\n #np.save(self.station + '_time_shift_removed_kept.npy',dic_records )\n \n \n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n combined_date_time = np.array(combined_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : combined_date_time.shape } , combined_date_time )\n di['recordtimestamp'].attrs['units']='seconds since 1900-01-01 00:00:00'\n\n \"\"\" Creating the merged indices mi \"\"\"\n mi = [] \n mi.append(0)\n for i in range(len(combined_indices)):\n mi.append( combined_indices[i] + mi[-1] )\n mi.pop()\n pop = np.array(mi) # removing last unecessary index \n di['recordindex'] = ( {'recordindex' : pop.shape } , pop )\n\n\n \"\"\" Creating the combined data \"\"\"\n logging.debug('*** Concatenating the observations_table ' ) \n combined_obs = {}\n #### Writing combined observations_table dic\n logging.info(' ***** Writing the observations_table to the netCDF output ***** ' ) \n for k in all_combined_obs[0].keys(): \n a = np.concatenate([all_combined_obs[i][k][:] for i in range(len(all_combined_obs))])\n if k == 'date_time':\n combined_obs[k]= a \n self.tot_records = len(combined_obs[k])\n self.write_merged(content = 'observations_table', table= {k:a})\n #logging.info('*** Written observations table %s: ', k)\n\n\n #self.tot_records = len(combined_obs['date_time'])\n del all_combined_obs\n print(blue + 'Memory used after deleting all_combined_obs dic: ', process.memory_info().rss/1000000000 , cend )\n \n dateindex = combined_obs['date_time']//86400 \n date_times, indices, counts = np.unique(dateindex, return_counts = True, return_index= True) \n di['dateindex'] = ( {'dateindex' : indices.shape } , indices ) # considers the day only \n del combined_obs\n \n combined_era5fb = {}\n #### Writing combined era5fb_table dic \n for k in all_combined_era5fb[0].keys():\n try:\n #combined_era5fb[k]=np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n #self.write_merged(content = 'era5fb', table= {k:combined_era5fb[k]})\n \"\"\" try replacing , remove combined_era5fb = {} \"\"\"\n a = np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n self.write_merged(content = 'era5fb', table= {k:a})\n logging.debug('*** Written era5fb %s: ', k)\n except:\n print(\"FAILED feedback variable \" , k)\n\n del all_combined_era5fb\n print(blue + 'Memory used after deleting era5fb_tab dic: ', process.memory_info().rss/1000000000 , cend)\n\n\n #### Writing combined header_table dic \n for k in all_combined_head[0].keys():\n print('head variable is', k )\n if ( k == 
'comments' or k == 'history'):\n continue\n try:\n tab=np.concatenate([all_combined_head[i][k][:] for i in range(len(all_combined_head))])\n self.write_merged(content = 'header_table', table= {k: tab}) # { key: np.array([])}\n logging.info('*** Written header table %s: ', k)\n except:\n print('FFF FAILED variable in header table', k )\n\n del all_combined_head\n print(blue + 'Memory used after deleting all_merged head_tab dic: ', process.memory_info().rss/1000000000 , cend)\n \n self.write_merged(content = 'recordindex', table = di) \n self.write_merged(content = 'cdm_tables', table= '')\n\n\n source_conf=xr.Dataset()\n source_files = np.array(source_files).astype(dtype='|S70')\n source_conf['source_file'] = ( {'source_file' : source_files.shape } , source_files )\n self.write_merged(content = 'source_configuration', table= source_conf )\n\n print(0)\n\n\n \"\"\" Concatenation of station_configurations \"\"\"\n station_conf = pd.concat( station_configurations ) \n for k in station_conf.columns:\n try:\n a =np.array( station_conf[k])\n self.write_merged(content = 'station_configuration', table= {k:a})\n logging.debug('*** Written station_configuration %s: ', k)\n except:\n print(\" Failed station_configuration \" , k )\n \n return 0", "def merge(self):\n leftSon = self.nodes.getNode(0)\n rightSon = self.nodes.getNode(1)\n self.nodes.removeNode(0)\n self.nodes.removeNode(0)\n self.nodes.addNode(Node(leftSon, rightSon))", "def test_get_rows_with_sql(self):\n error = None\n try:\n statements = [\n \"select timestamp from cpu_idle\",\n \"select value from cpu_idle\",\n \"select host from cpu_idle\",\n \"select timestamp,field1 from cpu_idle\",\n \"select * from cpu_idle\",\n \"select timestamp, value from cpu_idle order by timestamp \",\n \"select timestamp, value from cpu_idle order by timestamp desc\",\n '''select timestamp, value from cpu_idle\n where value > 30 and timestamp >150937263000''',\n \"select host, count(1) from cpu_idle group by host\",\n '''select time_bucket(timestamp, '2 days') as DAY, sum(value) as SUM\n from cpu_idle group by time_bucket(timestamp, '2 days')\n order by time_bucket(timestamp, '2 days')''',\n \"select timestamp, ((field2 - field1) * 10) as RESULT, host from cpu_idle\",\n \"select timestamp from cpu_idle\",\n '''SELECT field1, CASE field1 WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 'many' END\n FROM cpu_idle''',\n \"SELECT field1, IF(field1>100,1,0) as result FROM cpu_idle\",\n \"SELECT field1, field2, COALESCE (field1, field2) as result FROM cpu_idle\",\n \"SELECT field1, abs (field1) as result FROM cpu_idle\",\n \"SELECT field1, sqrt (field1) as result FROM cpu_idle\",\n \"SELECT field1, cbrt (field1) as result FROM cpu_idle\",\n \"SELECT field1, ceil (field1) as result FROM cpu_idle\",\n \"SELECT field1, floor (field1) as result FROM cpu_idle\",\n \"SELECT 'str1' || 'str2' as result FROM cpu_idle\",\n '''SELECT time_bucket(timestamp, '2 days') as DAY, avg(field1) as result \n FROM cpu_idle group by time_bucket(timestamp, '2 days')\n order by time_bucket(timestamp, '2 days')''',\n ''' SELECT count(*) as result \n FROM cpu_idle where timestamp < 1525611901''',\n ''' SELECT time_bucket(timestamp, '2 days') as DAY, count(field1) as count \n FROM cpu_idle group by time_bucket(timestamp, '2 days')\n order by time_bucket(timestamp, '2 days')''',\n '''SELECT max_by(field1,field2) as result \n FROM cpu_idle where timestamp < 1525611901000 ''',\n '''SELECT min_by(field1,field2) as result \n FROM cpu_idle where timestamp < 1525611901000\t''',\n '''SELECT max(field1) as 
result \n FROM cpu_idle where timestamp < 1525611901000''',\n '''SELECT min(field1) as result \n FROM cpu_idle where timestamp < 1525611901000''',\n '''SELECT time_bucket(timestamp, '2 days') as DAY, sum(field1) as sum \n FROM cpu_idle group by time_bucket(timestamp, '2 days')\n order by time_bucket(timestamp, '2 days')'''\n ]\n for statement in statements:\n response = self.tsdb_client.get_rows_with_sql(statement)\n print(statement, response)\n except BaseException as e:\n error = e\n finally:\n self.assertIsNone(error)", "def aggregate_intermediate_data_frame(self, node_name, child, child_df, edge_df):\n expr = []\n for n in child_df.schema.names:\n if n in self.parser.reducer_by_prop:\n if self.parser.reducer_by_prop.get(n) in [\"list\", \"set\"]:\n expr.append(\n self.reducer_to_agg_func_expr(\n self.parser.reducer_by_prop.get(n), n, is_merging=False\n )\n )\n else:\n expr.append(\n self.reducer_to_agg_func_expr(\n self.parser.reducer_by_prop.get(n), n, is_merging=True\n )\n )\n if len(expr) == 0:\n expr.append(\n self.reducer_to_agg_func_expr(\"set\", get_node_id_name(child.name))\n )\n tmp_df = (\n self.join_two_dataframe(edge_df, child_df, how=\"left_outer\")\n .groupBy(get_node_id_name(node_name))\n .agg(*expr)\n )\n\n select_expr = [get_node_id_name(node_name)]\n for n in child_df.schema.names:\n if n in self.parser.reducer_by_prop and self.parser.reducer_by_prop.get(\n n\n ) in [\"list\", \"set\"]:\n select_expr.append(\n self.reducer_to_agg_func_expr(\n self.parser.reducer_by_prop.get(n), n, is_merging=True\n )\n )\n tmp_df = tmp_df.select(*select_expr)\n return self.return_dataframe(\n tmp_df,\n f\"{Translator.aggregate_intermediate_data_frame.__qualname__}__{node_name}__{child.name}\"\n )", "def combine(new_data, raw_data):\n return pd.merge(new_data, raw_data, on=[\"location\", \"date\"], how=\"outer\")", "def _tree_getitem(cls, op):\n out_series = op.outputs[0]\n combine_size = options.combine_size\n chunks = op.inputs[0].chunks\n while len(chunks) > combine_size:\n new_chunks = []\n for i in range(0, len(chunks), combine_size):\n chks = chunks[i : i + combine_size]\n if len(chks) == 1:\n chk = chks[0]\n else:\n concat_op = DataFrameConcat(output_types=[OutputType.series])\n chk = concat_op.new_chunk(chks, dtype=chks[0].dtype)\n chk_op = SeriesIndex(labels=op.labels, is_intermediate=True)\n kw = {\"name\": out_series.name} if hasattr(out_series, \"name\") else {}\n chk = chk_op.new_chunk(\n [chk],\n shape=(np.nan,),\n dtype=chk.dtype,\n index_value=parse_index(pd.RangeIndex(-1)),\n **kw,\n )\n new_chunks.append(chk)\n chunks = new_chunks\n\n concat_op = DataFrameConcat(output_types=[OutputType.series])\n kw = {\"name\": out_series.name} if hasattr(out_series, \"name\") else {}\n kw[\"index\"] = (0,)\n chk = concat_op.new_chunk(chunks, dtype=chunks[0].dtype, **kw)\n index_op = SeriesIndex(labels=op.labels)\n chunk = index_op.new_chunk([chk], dtype=chk.dtype, **kw)\n new_op = op.copy()\n nsplits = ((len(op.labels),),) if isinstance(op.labels, list) else ()\n kw = out_series.params\n kw[\"nsplits\"] = nsplits\n kw[\"chunks\"] = [chunk]\n return new_op.new_tileables(op.inputs, kws=[kw])", "def __init__(self):\n BDLQuery.__init__(self)\n self.pandas_df = []", "def join(self, other, on):\n\t\t# check for correct join\n\t\tif not (on in self.headers or on in other.headers):\n\t\t\tprint \"Error: header '{0}' not found in both collections\".format(on)\n\t\t\treturn None\n\n\t\t# create new dataset\n\t\tjoined = Dataset()\n\t\t\n\t\t# fill new dataset with combined 
data\n\t\tmappedHeaders = joinHeaders(self, other, joined, on)\n\t\tmergeRows(self, other, joined, on, mappedHeaders)\n\t\tjoined.ensureFilled()\n\n\t\t# return newly created dataset\n\t\treturn joined", "def mergeMetadata(self, obj, dom): \n self.update_semantics = 'merge'\n # create a metadata dict that has all the values from obj, overridden\n # by the current dom values.\n metadata = self.getModuleMetadata(obj, {})\n metadata.update(self.getMetadata(dom, METADATA_MAPPING))\n for oerdc_name, cnx_name in METADATA_MAPPING.items():\n if cnx_name in ['keywords',]:\n old_value = getattr(obj, cnx_name)\n if old_value:\n current_value = list(metadata.get(cnx_name, []))\n current_value.extend(old_value)\n metadata[cnx_name] = current_value\n if metadata:\n self.validate_metadata(metadata)\n metadata = self.fixEntities(metadata, ATTRIBUTES_TO_FIX)\n if ICollection.providedBy(obj):\n obj.collection_metadata(**metadata)\n elif IModule.providedBy(obj):\n obj.update_metadata(**metadata)\n self.updateRoles(obj, dom)\n obj.reindexObject(idxs=metadata.keys())", "def merge(list1, list2):\n holding = list1.to_list()\n [holding.append(i) for i in list2.to_list()]\n # for i in list2.to_list():\n # holding.append(i)\n holding = sorted(holding)\n\n output = LinkedList(Node(holding[0]))\n for i in holding[1:]:\n output.append(i)\n return output", "def create_query_df(self):\n\n # display output message for timeframe\n print(\n f'{Fore.GREEN}\\nQuerying database for tags between the timeframe: '\n f'{Fore.LIGHTGREEN_EX}{str(self._start)}{Fore.GREEN} and {Fore.LIGHTGREEN_EX}{str(self._end)}'\n f'{Style.RESET_ALL}')\n print(\n f'{Fore.GREEN}\\nTIMESPAN: '\n f'{Fore.LIGHTGREEN_EX}{self.time_span} hours'\n f'{Style.RESET_ALL}')\n\n engine = get_db_engine()\n offset = 0\n chunk_size = 100000\n\n dfs = []\n while True:\n sa_select = sa.select(\n [self.data_table],\n whereclause=sa.and_(\n self.data_table.c._TIMESTAMP > '{}'.format(self._start),\n self.data_table.c._TIMESTAMP <= '{}'.format(self._end)),\n limit=chunk_size,\n offset=offset,\n order_by=self.data_table.c._NUMERICID\n )\n dfs.append(pd.read_sql(sa_select, engine))\n offset += chunk_size\n if len(dfs[-1]) < chunk_size:\n break\n\n self.query_df = pd.concat(dfs)", "def join(self, b_tree, key, data, failed_counter):\n try:\n # Try to find a match\n right_data = b_tree.pop(key)\n data['right_data'] = right_data\n except KeyError:\n # If no match if found catch the exception and carry on\n self.cc_log(\"INFO\", \"Could not find any data for the key %s\" % key)\n failed_counter += 1\n\n return (data, b_tree, failed_counter)", "def _merge_dataframes(self, left_node: Node, right_node: Node):\r\n\r\n left_dataframe = self._get_table(left_node.name)\r\n right_dataframe = self._get_table(right_node.name)\r\n\r\n join_id = 0\r\n while left_node.inner_joins[join_id]['join_with'] != right_node.name:\r\n join_id += 1\r\n\r\n merged_df = left_dataframe.merge(right_dataframe,\r\n how='left',\r\n left_on=left_node.inner_joins[join_id]['on'],\r\n right_on=left_node.inner_joins[join_id]['join_with_on'],\r\n suffixes=(None, \"_\" + right_node.name))\r\n self._set_table(left_node.name, merged_df)", "def combine_data(self, object, additional_data):\n for k, v in additional_data.items():\n if isinstance(v, list):\n object[k] = object.get(k, []) + v\n else:\n object[k] = v\n for instance in object.get(\"instances\", []):\n if instance.get(\"sub_container\", {}).get(\"top_container\", {}).get(\"_resolved\"):\n del instance[\"sub_container\"][\"top_container\"][\"_resolved\"]\n 
object = super(ArchivalObjectMerger, self).combine_data(object, additional_data)\n return combine_references(object)", "def insert_into_sql(chunk):\n bulk_list = []\n for row in chunk:\n bulk_list.append(StockData(\n date=str(row[0])[0:4] + '-' + str(row[0])[4:6] + '-' + str(row[0])[6:8],\n code=row[1],\n code_name=row[2],\n d1_diff_rate=row[3],\n close=row[4],\n open=row[5],\n high=row[6],\n low=row[7],\n volume=row[8],\n clo5=row[9],\n clo10=row[10],\n clo20=row[11],\n clo40=row[12],\n clo60=row[13],\n clo80=row[14],\n clo100=row[15],\n clo120=row[16],\n clo5_diff_rate=row[17],\n clo10_diff_rate=row[18],\n clo20_diff_rate=row[19],\n clo40_diff_rate=row[20],\n clo60_diff_rate=row[21],\n clo80_diff_rate=row[22],\n clo100_diff_rate=row[23],\n clo120_diff_rate=row[24],\n yes_clo_5=row[25],\n yes_clo_10=row[26],\n yes_clo_20=row[27],\n yes_clo_40=row[28],\n yes_clo_60=row[29],\n yes_clo_80=row[30],\n yes_clo_100=row[31],\n yes_clo_120=row[32],\n vol5=row[33],\n vol10=row[34],\n vol20=row[35],\n vol40=row[36],\n vol60=row[37],\n vol80=row[38],\n vol100=row[39],\n vol120=row[40],\n ))\n StockData.objects.bulk_create(bulk_list)\n return bulk_list", "def _merge_two(self, obj1, obj2):\r\n for uniq_ident in obj2.keys():\r\n if (uniq_ident not in obj1) \\\r\n or (obj1[uniq_ident]['modified'] \\\r\n < obj2[uniq_ident]['modified']):\r\n obj1[uniq_ident] = obj2[uniq_ident]\r\n\r\n return obj1 # self._dict_to_list(obj1)\r", "def mergeTreeFrom1DTimeSeries(x):\n #Add points from the bottom up\n N = len(x)\n idx = np.argsort(x)\n idxorder = np.zeros(N)\n idxorder[idx] = np.arange(N)\n UFP = np.arange(N) #Pointer to oldest indices\n UFR = np.arange(N) #Representatives of classes\n I = [] #Persistence diagram\n PS = {} #Persistences for merge tree nodes\n MT = {} #Merge tree\n for i in idx:\n neighbs = set([])\n #Find the oldest representatives of the neighbors that\n #are already alive\n for di in [-1, 1]: #Neighbor set is simply left/right\n if i+di >= 0 and i+di < N:\n if idxorder[i+di] < idxorder[i]:\n neighbs.add(UFFind(UFP, i+di))\n #If none of this point's neighbors are alive yet, this\n #point will become alive with its own class\n if len(neighbs) == 0:\n continue\n neighbs = [n for n in neighbs]\n #Find the oldest class, merge earlier classes with this class,\n #and record the merge events and birth/death times\n oldestNeighb = neighbs[np.argmin([idxorder[n] for n in neighbs])]\n #No matter, what, the current node becomes part of the\n #oldest class to which it is connected\n UFUnion(UFP, oldestNeighb, i, idxorder)\n if len(neighbs) > 1: #A nontrivial merge\n MT[i] = [UFR[n] for n in neighbs] #Add merge tree children\n for n in neighbs:\n if not (n == oldestNeighb):\n #Record persistence event\n I.append([x[n], x[i]])\n pers = x[i] - x[n]\n PS[i] = pers\n PS[n] = pers\n UFUnion(UFP, oldestNeighb, n, idxorder)\n #Change the representative for this class to be the\n #saddle point\n UFR[oldestNeighb] = i\n #Add the essential class\n idx1 = np.argmin(x)\n idx2 = np.argmax(x)\n [b, d] = [x[idx1], x[idx2]]\n I.append([b, d])\n I = np.array(I)\n PS[idx1] = d-b\n PS[idx2] = d-b\n return (MT, PS, I)", "def _query_data(self, index, tag):\n version, datapoints = yield self.quasar.stream_get(self.name, tag, tag+(15*qdf.MINUTE))\n values = np.empty((BLOCK_SIZE,), dtype=(type(datapoints[0])))\n values[:] = None\n \n for point in datapoints:\n time = float(point.time - tag)\n time_index = int(round(time*SAMPLE_RATE/qdf.SECOND))\n values[time_index] = point\n\n self.cache[index][CACHE_INDEX_TAG] = tag\n 
self.cache[index][CACHE_INDEX_DATA] = values", "def merge_data(self, nodenet_data, keep_uids=False):\n\n uidmap = {}\n # for dict_engine compatibility\n uidmap[\"Root\"] = \"s1\"\n\n # re-use the root nodespace\n uidmap[\"s1\"] = \"s1\"\n\n # merge in spaces, make sure that parent nodespaces exist before children are initialized\n nodespaces_to_merge = set(nodenet_data.get('nodespaces', {}).keys())\n for nodespace in nodespaces_to_merge:\n self.merge_nodespace_data(nodespace, nodenet_data['nodespaces'], uidmap, keep_uids)\n\n # merge in nodes\n for uid in nodenet_data.get('nodes', {}):\n data = nodenet_data['nodes'][uid]\n parent_uid = data['parent_nodespace']\n if not keep_uids:\n parent_uid = uidmap[data['parent_nodespace']]\n if data['type'] in self.__nodetypes or data['type'] in self.native_modules:\n olduid = None\n if keep_uids:\n olduid = uid\n new_uid = self.create_node(\n data['type'],\n parent_uid,\n data['position'],\n name=data['name'],\n uid=olduid,\n parameters=data['parameters'],\n gate_parameters=data['gate_parameters'],\n gate_functions=data['gate_functions'])\n uidmap[uid] = new_uid\n node_proxy = self.get_node(new_uid)\n for gatetype in data['gate_activations']: # todo: implement sheaves\n node_proxy.get_gate(gatetype).activation = data['gate_activations'][gatetype]['default']['activation']\n\n else:\n warnings.warn(\"Invalid nodetype %s for node %s\" % (data['type'], uid))\n\n # merge in links\n for linkid in nodenet_data.get('links', {}):\n data = nodenet_data['links'][linkid]\n self.create_link(\n uidmap[data['source_node_uid']],\n data['source_gate_name'],\n uidmap[data['target_node_uid']],\n data['target_slot_name'],\n data['weight']\n )\n\n for monitorid in nodenet_data.get('monitors', {}):\n data = nodenet_data['monitors'][monitorid]\n if 'node_uid' in data:\n old_node_uid = data['node_uid']\n if old_node_uid in uidmap:\n data['node_uid'] = uidmap[old_node_uid]\n if 'classname' in data:\n if hasattr(monitor, data['classname']):\n getattr(monitor, data['classname'])(self, **data)\n else:\n self.logger.warn('unknown classname for monitor: %s (uid:%s) ' % (data['classname'], monitorid))\n else:\n # Compatibility mode\n monitor.NodeMonitor(self, name=data['node_name'], **data)", "def extract_join_info(node):\n operator_info = node['operatorInfo']\n analyze_info = node['AnalyzeInfo']\n\n if 'Join' in node['id']:\n # Join Node\n join_type = extract_join_type(operator_info)\n conditions = extract_join_conditions(operator_info)\n current_node = JoinPlan(join_type, conditions)\n assert 'children' in node and len(node['children']) == 2\n childrens = node['children']\n current_node.left_node = extract_join_info(childrens[0])\n current_node.right_node = extract_join_info(childrens[1])\n current_node.execute_time = analyze_info[\"time\"]\n current_node.est_rows = node[\"estRows\"]\n else:\n # Table Reader\n # assert 'TableReader' in node['id']\n # extract selection if need\n current_node = extract_table_reader(node)\n current_node.est_rows = node['estRows']\n return current_node", "def datapull_master(self, connect):\n self.newtable = 'popcorn'\n self.oldtable = 'hashtags'\n self.scorecol = 'score'\n self.coltype = 'INTEGER'\n self.pullvars = 'tweet_id, created_at, from_user_screen_name, from_user_id, favorite_count, retweet_count, content'\n self.modvars ='tweet_id, created_at, from_user_screen_name, from_user_id, favorite_count, retweet_count, score, content'\n self.filtervars ='language, entities_media_count, retweeted_status, truncated'\n self.filters = '''language = 'en' 
AND entities_media_count = 0 AND retweeted_status = '' AND truncated = 0'''\n self.ordering = 'score DESC'\n self.tweet_dt = 'created_at'\n self.cron_ordering = 'created_at ASC'\n self.c = connect.cursor()\n #Create modified table (drop if exists)\n self.c.execute(\"DROP TABLE IF EXISTS {newtab}\".format(newtab=self.newtable))\n self.c.execute(\"CREATE TABLE {newtab} AS SELECT {vars}, {filtervars} FROM {oldtab} ORDER BY {order}\" \\\n .format(newtab=self.newtable, vars=self.pullvars, filtervars=self.filtervars, oldtab=self.oldtable, order=self.cron_ordering))\n #Add in Score\n self.c.execute(\"ALTER TABLE {tn} ADD COLUMN '{cn}' {ct}\"\\\n .format(tn=self.newtable, cn=self.scorecol, ct=self.coltype))\n self.c.execute(\"UPDATE {tn} SET {scorecol} = {fvt} + 5*{rt}\"\\\n .format(tn=self.newtable, scorecol=self.scorecol, fvt='favorite_count', rt='retweet_count'))\n connect.commit()", "def build_tag_query(db, request, tags):\n inner_query, clauses = build_inner_query(request, tags)\n query = \"\"\"\nSELECT s.metadata || hstore('uuid', s.uuid)\nFROM stream s\nWHERE s.id IN \"\"\" + inner_query + \"\"\"\nORDER BY s.id ASC\"\"\"\n log.msg(query)\n d = db.runQuery(query)\n d.addCallback(log_time, time.time())\n return d", "def merge_all_data(self):\n \n logging.info('***** Starting the merging process ')\n\n \n \"\"\" All possible unqiue_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n \n date_times = np.array(date_times) \n \n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_merged_obs , all_merged_head, all_merged_fb , merged_indices , merged_date_time, mi= [] , [] , [] , [] , [], []\n \n \"\"\" Dictionary that will contain the merged file. \"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #for dt in date_times[3008:3100]: # loop over all the possible date_times \n \n tot = len(date_times)\n for dt, c in zip(date_times[3008:3100], range(tot) ): # loop over all the possible date_times \n #print('Analize : ', str(c) , '/', str(tot) , ' ', dt , ' ', now(time.time()) )\n \n logging.info('Analize : %s %s /', str(c) , str(tot) )\n \n cleaned_df_container = {} \n chunk = ''\n \n for k in self.dataset_per_dt[dt] : # checking the list of available datasets \n \n index, index_up = self.unique_dates[k]['indices'][dt]['low'] , self.unique_dates[k]['indices'][dt]['up'] # extracting the exact chunk of the dataframe where the data of this are stored \n \n chunk = self.data[k]['dataframe'].iloc[index:index_up]\n \n chunk['date_time'] = dt\n chunk = self.clean_dataframe(chunk) # cleaning from wrong or nan values \n \n if len(chunk)==0:\n continue\n \n cleaned_df_container[k] = {} \n cleaned_df_container[k]['df'] = chunk # cleaned dataframe \n\n \n if all(value == 0 for value in cleaned_df_container.values()):\n logging.debug('No data were found! ')\n continue\n \n merged_observations_table, best_ds, duplicates, header = self.merge_record(dt, container = cleaned_df_container)\n \n merged_observations_table['source_id'] = best_ds # adding extra columns i.e. chosen dataset, other dataset with data, number of pressure levels \n merged_observations_table['z_coordinate_type'] = 1 # only pressure inn [Pa] available at the moment. 
Check z_coordinate_type table for the correpsonding code \n \n \n \"\"\" Extracting the merged feedback, flagging the advanced_observations_feedback flag = 1\"\"\"\n feedback, merged_obs = self.get_reanalysis_feedback( dt, merged_observations_table , reanalysis='era5fb', best_ds= best_ds)\n all_merged_fb.append(feedback) \n all_merged_obs.append(merged_obs)\n \n \"\"\" Setting the correct report_id in the header table \"\"\"\n merged_report_id = merged_obs['report_id'].values[0] # same report_id as calculated in the observation_table \n header['report_id'] = merged_report_id \n all_merged_head.append(header)\n \n #if len(merged_observations_table) != len(header): \n #print('lengths check best ds: ', best_ds , ' obs_merged: ' , len(merged_observations_table), ' feedback:' , len(feedback) , ' header: ' , len(header) )\n #print( len(merged_observations_table), ' ' , len(feedback) )\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n merged_indices.append(len(merged_observations_table)) \n merged_date_time.append(dt)\n\n\n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n merged_date_time = np.array(merged_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : merged_date_time.shape } , merged_date_time )\n \n \n \"\"\" Creating the merged indices \"\"\"\n mi.append(0)\n for i,ind in zip(merged_indices[0:], range(len(merged_indices[0:]) ) ) :\n mi.append(mi[ind] + i )\n mi = np.array(mi) \n di['recordindex'] = ( {'recordindex' : mi.shape } , mi )\n self.MergedRecordIndex = di \n \n \n \"\"\" Creating the merged dataframes \"\"\"\n logging.debug('*** Concatenating the observations_table dataframes' ) \n merged_obs = pd.concat (all_merged_obs)\n \n self.MergedObs = merged_obs \n logging.debug('*** Finished concatenating theobservations_table dataframes' ) \n \n logging.debug('*** Concatenating the header_table dataframes' ) \n merged_hd = pd.concat (all_merged_head)\n self.MergedHead = merged_hd \n logging.debug('*** Finished concatenating the header_table dataframes' ) \n \n logging.debug('*** Concatenating the feedback dataframes' ) \n merged_fb = pd.concat (all_merged_fb)\n self.MergedFeedback = merged_fb \n logging.debug('*** Finished concatenating the feedback dataframes' ) \n\n return 0", "def get_data(self, date_time):\n id_columns = ','.join([col for col in self.table_primary_keys if col not in ['EFFECTIVEDATE', 'VERSIONNO']])\n return_columns = ','.join(self.table_columns)\n with self.con:\n cur = self.con.cursor()\n cur.execute(\"DROP TABLE IF EXISTS temp;\")\n cur.execute(\"DROP TABLE IF EXISTS temp2;\")\n cur.execute(\"DROP TABLE IF EXISTS temp3;\")\n cur.execute(\"DROP TABLE IF EXISTS temp4;\")\n # Store just the unique sets of ids that came into effect before the the datetime in a temporary table.\n query = \"\"\"CREATE TEMPORARY TABLE temp AS \n SELECT * \n FROM {table} \n WHERE EFFECTIVEDATE <= '{datetime}';\"\"\"\n cur.execute(query.format(table=self.table_name, datetime=date_time))\n # For each unique set of ids and effective dates get the latest versionno and sore in temporary table.\n query = \"\"\"CREATE TEMPORARY TABLE temp2 AS\n SELECT {id}, EFFECTIVEDATE, MAX(VERSIONNO) AS VERSIONNO\n FROM temp\n GROUP BY {id}, EFFECTIVEDATE;\"\"\"\n cur.execute(query.format(id=id_columns))\n # For each unique set of ids get the record with the most recent effective date.\n query = \"\"\"CREATE TEMPORARY TABLE temp3 as\n SELECT {id}, VERSIONNO, max(EFFECTIVEDATE) as EFFECTIVEDATE\n FROM temp2\n GROUP BY {id};\"\"\"\n 
cur.execute(query.format(id=id_columns))\n # Inner join the original table to the set of most recent effective dates and version no.\n query = \"\"\"CREATE TEMPORARY TABLE temp4 AS\n SELECT * \n FROM {table} \n INNER JOIN temp3 \n USING ({id}, VERSIONNO, EFFECTIVEDATE);\"\"\"\n cur.execute(query.format(table=self.table_name, id=id_columns))\n # Inner join the most recent data with the interconnectors used in the actual interval of interest.\n query = \"\"\"SELECT {cols} \n FROM temp4 \n INNER JOIN (SELECT * \n FROM DISPATCHINTERCONNECTORRES \n WHERE SETTLEMENTDATE == '{datetime}') \n USING (INTERCONNECTORID);\"\"\"\n query = query.format(datetime=date_time, id=id_columns, cols=return_columns)\n data = pd.read_sql_query(query, con=self.con)\n return data", "def elastic_data_sync(from_ts, to_ts, conn_obj, idx, type):\n if from_ts:\n query = {\"_id\": {\"$gt\": from_ts, \"$lte\": to_ts}}\n else:\n query = {\"_id\": {\"$lte\": to_ts}}\n pkg_meta = conn_obj.find(query)\n #Call elasticsearch bulk insert with mongo cursor\n data = {\"data_iter\": pkg_meta, \"index\": idx, \"_type\": type,\n \"mapping\": ELASTIC_MAPPINGS.get(idx, {})}\n es_sync = ElasticSearch()\n status, res = es_sync.bulk(**data)\n return status, res", "def merge(self, node, depth=200):\n assert isinstance(node, Node), \"Merge node must be type Node\"\n #------------------------------------------------------------\n\n self.counter.update(node.counter)\n self.trim_counter(depth)\n return self", "def merge_arrival_and_completion_time(tests_dataframe):\r\n arrival_time_df = tests_dataframe[['time_test_arrives_lab', 'server_size']]\r\n completion_time_df = tests_dataframe[['completion_time', 'server_size']]\r\n arrival_time_df['add'] = 1\r\n completion_time_df['add'] = -1\r\n arrival_time_df = arrival_time_df.rename(columns={\"time_test_arrives_lab\":\"time\"})\r\n completion_time_df = completion_time_df.rename(columns={\"completion_time\":\"time\"})\r\n union = pd.concat([arrival_time_df, completion_time_df])\r\n union = union.sort_values(by=\"time\")\r\n prev_server_size = 0\r\n for index, row in union.iterrows():\r\n if index == 0:\r\n current_server_size= row['server_size'] + row['add']\r\n prev_server_size = current_server_size\r\n #union['server_size'] = union['server_size'] + union['add']\r\n else:\r\n current_server_size = prev_server_size + row['add'] \r\n prev_server_size = current_server_size\r\n union.at[index,'server_size'] = current_server_size\r\n #union.to_csv('union.csv')\r\n return union", "def _rewrite_concat(self, node: saldag.Concat):\n\n # Copy over columns from existing relation\n out_rel_cols = node.out_rel.columns\n\n # Combine per-column collusion sets\n for idx, col in enumerate(out_rel_cols):\n columns_at_idx = [in_rel.columns[idx] for in_rel in node.get_in_rels()]\n col.coll_sets = utils.coll_sets_from_columns(columns_at_idx)", "def query(self, code):\n df = pd.read_sql(code, self.conn)\n if 'uuid' in df:\n df = df.set_index('uuid')\n return df", "def merge_struct_arrays(self, data1, data2):\n data_final = np.concatenate((data1, data2))\n return data_final", "def modifyOpt(self, sql): # insert \\ update \\ delete\n # apply connection rescource\n dbp_opt = dbPool()\n results = dbp_opt.opModify(sql)\n # release connection rescource\n dbp_opt.dispose()\n return results" ]
[ "0.5318473", "0.52193975", "0.5193449", "0.515354", "0.5137922", "0.50185084", "0.49987668", "0.49883923", "0.4975488", "0.4966561", "0.49638265", "0.49379689", "0.48661035", "0.48568356", "0.48503768", "0.4833968", "0.48199612", "0.4812925", "0.4802843", "0.47878385", "0.47712696", "0.47580692", "0.47430667", "0.47422996", "0.4735941", "0.4728958", "0.472683", "0.47210637", "0.47130597", "0.47112504", "0.46986896", "0.4698093", "0.46888936", "0.4672249", "0.4670861", "0.46605387", "0.46576843", "0.46512893", "0.46382", "0.46303862", "0.46225694", "0.46217263", "0.46194947", "0.46184778", "0.46131316", "0.46055984", "0.460186", "0.45972186", "0.4596505", "0.45954448", "0.459155", "0.45888245", "0.45818946", "0.45662424", "0.45651343", "0.45629358", "0.45545074", "0.4543906", "0.45406646", "0.4539576", "0.45361906", "0.45322326", "0.45233893", "0.45184237", "0.4499533", "0.44969317", "0.4493616", "0.44935736", "0.44912997", "0.44909945", "0.4487042", "0.44840768", "0.44830424", "0.4482763", "0.44762957", "0.4473797", "0.447376", "0.44653627", "0.44545475", "0.44496763", "0.44477648", "0.44455802", "0.4441863", "0.4435739", "0.44209412", "0.44173068", "0.4410998", "0.44080883", "0.4403735", "0.44010666", "0.43961123", "0.43942925", "0.43936613", "0.43912262", "0.4388057", "0.43849868", "0.43758827", "0.43676305", "0.4363869", "0.43599895" ]
0.6787032
0
Returns a count of the number of unique metrics currently recorded for apdex, time and value metrics.
def metrics_count(self):
    return len(self.__stats_table)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def number_of_running_metrics(self):\n try:\n return len(self.get_classads(\"OSGRSV==\\\"metrics\\\"\"))\n except TypeError:\n self.rsv.log(\"ERROR\", \"Classad parsing failed, unable to count running metrics\")", "def metric_data_count(self):\n\n if not self.__settings:\n return 0\n\n return len(self.__stats_table)", "def get_counts(self):\n self._update_counts()\n return self.failures, self.warnings, self.infos", "def get_number_of_measurement(self):\n num_of_meas = 0\n for time in self.mdvtc.keys():\n num_of_meas = num_of_meas + self.mdvtc[time].get_number_of_measurement()\n #\n return num_of_meas", "def device_count():\n apipath = \"/targets/devices\"\n url = SERVER + apipath\n params = {\n 'q': '(deviceType:ASA)',\n 'agg': 'count'}\n headers = {\n 'Accept': \"application/json\",\n 'Content-Type': \"application/json\",\n 'Authorization': \"bearer {}\".format(token)}\n response = requests.get(url, verify=False, stream=True, headers=headers, params=params)\n getstatuscode = response.status_code\n getresponse = response.json()\n if getstatuscode == 200:\n return getresponse\n else:\n response.raise_for_status()", "def _fetch_count_metrics_and_clear(self):\n with self._count_rlock:\n count_metrics = self._count_metrics\n self._count_metrics = defaultdict(int)\n\n return count_metrics", "def count(self) -> Tuple[groupable, pdarray]:\n repMsg = generic_msg(\n cmd=\"countReduction\",\n args={\"segments\": cast(pdarray, self.segments), \"size\": self.length},\n )\n self.logger.debug(repMsg)\n return self.unique_keys, create_pdarray(repMsg)", "def get_total_counts(self):\n ret = {}\n all_loggers_count = 0\n for logger, name_map in self.acc_map.items():\n cur_logger_count = 0\n ret[logger.name] = {}\n for name, status_map in name_map.items():\n cur_name_count = 0\n ret[logger.name][name] = {}\n for status, acc in status_map.items():\n cur_count = acc.total_count\n ret[logger.name][name][status] = cur_count\n cur_name_count += cur_count\n cur_logger_count += cur_count\n all_loggers_count += cur_count\n ret[logger.name][name]['__all__'] = cur_name_count\n ret[logger.name]['__all__'] = cur_logger_count\n ret['__all__'] = all_loggers_count\n return ret", "def get_count():\n _check_init()\n return _pypm.CountDevices()", "def analyze_count(data):\n\n dsct_vk = pd.unique(data['vk'])\n dsct_itemid = pd.unique(data['itemid'])\n\n print 'number of user:', dsct_vk.shape\n print 'number of items:', dsct_itemid.shape\n print 'the number of ratings:', data.shape\n\n print 'unique actions:', pd.unique(data['action'])\n print 'the number of action 0:', np.sum(data['action'] == 0)\n print 'the number of action 1:', np.sum(data['action'] == 1)\n print 'the number of action 2:', np.sum(data['action'] == 2)\n print 'the number of action 3:', np.sum(data['action'] == 3)\n print 'the number of action 4:', np.sum(data['action'] == 4)\n \n time_range_item = data.groupby('itemid')['real_time'].aggregate(sum_unique)\n print 'Max Range:', np.max(time_range_item)\n print 'Mean Range:', np.mean(time_range_item)\n print 'Median Range:', np.median(time_range_item)", "def count(self, key):\n self._metrics[key] += 1", "def observation_count(self):\n if not self.can_update():\n self._handle_error(910, [self.type])\n return self.tc_requests.observation_count(\n self.api_type, self.api_branch, self.unique_id, owner=self.owner\n )", "def get_metrics(self):\n self.logger.debug(\"Fetching metrics.\")\n return self._api_query(\"metrics\")['metrics']", "async def test_nr_of_metrics(self):\n response = await 
self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(\n response,\n value=str(len(self.entities)),\n total=self.expected_software_metrics,\n entities=self.entities,\n )", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def count(time):\n \n return len(events(time))", "def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total", "def count(self):\n nreq, nres = 0, 0\n for entry in self.__history:\n if entry.oreq is not None:\n nreq += 1\n if entry.ores is not None:\n nres += 1\n return nreq, nres", "def test_get_time_summary_stats_counter():\n # This is constructed to test the parsing logic for timestamps, so the number don't\n # add up.\n runtime_profile = \"- ExampleTimeStats: (Avg: 161.554ms ; \" \\\n \"Min: 101.411us ; \" \\\n \"Max: 1h2m3s4ms5us6ns ; \" \\\n \"Number of samples: 6)\"\n summary_stats = get_time_summary_stats_counter(\"ExampleTimeStats\", runtime_profile)\n assert len(summary_stats) == 1\n assert summary_stats[0].sum == 969324000\n assert summary_stats[0].min_value == 101411\n assert summary_stats[0].max_value == 3723004005006\n assert summary_stats[0].total_num_values == 6", "def gives_stats():\n dict_count = {\n \"amenities\": storage.count(Amenity),\n \"cities\": storage.count(City),\n \"places\": storage.count(Place),\n \"reviews\": storage.count(Review),\n \"states\": storage.count(State),\n \"users\": storage.count(User)\n }\n return jsonify(dict_count)", "def get_metrics(self) -> Dict[str, base.Number]:\n return self._metrics", "def get_count_overview():\n from app.core.api_views import Api\n api = Api()\n return api.getOverviewCount(\n db_name=LoggingDetails,\n field='success',\n key='logs',\n )", "def metrics(self):\n\n return six.iteritems(self.__stats_table)", "def test_counts(self):\n c = array([5,0,1,1,5,5])\n obs = counts(c)\n exp = array([1,2,0,0,0,3])\n self.assertEqual(obs, exp)\n d = array([2,2,1,0])\n obs = counts(d, obs)\n exp = array([2,3,2,0,0,3])\n self.assertEqual(obs, exp)", "def count(timeseries):\n try:\n return timeseries[0].points[0].value.int64_value\n except (IndexError, AttributeError) as exception:\n LOGGER.warning(\"Couldn't find any values in timeseries response\")\n LOGGER.debug(exception)\n return 0 # no events in timeseries", "def get_attendance_counts(attendance):\n count_a = 0\n count_p = 0\n count_d = 0\n for a in attendance:\n if a.ATT_STATUS == 'A':\n count_a+=1\n elif a.ATT_STATUS == 'D':\n count_d+=1\n elif a.ATT_STATUS == 'P':\n count_p+=1\n return (count_p,count_a,count_d)", "def count():", "def count(self):\n return self._reduce_for_stat_function(F.count, only_numeric=False)", "def hits(self) -> Mapping[str, int]:\n if len(self._clock_starts) > 0:\n warnings.warn(\n \"Retrieved hit counts while clocks are still going, \"\n \"incomplete times are not included: \"\n f\"{list(self._clock_starts.keys())}\",\n RuntimeWarning,\n )\n return self._hit_count.copy()", "def get_num_measured_outputs(self):\n i = 0\n for o in self.outputs:\n if o.is_measured_output():\n i += 1\n return i", "def numberActivities(self):\n if self.use_dic:\n nb_data = self.dic.keys()\n nb_act = (self.dic[nb_data[0]]).keys()\n return len(nb_data)*len(nb_act)\n else:\n return -1", "def class_callcount(self):\r\n # timing is stored by node, we compute timing by class on demand\r\n rval = {}\r\n for node, count in self.apply_callcount.items():\r\n 
typ = type(node.op)\r\n rval.setdefault(typ, 0)\r\n rval[typ] += count\r\n return rval", "def probe_counts(**kwargs):\n attributes = [\"ping_type\", \"os\", \"app_version\", \"app_build_id\", \"channel\"]\n\n return dict(\n attributes=\",\".join(attributes),\n aggregate_attributes=\"\"\"\n metric,\n metric_type,\n key\n \"\"\",\n aggregate_grouping=\"\"\"\n client_agg_type,\n agg_type\n \"\"\",\n # not boolean\n scalar_metric_types=\"\"\"\n \"counter\",\n \"quantity\",\n \"labeled_counter\",\n \"timespan\"\n \"\"\",\n boolean_metric_types=\"\"\"\n \"boolean\"\n \"\"\",\n **kwargs,\n )", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def get_resource_count(har_json):\n entries = har_json['log']['entries']\n\n resource_type_counts = Counter()\n\n for entry in entries:\n resource = entry['request']['url']\n dirty_resource_type = resource.split('.')[-1]\n resource_type = dirty_resource_type.split('?')[0] # Remove url params\n if len(resource_type) > 4:\n resource_type_counts['other'] += 1\n # print 'Found other resource type: {0}'.format(resource_type)\n else:\n resource_type_counts[resource_type] += 1\n\n return resource_type_counts", "def count_barcodes(metrics_file):\n\n barcodes = pd.read_csv(metrics_file, sep=\"\\t\", header=0, names=[\"barcode\", \"randomer\", \"count\"])\n return Counter(dict(barcodes.groupby(\"barcode\")['count'].sum().iteritems()))", "def count_measurements(database: Database) -> int:\n return int(database.measurements.count_documents(filter={}))", "def metrics(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'metrics')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def get_usage(metric: str, interval_time: int):\n\n count, ignored = check_scale(metric, f'{interval_time}s')\n param = {\n 'start': f'{interval_time}s-ago',\n 'm': f'sum:{metric}' + '{host=*}',\n }\n\n start = time.time()\n resp = urlopen(f'http://{OPENTSDB_HOST}:{OPENTSDB_PORT}/api/query?', param)\n if resp.status == 200:\n _total = json.load(resp)\n else:\n pass\n\n # remove the elements that should be ignored\n valid_source = [i for i in _total if i['tags'] not in ignored]\n\n valid_last_time = []\n for i in valid_source:\n last = sorted(i['dps'].keys())[-1]\n if (start - interval_time) <= int(last) <= (start + interval_time):\n valid_last_time.append(i)\n else:\n pass\n # elements in valid_last_time mean it should be aggregated.\n total = [i['dps'][sorted(i['dps'].keys())[-1]] for i in valid_last_time]\n\n return count, sum(total)", "def datacounts(self):\n return self._properties[\"datacounts\"]", "def timerCount(cmds):\n return int(sum(np.asarray(cmds) == 0x400001)) # numpy version\n #return cmds.count(0x400001) # python list version", "def _fetch_time_metrics_and_clear(self):\n with self._time_rlock:\n time_metrics = self._time_metrics\n self._time_metrics = defaultdict(LatencyTracker)\n\n return time_metrics", "def count(self):\n return len([i for i in self.iteritems()])", "def count(self):\n # TODO not implemented yet\n return 0", "def update_count(self):\n count_metrics = self._fetch_count_metrics_and_clear()\n self._logger.info('update_count. 
count_metrics = %s',\n build_metrics_counter_data(count_metrics))", "def count_counts(self):\n count_counts = defaultdict(Counter)\n for token, followers in self._dict.items():\n for f, count in followers.items():\n count_counts[token][count] += 1\n count_counts[token][0] = len(self._dict) - sum(count_counts[token].values())\n return count_counts", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def get_usage_count(equations):\n usage_count = {}\n for eq in equations:\n usage_count.setdefault(eq.lhs, 0)\n for var in eq.rhs.atoms(Variable):\n usage_count.setdefault(var, 0)\n usage_count[var] += 1\n return usage_count", "def count(ev):\n profData = getProfilingData(ev)\n if profData is not None:\n return profData.Count()\n return \"\"", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def stats(self):\n return {\"size\": 0, \"maxsize\": 0, \"hits\": 0, \"miss\": 0}", "def metrics(self) -> list[dict[str, dict[str, float | int]]]:\n return self.performance[\"performances\"]", "def example_six():\n stats = defaultdict(int)\n stats['my_counter'] += 1", "def test_cw_metrics(self):\n\n instances = set()\n result = self.cw_client.list_metrics(Namespace=\"CWAgent\", MetricName=\"cpu_usage_system\")\n for i in result[\"Metrics\"]:\n instances.add(i[\"Dimensions\"][0][\"Value\"])\n\n for key, value in self.cdk_output_map.items():\n if \"Instance\" in key:\n self.assertTrue(value in instances)", "def get_count(name, key):\n total = 0\n query = CounterShard.all().filter('name = ', name).filter('reference_key = ', key)\n for counter in query:\n total += counter.count\n \n return total", "def test_data_source_soaps_count_get(self):\n pass", "def _getValueCounts(mapping):\n return Counter({k: len(v) for k, v in viewitems(mapping)})", "def count(self, tokens):\n return self.counts[tokens]", "def stats():\n class_counts = {}\n convert_dict = {\n 'Amenity': 'amenities',\n 'State': 'states',\n 'City': 'cities',\n 'User': 'users',\n 'Place': 'places',\n 'Review': 'reviews'\n }\n\n for _class in convert_dict.keys():\n class_counts[convert_dict[_class]] = storage.count(_class)\n\n return jsonify(class_counts)", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def status_counts(self):\n return self._status_counts", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def get_all_metrics():\n return get_overlap_metrics() + get_distance_metrics() + get_distance_metrics()", "def _count_devices(self):\n number_of_devices = ctypes.c_uint()\n\n if ctypes.windll.user32.GetRawInputDeviceList(\n ctypes.POINTER(ctypes.c_int)(),\n ctypes.byref(number_of_devices),\n ctypes.sizeof(RawInputDeviceList)) == -1:\n warn(\"Call to GetRawInputDeviceList was unsuccessful.\"\n \"We have no idea if a mouse or keyboard is attached.\",\n RuntimeWarning)\n return\n\n devices_found = (RawInputDeviceList * number_of_devices.value)()\n\n if ctypes.windll.user32.GetRawInputDeviceList(\n devices_found,\n ctypes.byref(number_of_devices),\n ctypes.sizeof(RawInputDeviceList)) == -1:\n warn(\"Call to GetRawInputDeviceList was unsuccessful.\"\n \"We have no idea if a mouse or keyboard is attached.\",\n RuntimeWarning)\n return\n\n for device in devices_found:\n if device.dwType == 0:\n self._raw_device_counts['mice'] += 1\n elif 
device.dwType == 1:\n self._raw_device_counts['keyboards'] += 1\n elif device.dwType == 2:\n self._raw_device_counts['otherhid'] += 1\n else:\n self._raw_device_counts['unknown'] += 1", "def class_callcount(self):\n # timing is stored by node, we compute timing by class on demand\n rval = {}\n for (fgraph, node), count in self.apply_callcount.items():\n typ = type(node.op)\n rval.setdefault(typ, 0)\n rval[typ] += count\n return rval", "def get_attribute_counts(self):\n counts = defaultdict(int)\n for attr in self:\n counts[attr.name] += 1\n\n return dict(counts)", "def metrics(self):\n return self.__metrics", "def fetch_counts_for_debug(stdout):\n test_names = TEST_NAMES_DEBUG_APP_PATTERN.findall(stdout)\n test_counts = collections.Counter(test_class for test_class, _ in test_names\n if test_class not in IGNORED_CLASSES)\n\n return test_counts", "def test_counts(pawprint_default_tracker_db_with_table):\n\n tracker = pawprint_default_tracker_db_with_table\n\n # Add a bunch of events\n query = (\n \"\"\"\n INSERT INTO {} (timestamp, user_id, event) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in'),\n ('2016-01-01 12:40', 'bob', 'logged_in'),\n ('2016-01-01 16:00', 'charlotte', 'logged_in'),\n ('2016-01-02 00:00', 'dan', 'logged_in'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in'),\n ('2016-01-05 00:00', 'frank', 'logged_in'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in'),\n ('2016-01-20 00:00', 'hans', 'logged_in'),\n ('2016-02-01 00:00', 'iris', 'logged_in'),\n ('2016-02-01 00:00', 'james', 'logged_in'),\n ('2016-03-01 00:00', 'kelly', 'logged_in'),\n ('2016-03-01 00:00', 'laura', 'logged_in'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in')\n \"\"\"\n ).format(tracker.table)\n\n pd.io.sql.execute(query, tracker.db)\n\n logins_hourly = tracker.count(event=\"logged_in\", resolution=\"hour\")\n logins_daily = tracker.count(event=\"logged_in\")\n logins_weekly = tracker.count(event=\"logged_in\", resolution=\"week\")\n logins_monthly = tracker.count(event=\"logged_in\", resolution=\"month\")\n logins_weekly_left_range = tracker.count(\n event=\"logged_in\", resolution=\"week\", start=datetime(2016, 2, 1)\n )\n logins_weekly_right_range = tracker.count(\n event=\"logged_in\", resolution=\"week\", end=datetime(2016, 2, 1)\n )\n logins_daily_full_range = tracker.count(\n event=\"logged_in\", start=datetime(2016, 1, 15), end=datetime(2016, 2, 15)\n )\n\n # Hourly\n assert len(logins_hourly) == 8\n assert np.all(logins_hourly[\"count\"].values == [2, 1, 2, 1, 1, 1, 2, 2])\n\n # Daily\n assert len(logins_daily) == 7\n assert np.all(logins_daily[\"count\"].values == [3, 2, 1, 1, 1, 2, 2])\n\n # Weekly\n assert len(logins_weekly) == 5\n assert np.all(logins_weekly[\"count\"].values == [5, 2, 1, 2, 2])\n\n # Others\n assert len(logins_monthly) == 3\n assert len(logins_weekly_left_range) == 2 # weeks start on Monday\n assert len(logins_weekly_right_range) == 4 # and not at the start / end dates provided\n assert len(logins_daily_full_range) == 2", "def totalCount(self):\n return sum(self.values())", "def totalCount(self):\n return sum(self.values())", "def totalCount(self):\n return sum(self.values())", "def count(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'count')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def count(self, value): # real signature unknown; restored from __doc__\n return 0", "def test_get_virtual_machine_count_metrics1(self):\n pass", "def hits(self):\n return len(self.successes) + len(self.failures)", "def count(self):\n return self.get_count()", "def 
_calculate_metrics(self):\n metrics = {}\n precision, recall = self.calc_precision_recall()\n metrics[\"precision\"] = precision\n metrics[\"recall\"] = recall\n metrics[\"entropy\"] = self.calc_entropy()\n metrics[\"component_entropy\"] = self.calc_component_entropy()\n metrics[\"num_comps\"] = len(self.get_components())\n metrics[\"num_diagnoses\"] = len(self.diagnoses)\n metrics[\"distinct_diagnoses_scores\"] = len(Counter(list(map(lambda x: x.probability, self.diagnoses))))\n metrics[\"num_tests\"] = len(self.get_tests())\n metrics[\"num_distinct_traces\"] = len(self.get_distinct_traces())\n metrics[\"num_failed_tests\"] = len(self._get_tests_by_error(1))\n metrics[\"num_passed_tests\"] = len(self._get_tests_by_error(0))\n passed_comps = set(self._get_components_by_error(0))\n failed_comps = set(self.get_components_in_failed_tests())\n metrics[\"num_failed_comps\"] = len(failed_comps)\n metrics[\"only_failed_comps\"] = len(failed_comps - passed_comps)\n metrics[\"only_passed_comps\"] = len(passed_comps - failed_comps)\n metrics[\"num_bugs\"] = len(self.get_bugs())\n metrics[\"wasted\"] = self.calc_wasted_components()\n metrics[\"top_k\"] = self.calc_top_k()\n metrics[\"num_comps_in_diagnoses\"] = len(self._get_comps_in_diagnoses())\n metrics[\"bugs_cover_ratio\"] = self._get_bugs_cover_ratio()\n metrics[\"average_trace_size\"] = self._get_average_trace_size()\n metrics[\"average_component_activity\"] = self._get_average_component_activity()\n metrics[\"average_diagnosis_size\"] = self._get_average_diagnosis_size()\n metrics[\"bugs_scores_average\"], metrics[\"bugs_scores_std\"], metrics[\"bugs_scores_entropy\"] = self._get_bugs_scores()\n metrics[\"non_bugs_scores_average\"], metrics[\"non_bugs_scores_std\"], metrics[\"non_bugs_scores_entropy\"] = self._get_non_bugs_scores()\n metrics.update(self.cardinality())\n # metrics[\"ochiai\"] = self.calc_ochiai_values()\n return metrics", "def count_benchmarks():\n return len(setup_storage().fetch_benchmark({}))", "def counts(self) -> dict:\n return Counter(self.sequence)", "def tally(self):\n return self.count", "def size(self) -> Tuple[int, int]:\n count_keys = 0 # store the number of different 'key'.\n count_values = 0 # store the the number of different 'value'.\n for node in self.hashTable:\n count_values = count_values + node.count\n count_keys = count_keys + len(node.keys)\n return count_keys, count_values", "def static_metrics(self) -> dict[str, float | int]:\n return self.performance[\"meta\"]", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def trainCount(\n trainData, \n questionType,\n questionDict,\n questionIdict, \n objDict, \n objIdict,\n numAns):\n count_wa = np.zeros((len(objIdict), numAns))\n count_a = np.zeros((numAns))\n objIds = extractObjId(\n trainData[0], \n questionType, \n questionDict, \n questionIdict)\n for i in range(objIds.shape[0]):\n objId = objIds[i]\n obj = questionIdict[objId - 1]\n ansId = trainData[1][i, 0]\n objId2 = objDict[obj]\n count_wa[objId2, ansId] += 1\n count_a[ansId] += 1\n # Add UNK count\n count_a[-1] += 1\n return count_wa, count_a", "def getUsersBySSID():\n\tstats = {}\n\tms = MobileStation.objects.filter(ssid__isnull=False)\n\tfor ssid in set(MobileStation.objects.values_list('ssid', flat=True)):\n\t\tstats[ssid] = MobileStation.objects.areAssociated().filter(ssid=ssid).count()\n\treturn stats", "def count(self):\n\n raise NotImplementedError", "def counts(self):\n\n if self._counts is not None:\n return self._counts\n else:\n try:\n return self.cps * self.livetime\n 
except TypeError:\n raise SpectrumError(\n 'Unknown livetime; cannot calculate counts from CPS')", "def count(self, tokens):\n return self._count[tuple(tokens)]", "def summarize(self) -> Mapping[str, int]:\n return dict(\n compounds=self.count_compounds(),\n side_effects=self.count_side_effects(),\n indications=self.count_indications(),\n umls=self.count_umls(),\n )", "def metrics(self):\n return self._metrics", "def metrics(self):\n return self._metrics", "def get_all_dataset_counts(\n self,\n ) -> Dict[Tuple[str, int, int], int]:\n res = self._engine.execute(\n select(\n [\n PRODUCT.c.name,\n TIME_OVERVIEW.c.start_day,\n TIME_OVERVIEW.c.period_type,\n TIME_OVERVIEW.c.dataset_count,\n ]\n )\n .select_from(TIME_OVERVIEW.join(PRODUCT))\n .where(TIME_OVERVIEW.c.product_ref == PRODUCT.c.id)\n .order_by(\n PRODUCT.c.name, TIME_OVERVIEW.c.start_day, TIME_OVERVIEW.c.period_type\n )\n )\n\n return {\n (\n r.name,\n *TimePeriodOverview.from_flat_period_representation(\n r.period_type, r.start_day\n )[:2],\n ): r.dataset_count\n for r in res\n }", "def task3a(self):\n browser_count = {}\n for entry in self.records:\n if((entry['visitor_device'] == 'browser') and (entry['event_type'] == 'read')):\n browser = entry['visitor_useragent']\n if (browser in browser_count):\n browser_count[entry['visitor_useragent']] += 1\n else:\n browser_count[entry['visitor_useragent']] = 1\n GUI.show_histo(browser_count, \"vert\", \"Number of Accesses using Browser\", \"Browser Distribution\")", "def get_metrics(self):\n return None" ]
[ "0.6411366", "0.6283042", "0.62296087", "0.6220417", "0.61111647", "0.6047972", "0.60304636", "0.5969979", "0.59513307", "0.5878805", "0.5827968", "0.5753369", "0.5703473", "0.56953144", "0.56933933", "0.56933933", "0.5690032", "0.5668702", "0.56455106", "0.5636696", "0.56353694", "0.5620147", "0.56077564", "0.5602681", "0.5592418", "0.5571603", "0.5568807", "0.55369514", "0.5507133", "0.5506029", "0.5504633", "0.5498267", "0.548768", "0.54729164", "0.54719996", "0.54719996", "0.54719996", "0.54719996", "0.54715645", "0.54708236", "0.54609716", "0.545132", "0.5451227", "0.5448816", "0.54399306", "0.5427897", "0.54258287", "0.5419275", "0.5417845", "0.5417562", "0.5408946", "0.5406547", "0.53984153", "0.5398128", "0.53969675", "0.5390657", "0.53872776", "0.5381089", "0.5375169", "0.53618926", "0.53606606", "0.5358235", "0.53581595", "0.5357596", "0.5357596", "0.5349776", "0.5349084", "0.53400207", "0.5337269", "0.5334993", "0.5334892", "0.53306955", "0.53262585", "0.5323736", "0.53215295", "0.53215295", "0.53215295", "0.5320835", "0.53108346", "0.53062135", "0.5304726", "0.53026265", "0.5302289", "0.5298677", "0.5296915", "0.5294834", "0.5293615", "0.52928203", "0.52926236", "0.52914274", "0.5288801", "0.5280909", "0.5280162", "0.5279869", "0.52763927", "0.5275152", "0.5275152", "0.52729756", "0.52726495", "0.5271937" ]
0.6419223
0
Record a single apdex metric, merging the data with any data from prior apdex metrics with the same name.
def record_apdex_metric(self, metric): if not self.__settings: return # Note that because we are using a scope here of an empty string # we can potentially clash with an unscoped metric. Using None, # although it may help to keep them separate in the agent will # not make a difference to the data collector which treats None # as an empty string anyway. key = (metric.name, '') stats = self.__stats_table.get(key) if stats is None: stats = ApdexStats(apdex_t=metric.apdex_t) self.__stats_table[key] = stats stats.merge_apdex_metric(metric) return key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def record_apdex_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_apdex_metric(metric)", "def merge_apdex_metric(self, metric):\n\n self[0] += metric.satisfying\n self[1] += metric.tolerating\n self[2] += metric.frustrating\n\n self[3] = ((self[0] or self[1] or self[2]) and\n min(self[3], metric.apdex_t) or metric.apdex_t)\n self[4] = max(self[4], metric.apdex_t)", "def write_metric(self, metric_name: str, metric_value: Union[float, int]):\n self._metrics.append(Metric(metric_name, metric_value))", "def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)", "def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]", "def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)", "def add_metric(self, metric):\n self.metrics.append(metric)\n self.estimate()", "def log_metric(self, name: str, value):\n self.metrics[name] = value\n\n self._sync_log_event()", "def _record(self, metric_point: MetricPoint,\n measurement_map: MeasurementMap):\n metric_name = metric_point.metric_name\n tags = metric_point.tags\n\n metric = self._registry.get(metric_name)\n # Metrics should be always registered dynamically.\n assert metric\n\n tag_map = tag_map_module.TagMap()\n for key, value in tags.items():\n tag_key = tag_key_module.TagKey(key)\n tag_value = tag_value_module.TagValue(value)\n tag_map.insert(tag_key, tag_value)\n\n metric_value = metric_point.value\n measurement_map.measure_float_put(metric.measure, metric_value)\n # NOTE: When we record this metric, timestamp will be renewed.\n measurement_map.record(tag_map)", "def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)", "def record_custom_metric(self, name, value):\n key = (name, '')\n\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(key)\n if stats is None:\n self.__stats_table[key] = new_stats\n else:\n stats.merge_stats(new_stats)\n\n return key", "def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with self._lock:\n self._batch[identity] = value\n 
self._timestamps[identity] = int(time.time() * 1000.0)", "def record_metrics(self, pid, metrics):\n\n for _metric, _metric_value in metrics.items():\n if not self.__metrics_history[pid].get(_metric):\n self.__metrics_history[pid][_metric] = []\n self.__metrics_history[pid][_metric].append(_metric_value)\n # only keep the last 2 running history for any metric\n self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][\n -2:\n ]", "def record_metrics(metrics, args):\n with open('interpretation_metrics/model_metrics_{}'.format(args.file_num), 'a') as f:\n f.write(\"META DATA\\n\")\n f.write(\"---------\\n\")\n f.write(\"Model Name: {}\\n\".format(args.model_name))\n f.write(\"Attack Target: {}\\n\".format(args.attack_target))\n f.write(\"Gradient Model File: {}\\n\".format(args.gradient_model_file))\n f.write(\"Predictive Model File: {}\\n\".format(args.predictive_model_file))\n f.write(\"Cuda: {}\\n\".format(args.cuda))\n\n f.write(\"\\nSIMPLE GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSIMPLE GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSMOOTH GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSMOOTH GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nINTEGRATED GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nINTEGRATED GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))", "def add_metric_class(self, metric: NNSimpleMetric):\n if metric.name not in self.metrics:\n self.metrics[metric.name] = metric", "def add_metric(self, metric_name, aggregate=None):\n\n clean_metric = metric_name.lower().strip()\n\n if clean_metric.lower() not in METRICS:\n raise Exception(\"Metric named: \" + metric_name + \" is not a valid benchmark metric.\")\n self.metrics.add(clean_metric)\n\n if not aggregate:\n self.raw_metrics.add(clean_metric)\n elif aggregate.lower().strip() in AGGREGATES:\n # Add aggregate to this metric\n clean_aggregate = aggregate.lower().strip()\n current_aggregates = self.aggregated_metrics.get(clean_metric, list())\n current_aggregates.append(clean_aggregate)\n self.aggregated_metrics[clean_metric] = current_aggregates\n else:\n raise Exception(\"Aggregate function \" + aggregate + \" is not a legal aggregate function name\");\n\n return self;", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def 
add_metrics(self, metric_dict: dict):\n self.metric_dict.update(metric_dict)", "def log_metric(data_category, key, value):\n # always, just print in logs\n log(logging.INFO, data_category, \"AML Metric({}={})\".format(key, value))\n if data_category == DataCategory.ONLY_PUBLIC_DATA:\n # if public, ask azureml to record (if azureml attached)\n run = AmlRunWrapper()\n run.setup(attach=True)\n run.log(key, value)\n run.flush()", "def record_time_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Scope is forced to be empty string if None as\n # scope of None is reserved for apdex metrics.\n\n key = (metric.name, metric.scope or '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = TimeStats(call_count=1,\n total_call_time=metric.duration,\n total_exclusive_call_time=metric.exclusive,\n min_call_time=metric.duration,\n max_call_time=metric.duration,\n sum_of_squares=metric.duration ** 2)\n self.__stats_table[key] = stats\n else:\n stats.merge_time_metric(metric)\n\n return key", "def process(self, metric):\n self.metrics.append(metric)\n if self.should_flush():\n self._send()", "def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)", "def add_metric(self, metric_fn):\n self._metrics.append(metric_fn)", "def record_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, value in metrics:\n self.record_custom_metric(name, value)", "def __push_metric(self, metric_name, value, timestamp):\n sock = self.__get_carbon_socket()\n _data = \"%s %d %d\\n\" % (metric_name, value, timestamp)\n LOGGER.debug(\"SEND: %s\", _data.replace(\"\\n\", \"\"))\n sock.send(_data.encode('utf-8'))", "def increment(self) -> None:\n self._increment_called = True\n self.append(deepcopy(self._base_metric))", "def add_metric(self, metric_name, metric_value, login=False):\n if login:\n self._gc.login()\n\n try: \n if metric_name not in self._metric_dict:\n metric_index = len(self._metric_dict) + 2\n self._wks.update_cell(1, metric_index, metric_name)\n self._metric_dict[metric_name] = metric_index\n self.save_config()\n\n self._wks.update_cell(self.row_index, self._metric_dict[metric_name], metric_value)\n except Exception as ins:\n if not login:\n self.add_metric(metric_name, metric_value, login=True)\n else:\n return '\\n'.join([str(type(ins)), str(ins.args), str(ins)])\n return None", "def log_metric(self, name, val):\n raise NotImplementedError", "def post(self):\r\n json_data = request.get_json(force=True)\r\n if not json_data:\r\n abort(400, message='No input data provided')\r\n # make sure the metric_id (temporary) and metric_type (model) are filled\r\n json_data[\"metric_id\"] = \"TBD\"\r\n json_data[\"metric_type\"] = \"model\"\r\n\r\n # validate and deserialize input\r\n new_metric = self.load(json_data, session=db.session)\r\n\r\n # get the next metric id and update metric object\r\n try:\r\n db.session.add(new_metric)\r\n db.session.commit()\r\n except SQLAlchemyError as e:\r\n abort(400, message=f'Database error. 
Reason: {e}')\r\n\r\n # dump to json and return result\r\n result = self.schema.dump(new_metric)\r\n return success(result, code=201)", "def emit(self, metric):\n metric_data = self.unmarshal(metric)\n self.logger.log(\n self.log_level, metric.DEFAULT_LOG_FORMAT.format(**metric_data)\n )", "def add_metric(self, metric_class, namespace, name, value=1.0, tags=None, interval=None):\n # type: (Type[Metric], str, str, float, MetricTagType, Optional[float]) -> None\n metric_id = Metric.get_id(name, namespace, tags, metric_class.metric_type)\n if metric_class is DistributionMetric:\n metrics_type_payload = TELEMETRY_TYPE_DISTRIBUTION\n else:\n metrics_type_payload = TELEMETRY_TYPE_GENERATE_METRICS\n\n with self._lock:\n existing_metric = self._metrics_data[metrics_type_payload][namespace].get(metric_id)\n if existing_metric:\n existing_metric.add_point(value)\n else:\n new_metric = metric_class(namespace, name, tags=tags, common=True, interval=interval)\n new_metric.add_point(value)\n self._metrics_data[metrics_type_payload][namespace][metric_id] = new_metric", "def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def update_metric(self, metric, value):\n if self.is_number(value):\n self.logger.debug(\"Collected raw metric: %s = %s\" % (metric, value))\n self.raw_metrics[metric] = value", "def save_case_metrics_on_check_point(self) -> None:\n pd.read_csv(f'{self.path_to_case_metrics}/{self.file_name}.csv')\\\n .append(pd.DataFrame(self.case_metrics,\n columns=['stream_index', 'timestamp', 'check point', 'case',\n 'graph distance', 'time distance', 'label']))\\\n .to_csv(f'{self.path_to_case_metrics}/{self.file_name}.csv', index=False)\n self.case_metrics = []", "def post_save_metrics(sender, **kwargs):\r\n action = 'created' if kwargs.pop('created', False) else 'updated'\r\n\r\n tags = _database_tags(action, sender, kwargs)\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)", "def metrics(self, metrics):\n\n self._metrics = metrics", "def metrics_group():", "def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset", "def __init__(self, metrics_to_record):\n self.tape = {}\n\n for metric_name in metrics_to_record:\n self.tape[metric_name] = []", "def metric_recorded(self, record):\n if record.name in self.headers and self.current_row is not None:\n if record.name == \"learning_rate\" and not record.is_scalar:\n # record is a list of scalars\n value = \",\".join([f\"{lr:.4f}\" for lr in record.value])\n elif record.is_scalar and isinstance(record.value, int):\n value = str(record.value)\n else:\n assert record.is_scalar\n\n value = f\"{record.value:.4f}\"\n\n self.current_row[record.name] = value", "def compute_metrics(self, x, extra=None):\n if self.__metrics is None and extra is None:\n return None\n\n ret = {}\n if self.__metrics is not None:\n for m in self.__metrics:\n ret[m.name] = self._mdmetric(x, m)\n\n if extra is not None and extra.name not in ret:\n ret[extra.name] = self._mdmetric(x, extra)\n\n return 
ret", "def sum(self, key, value):\n self._metrics[key] += value", "def to_metric(self):\r\n if self.units != 'metric':\r\n self.units = 'metric'\r\n for statement in self.statements:\r\n statement.to_metric()\r\n for tool in iter(self.tools.values()):\r\n tool.to_metric()\r\n for primitive in self.primitives:\r\n primitive.to_metric()\r\n for hit in self.hits:\r\n hit.to_metric()", "def log_metrics(self, metrics: dict):\n self.metrics.update(metrics)\n\n self._sync_log_event()", "def addAPK(self, filename, data):\n digest = hashlib.sha256(data).hexdigest()\n log.debug(\"add APK:%s\" % digest)\n apk = APK(data, True)\n self.analyzed_apk[digest] = [apk]\n self.analyzed_files[filename].append(digest)\n self.analyzed_digest[digest] = filename\n self.analyzed_vms[digest] = Analysis()\n log.debug(\"added APK:%s\" % digest)\n return digest, apk", "def create_metric(self, metric, metric_name=None):\n metric_name = metric_name or metric.name\n with self._accessor_lock:\n self._accessor.create_metric(metric)\n self._cache_set(metric_name, metric)", "def save_metrics(self):\n self.data_stats.write.format(\"org.apache.spark.sql.cassandra\").mode(\"append\").options(table=self.cassandra_stats_table, keyspace=self.cassandra_keyspace).save()\n print (\"Saved data successfully\")", "def submit_metric(self, metric_suffix, metric, scraper_config, gauge=True, monotonic_count=True):\n metric_name = scraper_config['namespace'] + metric_suffix\n for sample in metric.samples:\n # Explicit shallow copy of the instance tags\n _tags = list(scraper_config['custom_tags'])\n\n for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):\n _tags.append('{}:{}'.format(label_name, label_value))\n if gauge:\n # submit raw metric\n self.gauge(metric_name, sample[self.SAMPLE_VALUE], _tags)\n if monotonic_count:\n # submit rate metric\n self.monotonic_count(metric_name + '.count', sample[self.SAMPLE_VALUE], _tags)", "def save_metric(key, value, timestamp=None):\n\n from analytics_client.settings import _ANALYTICS_ENABLED\n\n if not _ANALYTICS_ENABLED:\n return None\n\n from analytics_client.tasks import store_metric\n\n # Set a timestamp if it is undefined\n _timestamp = timestamp\n if _timestamp is None:\n _timestamp = datetime.now()\n\n store_metric.delay(Metric(key=key, value=value, timestamp=_timestamp))", "def write_measurement(self, name: str, measurement: dict):\n self._measurements.append(Measurement(name, measurement))", "def endpoint_metrics_set(self, endpoint_name=None, metrics=None):\n if metrics is None:\n raise Exception(\"Metrics required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/metrics', 'POST', body=metrics)\n else:\n self.request('/v1.1/endpoints/%s/metrics' % endpoint_name, 'POST', body=metrics)", "def output_metric(self, key=None, metric='loss'):\n if key is None:\n key = self.key\n return self.metrics[key][metric][-1]", "def save_data(self, gauge_name, date_key, data):\n pass", "def log_metric(self, name, val, step):\n raise NotImplementedError", "def average(self, key, value):\n self._average_metrics[key] += value\n self._average_metrics_count[key] += 1", "def aws_write(vl, flusher):\n flusher.add_metric(vl)", "def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form())", "def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in 
separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics", "def lambda_metric(metric_name, value, timestamp=None, tags=None):\n tags = _tag_dd_lambda_layer(tags)\n if os.environ.get(\"DD_FLUSH_TO_LOG\", \"\").lower() == \"true\":\n logger.debug(\"Sending metric %s to Datadog via log forwarder\", metric_name)\n print(\n json.dumps(\n {\n \"m\": metric_name,\n \"v\": value,\n \"e\": timestamp or int(time.time()),\n \"t\": tags,\n }\n )\n )\n else:\n logger.debug(\"Sending metric %s to Datadog via lambda layer\", metric_name)\n lambda_stats.distribution(metric_name, value, timestamp=timestamp, tags=tags)", "def log_results(self, filename=None):\n\n self.ad_log['train_auc'] = self.diag['train']['auc'][-1]\n self.ad_log['train_accuracy'] = self.diag['train']['acc'][-1]\n self.ad_log['train_time'] = self.train_time\n\n self.ad_log['test_auc'] = self.diag['test']['auc'][-1]\n self.ad_log['test_accuracy'] = self.diag['test']['acc'][-1]\n self.ad_log['test_time'] = self.test_time\n\n self.ad_log.save_to_file(filename=filename)", "def set_metrics(self):", "def add_measure(self, field, aggregations = None):\r\n self.aggregations[field] = aggregations\r\n self.measures.append(field)", "def submit_metric():\n\n gson = json.loads(request.get_json())\n\n new_point = DataPoint(\n computer_name=gson[\"computer_name\"],\n cpu_percentage=gson[\"cpu_percentage\"],\n memory_percentage=gson[\"memory_percentage\"],\n timestamp=gson[\"timestamp\"]\n )\n\n with lock:\n if not instances.get(new_point.computer_name):\n instances[new_point.computer_name] = Timeline(\n maxsize=int(os.environ.get(\"COLLECTOR_BUFFER_SIZE\"))\n )\n instances[new_point.computer_name].append(new_point)\n\n return Response(status=200)", "def add_metrics(_dict):\n for key, itr in _dict.items():\n if key not in self.metric_cols:\n self.metric_cols.append(key)", "def _add_to_queue(key, value, step, time, run_id):\n met = Metric(key=key, value=value, timestamp=time, step=step)\n _metric_queue.append((run_id, met))\n if len(_metric_queue) > _MAX_METRIC_QUEUE_SIZE:\n _thread_pool.submit(_flush_queue)", "def gauge(self, gauge, 
value):\n if self.ignore_metrics:\n return\n\n with self._gauge_rlock:\n self._gauge_metrics[gauge] = value\n self._gauge_call_count += 1\n\n old_call_time = self._gauge_last_call_time\n self._gauge_last_call_time = arrow.utcnow().timestamp\n if (self._gauge_call_count == self._max_call_count > 0) or \\\n self._gauge_last_call_time - old_call_time > self._max_time_between_calls > 0:\n self._gauge_call_count = 0\n self.update_gauge()", "def write_metrics(metrics, db_path):\n conn = sqlite3.connect(db_path)\n c = conn.cursor()\n c.execute('DELETE FROM metrics')\n for metric in metrics:\n c.execute(\n 'INSERT INTO metrics '\n '(timestamp, callerid, uniqueid, channel, channel_extension, name) '\n 'VALUES (datetime(?),?,?,?,?,?)',\n (metric['timestamp'],\n metric['callerid'],\n metric['uniqueid'],\n metric['channel'],\n metric['channel_extension'],\n metric['name']))\n conn.commit()\n conn.close()", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def _SnapMetrics(deadline):\n next_deadline = deadline + frequency_seconds\n callback = partial(_SnapMetrics, next_deadline)\n cls._timeouts[group_key] = IOLoop.current().add_timeout(next_deadline, callback)\n\n sample = meter.sample()\n sample_json = json.dumps(sample)\n new_metric = Metric.Create(group_key, machine_id, deadline, sample_json)\n with util.Barrier(_UploadSuccess, _UploadError) as b:\n retry.CallWithRetryAsync(retry_policy, new_metric.Update, client=client, callback=b.Callback())", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def log_metric(key, value, step=None):\n mlflow.log_metric(key, value, step=step)", "def add_data(output, name, hits=True, misses=True, ltrim=0, rtrim=0):\n global CUR_MARKER\n\n h, m = _read_out(output)\n if ltrim:\n h, m = h[ltrim:], m[ltrim:]\n if rtrim:\n h, m = h[:-rtrim], m[:-rtrim]\n if hits:\n _add_series(h, '{}_hits'.format(name),\n MARKERS[CUR_MARKER % len(MARKERS)])\n if misses:\n _add_series(m, '{}_misses'.format(name),\n MARKERS[CUR_MARKER % len(MARKERS)])\n\n # use different marker for each plotted benchmark data\n CUR_MARKER += 1", "def increment_metric_counter(metric_name, redis_db):\n if TEST_MODE:\n print 'Simulate redis incremet, key is %s' % metric_name\n return\n if redis_db:\n try:\n redis_db.incr(metric_name)\n except Exception as e:\n logger.warning(\"Failed to increment redis metric '%s' \"\n \"with exception '%s'\", metric_name, e)", "def make_metric(name):\n return {\n \"type\": \"Metric\",\n \"name\": name,\n \"value\": \"\",\n \"units\": \"\",\n \"rating\": \"\",\n \"notes\": \"\",\n \"comment\": \"\",\n }", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def _record_result(self, action, data, tags=None):\r\n if tags is None:\r\n tags = []\r\n\r\n tags.append(u'result:{}'.format(data.get('success', False)))\r\n tags.append(u'action:{}'.format(action))\r\n dog_stats_api.increment(self._metric_name('request.count'), tags=tags)", "def 
get_metrics(self, metric_name: str):\n if metric_name == \"rmse\":\n return self._rmse\n elif metric_name == \"mase\":\n return self._mase\n elif metric_name == \"mae\":\n return self._mae\n elif metric_name == \"mape\":\n return self._mape\n elif metric_name == \"f1\":\n return self._f1\n elif metric_name == \"accuracy\":\n return self._accuracy", "def add_metrics_to_db(self) -> None:\n\n model = {\n 'id': 'model1',\n 'name': 'Housing Price Prediction',\n 'metrics': {\n 'mean_squared_error': mean_squared_error(self._y_test, self._predictions),\n 'mean_absolute_error': mean_absolute_error(self._y_test, self._predictions),\n 'r2_score': r2_score(self._y_test, self._predictions)\n }\n }\n\n self._db.add_model(model)", "def add_apple_data_to_activities(self):\n\n try:\n # apple data is loaded from csv rather than from json\n apple_data = self.load_apple_workouts()\n\n # filter out nike and strava data that has synced to apple, we are getting that from json source\n apple_data = apple_data[(apple_data.sourceName != \"Nike Run Club\") & (apple_data.sourceName != \"Strava\")]\n\n # set up 5 key metrics\n # note we're using enum values\n apple_data['source'] = ActivitySource.APPLE.value\n apple_data['activity_type'] = apple_data['workoutActivityType'].apply(lambda x: self.convert_apple_activity_type(x).value)\n apple_data['distance_in_km'] = apple_data['totalDistance']\n apple_data['duration_in_min'] = apple_data['duration']\n apple_data['start_timestamp'] = apple_data['startDate'].apply(lambda x: parse(x, tzinfos={\"America/Vancouver\"}))\n\n # filter out extraneous columns\n apple_data = apple_data.filter(self.data_frame_columns)\n self.all_activities = self.all_activities.append(apple_data, sort=True, ignore_index=True)\n\n logging.info(\"Done parsing Apple data.\")\n except Exception:\n logging.exception(\"Could not parse Apple data\")", "def addDEX(self, filename, data, dx=None):\n digest = hashlib.sha256(data).hexdigest()\n log.debug(\"add DEX:%s\" % digest)\n\n log.debug(\"Parsing format ...\")\n d = DalvikVMFormat(data)\n log.debug(\"added DEX:%s\" % digest)\n\n if filename not in self.analyzed_files:\n self.analyzed_files[filename] = []\n\n self.analyzed_files[filename].append(digest)\n self.analyzed_digest[digest] = filename\n\n if dx is None:\n dx = Analysis()\n\n dx.add(d)\n dx.create_xref()\n\n for d in dx.vms:\n d.set_decompiler(DecompilerDAD(d, dx))\n d.set_vmanalysis(dx)\n self.analyzed_dex[digest] = dx\n\n if self.export_ipython:\n log.debug(\"Exporting in ipython\")\n d.create_python_export()\n\n return digest, d, dx", "def observe_first(self, env: dm_env.Environment, timestep: dm_env.TimeStep\n ) -> None:\n self._metrics = {}\n self._accumulate_metrics(env)", "def update_metrics(self, metrics, predictions, labels):\n return", "def log(self, metric, value, source, timestamp=None):\n if timestamp is None:\n timestamp = datetime.now()\n\n sql = \"insert into measurement(metric, value, source, timestamp) values('{0}', {1}, '{2}', '{3}');\".format(\n metric, value, source, timestamp)\n\n self._execute_sql(sql)", "def update(self, current_iter, *metrics, **named_metrics):\n\n # Same order as __init__() in python>=3.6\n if len(metrics) > 0:\n for key, metric in zip(self.metrics.keys(), metrics):\n self.metrics[key].append((current_iter, metric))\n \n # Random order with names\n elif len(named_metrics) > 0:\n for name, metric in named_metrics.item():\n self.metrics[name].append((metric))\n\n else:\n raise ValueError(\"No valid value to update losses\")", "def write_metrics(output_dir, 
metrics, config, ancestors):\n os.makedirs(output_dir, exist_ok=True)\n\n file_name = \"metrics.csv\"\n file_path = os.path.join(output_dir, file_name)\n\n with open(file_path, \"w\", newline=\"\", encoding=\"utf-8\") as csvfile:\n csv_writer = csv.writer(csvfile)\n for line in metrics.items():\n csv_writer.writerow(line)\n\n record_provenance(file_path, config, ancestors)", "def export(\n self, metric_tuples: Sequence[Tuple[Metric, Sequence[str]]]\n ) -> \"MetricsExportResult\":", "def get_metric(self, metric, existing_dict=None):\n metric_key, metric_type, metric_name, metric_help = metric\n metric_dict = {\n 'name': metric_name,\n 'type': metric_type,\n 'help': metric_help,\n 'values': OrderedDict()\n }\n values = self.r.hgetall(metric_key) # new values\n # print \"values: %r\" % values\n metric_dict['values'] = values\n\n if existing_dict:\n # we're updating a metric we've already seen\n # print \"existing dict: %r\" % existing_dict\n for value in values:\n # print \"checking value: %r\" % value\n # value = json.loads(value)\n if value in existing_dict['values']:\n if metric_type == 'counter' or metric_type == 'histogram':\n # Combine the values if it's a counter or histogram\n # TODO: sort histogram buckets\n # TODO: append _bucket to histogram bucket names\n existing_dict['values'][value] = float(\n values[value]) + float(\n existing_dict['values'][value])\n elif metric_type == 'gauge':\n # use the last value we see for a gauge - # TODO: is there a better way? we could average it\n existing_dict['values'][value] = float(values[value])\n else:\n existing_dict['values'][value] = float(values[value])\n metric_dict['values'] = existing_dict['values']\n\n if metric_type == 'histogram':\n # json decode all of the labels\n samples = [json.loads(x, object_pairs_hook=OrderedDict) for x in metric_dict['values']]\n # we need to sort the values by the bucket labeled \"le\"\n sorted_keys = sorted(samples, key=lambda b: b['le'])\n # and then we need to store the values again json encoded\n vals = metric_dict['values']\n metric_dict['values'] = OrderedDict()\n for k in sorted_keys:\n kn = json.dumps(k, sort_keys=True)\n metric_dict['values'][kn] = vals[kn]\n\n return metric_dict", "def AddAncillaryData(self, ds):\n self.IsAncillaryData = True\n self.AncillaryData = ds", "def _dispatch_metrics(self, payload):\n for item in payload:\n try:\n self._ingest.send(gauges=item['gauges'], counters=item['counters'])\n except Exception as e:\n self._logger.error(\"Exception while sending payload to ingest : {0}\".format(e))", "def merge_time_metric(self, metric):\n\n self.merge_raw_time_metric(metric.duration, metric.exclusive)", "def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n for metric in self.metrics:\n metric.update(data)\n\n return self", "def record(method, arguments, result):\n recorder[call_to_key(method, arguments)] = result", "def batch_gauge(self, metric_dict, prefix='stalker.'):\n if not self.enabled:\n return\n payload = []\n for k in metric_dict:\n payload.append('%s%s:%d|g' % (prefix, k, metric_dict[k]))\n self._send_events(payload)", "def publisher_snap_metrics(snap_name):\n try:\n details = api.get_snap_info(snap_name, flask.session)\n except ApiResponseErrorList as api_response_error_list:\n if api_response_error_list.status_code == 404:\n return flask.abort(404, 'No snap named {}'.format(snap_name))\n else:\n return _handle_error_list(api_response_error_list.errors)\n except ApiError as api_error:\n return _handle_errors(api_error)\n\n metric_requested = 
logic.extract_metrics_period(\n flask.request.args.get('period', default='30d', type=str))\n\n installed_base_metric = logic.verify_base_metrics(\n flask.request.args.get(\n 'active-devices',\n default='version',\n type=str))\n\n installed_base = logic.get_installed_based_metric(installed_base_metric)\n metrics_query_json = metrics_helper.build_metrics_json(\n snap_id=details['snap_id'],\n installed_base=installed_base,\n metric_period=metric_requested['int'],\n metric_bucket=metric_requested['bucket'])\n\n try:\n metrics_response = api.get_publisher_metrics(\n flask.session,\n json=metrics_query_json)\n except ApiResponseErrorList as api_response_error_list:\n if api_response_error_list.status_code == 404:\n return flask.abort(404, 'No snap named {}'.format(snap_name))\n else:\n return _handle_error_list(api_response_error_list.errors)\n except ApiError as api_error:\n return _handle_errors(api_error)\n\n active_metrics = metrics_helper.find_metric(\n metrics_response['metrics'], installed_base)\n active_devices = metrics.ActiveDevices(\n name=active_metrics['metric_name'],\n series=active_metrics['series'],\n buckets=active_metrics['buckets'],\n status=active_metrics['status'])\n\n latest_active = 0\n if active_devices:\n latest_active = active_devices.get_number_latest_active_devices()\n\n country_metric = metrics_helper.find_metric(\n metrics_response['metrics'], \"weekly_installed_base_by_country\")\n country_devices = metrics.CountryDevices(\n name=country_metric['metric_name'],\n series=country_metric['series'],\n buckets=country_metric['buckets'],\n status=country_metric['status'],\n private=True)\n\n territories_total = 0\n if country_devices:\n territories_total = country_devices.get_number_territories()\n\n nodata = not any([country_devices, active_devices])\n\n context = {\n 'page_slug': 'my-snaps',\n # Data direct from details API\n 'snap_name': snap_name,\n 'snap_title': details['title'],\n 'metric_period': metric_requested['period'],\n 'active_device_metric': installed_base_metric,\n\n # Metrics data\n 'nodata': nodata,\n 'latest_active_devices': latest_active,\n 'active_devices': dict(active_devices),\n 'territories_total': territories_total,\n 'territories': country_devices.country_data,\n\n # Context info\n 'is_linux': 'Linux' in flask.request.headers['User-Agent']\n }\n\n return flask.render_template(\n 'publisher/metrics.html',\n **context)", "def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])", "def record_metrics_header(metric_list, output_file_name):\n with open(output_file_name, 'w') as file:\n # writting each metric on the header\n file.write(\",\".join(metric_list)+\"\\n\")" ]
[ "0.7010535", "0.6634994", "0.5894368", "0.579922", "0.57742566", "0.5767345", "0.5742764", "0.5742764", "0.56948036", "0.5694517", "0.56521934", "0.564671", "0.5620353", "0.56121796", "0.552094", "0.53992915", "0.5386446", "0.52903515", "0.5282726", "0.52693814", "0.5267927", "0.52584195", "0.5209942", "0.5201727", "0.51946783", "0.51549935", "0.5154828", "0.5152201", "0.5135652", "0.5111745", "0.5066726", "0.5066428", "0.5030387", "0.50243247", "0.50108314", "0.5010248", "0.50053656", "0.49680936", "0.49620962", "0.49533957", "0.49527538", "0.4952753", "0.49334887", "0.48896444", "0.4864278", "0.48421243", "0.48315993", "0.48226178", "0.4819026", "0.48162884", "0.47710025", "0.47606036", "0.47441918", "0.47441584", "0.4736944", "0.47174513", "0.47159958", "0.47116393", "0.470249", "0.46901873", "0.46852252", "0.46838903", "0.46706945", "0.46683267", "0.4640345", "0.4639422", "0.46387354", "0.46381575", "0.4637767", "0.463637", "0.4635044", "0.46336025", "0.46308553", "0.46259928", "0.46230653", "0.46175838", "0.4612211", "0.46115917", "0.46083197", "0.46018103", "0.458679", "0.45846784", "0.45839658", "0.45810753", "0.45786613", "0.4561651", "0.4556475", "0.45531547", "0.4550534", "0.45485884", "0.4547037", "0.453808", "0.45338842", "0.45255786", "0.45231473", "0.45212597", "0.45165637", "0.45126295", "0.45120025", "0.45106798" ]
0.74796903
0
Record the apdex metrics supplied by the iterable for a single transaction, merging the data with any data from prior apdex metrics with the same name.
def record_apdex_metrics(self, metrics): if not self.__settings: return for metric in metrics: self.record_apdex_metric(metric)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def record_transaction(self, transaction):\n\n if not self.__settings:\n return\n\n settings = self.__settings\n\n # Record the apdex, value and time metrics generated from the\n # transaction. Whether time metrics are reported as distinct\n # metrics or into a rollup is in part controlled via settings\n # for minimum number of unique metrics to be reported and thence\n # whether over a time threshold calculated as percentage of\n # overall request time, up to a maximum number of unique\n # metrics. This is intended to limit how many metrics are\n # reported for each transaction and try and cut down on an\n # explosion of unique metric names. The limits and thresholds\n # are applied after the metrics are reverse sorted based on\n # exclusive times for each metric. This ensures that the metrics\n # with greatest exclusive time are retained over those with\n # lesser time. Such metrics get reported into the performance\n # breakdown tab for specific web transactions.\n\n self.record_apdex_metrics(transaction.apdex_metrics(self))\n\n self.merge_custom_metrics(transaction.custom_metrics.metrics())\n\n self.record_time_metrics(transaction.time_metrics(self))\n\n # Capture any errors if error collection is enabled.\n # Only retain maximum number allowed per harvest.\n\n error_collector = settings.error_collector\n\n if (error_collector.enabled and settings.collect_errors and\n len(self.__transaction_errors) <\n settings.agent_limits.errors_per_harvest):\n self.__transaction_errors.extend(transaction.error_details())\n\n self.__transaction_errors = self.__transaction_errors[:\n settings.agent_limits.errors_per_harvest]\n\n if (error_collector.capture_events and\n error_collector.enabled and\n settings.collect_error_events):\n events = transaction.error_events(self.__stats_table)\n for event in events:\n self._error_events.add(event, priority=transaction.priority)\n\n # Capture any sql traces if transaction tracer enabled.\n\n if settings.slow_sql.enabled and settings.collect_traces:\n for node in transaction.slow_sql_nodes(self):\n self.record_slow_sql_node(node)\n\n # Remember as slowest transaction if transaction tracer\n # is enabled, it is over the threshold and slower than\n # any existing transaction seen for this period and in\n # the historical snapshot of slow transactions, plus\n # recording of transaction trace for this transaction\n # has not been suppressed.\n\n transaction_tracer = settings.transaction_tracer\n\n if (not transaction.suppress_transaction_trace and\n transaction_tracer.enabled and settings.collect_traces):\n\n # Transactions saved for Synthetics transactions\n # do not depend on the transaction threshold.\n\n self._update_synthetics_transaction(transaction)\n\n threshold = transaction_tracer.transaction_threshold\n\n if threshold is None:\n threshold = transaction.apdex_t * 4\n\n if transaction.duration >= threshold:\n self._update_slow_transaction(transaction)\n\n # Create the transaction event and add it to the\n # appropriate \"bucket.\" Synthetic requests are saved in one,\n # while transactions from regular requests are saved in another.\n\n if transaction.synthetics_resource_id:\n event = transaction.transaction_event(self.__stats_table)\n self._synthetics_events.add(event)\n\n elif (settings.collect_analytics_events and\n settings.transaction_events.enabled):\n\n event = transaction.transaction_event(self.__stats_table)\n self._transaction_events.add(event, priority=transaction.priority)\n\n # Merge in custom events\n\n if (settings.collect_custom_events 
and\n settings.custom_insights_events.enabled):\n self.custom_events.merge(transaction.custom_events)\n\n # Merge in span events\n\n if (settings.distributed_tracing.enabled and\n settings.span_events.enabled and settings.collect_span_events):\n if settings.infinite_tracing.enabled:\n for event in transaction.span_protos(settings):\n self._span_stream.put(event)\n elif transaction.sampled:\n for event in transaction.span_events(self.__settings):\n self._span_events.add(event, priority=transaction.priority)", "def flush(self):\n with self._lock:\n batch = self._batch\n timestamps = self._timestamps\n\n items = []\n for identity, value in batch.items():\n metric = {}\n typ, name, tags = identity\n metric[\"name\"] = name\n if typ:\n metric[\"type\"] = typ\n else:\n metric[\"timestamp\"] = timestamps[identity]\n\n if tags:\n metric[\"attributes\"] = dict(tags)\n\n metric[\"value\"] = value\n items.append(metric)\n\n items = tuple(items)\n\n batch.clear()\n timestamps.clear()\n\n common = self._common.copy()\n common[\"timestamp\"] = self._interval_start\n now = int(time.time() * 1000.0)\n interval = now - self._interval_start\n common[\"interval.ms\"] = interval\n\n self._interval_start = now\n\n return items, common", "def record_apdex_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Note that because we are using a scope here of an empty string\n # we can potentially clash with an unscoped metric. Using None,\n # although it may help to keep them separate in the agent will\n # not make a difference to the data collector which treats None\n # as an empty string anyway.\n\n key = (metric.name, '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = ApdexStats(apdex_t=metric.apdex_t)\n self.__stats_table[key] = stats\n stats.merge_apdex_metric(metric)\n\n return key", "def log_batch(self, measurements):\n for m in measurements:\n logger.info(m)\n self.log(metric=m.metric, value=m.value, source=m.source, timestamp=m.timestamp)", "def merge_apdex_metric(self, metric):\n\n self[0] += metric.satisfying\n self[1] += metric.tolerating\n self[2] += metric.frustrating\n\n self[3] = ((self[0] or self[1] or self[2]) and\n min(self[3], metric.apdex_t) or metric.apdex_t)\n self[4] = max(self[4], metric.apdex_t)", "def _dispatch_metrics(self, payload):\n for item in payload:\n try:\n self._ingest.send(gauges=item['gauges'], counters=item['counters'])\n except Exception as e:\n self._logger.error(\"Exception while sending payload to ingest : {0}\".format(e))", "def increment(self) -> None:\n self._increment_called = True\n self.append(deepcopy(self._base_metric))", "def calculate_batch_metrics(self):\n pass", "def apply_all_accumulators(self):\n self._require_state(\"APPLYING\")\n for mi in self._accums.keys():\n self._apply_one_accum_set(mi)", "def accumulate(self, batch_pred_map_cls, batch_gt_map_cls):\n bsize = len(batch_pred_map_cls)\n assert bsize == len(batch_gt_map_cls)\n for i in range(bsize):\n self.gt_map_cls[self.scan_cnt] = batch_gt_map_cls[i]\n self.pred_map_cls[self.scan_cnt] = batch_pred_map_cls[i]\n self.scan_cnt += 1", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def send_metrics(self):\n metrics = 
self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def calculate_agrigate(self):\n self.total = 0.0\n for rec in self.data:\n self.total = self.total + rec[\"value\"]\n\n self.agrigate_data = {\n \"site\": self.site,\n \"utc\": self.timestamp_utc,\n \"local\": self.timestamp_local,\n \"tag\": \"TOTAL\",\n \"value\": round(self.total, 3)}\n self.data.append(self.agrigate_data)", "def __apply_accumulators():\n self.__xdata = np.array([])\n self.__ydata = np.array([])\n for acc in self.signal_accumulators:\n self.__xdata = __array_append(self.__xdata,acc.attempt)\n self.__ydata = __array_append(self.__ydata,acc.count)\n self.__applied = True", "def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)", "def __init__(self, metrics_to_record):\n self.tape = {}\n\n for metric_name in metrics_to_record:\n self.tape[metric_name] = []", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)", "def record_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, value in metrics:\n self.record_custom_metric(name, value)", "def get_next_batch(self):\n\n metrics = {}\n for struct in self.metrics.values():\n metrics = {**metrics, **struct.get_next_batch()}\n\n return metrics", "def _average_training_metrics(\n self, per_batch_metrics: List[Dict[str, Any]]\n ) -> List[Dict[str, Any]]:\n check.true(self.hvd_config.use, \"Can only average training metrics in multi-GPU training.\")\n metrics_timeseries = util._list_to_dict(per_batch_metrics)\n\n # 
combined_timeseries is: dict[metric_name] -> 2d-array.\n # A measurement is accessed via combined_timeseries[metric_name][process_idx][batch_idx].\n combined_timeseries, _ = self._combine_metrics_across_processes(\n metrics_timeseries, num_batches=len(per_batch_metrics)\n )\n\n # If the value for a metric is a single-element array, the averaging process will\n # change that into just the element. We record what metrics are single-element arrays\n # so we can wrap them in an array later (for perfect compatibility with non-averaging\n # codepath).\n array_metrics = []\n for metric_name in per_batch_metrics[0].keys():\n if isinstance(per_batch_metrics[0][metric_name], np.ndarray):\n array_metrics.append(metric_name)\n\n if self.is_chief:\n combined_timeseries_type = Dict[str, List[List[Any]]]\n combined_timeseries = cast(combined_timeseries_type, combined_timeseries)\n num_batches = len(per_batch_metrics)\n num_processes = hvd.size()\n averaged_metrics_timeseries = {} # type: Dict[str, List]\n\n for metric_name in combined_timeseries.keys():\n averaged_metrics_timeseries[metric_name] = []\n for batch_idx in range(num_batches):\n batch = [\n combined_timeseries[metric_name][process_idx][batch_idx]\n for process_idx in range(num_processes)\n ]\n\n np_batch = np.array(batch)\n batch_avg = np.mean(np_batch[np_batch != None]) # noqa: E711\n if metric_name in array_metrics:\n batch_avg = np.array(batch_avg)\n averaged_metrics_timeseries[metric_name].append(batch_avg)\n per_batch_metrics = util._dict_to_list(averaged_metrics_timeseries)\n return per_batch_metrics", "def update(self, current_iter, *metrics, **named_metrics):\n\n # Same order as __init__() in python>=3.6\n if len(metrics) > 0:\n for key, metric in zip(self.metrics.keys(), metrics):\n self.metrics[key].append((current_iter, metric))\n \n # Random order with names\n elif len(named_metrics) > 0:\n for name, metric in named_metrics.item():\n self.metrics[name].append((metric))\n\n else:\n raise ValueError(\"No valid value to update losses\")", "def record_time_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_time_metric(metric)", "def prepareAccumulatedMetrics(self):\n displayDF = analyzeMetricsDF(self.resultList)\n displayDF.to_csv(\"data/results.csv\")", "def log_metrics(self, metrics: dict):\n self.metrics.update(metrics)\n\n self._sync_log_event()", "def _aggregate_perf_data(perf_all_ordinals: List[str]):\n aggregate = {}\n\n pd = PerfData()\n for data in perf_all_ordinals:\n worker_pd = PerfData(**json.loads(data))\n if len(perf_all_ordinals) > 1:\n aggregate.setdefault(\"ordinals\", [])\n aggregate[\"ordinals\"].append(worker_pd.throughput_dict())\n\n pd.merge(worker_pd)\n\n aggregate.update(dataclasses.asdict(pd))\n return aggregate", "def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)", "async def update_trade_stats(self):\n\n summary_keys = [base for base in config['min_base_volumes']] + ['global']\n summaries = {\n key: {\n 'open_count': 0,\n 'buys': 0,\n 'rebuys': 0,\n 'sells': 0,\n 'collect_sells': 0,\n 'soft_stop_sells': 0,\n 'total_profit': 0.0,\n 'total_loss': 0.0,\n 'total_fees': 0.0,\n 
'balancer_refills': 0,\n 'balancer_remits': 0,\n 'balancer_stop_losses': 0,\n 'balancer_profit': 0.0,\n 'balancer_loss': 0.0,\n 'balancer_fees': 0.0,\n } for key in summary_keys\n }\n\n for pair in self.trades:\n if pair not in self.trade_stats[self.time_prefix]:\n continue\n\n base = pair.split('-', 1)[0]\n open_count = len(self.trades[pair]['open'])\n\n summaries[base]['open_count'] += open_count\n summaries[base]['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries[base]['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries[base]['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries[base]['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries[base]['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries[base]['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries[base]['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries[base]['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries[base]['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries[base]['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries[base]['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries[base]['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries[base]['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n summaries['global']['open_count'] += open_count\n summaries['global']['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries['global']['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries['global']['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries['global']['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries['global']['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries['global']['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries['global']['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries['global']['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries['global']['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries['global']['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries['global']['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries['global']['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries['global']['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n for key in summaries:\n self.trade_stats[self.time_prefix][key]['buys'] = summaries[key]['buys']\n self.trade_stats[self.time_prefix][key]['rebuys'] = summaries[key]['rebuys']\n self.trade_stats[self.time_prefix][key]['sells'] = summaries[key]['sells']\n self.trade_stats[self.time_prefix][key]['collect_sells'] = summaries[key]['collect_sells']\n self.trade_stats[self.time_prefix][key]['soft_stop_sells'] = summaries[key]['soft_stop_sells']\n self.trade_stats[self.time_prefix][key]['total_profit'] = summaries[key]['total_profit']\n self.trade_stats[self.time_prefix][key]['total_loss'] = summaries[key]['total_loss']\n 
self.trade_stats[self.time_prefix][key]['total_fees'] = summaries[key]['total_fees']\n self.trade_stats[self.time_prefix][key]['balancer_refills'] = summaries[key]['balancer_refills']\n self.trade_stats[self.time_prefix][key]['balancer_remits'] = summaries[key]['balancer_remits']\n self.trade_stats[self.time_prefix][key]['balancer_profit'] = summaries[key]['balancer_profit']\n self.trade_stats[self.time_prefix][key]['balancer_loss'] = summaries[key]['balancer_loss']\n self.trade_stats[self.time_prefix][key]['balancer_fees'] = summaries[key]['balancer_fees']\n\n if summaries[key]['open_count'] > self.trade_stats[self.time_prefix][key]['most_open']:\n self.trade_stats[self.time_prefix][key]['most_open'] = summaries[key]['open_count']\n\n filter_items = [pair for pair in self.trades] + [base for base in config['min_base_volumes']] + ['global']\n self.save_attr('trade_stats', max_depth=2, filter_items=filter_items, filter_keys=[self.time_prefix])", "def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics", "def _store_rows(self):\n\n for value in self.values:\n self.counters.append(value['counter'])\n self.timestamps.append(value['timestamp'])\n self.acceleration.append(value['acceleration'])", "def record_metrics(self, pid, metrics):\n\n for _metric, _metric_value in metrics.items():\n if not self.__metrics_history[pid].get(_metric):\n self.__metrics_history[pid][_metric] = []\n self.__metrics_history[pid][_metric].append(_metric_value)\n # only keep the last 2 running history for any metric\n self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][\n -2:\n ]", "def metrics(self, metrics):\n\n self._metrics = metrics", "def _update_data(self):\n for attribute in [\"flow_rate\"]:\n self._data[attribute] = self._connection.measure", "def _calculate_aggregated_metrics(self):\n\n # using the historical values, calculate the 
aggregate\n # there are two kinds of metrics:\n # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)\n # b) absolute metrics - the last absolute value is used\n\n running_pids_set = set(self.__pids)\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not self.__aggregated_metrics.get(_metric):\n self.__aggregated_metrics[_metric] = 0\n if _metric.is_cumulative:\n if pid in running_pids_set:\n if len(_metric_values) > 1:\n # only report the cumulative metrics for more than one sample\n self.__aggregated_metrics[_metric] += (\n _metric_values[-1] - _metric_values[-2]\n )\n else:\n if pid in running_pids_set:\n # absolute metric - accumulate the last reported value\n self.__aggregated_metrics[_metric] += _metric_values[-1]", "def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with self._lock:\n self._batch[identity] = value\n self._timestamps[identity] = int(time.time() * 1000.0)", "def batch_gauge(self, metric_dict, prefix='stalker.'):\n if not self.enabled:\n return\n payload = []\n for k in metric_dict:\n payload.append('%s%s:%d|g' % (prefix, k, metric_dict[k]))\n self._send_events(payload)", "def metrics_group():", "def merge_accumulators(\n self,\n wrapper_accumulators: Iterable[WrapperAccumulator]) -> WrapperAccumulator:\n result = self.create_accumulator()\n for wrapper_accumulator in wrapper_accumulators:\n for feature_path, accumulator_for_feature in wrapper_accumulator.items():\n wrapped_accumulators = self._get_wrapped_accumulators(\n result, feature_path)\n for index, generator in enumerate(self._feature_stats_generators):\n wrapped_accumulators[index] = generator.merge_accumulators(\n [wrapped_accumulators[index], accumulator_for_feature[index]])\n return result", "def results_aggregator(self, names):\n\t\tfor name in names:\n\t\t\tresult = self.main(name)\n\t\t\tself.results.append(result)\n\t\t\tprint(\"'%s' has been written to the file.\" % result[0])\n\t\t\t\"\"\"result is formatted name, number, rating, review count\"\"\"", "def merge_accumulators(self, accumulators):\n raise NotImplementedError", "def summerize_adapter_metrics(parsed_metrics: Dict[int, dict]) -> Dict[Tuple[str, str], dict]:\n\n summarized_metrics = {}\n for lane in parsed_metrics:\n # Iterate over all samples in lane\n summarized_metrics[lane] = summarized_metrics.get(lane, {})\n for value in parsed_metrics[lane].values():\n sample_id = value.get(\"Sample_ID\")\n summarized_metrics[lane][sample_id] = summarized_metrics[lane].get(sample_id, value)\n summarized_metrics[lane][sample_id][\n \"R\" + value.get(\"ReadNumber\") + \"_SampleBases\"\n ] = value.get(\"SampleBases\")\n\n return summarized_metrics", "def update(self, outputs: torch.Tensor, targets: torch.Tensor) -> Tuple[Any, Any, Any, Any]:\n tn, fp, fn, tp, support = super().update(outputs=outputs, targets=targets)\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=tp, fp=fp, fn=fn, support=support, zero_division=self.zero_division,\n )\n return per_class, micro, macro, weighted", "def merge_logs(self):\n ourlog = LogData()\n for l in self.data_set:\n ourlog.entries = ourlog.entries + l.entries\n ourlog.sort_time()\n self.finalized_data = ourlog", "def setPerfMetrics(self, perf_metrics):\n for event in perf_metrics.metric:\n attr_name = '%s_%s_%s' % (frontendConfig.glidein_perfmetric_prefix,\n perf_metrics.name, event)\n self.adParams[attr_name] = 
perf_metrics.event_lifetime(event)", "def store(self, **stats):\n if self.first_row:\n self.log_headers = list(stats.keys())\n for key in stats:\n assert key in self.log_headers, f\"Can't introduce a new key that you didn't include before: {key}\"\n\n # Write to output file\n if self.first_row:\n self.file_writer.writerow(self.log_headers)\n self.file_writer.writerow(stats.values())\n self.output_file.flush()\n\n # Display in stdout\n if self.log_freq > 0 and self.counter % self.log_freq == 0:\n _print_table(stats)\n\n self.first_row = False\n self.counter += 1", "def flush_analysis_data(self):\n self.writer.write_bulk(zip(self.analyzed_types, self.analyzed))\n self.analyzed_types = []\n self.analyzed = []", "def compute_metrics(self, results: list) -> dict:", "def update_all_fa_tag():\n failed_dict = {}\n mb_remaining = 100\n requests_remaining = 100\n\n fa_table_ids = pybea.get_parameter_values(UserID, 'FixedAssets', ParameterName='TableName', ResultFormat='JSON')\n tablenames = fa_table_ids['TableName'].values\n\n table_name_col = []\n series_code_col = []\n period_col = []\n data_val_col = []\n line_description_col = []\n\n for x in tablenames:\n temp = pybea.get_data(UserID, 'FixedAssets', TableName=x, Year='ALL')\n # Compute how many megabytes each request is\n size = sys.getsizeof(temp) / 1000000\n mb_remaining -= size\n requests_remaining -= 1\n\n table_name = temp['TableName']\n series_code = temp['SeriesCode']\n period = temp['TimePeriod']\n data_val = temp['DataValue']\n line_description = temp['LineDescription']\n\n table_name_col.extend(table_name)\n series_code_col.extend(series_code)\n period_col.extend(period)\n data_val_col.extend(data_val)\n line_description_col.extend(line_description)\n\n time.sleep(1)\n if mb_remaining < 5:\n time.sleep(55)\n mb_remaining = 100\n requests_remaining = 100\n if requests_remaining < 2:\n time.sleep(45)\n mb_remaining = 100\n requests_remaining = 100\n if pybea.JSON_ERROR:\n failed_dict[x] = pybea.JSON_ERROR\n time.sleep(1)\n\n aggregate_fa = pd.DataFrame()\n aggregate_fa['line_number'] = table_name_col\n aggregate_fa['line_name_short'] = line_description_col\n aggregate_fa['series_code'] = series_code_col\n aggregate_fa['year'] = period_col\n aggregate_fa['value'] = data_val_col\n\n aggregate_fa.to_csv('../FA_ALL/aggregate_fa.csv', index=False)\n aggregate_fa.to_csv('aggregate_fa.csv', index=False)\n\n\n return failed_dict", "def aggregate(self):\n data_to_track = {}\n for possession in self.possessions_to_track_aggregate:\n data_to_track[possession] = self._haves[possession]\n\n for variable in self.variables_to_track_aggregate:\n try:\n data_to_track[variable] = self.__dict__[variable]\n except KeyError:\n pass\n self.database_connection.put([\"aggregate\",\n data_to_track,\n self.group,\n self.round])", "def add_results(self, results):\n if self.replication_counter < self.replication_num:\n for metric in self.metrics:\n self.metric_final_results[metric].append(results[metric])\n\n self.replication_counter += 1\n else:\n raise Exception(\"The requested metric collection call of {}/{} exceeds the number of pre-defined replication\".format(self.replication_counter, self.replication_num))", "def aggregate_results(self):\n\n raise NotImplementedError", "def log_results(self, filename=None):\n\n self.ad_log['train_auc'] = self.diag['train']['auc'][-1]\n self.ad_log['train_accuracy'] = self.diag['train']['acc'][-1]\n self.ad_log['train_time'] = self.train_time\n\n self.ad_log['test_auc'] = self.diag['test']['auc'][-1]\n 
self.ad_log['test_accuracy'] = self.diag['test']['acc'][-1]\n self.ad_log['test_time'] = self.test_time\n\n self.ad_log.save_to_file(filename=filename)", "def compute_metrics(self):\n overall_ret = OrderedDict()\n for ap_iou_thresh in self.ap_iou_thresh:\n ret_dict = OrderedDict()\n rec, prec, ap = eval_det_multiprocessing(self.pred_map_cls, self.gt_map_cls, ovthresh=ap_iou_thresh)\n for key in sorted(ap.keys()):\n clsname = self.class2type_map[key] if self.class2type_map else str(key)\n ret_dict[\"%s Average Precision\" % (clsname)] = ap[key]\n ap_vals = np.array(list(ap.values()), dtype=np.float32)\n ap_vals[np.isnan(ap_vals)] = 0\n ret_dict[\"mAP\"] = ap_vals.mean()\n rec_list = []\n for key in sorted(ap.keys()):\n clsname = self.class2type_map[key] if self.class2type_map else str(key)\n try:\n ret_dict[\"%s Recall\" % (clsname)] = rec[key][-1]\n rec_list.append(rec[key][-1])\n except:\n ret_dict[\"%s Recall\" % (clsname)] = 0\n rec_list.append(0)\n ret_dict[\"AR\"] = np.mean(rec_list)\n overall_ret[ap_iou_thresh] = ret_dict\n return overall_ret", "def aggregate_statistics(self, new_stats):\n \n if isinstance(new_stats,RunStatistics):\n new_stats = [new_stats, ]\n elif isinstance(new_stats,list):\n if any(not isinstance(_,RunStatistics) for _ in new_stats):\n raise MadGraph5Error, \"The 'new_stats' argument of the function \"+\\\n \"'updtate_statistics' must be a (possibly list of) \"+\\\n \"RunStatistics instance.\"\n \n keys = set([])\n for stat in [self,]+new_stats:\n keys |= set(stat.keys())\n\n new_stats = new_stats+[self,]\n for key in keys:\n # Define special rules\n if key=='max_precision':\n # The minimal precision corresponds to the maximal value for PREC\n self[key] = min( _[key] for _ in new_stats if key in _)\n elif key=='min_precision':\n # The maximal precision corresponds to the minimal value for PREC\n self[key] = max( _[key] for _ in new_stats if key in _)\n elif key=='averaged_timing':\n n_madloop_calls = sum(_['n_madloop_calls'] for _ in new_stats if\n 'n_madloop_calls' in _)\n if n_madloop_calls > 0 :\n self[key] = sum(_[key]*_['n_madloop_calls'] for _ in \n new_stats if (key in _ and 'n_madloop_calls' in _) )/n_madloop_calls\n else:\n # Now assume all other quantities are cumulative\n self[key] = sum(_[key] for _ in new_stats if key in _)", "def add_apple_data_to_activities(self):\n\n try:\n # apple data is loaded from csv rather than from json\n apple_data = self.load_apple_workouts()\n\n # filter out nike and strava data that has synced to apple, we are getting that from json source\n apple_data = apple_data[(apple_data.sourceName != \"Nike Run Club\") & (apple_data.sourceName != \"Strava\")]\n\n # set up 5 key metrics\n # note we're using enum values\n apple_data['source'] = ActivitySource.APPLE.value\n apple_data['activity_type'] = apple_data['workoutActivityType'].apply(lambda x: self.convert_apple_activity_type(x).value)\n apple_data['distance_in_km'] = apple_data['totalDistance']\n apple_data['duration_in_min'] = apple_data['duration']\n apple_data['start_timestamp'] = apple_data['startDate'].apply(lambda x: parse(x, tzinfos={\"America/Vancouver\"}))\n\n # filter out extraneous columns\n apple_data = apple_data.filter(self.data_frame_columns)\n self.all_activities = self.all_activities.append(apple_data, sort=True, ignore_index=True)\n\n logging.info(\"Done parsing Apple data.\")\n except Exception:\n logging.exception(\"Could not parse Apple data\")", "def report_store(analysis_store, helpers, timestamp_yesterday):\n case = analysis_store.get_cases()[0]\n 
helpers.add_analysis(\n analysis_store, case, pipeline=Pipeline.MIP_DNA, started_at=timestamp_yesterday\n )\n helpers.add_analysis(analysis_store, case, pipeline=Pipeline.MIP_DNA, started_at=datetime.now())\n # Mock sample dates to calculate processing times\n for family_sample in analysis_store.get_case_samples_by_case_id(\n case_internal_id=case.internal_id\n ):\n family_sample.sample.ordered_at = timestamp_yesterday - timedelta(days=2)\n family_sample.sample.received_at = timestamp_yesterday - timedelta(days=1)\n family_sample.sample.prepared_at = timestamp_yesterday\n family_sample.sample.sequenced_at = timestamp_yesterday\n family_sample.sample.delivered_at = datetime.now()\n return analysis_store", "def add_metrics_point(self, data_points: Dict[str, float], timestamp: float):\n for name, value in data_points.items():\n # Using in-sort to insert while maintaining sorted ordering.\n bisect.insort(a=self.data[name], x=TimeStampedValue(timestamp, value))", "def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )", "def _report(self, registry=None, timestamp=None, flush_current_hist=False):\n registry = registry or self.registry\n if self.enable_runtime_metrics:\n col = runtime_metrics.RuntimeCollector(registry)\n col.collect()\n metrics = registry.dump_metrics()\n for key in metrics.keys():\n metric_name, metric_tags = self.decode_key(key)\n tags = self.tags\n if metric_tags:\n tags = self.tags.copy()\n tags.update(metric_tags)\n\n wf_hist = wavefront_histogram.get(key, registry)\n if wf_hist is not None:\n distributions = wf_hist.get_distribution()\n if flush_current_hist:\n distributions.extend(\n wf_hist.get_current_minute_distribution())\n for dist in distributions:\n self.wavefront_client.send_distribution(\n name=f'{self.prefix}{metric_name}',\n centroids=dist.centroids,\n histogram_granularities=self.histogram_granularities,\n timestamp=dist.timestamp,\n source=self.source,\n tags=tags)\n continue\n\n is_delta = delta.is_delta_counter(key, registry)\n for value_key in metrics[key].keys():\n if is_delta:\n self.wavefront_client.send_delta_counter(\n name=delta.get_delta_name(self.prefix, metric_name,\n value_key),\n value=metrics[key][value_key], source=self.source,\n tags=tags\n )\n # decrement delta counter\n registry.counter(key).dec(metrics[key][value_key])\n else:\n self.wavefront_client.send_metric(\n name=f'{self.prefix}{metric_name}.{value_key}',\n value=metrics[key][value_key], timestamp=timestamp,\n source=self.source, tags=tags)", "def new_archive_record(self, event):\n end_ts = event.record['dateTime']\n start_ts = end_ts - event.record['interval'] * 60\n\n for topic in self.subscriber.subscribed_topics: # topics might not be cached.. 
therefore use subscribed?\n self.logger.debug(\"Service record prior to update is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(event.record['dateTime']),\n to_sorted_string(event.record)))\n target_data = self.subscriber.get_accumulated_data(topic, start_ts, end_ts, event.record['usUnits'])\n event.record.update(target_data)\n self.logger.debug(\"Service record after update is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(event.record['dateTime']),\n to_sorted_string(event.record)))", "def store_all(self, store):\n for uid in self._status.list():\n distribution = self._status.get(uid)\n name = self._status.get_name(uid)\n\n # Store data\n store.store(uid, {'name': name, 'distribution': distribution})", "def record_custom_metric(self, name, value):\n key = (name, '')\n\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(key)\n if stats is None:\n self.__stats_table[key] = new_stats\n else:\n stats.merge_stats(new_stats)\n\n return key", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "def get_aggregated_values(self):\n if not self._initialized:\n raise Exception(\"To readout you must first initialize, then\"\n \"process batches!\")\n else:\n ret_vals = [q.readout() for q in self.quantities]\n return dict(zip(self.quantity_names, ret_vals))", "def compute_metrics(self, results: list) -> dict:\n dump(results, self.out_file_path)\n print_log(\n f'Results has been saved to {self.out_file_path}.',\n logger='current')\n return {}", "def process_and_write_aggregate_results(\n aggregate_metrics: List[Dict],\n aggregate_stats: List[Dict],\n configuration: Dict,\n args: argparse.Namespace,\n dataset_id: str,\n) -> None:\n (\n averaged_metrics,\n averaged_stats,\n ) = fanatic.metrics.average_metrics_stats_from_seed_runs(aggregate_metrics, aggregate_stats)\n\n fanatic.output.save_averaged_results(averaged_metrics, averaged_stats, configuration, args, dataset_id)\n\n final_metric = averaged_metrics[\"ami\"][\"mean\"]\n logger.info(f\"For dataset_id={dataset_id} final averaged ami metric={final_metric}\")", "def add_datapoints(self, stats):\n # APCU Stats\n apcu_stats = stats.get('apcu_stats', dict())\n self.add_gauge_value('APCu Cache/Slots', 'slots',\n apcu_stats.get('nslots',\n apcu_stats.get('num_slots', 0)))\n self.add_gauge_value('APCu Cache/Entries', 'keys',\n apcu_stats.get('nentries',\n apcu_stats.get('num_entries', 0)))\n self.add_gauge_value('APCu Cache/Size', 'bytes',\n apcu_stats.get('mem_size', 0))\n self.add_gauge_value('APCu Cache/Expunges', 'keys',\n apcu_stats.get('nexpunges',\n apcu_stats.get('expunges', 0)))\n\n hits = apcu_stats.get('nhits', apcu_stats.get('num_hits', 0))\n misses = apcu_stats.get('nmisses', apcu_stats.get('num_misses', 0))\n total = hits + misses\n if total > 0:\n effectiveness = float(float(hits) / float(total)) * 100\n else:\n effectiveness = 0\n self.add_gauge_value('APCu Cache/Effectiveness', 'percent',\n effectiveness)\n\n self.add_derive_value('APCu Cache/Hits', 'keys', hits)\n self.add_derive_value('APCu Cache/Misses', 'keys', misses)\n self.add_derive_value('APCu Cache/Inserts', 'keys',\n apcu_stats.get('ninserts',\n apcu_stats.get('num_inserts',0)))", "def on_batch_end(self, batch, logs={}):\n 
self.losses.append(logs.get('loss'))\n self.accuracies.append(logs.get('acc'))", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])", "def _storePerfStats(self, results):\n self.state = ZenProcessTask.STATE_STORE_PERF\n byConf = reverseDict(self._deviceStats._pidToProcess)\n for procStat, pids in byConf.iteritems():\n if len(pids) != 1:\n log.debug(\"There are %d pids by the name %s - %s\",\n len(pids), procStat._config.name, procStat._config.originalName)\n procName = procStat._config.name\n for pid in pids:\n if not AS400PLUG in self._device.zCollectorPlugins:\n cpu = results.get(CPU + str(pid), None)\n else:\n cpu = results.get(AS400CPU + str(pid), None) / 10 ## as we get millis vs centis\n mem = results.get(MEM + str(pid), None)\n procStat.updateCpu(pid, cpu)\n procStat.updateMemory(pid, mem)\n self._save(procName, 'cpu_cpu', procStat.getCpu(),\n 'DERIVE', min=0)\n self._save(procName, 'mem_mem',\n procStat.getMemory() * 1024, 'GAUGE')\n return results", "def calculate(self, batch_info):\n self.buffer.extend(batch_info['episode_infos'])", "def calculate(self, batch_info):\n self.buffer.extend(batch_info['episode_infos'])", "def add_hits(cls, all_hits):\n # Organize hits so that one client IP will always use the same queue.\n # We have to do this so visits from the same IP will be added in the right order.\n hits_by_client = [[] for r in cls.recorders]\n for hit in all_hits:\n hits_by_client[hit.get_visitor_id_hash() % len(cls.recorders)].append(hit)\n\n for i, recorder in enumerate(cls.recorders):\n recorder.queue.put(hits_by_client[i])", "def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset", "def estimate_metrics(\n self,\n all_labels,\n all_preds\n ):\n n_predictions = len(all_preds)\n\n for metric in self.metrics:\n # report everything but loss\n if metric.__name__ is not \"loss\":\n if isinstance(all_preds[0], list):\n result = np.mean([metric(labels, preds) for preds,labels in zip(all_preds, all_labels)])\n else:\n result = metric(all_labels, all_preds)\n \n if metric.__name__ in self.multi_batch_metrics:\n self.multi_batch_metrics[metric.__name__].append(result)\n self.multi_batch_metrics[\"len_\" + metric.__name__].append(\n n_predictions)\n else:\n self.multi_batch_metrics[metric.__name__] = [result]\n self.multi_batch_metrics[\"len_\" + metric.__name__] = [n_predictions]", "def record_all(self):\n for i in self.recorders:\n t = i[0]\n r = i[1]\n self.add_row(t, r())", "def processReports(self):\n count = 0\n for r in self.reports:\n #need to change the next two lines so that the fields are not hard-coded\n self.currentCase = r.id\n self.currentText = r.impression.lower()\n self.analyzeReport(self.currentText,\n \"disease\",\n modFilters=['indication','probable_existence',\n 'definite_existence',\n 'historical','future','pseudoneg',\n 'definite_negated_existence',\n 'probable_negated_existence'])\n\n self.recordResults()", "def get_new_data(self):\n\n # record bar parse performance\n self.logger.debug(\"Started parsing new ticks.\")\n start_parse = time.time()\n 
for exchange in self.exchanges:\n exchange.parse_ticks()\n end_parse = time.time()\n duration = round(end_parse - start_parse, 5)\n\n self.logger.debug(\n \"Parsed \" + str(self.total_instruments) +\n \" instruments' ticks in \" + str(duration) + \" seconds.\")\n self.track_tick_processing_performance(duration)\n\n # wrap new 1 min bars in market events\n new_market_events = []\n for exchange in self.exchanges:\n bars = exchange.get_new_bars()\n for symbol in exchange.get_symbols():\n for bar in bars[symbol]:\n event = MarketEvent(exchange.get_name(), bar)\n new_market_events.append(event)\n # add bars to save-to-db-later queue\n # TODO: store new bars concurrently with a processpool\n self.bars_save_to_db.put(event)\n return new_market_events", "def aggregate(self, **aggregations):\n # Before we iterate, reset the aggregations\n for _, agg in aggregations.items():\n agg.reset()\n # Do the accumulation\n for attrs in self:\n for _, agg in aggregations.items():\n agg.accumulate(attrs)\n # Return the results\n return {name: agg.result for name, agg in aggregations.items()}", "def gather_sample(self):\n\n for _pid in self._select_processes():\n if not self.__trackers.get(_pid):\n self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)\n\n self._reset_absolute_metrics()\n\n for _tracker in self.__trackers.values():\n _metrics = _tracker.collect()\n self.record_metrics(_tracker.pid, _metrics)\n\n self._calculate_aggregated_metrics()\n self._remove_dead_processes()\n\n self.print_metrics()", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "async def test_all_transactions(self):\n response = await self.collect(get_request_text=self.GATLING_LOG)\n self.assert_measurement(response, value=\"2\")", "def _process_charts(self, data_set_instance_id):\n for chart in self._charts:\n self._db.Query(\"\"\"INSERT INTO report_data_set_chart_instance\n (report_data_set_chart_id, report_data_set_instance_id, chart_generation_time)\n VALUES(%s, %s, NOW())\n ON DUPLICATE KEY UPDATE chart_generation_time = NOW()\"\"\",(chart['report_data_set_chart_id'], data_set_instance_id))", "def accumulate_quantities(self, numerical_values):\n if not self._initialized:\n raise Exception(\"To readout you must first initialize, then\"\n \"process batches!\")\n else:\n for quantity in self.quantities:\n quantity.accumulate(\n *[numerical_values[self.requires.index(requirement)]\n for requirement in quantity.requires])", "def compute_metrics(self):\n self.finalize_output_dict()\n self.metric_dict = {\n key: value(self.output_dict[\"labels\"], self.output_dict[\"pred_probs\"])\n for key, value in self.metric_fns.items()\n }", "def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / 
target['stats']['count']", "def generate_metrics_data(metricsquery: List, resultsquery: Dict, deltaminutes: int = 5, Region_name: str = None) -> Dict:\r\n cloudwatch=client('cloudwatch', region_name=Region_name) \r\n paginator = cloudwatch.get_paginator('get_metric_data')\r\n metricsgroup=grouper(metricsquery)\r\n resultsquery['ApiCalls']=0 \r\n for mqs in metricsgroup:\r\n for response in paginator.paginate(MetricDataQueries=mqs, StartTime=datetime.now()-timedelta(minutes=deltaminutes),EndTime=datetime.now()):\r\n for results in response['MetricDataResults']:\r\n resultsquery[results['Id']].append({'results':results})\r\n resultsquery['ApiCalls']+=1\r\n return resultsquery", "def aggregate_historical_trades(self, pair: list):\n raise NotImplementedError", "def merge_metric_stats(self, snapshot):\n\n if not self.__settings:\n return\n\n for key, other in six.iteritems(snapshot.__stats_table):\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def track_transaction(self, transaction, items):\n trans = Transaction()\n trans.order_id = transaction.get('order_id', None)\n trans.total = transaction.get('total', None)\n trans.tax = transaction.get('tax', None)\n trans.affiliation = transaction.get('affiliation', None)\n trans.shipping = transaction.get('shipping', None)\n trans.city = transaction.get('city', None)\n trans.state = transaction.get('state', None)\n trans.country = transaction.get('country', None)\n\n for item in items:\n gitem = gaItem()\n gitem.sku = item.get('sku', None)\n gitem.name = item.get('name', None)\n gitem.variation = item.get('variation', None)\n gitem.price = item.get('price', None)\n gitem.quantity = item.get('quantity', 1)\n trans.add_item(gitem)\n\n self.ga_tracker.track_transaction(transaction=trans,session=self.ga_session,visitor=self.ga_visitor)", "def accumulate(self, predictions, labels, loss):\n batch_size = labels.shape[0]\n mean_hit_at_one = calculate_hit_at_one(predictions, labels)\n mean_perr = calculate_precision_at_equal_recall_rate(predictions, labels)\n mean_f1score = calculate_f1score(predictions, labels)\n mean_f2score = calculate_f2score(predictions, labels)\n mean_perr = calculate_precision_at_equal_recall_rate(predictions, labels)\n mean_loss = np.mean(loss)\n\n self.num_examples += batch_size\n self.sum_hit_at_one += mean_hit_at_one * batch_size\n self.sum_perr += mean_perr * batch_size\n self.sum_f1score += mean_f1score * batch_size\n self.sum_f2score += mean_f2score * batch_size\n self.sum_loss += mean_loss * batch_size\n\n return {\"hit_at_one\": mean_hit_at_one, \"perr\": mean_perr, \"f1score\": mean_f1score, \"f2score\": mean_f2score, \"loss\": mean_loss}", "def report(self):\n m = {}\n num_tok = self.metrics['num_tokens']\n if num_tok > 0:\n if self.metrics['correct_tokens'] > 0:\n m['token_acc'] = self.metrics['correct_tokens'] / num_tok\n m['loss'] = self.metrics['loss'] / num_tok\n if self.metrics['pred_count'] > 0:\n m['pred'] = self.metrics['correct_pred'] / self.metrics['pred_count']\n try:\n m['ppl'] = math.exp(m['loss'])\n except OverflowError:\n m['ppl'] = float('inf')\n if self.metrics['total_skipped_batches'] > 0:\n m['total_skipped_batches'] = self.metrics['total_skipped_batches']\n for k, v in m.items():\n # clean up: rounds to sigfigs and converts tensors to floats\n m[k] = round_sigfigs(v, 4)\n return m", "def add_metrics(self, metric_dict: dict):\n self.metric_dict.update(metric_dict)", "def process_indicators(actapi: act.api.Act, args: Config, 
falcon: Intel) -> None:\n\n indicator_marker = None\n\n for indicator in crowdstrike_intel.get_latest_indicators(\n falcon, get_last_indicator()\n ):\n handle_indicator(actapi, indicator, args.output_format)\n\n indicator_marker = indicator.marker\n\n if indicator_marker:\n update_last_indicator(indicator_marker)", "def batchstore(self, reward, next_obs):\n self.count_oa[self.current_obs, self.current_act] += 1\n self.count_oao[self.current_obs, self.current_act, next_obs] += 1\n self.reward_oa[self.current_obs, self.current_act] += reward\n \n # updating the value table, estiamting the current state-action values\n self.valQoa[self.current_obs, self.current_act]\\\n += self.alpha * ((1-self.gamma) * reward\\\n + self.gamma * np.dot(self.X[next_obs], self.valQoa[next_obs])\\\n - self.valQoa[self.current_obs, self.current_act])\n\n self.next_obs = next_obs # just for consistency checking\n \n self.ret = (1-self.gamma)*reward + self.gamma * self.ret\n self.batch_step += 1\n self.total_step += 1", "def aggregate(self, batch_outs, batch_start=None, batch_end=None):\n raise NotImplementedError('Must be implemented in subclasses.')", "def tally(self, ident, name, cost, leadtime):\r\n self.idents.append(ident)\r\n self.names.append(name)\r\n self.costs.append(cost)\r\n self.leadtimes.append(leadtime)", "def gap_report():\r\n\r\n #Get all assess_summary\r\n assess_need_rows = db((db.project_need.id > 0) &\\\r\n (db.project_need.assess_id == db.assess_assess.id) &\\\r\n (db.assess_assess.location_id > 0) &\\\r\n (db.assess_assess.deleted != True)\r\n ).select(db.assess_assess.id,\r\n db.assess_assess.location_id,\r\n db.assess_assess.datetime,\r\n db.project_need.need_type_id,\r\n db.project_need.value\r\n )\r\n\r\n activity_rows = db((db.project_activity.id > 0) &\\\r\n (db.project_activity.location_id > 0) &\\\r\n (db.project_activity.deleted != True)\r\n ).select(db.project_activity.id,\r\n db.project_activity.location_id,\r\n db.project_activity.need_type_id,\r\n db.project_activity.organisation_id,\r\n db.project_activity.total_bnf,\r\n db.project_activity.start_date,\r\n db.project_activity.end_date\r\n )\r\n\r\n def map_assess_to_gap(row):\r\n return Storage( assess_id = row.assess_assess.id,\r\n location_id = row.assess_assess.location_id,\r\n datetime = row.assess_assess.datetime,\r\n need_type_id = row.project_need.need_type_id,\r\n value = row.project_need.value,\r\n activity_id = None,\r\n organisation_id = None,\r\n start_date = NONE,\r\n end_date = NONE,\r\n total_bnf = NONE,\r\n )\r\n\r\n gap_rows = map(map_assess_to_gap, assess_need_rows)\r\n\r\n for activity_row in activity_rows:\r\n add_new_gap_row = True\r\n # Check if there is an Assessment of this location & cluster_subsector_id\r\n for gap_row in gap_rows:\r\n if activity_row.location_id == gap_row.location_id and \\\r\n activity_row.need_type_id == gap_row.need_type_id:\r\n\r\n add_new_gap_row = False\r\n\r\n gap_row.activity_id = activity_row.id,\r\n gap_row.organisation_id = activity_row.organisation_id\r\n gap_row.start_date = activity_row.start_date\r\n gap_row.end_date = activity_row.end_date\r\n gap_row.total_bnf = activity_row.total_bnf\r\n break\r\n\r\n if add_new_gap_row:\r\n gap_rows.append(Storage(location_id = activity_row.location_id,\r\n need_type_id = activity_row.need_type_id,\r\n activity_id = activity_row.id,\r\n organisation_id = activity_row.organisation_id,\r\n start_date = activity_row.start_date,\r\n end_date = activity_row.end_date,\r\n total_bnf = activity_row.total_bnf,\r\n )\r\n )\r\n\r\n headings = 
(\"Location\",\r\n \"Needs\",\r\n \"Assessment\",\r\n \"Date\",\r\n \"Activity\",\r\n \"Start Date\",\r\n \"End Date\",\r\n \"Total Beneficiaries\",\r\n \"Organization\",\r\n \"Gap (% Needs Met)\",\r\n )\r\n gap_table = TABLE(THEAD(TR(*[TH(header) for header in headings])),\r\n _id = \"list\",\r\n _class = \"dataTable display\"\r\n )\r\n\r\n for gap_row in gap_rows:\r\n if gap_row.assess_id:\r\n assess_action_btn = A(T(\"Open\"),\r\n _href = URL(r=request,\r\n c=\"assess\",\r\n f=\"assess\",\r\n args = (gap_row.assess_id, \"need\")\r\n ),\r\n _target = \"blank\",\r\n _id = \"show-add-btn\",\r\n _class=\"action-btn\"\r\n )\r\n else:\r\n assess_action_btn = NONE\r\n\r\n if gap_row.activity_id:\r\n activity_action_btn =A(T(\"Open\"),\r\n _href = URL(r=request,\r\n c=\"project\",\r\n f=\"activity\",\r\n args = (gap_row.activity_id)\r\n ),\r\n _target = \"blank\",\r\n _id = \"show-add-btn\",\r\n _class=\"action-btn\"\r\n ),\r\n else:\r\n activity_action_btn = A(T(\"Add\"),\r\n _href = URL(r=request,\r\n c=\"project\",\r\n f=\"activity\",\r\n args = (\"create\"),\r\n vars = {\"location_id\":gap_row.location_id,\r\n \"need_type_id\":gap_row.need_type_id,\r\n }\r\n ),\r\n _id = \"show-add-btn\",\r\n _class=\"action-btn\"\r\n ),\r\n\r\n need_str = shn_need_type_represent(gap_row.need_type_id)\r\n if gap_row.value:\r\n need_str = \"%d %s\" % (gap_row.value, need_str)\r\n\r\n #Calculate the Gap\r\n if not gap_row.value:\r\n gap_str = NONE\r\n elif gap_row.total_bnf and gap_row.total_bnf != NONE:\r\n gap_str = \"%d%%\" % min((gap_row.total_bnf / gap_row.value) * 100, 100)\r\n else:\r\n gap_str = \"0%\"\r\n\r\n gap_table.append(TR( shn_gis_location_represent(gap_row.location_id),\r\n need_str,\r\n assess_action_btn,\r\n gap_row.datetime or NONE,\r\n activity_action_btn,\r\n gap_row.start_date or NONE,\r\n gap_row.end_date or NONE,\r\n gap_row.total_bnf or NONE,\r\n shn_organisation_represent(gap_row.organisation_id),\r\n gap_str\r\n )\r\n )\r\n\r\n return dict(title = T(\"Gap Analysis Report\"),\r\n subtitle = T(\"Assessments Needs vs. Activities\"),\r\n gap_table = gap_table,\r\n )" ]
[ "0.6160698", "0.56689644", "0.5492042", "0.53796333", "0.5373829", "0.5285188", "0.52515113", "0.52226347", "0.522238", "0.52220243", "0.5142877", "0.51234", "0.5087667", "0.50747657", "0.50630915", "0.5057553", "0.5049021", "0.50429595", "0.5004636", "0.49941427", "0.49351588", "0.49190253", "0.49097806", "0.4884444", "0.48710936", "0.48627272", "0.48547217", "0.48513654", "0.48434678", "0.4842337", "0.48364562", "0.48335174", "0.48318768", "0.48302817", "0.4821537", "0.47805858", "0.477299", "0.47671816", "0.47627565", "0.47554985", "0.47471896", "0.4736033", "0.47347367", "0.4733205", "0.47320825", "0.47309372", "0.47260886", "0.47171244", "0.47111237", "0.47021237", "0.4692445", "0.4678423", "0.4672856", "0.46702173", "0.4667245", "0.4661149", "0.464801", "0.46458572", "0.46421245", "0.4639622", "0.46347284", "0.46303022", "0.4623223", "0.46208295", "0.46201223", "0.46127138", "0.4611412", "0.46079394", "0.46054214", "0.4604656", "0.4602011", "0.4600921", "0.45939714", "0.45939714", "0.45902905", "0.45899138", "0.45830154", "0.45704493", "0.4563135", "0.45616022", "0.4560944", "0.45576802", "0.45576", "0.4554847", "0.45522264", "0.45483226", "0.4548173", "0.45434994", "0.454241", "0.4539563", "0.45308405", "0.4529194", "0.45285738", "0.45248768", "0.45243695", "0.4522357", "0.45213065", "0.45154452", "0.45091966", "0.44915235" ]
0.6819747
0
Record a single time metric, merging the data with any data from prior time metrics with the same name and scope.
def record_time_metric(self, metric):

    if not self.__settings:
        return

    # Scope is forced to be empty string if None as
    # scope of None is reserved for apdex metrics.

    key = (metric.name, metric.scope or '')
    stats = self.__stats_table.get(key)
    if stats is None:
        stats = TimeStats(call_count=1,
                total_call_time=metric.duration,
                total_exclusive_call_time=metric.exclusive,
                min_call_time=metric.duration,
                max_call_time=metric.duration,
                sum_of_squares=metric.duration ** 2)
        self.__stats_table[key] = stats
    else:
        stats.merge_time_metric(metric)

    return key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def record_time_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_time_metric(metric)", "def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)", "def merge_time_metric(self, metric):\n\n self.merge_raw_time_metric(metric.duration, metric.exclusive)", "def record_custom_metric(self, name, value):\n key = (name, '')\n\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(key)\n if stats is None:\n self.__stats_table[key] = new_stats\n else:\n stats.merge_stats(new_stats)\n\n return key", "def record(self, time, increment):\n raise NotImplementedError(\"Abstract method not implemented.\")", "def record_data(self, time, x, tau):\n\n self.t_values.append(np.copy(time))\n self.x_values.append(np.copy(x))\n self.tau_values.append(np.copy(tau))", "def update_time(self):\n time_metrics = self._fetch_time_metrics_and_clear()\n self._logger.info('update_time. time_metrics = %s', build_metrics_times_data(time_metrics))", "def record(self, time, increment):\n\n if time < self._initialTime:\n return\n\n if self._lastObsValue > self._max:\n self._max = self._lastObsValue\n if time == self._initialTime:\n self._min = self._lastObsValue\n elif self._lastObsValue < self._min:\n self._min = self._lastObsValue\n\n self._n += 1\n self._area += self._lastObsValue * (time - self._lastObsTime)\n self._areaSquared += (self._lastObsValue ** 2) * (time - self._lastObsTime)\n self._lastObsTime = time\n self._lastObsValue += increment", "def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)", "def observe_first(self, env: dm_env.Environment, timestep: dm_env.TimeStep\n ) -> None:\n self._metrics = {}\n self._accumulate_metrics(env)", "def record_metrics(self, pid, metrics):\n\n for _metric, _metric_value in metrics.items():\n if not self.__metrics_history[pid].get(_metric):\n self.__metrics_history[pid][_metric] = []\n self.__metrics_history[pid][_metric].append(_metric_value)\n # only keep the last 2 running history for any metric\n self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][\n -2:\n ]", "def time_metrics(self, stats, root, parent):\n\n product = self.product\n operation = self.operation or 'other'\n target = self.target\n\n # Determine the scoped metric\n\n statement_metric_name = 'Datastore/statement/%s/%s/%s' % (product,\n target, operation)\n\n operation_metric_name = 'Datastore/operation/%s/%s' % (product,\n operation)\n\n if target:\n scoped_metric_name = statement_metric_name\n else:\n scoped_metric_name = operation_metric_name\n\n yield TimeMetric(name=scoped_metric_name, scope=root.path,\n duration=self.duration, 
exclusive=self.exclusive)\n\n # Unscoped rollup metrics\n\n yield TimeMetric(name='Datastore/all', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/all' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n if root.type == 'WebTransaction':\n yield TimeMetric(name='Datastore/allWeb', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allWeb' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n else:\n yield TimeMetric(name='Datastore/allOther', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allOther' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped operation metric\n\n yield TimeMetric(name=operation_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped statement metric\n\n if target:\n yield TimeMetric(name=statement_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped instance Metric\n\n if self.instance_hostname and self.port_path_or_id:\n\n instance_metric_name = 'Datastore/instance/%s/%s/%s' % (product,\n self.instance_hostname, self.port_path_or_id)\n\n yield TimeMetric(name=instance_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)", "def _push(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()", "def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with self._lock:\n self._batch[identity] = value\n self._timestamps[identity] = int(time.time() * 1000.0)", "def _record(self, metric_point: MetricPoint,\n measurement_map: MeasurementMap):\n metric_name = metric_point.metric_name\n tags = metric_point.tags\n\n metric = self._registry.get(metric_name)\n # Metrics should be always registered dynamically.\n assert metric\n\n tag_map = tag_map_module.TagMap()\n for key, value in tags.items():\n tag_key = tag_key_module.TagKey(key)\n tag_value = tag_value_module.TagValue(value)\n tag_map.insert(tag_key, tag_value)\n\n metric_value = metric_point.value\n measurement_map.measure_float_put(metric.measure, metric_value)\n # NOTE: When we record this metric, timestamp will be renewed.\n measurement_map.record(tag_map)", "def _fetch_time_metrics_and_clear(self):\n with self._time_rlock:\n time_metrics = self._time_metrics\n self._time_metrics = defaultdict(LatencyTracker)\n\n return time_metrics", "def record_apdex_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Note that because we are using a scope here of an empty string\n # we can potentially clash with an unscoped metric. 
Using None,\n # although it may help to keep them separate in the agent will\n # not make a difference to the data collector which treats None\n # as an empty string anyway.\n\n key = (metric.name, '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = ApdexStats(apdex_t=metric.apdex_t)\n self.__stats_table[key] = stats\n stats.merge_apdex_metric(metric)\n\n return key", "def build_metrics_times_data(time_metrics):\n return [{'name': name, 'latencies': latencies.get_latencies()}\n for name, latencies in iteritems(time_metrics)]", "def put(self, metric, values, timestamp=None):\n if timestamp is None:\n timestamp = time.time()\n now_date = datetime.datetime.fromtimestamp(timestamp)\n\n if self.last is None:\n self.last = timestamp\n return\n\n self.last = timestamp\n\n values = [str(d) for d in [now_date, timestamp]+values]\n\n with open(self.filename, \"at\") as df:\n df.write(\"{}\\n\".format(\",\".join(values)))", "def log(self, label, times, overlapping=False):\r\n self._timings.append(Timing(label, times, overlapping))", "def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)", "def log_metric(self, name: str, value):\n self.metrics[name] = value\n\n self._sync_log_event()", "def save_metric(key, value, timestamp=None):\n\n from analytics_client.settings import _ANALYTICS_ENABLED\n\n if not _ANALYTICS_ENABLED:\n return None\n\n from analytics_client.tasks import store_metric\n\n # Set a timestamp if it is undefined\n _timestamp = timestamp\n if _timestamp is None:\n _timestamp = datetime.now()\n\n store_metric.delay(Metric(key=key, value=value, timestamp=_timestamp))", "def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]", "def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def add_time(data, t):\n data['year'] = t.year\n data['month'] = t.month\n data['day'] = t.day\n data['hour'] = t.hour\n data['minute'] = t.minute\n data['second'] = t.second", "def _record_current_time(self):\n now = time.time()\n delta = now - self._last_time\n self._last_time = now\n self._timing_recorder.append(delta)", "def log(self, metric, value, source, timestamp=None):\n if timestamp is None:\n timestamp = datetime.now()\n\n sql = \"insert into measurement(metric, value, source, timestamp) values('{0}', {1}, '{2}', '{3}');\".format(\n metric, value, source, timestamp)\n\n self._execute_sql(sql)", "def submit_metric():\n\n gson = json.loads(request.get_json())\n\n new_point = DataPoint(\n 
computer_name=gson[\"computer_name\"],\n cpu_percentage=gson[\"cpu_percentage\"],\n memory_percentage=gson[\"memory_percentage\"],\n timestamp=gson[\"timestamp\"]\n )\n\n with lock:\n if not instances.get(new_point.computer_name):\n instances[new_point.computer_name] = Timeline(\n maxsize=int(os.environ.get(\"COLLECTOR_BUFFER_SIZE\"))\n )\n instances[new_point.computer_name].append(new_point)\n\n return Response(status=200)", "def _addTiming(self, key, duration):\n pass", "def __push_metric(self, metric_name, value, timestamp):\n sock = self.__get_carbon_socket()\n _data = \"%s %d %d\\n\" % (metric_name, value, timestamp)\n LOGGER.debug(\"SEND: %s\", _data.replace(\"\\n\", \"\"))\n sock.send(_data.encode('utf-8'))", "def RegisterMetric(metadata):\n with _init_lock:\n _metadatas.append(metadata)\n if _stats_singleton is not None:\n _stats_singleton.RegisterMetric(metadata)", "def publish_metric(name, value, type):\n t = time.time()\n m = json.dumps({'monitor':name, type:value, 'time':t})\n r = redis.StrictRedis(host='localhost', port=6379, db=0) \n r.lpush('sensor_readings',m)", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def save_data(self, gauge_name, date_key, data):\n pass", "def insert_new_measurement(database: Database, data_model, metric: Dict, measurement: Dict) -> Dict:\n if \"_id\" in measurement:\n del measurement[\"_id\"]\n metric_type = data_model[\"metrics\"][metric[\"type\"]]\n direction = metric.get(\"direction\") or metric_type[\"direction\"]\n for scale in metric_type[\"scales\"]:\n value = calculate_measurement_value(data_model, metric, measurement[\"sources\"], scale)\n status = determine_measurement_status(metric, direction, value)\n measurement[scale] = dict(value=value, status=status, direction=direction)\n for target in (\"target\", \"near_target\", \"debt_target\"):\n measurement[scale][target] = determine_target(\n metric, measurement, metric_type, scale, cast(TargetType, target))\n measurement[\"start\"] = measurement[\"end\"] = iso_timestamp()\n database.measurements.insert_one(measurement)\n del measurement[\"_id\"]\n return measurement", "def _SnapMetrics(deadline):\n next_deadline = deadline + frequency_seconds\n callback = partial(_SnapMetrics, next_deadline)\n cls._timeouts[group_key] = IOLoop.current().add_timeout(next_deadline, callback)\n\n sample = meter.sample()\n sample_json = json.dumps(sample)\n new_metric = Metric.Create(group_key, machine_id, deadline, sample_json)\n with util.Barrier(_UploadSuccess, _UploadError) as b:\n retry.CallWithRetryAsync(retry_policy, new_metric.Update, client=client, callback=b.Callback())", "def record_event(self, description, time=None, additional=None):\n if time is None:\n time = datetime.datetime.now()\n if additional is not None:\n self.history.append((time, (description, additional)))\n else:\n self.history.append((time, description))", "def record_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, value in metrics:\n self.record_custom_metric(name, value)", "def add_metric(self, metric):\n self.metrics.append(metric)\n self.estimate()", "def _save(self, data: MetricsDict) -> None:\n client = MlflowClient()\n try:\n run_id = self.run_id\n except DataSetError:\n # If run_id can't be found log_metric would create new run.\n run_id = 
None\n\n log_metric = (\n partial(client.log_metric, run_id)\n if run_id is not None\n else mlflow.log_metric\n )\n metrics = (\n self._build_args_list_from_metric_item(k, v) for k, v in data.items()\n )\n\n if self._logging_activated:\n for k, v, i in chain.from_iterable(metrics):\n log_metric(k, v, step=i)", "def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})", "def metrics_times(self, times_data):\n url = _METRICS_URL_TEMPLATE.format(base_url=self._events_api_url_base, endpoint='times')\n return self._post(url, times_data)", "def record(self, var_keys, value=None):\n\n for var_key in make_list(var_keys):\n\n # Create empty lists\n if 't' not in self.log:\n self.log['t'] = []\n if var_key not in self.log:\n self.log[var_key] = [None] * len(self.log['t'])\n\n if self.model.t not in self.log['t']:\n\n # Create empty slot for new documented time step\n for v in self.log.values():\n v.append(None)\n\n # Store time step\n self.log['t'][-1] = self.model.t\n\n if value is None:\n v = getattr(self, var_key)\n else:\n v = value\n\n self.log[var_key][-1] = v", "def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]", "def store_data(self,type_measurement,measurement):\n \n c = copy.deepcopy(measurement) \n self.write_queue.add((type_measurement,c)) \n if len(self.write_queue)>=self.buffer_size:\n self.write_data(self.write_queue)", "def metric_recorded(self, record):\n if record.name in self.headers and self.current_row is not None:\n if record.name == \"learning_rate\" and not record.is_scalar:\n # record is a list of scalars\n value = \",\".join([f\"{lr:.4f}\" for lr in record.value])\n elif record.is_scalar and isinstance(record.value, int):\n value = str(record.value)\n else:\n assert record.is_scalar\n\n value = f\"{record.value:.4f}\"\n\n self.current_row[record.name] = value", "def copy_and_append_time_dimension_to_netcdf_dataset(self,dataset_in,dataset_out):\n\n for dim_name,dim_obj in list(dataset_in.dimensions.items()):\n dataset_out.createDimension(dim_name,len(dim_obj)\n if not dim_obj.isunlimited() else None)\n dataset_out.createDimension('time',None)\n times = dataset_out.createVariable(\"time\",'f8',(\"time\",))\n times.units = \"years since 0001-01-01 00:00:00.0\"\n times.calendar = \"proleptic_gregorian\"\n times[0] = np.array([0.0])\n for var_name, var_obj in list(dataset_in.variables.items()):\n new_var = dataset_out.createVariable(var_name,var_obj.datatype,var_obj.dimensions\n if (len(var_obj.dimensions) <= 1\n or var_name == 'AREA') else\n [\"time\"] + list(var_obj.dimensions))\n if len(var_obj.dimensions) <= 1 or var_name == 'AREA':\n new_var[:] = var_obj[:]\n else:\n new_var[0,:] = var_obj[:]\n new_var.setncatts({attr_name: var_obj.getncattr(attr_name) for attr_name in var_obj.ncattrs()})", "def add(self, key):\n self.times[key] = time.time()", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def _add_to_queue(key, value, step, time, run_id):\n met = Metric(key=key, value=value, timestamp=time, step=step)\n 
_metric_queue.append((run_id, met))\n if len(_metric_queue) > _MAX_METRIC_QUEUE_SIZE:\n _thread_pool.submit(_flush_queue)", "def save_measures(start_time, end_time, log_path=LOG_FILE):\n data = {\n \"start\": start_time,\n \"finish\": end_time,\n \"duration\": end_time - start_time,\n }\n try:\n with open(log_path, \"r\", encoding=DEFAULT_FILE_ENCODING) as data_fp:\n log = json.load(data_fp)\n log[\"measures\"].append(data)\n if len(log[\"measures\"]) > 100:\n del log[\"measures\"][0]\n except (IOError, ValueError):\n log = {\"measures\": [data]}\n\n with open(log_path, \"w\", encoding=DEFAULT_FILE_ENCODING) as file_pt:\n json.dump(log, file_pt)", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def log_metrics(self, metrics: dict):\n self.metrics.update(metrics)\n\n self._sync_log_event()", "def publishTimeTaken(self, data):\n time_taken = Float32()\n time_taken.data = data\n self.time_taken_pub.publish(data)", "def _add_to_recently_called(self, match, reporter):\n if utils.istrcmp(match.player1_tag, reporter):\n other = match.player2_tag\n else:\n other = match.player1_tag\n self.recently_called[other] = time()", "def write_timed(\n self, data: AnyWritableBuf, freq: int | Timer, /, *, mode: int = NORMAL\n ) -> None:", "def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)", "def add_time_point(self,time, mdv_instance):\n\n self.mdvtc[time] = mdv_instance", "def write_metric(self, metric_name: str, metric_value: Union[float, int]):\n self._metrics.append(Metric(metric_name, metric_value))", "def addtomemorycollectiontime(self, datetime):\n self._memorycollectiontime.append(datetime)", "def add_timecard(self,time,name):\n id = self.find_employee_id(name)\n if id in self.timecard:\n self.timecard[id].append(time)\n else:\n self.timecard[id] = [time]\n return self.timecard", "def add_sample(self, time, value):\n\t\tif self.buf_full:\n\t\t\tself.buf.pop(0)\n\t\t\n\t\tself.buf.append((time, value))", "def add_point(self, time=None, location=None):\n\n # calculate the bounds for time and location and create or update the bounds for the coordinate axis\n # hold onto the values so you can put them in an hdf...\n\n self._element_count.value += 1\n\n assert time, 'Can not create a point without a time value'\n\n assert location and len(location) == (len(self.coordinate_axis)-1), 'Must provide the correct number of location values'\n\n #@todo add some more type checking!\n\n self._coordinates[self.coordinate_axis[0]]['records'].append(time)\n\n for ind in xrange(len(location)):\n self._coordinates[self.coordinate_axis[ind+1]]['records'].append(location[ind])\n\n return self._element_count.value -1 # the actual index into the records list", "def addKey(self, time, value) -> None:\n ...", "def __init__(self, metrics_to_record):\n self.tape = {}\n\n for metric_name in metrics_to_record:\n self.tape[metric_name] = []", "def _write_time_cube(self, cube, key_list):\n data = cube.data[:]\n coords = cube.coord('time')[:]\n for t in range(0, data.shape[0]):\n value = round_variable(self.input_data.get_value(\n InputType.VARIABLE)[0], data[t])\n with iris.FUTURE.context(cell_datetime_objects=True):\n 
time_str = coords[t].cell(\n 0).point.strftime('%Y-%m-%d')\n try:\n self.data_dict[time_str].append(value)\n except KeyError:\n key_list.append(time_str)\n self.data_dict[time_str] = [value]", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def observe(self, env: dm_env.Environment, timestep: dm_env.TimeStep,\n action: np.ndarray) -> None:\n self._accumulate_metrics(env)", "def add(self, timing_dict: Dict[str, float]):\n self._timings.append(timing_dict)\n if not self.steps:\n self.steps = list(timing_dict.keys())", "def add_metrics(self, metric_dict: dict):\n self.metric_dict.update(metric_dict)", "def insert_machine_time(self, start: int, end: int):\n today = datetime.date.today().strftime(\"%Y/%m/%d\")\n machine_time = '{}#{}'.format(start, end)\n time_used = end - start\n\n try:\n usage_today = usage.Usage.objects.get(machine_id=self.model.id, date=today)\n except DocumentDoesNotExists:\n usage_today = usage.Usage()\n usage_today.date = today\n usage_today.machine_id = self.model.id\n usage_today.name = self.model.name\n usage_today.times = []\n usage_today.total_time = 0\n\n usage_today.times.append(machine_time)\n usage_today.total_time += time_used\n usage_today.save()\n\n if not self.model.open:\n self.model.open = True\n self.model.save()", "def set_timestamp(self, data):\n if \"hittime\" in data: # an absolute timestamp\n data[\"qt\"] = self.hittime(timestamp=data.pop(\"hittime\", None))\n if \"hitage\" in data: # a relative age (in seconds)\n data[\"qt\"] = self.hittime(age=data.pop(\"hitage\", None))", "def add_score(self, difficulty, time, name):\n self.database[difficulty].insert_one({'time': time, 'name': name})", "def storeM(self, m, k):\n self.t.timeline[\"week\"+str(self.week)][k].mood.set_value(m)", "def add(self, message, time):\n if message not in self.results.keys():\n self.results[message] = [time]\n\n self.results[message].append(time)", "def medication_time(intent_request):\n session_attributes = helper.get_attribute(intent_request, 'sessionAttributes')\n current_intent = helper.get_attribute(intent_request, 'currentIntent')\n slots = helper.get_attribute(current_intent, 'slots')\n slot_details = helper.get_attribute(current_intent, 'slotDetails')\n intent_name = helper.get_attribute(current_intent, 'name')\n if helper.is_validation_request(intent_request):\n return validate_medication_time(intent_name, session_attributes, slot_details, slots)\n\n med_taken_time = slots[SLOT_MED_TIME]\n hh = int(med_taken_time.split(':')[0])\n mm = int(med_taken_time.split(':')[1])\n\n user = helper.lookup_user(session_attributes)\n local_time_reported = get_current_time_for_user(user)\n now_with_no_timezone = datetime.now()\n med_taken_datetime = now_with_no_timezone.replace(hour=hh, minute=mm, second=0)\n local_med_time = pytz.timezone(user.timezone).localize(med_taken_datetime)\n\n # TODO: for production, handle cases when user reported the same info multiple times.\n med_diary.log_med(user.uid, time_reported=local_time_reported, med_taken=True, time_taken=local_med_time)\n\n update_survey_completion(user.uid, local_time_reported, BOT_MEDICATION_NAME)\n session_attributes['NextBot'] = get_next_survey_bot(user.uid, local_time_reported)\n 
return helper.close(session_attributes, helper.FulfillmentState.FULFILLED,\n message_content=msg_strings.get('FINISH_MED_DIARY'))", "def log(self, game: str, outcome: str):\n current_time = datetime.now()\n self.user.record.append([current_time.strftime(\"%c\"), game, outcome, self.user.balance])", "def add(self, timestamp):\n self.total_count += 1\n self.times.append(timestamp)", "def record(self, config, value, time_ms):\n raise NotImplementedError", "def end_time(self, t):\r\n # Increase temperature while silent.\r\n if np.count_nonzero(self.next_note) == 0:\r\n self.silent_time += 1\r\n if self.silent_time >= NOTES_PER_BAR:\r\n self.temperature += 0.1\r\n else:\r\n self.silent_time = 0\r\n self.temperature = self.default_temp\r\n \r\n self.notes_memory.append(self.next_note)\r\n # Consistent with dataset representation\r\n self.beat_memory.append(compute_beat(t, NOTES_PER_BAR))\r\n self.results.append(self.next_note)\r\n # Reset next note\r\n self.next_note = np.zeros((NUM_NOTES, NOTE_UNITS))\r\n return self.results[-1]", "def append_data(self, topic, in_data, fieldname=None):\n data = dict(in_data)\n for key in data:\n if key != 'dateTime':\n data[key]=to_float(data[key])\n payload = {}\n payload['wind_data'] = False\n if fieldname in self.wind_fields:\n payload['wind_data'] = True\n\n queue = self._get_queue(topic)\n use_server_datetime = self._get_value('use_server_datetime', topic)\n\n self._queue_size_check(queue, self._get_max_queue(topic))\n\n if 'dateTime' not in data or use_server_datetime:\n data['dateTime'] = time.time()\n if 'usUnits' not in data:\n data['usUnits'] = self._get_unit_system(topic)\n\n datetime_format = self._get_value('datetime_format', topic)\n if datetime_format and 'dateTime' in data:\n data['dateTime'] = self._to_epoch(data['dateTime'], datetime_format, self._get_value('offset_format', topic))\n\n data['dateTime'] = to_int(data['dateTime'])\n # if type(data['dateTime']) is str:\n # data['dateTime'] = int(float(data['dateTime']))\n # td = data['dateTime']\n # self.logger.info(weeutil.weeutil.timestamp_to_string(data['dateTime']))\n self.logger.info(\"TopicManager Added to queue %s %s %s: %s\"\n %(topic, self._lookup_topic(topic),\n # weeutil.weeutil.timestamp_to_string(td), to_sorted_string(data)))\n weeutil.weeutil.timestamp_to_string(data['dateTime']), to_sorted_string(data))) \n payload['data'] = data\n queue.append(payload)", "def _trigger(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()", "def put_time(self, step, value):\n assert step > 0, \"Step must be larger than 0.\"\n # XXX: Currently the time axis is not unlimited due to a limitation\n # in h5netcdf - thus no new time steps can be created after the\n # initialization.\n assert step <= self._f.dimensions[\"time_step\"]\n\n self._f.variables[\"time_whole\"][step - 1] = value", "def timeit(self, metric_name: str):\n suffix = metric_name.rsplit('_', 1)[-1]\n if suffix not in ('time', 'timer'):\n raise ValueError(f'The metric name for a timer should end with '\n f'suffix \"_time\" or \"_timer\": got metric name '\n f'{metric_name!r}')\n start_time = time.time()\n try:\n yield\n finally:\n self.add_metrics({metric_name: time.time() - start_time})", "def record_time(t):\n\n f = open('time.out', 'w')\n f.write(str(t))\n f.close()", "def write_measurement(self, name: str, measurement: dict):\n self._measurements.append(Measurement(name, 
measurement))", "def add_timestamp(self, key='timestamp'):\n value = timestamp()\n self.add_metadata(key, value)", "def send_metrics(timestamp: Optional[float] = None) -> bool:\n\n def new_point(metric_name: str, result: float):\n series = monitoring_v3.types.TimeSeries()\n series.metric.type = f\"custom.googleapis.com/{metric_name}\"\n\n point = series.points.add()\n point.interval.end_time.seconds = now\n\n if isinstance(result, float):\n point.value.double_value = result\n else:\n point.value.int64_value = result\n return series\n\n now = int(time.time())\n prev_minute_tstamp = timestamp or (now - (now % 60) - 60)\n metrics_pattern = f\"{Monitoring.ACC_PREFIX}_{prev_minute_tstamp}_*\"\n monitoring_keys = redis_client.keys(metrics_pattern)\n all_series = []\n for metric_key in monitoring_keys:\n raw_value = redis_client.get(metric_key)\n values: List[str] = raw_value.split(\"|\") # type: ignore\n metric_name = values.pop(0) # metric name\n op = values.pop(0) # operation - SUM or AVG\n typ = values.pop(0) # INT or FLOAT\n if typ == \"INT\":\n result = sum(map(int, values))\n if op == \"AVG\":\n result = result // len(values)\n else:\n result = sum(map(float, values)) # type: ignore\n if op == \"AVG\":\n result = result / len(values) # type: ignore\n\n all_series.append(new_point(metric_name, result))\n if op == \"AVG\": # create count for AVG metric too\n all_series.append(new_point(f\"{metric_name}_COUNT\", len(values)))\n\n try:\n monitor_client.create_time_series(project_path, all_series)\n except InvalidArgument:\n logging.exception(\"mark_point failed\")\n return False\n else:\n return True", "def add_timeline(self, t):\n\n self.timelines.update({t.name : t})", "def time_xskillscore_metric(self, metric, dim):\n metric(self.ds[\"tos\"], self.ds[\"sos\"], dim=dim).compute()", "def log_metric(key, value, step=None):\n mlflow.log_metric(key, value, step=step)", "def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def recordStart(self, event_key):\n self.start_times[event_key] = time.time()", "def register_recorder(self, groupname, tablename, recorder,\n timeseries=False):\n self.open_db()\n tab = self.get_table(groupname, tablename)\n if timeseries is False:\n self.add_row(tab, recorder())\n else:\n self.recorders.append((tab, recorder))", "def add_elapsed(self, key='elapsed'):\n now = datetime.datetime.now()\n self.add_metadata(key, str(now - self._open_time))", "def log_metric(self, name, val, step):\n raise NotImplementedError", "def insert_timeseries_data(message, device):\n # Get the product and check for any preprocessors\n product = device.product\n\n preprocessors = product.preprocessors.all()\n\n for preprocessor in preprocessors:\n preprocessor = get_preprocessor(preprocessor.preprocessor_name)\n if preprocessor:\n preprocessor(message.body, device=device, ts_cls=TimeSeriesData)\n else:\n logger.warning(\"No preprocessor handler called %s on product %s\",\n preprocessor.preprocessor_name, product.name)\n\n for sensor in device.sensors.all():\n sensor_name = sensor.sensor_type.sensor_name\n if message.body.get(sensor_name) is not None:\n new_datum = TimeSeriesData(\n ts=message.timestamp,\n sensor=sensor,\n value=message.body[sensor_name]\n )\n new_datum.save()\n\n # Evaluate any definitions data with new datapoint\n context = device.get_context(context=message.body, 
time=message.timestamp)\n logger.debug(\"device context %s\", context)\n redis_cache = RedisEventDefinitions(get_redis())\n\n triggered_events = device.evaluate_all_event_definitions(\n context, redis_cache, check_product=True\n )\n\n send_triggered_events(triggered_events, device, message.body)" ]
[ "0.6278324", "0.62258005", "0.6140665", "0.6003981", "0.58658415", "0.5658924", "0.5652999", "0.5612092", "0.5606227", "0.5472987", "0.5470835", "0.54608715", "0.5427048", "0.53969103", "0.53553826", "0.533382", "0.52751833", "0.5262168", "0.52570385", "0.52540565", "0.5227859", "0.52182734", "0.5213903", "0.51284367", "0.5117981", "0.5115526", "0.51100886", "0.51041216", "0.50835633", "0.5070457", "0.5029585", "0.5021722", "0.5020142", "0.50060886", "0.5004785", "0.5004785", "0.49947816", "0.4985825", "0.4966654", "0.49363664", "0.49293083", "0.4919601", "0.4915958", "0.4913854", "0.49129418", "0.49100903", "0.48955163", "0.4888135", "0.4878527", "0.48748496", "0.48692688", "0.4864525", "0.48588428", "0.485383", "0.48370576", "0.48363736", "0.4834045", "0.48292398", "0.48160183", "0.48145452", "0.4806099", "0.47999468", "0.47967407", "0.47885552", "0.47865772", "0.47639772", "0.47595274", "0.47446585", "0.47410363", "0.47161654", "0.47073466", "0.47053698", "0.46882793", "0.46865186", "0.4682797", "0.46752962", "0.46679583", "0.4662597", "0.46616274", "0.465725", "0.46551746", "0.46486583", "0.46475363", "0.46473897", "0.4647003", "0.4642787", "0.46423668", "0.4641514", "0.46340296", "0.4626262", "0.4621619", "0.4618651", "0.46145216", "0.4609746", "0.46093825", "0.46054706", "0.46035832", "0.46027783", "0.45999846", "0.45916948" ]
0.7506857
0
Record the time metrics supplied by the iterable for a single transaction, merging the data with any data from prior time metrics with the same name and scope.
def record_time_metrics(self, metrics):
    if not self.__settings:
        return
    for metric in metrics:
        self.record_time_metric(metric)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flush(self):\n with self._lock:\n batch = self._batch\n timestamps = self._timestamps\n\n items = []\n for identity, value in batch.items():\n metric = {}\n typ, name, tags = identity\n metric[\"name\"] = name\n if typ:\n metric[\"type\"] = typ\n else:\n metric[\"timestamp\"] = timestamps[identity]\n\n if tags:\n metric[\"attributes\"] = dict(tags)\n\n metric[\"value\"] = value\n items.append(metric)\n\n items = tuple(items)\n\n batch.clear()\n timestamps.clear()\n\n common = self._common.copy()\n common[\"timestamp\"] = self._interval_start\n now = int(time.time() * 1000.0)\n interval = now - self._interval_start\n common[\"interval.ms\"] = interval\n\n self._interval_start = now\n\n return items, common", "def time_metrics(self, stats, root, parent):\n\n product = self.product\n operation = self.operation or 'other'\n target = self.target\n\n # Determine the scoped metric\n\n statement_metric_name = 'Datastore/statement/%s/%s/%s' % (product,\n target, operation)\n\n operation_metric_name = 'Datastore/operation/%s/%s' % (product,\n operation)\n\n if target:\n scoped_metric_name = statement_metric_name\n else:\n scoped_metric_name = operation_metric_name\n\n yield TimeMetric(name=scoped_metric_name, scope=root.path,\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped rollup metrics\n\n yield TimeMetric(name='Datastore/all', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/all' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n if root.type == 'WebTransaction':\n yield TimeMetric(name='Datastore/allWeb', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allWeb' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n else:\n yield TimeMetric(name='Datastore/allOther', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allOther' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped operation metric\n\n yield TimeMetric(name=operation_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped statement metric\n\n if target:\n yield TimeMetric(name=statement_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped instance Metric\n\n if self.instance_hostname and self.port_path_or_id:\n\n instance_metric_name = 'Datastore/instance/%s/%s/%s' % (product,\n self.instance_hostname, self.port_path_or_id)\n\n yield TimeMetric(name=instance_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)", "def record_time_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Scope is forced to be empty string if None as\n # scope of None is reserved for apdex metrics.\n\n key = (metric.name, metric.scope or '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = TimeStats(call_count=1,\n total_call_time=metric.duration,\n total_exclusive_call_time=metric.exclusive,\n min_call_time=metric.duration,\n max_call_time=metric.duration,\n sum_of_squares=metric.duration ** 2)\n self.__stats_table[key] = stats\n else:\n stats.merge_time_metric(metric)\n\n return key", "def record_transaction(self, transaction):\n\n if not self.__settings:\n return\n\n settings = self.__settings\n\n # Record the apdex, value and time metrics generated from the\n # transaction. 
Whether time metrics are reported as distinct\n # metrics or into a rollup is in part controlled via settings\n # for minimum number of unique metrics to be reported and thence\n # whether over a time threshold calculated as percentage of\n # overall request time, up to a maximum number of unique\n # metrics. This is intended to limit how many metrics are\n # reported for each transaction and try and cut down on an\n # explosion of unique metric names. The limits and thresholds\n # are applied after the metrics are reverse sorted based on\n # exclusive times for each metric. This ensures that the metrics\n # with greatest exclusive time are retained over those with\n # lesser time. Such metrics get reported into the performance\n # breakdown tab for specific web transactions.\n\n self.record_apdex_metrics(transaction.apdex_metrics(self))\n\n self.merge_custom_metrics(transaction.custom_metrics.metrics())\n\n self.record_time_metrics(transaction.time_metrics(self))\n\n # Capture any errors if error collection is enabled.\n # Only retain maximum number allowed per harvest.\n\n error_collector = settings.error_collector\n\n if (error_collector.enabled and settings.collect_errors and\n len(self.__transaction_errors) <\n settings.agent_limits.errors_per_harvest):\n self.__transaction_errors.extend(transaction.error_details())\n\n self.__transaction_errors = self.__transaction_errors[:\n settings.agent_limits.errors_per_harvest]\n\n if (error_collector.capture_events and\n error_collector.enabled and\n settings.collect_error_events):\n events = transaction.error_events(self.__stats_table)\n for event in events:\n self._error_events.add(event, priority=transaction.priority)\n\n # Capture any sql traces if transaction tracer enabled.\n\n if settings.slow_sql.enabled and settings.collect_traces:\n for node in transaction.slow_sql_nodes(self):\n self.record_slow_sql_node(node)\n\n # Remember as slowest transaction if transaction tracer\n # is enabled, it is over the threshold and slower than\n # any existing transaction seen for this period and in\n # the historical snapshot of slow transactions, plus\n # recording of transaction trace for this transaction\n # has not been suppressed.\n\n transaction_tracer = settings.transaction_tracer\n\n if (not transaction.suppress_transaction_trace and\n transaction_tracer.enabled and settings.collect_traces):\n\n # Transactions saved for Synthetics transactions\n # do not depend on the transaction threshold.\n\n self._update_synthetics_transaction(transaction)\n\n threshold = transaction_tracer.transaction_threshold\n\n if threshold is None:\n threshold = transaction.apdex_t * 4\n\n if transaction.duration >= threshold:\n self._update_slow_transaction(transaction)\n\n # Create the transaction event and add it to the\n # appropriate \"bucket.\" Synthetic requests are saved in one,\n # while transactions from regular requests are saved in another.\n\n if transaction.synthetics_resource_id:\n event = transaction.transaction_event(self.__stats_table)\n self._synthetics_events.add(event)\n\n elif (settings.collect_analytics_events and\n settings.transaction_events.enabled):\n\n event = transaction.transaction_event(self.__stats_table)\n self._transaction_events.add(event, priority=transaction.priority)\n\n # Merge in custom events\n\n if (settings.collect_custom_events and\n settings.custom_insights_events.enabled):\n self.custom_events.merge(transaction.custom_events)\n\n # Merge in span events\n\n if (settings.distributed_tracing.enabled and\n 
settings.span_events.enabled and settings.collect_span_events):\n if settings.infinite_tracing.enabled:\n for event in transaction.span_protos(settings):\n self._span_stream.put(event)\n elif transaction.sampled:\n for event in transaction.span_events(self.__settings):\n self._span_events.add(event, priority=transaction.priority)", "def build_metrics_times_data(time_metrics):\n return [{'name': name, 'latencies': latencies.get_latencies()}\n for name, latencies in iteritems(time_metrics)]", "async def update_trade_stats(self):\n\n summary_keys = [base for base in config['min_base_volumes']] + ['global']\n summaries = {\n key: {\n 'open_count': 0,\n 'buys': 0,\n 'rebuys': 0,\n 'sells': 0,\n 'collect_sells': 0,\n 'soft_stop_sells': 0,\n 'total_profit': 0.0,\n 'total_loss': 0.0,\n 'total_fees': 0.0,\n 'balancer_refills': 0,\n 'balancer_remits': 0,\n 'balancer_stop_losses': 0,\n 'balancer_profit': 0.0,\n 'balancer_loss': 0.0,\n 'balancer_fees': 0.0,\n } for key in summary_keys\n }\n\n for pair in self.trades:\n if pair not in self.trade_stats[self.time_prefix]:\n continue\n\n base = pair.split('-', 1)[0]\n open_count = len(self.trades[pair]['open'])\n\n summaries[base]['open_count'] += open_count\n summaries[base]['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries[base]['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries[base]['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries[base]['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries[base]['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries[base]['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries[base]['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries[base]['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries[base]['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries[base]['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries[base]['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries[base]['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries[base]['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n summaries['global']['open_count'] += open_count\n summaries['global']['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries['global']['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries['global']['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries['global']['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries['global']['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries['global']['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries['global']['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries['global']['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries['global']['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries['global']['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries['global']['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n 
summaries['global']['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries['global']['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n for key in summaries:\n self.trade_stats[self.time_prefix][key]['buys'] = summaries[key]['buys']\n self.trade_stats[self.time_prefix][key]['rebuys'] = summaries[key]['rebuys']\n self.trade_stats[self.time_prefix][key]['sells'] = summaries[key]['sells']\n self.trade_stats[self.time_prefix][key]['collect_sells'] = summaries[key]['collect_sells']\n self.trade_stats[self.time_prefix][key]['soft_stop_sells'] = summaries[key]['soft_stop_sells']\n self.trade_stats[self.time_prefix][key]['total_profit'] = summaries[key]['total_profit']\n self.trade_stats[self.time_prefix][key]['total_loss'] = summaries[key]['total_loss']\n self.trade_stats[self.time_prefix][key]['total_fees'] = summaries[key]['total_fees']\n self.trade_stats[self.time_prefix][key]['balancer_refills'] = summaries[key]['balancer_refills']\n self.trade_stats[self.time_prefix][key]['balancer_remits'] = summaries[key]['balancer_remits']\n self.trade_stats[self.time_prefix][key]['balancer_profit'] = summaries[key]['balancer_profit']\n self.trade_stats[self.time_prefix][key]['balancer_loss'] = summaries[key]['balancer_loss']\n self.trade_stats[self.time_prefix][key]['balancer_fees'] = summaries[key]['balancer_fees']\n\n if summaries[key]['open_count'] > self.trade_stats[self.time_prefix][key]['most_open']:\n self.trade_stats[self.time_prefix][key]['most_open'] = summaries[key]['open_count']\n\n filter_items = [pair for pair in self.trades] + [base for base in config['min_base_volumes']] + ['global']\n self.save_attr('trade_stats', max_depth=2, filter_items=filter_items, filter_keys=[self.time_prefix])", "def with_time(self):\n if self.time_slices is None:\n raise FeatureError(\"Feature has no time reference.\")\n\n for i, datum in enumerate(self.data[self.name]):\n yield (self.time_slices[i], datum)", "def _fetch_time_metrics_and_clear(self):\n with self._time_rlock:\n time_metrics = self._time_metrics\n self._time_metrics = defaultdict(LatencyTracker)\n\n return time_metrics", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)", "def update_time(self):\n time_metrics = self._fetch_time_metrics_and_clear()\n self._logger.info('update_time. 
time_metrics = %s', build_metrics_times_data(time_metrics))", "def with_time(self):\n key = list(self.keys())[0]\n length = len(self[key])\n time_slices = self[key].time_slices\n\n if time_slices is None:\n raise FeatureError(\"FeatureCollection has no time reference.\")\n\n for i in range(length):\n res = {}\n for key, feature in self.items():\n res[key] = feature.data[feature.name][i]\n yield (time_slices[i], res)", "def _push(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()", "def merge_logs(self):\n ourlog = LogData()\n for l in self.data_set:\n ourlog.entries = ourlog.entries + l.entries\n ourlog.sort_time()\n self.finalized_data = ourlog", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def collect(self, revisions):\n nr_revisions = len(revisions)\n estimate = TimeEstimator(nr_revisions)\n for index, revision_number in enumerate(revisions):\n last_measurement = self.__get_last_measurement(revision_number)\n self.__write_measurement(last_measurement)\n self.__last_revision.set(revision_number)\n logging.info('Revision: %s, %s/%s, measurement date: %s, time remaining: %s', revision_number, index + 1,\n nr_revisions, self.__get_date(last_measurement), estimate.time_remaining(index))", "def record_all(self):\n for i in self.recorders:\n t = i[0]\n r = i[1]\n self.add_row(t, r())", "def record_event_times(counter, event_times):\n # type: (event_counter.EventCounter, List[float]) -> None\n for event_time in event_times:\n with mock_time(event_time):\n counter.record_event()", "def record_data(self, time, x, tau):\n\n self.t_values.append(np.copy(time))\n self.x_values.append(np.copy(x))\n self.tau_values.append(np.copy(tau))", "def generate_record(self, data_dictionaries, group_by):\n result = {}\n\n for one_measurement in data_dictionaries:\n time = one_measurement['datetime']\n\n if isinstance(time, str):\n if self.timezone:\n time = arrow.get(time).shift(hours=6) # TODO: fix utc conversion\n else:\n time = arrow.get(time)\n\n record = Record(self.name, self.lat, self.lon, self.height, time)\n\n del one_measurement['datetime']\n\n one_measurement = {k: float(v) for k, v in one_measurement.items()}\n\n record.merge(one_measurement)\n\n key = group_by(time)\n \n if key == '2016-04-01_00':\n break\n\n record_string = record.little_r_report()\n\n try:\n result[key].append(record_string)\n except KeyError:\n result[key] = [record_string]\n\n return result", "def log_batch(self, measurements):\n for m in measurements:\n 
logger.info(m)\n self.log(metric=m.metric, value=m.value, source=m.source, timestamp=m.timestamp)", "def Analyze(self):\n \n self._analyzeLogs()\n for user in self._start_times:\n self._result[user] = self._zipTimes(user)", "def add(self, timing_dict: Dict[str, float]):\n self._timings.append(timing_dict)\n if not self.steps:\n self.steps = list(timing_dict.keys())", "def spending_over_time_test_data():\n for i in range(30):\n # Define some values that are calculated and used multiple times\n transaction_id = i\n award_id = i + 1000\n awarding_agency_id = i + 2000\n toptier_awarding_agency_id = i + 3000\n subtier_awarding_agency_id = i + 4000\n funding_agency_id = i + 5000\n toptier_funding_agency_id = i + 6000\n subtier_funding_agency_id = i + 7000\n federal_action_obligation = i + 8000\n total_obligation = i + 9000\n federal_account_id = i + 10000\n treasury_account_id = i + 11000\n\n action_date = f\"20{i % 10 + 10}-{i % 9 + 1}-{i % 28 + 1}\"\n action_date_obj = datetime.datetime.strptime(action_date, \"%Y-%m-%d\")\n fiscal_month = generate_fiscal_month(action_date_obj)\n fiscal_year = generate_fiscal_year(action_date_obj)\n fiscal_action_date = f\"{fiscal_year}-{fiscal_month}-{i % 28 + 1}\"\n contract_award_type = [\"A\", \"B\", \"C\", \"D\"][i % 4]\n grant_award_type = [\"02\", \"03\", \"04\", \"05\"][i % 4]\n is_fpds = i % 2 == 0\n\n # Award\n baker.make(\n \"search.AwardSearch\",\n award_id=award_id,\n fain=f\"fain_{transaction_id}\" if not is_fpds else None,\n is_fpds=is_fpds,\n latest_transaction_id=transaction_id,\n piid=f\"piid_{transaction_id}\" if is_fpds else None,\n total_obligation=total_obligation,\n type=contract_award_type if is_fpds else grant_award_type,\n action_date=\"2020-01-01\",\n )\n\n # Federal, Treasury, and Financial Accounts\n baker.make(\n \"accounts.FederalAccount\",\n id=federal_account_id,\n parent_toptier_agency_id=toptier_awarding_agency_id,\n account_title=f\"federal_account_title_{transaction_id}\",\n federal_account_code=f\"federal_account_code_{transaction_id}\",\n )\n baker.make(\n \"accounts.TreasuryAppropriationAccount\",\n agency_id=f\"taa_aid_{transaction_id}\",\n allocation_transfer_agency_id=f\"taa_ata_{transaction_id}\",\n availability_type_code=f\"taa_a_{transaction_id}\",\n beginning_period_of_availability=f\"taa_bpoa_{transaction_id}\",\n ending_period_of_availability=f\"taa_epoa_{transaction_id}\",\n federal_account_id=federal_account_id,\n main_account_code=f\"taa_main_{transaction_id}\",\n sub_account_code=f\"taa_sub_{transaction_id}\",\n treasury_account_identifier=treasury_account_id,\n )\n tas_components = [\n f\"aid=taa_aid_{transaction_id}\"\n f\"main=taa_main_{transaction_id}\"\n f\"ata=taa_ata_{transaction_id}\"\n f\"sub=taa_sub_{transaction_id}\"\n f\"bpoa=taa_bpoa_{transaction_id}\"\n f\"epoa=taa_epoa_{transaction_id}\"\n f\"a=taa_a_{transaction_id}\"\n ]\n baker.make(\"awards.FinancialAccountsByAwards\", award_id=award_id, treasury_account_id=treasury_account_id)\n\n # Awarding Agency\n baker.make(\n \"references.Agency\",\n id=awarding_agency_id,\n subtier_agency_id=subtier_awarding_agency_id,\n toptier_agency_id=toptier_awarding_agency_id,\n )\n baker.make(\n \"references.ToptierAgency\",\n abbreviation=f\"toptier_awarding_agency_abbreviation_{transaction_id}\",\n name=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n toptier_agency_id=toptier_awarding_agency_id,\n toptier_code=f\"toptier_awarding_agency_code_{transaction_id}\",\n )\n baker.make(\n \"references.SubtierAgency\",\n 
abbreviation=f\"subtier_awarding_agency_abbreviation_{transaction_id}\",\n name=f\"subtier_awarding_agency_agency_name_{transaction_id}\",\n subtier_agency_id=subtier_awarding_agency_id,\n subtier_code=f\"subtier_awarding_agency_code_{transaction_id}\",\n )\n\n # Funding Agency\n baker.make(\n \"references.Agency\",\n id=funding_agency_id,\n subtier_agency_id=subtier_funding_agency_id,\n toptier_agency_id=toptier_funding_agency_id,\n )\n baker.make(\n \"references.ToptierAgency\",\n abbreviation=f\"toptier_funding_agency_abbreviation_{transaction_id}\",\n name=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n toptier_agency_id=toptier_funding_agency_id,\n toptier_code=f\"toptier_funding_agency_code_{transaction_id}\",\n )\n baker.make(\n \"references.SubtierAgency\",\n abbreviation=f\"subtier_funding_agency_abbreviation_{transaction_id}\",\n name=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n subtier_agency_id=subtier_funding_agency_id,\n subtier_code=f\"subtier_funding_agency_code_{transaction_id}\",\n )\n\n # Ref Country Code\n baker.make(\"references.RefCountryCode\", country_code=\"USA\", country_name=\"UNITED STATES\")\n\n # FPDS / FABS\n if is_fpds:\n baker.make(\n \"search.TransactionSearch\",\n transaction_id=transaction_id,\n is_fpds=is_fpds,\n action_date=action_date,\n fiscal_year=fiscal_year,\n fiscal_action_date=fiscal_action_date,\n award_id=award_id,\n awarding_agency_id=awarding_agency_id,\n business_categories=[f\"business_category_1_{transaction_id}\", f\"business_category_2_{transaction_id}\"],\n transaction_description=f\"This is a test description {transaction_id}\"\n if transaction_id % 2 == 0\n else None,\n federal_action_obligation=federal_action_obligation,\n generated_pragmatic_obligation=federal_action_obligation,\n award_amount=total_obligation,\n funding_agency_id=funding_agency_id,\n type=contract_award_type if is_fpds else grant_award_type,\n awarding_agency_code=f\"toptier_awarding_agency_code_{transaction_id}\",\n awarding_toptier_agency_name=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n awarding_toptier_agency_abbreviation=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n funding_agency_code=f\"toptier_funding_agency_code_{transaction_id}\",\n funding_toptier_agency_name=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n funding_toptier_agency_abbreviation=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n awarding_sub_tier_agency_c=f\"subtier_awarding_agency_code_{transaction_id}\",\n awarding_subtier_agency_name=f\"subtier_awarding_agency_agency_name_{transaction_id}\",\n funding_sub_tier_agency_co=f\"subtier_funding_agency_code_{transaction_id}\",\n funding_subtier_agency_name=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n funding_subtier_agency_abbreviation=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n recipient_name=f\"recipient_name_{transaction_id}\",\n recipient_unique_id=f\"{transaction_id:09d}\",\n recipient_hash=\"c687823d-10af-701b-1bad-650c6e680190\" if transaction_id == 21 else None,\n recipient_levels=[\"R\"] if i == 21 else [],\n extent_competed=f\"extent_competed_{transaction_id}\",\n recipient_location_country_code=\"USA\",\n recipient_location_country_name=\"USA\",\n recipient_location_state_code=f\"LE_STATE_CODE_{transaction_id}\",\n recipient_location_county_code=f\"{transaction_id:03d}\",\n recipient_location_county_name=f\"LE_COUNTY_NAME_{transaction_id}\",\n recipient_location_congressional_code=f\"{transaction_id:02d}\",\n 
recipient_location_zip5=f\"LE_ZIP5_{transaction_id}\",\n recipient_location_city_name=f\"LE_CITY_NAME_{transaction_id}\",\n naics_code=f\"{transaction_id}{transaction_id}\",\n naics_description=f\"naics_description_{transaction_id}\",\n piid=f\"piid_{transaction_id}\",\n pop_country_code=\"USA\",\n pop_country_name=\"UNITED STATES\",\n pop_state_code=f\"POP_STATE_CODE_{transaction_id}\",\n pop_county_code=f\"{transaction_id:03d}\",\n pop_county_name=f\"POP_COUNTY_NAME_{transaction_id}\",\n pop_zip5=f\"POP_ZIP5_{transaction_id}\",\n pop_congressional_code=f\"{transaction_id:02d}\",\n pop_city_name=f\"POP_CITY_NAME_{transaction_id}\",\n product_or_service_code=str(transaction_id).zfill(4),\n product_or_service_description=f\"psc_description_{transaction_id}\",\n type_of_contract_pricing=f\"type_of_contract_pricing_{transaction_id}\",\n type_set_aside=f\"type_set_aside_{transaction_id}\",\n tas_components=tas_components,\n )\n baker.make(\n \"references.NAICS\",\n code=f\"{transaction_id}\",\n description=f\"naics_description_{transaction_id}\",\n )\n baker.make(\n \"references.PSC\", code=str(transaction_id).zfill(4), description=f\"psc_description_{transaction_id}\"\n )\n else:\n baker.make(\n \"search.TransactionSearch\",\n transaction_id=transaction_id,\n is_fpds=is_fpds,\n action_date=action_date,\n fiscal_year=fiscal_year,\n fiscal_action_date=fiscal_action_date,\n award_id=award_id,\n awarding_agency_id=awarding_agency_id,\n business_categories=[f\"business_category_1_{transaction_id}\", f\"business_category_2_{transaction_id}\"],\n transaction_description=f\"This is a test description {transaction_id}\"\n if transaction_id % 2 == 0\n else None,\n federal_action_obligation=federal_action_obligation,\n generated_pragmatic_obligation=federal_action_obligation,\n award_amount=total_obligation,\n funding_agency_id=funding_agency_id,\n type=contract_award_type if is_fpds else grant_award_type,\n awarding_agency_code=f\"toptier_awarding_agency_code_{transaction_id}\",\n awarding_toptier_agency_name=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n awarding_toptier_agency_abbreviation=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n funding_agency_code=f\"toptier_funding_agency_code_{transaction_id}\",\n funding_toptier_agency_name=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n funding_toptier_agency_abbreviation=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n awarding_sub_tier_agency_c=f\"subtier_awarding_agency_code_{transaction_id}\",\n awarding_subtier_agency_name=f\"subtier_awarding_agency_agency_name_{transaction_id}\",\n funding_sub_tier_agency_co=f\"subtier_funding_agency_code_{transaction_id}\",\n funding_subtier_agency_name=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n funding_subtier_agency_abbreviation=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n recipient_name=f\"recipient_name_{transaction_id}\",\n recipient_unique_id=f\"{transaction_id:09d}\",\n recipient_hash=\"c687823d-10af-701b-1bad-650c6e680190\" if transaction_id == 21 else None,\n recipient_levels=[\"R\"] if i == 21 else [],\n cfda_number=f\"cfda_number_{transaction_id}\",\n fain=f\"fain_{transaction_id}\",\n recipient_location_country_code=\"USA\",\n recipient_location_country_name=\"USA\",\n recipient_location_state_code=f\"LE_STATE_CODE_{transaction_id}\",\n recipient_location_county_code=f\"{transaction_id:03d}\",\n recipient_location_county_name=f\"LE_COUNTY_NAME_{transaction_id}\",\n 
recipient_location_congressional_code=f\"{transaction_id:02d}\",\n recipient_location_zip5=f\"LE_ZIP5_{transaction_id}\",\n recipient_location_city_name=f\"LE_CITY_NAME_{transaction_id}\",\n pop_country_code=\"USA\",\n pop_country_name=\"UNITED STATES\",\n pop_state_code=f\"POP_STATE_CODE_{transaction_id}\",\n pop_county_code=f\"{transaction_id:03d}\",\n pop_county_name=f\"POP_COUNTY_NAME_{transaction_id}\",\n pop_zip5=f\"POP_ZIP5_{transaction_id}\",\n pop_congressional_code=f\"{transaction_id:02d}\",\n pop_city_name=f\"POP_CITY_NAME{transaction_id}\",\n tas_components=tas_components,\n )", "def aggregate_historical_trades(self, pair: list):\n raise NotImplementedError", "def align_time_frames(self, dict, name, freq_unit, group_unit='mean'):\n\t\tagg_dict = {}\n\t\tindex_use = dict[name]\n\t\tif freq_unit == 'M':\n\t\t\tfreq_unit = 'MS'\n\t\tfor name, df in dict.items():\n\t\t\ttime_series = pd.date_range(index_use.index[0],index_use.index[-1], freq=freq_unit)\n\t\t\t#print('time_series',time_series)\n\t\t\tdf = df.reindex(time_series)\n\t\t\t#print('df', df)\n\t\t\tarray = list(df.columns)\n\t\t\tarray.remove('value')\n\t\t\tdf = df.drop(array,axis=1)\n\t\t\tdf[name + ' Value'] = df['value']\n\t\t\tagg_dict[name + ' Value'] = group_unit\n\t\t\t\"\"\"\n\t\t\tdf[name + ' Min'] = df['value']\n\t\t\tdf[name + ' Max'] = df['value']\n\t\t\tdf[name + ' Average'] = df['value']\n\t\t\tagg_dict[name + ' Min'] = 'min'\n\t\t\tagg_dict[name + ' Max'] = 'max'\n\t\t\tagg_dict[name + ' Average'] = 'mean'\n\t\t\t\"\"\"\n\t\t\tdf = df.drop('value',axis=1)\n\t\t\t#print(df)\n\t\t\tdict[name] = df\n\t\treturn dict, agg_dict", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def record_custom_metric(self, name, value):\n key = (name, '')\n\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(key)\n if stats is None:\n self.__stats_table[key] = new_stats\n else:\n stats.merge_stats(new_stats)\n\n return key", "def do_timestamp_messages(self, messages):\n timestamp = self.env.now\n self.reception_records[timestamp] = messages\n log.debug(\"{} recorded {}\".format(self, self.reception_records))", "def record_time(times, enabled, *args):\n if not enabled:\n yield\n else:\n start = time.time()\n yield\n end = time.time()\n times.append((' '.join(args), start, end))", "def calculate_times(log):\n log['processing_time'] = 0\n log['multitasking'] = 0\n log = log.to_dict('records')\n log = sorted(log, key=lambda x: (x['source'], x['caseid']))\n for _, group in itertools.groupby(log, key=lambda x: (x['source'], x['caseid'])):\n events = list(group)\n events = sorted(events, key=itemgetter('start_timestamp'))\n for i in range(0, len(events)):\n # In one-timestamp approach the first activity of the trace\n # is taken as instantsince there is no previous timestamp\n # to find a range\n dur = (events[i]['end_timestamp'] -\n events[i]['start_timestamp']).total_seconds()\n if i == 0:\n wit = 0\n else:\n wit = (events[i]['start_timestamp'] -\n events[i-1]['end_timestamp']).total_seconds()\n 
events[i]['waiting_time'] = wit if wit >= 0 else 0\n events[i]['processing_time'] = dur\n return pd.DataFrame.from_dict(log)", "def build_timeseries(self):\n timeseries = {\n \"metricKind\": \"DELTA\", \n \"metric\": {\n \"labels\": {\n \"response_code\": \"0\"}, \n \"type\": \"agent.googleapis.com/agent/request_count\"\n }, \n \"points\": [\n {\n \"interval\": {\n \"endTime\": \"2019-02-18T22:09:53.939194Z\", \n \"startTime\": \"2019-02-18T21:09:53.939194Z\"\n }, \n \"value\": {\n \"int64Value\": \"62\"\n }\n }, \n {\n \"interval\": {\n \"endTime\": \"2019-02-18T21:09:53.939194Z\", \n \"startTime\": \"2019-02-18T20:09:53.939194Z\"\n }, \n \"value\": {\n \"int64Value\": \"61\"\n }\n }\n ], \n \"resource\": {\n \"labels\": {\n \"instance_id\": \"9113659852587170607\", \n \"project_id\": \"YOUR_PROJECT_ID\", \n \"zone\": \"us-east4-a\"\n }, \n \"type\": \"gce_instance\"\n }, \n \"valueType\": \"INT64\"\n }\n\n return timeseries", "def get_accumulated_data(self, topic, start_time, end_time, units):\n ignore_start_time = self._get_value('ignore_start_time', topic)\n ignore_end_time = self._get_value('ignore_end_time', topic)\n adjust_start_time = self._get_value('adjust_start_time', topic)\n adjust_end_time = self._get_value('adjust_end_time', topic)\n\n if ignore_start_time:\n self.logger.debug(\"Service ignoring start time.\")\n start_ts = self.peek_datetime(topic) - adjust_start_time\n else:\n start_ts = start_time - adjust_start_time\n\n if ignore_end_time:\n self.logger.debug(\"Service ignoring end time.\")\n end_ts = self.peek_last_datetime(topic) + adjust_end_time\n else:\n end_ts = end_time + adjust_end_time\n\n self.logger.debug(\"Service processing interval: %f %f\" %(start_ts, end_ts))\n accumulator = weewx.accum.Accum(weeutil.weeutil.TimeSpan(start_ts, end_ts))\n\n for data in self.get_data(topic, end_ts):\n if data:\n try:\n self.logger.debug(\"Service data to accumulate: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(data['dateTime']), to_sorted_string(data)))\n accumulator.addRecord(data)\n except weewx.accum.OutOfSpan:\n self.logger.info(\"Service ignoring record outside of interval %f %f %f %s\"\n %(start_ts, end_ts, data['dateTime'], (to_sorted_string(data))))\n else:\n break\n\n target_data = {}\n if not accumulator.isEmpty:\n aggregate_data = accumulator.getRecord()\n self.logger.debug(\"Service data prior to conversion is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(aggregate_data['dateTime']), to_sorted_string(aggregate_data)))\n target_data = weewx.units.to_std_system(aggregate_data, units)\n self.logger.debug(\"Service data after to conversion is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(target_data['dateTime']), to_sorted_string(target_data)))\n else:\n self.logger.debug(\"Dervice queue was empty\")\n\n # Force dateTime to packet's datetime so that the packet datetime is not updated to the MQTT datetime\n if ignore_end_time:\n target_data['dateTime'] = end_time\n\n return target_data", "def tally(self, ident, name, cost, leadtime):\r\n self.idents.append(ident)\r\n self.names.append(name)\r\n self.costs.append(cost)\r\n self.leadtimes.append(leadtime)", "def save_scopeTraces_Multiple(fileNames, scope, channels, noPulses):\n results = []\n for i in range(len(fileNames)):\n\t scope._get_preamble(channels[i])\n\t results.append(utils.PickleFile(fileNames[i], 1))\n\t results[i].add_meta_data(\"timeform_1\", scope.get_timeform(channels[i]))\n\n #ct = scope.acquire_time_check()\n #if ct == False:\n # print 'No triggers for this data point. 
Will skip and set data to 0.'\n # results.save()\n # results.close()\n # return False\n\n t_start, loopStart = time.time(),time.time()\n for i in range(noPulses):\n try:\n ct = scope.acquire_time_check(timeout=.4)\n for j in range(len(results)):\n\t\t results[j].add_data(scope.get_waveform(channels[j]), 1)\n except Exception, e:\n print \"Scope died, acquisition lost.\"\n print e\n if i % 100 == 0 and i > 0:\n print \"%d traces collected - This loop took : %1.1f s\" % (i, time.time()-loopStart)\n loopStart = time.time()\n print \"%d traces collected TOTAL - took : %1.1f s\" % (i, (time.time()-t_start))\n for i in range(len(results)):\n\t results[i].save()\n return True", "def put(self, metric, values, timestamp=None):\n if timestamp is None:\n timestamp = time.time()\n now_date = datetime.datetime.fromtimestamp(timestamp)\n\n if self.last is None:\n self.last = timestamp\n return\n\n self.last = timestamp\n\n values = [str(d) for d in [now_date, timestamp]+values]\n\n with open(self.filename, \"at\") as df:\n df.write(\"{}\\n\".format(\",\".join(values)))", "def write_stacked_response_times(self):\r\n results_dirname = get_param(\"results_dir\")\r\n filename = os.path.join(results_dirname, \"%s_%s\" % (get_param(\"file_prefix\"),\r\n \"stacked_fairness\"))\r\n file = open(filename, \"w\")\r\n file.write(\"time\\trunning_tasks\\n\")\r\n previous_time = -1\r\n # Write in reverse order so that we automatically get the last event\r\n # for each time.\r\n for time, running_tasks in reversed(self.new_running_tasks):\r\n if time != previous_time:\r\n if previous_time != -1:\r\n file.write(\"%d\\t\" % time)\r\n for user in range(get_param(\"num_users\")):\r\n file.write(\"%d\\t\" % running_tasks[user])\r\n file.write(\"\\n\")\r\n previous_time = time", "async def prepare_trade_stats(self, key: str):\n\n if key not in self.trade_stats[self.time_prefix]:\n self.trade_stats[self.time_prefix][key] = {\n 'num_open': [],\n 'most_open': 0,\n 'buys': 0,\n 'rebuys': 0,\n 'sells': 0,\n 'collect_sells': 0,\n 'soft_stop_sells': 0,\n 'total_profit': 0.0,\n 'total_loss': 0.0,\n 'total_fees': 0.0,\n 'unfilled': 0,\n 'unfilled_partial': 0,\n 'unfilled_quantity': 0.0,\n 'unfilled_value': 0.0,\n 'failed': 0,\n 'balancer_refills': 0,\n 'balancer_remits': 0,\n 'balancer_stop_losses': 0,\n 'balancer_profit': 0.0,\n 'balancer_loss': 0.0,\n 'balancer_fees': 0.0,\n 'balancer_unfilled': 0,\n 'balancer_failed': 0,\n }", "def log(self, label, times, overlapping=False):\r\n self._timings.append(Timing(label, times, overlapping))", "def record(self, time, increment):\n raise NotImplementedError(\"Abstract method not implemented.\")", "def merge_time_metric(self, metric):\n\n self.merge_raw_time_metric(metric.duration, metric.exclusive)", "def log_stats(self):\n while True:\n for stats in self.stats.values():\n stats.log_stats()\n\n yield from asyncio.sleep(stats_delay)", "def merge(self, dataset):\n def merge_data(source, dest):\n for key, value in source.items():\n if isinstance(value, dict):\n merge_data(value, dest.setdefault(key, {}))\n else:\n dest[key] = value\n return dest\n\n merge_data(dataset.data, self._data)\n\n for h in dataset.task_history:\n if h not in self._task_history:\n self._task_history.append(h)", "def _store_rows(self):\n\n for value in self.values:\n self.counters.append(value['counter'])\n self.timestamps.append(value['timestamp'])\n self.acceleration.append(value['acceleration'])", "def merge_toggl_time_entries(self, time_entries):\n tg = Toggl()\n d = {}\n for entry in time_entries:\n if 
entry.get('billable'):\n if entry.get('tags') and tg.BOOKED_TAG in entry['tags']:\n status = 'booked'\n else:\n status = 'not-booked'\n date = parser.parse(entry['start']).date()\n if not entry.get('pid'):\n self.log(\"Couldn't find associated project for entry: %s\" % (str(entry)))\n continue\n unique_id = str(entry['pid']) + str(date) + status\n if not entry.get('description'):\n entry['description'] = \"\"\n if d.get(unique_id):\n d[unique_id]['duration'] += entry['duration']\n d[unique_id]['merged_ids'].append(entry['id'])\n if d[unique_id].get('description'):\n if entry['description'].strip() not in d[unique_id]['description']:\n d[unique_id]['description'] += ' / ' + entry['description']\n else:\n d[unique_id]['description'] = entry['description']\n else:\n entry['merged_ids'] = [entry['id']]\n d[unique_id] = entry\n return d.values()", "def set_analysis_time(self, t):\n for z in self.zones:\n z.set_demand_rate_per_t(t)", "def fake_record_data():\n\n user_ids = [4, 4, 4, 4, 5,\n 5, 2, 6, 1, 2,\n 5, 7, 5, 1, 3,\n 3, 1, 4, 2, 3,\n 6, 4, 2, 7, 3,\n 3, 3, 6, 7, 6,\n 6, 7, 1, 7, 1,\n 8, 7, 1, 8, 4]\n\n days = [1519200000, 1519200000, 1519200000, 1519200000, 1519113600,\n 1519113600, 1519113600, 1519027200, 1519027200, 1519027200,\n 1518940800, 1518940800, 1518854400, 1518854400, 1518768000,\n 1518681600, 1518681600, 1518681600, 1518681600, 1518681600,\n 1518595200, 1518595200, 1518595200, 1518595200, 1518508800,\n 1518422400, 1518422400, 1518422400, 1518422400, 1518336000,\n 1518336000, 1518336000, 1518336000, 1518249600, 1518249600,\n 1518163200, 1518163200, 1518076800, 1517990400, 1517904000]\n\n for i, user_id in enumerate(user_ids):\n act_qty = random.randint(5, 13)\n selected_activities = set()\n\n for _ in range(0, act_qty):\n act_id = random.randint(1, 13)\n selected_activities.add(act_id)\n\n day = days[-(i + 1)]\n start = day + 33000\n total_time = 0\n\n for act_id in selected_activities:\n act_time = random.randint(120, 1000)\n\n start_t = start + total_time\n end_t = datetime.fromtimestamp(start_t + act_time)\n start_t = datetime.fromtimestamp(start_t)\n\n total_time += act_time\n\n print (str(user_id) + '|' + str(i + 1) + '|' + str(act_id) + '|' +\n str(start_t) + '|' + str(end_t))", "def record_metrics(self, pid, metrics):\n\n for _metric, _metric_value in metrics.items():\n if not self.__metrics_history[pid].get(_metric):\n self.__metrics_history[pid][_metric] = []\n self.__metrics_history[pid][_metric].append(_metric_value)\n # only keep the last 2 running history for any metric\n self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][\n -2:\n ]", "def append(self, batch: Batch):\n for timestamp1 in batch.entering:\n self.append_event(timestamp1, EventType.person_entered)\n\n for timestamp2 in batch.leaving:\n self.append_event(timestamp2, EventType.person_left)", "def record_inference_stats(self, nms_step_time: float, inference_round_trip_time: Tuple[float, float, float], inference_step_time: float):\n\n # inference_round_trip_time is an average time needed for a step\n self.inference_times.append(inference_round_trip_time)\n # inference_step_time is the time taken to complete the step, and used to calculate the throughput\n inference_throughput = self.image_count/inference_step_time\n self.inference_throughputs.append(inference_throughput)\n\n self.nms_times.append(nms_step_time)\n\n total_step_time = inference_step_time + nms_step_time\n self.total_times.append(total_step_time)\n\n total_throughput = self.image_count/total_step_time\n 
self.total_throughputs.append(total_throughput)", "def aggregate(self):\n data_to_track = {}\n for possession in self.possessions_to_track_aggregate:\n data_to_track[possession] = self._haves[possession]\n\n for variable in self.variables_to_track_aggregate:\n try:\n data_to_track[variable] = self.__dict__[variable]\n except KeyError:\n pass\n self.database_connection.put([\"aggregate\",\n data_to_track,\n self.group,\n self.round])", "def _report(self, registry=None, timestamp=None, flush_current_hist=False):\n registry = registry or self.registry\n if self.enable_runtime_metrics:\n col = runtime_metrics.RuntimeCollector(registry)\n col.collect()\n metrics = registry.dump_metrics()\n for key in metrics.keys():\n metric_name, metric_tags = self.decode_key(key)\n tags = self.tags\n if metric_tags:\n tags = self.tags.copy()\n tags.update(metric_tags)\n\n wf_hist = wavefront_histogram.get(key, registry)\n if wf_hist is not None:\n distributions = wf_hist.get_distribution()\n if flush_current_hist:\n distributions.extend(\n wf_hist.get_current_minute_distribution())\n for dist in distributions:\n self.wavefront_client.send_distribution(\n name=f'{self.prefix}{metric_name}',\n centroids=dist.centroids,\n histogram_granularities=self.histogram_granularities,\n timestamp=dist.timestamp,\n source=self.source,\n tags=tags)\n continue\n\n is_delta = delta.is_delta_counter(key, registry)\n for value_key in metrics[key].keys():\n if is_delta:\n self.wavefront_client.send_delta_counter(\n name=delta.get_delta_name(self.prefix, metric_name,\n value_key),\n value=metrics[key][value_key], source=self.source,\n tags=tags\n )\n # decrement delta counter\n registry.counter(key).dec(metrics[key][value_key])\n else:\n self.wavefront_client.send_metric(\n name=f'{self.prefix}{metric_name}.{value_key}',\n value=metrics[key][value_key], timestamp=timestamp,\n source=self.source, tags=tags)", "def _thing_stats(self, thing, all_time=None):\r\n header = \"%ss created \" % thing\r\n if all_time:\r\n header += \"since the beginning:\"\r\n else:\r\n header += \"%s %s:\" % (self.phrase, self.time)\r\n columns = ['all', 'spam', 'non-spam']\r\n data = [str(self.r.total_things(thing, all_time=all_time)),\r\n str(self.r.total_things(thing, spam=\"t\", all_time=all_time)),\r\n str(self.r.total_things(thing, spam=\"f\", all_time=all_time))]\r\n return [[header], columns, data]", "def __init__(self, metrics_to_record):\n self.tape = {}\n\n for metric_name in metrics_to_record:\n self.tape[metric_name] = []", "def aggregate_statistics(self, new_stats):\n \n if isinstance(new_stats,RunStatistics):\n new_stats = [new_stats, ]\n elif isinstance(new_stats,list):\n if any(not isinstance(_,RunStatistics) for _ in new_stats):\n raise MadGraph5Error, \"The 'new_stats' argument of the function \"+\\\n \"'updtate_statistics' must be a (possibly list of) \"+\\\n \"RunStatistics instance.\"\n \n keys = set([])\n for stat in [self,]+new_stats:\n keys |= set(stat.keys())\n\n new_stats = new_stats+[self,]\n for key in keys:\n # Define special rules\n if key=='max_precision':\n # The minimal precision corresponds to the maximal value for PREC\n self[key] = min( _[key] for _ in new_stats if key in _)\n elif key=='min_precision':\n # The maximal precision corresponds to the minimal value for PREC\n self[key] = max( _[key] for _ in new_stats if key in _)\n elif key=='averaged_timing':\n n_madloop_calls = sum(_['n_madloop_calls'] for _ in new_stats if\n 'n_madloop_calls' in _)\n if n_madloop_calls > 0 :\n self[key] = 
sum(_[key]*_['n_madloop_calls'] for _ in \n new_stats if (key in _ and 'n_madloop_calls' in _) )/n_madloop_calls\n else:\n # Now assume all other quantities are cumulative\n self[key] = sum(_[key] for _ in new_stats if key in _)", "def each_statement_in_batch_uses_proper_timestamp_test(self):\n cursor = self.prepare()\n cursor.execute(\"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow') USING TIMESTAMP 1111111111111111\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner') USING TIMESTAMP 1111111111111112\n APPLY BATCH\n \"\"\")\n cursor.execute(\"SELECT id, writetime(firstname), writetime(lastname) FROM users\")\n res = sorted(cursor.fetchall())\n assert res == [[0, 1111111111111111, 1111111111111111], [1, 1111111111111112, 1111111111111112]], res", "def merge_metric_stats(self, snapshot):\n\n if not self.__settings:\n return\n\n for key, other in six.iteritems(snapshot.__stats_table):\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def reduce_data():\n snapshots = Snapshot.objects.all()\n locations = Location.objects.all()\n lst = []\n for snapshot in snapshots:\n lst.append([snapshot.location.name, snapshot.avail_bikes,\n snapshot.free_stands, snapshot.timestamp])\n cols = ['location', 'avail_bikes', 'free_stands', 'timestamp']\n df = pd.DataFrame(lst, columns=cols)\n df['time'] = df['timestamp'].dt.round('30min').dt.strftime('%H:%M')\n\n group = df.groupby(['location', 'time'])\n means = group.mean()\n sd = group.std()\n today = date.today()\n first = today.replace(day=1)\n last_month = first - timedelta(days=1)\n\n for name, time in means.index:\n subset_mean = means.xs((name, time), level=(0, 1), axis=0)\n subset_sd = sd.xs((name, time), level=(0, 1), axis=0)\n m = Stat.objects.get_or_create(\n location=locations.get(name=name),\n avail_bikes_mean=subset_mean['avail_bikes'],\n free_stands_mean=subset_mean['free_stands'],\n avail_bikes_sd=subset_sd['avail_bikes'],\n free_stands_sd=subset_sd['free_stands'],\n time=time,\n month=last_month\n )\n\n # snaps = Snapshot.objects.all()\n # i = 0\n # length = len(snaps)\n # for s in snaps:\n # i += 1\n # print(i)\n # if i > 35000:\n # s.save()\n # reduce_data()", "def groupElapsedTscByScope(txnSubCollection, beginProbe, endProbe, classifier=DefaultClassifier()):\n elapsedTscGroup = {}\n for txn in txnSubCollection:\n if txn.hasProbes([beginProbe, endProbe]):\n beginCounter = txn.getCounterForProbe(beginProbe)\n endCounter = txn.getCounterForProbe(endProbe)\n TxnAggregator._addOrUpdateContainer(\n elapsedTscGroup, lambda v: [v], classifier, txn,\n endCounter.tsc - beginCounter.tsc\n )\n return elapsedTscGroup", "def add(self, timestamp):\n self.total_count += 1\n self.times.append(timestamp)", "def save_to_history(self):\n for stat_type in self.log_book.keys():\n stat = self.get_stat(stat_type)\n self.history[stat_type].append(stat)\n self.init_stat()", "def get_new_data(self):\n\n # record bar parse performance\n self.logger.debug(\"Started parsing new ticks.\")\n start_parse = time.time()\n for exchange in self.exchanges:\n exchange.parse_ticks()\n end_parse = time.time()\n duration = round(end_parse - start_parse, 5)\n\n self.logger.debug(\n \"Parsed \" + str(self.total_instruments) +\n \" instruments' ticks in \" + str(duration) + \" seconds.\")\n self.track_tick_processing_performance(duration)\n\n # wrap new 1 min bars in market events\n new_market_events = []\n for exchange in self.exchanges:\n 
bars = exchange.get_new_bars()\n for symbol in exchange.get_symbols():\n for bar in bars[symbol]:\n event = MarketEvent(exchange.get_name(), bar)\n new_market_events.append(event)\n # add bars to save-to-db-later queue\n # TODO: store new bars concurrently with a processpool\n self.bars_save_to_db.put(event)\n return new_market_events", "def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def all_time(self):\r\n return RecordsAllTime(self)", "def generate_metrics_data(metricsquery: List, resultsquery: Dict, deltaminutes: int = 5, Region_name: str = None) -> Dict:\r\n cloudwatch=client('cloudwatch', region_name=Region_name) \r\n paginator = cloudwatch.get_paginator('get_metric_data')\r\n metricsgroup=grouper(metricsquery)\r\n resultsquery['ApiCalls']=0 \r\n for mqs in metricsgroup:\r\n for response in paginator.paginate(MetricDataQueries=mqs, StartTime=datetime.now()-timedelta(minutes=deltaminutes),EndTime=datetime.now()):\r\n for results in response['MetricDataResults']:\r\n resultsquery[results['Id']].append({'results':results})\r\n resultsquery['ApiCalls']+=1\r\n return resultsquery", "def log_metrics(self, metrics: dict):\n self.metrics.update(metrics)\n\n self._sync_log_event()", "def generate_time_data(self):\n # generate random dates and append to a list\n sd = self.start_date\n ed = self.end_date\n dates = [random_date(start=sd, end=ed) for d in range(0, self.obs)]\n\n # convert to ISO 8601 format and update \"Local Time\" field\n self.output['Local Time'] = map(lambda x: x.isoformat(), dates)", "def stats_timing(stats_key, stats_logger):\r\n start_ts = now_as_float()\r\n try:\r\n yield start_ts\r\n except Exception as e:\r\n raise e\r\n finally:\r\n stats_logger.timing(stats_key, now_as_float() - start_ts)", "def _compute_last_observations(self):\n observations = {}\n for ts in self.ts_ids:\n observations[ts] = self.traffic_signals[ts]._compute_observation()\n return observations", "def update_timeindex(self, event):\n latest_datetime = self.bars.get_latest_bar_datetime(self.symbol_list[0])\n \n # Update positions\n # ================\n dp = dict((k,v) for k,v in [(s,0) for s in self.symbol_list])\n dp['datetime'] = latest_datetime\n \n for s in self.symbol_list:\n dp[s] = self.current_positions[s]\n \n # Append the current positions\n self.all_positions.append(dp)\n \n # Update holdings\n # ===============\n dh = dict((k,v) for k,v in [(s,0) for s in self.symbol_list])\n dh['datetime'] = latest_datetime\n dh['cash'] = self.current_holdings['cash']\n dh['commission'] = self.current_holdings['commission']\n dh['total'] = self.current_holdings['cash']\n \n for s in self.symbol_list:\n # Approximation to the real value\n market_value = self.current_positions[s] * self.bars.get_latest_bar_value(s, \"close\")\n dh[s] = market_value\n dh['total'] += market_value\n \n # Append the current holdings\n self.all_holdings.append(dh)", "def flush(self):\n for k, l in self.logs.items():\n self.full_logs[k].extend(l)\n self.logs = dict()", "def accumulate(self, scopes=None):\n scopes = scopes if scopes is not None else self.scopes\n for scope in scopes:\n for k, v in self.partials.items():\n self.metrics[scope][k] += v\n self.metric_counts[scope][k] += self.partial_counts.get(k, 1)\n\n self.partials.clear()\n self.partial_counts.clear()", "def collapse_using_timeStr(self):\n if self.modified == 
True:\n raise Exception('Probabilities already modified.\\nCollapsing after modification will lead to incorrect results.')\n timeUnits = np.array(process_time_string(self.timeStr))\n if len(self.timeslices) + 1 == np.sum(timeUnits):\n if timeUnits[-1] == 1:\n timeUnits = timeUnits[:-1]\n else:\n timeUnits[-1] -= 1\n if len(self.timeslices) != np.sum(timeUnits):\n raise Exception('Total number of timeslices is different.')\n ind = 0\n cnt = 0\n curr_rates = np.matrix(np.zeros((np.shape(self.obsRates)[0], len(timeUnits))))\n curr_times = []\n for i in timeUnits:\n curr_rates[:, cnt] = np.sum(self.obsRates[:, ind:ind + i], axis=1)\n curr_times.append(np.sum(self.timeslices[ind:ind + i]))\n ind += i\n cnt += 1\n\n self.obsRates = curr_rates\n self.timeslices = curr_times", "def _aggregate_perf_data(perf_all_ordinals: List[str]):\n aggregate = {}\n\n pd = PerfData()\n for data in perf_all_ordinals:\n worker_pd = PerfData(**json.loads(data))\n if len(perf_all_ordinals) > 1:\n aggregate.setdefault(\"ordinals\", [])\n aggregate[\"ordinals\"].append(worker_pd.throughput_dict())\n\n pd.merge(worker_pd)\n\n aggregate.update(dataclasses.asdict(pd))\n return aggregate", "def add_action_timestamp_to_rate_limiters(self, timestamp: TimeType) -> None:\n for limiter in self.rate_limiters:\n limiter.add_action_timestamp(timestamp)", "def do_report(input):\n for (session_id, group) in itertools.groupby(input, get_session_id):\n group = list(group)\n elapsed = [finfo.elapsed for finfo in group]\n n = float(len(elapsed))\n mean = sum(elapsed) / n\n s = math.sqrt(\n (1 / (n - 1)) * sum((e - mean) ** 2 for e in elapsed)\n )\n yield SummaryData(\n session_id,\n group[0].message,\n min(elapsed),\n max(elapsed),\n mean,\n s\n )", "def _record_current_time(self):\n now = time.time()\n delta = now - self._last_time\n self._last_time = now\n self._timing_recorder.append(delta)", "def update_metrics(self, state: TrainState, step: int, train_metrics: List[MetricsDict], t0):\n if not self.logflag:\n return\n\n eval_metrics: List[Any] = []\n\n # Build summary dictionary for logging\n # Include training stats\n train_metrics = common_utils.get_metrics(train_metrics)\n summary = {\n f\"train_{k}\": v\n for k, v in jax.tree_util.tree_map(lambda x: x.mean(), train_metrics).items()\n }\n epoch = step // self.steps_per_epoch\n summary[\"epoch\"] = epoch\n summary[\"time\"] = time.time() - t0\n\n # Eval over testing set\n for _ in range(self.steps_per_eval):\n eval_batch = next(self.eval_dt_iter)\n metrics = self.p_eval_step(state, eval_batch)\n eval_metrics.append(metrics)\n # Compute testing metrics\n eval_metrics = common_utils.get_metrics(eval_metrics)\n\n # Add testing stats to summary\n summary_eval = jax.tree_util.tree_map(lambda x: x.mean(), eval_metrics)\n summary.update(summary_eval)\n\n # Update iteration stats object\n assert isinstance(self.itstat_object, IterationStats) # for mypy\n self.itstat_object.insert(self.itstat_insert_func(ArgumentStruct(**summary)))", "def update_timeindex(self, event):\n latest_datetime = self.bars.get_latest_bar_datetime(self.symbol_list[0])\n \n # Update positions\n # ================\n dp = dict((k,v) for k,v in [(s,0) for s in self.symbol_list])\n dp['datetime'] = latest_datetime\n \n for s in self.symbol_list:\n dp[s] = self.current_positions[s]\n \n # Append the current positions\n self.all_positions.append(dp)\n \n # Update holdings\n # ===============\n dh = dict((k,v) for k,v in [(s,0) for s in self.symbol_list])\n dh['datetime'] = latest_datetime\n dh['cash'] = 
self.current_holdings['cash']\n dh['commission'] = self.current_holdings['commission']\n dh['total'] = self.current_holdings['cash']\n \n for s in self.symbol_list:\n # Approximation to the real value\n market_value = self.current_positions[s] * self.bars.get_latest_bar_value(s, \"adj_close\")\n dh[s] = market_value\n dh['total'] += market_value\n \n # Append the current holdings\n self.all_holdings.append(dh)\n print('timeindex: ', dh)", "def increment(self) -> None:\n self._increment_called = True\n self.append(deepcopy(self._base_metric))", "def __add__ ( self, other, resample_opts=None ):\n result = ObservationStorage (datadir=self.datadir, \\\n resample_opts=resample_opts )\n if self.date[0] > other.date[0]:\n start_date = other.date[0]\n else:\n start_date = self.date[0]\n if self.date[-1] > other.date[-1]:\n end_date = other.date[-1]\n else:\n end_date = self.date[-1]\n \n delta = datetime.timedelta ( days=1 )\n this_date = start_date.date()\n end_date = end_date.date() + delta\n \n this_obs_dates = [ x.date() for x in self.date ]\n other_obs_dates = [ x.date() for x in other.date ]\n \n date = [] ; vza = [] ; vaa = [] ; sza = [] ; saa = []\n emulator = [] ; mask = [] ; data_pntr = [] ; spectral = []\n sensor = []\n \n while this_date < end_date:\n if this_date in this_obs_dates:\n iloc = this_obs_dates.index ( this_date )\n date.append ( self.date[iloc] )\n emulator.append ( self.emulator[iloc] )\n vza.append ( self.vza[iloc] )\n sza.append ( self.sza[iloc] )\n vaa.append ( self.vaa[iloc] )\n saa.append ( self.saa[iloc] )\n spectral.append ( self.spectral )\n mask.append ( ( self.get_mask, [iloc] ) )\n sensor.append ( self.sensor )\n \n data_pntr.append ( self._data_pntr[iloc] )\n if this_date in other_obs_dates:\n iloc = other_obs_dates.index ( this_date )\n date.append ( other.date[iloc] )\n emulator.append ( other.emulator[iloc] )\n vza.append ( other.vza[iloc] )\n sza.append ( other.sza[iloc] )\n vaa.append ( other.vaa[iloc] )\n saa.append ( other.saa[iloc] )\n spectral.append ( other.spectral )\n mask.append ( ( other.get_mask, [iloc] ) )\n sensor.append ( other.sensor )\n data_pntr.append ( other._data_pntr[iloc] )\n this_date += delta\n result.vza = vza\n result.vaa = vaa\n result.sza = sza \n result.saa = saa \n result.date = date\n result.spectral = spectral\n result.masks = mask\n result.sensor = sensor\n result.emulator = emulator\n result._data_pntr = data_pntr\n return result", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "def setPerfMetrics(self, perf_metrics):\n for event in perf_metrics.metric:\n attr_name = '%s_%s_%s' % (frontendConfig.glidein_perfmetric_prefix,\n perf_metrics.name, event)\n self.adParams[attr_name] = perf_metrics.event_lifetime(event)", "def _generate_time_values(self):\r\n # Populate time values\r\n log('writing times', 'INFO')\r\n d1970 = datetime(1970, 1, 1, tzinfo=utc)\r\n time_array = [[int((self.start_datetime - d1970).total_seconds())]]\r\n \r\n datetime_nc_start_simulation = self.start_datetime\r\n for raw_nc_index, raw_nc in enumerate(self.raw_nc_list):\r\n \r\n raw_nc_time = raw_nc.get_time_array(datetime_simulation_start=datetime_nc_start_simulation,\r\n simulation_time_step_seconds=self.time_step_array[raw_nc_index])\r\n \r\n time_array.append(raw_nc_time)\r\n datetime_nc_start_simulation = datetime.utcfromtimestamp(raw_nc_time[-1])\r\n \r\n self.cf_nc.variables['time'][:] = np.concatenate(time_array)\r\n end_date = 
datetime.utcfromtimestamp(self.cf_nc.variables['time'][-1])\r\n self.cf_nc.time_coverage_start = self.start_datetime.isoformat() + 'Z'\r\n self.cf_nc.time_coverage_end = end_date.isoformat() + 'Z'", "def _filterTimes(self):\n print(self.tRange)\n idT = np.where((self.tRange[0] > np.array(self.rawD['Epoch'][:])) & \n (self.tRange[1] < np.array(self.rawD['Epoch'][:])))[0]\n #print(self.rawD['Epoch'][:100])\n print(idT)\n # Filter data\n for key in filter(lambda x: ('Epoch' in x or \n ('Counts' in x and x[-1] == 's')), self.rawD.keys()):\n self.d[key] = self.rawD[key].copy()[idT]\n return", "def refresh(self):\n for i in self.data:\n values = self.data[i]\n try:\n if values[\"state\"] == \"Teardown\":\n t_delta = (values[\"t_end\"] or values[\n \"date\"]) - values[\"ts\"]\n else:\n t_delta = values[\"date\"] - values[\"ts\"]\n\n if t_delta.total_seconds() < 0:\n t_delta = values[\"ts\"] - values[\"ts\"]\n values[\"duration\"] = str(t_delta.total_seconds())\n except:\n print sys.exc_info()\n # print values\n values[\"duration\"] = 0", "def time_stats(df):", "def get_totals_by_time_scope(self, aggregates, filters=None):\n if filters is None:\n filters = self.ten_day_filter\n with tenant_context(self.tenant):\n return OCPUsageLineItemDailySummary.objects.filter(**filters).aggregate(**aggregates)", "def record_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, value in metrics:\n self.record_custom_metric(name, value)", "def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)", "def apply_all_accumulators(self):\n self._require_state(\"APPLYING\")\n for mi in self._accums.keys():\n self._apply_one_accum_set(mi)", "def emit_optional_stats(self):\n # set union and deduplicate\n field_and_values = list(set(chain.from_iterable(\n iter(self._stats[v].keys()) for v in self._kind_map.values()\n )))\n field_and_values.sort() # for better order in logging\n for field_and_value in field_and_values:\n joined_count = self._stats[1][field_and_value]\n fake_count = self._stats[0][field_and_value]\n unjoined_count = self._stats[-1][field_and_value]\n total_count = joined_count + unjoined_count\n fake_total_count = total_count + fake_count\n join_rate = joined_count / max(total_count, 1) * 100\n fake_join_rate = (joined_count + fake_count) / \\\n max(fake_total_count, 1) * 100\n logging.info(\n 'Cumulative stats of `%s`:\\n '\n 'total: %d, joined: %d, unjoined: %d, join rate: %f, '\n 'total w/ fake: %d, joined w/ fake: %d, join rate w/ fake: %f',\n field_and_value,\n total_count, joined_count, unjoined_count, join_rate,\n fake_total_count, joined_count + fake_count, fake_join_rate\n )\n logging.info('Unjoined example ids: %s', self._sample_reservoir)\n self._sample_reservoir = []\n self._sample_receive_num = 0", "def add_count_data(self, counts: Dict[datetime, int]):\n raise NotImplementedError()", "def _apply_all_time_reductions(self, full_ts, monthly_ts, eddy_ts):\n logging.info(self._print_verbose(\"Applying desired time-\"\n \"reduction methods.\"))\n # Determine which are regional, eddy, time-mean.\n reduc_specs = [r.split('.') for r in self.dtype_out_time]\n reduced = {}\n for reduc, specs in zip(self.dtype_out_time, reduc_specs):\n func = specs[-1]\n if 'eddy' in specs:\n data = eddy_ts\n elif 
'time-mean' in specs:\n data = monthly_ts\n else:\n data = full_ts\n if 'reg' in specs:\n reduced.update({reduc: self.region_calcs(data, func)})\n else:\n reduced.update({reduc: self._time_reduce(data, func)})\n return OrderedDict(sorted(reduced.items(), key=lambda t: t[0]))", "def count_results(self):\n for record in self._traced_records:\n if self._statistical_results.get(record.name) == None:\n self._statistical_results[record.name] = StatisticalResult(\n record.name)\n self._statistical_results[record.name].insert_record(\n record.kernel_time)\n self._total_time_ms += record.kernel_time\n self._statistical_results = {\n k: v\n for k, v in sorted(self._statistical_results.items(),\n key=lambda item: item[1],\n reverse=True)\n }", "def _addTiming(self, key, duration):\n pass", "def set_timeseries_metadata(self, dataset_names):\n for dataset_name in dataset_names:\n if dataset_name in self:\n self[dataset_name].dataset_metadata.update({\n 'version': SCHEMA_VERSION,\n 'units': self.config[dataset_name]['units']\n })\n self[dataset_name].group_metadata.update({'source': 'lmt'})", "def _append_results(self) -> None:\n self._t_mps.compute_traces(self._step, self._process_tensors)\n time = self.time(self._step)\n norm = self._t_mps.get_norm()\n bond_dimensions = self._t_mps.get_bond_dimensions()\n self._results['time'].append(time)\n self._results['norm'].append(norm)\n self._results['bond_dimensions'].append(bond_dimensions)\n for sites, dynamics in self._results['dynamics'].items():\n if isinstance(sites, int):\n sites_list = [sites]\n else:\n sites_list = list(sites)\n dynamics.add(\n time,\n self._t_mps.get_density_matrix(sites_list))\n self._t_mps.clear_traces()", "def set_ga_timestamp(self, time: int):\n for cl in self:\n cl.tga = time" ]
[ "0.6077945", "0.5785074", "0.5715007", "0.5492405", "0.5340035", "0.5287281", "0.5276565", "0.5246753", "0.524385", "0.5236754", "0.5186013", "0.5171279", "0.5168675", "0.51627535", "0.51169103", "0.51088405", "0.5038848", "0.5023717", "0.49753776", "0.49482045", "0.4947367", "0.4939258", "0.493599", "0.4935371", "0.49286577", "0.49141905", "0.4912324", "0.49121654", "0.49102762", "0.49041864", "0.4898764", "0.48587075", "0.4854696", "0.48456606", "0.4841744", "0.4827174", "0.48074424", "0.48074162", "0.48039734", "0.48013756", "0.47929645", "0.4791077", "0.4789995", "0.47824666", "0.47774616", "0.47593382", "0.4756202", "0.47511008", "0.47481778", "0.47404942", "0.4736889", "0.47327283", "0.47288415", "0.47271782", "0.47268113", "0.47263578", "0.4720613", "0.47189415", "0.4715364", "0.47145703", "0.4713798", "0.47131917", "0.47123966", "0.47091", "0.47048196", "0.46984968", "0.46854183", "0.46842676", "0.4676481", "0.46732202", "0.46722272", "0.46598682", "0.46596283", "0.46544075", "0.46534362", "0.46456254", "0.46419892", "0.46358278", "0.46345478", "0.46319664", "0.4631778", "0.4631619", "0.4629199", "0.46279818", "0.4621765", "0.46174878", "0.46148023", "0.46115005", "0.4611171", "0.4609646", "0.46014312", "0.46011734", "0.46008843", "0.46002653", "0.4599743", "0.45988223", "0.45973855", "0.4597263", "0.4594237", "0.4593578" ]
0.6196177
0
Record a single value metric, merging the data with any data from prior value metrics with the same name.
def record_custom_metric(self, name, value): key = (name, '') if isinstance(value, dict): if len(value) == 1 and 'count' in value: new_stats = CountStats(call_count=value['count']) else: new_stats = TimeStats(*c2t(**value)) else: new_stats = TimeStats(1, value, value, value, value, value**2) stats = self.__stats_table.get(key) if stats is None: self.__stats_table[key] = new_stats else: stats.merge_stats(new_stats) return key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]", "def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)", "def log_metric(self, name: str, value):\n self.metrics[name] = value\n\n self._sync_log_event()", "def write_metric(self, metric_name: str, metric_value: Union[float, int]):\n self._metrics.append(Metric(metric_name, metric_value))", "def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with self._lock:\n self._batch[identity] = value\n self._timestamps[identity] = int(time.time() * 1000.0)", "def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})", "def record_summary(self, name, value, tags=None):\n identity = self.create_identity(name, tags, \"summary\")\n with self._lock:\n if identity in self._batch:\n merged_value = self._batch[identity]\n merged_value[\"count\"] += 1\n merged_value[\"sum\"] += value\n merged_value[\"min\"] = min(value, merged_value[\"min\"])\n merged_value[\"max\"] = max(value, merged_value[\"max\"])\n else:\n value = {\"count\": 1, \"sum\": value, \"min\": value, \"max\": value}\n self._batch[identity] = value", "def log_metric(self, name, val):\n raise NotImplementedError", "def update_metric(self, metric, value):\n if self.is_number(value):\n self.logger.debug(\"Collected raw metric: %s = %s\" % (metric, value))\n self.raw_metrics[metric] = value", "def __push_metric(self, metric_name, value, timestamp):\n sock = self.__get_carbon_socket()\n _data = \"%s %d %d\\n\" % (metric_name, value, timestamp)\n LOGGER.debug(\"SEND: %s\", _data.replace(\"\\n\", \"\"))\n sock.send(_data.encode('utf-8'))", "def metric_recorded(self, record):\n if record.name in self.headers and self.current_row is not None:\n if record.name == \"learning_rate\" and not record.is_scalar:\n # record is a list of scalars\n value = \",\".join([f\"{lr:.4f}\" for lr in record.value])\n elif record.is_scalar and isinstance(record.value, int):\n value = str(record.value)\n else:\n assert record.is_scalar\n\n value = f\"{record.value:.4f}\"\n\n self.current_row[record.name] = value", "def dispatch_value(metric, value, type):\n log_verbose('Sending metric: %s=%s as type %s' % (metric, value,type))\n\n val = collectd.Values(plugin='redis_metrics')\n val.type = type\n val.type_instance = metric\n val.values = [value]\n val.dispatch()", "def log_metric(self, name, val, step):\n raise NotImplementedError", "def sum(self, key, value):\n self._metrics[key] += value", "def log_metric(key, value, step=None):\n mlflow.log_metric(key, value, step=step)", "def add_metric(self, metric_name, metric_value, login=False):\n if login:\n self._gc.login()\n\n try: \n if metric_name not in self._metric_dict:\n metric_index = len(self._metric_dict) + 2\n self._wks.update_cell(1, metric_index, metric_name)\n self._metric_dict[metric_name] = metric_index\n self.save_config()\n\n self._wks.update_cell(self.row_index, self._metric_dict[metric_name], metric_value)\n except Exception as ins:\n if not login:\n 
self.add_metric(metric_name, metric_value, login=True)\n else:\n return '\\n'.join([str(type(ins)), str(ins.args), str(ins)])\n return None", "def record_metrics(self, pid, metrics):\n\n for _metric, _metric_value in metrics.items():\n if not self.__metrics_history[pid].get(_metric):\n self.__metrics_history[pid][_metric] = []\n self.__metrics_history[pid][_metric].append(_metric_value)\n # only keep the last 2 running history for any metric\n self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][\n -2:\n ]", "def record_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, value in metrics:\n self.record_custom_metric(name, value)", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def _log_op_value(self, name: str, value: Any) -> None:\n summary_op, placeholder = self._get_log_op(name)\n sess = tf.get_default_session()\n result = sess.run(summary_op, {placeholder: value})\n self.summary_writer.add_summary(result, self.batches_seen)", "def put(self, metric, values, timestamp=None):\n if timestamp is None:\n timestamp = time.time()\n now_date = datetime.datetime.fromtimestamp(timestamp)\n\n if self.last is None:\n self.last = timestamp\n return\n\n self.last = timestamp\n\n values = [str(d) for d in [now_date, timestamp]+values]\n\n with open(self.filename, \"at\") as df:\n df.write(\"{}\\n\".format(\",\".join(values)))", "def publish_metric(name, value, type):\n t = time.time()\n m = json.dumps({'monitor':name, type:value, 'time':t})\n r = redis.StrictRedis(host='localhost', port=6379, db=0) \n r.lpush('sensor_readings',m)", "def save_metric(key, value, timestamp=None):\n\n from analytics_client.settings import _ANALYTICS_ENABLED\n\n if not _ANALYTICS_ENABLED:\n return None\n\n from analytics_client.tasks import store_metric\n\n # Set a timestamp if it is undefined\n _timestamp = timestamp\n if _timestamp is None:\n _timestamp = datetime.now()\n\n store_metric.delay(Metric(key=key, value=value, timestamp=_timestamp))", "def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)", "def log_other(self, name: str, value):\n self._other_metadata[name] = value\n\n self._sync_log_event()", "def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)", "def _record(self, metric_point: MetricPoint,\n measurement_map: MeasurementMap):\n metric_name = metric_point.metric_name\n tags = metric_point.tags\n\n metric = self._registry.get(metric_name)\n # Metrics should be always registered dynamically.\n assert metric\n\n tag_map = tag_map_module.TagMap()\n for key, value in tags.items():\n tag_key = tag_key_module.TagKey(key)\n tag_value = tag_value_module.TagValue(value)\n tag_map.insert(tag_key, tag_value)\n\n metric_value = metric_point.value\n measurement_map.measure_float_put(metric.measure, metric_value)\n # NOTE: When we record this metric, timestamp will be renewed.\n measurement_map.record(tag_map)", "def add_metric(self, metric):\n self.metrics.append(metric)\n self.estimate()", "def log(self, metric, value, source, timestamp=None):\n if 
timestamp is None:\n timestamp = datetime.now()\n\n sql = \"insert into measurement(metric, value, source, timestamp) values('{0}', {1}, '{2}', '{3}');\".format(\n metric, value, source, timestamp)\n\n self._execute_sql(sql)", "def get_metric(self, metric, existing_dict=None):\n metric_key, metric_type, metric_name, metric_help = metric\n metric_dict = {\n 'name': metric_name,\n 'type': metric_type,\n 'help': metric_help,\n 'values': OrderedDict()\n }\n values = self.r.hgetall(metric_key) # new values\n # print \"values: %r\" % values\n metric_dict['values'] = values\n\n if existing_dict:\n # we're updating a metric we've already seen\n # print \"existing dict: %r\" % existing_dict\n for value in values:\n # print \"checking value: %r\" % value\n # value = json.loads(value)\n if value in existing_dict['values']:\n if metric_type == 'counter' or metric_type == 'histogram':\n # Combine the values if it's a counter or histogram\n # TODO: sort histogram buckets\n # TODO: append _bucket to histogram bucket names\n existing_dict['values'][value] = float(\n values[value]) + float(\n existing_dict['values'][value])\n elif metric_type == 'gauge':\n # use the last value we see for a gauge - # TODO: is there a better way? we could average it\n existing_dict['values'][value] = float(values[value])\n else:\n existing_dict['values'][value] = float(values[value])\n metric_dict['values'] = existing_dict['values']\n\n if metric_type == 'histogram':\n # json decode all of the labels\n samples = [json.loads(x, object_pairs_hook=OrderedDict) for x in metric_dict['values']]\n # we need to sort the values by the bucket labeled \"le\"\n sorted_keys = sorted(samples, key=lambda b: b['le'])\n # and then we need to store the values again json encoded\n vals = metric_dict['values']\n metric_dict['values'] = OrderedDict()\n for k in sorted_keys:\n kn = json.dumps(k, sort_keys=True)\n metric_dict['values'][kn] = vals[kn]\n\n return metric_dict", "def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset", "def log_metadata(self, label, value):\n self.__metadata[label].append(value)", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def log_metric(name, values, tags={}):\n value_list = []\n for key in sorted(values.keys()):\n value = values[key]\n value_list.append(f\"{key}:{value:7.3f}\")\n values = \", \".join(value_list)\n tag_list = []\n for key, tag in tags.items():\n tag_list.append(f\"{key}:{tag}\")\n tags = \", \".join(tag_list)\n print(\"{name:30s} - {values} ({tags})\".format(name=name, values=values, tags=tags))", "def save_scalar(step, name, value, writer):\n summary = tf.Summary()\n summary_value = summary.value.add()\n summary_value.simple_value = float(value)\n summary_value.tag = name\n writer.add_summary(summary, step)", "def add_metric(self, metric_class, namespace, name, value=1.0, tags=None, interval=None):\n # type: (Type[Metric], str, str, float, MetricTagType, Optional[float]) 
-> None\n metric_id = Metric.get_id(name, namespace, tags, metric_class.metric_type)\n if metric_class is DistributionMetric:\n metrics_type_payload = TELEMETRY_TYPE_DISTRIBUTION\n else:\n metrics_type_payload = TELEMETRY_TYPE_GENERATE_METRICS\n\n with self._lock:\n existing_metric = self._metrics_data[metrics_type_payload][namespace].get(metric_id)\n if existing_metric:\n existing_metric.add_point(value)\n else:\n new_metric = metric_class(namespace, name, tags=tags, common=True, interval=interval)\n new_metric.add_point(value)\n self._metrics_data[metrics_type_payload][namespace][metric_id] = new_metric", "def add(self, val):\n key = self.get_key(val)\n self.store.add(key)\n\n # Keep track of summary stats\n self._count += 1\n self._sum += val\n if val < self._min:\n self._min = val\n if val > self._max:\n self._max = val", "def writeSummary(self, value, tag, summaryWriter, global_step):\n\n summary = tf.Summary()\n summary.value.add(tag=tag, simple_value=value)\n summaryWriter.add_summary(summary, global_step)", "def record_count(self, name, value, tags=None):\n identity = self.create_identity(name, tags, \"count\")\n with self._lock:\n self._batch[identity] = self._batch.get(identity, 0) + value", "def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def update(self, current_iter, *metrics, **named_metrics):\n\n # Same order as __init__() in python>=3.6\n if len(metrics) > 0:\n for key, metric in zip(self.metrics.keys(), metrics):\n self.metrics[key].append((current_iter, metric))\n \n # Random order with names\n elif len(named_metrics) > 0:\n for name, metric in named_metrics.item():\n self.metrics[name].append((metric))\n\n else:\n raise ValueError(\"No valid value to update losses\")", "def record_apdex_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Note that because we are using a scope here of an empty string\n # we can potentially clash with an unscoped metric. 
Using None,\n # although it may help to keep them separate in the agent will\n # not make a difference to the data collector which treats None\n # as an empty string anyway.\n\n key = (metric.name, '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = ApdexStats(apdex_t=metric.apdex_t)\n self.__stats_table[key] = stats\n stats.merge_apdex_metric(metric)\n\n return key", "def counter(self, metric_name, value=1):\n if self._send_sampled_event():\n counter = \"%s%s:%d|c|@%s\" % (self.metric_name_prepend, metric_name,\n value, self.statsd_sample_rate)\n self._send_events([counter])", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def set_measured_value(self, value_sig1, value_sig2):\n self.entry_measured_value_sig1.set(value_sig1)\n self.entry_measured_value_sig2.set(value_sig2)", "def add_key_value(self, key, value):\n key = self._metadata_map().get(key, key)\n if key in ['dateAdded', 'lastModified']:\n self._data[key] = self.util.any_to_datetime(value).strftime('%Y-%m-%dT%H:%M:%SZ')\n elif key == 'confidence':\n self._data[key] = int(value)\n elif key == 'rating':\n self._data[key] = float(value)\n elif key == 'unique_id':\n self._unique_id = quote(self.fully_decode_uri(value), safe='')\n else:\n self._data[key] = value", "def log_scalar(self, tag, value, step):\n\n summary = tf.Summary(\n value=[tf.Summary.Value(tag=tag, simple_value=value)])\n self.writer.add_summary(summary, step)\n self.writer.flush()", "def _append_value(self, stream, value):\n if FLAGS.timestamp:\n x_val = float(time.time())\n stream['x'].append(x_val)\n\n y_val = float(value)\n stream['y'].append(y_val)", "def _add_to_queue(key, value, step, time, run_id):\n met = Metric(key=key, value=value, timestamp=time, step=step)\n _metric_queue.append((run_id, met))\n if len(_metric_queue) > _MAX_METRIC_QUEUE_SIZE:\n _thread_pool.submit(_flush_queue)", "def sendMeasurement(self, metric, value, source, timestamp=None):\n sys.stdout.write('{0} {1} {2} {3}\\n'.format(metric, value, source, timestamp).decode('utf-8'))\n sys.stdout.flush()", "def append_to_list(self, metric_value_to_append):\n if type(metric_value_to_append)==MetricValue:\n self.__metric_value_list.append(metric_value_to_append)\n else:\n print(\"appended object must be a MetricValue, metric_value_to_append=\",metric_value_to_append)\n sys.exit() # stop entire program, because metric_value_to_append MUST be correct", "def set_aggregate_data(self, event_name, value, key=None):\n \n raise NotImplementedError()", "def update(self, step, metrics):\n self.steps_arr.append(step)\n for key, val in metrics.items():\n if isinstance(val, tf.Tensor):\n try:\n self.data[key].append(val.numpy())\n except KeyError:\n self.data[key] = [val.numpy()]", "def increment(self) -> None:\n self._increment_called = True\n self.append(deepcopy(self._base_metric))", "def push(self, value):\n self.values.append((time.time(), value))", "def log_metric(data_category, key, value):\n # always, just print in logs\n log(logging.INFO, data_category, \"AML Metric({}={})\".format(key, value))\n if data_category == DataCategory.ONLY_PUBLIC_DATA:\n # if public, ask azureml to record (if azureml attached)\n run = AmlRunWrapper()\n run.setup(attach=True)\n run.log(key, value)\n run.flush()", "def average(self, key, value):\n self._average_metrics[key] += value\n self._average_metrics_count[key] += 1", "def record_sensor_readings(data_dict, metric_list, output_file_name):\n sensor_vals = []\n # going though metric_list to keep order 
consistent\n for metric in metric_list:\n if metric in data_dict:\n sensor_vals.append(str(data_dict[metric]))\n else:\n # value not recorded properly\n sensor_vals.append(\"null\")\n vals = \",\".join(sensor_vals)\n\n # write to file\n # TODO: keep file open for duration of the drive to avoid re-opening it at each iteration\n with open(output_file_name, 'a') as file:\n file.write(vals+\"\\n\")", "def save_scalars(self, step, metrics):\n\n # Save\n with self.summary_writer.as_default():\n for name, value in metrics.items():\n tf.summary.scalar(name, value, step=step)", "def append_to(self, key, value):\n self.get_data()[key].append(value.get_data())", "def save_data(self, gauge_name, date_key, data):\n pass", "def send(self, name, value, dimensions=None, sample_rate=1):\n\n self._connection.report(metric=self.update_name(name),\n metric_type='s',\n value=value,\n dimensions=self.update_dimensions(dimensions),\n sample_rate=sample_rate)", "def gauge(self, gauge, value):\n if self.ignore_metrics:\n return\n\n with self._gauge_rlock:\n self._gauge_metrics[gauge] = value\n self._gauge_call_count += 1\n\n old_call_time = self._gauge_last_call_time\n self._gauge_last_call_time = arrow.utcnow().timestamp\n if (self._gauge_call_count == self._max_call_count > 0) or \\\n self._gauge_last_call_time - old_call_time > self._max_time_between_calls > 0:\n self._gauge_call_count = 0\n self.update_gauge()", "def __add_one_day_values__(self):\n values = self.values()\n for value in values:\n ls = []\n if value.label in self.values_dict:\n ls = self.values_dict[value.label]\n ls.append(value)\n else:\n ls = [value]\n self.values_dict[value.label] = ls", "def set_gauge_value(self, name: str, value: float | None, delta: bool, tags: Attributes):\n key: str = _generate_key_name(name, tags)\n new_value = value or DEFAULT_GAUGE_VALUE\n old_value = self.poke_gauge(name, tags)\n if delta:\n new_value += old_value\n # If delta is true, add the new value to the last reading otherwise overwrite it.\n self.map[key] = Observation(new_value, tags)", "def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def sensorValue(self, key=None, value=None, timestamp=None):\n\n try:\n self.logger.debug('logging trigger value for {0} with value {1}'.format(key, value))\n Sensor.get_by_id(key).add_value(value)\n except Exception as e:\n self.logger.warn('Something went wrong registering trigger value for {0}: {1}'.format(key, e))\n else:\n # lauch trigger checks\n self.logger.debug('posting sensordata to trigger processor')\n self.triggerqueue.put((\"sensor\", key, value))", "def add_value(self, value):\r\n self.resource_records.append(value)", "def mark_point(\n metric: str,\n value: float,\n result: Literal[\"SUM\", \"AVG\"] = \"SUM\",\n timestamp: Optional[float] = None,\n):\n now = int(time.time())\n current_minute_tstamp = timestamp or (now - (now % 60))\n key_name = f\"{Monitoring.ACC_PREFIX}_{current_minute_tstamp}_{metric}\"\n prefix = [\n metric,\n result,\n \"FLOAT\" if isinstance(value, float) else \"INT\",\n ]\n\n # create key and set expiry\n redis_client.set(key_name, \"|\".join(prefix), ex=120, nx=True)\n redis_client.append(key_name, f\"|{value}\")", "def 
_submit_gauge(self, metric_name, val, metric, custom_tags=None, hostname=None):\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.gauge('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)", "def state_metric(self, key: str, value: str, dimensions: Dict[str, str] = None):\n self._results_builder.add_absolute_result(PluginStateMetric(key=key, value=value, dimensions=dimensions,\n entity_selector=self.selector))", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def __setitem__(self, key, val):\n extract = lambda t: t.item() if type(t) is torch.Tensor else t\n\n if type(val) is dict:\n for k, v in val.items():\n self.log_scalar(k, extract(v), 'last')\n else:\n self.log_scalar(key, extract(val), 'last')", "def write_summary(value, tag, summary_writer, global_step):\n summary = tf.Summary()\n summary.value.add(tag=tag, simple_value=value)\n summary_writer.add_summary(summary, global_step)", "def lambda_metric(metric_name, value, timestamp=None, tags=None):\n tags = _tag_dd_lambda_layer(tags)\n if os.environ.get(\"DD_FLUSH_TO_LOG\", \"\").lower() == \"true\":\n logger.debug(\"Sending metric %s to Datadog via log forwarder\", metric_name)\n print(\n json.dumps(\n {\n \"m\": metric_name,\n \"v\": value,\n \"e\": timestamp or int(time.time()),\n \"t\": tags,\n }\n )\n )\n else:\n logger.debug(\"Sending metric %s to Datadog via lambda layer\", metric_name)\n lambda_stats.distribution(metric_name, value, timestamp=timestamp, tags=tags)", "def __write_value(self, group: h5py.Group, name: str, value: np.ndarray):\n try:\n normalized = normalize_attr_values(value)\n except Exception as ex:\n raise ValueError(f'Could normalize {type(value)}(key \"{name}\")') from ex\n\n if np.isscalar(normalized) or normalized.dtype == np.object_:\n group[name] = normalized\n else:\n self.__write_array(group, name, normalized)", "def add_value(self, value):\n if len(self.hist) < 2:\n BaseFilter.add_value(self, value)\n else:\n filtered_value = self.hist[-1] * self.alpha + value * (1.0 - self.alpha)\n BaseFilter.add_value(self, filtered_value)", "def record_time_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Scope is forced to be empty string if None as\n # scope of None is reserved for apdex metrics.\n\n key = (metric.name, metric.scope or '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = TimeStats(call_count=1,\n total_call_time=metric.duration,\n total_exclusive_call_time=metric.exclusive,\n min_call_time=metric.duration,\n max_call_time=metric.duration,\n sum_of_squares=metric.duration ** 2)\n self.__stats_table[key] = stats\n else:\n stats.merge_time_metric(metric)\n\n return key", "def submit_metric(self, metric_suffix, metric, scraper_config, gauge=True, monotonic_count=True):\n metric_name = scraper_config['namespace'] + metric_suffix\n for sample in metric.samples:\n # Explicit shallow copy of the instance tags\n _tags = list(scraper_config['custom_tags'])\n\n for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):\n _tags.append('{}:{}'.format(label_name, label_value))\n if gauge:\n # submit raw metric\n self.gauge(metric_name, sample[self.SAMPLE_VALUE], 
_tags)\n if monotonic_count:\n # submit rate metric\n self.monotonic_count(metric_name + '.count', sample[self.SAMPLE_VALUE], _tags)", "def AddSample(self, machine, timestamp, value):\n self.machine_data.setdefault(machine, list()).append([timestamp, value])\n if len(self.cluster_total) == 0 or timestamp > self.cluster_total[-1][0]:\n self.cluster_total.append([timestamp, 0])\n self.cluster_avg.append([timestamp, 0])\n self.cluster_total[-1][1] += value\n self.cluster_avg[-1][1] = self.cluster_total[-1][1] / float(len(self.machine_data))", "def add_metric(self, metric_fn):\n self._metrics.append(metric_fn)", "def send_metric(model_id, metric, value):\n host, port, namespace = get_metric_endpoint()\n\n metric_name = '%s.%s' % (namespace, get_metric_name(metric, model_id))\n message = \"%s %f %d\\n\" % (metric_name, float(value), int(time.time()))\n send_tcp(host, port, message)\n\n build_no = get_build_number()\n metric_name = '%s.%s' % (namespace, get_metric_name('build', model_id))\n message = \"%s %f %d\\n\" % (metric_name, build_no, int(time.time()))\n send_tcp(host, port, message)", "def process(self, key, value):\n if key not in self.counts:\n self.counts[key] = 0.0\n self.counts[key] += value", "def add(self, value):\n self._resolve_copies()\n self.data.append(value)", "def metrics(self):\n \n if self.mse.shape[0]>1:\n raise ValueError('Metrics can only handle single observations.')\n \n if self.N==1:\n pred = float('nan')\n err = float('nan')\n y_true = float('nan')\n else:\n pred = int(self._predictions[-1])\n err = self._mse[-1]\n y_true = int(self.label[0])\n \n is_outlier = {\"type\":\"GAUGE\",\"key\":\"is_outlier\",\"value\":pred}\n mse = {\"type\":\"GAUGE\",\"key\":\"mse\",\"value\":err}\n obs = {\"type\":\"GAUGE\",\"key\":\"observation\",\"value\":self.N - 1}\n threshold = {\"type\":\"GAUGE\",\"key\":\"threshold\",\"value\":self.threshold}\n \n label = {\"type\":\"GAUGE\",\"key\":\"label\",\"value\":y_true}\n \n accuracy_tot = {\"type\":\"GAUGE\",\"key\":\"accuracy_tot\",\"value\":self.metric[4]}\n precision_tot = {\"type\":\"GAUGE\",\"key\":\"precision_tot\",\"value\":self.metric[5]}\n recall_tot = {\"type\":\"GAUGE\",\"key\":\"recall_tot\",\"value\":self.metric[6]}\n f1_score_tot = {\"type\":\"GAUGE\",\"key\":\"f1_tot\",\"value\":self.metric[7]}\n f2_score_tot = {\"type\":\"GAUGE\",\"key\":\"f2_tot\",\"value\":self.metric[8]}\n \n accuracy_roll = {\"type\":\"GAUGE\",\"key\":\"accuracy_roll\",\"value\":self.metric[9]}\n precision_roll = {\"type\":\"GAUGE\",\"key\":\"precision_roll\",\"value\":self.metric[10]}\n recall_roll = {\"type\":\"GAUGE\",\"key\":\"recall_roll\",\"value\":self.metric[11]}\n f1_score_roll = {\"type\":\"GAUGE\",\"key\":\"f1_roll\",\"value\":self.metric[12]}\n f2_score_roll = {\"type\":\"GAUGE\",\"key\":\"f2_roll\",\"value\":self.metric[13]}\n \n true_negative = {\"type\":\"GAUGE\",\"key\":\"true_negative\",\"value\":self.metric[0]}\n false_positive = {\"type\":\"GAUGE\",\"key\":\"false_positive\",\"value\":self.metric[1]}\n false_negative = {\"type\":\"GAUGE\",\"key\":\"false_negative\",\"value\":self.metric[2]}\n true_positive = {\"type\":\"GAUGE\",\"key\":\"true_positive\",\"value\":self.metric[3]}\n \n nb_outliers_roll = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_roll\",\"value\":self.metric[14]}\n nb_labels_roll = {\"type\":\"GAUGE\",\"key\":\"nb_labels_roll\",\"value\":self.metric[15]}\n nb_outliers_tot = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_tot\",\"value\":self.metric[16]}\n nb_labels_tot = 
{\"type\":\"GAUGE\",\"key\":\"nb_labels_tot\",\"value\":self.metric[17]}\n \n return [is_outlier,mse,obs,threshold,label,\n accuracy_tot,precision_tot,recall_tot,f1_score_tot,f2_score_tot,\n accuracy_roll,precision_roll,recall_roll,f1_score_roll,f2_score_roll,\n true_negative,false_positive,false_negative,true_positive,\n nb_outliers_roll,nb_labels_roll,nb_outliers_tot,nb_labels_tot]", "def value(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Summary.Value]:", "def put_metric(cw_metric_name, statistic_value, config=None,\n cw_dimension_name=None, cw_namespace=None):\n\n try:\n if config:\n session = config.boto3_session()\n cw_dimension_name = config.cw_dimension_name\n cw_namespace = config.cw_namespace\n region = config.region\n else:\n session = Config.boto3_session()\n except:\n logger.exception(\"\")\n sys.exit(127)\n\n if not cw_dimension_name or not cw_metric_name:\n raise ValueError(\"You have to specify at least\\\n cw_dimension_name or config parameter\")\n\n cw = session.resource('cloudwatch', region_name=region)\n try:\n float(statistic_value)\n except ValueError:\n logger.error(\"Statistic value not convertible to float.\")\n return False\n\n try:\n if statistic_value == 0:\n statistic_value = 0.1\n\n cw.Metric(cw_namespace, cw_metric_name).put_data(\n MetricData=[\n {\n 'MetricName': cw_metric_name,\n 'Dimensions': [\n {\n 'Name': cw_dimension_name,\n 'Value': cw_metric_name\n }\n ],\n 'StatisticValues': {\n 'SampleCount': statistic_value,\n 'Sum': statistic_value,\n 'Minimum': statistic_value,\n 'Maximum': statistic_value\n },\n 'Unit': 'Count',\n 'StorageResolution': 1\n }\n ]\n )\n except:\n logger.exception(\"\")", "def record(self, config, value, time_ms):\n raise NotImplementedError", "def accumulate(self, value):\n inc_counter_op = smart_assign(self._counter, 1.0, assign_fn=tf.assign_add)\n acc_op = smart_assign(self._acc_var, value, assign_fn=tf.assign_add)\n return tf.group(inc_counter_op, acc_op)", "def _submit_rate(self, metric_name, val, metric, custom_tags=None, hostname=None):\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.rate('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)", "def add_metric(self, metric_name, aggregate=None):\n\n clean_metric = metric_name.lower().strip()\n\n if clean_metric.lower() not in METRICS:\n raise Exception(\"Metric named: \" + metric_name + \" is not a valid benchmark metric.\")\n self.metrics.add(clean_metric)\n\n if not aggregate:\n self.raw_metrics.add(clean_metric)\n elif aggregate.lower().strip() in AGGREGATES:\n # Add aggregate to this metric\n clean_aggregate = aggregate.lower().strip()\n current_aggregates = self.aggregated_metrics.get(clean_metric, list())\n current_aggregates.append(clean_aggregate)\n self.aggregated_metrics[clean_metric] = current_aggregates\n else:\n raise Exception(\"Aggregate function \" + aggregate + \" is not a legal aggregate function name\");\n\n return self;", "def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = 
set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics", "def add(self, value, source=None, **params):\n\t\treturn self.connection.send_gauge_value(self.name, value, source, **params)", "def max(self, key, value):\n self._metrics[key] = max(value, self._metrics[key])", "def write_value(message):\n tag_definition = WRITE_TABLE.get(message.topic.strip(base_topic))\n if tag_definition:\n string_value = message.payload.decode(\"utf-8\")\n value = tag_definition.convertion(string_value)\n _LOGGER.debug(\"write value %s : %s => address : %s = %s\",\n message.topic.strip(base_topic), string_value,\n tag_definition.address, value)\n if value is not None:\n instrument.write_registers(tag_definition.address, value)", "def add_value_mean(cls, sensor, values, device_id):\n if values[device_id] is not None:\n if sensor == 't':\n cls.mean_t.append(int(values[device_id][sensor]))\n if sensor == 'l':\n cls.mean_l.append(int(values[device_id][sensor]))", "def set_metric(self, slug, value, category=None, expire=None, date=None):\n keys = self._build_keys(slug, date=date)\n\n # Add the slug to the set of metric slugs\n self.r.sadd(self._metric_slugs_key, slug)\n\n # Construct a dictionary of key/values for use with mset\n data = {}\n for k in keys:\n data[k] = value\n self.r.mset(data)\n\n # Add the category if applicable.\n if category:\n self._categorize(slug, category)\n\n # Expire the Metric in ``expire`` seconds if applicable.\n if expire:\n for k in keys:\n self.r.expire(k, expire)", "def process(self, metric):\n self.metrics.append(metric)\n if self.should_flush():\n self._send()" ]
[ "0.72274554", "0.69380003", "0.68785536", "0.683132", "0.6480525", "0.6403055", "0.6216645", "0.61926883", "0.61354136", "0.6075729", "0.6063117", "0.6036601", "0.59779376", "0.5955456", "0.5926401", "0.5891051", "0.5852286", "0.5779409", "0.5769437", "0.576314", "0.5720652", "0.5690581", "0.56863165", "0.5683876", "0.56515497", "0.5644724", "0.56393", "0.5608614", "0.56070733", "0.5599034", "0.5587598", "0.5528484", "0.54964286", "0.54964286", "0.54686004", "0.5462526", "0.5431433", "0.54243296", "0.54224724", "0.53968173", "0.53868", "0.53861463", "0.5385953", "0.5352787", "0.53327864", "0.5324448", "0.5318853", "0.5296961", "0.5282978", "0.52791095", "0.52579117", "0.5255756", "0.5252722", "0.52512765", "0.52296764", "0.52214175", "0.5204215", "0.5199083", "0.5192064", "0.5190996", "0.5187085", "0.51870614", "0.51799417", "0.5176846", "0.5162781", "0.51475537", "0.5120476", "0.5112968", "0.51118153", "0.5104258", "0.51001614", "0.5093191", "0.50917906", "0.50709873", "0.5069684", "0.50609124", "0.5047125", "0.503507", "0.5022811", "0.5021153", "0.5020783", "0.5018685", "0.50170165", "0.5008193", "0.5005783", "0.49933925", "0.49902523", "0.4981031", "0.49747726", "0.49653327", "0.49646652", "0.49561766", "0.4955589", "0.4948064", "0.4946329", "0.4935785", "0.4934192", "0.4933622", "0.49304634", "0.49287945" ]
0.6751963
4
Record the value metrics supplied by the iterable, merging the data with any data from prior value metrics with the same name.
def record_custom_metrics(self, metrics): if not self.__settings: return for name, value in metrics: self.record_custom_metric(name, value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, current_iter, *metrics, **named_metrics):\n\n # Same order as __init__() in python>=3.6\n if len(metrics) > 0:\n for key, metric in zip(self.metrics.keys(), metrics):\n self.metrics[key].append((current_iter, metric))\n \n # Random order with names\n elif len(named_metrics) > 0:\n for name, metric in named_metrics.item():\n self.metrics[name].append((metric))\n\n else:\n raise ValueError(\"No valid value to update losses\")", "def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]", "def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset", "def record_metrics(self, pid, metrics):\n\n for _metric, _metric_value in metrics.items():\n if not self.__metrics_history[pid].get(_metric):\n self.__metrics_history[pid][_metric] = []\n self.__metrics_history[pid][_metric].append(_metric_value)\n # only keep the last 2 running history for any metric\n self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][\n -2:\n ]", "def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)", "def update(self, step, metrics):\n self.steps_arr.append(step)\n for key, val in metrics.items():\n if isinstance(val, tf.Tensor):\n try:\n self.data[key].append(val.numpy())\n except KeyError:\n self.data[key] = [val.numpy()]", "def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def record_sensor_readings(data_dict, metric_list, output_file_name):\n sensor_vals = []\n # going though metric_list to keep order consistent\n for metric in metric_list:\n if metric in data_dict:\n sensor_vals.append(str(data_dict[metric]))\n else:\n # value not recorded properly\n sensor_vals.append(\"null\")\n vals = \",\".join(sensor_vals)\n\n # write to file\n # TODO: keep file open for duration of the drive to avoid re-opening it at each iteration\n with open(output_file_name, 'a') as file:\n file.write(vals+\"\\n\")", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in 
separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics", "def increment(self) -> None:\n self._increment_called = True\n self.append(deepcopy(self._base_metric))", "def _log_metrics(self, logs, prefix, step):\r\n if logs is None:\r\n logs = {}\r\n\r\n # Group metrics by the name of their associated file writer. Values\r\n # are lists of metrics, as (name, scalar_value) pairs.\r\n logs_by_writer = {\r\n self._train_run_name: [],\r\n self._validation_run_name: [],\r\n }\r\n validation_prefix = 'val_'\r\n for (name, value) in logs.items():\r\n if name in ('batch', 'size', 'num_steps'):\r\n # Scrub non-metric items.\r\n continue\r\n if name.startswith(validation_prefix):\r\n name = name[len(validation_prefix):]\r\n writer_name = self._validation_run_name\r\n else:\r\n writer_name = self._train_run_name\r\n name = prefix + name # assign batch or epoch prefix\r\n logs_by_writer[writer_name].append((name, value))\r\n\r\n with context.eager_mode():\r\n with summary_ops_v2.always_record_summaries():\r\n for writer_name in logs_by_writer:\r\n these_logs = logs_by_writer[writer_name]\r\n if not these_logs:\r\n # Don't create a \"validation\" events file if we don't\r\n # actually have any validation data.\r\n continue\r\n writer = self._get_writer(writer_name)\r\n with writer.as_default():\r\n for (name, value) in these_logs:\r\n summary_ops_v2.scalar(name, value, step=step)", "def log_batch(self, measurements):\n for m in measurements:\n logger.info(m)\n self.log(metric=m.metric, value=m.value, source=m.source, timestamp=m.timestamp)", "def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])", "def record_custom_metric(self, name, value):\n key = (name, '')\n\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(key)\n if stats is None:\n self.__stats_table[key] = new_stats\n else:\n 
stats.merge_stats(new_stats)\n\n return key", "def record_summary(self, name, value, tags=None):\n identity = self.create_identity(name, tags, \"summary\")\n with self._lock:\n if identity in self._batch:\n merged_value = self._batch[identity]\n merged_value[\"count\"] += 1\n merged_value[\"sum\"] += value\n merged_value[\"min\"] = min(value, merged_value[\"min\"])\n merged_value[\"max\"] = max(value, merged_value[\"max\"])\n else:\n value = {\"count\": 1, \"sum\": value, \"min\": value, \"max\": value}\n self._batch[identity] = value", "def save_scalars(self, step, metrics):\n\n # Save\n with self.summary_writer.as_default():\n for name, value in metrics.items():\n tf.summary.scalar(name, value, step=step)", "def flush(self):\n with self._lock:\n batch = self._batch\n timestamps = self._timestamps\n\n items = []\n for identity, value in batch.items():\n metric = {}\n typ, name, tags = identity\n metric[\"name\"] = name\n if typ:\n metric[\"type\"] = typ\n else:\n metric[\"timestamp\"] = timestamps[identity]\n\n if tags:\n metric[\"attributes\"] = dict(tags)\n\n metric[\"value\"] = value\n items.append(metric)\n\n items = tuple(items)\n\n batch.clear()\n timestamps.clear()\n\n common = self._common.copy()\n common[\"timestamp\"] = self._interval_start\n now = int(time.time() * 1000.0)\n interval = now - self._interval_start\n common[\"interval.ms\"] = interval\n\n self._interval_start = now\n\n return items, common", "def accum_val_ops(outputs, names, global_step, output_dir, metric_summary, N):\n outs = []\n if N >= 0:\n outputs = outputs[:N]\n for i in range(len(outputs[0])):\n scalar = np.array(map(lambda x: x[i], outputs))\n assert (scalar.ndim == 1)\n add_value_to_summary(metric_summary, names[i], np.mean(scalar),\n tag_str='{:>27s}: [{:s}]: %f'.format(names[i], ''))\n outs.append(np.mean(scalar))\n return outs", "def put(self, metric, values, timestamp=None):\n if timestamp is None:\n timestamp = time.time()\n now_date = datetime.datetime.fromtimestamp(timestamp)\n\n if self.last is None:\n self.last = timestamp\n return\n\n self.last = timestamp\n\n values = [str(d) for d in [now_date, timestamp]+values]\n\n with open(self.filename, \"at\") as df:\n df.write(\"{}\\n\".format(\",\".join(values)))", "def write(self, writer: SummaryWriter, epoch = None, use_metric_name = True, prefix = \"\", show=False):\n\n all_data = defaultdict()\n for metric in self.metrics.values():\n data = metric.get_data()\n for name in data:\n #print(type(data[name]))\n #print(data[name])\n if type(data[name]) == list: continue\n\n try:\n if use_metric_name:\n final_name = metric.name()+'/'+name\n else:\n final_name = name\n\n if prefix:\n final_name = prefix+\"/\"+final_name\n\n writer.add_scalar(final_name, data[name], epoch)\n all_data[name] = data[name]\n #Hard to get length of pieces of data if more than one and not lists, but still ndarray/etc. 
bah.\n except Exception:\n continue\n if self.loss:\n writer.add_scalar(prefix+\"/loss\", self.data['loss'], epoch)\n all_data['loss'] = self.data['loss']\n return all_data", "def collect_value(self, name_to_arr):\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr) # noqa: PLW2901\n data_arr = data_arr.flatten() # noqa: PLW2901\n\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n\n threshold = max(abs(min_value), abs(max_value))\n\n if tensor in self.histogram_dict:\n old_histogram = self.histogram_dict[tensor]\n self.histogram_dict[tensor] = self.merge_histogram(\n old_histogram, data_arr, min_value, max_value, threshold\n )\n else:\n hist, hist_edges = np.histogram(data_arr, self.num_bins, range=(-threshold, threshold))\n self.histogram_dict[tensor] = (\n hist,\n hist_edges,\n min_value,\n max_value,\n threshold,\n )", "def log_metric(self, name: str, value):\n self.metrics[name] = value\n\n self._sync_log_event()", "def __add_one_day_values__(self):\n values = self.values()\n for value in values:\n ls = []\n if value.label in self.values_dict:\n ls = self.values_dict[value.label]\n ls.append(value)\n else:\n ls = [value]\n self.values_dict[value.label] = ls", "def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n for metric in self.metrics:\n metric.update(data)\n\n return self", "def get_next_batch(self):\n\n metrics = {}\n for struct in self.metrics.values():\n metrics = {**metrics, **struct.get_next_batch()}\n\n return metrics", "def compute_metrics(self):\n self.finalize_output_dict()\n self.metric_dict = {\n key: value(self.output_dict[\"labels\"], self.output_dict[\"pred_probs\"])\n for key, value in self.metric_fns.items()\n }", "def update(self, current, values=None, finalize=None):\n if finalize is None:\n if self.target is None:\n finalize = False\n else:\n finalize = current >= self.target\n\n values = values or []\n for k, v in values:\n if k not in self._values_order:\n self._values_order.append(k)\n if k not in self.stateful_metrics:\n # In the case that progress bar doesn't have a target value in the first\n # epoch, both on_batch_end and on_epoch_end will be called, which will\n # cause 'current' and 'self._seen_so_far' to have the same value. Force\n # the minimal value to 1 here, otherwise stateful_metric will be 0s.\n value_base = max(current - self._seen_so_far, 1)\n if k not in self._values:\n self._values[k] = [v * value_base, value_base]\n else:\n self._values[k][0] += v * value_base\n self._values[k][1] += value_base\n else:\n # Stateful metrics output a numeric value. This representation\n # means \"take an average from a single value\" but keeps the\n # numeric formatting.\n self._values[k] = [v, 1]\n self._seen_so_far = current\n\n now = time.time()\n info = ' - %.0fs' % (now - self._start)\n if self.verbose == 1:\n if now - self._last_update < self.interval and not finalize:\n return\n\n prev_total_width = self._total_width\n if self._dynamic_display:\n sys.stdout.write('\\b' * prev_total_width)\n sys.stdout.write('\\r')\n else:\n sys.stdout.write('\\n')\n\n if self.target is not None:\n numdigits = int(np.log10(self.target)) + 1\n bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target)\n prog = float(current) / self.target\n prog_width = int(self.width * prog)\n if prog_width > 0:\n bar += ('=' * (prog_width - 1))\n if current < self.target:\n bar += '>'\n else:\n bar += '='\n bar += ('.' 
* (self.width - prog_width))\n bar += ']'\n else:\n bar = '%7d/Unknown' % current\n\n self._total_width = len(bar)\n sys.stdout.write(bar)\n\n if current:\n time_per_unit = (now - self._start) / current\n else:\n time_per_unit = 0\n\n if self.target is None or finalize:\n if time_per_unit >= 1 or time_per_unit == 0:\n info += ' %.0fs/%s' % (time_per_unit, self.unit_name)\n elif time_per_unit >= 1e-3:\n info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name)\n else:\n info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name)\n else:\n eta = time_per_unit * (self.target - current)\n if eta > 3600:\n eta_format = '%d:%02d:%02d' % (eta // 3600,\n (eta % 3600) // 60, eta % 60)\n elif eta > 60:\n eta_format = '%d:%02d' % (eta // 60, eta % 60)\n else:\n eta_format = '%ds' % eta\n\n info = ' - ETA: %s' % eta_format\n\n for k in self._values_order:\n info += ' - %s:' % k\n if isinstance(self._values[k], list):\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if abs(avg) > 1e-3:\n info += ' %.4f' % avg\n else:\n info += ' %.4e' % avg\n else:\n info += ' %s' % self._values[k]\n\n self._total_width += len(info)\n if prev_total_width > self._total_width:\n info += (' ' * (prev_total_width - self._total_width))\n\n if finalize:\n info += '\\n'\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n elif self.verbose == 2:\n if finalize:\n numdigits = int(np.log10(self.target)) + 1\n count = ('%' + str(numdigits) + 'd/%d') % (current, self.target)\n info = count + info\n for k in self._values_order:\n info += ' - %s:' % k\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if avg > 1e-3:\n info += ' %.4f' % avg\n else:\n info += ' %.4e' % avg\n info += '\\n'\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n self._last_update = now", "def _evaluate(dataset: dict, name: str, metrics=None):\n if metrics is None:\n metrics = ['Accuracy', 'AUROC', 'AUPRC', 'Precision', 'Recall', 'F1', 'F2']\n measures = [dataset[metric] for metric in metrics]\n measures.insert(0, name)\n return measures", "def _store_rows(self):\n\n for value in self.values:\n self.counters.append(value['counter'])\n self.timestamps.append(value['timestamp'])\n self.acceleration.append(value['acceleration'])", "def observe(self, scores, **fields):\n [self._scores.append({'value': s, 'index': i, **fields}) for s, i in zip(scores, self.index_scores)]\n for s in self.statistics:\n v = self.statistics[s](scores)\n self._results.append({'statistics': s, 'value': v, **fields})", "def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with self._lock:\n self._batch[identity] = value\n self._timestamps[identity] = int(time.time() * 1000.0)", "def after_val_epoch(self,\n runner,\n metrics: Optional[Dict[str, float]] = None) -> None:\n tag, log_str = runner.log_processor.get_log_after_epoch(\n runner, len(runner.val_dataloader), 'val')\n runner.logger.info(log_str)\n if self.log_metric_by_epoch:\n # Accessing the epoch attribute of the runner will trigger\n # the construction of the train_loop. 
Therefore, to avoid\n # triggering the construction of the train_loop during\n # validation, check before accessing the epoch.\n if (isinstance(runner._train_loop, dict)\n or runner._train_loop is None):\n epoch = 0\n else:\n epoch = runner.epoch\n runner.visualizer.add_scalars(\n tag, step=epoch, file_path=self.json_log_path)\n else:\n if (isinstance(runner._train_loop, dict)\n or runner._train_loop is None):\n iter = 0\n else:\n iter = runner.iter\n runner.visualizer.add_scalars(\n tag, step=iter, file_path=self.json_log_path)", "def log_metric(name, values, tags={}):\n value_list = []\n for key in sorted(values.keys()):\n value = values[key]\n value_list.append(f\"{key}:{value:7.3f}\")\n values = \", \".join(value_list)\n tag_list = []\n for key, tag in tags.items():\n tag_list.append(f\"{key}:{tag}\")\n tags = \", \".join(tag_list)\n print(\"{name:30s} - {values} ({tags})\".format(name=name, values=values, tags=tags))", "def metrics_group():", "def log_metric(self, name, val, step):\n raise NotImplementedError", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def get_metric_vals(self, metrics_list, include_val_metrics=False):\n metrics_list2 = [\"loss\"] if metrics_list is None else metrics_list\n metric_vals = {\n metric_name: self.metrics_history[metric_name][\"epoch_vals\"][-1] for metric_name in metrics_list2\n }\n if include_val_metrics:\n for metric_name in metrics_list2:\n metric_vals[f\"val_{metric_name}\"] = self.metrics_history[f\"val_{metric_name}\"][\"epoch_vals\"][-1]\n return metric_vals", "def write_metric(self, metric_name: str, metric_value: Union[float, int]):\n self._metrics.append(Metric(metric_name, metric_value))", "def store(self, **stats):\n if self.first_row:\n self.log_headers = list(stats.keys())\n for key in stats:\n assert key in self.log_headers, f\"Can't introduce a new key that you didn't include before: {key}\"\n\n # Write to output file\n if self.first_row:\n self.file_writer.writerow(self.log_headers)\n self.file_writer.writerow(stats.values())\n self.output_file.flush()\n\n # Display in stdout\n if self.log_freq > 0 and self.counter % self.log_freq == 0:\n _print_table(stats)\n\n self.first_row = False\n self.counter += 1", "def build_metrics_gauge_data(gauge_metrics):\n return [{'name': name, 'value': value} for name, value in iteritems(gauge_metrics)]", "def metric_recorded(self, record):\n if record.name in self.headers and self.current_row is not None:\n if record.name == \"learning_rate\" and not record.is_scalar:\n # record is a list of scalars\n value = \",\".join([f\"{lr:.4f}\" for lr in record.value])\n elif record.is_scalar and isinstance(record.value, int):\n value = str(record.value)\n else:\n assert record.is_scalar\n\n value = f\"{record.value:.4f}\"\n\n self.current_row[record.name] = value", "def metrics(self):\n \n if self.mse.shape[0]>1:\n raise ValueError('Metrics can only handle single observations.')\n \n if self.N==1:\n pred = float('nan')\n err = float('nan')\n y_true = float('nan')\n else:\n pred = int(self._predictions[-1])\n err = self._mse[-1]\n y_true = int(self.label[0])\n \n is_outlier = {\"type\":\"GAUGE\",\"key\":\"is_outlier\",\"value\":pred}\n mse = {\"type\":\"GAUGE\",\"key\":\"mse\",\"value\":err}\n obs = {\"type\":\"GAUGE\",\"key\":\"observation\",\"value\":self.N - 1}\n 
threshold = {\"type\":\"GAUGE\",\"key\":\"threshold\",\"value\":self.threshold}\n \n label = {\"type\":\"GAUGE\",\"key\":\"label\",\"value\":y_true}\n \n accuracy_tot = {\"type\":\"GAUGE\",\"key\":\"accuracy_tot\",\"value\":self.metric[4]}\n precision_tot = {\"type\":\"GAUGE\",\"key\":\"precision_tot\",\"value\":self.metric[5]}\n recall_tot = {\"type\":\"GAUGE\",\"key\":\"recall_tot\",\"value\":self.metric[6]}\n f1_score_tot = {\"type\":\"GAUGE\",\"key\":\"f1_tot\",\"value\":self.metric[7]}\n f2_score_tot = {\"type\":\"GAUGE\",\"key\":\"f2_tot\",\"value\":self.metric[8]}\n \n accuracy_roll = {\"type\":\"GAUGE\",\"key\":\"accuracy_roll\",\"value\":self.metric[9]}\n precision_roll = {\"type\":\"GAUGE\",\"key\":\"precision_roll\",\"value\":self.metric[10]}\n recall_roll = {\"type\":\"GAUGE\",\"key\":\"recall_roll\",\"value\":self.metric[11]}\n f1_score_roll = {\"type\":\"GAUGE\",\"key\":\"f1_roll\",\"value\":self.metric[12]}\n f2_score_roll = {\"type\":\"GAUGE\",\"key\":\"f2_roll\",\"value\":self.metric[13]}\n \n true_negative = {\"type\":\"GAUGE\",\"key\":\"true_negative\",\"value\":self.metric[0]}\n false_positive = {\"type\":\"GAUGE\",\"key\":\"false_positive\",\"value\":self.metric[1]}\n false_negative = {\"type\":\"GAUGE\",\"key\":\"false_negative\",\"value\":self.metric[2]}\n true_positive = {\"type\":\"GAUGE\",\"key\":\"true_positive\",\"value\":self.metric[3]}\n \n nb_outliers_roll = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_roll\",\"value\":self.metric[14]}\n nb_labels_roll = {\"type\":\"GAUGE\",\"key\":\"nb_labels_roll\",\"value\":self.metric[15]}\n nb_outliers_tot = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_tot\",\"value\":self.metric[16]}\n nb_labels_tot = {\"type\":\"GAUGE\",\"key\":\"nb_labels_tot\",\"value\":self.metric[17]}\n \n return [is_outlier,mse,obs,threshold,label,\n accuracy_tot,precision_tot,recall_tot,f1_score_tot,f2_score_tot,\n accuracy_roll,precision_roll,recall_roll,f1_score_roll,f2_score_roll,\n true_negative,false_positive,false_negative,true_positive,\n nb_outliers_roll,nb_labels_roll,nb_outliers_tot,nb_labels_tot]", "def add_metrics_point(self, data_points: Dict[str, float], timestamp: float):\n for name, value in data_points.items():\n # Using in-sort to insert while maintaining sorted ordering.\n bisect.insort(a=self.data[name], x=TimeStampedValue(timestamp, value))", "def record_apdex_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_apdex_metric(metric)", "def update(self, current, values=None):\n values = values or []\n for k, v in values:\n if k not in self._values_order:\n self._values_order.append(k)\n if k not in self.stateful_metrics:\n if k not in self._values:\n self._values[k] = [\n v * (current - self._seen_so_far),\n current - self._seen_so_far,\n ]\n else:\n self._values[k][0] += v * (current - self._seen_so_far)\n self._values[k][1] += current - self._seen_so_far\n else:\n # Stateful metrics output a numeric value. 
This representation\n # means \"take an average from a single value\" but keeps the\n # numeric formatting.\n self._values[k] = [v, 1]\n self._seen_so_far = current\n\n now = time.time()\n info = \" - %.0fs\" % (now - self._start)\n if self.verbose == 1:\n if (\n now - self._last_update < self.interval\n and self.target is not None\n and current < self.target\n ):\n return\n\n prev_total_width = self._total_width\n if self._dynamic_display:\n sys.stdout.write(\"\\b\" * prev_total_width)\n sys.stdout.write(\"\\r\")\n else:\n sys.stdout.write(\"\\n\")\n\n if self.target is not None:\n numdigits = int(np.log10(self.target)) + 1\n bar = (\"%\" + str(numdigits) + \"d/%d [\") % (current, self.target)\n prog = float(current) / self.target\n prog_width = int(self.width * prog)\n if prog_width > 0:\n bar += \"=\" * (prog_width - 1)\n if current < self.target:\n bar += \">\"\n else:\n bar += \"=\"\n bar += \".\" * (self.width - prog_width)\n bar += \"]\"\n else:\n bar = \"%7d/Unknown\" % current\n\n self._total_width = len(bar)\n sys.stdout.write(bar)\n\n if current:\n time_per_unit = (now - self._start) / current\n else:\n time_per_unit = 0\n if self.target is not None and current < self.target:\n eta = time_per_unit * (self.target - current)\n if eta > 3600:\n eta_format = \"%d:%02d:%02d\" % (\n eta // 3600,\n (eta % 3600) // 60,\n eta % 60,\n )\n elif eta > 60:\n eta_format = \"%d:%02d\" % (eta // 60, eta % 60)\n else:\n eta_format = \"%ds\" % eta\n\n info = \" - ETA: %s\" % eta_format\n else:\n if time_per_unit >= 1 or time_per_unit == 0:\n info += \" %.0fs/%s\" % (time_per_unit, self.unit_name)\n elif time_per_unit >= 1e-3:\n info += \" %.0fms/%s\" % (time_per_unit * 1e3, self.unit_name)\n else:\n info += \" %.0fus/%s\" % (time_per_unit * 1e6, self.unit_name)\n\n for k in self._values_order:\n info += \" - %s:\" % k\n if isinstance(self._values[k], list):\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if abs(avg) > 1e-3:\n info += \" %.4f\" % avg\n else:\n info += \" %.4e\" % avg\n else:\n info += \" %s\" % self._values[k]\n\n self._total_width += len(info)\n if prev_total_width > self._total_width:\n info += \" \" * (prev_total_width - self._total_width)\n\n if self.target is not None and current >= self.target:\n info += \"\\n\"\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n elif self.verbose == 2:\n if self.target is not None and current >= self.target:\n numdigits = int(np.log10(self.target)) + 1\n count = (\"%\" + str(numdigits) + \"d/%d\") % (current, self.target)\n info = count + info\n for k in self._values_order:\n info += \" - %s:\" % k\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if avg > 1e-3:\n info += \" %.4f\" % avg\n else:\n info += \" %.4e\" % avg\n info += \"\\n\"\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n self._last_update = now", "def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)", "def metrics(self, metrics):\n\n self._metrics = metrics", "def iterable_hook(self, name, iterable):\n for record in iterable:\n self(name, record)\n yield record", "def __apply_accumulators():\n self.__xdata = np.array([])\n self.__ydata = np.array([])\n for acc in self.signal_accumulators:\n self.__xdata = __array_append(self.__xdata,acc.attempt)\n 
self.__ydata = __array_append(self.__ydata,acc.count)\n self.__applied = True", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def set_polling_many(self, objs_and_values):\n cache_dict = {}\n if len(objs_and_values) == 0:\n return\n first_obj = objs_and_values[0][0]\n pre_key = self._pre_key_for(first_obj)\n pre_path = os.path.join(self.rrd_root, objtype(first_obj))\n filename = '%s.rrd' % self.pk\n\n for obj, value in objs_and_values:\n obj_pk = str(obj.pk).replace(':', '')\n key = pre_key % obj_pk\n cache_dict[key] = value\n if self.rrd_enabled:\n filepath = os.path.join(pre_path, obj_pk, filename)\n try:\n # we could use os.path.exists here, but python calls stat()\n # to check and that is too slow for our use case.\n # Use an exception for such rare cases\n rrdtool.update(filepath, \"N:{}\".format(value))\n except rrdtool.error as err:\n if \"No such file or directory\" not in err.message:\n LOGGER.error(\"error on %s metric: %s\", self.parameter,\n err)\n continue\n\n dirpath = os.path.dirname(filepath)\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n rrdtool.create(filepath,\n 'DS:%s:GAUGE:%s:U:U' % (self.id, self.heartbeat),\n 'RRA:AVERAGE:0.5:1:600',\n 'RRA:AVERAGE:0.5:6:600',\n 'RRA:AVERAGE:0.5:24:600',\n 'RRA:AVERAGE:0.5:288:600',\n 'RRA:MAX:0.5:1:600',\n 'RRA:MAX:0.5:6:600',\n 'RRA:MAX:0.5:24:600',\n 'RRA:MAX:0.5:288:600') # Up to 600d\n # As rrdupdate manpage says, \"using the letter 'N', in which\n # case the update time is set to be the current time\n rrdtool.update(filepath, \"N:{}\".format(value))\n cache.set_many(cache_dict, 7 * 86400)", "def log_metric(self, name, val):\n raise NotImplementedError", "def log_metrics(self, metrics: dict):\n self.metrics.update(metrics)\n\n self._sync_log_event()", "def build_summary(self):\n for k, v in self.metrics.items():\n tf.summary.scalar(k, v)\n \n self.summary_op = tf.summary.merge_all()", "def _log_metrics(\n self,\n train_writer: SummaryWriter,\n val_writer: SummaryWriter,\n timestamped_save_dir: Path,\n train_metrics: _Metrics,\n step: int,\n ) -> None:\n if len(self.val_loader) > 0:\n val_metrics, val_img, val_gt, val_pred = self._get_val_metrics()\n if val_metrics.accuracy > self.best_acc:\n self.best_acc = val_metrics.accuracy\n self.save_weights(timestamped_save_dir, True)\n\n for key in vars(train_metrics):\n if key == \"class_loss\":\n tag = \"losses/classification\"\n elif key in {\"shape_loss\", \"total_loss\"}:\n continue\n else:\n tag = f\"metrics/{key}\"\n\n train_writer.add_scalar(tag, getattr(train_metrics, key), step)\n if len(self.val_loader) > 0:\n val_writer.add_scalar(tag, getattr(val_metrics, key), step)\n\n reg_loss = self._get_l2_reg()\n train_writer.add_scalar(\"losses/regularization\", reg_loss, step)\n train_writer.add_scalar(\"losses/shape\", train_metrics.shape_loss, step)\n train_writer.add_scalar(\n \"losses/total\",\n train_metrics.total_loss + self.config.weight_decay * reg_loss,\n step,\n )\n\n # Log a histogram for each tensor parameter in the model, to\n # see if a parameter is training stably or not\n for name, value in self.model.state_dict().items():\n train_writer.add_histogram(name, value, step)\n\n # Log the validation images for easy visualization\n if len(self.val_loader) > 0:\n val_writer.add_images(\"input\", val_img, step)\n val_writer.add_images(\"ground_truth\", val_gt, step)\n val_writer.add_images(\"prediction\", val_pred, step)", "def add_results(self, results):\n if self.replication_counter < self.replication_num:\n for metric in self.metrics:\n 
self.metric_final_results[metric].append(results[metric])\n\n self.replication_counter += 1\n else:\n raise Exception(\"The requested metric collection call of {}/{} exceeds the number of pre-defined replication\".format(self.replication_counter, self.replication_num))", "def __init__(self, metrics_to_record):\n self.tape = {}\n\n for metric_name in metrics_to_record:\n self.tape[metric_name] = []", "def update_metrics(self, state: TrainState, step: int, train_metrics: List[MetricsDict], t0):\n if not self.logflag:\n return\n\n eval_metrics: List[Any] = []\n\n # Build summary dictionary for logging\n # Include training stats\n train_metrics = common_utils.get_metrics(train_metrics)\n summary = {\n f\"train_{k}\": v\n for k, v in jax.tree_util.tree_map(lambda x: x.mean(), train_metrics).items()\n }\n epoch = step // self.steps_per_epoch\n summary[\"epoch\"] = epoch\n summary[\"time\"] = time.time() - t0\n\n # Eval over testing set\n for _ in range(self.steps_per_eval):\n eval_batch = next(self.eval_dt_iter)\n metrics = self.p_eval_step(state, eval_batch)\n eval_metrics.append(metrics)\n # Compute testing metrics\n eval_metrics = common_utils.get_metrics(eval_metrics)\n\n # Add testing stats to summary\n summary_eval = jax.tree_util.tree_map(lambda x: x.mean(), eval_metrics)\n summary.update(summary_eval)\n\n # Update iteration stats object\n assert isinstance(self.itstat_object, IterationStats) # for mypy\n self.itstat_object.insert(self.itstat_insert_func(ArgumentStruct(**summary)))", "def __setitem__(self, key, val):\n extract = lambda t: t.item() if type(t) is torch.Tensor else t\n\n if type(val) is dict:\n for k, v in val.items():\n self.log_scalar(k, extract(v), 'last')\n else:\n self.log_scalar(key, extract(val), 'last')", "def log_tensorboard(self, value_dict, step):\n for key, value in value_dict.items():\n summary = tf.Summary(value=[tf.Summary.Value(tag=key, simple_value=value)])\n self.writer.add_summary(summary, step)", "def apply_all_accumulators(self):\n self._require_state(\"APPLYING\")\n for mi in self._accums.keys():\n self._apply_one_accum_set(mi)", "def add_metrics(_dict):\n for key, itr in _dict.items():\n if key not in self.metric_cols:\n self.metric_cols.append(key)", "def calculate_epoch_metrics(self, val_metrics=False):\n metric_names = self.tracked_metrics()\n\n for metric in metric_names:\n if val_metrics:\n mean_val = np.array(self.metrics_history[f\"val_{metric}\"][\"batch_vals\"]).mean()\n self.metrics_history[f\"val_{metric}\"][\"epoch_vals\"].append(mean_val)\n else:\n mean_val = np.array(self.metrics_history[metric][\"batch_vals\"]).mean()\n self.metrics_history[metric][\"epoch_vals\"].append(mean_val)", "def log_metadata(self, label, value):\n self.__metadata[label].append(value)", "def _save(self, data: MetricsDict) -> None:\n client = MlflowClient()\n try:\n run_id = self.run_id\n except DataSetError:\n # If run_id can't be found log_metric would create new run.\n run_id = None\n\n log_metric = (\n partial(client.log_metric, run_id)\n if run_id is not None\n else mlflow.log_metric\n )\n metrics = (\n self._build_args_list_from_metric_item(k, v) for k, v in data.items()\n )\n\n if self._logging_activated:\n for k, v, i in chain.from_iterable(metrics):\n log_metric(k, v, step=i)", "def _update_result(self, results, clf_numbers):\n # ToDo make results of scoring values dynamic\n names_results = ['Accuracy']\n for number in clf_numbers:\n for name in names_results:\n if name not in self.results:\n self.results[name] = [results[number][name + \"_test_score_\" + 
str(number)]]\n else:\n self.results[name].append(results[number][name + \"_test_score_\" + str(number)])", "def merge_accumulators(self, accumulators):\n raise NotImplementedError", "def write(self, tags, values, step):\n if not isinstance(tags, list):\n tags = list(tags)\n if not isinstance(values, list):\n values = list(values)\n\n for i, (tag, value) in enumerate(zip(tags,values)):\n self.writer.add_scalar(tag, value, step)", "def _add_values(self, pre_report, report):\n for f in FIELDS:\n pre_report[f] += report[f]\n\n clats = report.get(\"clats\")\n for i in range(len(clats)):\n value = float(clats[i][1])\n pre_report[\"clats\"][i][1] += value", "def sum(self, key, value):\n self._metrics[key] += value", "def list_of_scalars_summary(self, tag_value_pairs, step):\n for tag, value in tag_value_pairs:\n self.writer.add_scalar(tag, value, step)", "def add_value(trajectories, val_func):\n for trajectory in trajectories:\n observes = trajectory['observes']\n values = val_func.predict(observes)\n trajectory['values'] = values", "def _aggregate_log_values(self, source, dest):\n remove = []\n for key, item in source.items():\n if \"data\" not in item:\n # Assume it's a sub-group\n dest[key] = {}\n self._aggregate_log_values(item, dest[key])\n else:\n aggregator = self._get_aggregator_for_key(key, item['agg'])\n value = aggregator(item['data'])\n if item['precision'] is not None:\n value = round(value, item['precision'])\n dest[key] = value\n if item['scope'] == 'get':\n remove.append(key)\n for key in remove:\n del source[key]", "def update_history_val(self, loss, speakers):\r\n loss_copy = loss.detach().cpu().numpy()\r\n for loss_value, speaker in zip(loss_copy, speakers):\r\n speaker_index = self.s2i[speaker]\r\n if speaker_index not in self.val_history:\r\n self.val_history[speaker_index] = []\r\n self.val_history[speaker_index].append(loss_value)", "def update_metrics(self, metrics, predictions, labels):\n return", "def _write_measurements(summary_writer, labels_and_values, step):\n\n # Write TF Summaries Measurements.\n with summary_writer.as_default():\n for (label, value) in labels_and_values:\n tf.summary.scalar(label, value, step=step)", "def _forward_summary(self, summaries):\n p = self.params\n for summary_key, summary_value in summaries.items():\n logging.info((summary_key, summary_value))\n summary_type = base_layer.get_summary_type_from_key(summary_key)\n assert summary_value.shape[0] == p.x_times\n if p.unpack_summaries:\n # unstack summary_value\n unstacked_values = jnp.split(summary_value, p.x_times)\n for i, v in enumerate(unstacked_values):\n base_layer.add_summary(f'{summary_key}/{i}', v, summary_type)\n else:\n base_layer.add_summary('{summary_key}', summary_value, summary_type)", "def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})", "def results_aggregator(self, names):\n\t\tfor name in names:\n\t\t\tresult = self.main(name)\n\t\t\tself.results.append(result)\n\t\t\tprint(\"'%s' has been written to the file.\" % result[0])\n\t\t\t\"\"\"result is formatted name, number, rating, review count\"\"\"", "def push_many(self, values):\n self._tail_iters.append(iter(values))", "def evaluate(self, val_dataloader, reduce=True, **kwargs):\n eval_list = defaultdict(list)\n\n for data in tqdm(val_dataloader):\n eval_step_dict = self.eval_step(data, **kwargs)\n\n for k, v in eval_step_dict.items():\n eval_list[k].append(v)\n\n eval_dict = {k: torch.stack(v) for k, v in eval_list.items()}\n if reduce:\n eval_dict = {k: 
torch.mean(v) for k, v in eval_dict.items()}\n return eval_dict", "def add(self, val):\n key = self.get_key(val)\n self.store.add(key)\n\n # Keep track of summary stats\n self._count += 1\n self._sum += val\n if val < self._min:\n self._min = val\n if val > self._max:\n self._max = val", "def get_values(self, names):\n r = []\n for n in names:\n if n in self.raw_metrics:\n r.append(self.raw_metrics[n])\n else:\n return None\n return r", "def append_to_list(self, metric_value_to_append):\n if type(metric_value_to_append)==MetricValue:\n self.__metric_value_list.append(metric_value_to_append)\n else:\n print(\"appended object must be a MetricValue, metric_value_to_append=\",metric_value_to_append)\n sys.exit() # stop entire program, because metric_value_to_append MUST be correct", "def add_metrics(self, metric_dict: dict):\n self.metric_dict.update(metric_dict)", "def test_metric_tracker_and_collection_multioutput(input_to_tracker, assert_type):\n tracker = MetricTracker(input_to_tracker)\n for _ in range(5):\n tracker.increment()\n for _ in range(5):\n preds, target = torch.randn(100, 2), torch.randn(100, 2)\n tracker.update(preds, target)\n all_res = tracker.compute_all()\n assert isinstance(all_res, assert_type)\n best_metric, which_epoch = tracker.best_metric(return_step=True)\n if isinstance(best_metric, dict):\n for v in best_metric.values():\n assert v is None\n for v in which_epoch.values():\n assert v is None\n else:\n assert best_metric is None\n assert which_epoch is None", "def after_val_iter(self,\n runner,\n batch_idx: int,\n data_batch: DATA_BATCH = None,\n outputs: Optional[Sequence] = None) -> None:\n if self.every_n_inner_iters(batch_idx, self.interval):\n _, log_str = runner.log_processor.get_log_after_iter(\n runner, batch_idx, 'val')\n runner.logger.info(log_str)", "def aggregate_statistics(self, new_stats):\n \n if isinstance(new_stats,RunStatistics):\n new_stats = [new_stats, ]\n elif isinstance(new_stats,list):\n if any(not isinstance(_,RunStatistics) for _ in new_stats):\n raise MadGraph5Error, \"The 'new_stats' argument of the function \"+\\\n \"'updtate_statistics' must be a (possibly list of) \"+\\\n \"RunStatistics instance.\"\n \n keys = set([])\n for stat in [self,]+new_stats:\n keys |= set(stat.keys())\n\n new_stats = new_stats+[self,]\n for key in keys:\n # Define special rules\n if key=='max_precision':\n # The minimal precision corresponds to the maximal value for PREC\n self[key] = min( _[key] for _ in new_stats if key in _)\n elif key=='min_precision':\n # The maximal precision corresponds to the minimal value for PREC\n self[key] = max( _[key] for _ in new_stats if key in _)\n elif key=='averaged_timing':\n n_madloop_calls = sum(_['n_madloop_calls'] for _ in new_stats if\n 'n_madloop_calls' in _)\n if n_madloop_calls > 0 :\n self[key] = sum(_[key]*_['n_madloop_calls'] for _ in \n new_stats if (key in _ and 'n_madloop_calls' in _) )/n_madloop_calls\n else:\n # Now assume all other quantities are cumulative\n self[key] = sum(_[key] for _ in new_stats if key in _)", "def _update_data(self):\n for attribute in [\"flow_rate\"]:\n self._data[attribute] = self._connection.measure", "def update_state(self, **kwargs):\n\n for name in self.metrics:\n\n metric = self.metrics[name]\n\n argspec = inspect.getfullargspec(metric.update_state)\n\n kwargs_to_pass = {k: kwargs[k] for k in kwargs if k in argspec.args}\n\n metric.update_state(**kwargs_to_pass)", "def evaluate_with_metrics(self, dataset, metrics, *args, **kwargs):\n\n utils.assert_raise(isinstance(metrics, 
dict), ValueError,\n '\"metrics\" must be a dict with metric_name -> metric_function')\n result = dict()\n\n for sample in dataset:\n output = self.predict(sample)\n\n for key, call in metrics.items():\n holder = result.get(key, list())\n holder.append(call(output, sample))\n\n result[key] = holder\n\n return result", "def _monitor_metrics(self):\n metrics = [\"loss\"]\n try:\n m = U.metrics_from_model(self.model)\n if isinstance(m, list):\n metrics.extend(m)\n except:\n pass\n if self.val_data is not None:\n for m in metrics[:]:\n metrics.append(\"val_%s\" % (m))\n return metrics", "def _calculate_aggregated_metrics(self):\n\n # using the historical values, calculate the aggregate\n # there are two kinds of metrics:\n # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)\n # b) absolute metrics - the last absolute value is used\n\n running_pids_set = set(self.__pids)\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not self.__aggregated_metrics.get(_metric):\n self.__aggregated_metrics[_metric] = 0\n if _metric.is_cumulative:\n if pid in running_pids_set:\n if len(_metric_values) > 1:\n # only report the cumulative metrics for more than one sample\n self.__aggregated_metrics[_metric] += (\n _metric_values[-1] - _metric_values[-2]\n )\n else:\n if pid in running_pids_set:\n # absolute metric - accumulate the last reported value\n self.__aggregated_metrics[_metric] += _metric_values[-1]", "def store_metrics_to_params(self):\n\n model = self.model_name\n\n if self.stats_path.exists():\n with open(self.stats_path, \"rb\") as f:\n stats_dict = pickle.load(f)\n else:\n stats_dict = {}\n\n if model not in stats_dict:\n stats_dict[model] = defaultdict(list)\n\n stats_dict[model]['amine'].append(self.amine)\n stats_dict[model]['accuracies'].append(self.metrics['accuracies'])\n stats_dict[model]['confusion_matrices'].append(\n self.metrics['confusion_matrices'])\n stats_dict[model]['precisions'].append(self.metrics['precisions'])\n stats_dict[model]['recalls'].append(self.metrics['recalls'])\n stats_dict[model]['bcrs'].append(self.metrics['bcrs'])\n\n # Save this dictionary in case we need it later\n with open(self.stats_path, \"wb\") as f:\n pickle.dump(stats_dict, f)", "def visdom_append_metrics(vis, metrics, first_epoch=False):\n visited = {}\n\n sorted_metrics = sorted(metrics.columns, key=_column_original_name)\n for metric_basename, metric_list in it.groupby(sorted_metrics, key=_column_original_name):\n metric_list = list(metric_list)\n\n for metric in metric_list:\n if vis.win_exists(metric_basename) and (not visited.get(metric, False)) and first_epoch:\n update = 'replace'\n elif not vis.win_exists(metric_basename):\n update = None\n else:\n update = 'append'\n\n vis.line(\n metrics[metric].values,\n metrics.index.values,\n win=metric_basename,\n name=metric,\n opts={\n 'title': metric_basename,\n 'showlegend': True\n },\n update=update\n )\n\n if metric_basename != metric and len(metric_list) > 1:\n if vis.win_exists(metric) and first_epoch:\n update = 'replace'\n elif not vis.win_exists(metric):\n update = None\n else:\n update = 'append'\n\n vis.line(\n metrics[metric].values,\n metrics.index.values,\n win=metric,\n name=metric,\n opts={\n 'title': metric,\n 'showlegend': True\n },\n update=update\n )", "def record_time_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_time_metric(metric)", "def valuate(self, 
outputs=None):\n\n # Compute if not given\n if not outputs:\n frames = next(self.dataset_it)\n outputs = self._model_compute_all(frames)\n\n # Collect all metrics\n metrics = {\n \"metrics/\" + name: value\n for name, value in outputs[\"metrics\"].items()}\n if outputs[\"loss\"] is not None:\n metrics[\"loss\"] = outputs[\"loss\"]\n metrics[\"learning_rate\"] = self.learning_rate if not self.decay_rate \\\n else self.learning_rate(self._step)\n\n # Log scalars (convert to mean max if necessary)\n scalars = {}\n for name, value in metrics.items():\n is_scalar = (\n isinstance(value, int) or isinstance(value, float) or\n value.ndim == 0)\n if is_scalar:\n scalars[name] = value\n else:\n scalars[name + \"_mean\"] = tf.math.reduce_mean(value)\n scalars[name + \"_max\"] = tf.math.reduce_max(value)\n self.logger.save_scalars(self._step, scalars)\n\n # Log images\n images = self.model.images(outputs[\"outputs\"])\n self.logger.save_images(self._step, images)\n\n # Log histograms\n histograms = self.model.histograms(outputs[\"outputs\"])\n self.logger.save_histogram(self._step, histograms)\n\n # Transform tensors to arrays for nice logs\n scalars = {\n name: var.numpy() if isinstance(var, tf.Tensor) else var\n for name, var in scalars.items()\n }\n\n return scalars", "def _gather_data(self):\n for data in self._collection:\n label = data.label\n label = disambiguate(label, self._data)\n self._data[label] = data", "def get_metric(self, metric, existing_dict=None):\n metric_key, metric_type, metric_name, metric_help = metric\n metric_dict = {\n 'name': metric_name,\n 'type': metric_type,\n 'help': metric_help,\n 'values': OrderedDict()\n }\n values = self.r.hgetall(metric_key) # new values\n # print \"values: %r\" % values\n metric_dict['values'] = values\n\n if existing_dict:\n # we're updating a metric we've already seen\n # print \"existing dict: %r\" % existing_dict\n for value in values:\n # print \"checking value: %r\" % value\n # value = json.loads(value)\n if value in existing_dict['values']:\n if metric_type == 'counter' or metric_type == 'histogram':\n # Combine the values if it's a counter or histogram\n # TODO: sort histogram buckets\n # TODO: append _bucket to histogram bucket names\n existing_dict['values'][value] = float(\n values[value]) + float(\n existing_dict['values'][value])\n elif metric_type == 'gauge':\n # use the last value we see for a gauge - # TODO: is there a better way? we could average it\n existing_dict['values'][value] = float(values[value])\n else:\n existing_dict['values'][value] = float(values[value])\n metric_dict['values'] = existing_dict['values']\n\n if metric_type == 'histogram':\n # json decode all of the labels\n samples = [json.loads(x, object_pairs_hook=OrderedDict) for x in metric_dict['values']]\n # we need to sort the values by the bucket labeled \"le\"\n sorted_keys = sorted(samples, key=lambda b: b['le'])\n # and then we need to store the values again json encoded\n vals = metric_dict['values']\n metric_dict['values'] = OrderedDict()\n for k in sorted_keys:\n kn = json.dumps(k, sort_keys=True)\n metric_dict['values'][kn] = vals[kn]\n\n return metric_dict" ]
[ "0.67049134", "0.60828054", "0.59038454", "0.5789189", "0.57760006", "0.5761314", "0.5761024", "0.57179546", "0.5702051", "0.5690251", "0.56831443", "0.56656265", "0.55119264", "0.55036944", "0.54828936", "0.5461199", "0.54519856", "0.54256064", "0.54026794", "0.5388167", "0.5367114", "0.535964", "0.5357133", "0.5353383", "0.5345288", "0.5276196", "0.5268314", "0.52327746", "0.5230708", "0.5218225", "0.52132744", "0.5180894", "0.51789635", "0.5176625", "0.5169682", "0.51691574", "0.51617163", "0.5154891", "0.5151999", "0.5149051", "0.514175", "0.51343197", "0.5133994", "0.51252127", "0.5117192", "0.5114606", "0.5107066", "0.50813705", "0.50671816", "0.50428915", "0.5035435", "0.5034334", "0.50339407", "0.5026039", "0.5023044", "0.50189805", "0.501512", "0.50148225", "0.50122213", "0.5000488", "0.50002545", "0.49927464", "0.49881518", "0.49864706", "0.49806368", "0.4979302", "0.49773842", "0.4969483", "0.49630207", "0.49619162", "0.49608058", "0.49556094", "0.4953103", "0.49508828", "0.4927999", "0.49245068", "0.49215564", "0.49214953", "0.49016833", "0.49011487", "0.48998454", "0.48954493", "0.48946846", "0.4887073", "0.48857668", "0.48782074", "0.48725206", "0.4857499", "0.48570198", "0.48555934", "0.4851561", "0.4848838", "0.48481542", "0.48447573", "0.48435524", "0.48381275", "0.4836388", "0.48355317", "0.48346198", "0.48335648" ]
0.59415793
2
Record a single sql metric, merging the data with any data from prior sql metrics for the same sql key.
def record_slow_sql_node(self, node): if not self.__settings: return key = node.identifier stats = self.__sql_stats_table.get(key) if stats is None: # Only record slow SQL if not already over the limit on # how many can be collected in the harvest period. settings = self.__settings maximum = settings.agent_limits.slow_sql_data if len(self.__sql_stats_table) < maximum: stats = SlowSqlStats() self.__sql_stats_table[key] = stats if stats: stats.merge_slow_sql_node(node) return key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def record_apdex_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Note that because we are using a scope here of an empty string\n # we can potentially clash with an unscoped metric. Using None,\n # although it may help to keep them separate in the agent will\n # not make a difference to the data collector which treats None\n # as an empty string anyway.\n\n key = (metric.name, '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = ApdexStats(apdex_t=metric.apdex_t)\n self.__stats_table[key] = stats\n stats.merge_apdex_metric(metric)\n\n return key", "def log(self, metric, value, source, timestamp=None):\n if timestamp is None:\n timestamp = datetime.now()\n\n sql = \"insert into measurement(metric, value, source, timestamp) values('{0}', {1}, '{2}', '{3}');\".format(\n metric, value, source, timestamp)\n\n self._execute_sql(sql)", "def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)", "def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)", "def record_custom_metric(self, name, value):\n key = (name, '')\n\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(key)\n if stats is None:\n self.__stats_table[key] = new_stats\n else:\n stats.merge_stats(new_stats)\n\n return key", "def record_time_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Scope is forced to be empty string if None as\n # scope of None is reserved for apdex metrics.\n\n key = (metric.name, metric.scope or '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = TimeStats(call_count=1,\n total_call_time=metric.duration,\n total_exclusive_call_time=metric.exclusive,\n min_call_time=metric.duration,\n max_call_time=metric.duration,\n sum_of_squares=metric.duration ** 2)\n self.__stats_table[key] = stats\n else:\n stats.merge_time_metric(metric)\n\n return key", "def _record(self, metric_point: MetricPoint,\n measurement_map: MeasurementMap):\n metric_name = metric_point.metric_name\n tags = metric_point.tags\n\n metric = self._registry.get(metric_name)\n # Metrics should be always registered dynamically.\n assert metric\n\n tag_map = tag_map_module.TagMap()\n for key, value in tags.items():\n tag_key = tag_key_module.TagKey(key)\n tag_value = tag_value_module.TagValue(value)\n tag_map.insert(tag_key, tag_value)\n\n metric_value = metric_point.value\n measurement_map.measure_float_put(metric.measure, metric_value)\n # NOTE: When we record this metric, timestamp will be renewed.\n measurement_map.record(tag_map)", "def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)", "def 
log_metric(data_category, key, value):\n # always, just print in logs\n log(logging.INFO, data_category, \"AML Metric({}={})\".format(key, value))\n if data_category == DataCategory.ONLY_PUBLIC_DATA:\n # if public, ask azureml to record (if azureml attached)\n run = AmlRunWrapper()\n run.setup(attach=True)\n run.log(key, value)\n run.flush()", "def add_metrics(_dict):\n for key, itr in _dict.items():\n if key not in self.metric_cols:\n self.metric_cols.append(key)", "def log_metric(key, value, step=None):\n mlflow.log_metric(key, value, step=step)", "def record_metrics(self, pid, metrics):\n\n for _metric, _metric_value in metrics.items():\n if not self.__metrics_history[pid].get(_metric):\n self.__metrics_history[pid][_metric] = []\n self.__metrics_history[pid][_metric].append(_metric_value)\n # only keep the last 2 running history for any metric\n self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][\n -2:\n ]", "def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset", "def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})", "def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def log_row(data_category, key, **kwargs):\n # always, just print in logs\n log(logging.INFO, data_category, \"AML MetricRow({}, {})\".format(key, kwargs))\n if data_category == DataCategory.ONLY_PUBLIC_DATA:\n # if public, ask azureml to record (if azureml attached)\n run = AmlRunWrapper()\n run.setup(attach=True)\n run.log_row(key, **kwargs)\n run.flush()", "def add_metric(self, metric_name, aggregate=None):\n\n clean_metric = metric_name.lower().strip()\n\n if clean_metric.lower() not in METRICS:\n raise Exception(\"Metric named: \" + metric_name + \" is not a valid benchmark metric.\")\n self.metrics.add(clean_metric)\n\n if not aggregate:\n self.raw_metrics.add(clean_metric)\n elif aggregate.lower().strip() in AGGREGATES:\n # Add aggregate to this metric\n clean_aggregate = aggregate.lower().strip()\n current_aggregates = self.aggregated_metrics.get(clean_metric, list())\n current_aggregates.append(clean_aggregate)\n self.aggregated_metrics[clean_metric] = current_aggregates\n else:\n raise Exception(\"Aggregate function \" + aggregate + \" is not a legal aggregate function name\");\n\n return self;", "def add_metric(self, metric):\n self.metrics.append(metric)\n self.estimate()", "def add_metric(self, metric_name, metric_value, login=False):\n if login:\n self._gc.login()\n\n try: \n if metric_name not in self._metric_dict:\n metric_index = len(self._metric_dict) + 2\n self._wks.update_cell(1, metric_index, metric_name)\n self._metric_dict[metric_name] = 
metric_index\n self.save_config()\n\n self._wks.update_cell(self.row_index, self._metric_dict[metric_name], metric_value)\n except Exception as ins:\n if not login:\n self.add_metric(metric_name, metric_value, login=True)\n else:\n return '\\n'.join([str(type(ins)), str(ins.args), str(ins)])\n return None", "def write_metrics(metrics, db_path):\n conn = sqlite3.connect(db_path)\n c = conn.cursor()\n c.execute('DELETE FROM metrics')\n for metric in metrics:\n c.execute(\n 'INSERT INTO metrics '\n '(timestamp, callerid, uniqueid, channel, channel_extension, name) '\n 'VALUES (datetime(?),?,?,?,?,?)',\n (metric['timestamp'],\n metric['callerid'],\n metric['uniqueid'],\n metric['channel'],\n metric['channel_extension'],\n metric['name']))\n conn.commit()\n conn.close()", "def metric_recorded(self, record):\n if record.name in self.headers and self.current_row is not None:\n if record.name == \"learning_rate\" and not record.is_scalar:\n # record is a list of scalars\n value = \",\".join([f\"{lr:.4f}\" for lr in record.value])\n elif record.is_scalar and isinstance(record.value, int):\n value = str(record.value)\n else:\n assert record.is_scalar\n\n value = f\"{record.value:.4f}\"\n\n self.current_row[record.name] = value", "def insert_new_measurement(database: Database, data_model, metric: Dict, measurement: Dict) -> Dict:\n if \"_id\" in measurement:\n del measurement[\"_id\"]\n metric_type = data_model[\"metrics\"][metric[\"type\"]]\n direction = metric.get(\"direction\") or metric_type[\"direction\"]\n for scale in metric_type[\"scales\"]:\n value = calculate_measurement_value(data_model, metric, measurement[\"sources\"], scale)\n status = determine_measurement_status(metric, direction, value)\n measurement[scale] = dict(value=value, status=status, direction=direction)\n for target in (\"target\", \"near_target\", \"debt_target\"):\n measurement[scale][target] = determine_target(\n metric, measurement, metric_type, scale, cast(TargetType, target))\n measurement[\"start\"] = measurement[\"end\"] = iso_timestamp()\n database.measurements.insert_one(measurement)\n del measurement[\"_id\"]\n return measurement", "def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]", "def record_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, value in metrics:\n self.record_custom_metric(name, value)", "def add_metrics(self, metric_dict: dict):\n self.metric_dict.update(metric_dict)", "def _increment_counter(metric: str):\n if metric not in db:\n db[metric] = 0\n db[metric] += 1", "def merge_time_metric(self, metric):\n\n self.merge_raw_time_metric(metric.duration, metric.exclusive)", "def save_metrics(self):\n self.data_stats.write.format(\"org.apache.spark.sql.cassandra\").mode(\"append\").options(table=self.cassandra_stats_table, keyspace=self.cassandra_keyspace).save()\n print (\"Saved data successfully\")", "def save_metric(key, value, timestamp=None):\n\n from analytics_client.settings import _ANALYTICS_ENABLED\n\n if not _ANALYTICS_ENABLED:\n return None\n\n from analytics_client.tasks import store_metric\n\n # Set a timestamp if it is undefined\n _timestamp = timestamp\n if _timestamp is None:\n _timestamp = datetime.now()\n\n store_metric.delay(Metric(key=key, value=value, timestamp=_timestamp))", "def process(self, metric):\n self.metrics.append(metric)\n if self.should_flush():\n self._send()", "def emit(self, metric):\n metric_data = 
self.unmarshal(metric)\n self.logger.log(\n self.log_level, metric.DEFAULT_LOG_FORMAT.format(**metric_data)\n )", "def _add_to_queue(key, value, step, time, run_id):\n met = Metric(key=key, value=value, timestamp=time, step=step)\n _metric_queue.append((run_id, met))\n if len(_metric_queue) > _MAX_METRIC_QUEUE_SIZE:\n _thread_pool.submit(_flush_queue)", "def _save(self, data: MetricsDict) -> None:\n client = MlflowClient()\n try:\n run_id = self.run_id\n except DataSetError:\n # If run_id can't be found log_metric would create new run.\n run_id = None\n\n log_metric = (\n partial(client.log_metric, run_id)\n if run_id is not None\n else mlflow.log_metric\n )\n metrics = (\n self._build_args_list_from_metric_item(k, v) for k, v in data.items()\n )\n\n if self._logging_activated:\n for k, v, i in chain.from_iterable(metrics):\n log_metric(k, v, step=i)", "def add_metric_class(self, metric: NNSimpleMetric):\n if metric.name not in self.metrics:\n self.metrics[metric.name] = metric", "def log_metric_id(self, log_metric_id):\n\n self._log_metric_id = log_metric_id", "def write_metric(self, metric_name: str, metric_value: Union[float, int]):\n self._metrics.append(Metric(metric_name, metric_value))", "def __push_metric(self, metric_name, value, timestamp):\n sock = self.__get_carbon_socket()\n _data = \"%s %d %d\\n\" % (metric_name, value, timestamp)\n LOGGER.debug(\"SEND: %s\", _data.replace(\"\\n\", \"\"))\n sock.send(_data.encode('utf-8'))", "def output_metric(self, key=None, metric='loss'):\n if key is None:\n key = self.key\n return self.metrics[key][metric][-1]", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def metric(env, metric):\n envs = environments()\n check_env(env, envs)\n\n name = unquote(metric)\n metric = get_or_abort(puppetdb.metric, metric)\n return render_template(\n 'metric.html',\n name=name,\n metric=sorted(metric.items()),\n envs=envs,\n current_env=env)", "def record_metrics(metrics, args):\n with open('interpretation_metrics/model_metrics_{}'.format(args.file_num), 'a') as f:\n f.write(\"META DATA\\n\")\n f.write(\"---------\\n\")\n f.write(\"Model Name: {}\\n\".format(args.model_name))\n f.write(\"Attack Target: {}\\n\".format(args.attack_target))\n f.write(\"Gradient Model File: {}\\n\".format(args.gradient_model_file))\n f.write(\"Predictive Model File: {}\\n\".format(args.predictive_model_file))\n f.write(\"Cuda: {}\\n\".format(args.cuda))\n\n f.write(\"\\nSIMPLE GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSIMPLE GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSMOOTH GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSMOOTH GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_baseline'].items():\n f.write(\"{}: 
{:.3f}\\n\".format(key, val))\n\n f.write(\"\\nINTEGRATED GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nINTEGRATED GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))", "def record_apdex_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_apdex_metric(metric)", "def post_save_metrics(sender, **kwargs):\r\n action = 'created' if kwargs.pop('created', False) else 'updated'\r\n\r\n tags = _database_tags(action, sender, kwargs)\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def save_case_metrics_on_check_point(self) -> None:\n pd.read_csv(f'{self.path_to_case_metrics}/{self.file_name}.csv')\\\n .append(pd.DataFrame(self.case_metrics,\n columns=['stream_index', 'timestamp', 'check point', 'case',\n 'graph distance', 'time distance', 'label']))\\\n .to_csv(f'{self.path_to_case_metrics}/{self.file_name}.csv', index=False)\n self.case_metrics = []", "def merge_metric_stats(self, snapshot):\n\n if not self.__settings:\n return\n\n for key, other in six.iteritems(snapshot.__stats_table):\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def add_metric(self, metric_fn):\n self._metrics.append(metric_fn)", "def log_model_scores_to_mysql(df, schema, db_conn):\n db.write_dataframe_to_database(df, schema, 'model_score', db_conn)", "def metrics_group():", "def add_statistics(self, stat_col):\n # Those will be displayed.\n stat_col.add_statistics(self.key_precision, '{:05.4f}')\n stat_col.add_statistics(self.key_recall, '{:05.4f}')\n stat_col.add_statistics(self.key_f1score, '{:05.4f}')\n # That one will be collected and used by aggregator.\n stat_col.add_statistics(self.key_f1score+'_support', None)", "def merge_apdex_metric(self, metric):\n\n self[0] += metric.satisfying\n self[1] += metric.tolerating\n self[2] += metric.frustrating\n\n self[3] = ((self[0] or self[1] or self[2]) and\n min(self[3], metric.apdex_t) or metric.apdex_t)\n self[4] = max(self[4], metric.apdex_t)", "def _get_metrics_to_collect(self, instance_key, additional_metrics):\n if instance_key not in self.metrics_to_collect_by_instance:\n self.metrics_to_collect_by_instance[instance_key] = \\\n self._build_metric_list_to_collect(additional_metrics)\n return self.metrics_to_collect_by_instance[instance_key]", "def aws_write(vl, flusher):\n flusher.add_metric(vl)", "def log_metrics(self, metrics: dict):\n self.metrics.update(metrics)\n\n self._sync_log_event()", "def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of 
iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)", "def post(self):\r\n json_data = request.get_json(force=True)\r\n if not json_data:\r\n abort(400, message='No input data provided')\r\n # make sure the metric_id (temporary) and metric_type (model) are filled\r\n json_data[\"metric_id\"] = \"TBD\"\r\n json_data[\"metric_type\"] = \"model\"\r\n\r\n # validate and deserialize input\r\n new_metric = self.load(json_data, session=db.session)\r\n\r\n # get the next metric id and update metric object\r\n try:\r\n db.session.add(new_metric)\r\n db.session.commit()\r\n except SQLAlchemyError as e:\r\n abort(400, message=f'Database error. Reason: {e}')\r\n\r\n # dump to json and return result\r\n result = self.schema.dump(new_metric)\r\n return success(result, code=201)", "def to_metric(self):\r\n if self.units != 'metric':\r\n self.units = 'metric'\r\n for statement in self.statements:\r\n statement.to_metric()\r\n for tool in iter(self.tools.values()):\r\n tool.to_metric()\r\n for primitive in self.primitives:\r\n primitive.to_metric()\r\n for hit in self.hits:\r\n hit.to_metric()", "def get_influx_DB_write_string_from_metric_data(metric, metric_vals_at_bins, bin_times):\n # vals_at_bins = [ [ (val, groups), (val, groups), ...], ... ] where groups = {'Owner':'trjones',...}\n measurement = metric[MetricsFields.MEASUREMENT_NAME]\n metric_string = \"\"\n for i in range(len(metric_vals_at_bins)):\n for pair in metric_vals_at_bins[i]:\n val = pair[0]\n groups = pair[1]\n tag_segment = ','.join([label + '=' + groups[label] for label in groups])\n line = measurement + \",\" + tag_segment + \" value=\" + str(val) + \" \" + str(bin_times[i])\n metric_string += line + \"\\n\"\n return metric_string[:-1] # remove trailing newline", "def log_metric(self, name, val):\n raise NotImplementedError", "def add_metrics_to_db(self) -> None:\n\n model = {\n 'id': 'model1',\n 'name': 'Housing Price Prediction',\n 'metrics': {\n 'mean_squared_error': mean_squared_error(self._y_test, self._predictions),\n 'mean_absolute_error': mean_absolute_error(self._y_test, self._predictions),\n 'r2_score': r2_score(self._y_test, self._predictions)\n }\n }\n\n self._db.add_model(model)", "def update_log_record(self, current_date, score):\n \n ## Find the relevant index\n log_index = list(self.log_df[((self.log_df['city'] == self.location) & \n (self.log_df['type'] == self.type))].index)[0]\n \n ## Update last run date\n self.log_df.loc[log_index, 'last_run_dt'] = current_date\n \n ## Update the record's quality category counts\n if score < 25:\n self.log_df.loc[log_index, 'n_poor'] = self.log_df['n_poor'][log_index]+1\n elif score < 50:\n self.log_df.loc[log_index, 'n_fair'] = self.log_df['n_fair'][log_index]+1\n elif score < 75:\n self.log_df.loc[log_index, 'n_good'] = self.log_df['n_good'][log_index]+1\n else:\n self.log_df[log_index, 'n_great'] = self.log_df['n_great'][log_index]+1\n \n ## Update the average quality score and n_runs\n self.log_df.loc[log_index, 'avg_quality_score'] = ((self.log_df['avg_quality_score'][log_index]*\n self.log_df['n_runs'][log_index] + \n 
score)/(self.log_df['n_runs'][log_index]+1))\n self.log_df.loc[log_index, 'n_runs'] = self.log_df['n_runs'][log_index]+1", "def log_metric(self, name: str, value):\n self.metrics[name] = value\n\n self._sync_log_event()", "def sql_log(cls, sql_query, data=None):\n\t\t# if data exists , I replace them into `complete_sql_query`\n\t\tif data:\n\t\t\tfor key, value in data.items():\n\t\t\t\tsearch = ':{}'.format(key)\n\t\t\t\treplace = '`{}`'.format(value)\n\t\t\t\tsql_query = sql_query.replace(search, replace)\n\n\t\tprint('\\t{}'.format(sql_query))", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def insert_metrics(params):\n\n outgoing_metrics_path = os.path.join(\"s3://\" + params['learner']['bucket'],\n params['learner']['prefix'], params['learner']['metrics'])\n outgoing_metrics = pd.read_csv(outgoing_metrics_path)\n # Connect\n con = psycopg2.connect(host=params[\"labeller\"][\"db_host\"], database=params[\"labeller\"][\"db_production_name\"],\n user=params[\"labeller\"][\"db_username\"], password=params[\"labeller\"][\"db_password\"])\n curs = con.cursor()\n print('cursor made')\n\n # Update the iteration_metrics table\n try:\n insert_query = \"insert into iteration_metrics \" \\\n \"(run, iteration, tss, accuracy, aoi, iteration_time, precision, \" \\\n \"recall, fpr, tpr, auc) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s); \"\n outgoing_metrics = outgoing_metrics[outgoing_metrics['iteration'] == get_current_iteration(params)]\n outgoing_metrics = outgoing_metrics[outgoing_metrics['run'] == params['learner']['runid']]\n # this is needed for multiple runs for multiple aois. 
incoming_names.csv will need an aoi column and its\n # corresponding table will need to have a aoi column that is a key like run and iteration\n # or we have a different incoming_names.csv for each aoi\n # outgoing_metrics = outgoing_metrics[outgoing_metrics['run']==params['learner']['aoiid']]\n outgoing_metrics = outgoing_metrics.reindex(\n columns=[\"run\", \"iteration\", \"tss\", \"accuracy\", \"aoi\", \"iteration_time\", \"precision\", \"recall\", \"fpr\",\n \"tpr\", \"AUC\"])\n outgoing_list = list(outgoing_metrics.iloc[0])\n # converts numpy types to basic python types for DB\n for i, n in enumerate(outgoing_list):\n if type(n) is not str:\n outgoing_list[i] = n.item()\n curs.execute(insert_query, outgoing_list)\n con.commit()\n print('Finished saving out the iteration metrics')\n except psycopg2.DatabaseError as err:\n print(\"Error updating database\")\n print(err)\n finally:\n if con:\n con.close()", "def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)", "def put(self, metric, values, timestamp=None):\n if timestamp is None:\n timestamp = time.time()\n now_date = datetime.datetime.fromtimestamp(timestamp)\n\n if self.last is None:\n self.last = timestamp\n return\n\n self.last = timestamp\n\n values = [str(d) for d in [now_date, timestamp]+values]\n\n with open(self.filename, \"at\") as df:\n df.write(\"{}\\n\".format(\",\".join(values)))", "def update_metric(self, metric, value):\n if self.is_number(value):\n self.logger.debug(\"Collected raw metric: %s = %s\" % (metric, value))\n self.raw_metrics[metric] = value", "def sum(self, key, value):\n self._metrics[key] += value", "def log_metric(self, name, val, step):\n raise NotImplementedError", "def publish_metric(name, value, type):\n t = time.time()\n m = json.dumps({'monitor':name, type:value, 'time':t})\n r = redis.StrictRedis(host='localhost', port=6379, db=0) \n r.lpush('sensor_readings',m)", "def _cache_set(self, metric_name, metric):\n pass", "def record_time_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_time_metric(metric)", "def generate_slurm_sql(metric: str, \n start: str, \n end: str, \n interval: str, \n aggregate: str):\n sql = \"\"\n if metric == 'node_jobs':\n sql = f\"SELECT time_bucket_gapfill('{interval}', timestamp) AS time, \\\n nodeid, jsonb_agg(jobs) AS jobs, jsonb_agg(cpus) AS cpus \\\n FROM slurm.{metric} \\\n WHERE timestamp >= '{start}' \\\n AND timestamp <= '{end}' \\\n GROUP BY time, nodeid \\\n ORDER BY time;\"\n else:\n sql = f\"SELECT time_bucket_gapfill('{interval}', timestamp) AS time, \\\n nodeid, {aggregate}(value) AS value\\\n FROM slurm.{metric} \\\n WHERE timestamp >= '{start}' \\\n AND timestamp <= '{end}' \\\n GROUP BY time, nodeid \\\n ORDER BY time;\"\n return sql", "def _record_hits(self, hits):\n self.connection = mdb.connect(\n host=self.db_host, user=self.db_user, passwd=self.db_pass,\n db=self.db_name, charset='utf8')\n\n sql = \"\"\"\n INSERT INTO statistics_access (ip, filename, is_download, session_time,\n is_redirect, event_category, event_action, lineno, status,\n is_error, event_name, date, session_start_date, path,\n extension, referrer, userid, length, user_agent,\n generation_time_milli, query_string, is_robot, full_path,\n 
country_code, country, city, latitude, longitude,\n region, region_name, organization)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,\n %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,\n %s, %s, %s, %s, %s, %s)\n \"\"\"\n try:\n c = self.connection.cursor()\n for hit in hits:\n if hit.session_time > 0:\n hit.session_start_date = hit.date - timedelta(\n seconds=hit.session_time)\n user_info = hit.user_agent.split(\":\")\n hit.country_code = ''\n hit.country = ''\n hit.city = ''\n hit.latitude = ''\n hit.longitude = ''\n hit.region = ''\n hit.region_name = ''\n hit.organization = ''\n if len(user_info) == 1:\n hit.user_agent = user_info[0]\n elif len(user_info) == 6:\n hit.country_code = user_info[0]\n hit.country = user_info[1]\n hit.city = user_info[2]\n hit.latitude = user_info[3]\n hit.longitude = user_info[4]\n hit.user_agent = user_info[5]\n elif len(user_info) == 7:\n hit.country_code = user_info[0]\n hit.country = user_info[1]\n hit.city = user_info[2]\n hit.latitude = user_info[3]\n hit.longitude = user_info[4]\n hit.organization = user_info[5]\n hit.user_agent = user_info[6]\n elif len(user_info) == 9:\n hit.country_code = user_info[0]\n hit.country = user_info[1]\n hit.city = user_info[2]\n hit.latitude = user_info[3]\n hit.longitude = user_info[4]\n hit.region = user_info[5]\n hit.region_name = user_info[6]\n hit.organization = user_info[7]\n hit.user_agent = user_info[8]\n try:\n c.execute(sql, (hit.ip, hit.filename, hit.is_download,\n hit.session_time, hit.is_redirect,\n hit.event_category, hit.event_action,\n hit.lineno, hit.status, hit.is_error,\n hit.event_name, hit.date,\n hit.session_start_date, hit.path,\n hit.extension, hit.referrer,\n hit.userid, hit.length, hit.user_agent,\n hit.generation_time_milli,\n hit.query_string, hit.is_robot,\n hit.full_path, hit.country_code,\n hit.country, hit.city, hit.latitude,\n hit.longitude, hit.region,\n hit.region_name, hit.organization))\n except Exception, e:\n print e\n except Exception, e:\n print e\n self.connection.commit()\n self.connection.close()\n stats.count_lines_recorded.advance(len(hits))", "def get_metric_data(config, metric_list, metric_grouping, start_time, end_time, collected_data_map):\n\n def format_data_entry(json_data_entry):\n metric_name = json_data_entry.get('metric')\n host_name = json_data_entry.get('tags', {}).get('host') or 'unknownHost'\n dps = json_data_entry.get('dps', {})\n metric_value = None\n header_field = normalize_key(metric_name) + \"[\" + host_name + \"]:\" + str(\n get_grouping_id(metric_name, metric_grouping))\n mtime = 0\n for stime, val in dps.items():\n if int(stime) > mtime:\n metric_value = val\n mtime = int(stime)\n\n epoch = mtime * 1000\n\n if epoch in collected_data_map:\n timestamp_value_map = collected_data_map[epoch]\n else:\n timestamp_value_map = {}\n\n timestamp_value_map[header_field] = str(metric_value)\n collected_data_map[epoch] = timestamp_value_map\n\n json_data = {\n \"token\": config['OPENTSDB_TOKEN'],\n \"start\": start_time,\n \"end\": end_time,\n \"queries\": map(lambda m: {\n \"aggregator\": \"avg\",\n \"downsample\": \"1m-avg\",\n \"metric\": m.encode('ascii')\n }, metric_list)\n }\n\n url = config[\"OPENTSDB_URL\"] + \"/api/query\"\n response = requests.post(url, data=json.dumps(json_data))\n if response.status_code == 200:\n rawdata_list = response.json()\n logger.debug(\"Get metric data from opentsdb: \" + str(len(rawdata_list)))\n\n # format metric and save to collected_data_map\n map(lambda d: format_data_entry(d), rawdata_list)", "def 
_cache_set(self, metric_name, metric):\n encoded_metric_name = self._encode(metric_name)\n key = encoded_metric_name\n value = self.__value_from_metric(metric)\n with self.__env.begin(self.__metric_to_metadata_db, write=True) as txn:\n txn.put(key, value, dupdata=False, overwrite=True)", "def get_metric(self, metric, existing_dict=None):\n metric_key, metric_type, metric_name, metric_help = metric\n metric_dict = {\n 'name': metric_name,\n 'type': metric_type,\n 'help': metric_help,\n 'values': OrderedDict()\n }\n values = self.r.hgetall(metric_key) # new values\n # print \"values: %r\" % values\n metric_dict['values'] = values\n\n if existing_dict:\n # we're updating a metric we've already seen\n # print \"existing dict: %r\" % existing_dict\n for value in values:\n # print \"checking value: %r\" % value\n # value = json.loads(value)\n if value in existing_dict['values']:\n if metric_type == 'counter' or metric_type == 'histogram':\n # Combine the values if it's a counter or histogram\n # TODO: sort histogram buckets\n # TODO: append _bucket to histogram bucket names\n existing_dict['values'][value] = float(\n values[value]) + float(\n existing_dict['values'][value])\n elif metric_type == 'gauge':\n # use the last value we see for a gauge - # TODO: is there a better way? we could average it\n existing_dict['values'][value] = float(values[value])\n else:\n existing_dict['values'][value] = float(values[value])\n metric_dict['values'] = existing_dict['values']\n\n if metric_type == 'histogram':\n # json decode all of the labels\n samples = [json.loads(x, object_pairs_hook=OrderedDict) for x in metric_dict['values']]\n # we need to sort the values by the bucket labeled \"le\"\n sorted_keys = sorted(samples, key=lambda b: b['le'])\n # and then we need to store the values again json encoded\n vals = metric_dict['values']\n metric_dict['values'] = OrderedDict()\n for k in sorted_keys:\n kn = json.dumps(k, sort_keys=True)\n metric_dict['values'][kn] = vals[kn]\n\n return metric_dict", "def Create(cls, group_key, machine_id, timestamp, payload):\n sort_key = util.CreateSortKeyPrefix(timestamp, randomness=False) + machine_id\n metric = Metric(group_key, sort_key)\n metric.machine_id = machine_id\n metric.timestamp = timestamp\n metric.payload = payload\n return metric", "def _upsert_ad_performance(ad_insights: [adsinsights.AdsInsights], con: sqlite3.Connection):\n con.execute(\"\"\"\nCREATE TABLE IF NOT EXISTS ad_performance (\n date DATE NOT NULL,\n ad_id BIGINT NOT NULL,\n device TEXT NOT NULL,\n performance TEXT NOT NULL,\n PRIMARY KEY (ad_id, device)\n);\"\"\")\n con.executemany(\"INSERT OR REPLACE INTO ad_performance VALUES (?,?,?,?)\",\n _to_insight_row_tuples(ad_insights))", "def create_metric(self, metric, metric_name=None):\n metric_name = metric_name or metric.name\n with self._accessor_lock:\n self._accessor.create_metric(metric)\n self._cache_set(metric_name, metric)", "def upload_metrics(metrics_dict, project, dataset, table):\n # Credentials will be loaded from envvar $GOOGLE_APPLICATION_CREDENTIALS.\n bq_client = bigquery.Client(project=project)\n table_ref = bq_client.dataset(dataset).table(table)\n errors = bq_client.insert_rows_json(table_ref, metrics_dict)\n return errors", "def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in separated_metrics:\n separated_metrics[key] = [el]\n else:\n 
separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics", "def __init__(self, metrics_to_record):\n self.tape = {}\n\n for metric_name in metrics_to_record:\n self.tape[metric_name] = []", "def update_metrics(submission_id):\n print(\"Updating loglosses...\", submission_id)\n postgres_db = connect_to_postgres()\n cursor = postgres_db.cursor()\n submission = read_csv(postgres_db, submission_id).set_index('id')\n submission.rename(columns=lambda col: 'probability'\n if col.startswith('prediction_', ) else col,\n inplace=True)\n tournament, _round_number, dataset_path = get_round(\n postgres_db, submission_id)\n\n # Get the truth data\n print(\"Getting validation data...\", submission_id)\n dataset_version = dataset_path.split('/')[0]\n validation_data = tc.get_validation_data(s3,\n dataset_version).set_index('id')\n\n # Sort submission data\n print(\"Getting validation subset of data...\", submission_id)\n submission_validation_data = submission.loc[validation_data.index]\n\n # Calculate correlation\n print(\"Calculating validation_correlation...\", submission_id)\n validation_correlation = calc_correlation(\n validation_data[f\"target_{tournament}\"],\n submission_validation_data.probability)\n\n # Insert values into Postgres\n print(\"Updating validation_correlation...\", submission_id)\n query = \"UPDATE submissions SET validation_correlation={} WHERE id = '{}'\".format(\n validation_correlation, submission_id)\n print(query)\n cursor.execute(query)\n print(\"Updated {} with validation_correlation={}'\".format(\n submission_id, validation_correlation))\n postgres_db.commit()\n cursor.close()\n postgres_db.close()", "def log_model_metadata(model_uid, schema, db_conn):\n df = pd.DataFrame({\n 'training_timestamp': [get_current_timestamp()],\n 'model_uid': [model_uid]\n })\n df.to_sql(name='model_metadata', schema=schema, con=db_conn, if_exists='append', index=False)", "def get_metric(metric):\n try:\n return(json.dumps(holder.db[metric]))\n except KeyError as e:\n raise MqttError(\"Metric %s not found in holder's DB\" % metric, status_code=404)", "def record_sensor_readings(data_dict, metric_list, output_file_name):\n sensor_vals = 
[]\n # going though metric_list to keep order consistent\n for metric in metric_list:\n if metric in data_dict:\n sensor_vals.append(str(data_dict[metric]))\n else:\n # value not recorded properly\n sensor_vals.append(\"null\")\n vals = \",\".join(sensor_vals)\n\n # write to file\n # TODO: keep file open for duration of the drive to avoid re-opening it at each iteration\n with open(output_file_name, 'a') as file:\n file.write(vals+\"\\n\")", "def append(self, key, record):\n if not self._schema:\n self._schema = _infer_schema(record)\n self._write_schema()\n\n # append record to datatmp\n offset = self._write_record(record)\n\n # add key and offset ptr to in-memory keymap dictionary\n self._keymap[key] = offset", "def add_metrics_point(self, data_points: Dict[str, float], timestamp: float):\n for name, value in data_points.items():\n # Using in-sort to insert while maintaining sorted ordering.\n bisect.insort(a=self.data[name], x=TimeStampedValue(timestamp, value))", "def record(method, arguments, result):\n recorder[call_to_key(method, arguments)] = result", "def add_category_stats(self, category, cursor):\n\n conf = CATEGORIES[category]\n if 'SQL' not in conf:\n return\n\n self.logger.debug(\"Collecting stats for %s\" % category)\n cursor.execute(conf['SQL'])\n\n # call the self.parse_\"parser\"_stats\" function for each one to get the raw key/value pairs\n results = getattr(self, \"parse_%s_stats\" % conf['parser'])(cursor)\n\n # now filter the results to only the things we care about\n for key in results:\n var = key.lower()\n metric_name = \"/\".join((category, var))\n\n val = self.parse_metric_value(results.get(key))\n\n # special case for slave/seconds_behind_master\n if not self.is_number(val) and var == \"seconds_behind_master\":\n val = -1.0\n\n self.update_metric(metric_name, val)\n\n if category == \"slave\":\n # log that we've processed slave data, if we have, so we know if\n # we should expect values in derive_newrelic_slaves()\n self.has_slave_data = True", "def push(self, record: Tuple[MeasureInput, MeasureResult]):\n # Push with -cost as heapq is min-heap as we want the worst record on the top.\n heapq.heappush(\n self._data, (-np.mean([v.value for v in record[1].costs]), record[1].timestamp, record)\n )", "def add_metric(self, metric_class, namespace, name, value=1.0, tags=None, interval=None):\n # type: (Type[Metric], str, str, float, MetricTagType, Optional[float]) -> None\n metric_id = Metric.get_id(name, namespace, tags, metric_class.metric_type)\n if metric_class is DistributionMetric:\n metrics_type_payload = TELEMETRY_TYPE_DISTRIBUTION\n else:\n metrics_type_payload = TELEMETRY_TYPE_GENERATE_METRICS\n\n with self._lock:\n existing_metric = self._metrics_data[metrics_type_payload][namespace].get(metric_id)\n if existing_metric:\n existing_metric.add_point(value)\n else:\n new_metric = metric_class(namespace, name, tags=tags, common=True, interval=interval)\n new_metric.add_point(value)\n self._metrics_data[metrics_type_payload][namespace][metric_id] = new_metric", "def register_additional_metric_ops(\n self, metric_ops: Dict[str, Tuple[tf.Tensor, tf.Tensor]]) -> None:\n for metric_name, (value_op, update_op) in metric_ops.items():\n if metric_name in self._metric_names:\n raise ValueError('tried to register new metric with name %s, but a '\n 'metric with that name already exists.' 
% metric_name)\n self._metric_names.append(metric_name)\n self._metric_value_ops.append(value_op)\n self._metric_update_ops.append(update_op)\n\n # Update metric variables incrementally with only the new elements in the\n # metric_variables collection.\n collection = self._graph.get_collection(\n tf.compat.v1.GraphKeys.METRIC_VARIABLES)\n collection = collection[len(self._metric_variable_nodes):]\n\n # Note that this is a node_list - it's not something that TFMA\n # configures, but something that TF.Learn configures.\n #\n # As such, we also use graph.get_tensor_by_name directly, instead of\n # TFMA's version which expects names encoded by TFMA.\n for node in collection:\n self._metric_variable_nodes.append(node)\n with self._graph.as_default():\n placeholder = tf.compat.v1.placeholder(\n dtype=node.dtype, shape=node.get_shape())\n self._metric_variable_placeholders.append(placeholder)\n self._metric_variable_assign_ops.append(\n tf.compat.v1.assign(node, placeholder))\n\n with self._graph.as_default():\n self._all_metric_variable_assign_ops = tf.group(\n *self._metric_variable_assign_ops)\n self._all_metric_update_ops = tf.group(*self._metric_update_ops)\n self._reset_variables_op = tf.compat.v1.local_variables_initializer()\n self._session.run(self._reset_variables_op)\n\n self._perform_metrics_update_fn = self._session.make_callable(\n fetches=self._all_metric_update_ops,\n feed_list=self._perform_metrics_update_fn_feed_list)", "def log_eval_summary(step: int,\n *,\n writer: metric_writers.MetricWriter,\n eval_metrics: Sequence[Dict[str, Tuple[float, int]]],\n extra_eval_summary: Optional[Dict[str, float]] = None,\n metrics_normalizer_fn: Optional[\n Callable[[Dict[str, Tuple[float, int]], str],\n Dict[str, float]]] = None,\n prefix: str = 'valid',\n key_separator: str = '_') -> Dict[str, float]:\n eval_metrics = stack_forest(eval_metrics)\n\n # Compute the sum over all examples in all batches.\n eval_metrics_summary = jax.tree_map(lambda x: x.sum(), eval_metrics)\n # Normalize metrics by the total number of exampels.\n metrics_normalizer_fn = metrics_normalizer_fn or normalize_metrics_summary\n eval_metrics_summary = metrics_normalizer_fn(eval_metrics_summary, 'eval')\n # If None, set to an empty dictionary.\n extra_eval_summary = extra_eval_summary or {}\n\n # Adds extra_eval_summary to the returned eval_summary.\n eval_metrics_summary.update(extra_eval_summary)\n\n writer.write_scalars(\n step, {\n key_separator.join((prefix, key)): val\n for key, val in eval_metrics_summary.items()\n })\n\n return eval_metrics_summary", "def parse_dw_key_val(self, line, dw_metrics):\n\n line = line.split(':')\n metric = line[0].strip()\n data = line[1].strip()\n\n url_pattern = re.compile(ALPHANUMERIC_URL_REGEX)\n\n if url_pattern.match(metric) is not None:\n percentage = float(data.split(',')[0][:-1])\n percentage = round(percentage, 3)\n dw_metrics['URL hit percentages'][metric] = percentage\n else:\n if 'N/A' in data:\n dw_metrics[metric] = -1.0\n return\n\n data = data.split('---')[0].strip().split(' ')\n metric, val = self.parse_dw_data(data, metric)\n dw_metrics[metric] = val" ]
[ "0.6167947", "0.59710056", "0.59208757", "0.5898851", "0.58011967", "0.5774532", "0.5704721", "0.5638924", "0.5566081", "0.5514177", "0.5501307", "0.54829776", "0.5433093", "0.5422249", "0.5399425", "0.53741735", "0.53741735", "0.5347321", "0.5339919", "0.533079", "0.53289974", "0.52881753", "0.5282723", "0.52649164", "0.5256933", "0.5252954", "0.52471787", "0.51843643", "0.51839995", "0.5177397", "0.51552534", "0.5142436", "0.5138828", "0.5131612", "0.5104482", "0.508665", "0.507186", "0.5058182", "0.5049857", "0.5037443", "0.502339", "0.49728012", "0.49469262", "0.49325562", "0.4931561", "0.49298567", "0.49276462", "0.49024597", "0.48964882", "0.4895837", "0.4895789", "0.4895409", "0.48843578", "0.48743364", "0.4854769", "0.48482728", "0.4843048", "0.48321316", "0.4821937", "0.48212942", "0.4815689", "0.4813874", "0.48098135", "0.4809735", "0.4800447", "0.47925147", "0.47921473", "0.47807217", "0.4758097", "0.4754272", "0.47512662", "0.47430998", "0.47262517", "0.47213268", "0.4715788", "0.47130287", "0.4699883", "0.4697116", "0.46949965", "0.4693397", "0.46832272", "0.4668509", "0.46619964", "0.46440595", "0.46395558", "0.4638425", "0.46381968", "0.46298963", "0.4622447", "0.46210343", "0.46196133", "0.46107846", "0.4602787", "0.4596055", "0.45933422", "0.45784202", "0.45743155", "0.4574097", "0.45627832", "0.45627093" ]
0.5720255
6
Check if transaction is the slowest transaction and update accordingly.
def _update_slow_transaction(self, transaction):
    slowest = 0
    name = transaction.path

    if self.__slow_transaction:
        slowest = self.__slow_transaction.duration
    if name in self.__slow_transaction_map:
        slowest = max(self.__slow_transaction_map[name], slowest)

    if transaction.duration > slowest:
        # We are going to replace the prior slow transaction.
        # We need to be a bit tricky here. If we are overriding
        # an existing slow transaction for a different name,
        # then we need to restore in the transaction map what
        # the previous slowest duration was for that, or remove
        # it if there wasn't one. This is so we do not incorrectly
        # suppress it given that it was never actually reported
        # as the slowest transaction.

        if self.__slow_transaction:
            if self.__slow_transaction.path != name:
                if self.__slow_transaction_old_duration:
                    self.__slow_transaction_map[
                            self.__slow_transaction.path] = (
                            self.__slow_transaction_old_duration)
                else:
                    del self.__slow_transaction_map[
                            self.__slow_transaction.path]

        if name in self.__slow_transaction_map:
            self.__slow_transaction_old_duration = (
                    self.__slow_transaction_map[name])
        else:
            self.__slow_transaction_old_duration = None

        self.__slow_transaction = transaction
        self.__slow_transaction_map[name] = transaction.duration
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_database_with_block(self, block):\n\n txs = sorted(block['txs'], key=lambda x: x['count'] if 'count' in x else -1)\n\n for tx in txs:\n result = self.update_database_with_tx(tx, block['length'])\n if not result:\n return False\n\n return True", "def merge_slow_sql_node(self, node):\n\n duration = node.duration\n\n self[1] += duration\n self[2] = self[0] and min(self[2], duration) or duration\n self[3] = max(self[3], duration)\n\n if self[3] == duration:\n self[4] = node\n\n # Must update the call count last as update of the\n # minimum call time is dependent on initial value.\n\n self[0] += 1", "def slow_update_duration(self):\n for i in range(len(self.data_file.sorted_data)):\n if self.data_file.sorted_data[i]['type'] == 'slow':\n slow_upd = self.data_file.sorted_data[i]['timestamp']\n Config.ANALYSIS.write(f\"slow at: {slow_upd}\\n\")\n if i == 0:\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n elif i == len(self.data_file.sorted_data) - 1:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\\n\")\n else:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\")\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n Config.ANALYSIS.write(\"\\n\\n\")", "def test_commit_optimize(self):\n # Same id, data and user_id\n id = data = user_id = get_rand_string()\n self.conn.add(id=id, user_id=user_id, data=data)\n\n # Make sure the changes weren't commited.\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 0,\n (\"Changes to index shouldn't be visible without commiting, \"\n \"results:%s\" % (repr(results))))\n\n # Optimizing commits the changes\n self.conn.commit(_optimize=True)\n\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 1,\n \"No documents returned, results:%s\" % (repr(results)))", "def exit_transaction():\n _state.transactions = max(get_transactions() - 1, 0)", "def search_UI_transaction_bigger(account):\n\t_amount = read_amount()\n\tfound = search_transaction_bigger(account, _amount, print_transaction)\n\tif (not found):\n\t\tprint(\"Nu exista nici o tranzactie cu suma mai mare de %f.\" % (_amount))", "def test_optimize(self):\n # Same id, data and user_id\n id = data = user_id = get_rand_string()\n self.conn.add(id=id, user_id=user_id, data=data)\n\n # Make sure the changes weren't commited.\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 0,\n (\"Changes to index shouldn't be visible without call\"\n \"to optimize first, results:%s\" % (repr(results))))\n\n # Optimizing commits the changes\n self.conn.optimize()\n\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 1,\n \"No documents returned, results:%s\" % (repr(results)))", "def optimize_for_dagit(self, statement_timeout):", "def slow(newETM): #Status: Done, not tested\r\n pass", "def _should_try_reoptimize(self, last_statistics_refresh_time: timedelta, last_event: Event):\n if last_statistics_refresh_time is None:\n return True\n return last_event.max_timestamp - last_statistics_refresh_time > 
self.__statistics_update_time_window", "def supports_transactions(self):\n return self._mysql_storage_engine != \"MyISAM\"", "def optimize_for_dagit(self, statement_timeout: int):", "def transaction_run():\n print('working...')\n # Get all transaction\n transactions = executor.submit(Transaction.query.filter_by(done=False).all)\n print(transactions.result())\n # Check if thier a transactions\n if transactions.result():\n # Go through each transaction\n for tran in transactions.result():\n print(\"Looping...\")\n # print(trans)\n # Get the currency account for the source user\n currency = executor.submit(Currency.query.filter_by(user_id=tran.user_id).first).result()\n print(currency)\n # target_user = executor.submit(User.query.filter_by(id=tran.target_user).first).result()\n # print(target_user)\n # Get the currency account for the target user\n target = executor.submit(Currency.query.filter_by(user_id=tran.target_user).first).result()\n # Get the transaction account for the target user\n trans_target = executor.submit(Transaction.query.filter_by(user_id=tran.target_user).first).result()\n ### # TODO:\n trans_source = executor.submit(Transaction.query.filter_by(user_id=tran.user_id).first).result()\n # update replace all tran with trans_source\n\n print(tran)\n # print(target_user)\n print(target)\n print(trans_target)\n # Check if the target user has account\n if target:\n # If the user send to himself fail the transaction\n if tran.user_id == tran.target_user:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n else:\n # If the currency type is bitcoin\n # Check if the user has a bitcoin ID\n if tran.currency_Type.lower() == \"bitcoin\":\n if not currency.bitcoin_id:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You don't have a bitcoin account!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If user has a bitcoin ID\n # Check if transfared money greater than his balance or not\n # Check if transfared money greater than the max amount per transaction or not\n else:\n if tran.currency_amount > currency.bitcoin_balance:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n elif tran.currency_amount > currency.max_amount:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # Everything ok, then subtract the transfared money from source user\n # Add transfare maney to target user\n else:\n balance = currency.bitcoin_balance - tran.currency_amount\n # updated_balance = str(balance)\n currency.bitcoin_balance = balance\n db.session.merge(currency)\n db.session.commit()\n db.session.remove()\n\n balance_target = target.bitcoin_balance + tran.currency_amount\n target.bitcoin_balance = balance_target\n db.session.merge(target)\n db.session.commit()\n db.session.remove()\n\n tran.state = \"Transaction success.\"\n tran.time_processed = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\")\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n\n # If the currency type is ethereum\n # Check if the user has a ethereum ID\n elif tran.currency_Type.lower() == \"ethereum\":\n if not currency.ethereum_id:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. 
You don't have a ethereum account!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If user has a ethereum ID\n # Check if transfared money greater than his balance or not\n # Check if transfared money greater than the max amount per transaction or not\n else:\n if tran.currency_amount > currency.ethereum_balance:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You don't have enough money!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n elif tran.currency_amount > currency.max_amount:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You exceed the max amount!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # Everything ok, then subtract the transfared money from source user\n # Add transfare maney to target\n else:\n balance = currency.ethereum_balance - tran.currency_amount\n currency.ethereum_balance = balance\n db.session.merge(currency)\n db.session.commit()\n db.session.remove()\n\n balance_target = target.ethereum_balance + tran.currency_amount\n target.ethereum_balance = balance_target\n db.session.merge(target)\n db.session.commit()\n db.session.remove()\n\n tran.state = \"Transaction success.\"\n tran.time_processed = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\")\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # if the currency type not bitcoin or ethereum\n else:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If the user has no currency account\n else:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n\n\n # Finish the transaction request\n print(tran)\n tran.done = True\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n print('Done!!!!')", "def update_isolation(self, time: int):", "def _should_try_reoptimize(self, last_statistics_refresh_time: timedelta, last_event: Event):\n if self.__is_simultaneous_state:\n return False\n return super()._should_try_reoptimize(last_statistics_refresh_time, last_event)", "def update_highest_buy(self, limit):\n if limit.size == 0:\n #predecessor case\n limit = self.buy_tree.predecessor(limit)\n if limit is None:\n #no predecessor\n self.highest_buy = None\n else: # have a predecessor but dont know if it has order or not\n if limit.size == 0: #limit has no order but other limits in the tree might have orders\n if self.buy_tree.size == 0: #we know no other limits have an order\n self.highest_buy = None\n else: #other limits have an order\n while limit.size == 0:\n limit = self.buy_tree.predecessor(limit)\n #now our limit has a valid order\n self.highest_buy = limit.price\n else: #found valid pred\n self.highest_buy = limit.price", "def is_transaction(self) -> bool:\n return False", "def is_best(self, val) -> bool:\n if self.val is None or (val > self.val):\n self.val = val\n print(\"Updating Best\")\n return True\n else:\n return False", "async def short_sync_backtrack(\n self, peer: WSChiaConnection, peak_height: uint32, target_height: uint32, target_unf_hash: bytes32\n ) -> bool:\n try:\n if peer.peer_node_id not in self.sync_store.backtrack_syncing:\n self.sync_store.backtrack_syncing[peer.peer_node_id] = 0\n self.sync_store.backtrack_syncing[peer.peer_node_id] += 1\n\n unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(target_unf_hash)\n curr_height: int = target_height\n found_fork_point = False\n 
blocks = []\n while curr_height > peak_height - 5:\n # If we already have the unfinished block, don't fetch the transactions. In the normal case, we will\n # already have the unfinished block, from when it was broadcast, so we just need to download the header,\n # but not the transactions\n fetch_tx: bool = unfinished_block is None or curr_height != target_height\n curr = await peer.call_api(\n FullNodeAPI.request_block, full_node_protocol.RequestBlock(uint32(curr_height), fetch_tx)\n )\n if curr is None:\n raise ValueError(f\"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, timed out\")\n if curr is None or not isinstance(curr, full_node_protocol.RespondBlock):\n raise ValueError(\n f\"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, wrong type {type(curr)}\"\n )\n blocks.append(curr.block)\n if self.blockchain.contains_block(curr.block.prev_header_hash) or curr_height == 0:\n found_fork_point = True\n break\n curr_height -= 1\n if found_fork_point:\n for block in reversed(blocks):\n await self.add_block(block, peer)\n except (asyncio.CancelledError, Exception):\n self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1\n raise\n\n self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1\n return found_fork_point", "def update_trades(self, timestamp):\n if not self.trades:\n return\n\n trader = self.strategy.trader()\n\n #\n # for each trade check if the TP or SL is reached and trigger if necessary\n #\n\n self.lock()\n\n for trade in self.trades:\n\n #\n # managed operation\n #\n\n if trade.has_operations():\n mutated = False\n\n for operation in trade.operations:\n mutated |= operation.test_and_operate(trade, self.instrument, trader)\n\n if mutated:\n trade.cleanup_operations()\n\n #\n # active trade\n #\n\n if trade.is_active():\n # for statistics usage\n trade.update_stats(self.instrument.close_exec_price(trade.direction), timestamp)\n\n #\n # asset trade\n #\n\n if trade.trade_type == StrategyTrade.TRADE_BUY_SELL:\n if trade.is_closed():\n continue\n\n # process only on active trades\n if not trade.is_active():\n # @todo timeout if not filled before condition...\n continue\n\n if trade.is_closing():\n continue\n\n if not self.instrument.tradeable:\n continue\n\n if trade.is_dirty:\n # entry quantity changed need to update the exits orders\n trade.update_dirty(trader, self.instrument)\n\n # potential order exec close price (always close a long)\n close_exec_price = self.instrument.close_exec_price(Order.LONG)\n\n if (trade.tp > 0) and (close_exec_price >= trade.tp) and not trade.has_limit_order():\n # take profit trigger stop, close at market (taker fee)\n if trade.close(trader, self.instrument):\n # notify\n self.strategy.notify_order(trade.id, Order.SHORT, self.instrument.market_id,\n self.instrument.format_price(close_exec_price), timestamp, trade.timeframe,\n 'take-profit', trade.estimate_profit_loss(self.instrument))\n\n # streaming (but must be done with notify)\n if self._global_streamer:\n self._global_streamer.member('buy-exit').update(close_exec_price, timestamp)\n\n elif (trade.sl > 0) and (close_exec_price <= trade.sl) and not trade.has_stop_order():\n # stop loss trigger stop, close at market (taker fee)\n if trade.close(trader, self.instrument):\n # notify\n self.strategy.notify_order(trade.id, Order.SHORT, self.instrument.market_id,\n self.instrument.format_price(close_exec_price), timestamp, trade.timeframe,\n 'stop-loss', trade.estimate_profit_loss(self.instrument))\n\n # streaming (but must be done with notify)\n if 
self._global_streamer:\n self._global_streamer.member('buy-exit').update(close_exec_price, timestamp)\n\n #\n # margin trade\n #\n\n elif trade.trade_type in (StrategyTrade.TRADE_MARGIN, StrategyTrade.TRADE_POSITION, StrategyTrade.TRADE_IND_MARGIN):\n # process only on active trades\n if not trade.is_active():\n # @todo timeout if not filled before condition...\n continue\n\n if trade.is_closed():\n continue\n\n if trade.is_closing():\n continue\n\n if not self.instrument.tradeable:\n continue\n\n # potential order exec close price\n close_exec_price = self.instrument.close_exec_price(trade.direction)\n\n if (trade.tp > 0) and ((trade.direction > 0 and close_exec_price >= trade.tp) or (trade.direction < 0 and close_exec_price <= trade.tp)) and not trade.has_limit_order():\n # close in profit at market (taker fee)\n if trade.close(trader, self.instrument):\n # and notify\n self.strategy.notify_order(trade.id, trade.close_direction(), self.instrument.market_id,\n self.instrument.format_price(close_exec_price), timestamp, trade.timeframe,\n 'take-profit', trade.estimate_profit_loss(self.instrument))\n\n # and for streaming\n if self._global_streamer:\n self._global_streamer.member('sell-exit' if trade.direction < 0 else 'buy-exit').update(close_exec_price, timestamp)\n\n elif (trade.sl > 0) and ((trade.direction > 0 and close_exec_price <= trade.sl) or (trade.direction < 0 and close_exec_price >= trade.sl)) and not trade.has_stop_order():\n # close a long or a short position at stop-loss level at market (taker fee)\n if trade.close(trader, self.instrument):\n # and notify\n self.strategy.notify_order(trade.id, trade.close_direction(), self.instrument.market_id,\n self.instrument.format_price(close_exec_price), timestamp, trade.timeframe,\n 'stop-loss', trade.estimate_profit_loss(self.instrument))\n\n # and for streaming\n if self._global_streamer:\n self._global_streamer.member('sell-exit' if trade.direction < 0 else 'buy-exit').update(close_exec_price, timestamp)\n\n self.unlock()\n\n #\n # remove terminated, rejected, canceled and empty trades\n #\n\n mutated = False\n\n self.lock()\n\n for trade in self.trades:\n if trade.can_delete():\n mutated = True\n\n # cleanup if necessary before deleting the trade related refs\n trade.remove(trader)\n\n # record the trade for analysis and study\n if not trade.is_canceled():\n # last update of stats before logging\n trade.update_stats(self.instrument.close_exec_price(trade.direction), timestamp)\n\n # realized profit/loss\n profit_loss = trade.profit_loss - trade.entry_fees_rate() - trade.exit_fees_rate()\n\n # perf sommed here it means that its not done during partial closing\n if profit_loss != 0.0:\n self._stats['perf'] += profit_loss\n self._stats['best'] = max(self._stats['best'], profit_loss)\n self._stats['worst'] = min(self._stats['worst'], profit_loss)\n\n if profit_loss <= 0.0:\n self._stats['cont-loss'] += 1\n self._stats['cont-win'] = 1\n\n elif profit_loss > 0.0:\n self._stats['cont-loss'] = 0\n self._stats['cont-win'] += 1\n\n record = {\n 'id': trade.id,\n 'eot': trade.entry_open_time,\n 'xot': trade.exit_open_time,\n 'freot': trade.first_realized_entry_time,\n 'frxot': trade.first_realized_exit_time,\n 'lreot': trade.last_realized_entry_time,\n 'lrxot': trade.last_realized_exit_time,\n 'd': trade.direction_to_str(),\n 'l': self.instrument.format_quantity(trade.order_price),\n 'q': self.instrument.format_quantity(trade.order_quantity),\n 'e': self.instrument.format_quantity(trade.exec_entry_qty),\n 'x': 
self.instrument.format_quantity(trade.exec_exit_qty),\n 'tp': self.instrument.format_price(trade.take_profit),\n 'sl': self.instrument.format_price(trade.stop_loss),\n 'tf': timeframe_to_str(trade.timeframe),\n 'aep': self.instrument.format_price(trade.entry_price),\n 'axp': self.instrument.format_price(trade.exit_price),\n 's': trade.state_to_str(),\n 'b': self.instrument.format_price(trade.best_price()),\n 'w': self.instrument.format_price(trade.worst_price()),\n 'bt': trade.best_price_timestamp(),\n 'wt': trade.worst_price_timestamp(),\n 'pl': profit_loss,\n 'fees': trade.entry_fees_rate() + trade.exit_fees_rate(),\n 'c': trade.get_conditions(),\n 'com': trade.comment,\n 'rpnl': self.instrument.format_price(trade.unrealized_profit_loss), # once close its realized\n 'pnlcur': trade.profit_loss_currency\n }\n\n if profit_loss < 0:\n self._stats['failed'].append(record)\n elif profit_loss > 0:\n self._stats['success'].append(record)\n else:\n self._stats['roe'].append(record)\n\n if self._reporting == StrategyTrader.REPORTING_VERBOSE:\n self.report(trade, False)\n\n # recreate the list of trades\n if mutated:\n trades_list = []\n\n for trade in self.trades:\n if not trade.can_delete():\n # keep only active and pending trades\n trades_list.append(trade)\n\n self.trades = trades_list\n\n self.unlock()", "def deciding(self):\n\n if not self.db.cacheEmpty():\n cacheMsgs = self.db.getCacheMsgs()\n prev = datetime.datetime.min\n prev_location = \"FOO LOCATION\"\n for msg in cacheMsgs:\n neutrinoTime = msg[\"neutrino_time\"]\n # go through messages to check if any two or more are within the time threshold\n if neutrinoTime - datetime.timedelta(seconds=self.coinc_threshold) <= prev:\n # verify the locations are different\n if msg[\"location\"] != prev_location:\n return True\n prev = neutrinoTime\n prev_location = msg[\"location\"]\n return False\n\n # return not self.db.cacheEmpty()", "def cache_txn_manage(database, table, action, trans=None, **kw):\n trace = kw['trace']\n cache = server.data[database].tables['cache']\n transaction = request.get_json() if trans == None else trans\n if 'txn' in transaction:\n txn_id = transaction['txn']\n tx=None\n wait_time = 0.0 # total time waiting to commit txn \n wait_interval = txn_default_wait_in_sec # amount of time to wait between checks - if multiple txns exist \n # Get transaction from cache db\n if action == 'commit':\n while True:\n txns = cache.select('id','timestamp',\n where={'table_name': table}\n )\n if not txn_id in {tx['id'] for tx in txns}:\n return {\"message\": trace.error(f\"{txn_id} does not exist in cache\")}, 500\n if len(txns) == 1:\n if not txns[0]['id'] == txn_id:\n warning = f\"txn with id {txn_id} does not exist for {database} {table}\"\n return {'warning': trace.warning(warning)}, 500\n # txn_id is only value inside\n tx = txns[0]\n break\n # multiple pending txns - need to check timestamp to verify if this txn can be commited yet\n txns = sorted(txns, key=lambda txn: txn['timestamp'])\n for ind, txn in enumerate(txns):\n if txn['id'] == txn_id:\n if ind == 0:\n tx = txns[0]\n break\n if wait_time > txn_max_wait_time_in_sec:\n warning = f\"timeout of {wait_time} reached while waiting to commit {txn_id} for {database} {table}, waiting on {txns[:ind]}\"\n trace.warning(warning)\n trace.warning(f\"removing txn with id {txns[0]['id']} maxWaitTime of {txn_max_wait_time_in_sec} reached\")\n cache.delete(where={'id': txns[0]['id']})\n break\n break\n if tx == None:\n trace.warning(f\"txn_id {txn_id} is behind txns {txns[:ind]} - waiting 
{wait_time} to retry\")\n time.sleep(wait_interval)\n wait_time+=wait_interval \n # wait_interval scales up to txn_max_wait_interval_in_sec\n wait_interval+=wait_interval \n if wait_interval >= txn_max_wait_interval_in_sec:\n wait_interval = txn_max_wait_interval_in_sec\n continue\n break\n # Should not have broken out of loop here without a tx\n if tx == None:\n trace.error(\"tx is None, this should not hppen\")\n return {\"error\": \"tx was none\"}, 500\n tx = cache.select('type','txn',\n where={'id': txn_id})[0]\n try:\n r, rc = server.actions[tx['type']](database, table, tx['txn'])\n trace.warning(f\"##cache {action} response {r} rc {rc}\")\n except Exception as e:\n r, rc = trace.exception(f\"Exception when performing cache {action}\"), 500\n \n del_txn = cache.delete(\n where={'id': txn_id}\n )\n if rc == 200:\n # update last txn id\n set_params = {\n 'set': {\n 'last_txn_uuid': txn_id,\n 'last_mod_time': float(time.time())\n },\n 'where': {\n 'table_name': table\n }\n }\n server.data['cluster'].tables['pyql'].update(\n **set_params['set'],\n where=set_params['where']\n )\n return {\"message\": r, \"status\": rc}, rc\n if action == 'cancel':\n del_txn = cache.delete(\n where={'id': txn_id}\n )\n return {'deleted': txn_id}, 200", "async def _finish_sync(self) -> None:\n self.log.info(\"long sync done\")\n self.sync_store.set_long_sync(False)\n self.sync_store.set_sync_mode(False)\n self._state_changed(\"sync_mode\")\n if self._server is None:\n return None\n\n async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):\n await self.sync_store.clear_sync_info()\n\n peak: Optional[BlockRecord] = self.blockchain.get_peak()\n peak_fb: Optional[FullBlock] = await self.blockchain.get_full_peak()\n if peak_fb is not None:\n assert peak is not None\n state_change_summary = StateChangeSummary(peak, uint32(max(peak.height - 1, 0)), [], [], [])\n ppp_result: PeakPostProcessingResult = await self.peak_post_processing(\n peak_fb, state_change_summary, None\n )\n await self.peak_post_processing_2(peak_fb, None, state_change_summary, ppp_result)\n\n if peak is not None and self.weight_proof_handler is not None:\n await self.weight_proof_handler.get_proof_of_weight(peak.header_hash)\n self._state_changed(\"block\")", "def in_transaction(self):\n # We likely just changed data - give it a second to catch up\n time.sleep(0.1) # I think I keep reading journal watermark too soon without this\n \n # Get relevant data\n water_mark = pos.read_journal_watermark()\n self.log.info(f\"Watermark: [{water_mark}]\")\n balance = pos.read_balance()['Total']\n self.log.info(f\"Balance: [{balance}]\")\n \n # Decide if we need more checks based on watermark\n if water_mark == \"TRANSACTION IN PROGRESS\":\n self.log.info(\"In Transaction: In Transaction Watermark found\")\n return True\n elif water_mark == \"TRANSACTION COMPLETE\" or water_mark == \"TRANSACTION VOIDED\":\n self.log.info(\"Not in Transaction: Transaction Complete/Voided watermarks found\")\n return False\n else:\n # No watermark - decide based on balance\n if balance == \"$0.00\":\n self.log.info(\"Not in Transaction: $0 balance with no watermark\")\n return False\n else:\n self.log.info(\"In Transaction: Non-$0 balance with no watermark\")\n return True", "def get_long_trx(self):\n if self.skip_long_trx_check:\n return False\n processes = self.query(sql.show_processlist)\n for proc in processes:\n if not proc[\"Info\"]:\n sql_statement = \"\"\n else:\n if isinstance(proc[\"Info\"], bytes):\n sql_statement = 
proc[\"Info\"].decode(\"utf-8\", \"replace\")\n else:\n sql_statement = proc[\"Info\"]\n\n proc[\"Info\"] = sql_statement\n # Time can be None if the connection is in \"Connect\" state\n if (\n (proc.get(\"Time\") or 0) > self.long_trx_time\n and proc.get(\"db\", \"\") == self._current_db\n and self.table_name in \"--\" + sql_statement\n and not proc.get(\"Command\", \"\") == \"Sleep\"\n ):\n return proc", "def check_transaction(coins_inserted, cost_drink, machine_balance):\n if coins_inserted < cost_drink:\n return False\n else:\n if coins_inserted > cost_drink:\n change_given = coins_inserted - cost_drink\n print(f\"Here is ${change_given:0.2f} in change.\")\n return machine_balance + cost_drink", "def _warn_and_lock_if_needed(self, transaction: Transaction) -> None:\n budget = self.budget_manager.get_budget(transaction.budget_category)\n exceeded_ratio = budget.exceeded_ratio\n if exceeded_ratio > 1:\n self._notify_exceeded_budget(budget)\n self.print_transactions_for_review(budget)\n elif exceeded_ratio > 0.9:\n self._warn_nearing_exceed_budget(budget, 90)\n self.print_transactions_for_review(budget)", "def _warn_and_lock_if_needed(self, transaction: Transaction) -> None:\n pass", "def check_best(self):\n # Get the most profitable network based on our current data\n new_best = max(self.profit_data.iteritems(),\n key=operator.itemgetter(1))[0]\n\n if self.current_network is None:\n self.logger.info(\n \"No active network, so switching to {} with profit of {:,.4f}\"\n .format(new_best, self.profit_data[new_best]))\n self.next_network = new_best\n self.switch_network()\n return\n\n # If the currently most profitable network is 120% the profitability\n # of what we're mining on, we should switch immediately\n margin_switch = self.config['margin_switch']\n if (margin_switch and\n self.profit_data[self.next_network] >\n (self.profit_data[self.current_network] * margin_switch)):\n self.logger.info(\n \"Network {} {:,.4f} now more profitable than current network \"\n \"{} {:,.4f} by a fair margin. Switching NOW.\"\n .format(new_best, self.profit_data[new_best], self.current_network,\n self.profit_data[self.current_network]))\n self.next_network = new_best\n self.switch_network()\n return\n\n if new_best != self.next_network:\n self.logger.info(\n \"Network {} {:,.4f} now more profitable than current best \"\n \"{} {:,.4f}. 
Switching on next block from current network {}.\"\n .format(new_best, self.profit_data[new_best], self.next_network,\n self.profit_data[self.next_network], self.current_network))\n self.next_network = new_best\n return\n\n self.logger.debug(\"Network {} {:,.4f} still most profitable\"\n .format(new_best, self.profit_data[new_best]))", "def slow_upd_count(self):\n return self.upd_type_count(\"slow\", [0] * 24)", "def _update_time_delivered(self, time_delivered):\n # Update db record's time_delivered field\n update = {'time_delivered': time_delivered}\n datebase.update_transaction_record(filter=self.filter, update=update)\n \n # Update db record's estimated_time field\n datebase.update_transaction_record(filter=self.filter, {estimated_time:'0'})\n \n # Update db record's transaction status to delivered\n self._update_transaction_status(transaction_status='delivered')\n \t\t self.transaction_info.update(delivery_status='delivered')\n \n # Update object\n \t\tself.transaction_info.update(time_delivered=time_delivered)\n self.transaction_info.update(estimated_time=0)\n self.transaction_info(transaction_status='delivered')\n\n \tdef _update_transaction_status(self, transaction_status, photo=None):\n \"\"\"\n Update record's transaction_status and send sms msg to update seeker\n \"\"\"\n # Send text message when status changes \n self.send_text(message_type=transaction_status)\n\n # Update db record's transaction status\n update = {'transaction_status': transaction_status}\n datebase.update_transaction_record(filter=self.filter, update=update)\n\n # Update object\n self.transaction_info.update('transaction_seeker': transaction_status)\n\n # If delivered ... TODO: do we actually want to remove from db? \n \t\t# if transaction_status == 'delivered':\n # datebase.delete_transaction_record()\n # return 1 \n # arguments against: we wont be able to access delivered photo if we want to do that", "def record_transaction(self, transaction):\n\n if not self.__settings:\n return\n\n settings = self.__settings\n\n # Record the apdex, value and time metrics generated from the\n # transaction. Whether time metrics are reported as distinct\n # metrics or into a rollup is in part controlled via settings\n # for minimum number of unique metrics to be reported and thence\n # whether over a time threshold calculated as percentage of\n # overall request time, up to a maximum number of unique\n # metrics. This is intended to limit how many metrics are\n # reported for each transaction and try and cut down on an\n # explosion of unique metric names. The limits and thresholds\n # are applied after the metrics are reverse sorted based on\n # exclusive times for each metric. This ensures that the metrics\n # with greatest exclusive time are retained over those with\n # lesser time. 
Such metrics get reported into the performance\n # breakdown tab for specific web transactions.\n\n self.record_apdex_metrics(transaction.apdex_metrics(self))\n\n self.merge_custom_metrics(transaction.custom_metrics.metrics())\n\n self.record_time_metrics(transaction.time_metrics(self))\n\n # Capture any errors if error collection is enabled.\n # Only retain maximum number allowed per harvest.\n\n error_collector = settings.error_collector\n\n if (error_collector.enabled and settings.collect_errors and\n len(self.__transaction_errors) <\n settings.agent_limits.errors_per_harvest):\n self.__transaction_errors.extend(transaction.error_details())\n\n self.__transaction_errors = self.__transaction_errors[:\n settings.agent_limits.errors_per_harvest]\n\n if (error_collector.capture_events and\n error_collector.enabled and\n settings.collect_error_events):\n events = transaction.error_events(self.__stats_table)\n for event in events:\n self._error_events.add(event, priority=transaction.priority)\n\n # Capture any sql traces if transaction tracer enabled.\n\n if settings.slow_sql.enabled and settings.collect_traces:\n for node in transaction.slow_sql_nodes(self):\n self.record_slow_sql_node(node)\n\n # Remember as slowest transaction if transaction tracer\n # is enabled, it is over the threshold and slower than\n # any existing transaction seen for this period and in\n # the historical snapshot of slow transactions, plus\n # recording of transaction trace for this transaction\n # has not been suppressed.\n\n transaction_tracer = settings.transaction_tracer\n\n if (not transaction.suppress_transaction_trace and\n transaction_tracer.enabled and settings.collect_traces):\n\n # Transactions saved for Synthetics transactions\n # do not depend on the transaction threshold.\n\n self._update_synthetics_transaction(transaction)\n\n threshold = transaction_tracer.transaction_threshold\n\n if threshold is None:\n threshold = transaction.apdex_t * 4\n\n if transaction.duration >= threshold:\n self._update_slow_transaction(transaction)\n\n # Create the transaction event and add it to the\n # appropriate \"bucket.\" Synthetic requests are saved in one,\n # while transactions from regular requests are saved in another.\n\n if transaction.synthetics_resource_id:\n event = transaction.transaction_event(self.__stats_table)\n self._synthetics_events.add(event)\n\n elif (settings.collect_analytics_events and\n settings.transaction_events.enabled):\n\n event = transaction.transaction_event(self.__stats_table)\n self._transaction_events.add(event, priority=transaction.priority)\n\n # Merge in custom events\n\n if (settings.collect_custom_events and\n settings.custom_insights_events.enabled):\n self.custom_events.merge(transaction.custom_events)\n\n # Merge in span events\n\n if (settings.distributed_tracing.enabled and\n settings.span_events.enabled and settings.collect_span_events):\n if settings.infinite_tracing.enabled:\n for event in transaction.span_protos(settings):\n self._span_stream.put(event)\n elif transaction.sampled:\n for event in transaction.span_events(self.__settings):\n self._span_events.add(event, priority=transaction.priority)", "def _warn_and_lock_if_needed(self, transaction: Transaction) -> None:\n budget = self.budget_manager.get_budget(transaction.budget_category)\n exceeded_ratio = budget.exceeded_ratio\n if exceeded_ratio > 1.2:\n self._lock_budget(budget)\n self.print_transactions_for_review(budget)\n elif exceeded_ratio > 1:\n self._notify_exceeded_budget(budget)\n 
self.print_transactions_for_review(budget)\n elif exceeded_ratio > 0.75:\n self._warn_nearing_exceed_budget(budget, 75)\n self.print_transactions_for_review(budget)", "async def short_sync_batch(self, peer: WSChiaConnection, start_height: uint32, target_height: uint32) -> bool:\n # Don't trigger multiple batch syncs to the same peer\n\n if (\n peer.peer_node_id in self.sync_store.backtrack_syncing\n and self.sync_store.backtrack_syncing[peer.peer_node_id] > 0\n ):\n return True # Don't batch sync, we are already in progress of a backtrack sync\n if peer.peer_node_id in self.sync_store.batch_syncing:\n return True # Don't trigger a long sync\n self.sync_store.batch_syncing.add(peer.peer_node_id)\n\n self.log.info(f\"Starting batch short sync from {start_height} to height {target_height}\")\n if start_height > 0:\n first = await peer.call_api(\n FullNodeAPI.request_block, full_node_protocol.RequestBlock(uint32(start_height), False)\n )\n if first is None or not isinstance(first, full_node_protocol.RespondBlock):\n self.sync_store.batch_syncing.remove(peer.peer_node_id)\n raise ValueError(f\"Error short batch syncing, could not fetch block at height {start_height}\")\n if not self.blockchain.contains_block(first.block.prev_header_hash):\n self.log.info(\"Batch syncing stopped, this is a deep chain\")\n self.sync_store.batch_syncing.remove(peer.peer_node_id)\n # First sb not connected to our blockchain, do a long sync instead\n return False\n\n batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS\n if self._segment_task is not None and (not self._segment_task.done()):\n try:\n self._segment_task.cancel()\n except Exception as e:\n self.log.warning(f\"failed to cancel segment task {e}\")\n self._segment_task = None\n\n try:\n for height in range(start_height, target_height, batch_size):\n end_height = min(target_height, height + batch_size)\n request = RequestBlocks(uint32(height), uint32(end_height), True)\n response = await peer.call_api(FullNodeAPI.request_blocks, request)\n if not response:\n raise ValueError(f\"Error short batch syncing, invalid/no response for {height}-{end_height}\")\n async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):\n state_change_summary: Optional[StateChangeSummary]\n success, state_change_summary, _ = await self.add_block_batch(response.blocks, peer, None)\n if not success:\n raise ValueError(f\"Error short batch syncing, failed to validate blocks {height}-{end_height}\")\n if state_change_summary is not None:\n try:\n peak_fb: Optional[FullBlock] = await self.blockchain.get_full_peak()\n assert peak_fb is not None\n ppp_result: PeakPostProcessingResult = await self.peak_post_processing(\n peak_fb,\n state_change_summary,\n peer,\n )\n await self.peak_post_processing_2(peak_fb, peer, state_change_summary, ppp_result)\n except Exception:\n # Still do post processing after cancel (or exception)\n peak_fb = await self.blockchain.get_full_peak()\n assert peak_fb is not None\n await self.peak_post_processing(peak_fb, state_change_summary, peer)\n raise\n finally:\n self.log.info(f\"Added blocks {height}-{end_height}\")\n except (asyncio.CancelledError, Exception):\n self.sync_store.batch_syncing.remove(peer.peer_node_id)\n raise\n self.sync_store.batch_syncing.remove(peer.peer_node_id)\n return True", "def time_limit_as_of_update(self, order_update):\n all_time_limit_updates = self.updates.exclude(\n time_limit_delta=timedelta())\n considered_time_limit_updates = all_time_limit_updates.filter(\n id__lte=order_update.id)\n\n 
log.debug(\n ('Computing time limit for %s ' % self) +\n ('up to the point of update %s. ' % order_update) +\n ('This covers %d updates and may be expensive.' %\n considered_time_limit_updates.count()))\n time_limit = timedelta()\n for update in considered_time_limit_updates:\n time_limit += update.time_limit_delta\n\n return time_limit", "def update_succ(self, trade_hash, blockNumber=-1, result=None):\n sql_succ = \"\"\"\n UPDATE t_trade\n SET f_status=1, f_block_id=%s\n WHERE f_trade_addr=%s\n \"\"\"\n\n sql_set_result = \"\"\"\n UPDATE t_trade\n SET f_status=2, f_result=%s\n WHERE f_trade_addr=%s\n\n \"\"\"\n\n current_expect_id = get_current_expect_id()\n try:\n if result:\n self.cursor.execute(sql_set_result, (trade_hash, result))\n else:\n push_user_place_hash(current_expect_id, trade_hash)\n self.cursor.execute(sql_succ, (blockNumber, trade_hash))\n self.db.commit()\n except:\n self.db.rollback()\n raise\n else:\n print(\"Oder update successful!\")", "def check_for_energy_tax_update(self):\n\t\tisChanged = False\n\t\t# TODO: discuss the validity of such approach when checking on the global optimality\n\t\t# if self.currentEnergy < self._globalMinimumEnergy and self.currentTax == 0:\n\t\tif self.currentEnergy < self._globalMinimumEnergy and self.contains_several_vertices(self.currentState):\n\t\t\toutput(\"\\t New global optimum registered: old value = {}, new value = {}, state = {}\"\\\n\t\t\t\t\t .format(str(self._globalMinimumEnergy),str(self.currentEnergy),self.getCurrentState()),isDebug=False)\n\t\t\tself._globalMinimumEnergy = self.currentEnergy\n\t\t\tself._globalMinimumState = copy(self.currentState)\n\t\t\tisChanged = True\n\t\tif self.currentEnergy < self.localMinimumEnergy and not self.isAllZeros(self.currentState): #should we update global as well? 
Now I do it\n\t\t\toutput(\"\\t New local optimum registered: old value = {}, new value = {}, state = {}\"\\\n\t\t\t\t\t .format(self.localMinimumEnergy,self.currentEnergy,self.getCurrentState()),isDebug=False)\n\t\t\tself.localMinimumEnergy = self.currentEnergy\n\t\t\tself.localMinimumState = copy(self.currentState)\n\t\t\tself.localMinimumTax = self.currentTax\n\t\t\tisChanged = True\n\t\treturn isChanged", "def _check_transactions_balance(self, transactions):\n self.balance_lock.acquire()\n try:\n balance = copy.deepcopy(self._balance)\n finally:\n self.balance_lock.release()\n for tx_json in transactions:\n recv_tx = Transaction.from_json(tx_json)\n # Sender must exist so if it doesn't, return false\n if recv_tx.sender not in balance:\n return False\n # Create new account for receiver if it doesn't exist\n if recv_tx.receiver not in balance:\n balance[recv_tx.receiver] = 0\n balance[recv_tx.sender] -= recv_tx.amount\n balance[recv_tx.receiver] += recv_tx.amount\n # Negative balance, return false\n if balance[recv_tx.sender] < 0 \\\n or balance[recv_tx.receiver] < 0:\n return False\n return True", "def find_trans_hung(attinfo, taskinfo):\n\n intro = False\n\n reptime = datetime.datetime.now()\n count = 0\n for attd in sorted(attinfo.values(), key=lambda x: x['start_time']):\n atid = attd['task_id']\n if atid in taskinfo:\n found = False\n for tdict in sorted(taskinfo[atid].values(), key=lambda x: x['start_time']):\n if (tdict['name'].startswith('trans_input') and (reptime - tdict['start_time']).total_seconds() > 2*60*60) or (tdict['name'].startswith('trans_output') and (reptime - tdict['start_time']).total_seconds() > 10*60*60):\n found = True\n if not found:\n break\n if not intro:\n h2(\"The following transfers may be hung\\n\")\n intro = True\n print \" %s_r%dp%02d %s %s %i\\n\" % (attd['unitname'], int(attd['reqnum']), int(attd['attnum']), attd['pfwid'], attd['archive_path'], atid)\n for tdict in sorted(taskinfo[atid].values(), key=lambda x: x['start_time']):\n if (tdict['name'].startswith('trans_input') and (reptime - tdict['start_time']).total_seconds() > 2*60*60) or (tdict['name'].startswith('trans_output') and (reptime - tdict['start_time']).total_seconds() > 10*60*60):\n print \" %s %s %s %s %s %s %s\" % (tdict['id'], tdict['name'], tdict['exec_host'], tdict['start_time'], tdict['request_time'], tdict['grant_time'], tdict['release_time'])\n count += 1\n if count == 0:\n print \"\\nNo hung transfers found.\\n\"", "async def _update_live(self, trade: Dict[str, Any]):\n\n order = await self.api.get_order(trade['pair'], trade['order_id'])\n if order is None:\n self.log.error(\"Could not update trade {}.\", trade['order_id'])\n return\n\n is_open = order['open']\n quantity = order['quantity']\n remaining = order['remaining']\n unit_value = order['value']\n fees = order['fees']\n\n trade['filled'] = not is_open\n trade['quantity'] = quantity\n trade['remaining'] = remaining\n\n if trade['filled'] and unit_value is not None:\n base_mult = await self.market.get_pair_base_mult(config['trade_base'], trade['pair'])\n adjusted_value = unit_value * base_mult\n trade['open_value'] = adjusted_value\n trade['base_value'] = base_mult\n trade['fees'] = fees * base_mult\n\n self.log.info(\"Updated trade {}: filled {}, quantity {}, remaining {}.\",\n trade['order_id'], trade['filled'], quantity, remaining)", "def test_wait_tx_settled_ok(self, is_transaction_settled_mock):\n wait_tx_settled(\"some\", \"some\", timeout=4)", "def T_elapsed(T_amount: BlockHeight) -> bool:\n T_now = 
getCurrentBlockHeight()\n return T_now - T_init >= T_amount", "def test_increasing_trend_is_true_if_price_increase_for_3_updates(self):\n self.given_a_series_of_prices([8, 10, 12])\n self.assertTrue(self.goog.is_increasing_trend())", "def mine(self, storage):\n if not self.unconfirmed_transactions:\n return False\n\n while is_mining():\n time.sleep(0.1)\n\n set_mining()\n last_block = self.last_block\n\n # session = Session(engine)\n # pending_txns = session.query(Transaction).all()\n\n # print(pending_txns)\n\n # if len(pending_txns) <= 0:\n # return False\n \n # pending_txns2 = [{\"sender\": i.sender, \"receiver\": i.receiver, \"value\": i.value, \"message\": bytes(i.message), \"timestamp\": i.timestamp} for i in pending_txns]\n # print(pending_txns2)\n # print(self.unconfirmed_transactions)\n\n new_block = Block(index=last_block.index + 1,\n transactions=self.unconfirmed_transactions,\n timestamp=time.time(),\n previous_hash=last_block.hash)\n\n # pending_txns.delete()\n\n proof = self.proof_of_work(new_block)\n self.add_block(new_block, proof)\n\n self.unconfirmed_transactions = []\n # announce it to the network\n announce_new_block(new_block)\n # with open(\"blockchain.pkl\", \"wb\") as f:\n # pickle.dump(self.chain, f)\n # with open(\"blockchain.json\", \"wb\") as f:\n # f.write(self.get_chain_json())\n # storage.child(\"/blockchain.pkl\").put(\"blockchain.pkl\")\n # # storage.child(\"/blockchain.pkl\").put(\"blockchain.pkl\")\n # set_notmining()\n # print(\"starting thread\")\n upload_thread = threading.Thread(target=self.upload_files, args=(storage,))\n upload_thread.start()\n # print(\"started thread\")\n return new_block.index", "def storage_final(index):\n i, t = index[0], NN - 1\n return storage_state[i, t] >= storage_start_state[i]", "def test_transactions_no_save(self):\n\n transactions = self.bidding_round_manager.transactions_no_save([self.bidding_round])\n transactions = sorted(transactions, key=lambda x: (x.sell.order_id, x.buy.order_id))\n transactions_iterator = iter(transactions)\n\n self.is_equal_transaction(transactions_iterator.next(),\n Transaction(sell=self.stock_order_seller_0, buy=self.stock_order_third_party_0,\n share_amount=2,\n share_price=self.stock_order_seller_0.order_price_per_share,\n transaction_status=PROCESSED))\n self.is_equal_transaction(transactions_iterator.next(),\n Transaction(sell=self.stock_order_seller_0, buy=self.stock_order_third_party_1,\n share_amount=2,\n share_price=self.stock_order_seller_0.order_price_per_share,\n transaction_status=PROCESSED))\n self.is_equal_transaction(transactions_iterator.next(),\n Transaction(sell=self.stock_order_seller_0, buy=self.stock_order_third_party_2,\n share_amount=1,\n share_price=self.stock_order_seller_0.order_price_per_share,\n transaction_status=PROCESSED))\n self.is_equal_transaction(transactions_iterator.next(),\n Transaction(sell=self.stock_order_seller_0, buy=self.stock_order_employer_0,\n share_amount=10,\n share_price=self.stock_order_seller_0.order_price_per_share,\n transaction_status=PROCESSED))\n self.is_equal_transaction(transactions_iterator.next(),\n Transaction(sell=self.stock_order_seller_0, buy=self.stock_order_employer_1,\n share_amount=10,\n share_price=self.stock_order_seller_0.order_price_per_share,\n transaction_status=PROCESSED))\n self.is_equal_transaction(transactions_iterator.next(),\n Transaction(sell=self.stock_order_seller_0, buy=self.stock_order_employer_2,\n share_amount=10,\n share_price=self.stock_order_seller_0.order_price_per_share,\n 
transaction_status=PROCESSED))\n self.is_equal_transaction(transactions_iterator.next(),\n Transaction(sell=self.stock_order_seller_1, buy=self.stock_order_third_party_2,\n share_amount=9,\n share_price=self.stock_order_seller_1.order_price_per_share,\n transaction_status=PROCESSED))", "def block(self):\n start=time.time()\n max_wait=100\n while ((time.time()-start)<max_wait):\n if not self.query_running(): break\n return (time.time()-start)<max_wait", "def transaction_trace_data(self, connections):\n\n _logger.debug('Generating transaction trace data.')\n\n if not self.__settings:\n return []\n\n # Create a set 'traces' that is a union of slow transaction,\n # and Synthetics transactions. This ensures we don't send\n # duplicates of a transaction.\n\n traces = set()\n if self.__slow_transaction:\n traces.add(self.__slow_transaction)\n traces.update(self.__synthetics_transactions)\n\n # Return an empty list if no transactions were captured.\n\n if not traces:\n return []\n\n # We want to limit the number of explain plans we do across\n # these. So work out what were the slowest and tag them.\n # Later the explain plan will only be run on those which are\n # tagged.\n\n agent_limits = self.__settings.agent_limits\n explain_plan_limit = agent_limits.sql_explain_plans_per_harvest\n maximum_nodes = agent_limits.transaction_traces_nodes\n\n database_nodes = []\n\n if explain_plan_limit != 0:\n for trace in traces:\n for node in trace.slow_sql:\n # Make sure we clear any flag for explain plans on\n # the nodes in case a transaction trace was merged\n # in from previous harvest period.\n\n node.generate_explain_plan = False\n\n # Node should be excluded if not for an operation\n # that we can't do an explain plan on. Also should\n # not be one which would not be included in the\n # transaction trace because limit was reached.\n\n if (node.node_count < maximum_nodes and\n node.connect_params and node.statement.operation in\n node.statement.database.explain_stmts):\n database_nodes.append(node)\n\n database_nodes = sorted(database_nodes,\n key=lambda x: x.duration)[-explain_plan_limit:]\n\n for node in database_nodes:\n node.generate_explain_plan = True\n\n else:\n for trace in traces:\n for node in trace.slow_sql:\n node.generate_explain_plan = True\n database_nodes.append(node)\n\n # Now generate the transaction traces. 
We need to cap the\n # number of nodes capture to the specified limit.\n\n trace_data = []\n\n for trace in traces:\n transaction_trace = trace.transaction_trace(\n self, maximum_nodes, connections)\n\n data = [transaction_trace,\n list(trace.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n if trace.record_tt:\n force_persist = True\n else:\n force_persist = False\n\n if trace.include_transaction_trace_request_uri:\n request_uri = trace.request_uri\n else:\n request_uri = None\n\n trace_data.append([transaction_trace.start_time,\n root.end_time - root.start_time,\n trace.path,\n request_uri,\n pack_data,\n trace.guid,\n None,\n force_persist,\n None,\n trace.synthetics_resource_id, ])\n\n return trace_data", "def modeSlow(self, latestCount):\n \n try:\n # Handle counts.\n avgCt = self.bufferAvg(latestCount, self.__c_ct_slow)\n \n # Print the things.\n self.__liveCountPrint(latestCount, avg = avgCt)\n \n # If we have a storage mode set up...\n if self.__stg is not None:\n # Store the things.\n try:\n self.__stg.storeDatapoint([datetime.datetime.utcnow(), round(avgCt * 60.0, 3)])\n except:\n print(\"Failed to store data point: %s\" %traceback.format_exc())\n \n except:\n raise\n \n return", "def _update_solved_count(delta, task, profile, save_task=True, save_profile=True):\n if delta == 0:\n return\n\n task.solved_count += delta\n if save_task:\n task.save()\n\n profile.solved_count += delta\n profile.update_diff_distribution(task, delta)\n if save_profile:\n profile.save()", "def check(transaction):\n if not isinstance(transaction, Transaction):\n transaction = Transaction.objects.get(id=transaction)\n\n r = requests.post(\"https://www.blockonomics.co/api/searchhistory\",\n data=json.dumps({\"addr\": transaction.to_address}))\n try:\n history_data = json.loads(r.content.decode('utf-8'))['history'][0]\n except:\n return\n\n set_tx_details(history_data, transaction)", "def _check_duplicate_trans(self):\n transactions_set = set(self._transactions)\n return len(transactions_set) == len(self._transactions)", "def update_with_trade_update(self, trade_update: TradeUpdate) -> bool:\n trade_id: str = trade_update.trade_id\n\n if (trade_id in self.order_fills\n or (self.client_order_id != trade_update.client_order_id\n and self.exchange_order_id != trade_update.exchange_order_id)):\n return False\n\n self.order_fills[trade_id] = trade_update\n\n self.executed_amount_base += trade_update.fill_base_amount\n self.executed_amount_quote += trade_update.fill_quote_amount\n\n self.last_update_timestamp = trade_update.fill_timestamp\n self.check_filled_condition()\n\n return True", "def __perform_reoptimization(self, last_statistics_refresh_time: timedelta, last_event: Event):\n self.__statistics_collector.handle_event(last_event)\n if not self._should_try_reoptimize(last_statistics_refresh_time, last_event):\n # it is not yet time to recalculate the statistics\n return last_statistics_refresh_time\n new_statistics = self.__statistics_collector.get_statistics()\n if self.__optimizer.should_optimize(new_statistics, self._pattern):\n new_tree_plan = 
self.__optimizer.build_new_plan(new_statistics, self._pattern)\n new_tree = Tree(new_tree_plan, self._pattern, self.__storage_params)\n self._tree_update(new_tree, last_event.max_timestamp)\n # this is the new last statistic refresh time\n return last_event.max_timestamp", "def slower():\n try:\n ttsEng.slower()\n except Exception, e:\n logging.error(e)", "def record_slow_sql_node(self, node):\n\n if not self.__settings:\n return\n\n key = node.identifier\n stats = self.__sql_stats_table.get(key)\n if stats is None:\n # Only record slow SQL if not already over the limit on\n # how many can be collected in the harvest period.\n\n settings = self.__settings\n maximum = settings.agent_limits.slow_sql_data\n if len(self.__sql_stats_table) < maximum:\n stats = SlowSqlStats()\n self.__sql_stats_table[key] = stats\n\n if stats:\n stats.merge_slow_sql_node(node)\n\n return key", "def _update_executed(self, tx: BaseTransaction) -> None:\n tx_meta = tx.get_metadata()\n assert tx.hash is not None\n assert not tx_meta.voided_by\n log = self.log.new(tx=tx.hash_hex)\n log.debug('update executed')\n # remove all inputs\n for tx_input in tx.inputs:\n spent_tx = tx.get_spent_tx(tx_input)\n spent_tx_output = spent_tx.outputs[tx_input.index]\n log_it = log.new(tx_id=spent_tx.hash_hex, index=tx_input.index)\n if _should_skip_output(spent_tx_output):\n log_it.debug('ignore input')\n continue\n log_it.debug('remove output that became spent')\n self._remove_utxo(UtxoIndexItem.from_tx_output(spent_tx, tx_input.index, spent_tx_output))\n # add outputs that aren't spent\n for index, tx_output in enumerate(tx.outputs):\n log_it = log.new(index=index)\n if _should_skip_output(tx_output):\n log_it.debug('ignore output')\n continue\n spent_by = tx_meta.get_output_spent_by(index)\n if spent_by is not None:\n log_it.debug('do not add output that is spent', spent_by=spent_by.hex())\n continue\n log_it.debug('add new unspent output')\n self._add_utxo(UtxoIndexItem.from_tx_output(tx, index, tx_output))", "def is_almost_finished(self, index_delta):\n return self.currIndex + index_delta >= self.data.shape[0]", "def _update_synthetics_transaction(self, transaction):\n\n settings = self.__settings\n\n if not transaction.synthetics_resource_id:\n return\n\n maximum = settings.agent_limits.synthetics_transactions\n if len(self.__synthetics_transactions) < maximum:\n self.__synthetics_transactions.append(transaction)", "def check_trade_timeout(self, trade, timestamp, profit_loss_rate=0.0):\n if trade.is_trade_timeout(timestamp) and trade.profit_loss > profit_loss_rate:\n trader = self.strategy.trader()\n trade.close(trader, self.instrument)\n\n self.strategy.notify_order(trade.id, trade.dir, self.instrument.market_id, self.instrument.format_price(trade.entry_price),\n timestamp, trade.timeframe, 'exit', None, self.instrument.format_price(trade.sl), self.instrument.format_price(trade.tp),\n comment='timeout')\n\n return True\n\n return False", "def get_newest_txs(self, count: int) -> tuple[list[BaseTransaction], bool]:\n raise NotImplementedError", "def perform_strategy(self, counter):\r\n if counter < self.percent * len(self.envelopes): # in the first self.percent percent\r\n self.curr_max = max(self.curr_max, self.envelopes[counter].money)\r\n return\r\n return self.envelopes[counter].money > self.curr_max", "def test_fail_transaction(self):\n source_wallet = self.source_user.wallets.last()\n target_wallet = self.target_user.wallets.last()\n\n source_balance_init = source_wallet.balance\n target_balance_init = 
target_wallet.balance\n\n data = {\n 'initial_amount': 1100,\n 'source_wallet': source_wallet,\n 'target_wallet': target_wallet,\n }\n execute_wallet_transaction(data)\n\n source_wallet.refresh_from_db()\n target_wallet.refresh_from_db()\n\n self.assertTrue(source_balance_init == source_wallet.balance)\n self.assertTrue(target_balance_init == target_wallet.balance)\n\n self.assertEqual(source_wallet.outcome_transactions.last().status, TRANSACTION_FAIL_STATUS)", "def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)", "def slowRPC(newETM): #Status: Done, not tested\r\n pass", "def get_newer_txs_after(self, timestamp: int, hash_bytes: bytes, count: int) -> tuple[list[BaseTransaction], bool]:\n raise NotImplementedError", "def task_saleorder_update_productskustats_waitingpay_num(sku_id):\n from flashsale.pay.models import SaleOrder\n\n product_id = ProductSku.objects.get(id=sku_id).product.id\n waitingpay_num_res = SaleOrder.objects.filter(item_id=product_id, sku_id=sku_id,\n status=SaleOrder.WAIT_BUYER_PAY).aggregate(\n Sum('num'))\n total = waitingpay_num_res['num__sum'] or 0\n stat = SkuStock.get_by_sku(sku_id)\n if stat.waitingpay_num != total:\n stat.waitingpay_num = total\n stat.save(update_fields=[\"waitingpay_num\"])", "def update_task_state(mapper, conn, target):\r\n sql_query = ('select count(id) from task_run \\\r\n where task_run.task_id=%s') % target.task_id\r\n n_answers = conn.scalar(sql_query)\r\n sql_query = ('select n_answers from task \\\r\n where task.id=%s') % target.task_id\r\n task_n_answers = conn.scalar(sql_query)\r\n if (n_answers) >= task_n_answers:\r\n sql_query = (\"UPDATE task SET state=\\'completed\\' \\\r\n where id=%s\") % target.task_id\r\n conn.execute(sql_query)", "def update_if_necessary(self, timesteps_executed):\n if self.updating:\n # Are we allowed to update?\n if timesteps_executed > self.steps_before_update and \\\n (self.agent.observe_spec[\"buffer_enabled\"] is False or # no update before some data in buffer\n timesteps_executed >= self.agent.observe_spec[\"buffer_size\"]) and \\\n timesteps_executed % self.update_interval == 0: # update frequency check\n loss = 0\n for _ in range_(self.update_steps):\n #l, s_, a_, r_, t_ = self.agent.update()\n loss += self.agent.update()\n #self.logger.info(\"FROM MEM: s={} a={} r={} t={}\".format(s_, a_, r_, t_))\n #loss += l\n return loss\n\n return None", "def speed_test(self):\n self.lg.debug('Performing speed test no. 
{}'.format(self.runs))\n self.st.get_best_server()\n self.st.upload()\n self.st.download()\n up = self.st.results.upload // 1e6\n down = self.st.results.download // 1e6\n timestamp = time.localtime(time.time())\n self.lg.debug('Timestamp: {}'.format(\n time.strftime('%H:%M:%S', timestamp)))\n self.lg.debug(\n 'Upload is {} Mbps'.format(up))\n self.lg.debug(\n 'Download is {} Mbps'.format(down))\n self.results_up.append(up)\n self.results_down.append(down)\n self.results_timestamp.append(timestamp)", "async def update_trade_stats(self):\n\n summary_keys = [base for base in config['min_base_volumes']] + ['global']\n summaries = {\n key: {\n 'open_count': 0,\n 'buys': 0,\n 'rebuys': 0,\n 'sells': 0,\n 'collect_sells': 0,\n 'soft_stop_sells': 0,\n 'total_profit': 0.0,\n 'total_loss': 0.0,\n 'total_fees': 0.0,\n 'balancer_refills': 0,\n 'balancer_remits': 0,\n 'balancer_stop_losses': 0,\n 'balancer_profit': 0.0,\n 'balancer_loss': 0.0,\n 'balancer_fees': 0.0,\n } for key in summary_keys\n }\n\n for pair in self.trades:\n if pair not in self.trade_stats[self.time_prefix]:\n continue\n\n base = pair.split('-', 1)[0]\n open_count = len(self.trades[pair]['open'])\n\n summaries[base]['open_count'] += open_count\n summaries[base]['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries[base]['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries[base]['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries[base]['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries[base]['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries[base]['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries[base]['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries[base]['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries[base]['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries[base]['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries[base]['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries[base]['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries[base]['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n summaries['global']['open_count'] += open_count\n summaries['global']['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries['global']['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries['global']['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries['global']['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries['global']['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries['global']['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries['global']['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries['global']['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries['global']['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries['global']['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries['global']['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n 
summaries['global']['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries['global']['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n for key in summaries:\n self.trade_stats[self.time_prefix][key]['buys'] = summaries[key]['buys']\n self.trade_stats[self.time_prefix][key]['rebuys'] = summaries[key]['rebuys']\n self.trade_stats[self.time_prefix][key]['sells'] = summaries[key]['sells']\n self.trade_stats[self.time_prefix][key]['collect_sells'] = summaries[key]['collect_sells']\n self.trade_stats[self.time_prefix][key]['soft_stop_sells'] = summaries[key]['soft_stop_sells']\n self.trade_stats[self.time_prefix][key]['total_profit'] = summaries[key]['total_profit']\n self.trade_stats[self.time_prefix][key]['total_loss'] = summaries[key]['total_loss']\n self.trade_stats[self.time_prefix][key]['total_fees'] = summaries[key]['total_fees']\n self.trade_stats[self.time_prefix][key]['balancer_refills'] = summaries[key]['balancer_refills']\n self.trade_stats[self.time_prefix][key]['balancer_remits'] = summaries[key]['balancer_remits']\n self.trade_stats[self.time_prefix][key]['balancer_profit'] = summaries[key]['balancer_profit']\n self.trade_stats[self.time_prefix][key]['balancer_loss'] = summaries[key]['balancer_loss']\n self.trade_stats[self.time_prefix][key]['balancer_fees'] = summaries[key]['balancer_fees']\n\n if summaries[key]['open_count'] > self.trade_stats[self.time_prefix][key]['most_open']:\n self.trade_stats[self.time_prefix][key]['most_open'] = summaries[key]['open_count']\n\n filter_items = [pair for pair in self.trades] + [base for base in config['min_base_volumes']] + ['global']\n self.save_attr('trade_stats', max_depth=2, filter_items=filter_items, filter_keys=[self.time_prefix])", "async def research_zergling_speed(self):\n if not self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED):\n self.research(UpgradeId.ZERGLINGMOVEMENTSPEED)", "def update_max_speed(self, measured_down, measured_up):\n\n max_speed = self.speed_db.all()\n max_down = max(measured_down, max_speed[0]['download'])\n max_up = max(measured_up, max_speed[0]['upload'])\n\n if max_speed[0]['test']:\n self.speed_db.update({'download': max_down})\n self.speed_db.update({'upload': max_up})\n else:\n self.speed_db.update({'download': measured_down})\n self.speed_db.update({'upload': measured_up})\n\n self.speed_db.update({'test': True})", "def record_transaction(self, transaction: Transaction) -> bool:\n if self._locked:\n print('Failed to record transaction! Your account has been locked!'\n )\n return False\n\n if transaction.amount > self.bank_balance:\n print('Failed to record transaction! Not enough balance!')\n return False\n\n budget = self.budget_manager.get_budget(transaction.budget_category)\n if budget.locked:\n print('Failed to record transaction! 
This budget has been locked!')\n return False\n\n self.transactions.append(transaction)\n self.bank_balance -= transaction.amount\n budget.amount_spent += transaction.amount\n self._warn_and_lock_if_needed(transaction)\n return True", "def enter_transaction():\n _state.transactions = get_transactions() + 1", "def _warn_and_lock_if_needed(self, transaction: Transaction) -> None:\n budget = self.budget_manager.get_budget(transaction.budget_category)\n exceeded_ratio = budget.exceeded_ratio\n if exceeded_ratio > 1:\n self._notify_exceeded_budget(budget)\n self._lock_budget(budget)\n self.print_transactions_for_review(budget)\n if self.budget_manager.no_locked_budgets >= 2:\n self._locked = True\n print('YOUR BANK ACCOUNT HAS BEEN LOCKED!')\n elif exceeded_ratio > 0.5:\n self._warn_nearing_exceed_budget(budget, 50)\n self.print_transactions_for_review(budget)", "def update_if_stale(self) -> int:\n return int(get_feature(self.vm, \"qubes-vm-update-update-if-stale\",\n Settings.DEFAULT_UPDATE_IF_STALE))", "def execute(self):\r\n global db_runtime_context\r\n if db_runtime_context.current_db is None:\r\n print(\"!Failed to execute query because no database is selected!\")\r\n return None \r\n\r\n self.tableName = self.tableName.lower()\r\n \r\n if self.tableName is not None:\r\n\r\n update_table = db_runtime_context.current_db.getTableByName(self.tableName)\r\n\r\n if update_table is not None:\r\n pass\r\n else:\r\n print(\"!Failed to execute query on table\", self.tableName, \"because it does not exist!\")\r\n return None \r\n\r\n # Check for a lock\r\n if not db_runtime_context.current_db.isWritable(update_table.tableName):\r\n print(f\"Error: Table {update_table.tableName} is locked!\")\r\n return\r\n\r\n\r\n\r\n db_runtime_context.current_db.tables[self.tableName].update(self.targets, self.conditions)\r\n\r\n db_runtime_context.current_db.successfulTransactions += 1", "def updateLastPrice(self):\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(\n pytz.timezone('US/Central')).strftime(\"%H:%M\")\n\n # UPDATE POSITION LAST PRICE AND UPDATE HIGH PRICE\n open_positions = self.open_positions.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n open_positions_list = []\n\n for position in open_positions:\n\n symbol = position[\"Symbol\"]\n\n if symbol not in open_positions_list:\n\n open_positions_list.append(symbol)\n\n if len(open_positions_list) > 0:\n\n resp = self.tdameritrade.getQuotes(open_positions_list)\n\n if resp:\n\n for key, value in resp.items():\n\n symbol = key\n\n last_price = value[\"lastPrice\"]\n\n self.open_positions.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})\n\n if dt_central == \"15:00\":\n\n self.open_positions.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Opening_Price\": last_price}})\n\n # UPDATE QUEUE LAST PRICE\n queues = self.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type})\n\n queues_list = []\n\n for queue in queues:\n\n if self.asset_type == \"EQUITY\":\n\n symbol = queue[\"Symbol\"]\n\n elif self.asset_type == \"OPTION\":\n\n symbol = queue[\"Pre_Symbol\"]\n\n if symbol not in queues_list:\n\n queues_list.append(symbol)\n\n if len(queues_list) > 0:\n\n resp = 
self.tdameritrade.getQuotes(queues_list)\n\n for key, value in resp.items():\n\n symbol = key\n\n last_price = value[\"lastPrice\"]\n\n if self.asset_type == \"EQUITY\":\n\n self.queue.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})\n\n elif self.asset_type == \"OPTION\":\n\n self.queue.update_many({\"Trader\": self.user[\"Name\"], \"Pre_Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})", "def update_table_result(self, tablename):\n logging.debug(f\"\"\"update_table_result\"\"\")\n conn = self.connect(self.cxRepo)\n\n \"\"\" qry1 : table has been processed : everything is ok, there are no step > 0\n \"\"\"\n sql = f\"\"\"update {self.schemaRepo}.tablediff set result = 'ok',\n server1_status = 'done',server2_status = 'done' where step = 0\n and lower(table_name) = lower('{tablename}')\n and server1_rows = server2_rows and server1_rows>0\n and not exists (select 1 from {self.schemaRepo}.tablediff\n where step>0 and server1_status = 'done'\n and server2_status = 'done' and result = 'nok'\n and lower(table_name) =lower('{tablename}'))\"\"\"\n with conn:\n with conn.cursor() as curs:\n try:\n curs.execute(sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {sql} : {error.code}\"\"\")\n\n \"\"\" qry2 : table has been processed : one step > 0 is not ok so the\n global result is nok\n \"\"\"\n sql = f\"\"\"update {schemaRepo}.tablediff set result = 'nok',\n server1_status = 'done',server2_status = 'done' where step = 0\n and lower(table_name) = lower('{tablename}')\n and exists (select 1 from {schemaRepo}.tablediff where step>0\n and server1_status = 'done'\n and server2_status = 'done' and result = 'nok'\n and lower(table_name) =lower('{tablename}'))\"\"\"\n with conn:\n with conn.cursor() as curs:\n curs.execute(sql)\n\n \"\"\" qry3 : table has been processed : server1_rows and server2_rows\n are <> so the global result is nok\n \"\"\"\n sql = f\"\"\"update {schemaRepo}.tablediff set result = 'nok',\n server1_status = 'done',server2_status = 'done'\n where step = 0 and lower(table_name) = lower('{tablename}')\n and server1_rows<>server2_rows and server1_status = 'ready'\n and server2_status = 'ready'\"\"\"\n with conn:\n with conn.cursor() as curs:\n try:\n curs.execute(sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {sql} : {error}\"\"\")\n\n \"\"\" qry4 : table has been processed : server1_rows = server2_rows = 0\n (because fo filter) so the global result is ok\n \"\"\"\n sql = f\"\"\"update {schemaRepo}.tablediff set result = 'ok',\n server1_status = 'done',server2_status = 'done' where step = 0\n and lower(table_name) = lower('{tablename}')\n and server1_rows = server2_rows and server1_rows = 0\n and server1_status = 'ready' and server2_status = 'ready'\"\"\"\n with conn:\n with conn.cursor() as curs:\n try:\n curs.execute(sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {sql} : {error}\"\"\")", "def slow_transaction_data(self):\n\n # XXX This method no longer appears to be used. 
Being replaced\n # by the transaction_trace_data() method.\n\n if not self.__settings:\n return []\n\n if not self.__slow_transaction:\n return []\n\n maximum = self.__settings.agent_limits.transaction_traces_nodes\n\n transaction_trace = self.__slow_transaction.transaction_trace(\n self, maximum)\n\n data = [transaction_trace,\n list(self.__slow_transaction.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n trace_data = [[root.start_time,\n root.end_time - root.start_time,\n self.__slow_transaction.path,\n self.__slow_transaction.request_uri,\n pack_data]]\n\n return trace_data", "def mine(self):\n if not self.unconfirmed_transactions: \n return False\n \n last_block = self.last_block\n \n new_block = Block(index= last_block.index + 1, \n transactions = self.unconfirmed_transactions,\n timestamp = time.time(),\n previous_hash = last_block.hash)\n\n proof = self.proof_of_work(new_block)\n self.add_block(new_block, proof)\n self.unconfirmed_transactions = []\n return new_block.index", "def transactional():\n return {\"transactional\": bool(salt.utils.path.which(\"transactional-update\"))}", "def is_spent(tx_hash, index):\n try:\n response = make_request('http://tbtc.blockr.io/api/v1/tx/info/' + tx_hash)\n data = json.loads(response)\n result = bool(data['data']['vouts'][index]['is_spent'])\n except Exception as e:\n result = True\n\n return result", "def is_updated(self):\n return self.timestamp > 0", "def sync_update(self):\n for rec in self:\n if rec.ks_last_exported_date and rec.ks_sync_date:\n ks_reduced_ks_sync_time = rec.ks_last_exported_date - datetime.timedelta(seconds=30)\n ks_increased_ks_sync_time = rec.ks_last_exported_date + datetime.timedelta(seconds=30)\n if rec.ks_sync_date > ks_reduced_ks_sync_time and rec.ks_sync_date < ks_increased_ks_sync_time:\n rec.ks_sync_status = True\n else:\n rec.ks_sync_status = False\n else:\n rec.ks_sync_status = False", "def checksum_for_changes(self, single_trx=False):\n if self.eliminate_dups:\n log.warning(\"Skip checksum, because --eliminate-duplicate \" \"specified\")\n return\n elif not self.need_checksum_for_changes():\n return\n # Because chunk checksum use old pk combination for searching row\n # If we don't have a pk/uk on old table then it'll be very slow, so we\n # have to skip here\n elif self.is_full_table_dump:\n return\n else:\n log.info(\n \"Running checksum for rows have been changed since \"\n \"last checksum from change ID: {}\".format(self.last_checksumed_id)\n )\n start_time = time.time()\n old_table_checksum = self.checksum_by_replay_chunk(self.table_name)\n # Checksum for the __new table should be issued inside the transaction\n # too. 
Otherwise those invisible gaps in the __chg table will show\n # up when calculating checksums\n new_table_checksum = self.checksum_by_replay_chunk(self.new_table_name)\n # After calculation checksums from both tables, we now can close the\n # transaction, if we want\n if not single_trx:\n self.commit()\n self.compare_checksum(old_table_checksum, new_table_checksum)\n self.last_checksumed_id = self.last_replayed_id\n self.stats[\"time_in_delta_checksum\"] = self.stats.setdefault(\n \"time_in_delta_checksum\", 0\n ) + (time.time() - start_time)\n\n self.record_checksum()", "def is_update(self) -> bool:\n return self.statement.is_dml and self.statement.is_update", "def isTx(self):\n\t\treturn self.extension == '.tx'", "def verify_transaction(transaction):\n sender_balance = get_balance(transaction['sender'])\n return sender_balance >= transaction['amount']", "def rollback_block(self, block):\n # TODO: 0.007-12c changes\n current_length = self.db.get('length')\n if block['length'] != current_length:\n # Block is not at the top the chain\n return False\n\n for tx in block['txs']:\n tx_owner_address = tools.tx_owner_address(tx)\n owner_account = self.get_account(tx_owner_address)\n if tx['type'] == 'mint':\n owner_account['amount'] -= tools.block_reward(block['length'])\n self.db.put(tx_owner_address, owner_account)\n elif tx['type'] == 'spend':\n owner_account['amount'] += tx['amount']\n owner_account['count'] -= 1\n owner_account['tx_blocks'].remove(block['length'])\n\n receiver_account = self.db.get(tx['to'])\n receiver_account['amount'] -= tx['amount']\n receiver_account['tx_blocks'].remove(block['length'])\n\n self.db.put(tx_owner_address, owner_account)\n self.db.put(tx['to'], receiver_account)", "def test_update_loop(self):\n self.create_org(provider='qbo')\n old_task_count = 0\n\n while True:\n update_call = self.app.post('/adapter/qbo/test/update')\n self.assertEqual(update_call.status_code, 204)\n\n new_task_count = len(self.taskqueue.get_filtered_tasks())\n\n if new_task_count == old_task_count:\n break\n\n if new_task_count > 100:\n self.fail(\"too many adapter calls, infinite loop maybe???\")\n\n old_task_count = new_task_count\n\n self.assertEqual(new_task_count, 20)", "def _pair_stale(self, pair):\r\n (_conn, return_time) = pair\r\n now = time.time()\r\n return return_time + ConnectionPool.STALE_DURATION < now", "def iter_mempool_from_best_index(self) -> Iterator[Transaction]:\n assert self.indexes is not None\n if self.indexes.mempool_tips is not None:\n yield from self.indexes.mempool_tips.iter_all(self)\n else:\n yield from self.iter_mempool_from_tx_tips()", "def slower(self):\n self._prepare()\n rate = self._eng.getProperty(\"rate\")\n newrate = rate - 50\n logging.debug(\"slower %d => %d\" %(rate, newrate))\n self._eng.setProperty(\"rate\", newrate)\n self._eng.runAndWait()\n self.say(\"slower\")", "def check_transaction(self):\n if self.transaction_valid():\n transid = self._request.var(\"_transid\")\n if transid and transid != \"-1\":\n self._invalidate(transid)\n return True\n else:\n return False", "def _save_or_verify_genesis(self) -> None:\n self._saving_genesis = True\n for tx in self._get_genesis_from_settings():\n try:\n assert tx.hash is not None\n tx2 = self.get_transaction(tx.hash)\n assert tx == tx2\n except TransactionDoesNotExist:\n self.save_transaction(tx)\n self.add_to_indexes(tx)\n tx2 = tx\n assert tx2.hash is not None\n self._genesis_cache[tx2.hash] = tx2\n self._saving_genesis = False", "def faster():\n try:\n ttsEng.faster()\n except Exception, e:\n 
logging.error(e)", "def improve(update, close, guess=1, max_updates=100):\n k = 0\n while not close(guess) and k < max_updates:\n guess = update(guess)\n k = k + 1\n return guess", "def ifAlreadyDone(self, cxRepo, schemaRepo, schema, tablename):\n logging.debug(f\"\"\"check if {schema}.{tablename} has been analyzed\"\"\")\n conn = self.connect(cxRepo)\n sql = f\"\"\"select table_name from {schemaRepo}.tablediff where lower\n (table_name) = lower('{tablename}') and schema1 = '{schema}' and\n server1_status = 'ready' and server1_status = 'ready' and result in\n ('ready', 'init')\"\"\"\n with conn:\n with conn.cursor() as curs:\n curs.execute(sql)\n row = curs.fetchone()\n if row is None:\n return 1\n else:\n return 0" ]
[ "0.5796732", "0.56013685", "0.5567382", "0.5438813", "0.52951", "0.527724", "0.5276468", "0.52692777", "0.51808035", "0.5144416", "0.5136642", "0.51107734", "0.5102024", "0.5087963", "0.50874853", "0.5072691", "0.5062309", "0.503474", "0.49916357", "0.49607188", "0.4943801", "0.4930255", "0.4902352", "0.49019596", "0.48956656", "0.48263434", "0.48068967", "0.4802907", "0.4801067", "0.47867498", "0.47809693", "0.47674438", "0.47640356", "0.47615427", "0.4758213", "0.47550946", "0.4747565", "0.47402883", "0.4700052", "0.4699536", "0.469953", "0.46855187", "0.46836972", "0.46790412", "0.46787927", "0.46652392", "0.46599016", "0.465931", "0.46401915", "0.46377778", "0.46316016", "0.4630753", "0.46261266", "0.4622053", "0.46165088", "0.4614489", "0.4614411", "0.4612617", "0.46059397", "0.4600536", "0.4597893", "0.4597668", "0.4596869", "0.4587794", "0.4580221", "0.45791888", "0.45789686", "0.45687062", "0.45637774", "0.45580825", "0.4552644", "0.45512924", "0.4540829", "0.45375812", "0.45375556", "0.45335963", "0.4531187", "0.4529453", "0.45292464", "0.45280516", "0.45278555", "0.4526291", "0.4520214", "0.45161885", "0.4515844", "0.45055905", "0.45048964", "0.45044515", "0.45033613", "0.45030642", "0.4496469", "0.44960496", "0.44929412", "0.4491187", "0.4490119", "0.44854057", "0.44814512", "0.44795266", "0.44774947", "0.44744924" ]
0.78206486
0
Check if transaction is a synthetics trace and save it to __synthetics_transactions.
def _update_synthetics_transaction(self, transaction):

        settings = self.__settings

        if not transaction.synthetics_resource_id:
            return

        maximum = settings.agent_limits.synthetics_transactions
        if len(self.__synthetics_transactions) < maximum:
            self.__synthetics_transactions.append(transaction)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isSStx(tx):\n try:\n checkSStx(tx)\n\n except Exception as e:\n log.debug(\"isSStx: {}\".format(e))\n\n else:\n return True", "def save(self, trade: Trade) -> Trade:\n\n pass # pragma: no cover", "def is_transaction(self) -> bool:\n return False", "def isTx(self):\n\t\treturn self.extension == '.tx'", "def trackTrans(self):\n self.data_struct['_trackTrans'] = True", "def _is_trace_on():\n return AceQLHttpApi.is_trace_on()", "def is_transaction(self):\n return self._request.has_var(\"_transid\")", "def isSTraced(self):\n try:\n return self.sTraced\n except AttributeError:\n self.sTraced = False\n return False", "def report(self, trade, is_entry):\n pass", "def transaction_trace_data(self, connections):\n\n _logger.debug('Generating transaction trace data.')\n\n if not self.__settings:\n return []\n\n # Create a set 'traces' that is a union of slow transaction,\n # and Synthetics transactions. This ensures we don't send\n # duplicates of a transaction.\n\n traces = set()\n if self.__slow_transaction:\n traces.add(self.__slow_transaction)\n traces.update(self.__synthetics_transactions)\n\n # Return an empty list if no transactions were captured.\n\n if not traces:\n return []\n\n # We want to limit the number of explain plans we do across\n # these. So work out what were the slowest and tag them.\n # Later the explain plan will only be run on those which are\n # tagged.\n\n agent_limits = self.__settings.agent_limits\n explain_plan_limit = agent_limits.sql_explain_plans_per_harvest\n maximum_nodes = agent_limits.transaction_traces_nodes\n\n database_nodes = []\n\n if explain_plan_limit != 0:\n for trace in traces:\n for node in trace.slow_sql:\n # Make sure we clear any flag for explain plans on\n # the nodes in case a transaction trace was merged\n # in from previous harvest period.\n\n node.generate_explain_plan = False\n\n # Node should be excluded if not for an operation\n # that we can't do an explain plan on. Also should\n # not be one which would not be included in the\n # transaction trace because limit was reached.\n\n if (node.node_count < maximum_nodes and\n node.connect_params and node.statement.operation in\n node.statement.database.explain_stmts):\n database_nodes.append(node)\n\n database_nodes = sorted(database_nodes,\n key=lambda x: x.duration)[-explain_plan_limit:]\n\n for node in database_nodes:\n node.generate_explain_plan = True\n\n else:\n for trace in traces:\n for node in trace.slow_sql:\n node.generate_explain_plan = True\n database_nodes.append(node)\n\n # Now generate the transaction traces. 
We need to cap the\n # number of nodes capture to the specified limit.\n\n trace_data = []\n\n for trace in traces:\n transaction_trace = trace.transaction_trace(\n self, maximum_nodes, connections)\n\n data = [transaction_trace,\n list(trace.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n if trace.record_tt:\n force_persist = True\n else:\n force_persist = False\n\n if trace.include_transaction_trace_request_uri:\n request_uri = trace.request_uri\n else:\n request_uri = None\n\n trace_data.append([transaction_trace.start_time,\n root.end_time - root.start_time,\n trace.path,\n request_uri,\n pack_data,\n trace.guid,\n None,\n force_persist,\n None,\n trace.synthetics_resource_id, ])\n\n return trace_data", "def _apply_trx_trade_to_allocation(cls, allocation, block_trade):\n try:\n allocation.TrxTrade(block_trade)\n allocation.Commit()\n except Exception as e:\n error_message = 'Failed to stamp TrxTrade {0} on Allocation Trade: {1} , {2}'\n LOGGER.exception(error_message.format(block_trade.Oid(), allocation.Oid(), e))\n return False\n\n return True", "def sign_transaction_essence(self, prepared_transaction_data):\n return self._call_account_method(\n 'signTransactionEssence', {\n 'preparedTransactionData': prepared_transaction_data\n }\n )", "def is_sed(self) -> bool:\n return False", "def signSign(self):\r\n if \"signature\" in self: # or \"signatures\" in self ?\r\n self.pop(\"id\", False)\r\n try:\r\n self[\"signSignature\"] = dposlib.core.crypto.getSignature(\r\n self, self._secondPrivateKey,\r\n exclude_second_sig=True,\r\n )\r\n except AttributeError:\r\n raise Exception(\"no second private Key available\")\r\n else:\r\n raise Exception(\"transaction not signed\")", "async def check_trustline(\n cls, transaction: Transaction, server: Server, locks: Dict\n ):\n try:\n _, account = await get_account_obj_async(\n Keypair.from_public_key(transaction.to_address), server\n )\n except BaseRequestError:\n logger.exception(f\"Failed to load account {transaction.to_address}\")\n transaction.pending_execution_attempt = False\n await sync_to_async(transaction.save)()\n return\n trustline_found = False\n for balance in account[\"balances\"]:\n if balance.get(\"asset_type\") == \"native\":\n continue\n if (\n balance[\"asset_code\"] == transaction.asset.code\n and balance[\"asset_issuer\"] == transaction.asset.issuer\n ):\n trustline_found = True\n break\n if trustline_found:\n logger.debug(\n f\"detected transaction {transaction.id} is no longer pending trust\"\n )\n await cls.process_deposit(transaction, server, locks)\n else:\n transaction.pending_execution_attempt = False\n await sync_to_async(transaction.save)()", "def _save_internal_transactions(self, blocks_traces):\n docs = [\n self._preprocess_internal_transaction(transaction)\n for transaction in blocks_traces\n if transaction[\"transactionHash\"]\n ]\n if docs:\n for chunk in bulk_chunks(docs, None, BYTES_PER_CHUNK):\n self.client.bulk_index(docs=chunk, index=self.indices[\"internal_transaction\"], doc_type=\"itx\",\n id_field=\"hash\", refresh=True)", "def trace(self, trace=...):\n 
...", "def _save_miner_transactions(self, blocks_traces):\n docs = [self._preprocess_internal_transaction(transaction) for transaction in blocks_traces if\n not transaction[\"transactionHash\"]]\n self.client.bulk_index(docs=docs, index=self.indices[\"miner_transaction\"], doc_type=\"tx\", id_field=\"hash\",\n refresh=True)", "def sign_trx(self, signture):\n self.trx_signature = signture", "def record_transaction(self, transaction):\n\n if not self.__settings:\n return\n\n settings = self.__settings\n\n # Record the apdex, value and time metrics generated from the\n # transaction. Whether time metrics are reported as distinct\n # metrics or into a rollup is in part controlled via settings\n # for minimum number of unique metrics to be reported and thence\n # whether over a time threshold calculated as percentage of\n # overall request time, up to a maximum number of unique\n # metrics. This is intended to limit how many metrics are\n # reported for each transaction and try and cut down on an\n # explosion of unique metric names. The limits and thresholds\n # are applied after the metrics are reverse sorted based on\n # exclusive times for each metric. This ensures that the metrics\n # with greatest exclusive time are retained over those with\n # lesser time. Such metrics get reported into the performance\n # breakdown tab for specific web transactions.\n\n self.record_apdex_metrics(transaction.apdex_metrics(self))\n\n self.merge_custom_metrics(transaction.custom_metrics.metrics())\n\n self.record_time_metrics(transaction.time_metrics(self))\n\n # Capture any errors if error collection is enabled.\n # Only retain maximum number allowed per harvest.\n\n error_collector = settings.error_collector\n\n if (error_collector.enabled and settings.collect_errors and\n len(self.__transaction_errors) <\n settings.agent_limits.errors_per_harvest):\n self.__transaction_errors.extend(transaction.error_details())\n\n self.__transaction_errors = self.__transaction_errors[:\n settings.agent_limits.errors_per_harvest]\n\n if (error_collector.capture_events and\n error_collector.enabled and\n settings.collect_error_events):\n events = transaction.error_events(self.__stats_table)\n for event in events:\n self._error_events.add(event, priority=transaction.priority)\n\n # Capture any sql traces if transaction tracer enabled.\n\n if settings.slow_sql.enabled and settings.collect_traces:\n for node in transaction.slow_sql_nodes(self):\n self.record_slow_sql_node(node)\n\n # Remember as slowest transaction if transaction tracer\n # is enabled, it is over the threshold and slower than\n # any existing transaction seen for this period and in\n # the historical snapshot of slow transactions, plus\n # recording of transaction trace for this transaction\n # has not been suppressed.\n\n transaction_tracer = settings.transaction_tracer\n\n if (not transaction.suppress_transaction_trace and\n transaction_tracer.enabled and settings.collect_traces):\n\n # Transactions saved for Synthetics transactions\n # do not depend on the transaction threshold.\n\n self._update_synthetics_transaction(transaction)\n\n threshold = transaction_tracer.transaction_threshold\n\n if threshold is None:\n threshold = transaction.apdex_t * 4\n\n if transaction.duration >= threshold:\n self._update_slow_transaction(transaction)\n\n # Create the transaction event and add it to the\n # appropriate \"bucket.\" Synthetic requests are saved in one,\n # while transactions from regular requests are saved in another.\n\n if 
transaction.synthetics_resource_id:\n event = transaction.transaction_event(self.__stats_table)\n self._synthetics_events.add(event)\n\n elif (settings.collect_analytics_events and\n settings.transaction_events.enabled):\n\n event = transaction.transaction_event(self.__stats_table)\n self._transaction_events.add(event, priority=transaction.priority)\n\n # Merge in custom events\n\n if (settings.collect_custom_events and\n settings.custom_insights_events.enabled):\n self.custom_events.merge(transaction.custom_events)\n\n # Merge in span events\n\n if (settings.distributed_tracing.enabled and\n settings.span_events.enabled and settings.collect_span_events):\n if settings.infinite_tracing.enabled:\n for event in transaction.span_protos(settings):\n self._span_stream.put(event)\n elif transaction.sampled:\n for event in transaction.span_events(self.__settings):\n self._span_events.add(event, priority=transaction.priority)", "def hasTx(self):\n\t\tif self.isTx:\n\t\t\treturn True\n\t\treturn textureFile( self.path.replace( self.extension, '.tx' ) ).exists", "def _is_transaction_isolation_error(self, error):\n from psycopg2.extensions import TransactionRollbackError\n\n # Django can wrap errors, adding it to the `__cause__` attribute\n for e in (error, getattr(error, '__cause__', None)):\n if isinstance(e, TransactionRollbackError):\n return True\n return False", "def save_transcription(trs_fname, trs):\n existed = os.path.exists(trs_fname)\n if not trs.endswith('\\n'):\n trs += '\\n'\n with codecs.open(trs_fname, 'w+', encoding='UTF-8') as trs_file:\n trs_file.write(trs)\n return existed", "def record_trace(self):\n\n tfname = str(int(time.time())) + \".obd2_reader.trace\"\n self.tf_out = open(tfname, 'a')\n self.RecordTrace = 1\n print \"Recoding trace to:\", tfname", "def isSSGen(tx):\n try:\n checkSSGen(tx)\n\n except Exception as e:\n log.debug(\"isSSGen: {}\".format(e))\n\n else:\n return True", "def check(transaction):\n if not isinstance(transaction, Transaction):\n transaction = Transaction.objects.get(id=transaction)\n\n r = requests.post(\"https://www.blockonomics.co/api/searchhistory\",\n data=json.dumps({\"addr\": transaction.to_address}))\n try:\n history_data = json.loads(r.content.decode('utf-8'))['history'][0]\n except:\n return\n\n set_tx_details(history_data, transaction)", "def is_sedes(obj):\n return hasattr(obj, 'serialize') and hasattr(obj, 'deserialize')", "def save(self, *args, **kwargs):\n\n # Call the \"real\" save() method.\n super(Product, self).save(*args, **kwargs)\n\n # If sold and is a Widget\n if self.sale and self.kind == self.WIDGET:\n # But has not stamp\n try:\n self.stamp\n except:\n s = Stamp(owned_by=self.sale.customer, obtained_with=self)\n s.save()", "def supports_transactions(self):\n return False", "def save(self):\n self.lock()\n\n trader = self.strategy.trader()\n\n for trade in self.trades:\n t_data = trade.dumps()\n ops_data = [operation.dumps() for operation in trade.operations]\n\n # store per trade\n Database.inst().store_user_trade((trader.name, trader.account.name, self.instrument.market_id,\n self.strategy.identifier, trade.id, trade.trade_type, t_data, ops_data))\n\n # dumps of regions\n trader_data = {}\n regions_data = [region.dumps() for region in self.regions]\n\n Database.inst().store_user_trader((trader.name, trader.account.name, self.instrument.market_id,\n self.strategy.identifier, self.activity, trader_data, regions_data))\n\n self.unlock()", "def is_tx(self):\n return self._pin_name in TX_CHANNELS", "def 
_save_transactions(self):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\t\r\n\t\twith open(self._state_file, 'wb') as tmp:\r\n\t\t\tlogger.debug(\"Dumping transactions: %r\" % self.transactions)\r\n\t\t\tpickle.dump(self.transactions, tmp)\r\n\t\t\r\n\t\tlogger.debug(\"Exit\")", "def add_transaction(self,transaction):\n if type(transaction) != PoWGenericTransaction:\n raise Exception('TYPEERROR','transaction should be type of \"PoWGenericTransaction\" but got {}'.format(type(transaction)))\n if not transaction.is_validation_passed():\n print 'The transaction is not valid. Skipped...'\n return\n self.transactions.append(transaction)", "def in_transaction(self):\n # We likely just changed data - give it a second to catch up\n time.sleep(0.1) # I think I keep reading journal watermark too soon without this\n \n # Get relevant data\n water_mark = pos.read_journal_watermark()\n self.log.info(f\"Watermark: [{water_mark}]\")\n balance = pos.read_balance()['Total']\n self.log.info(f\"Balance: [{balance}]\")\n \n # Decide if we need more checks based on watermark\n if water_mark == \"TRANSACTION IN PROGRESS\":\n self.log.info(\"In Transaction: In Transaction Watermark found\")\n return True\n elif water_mark == \"TRANSACTION COMPLETE\" or water_mark == \"TRANSACTION VOIDED\":\n self.log.info(\"Not in Transaction: Transaction Complete/Voided watermarks found\")\n return False\n else:\n # No watermark - decide based on balance\n if balance == \"$0.00\":\n self.log.info(\"Not in Transaction: $0 balance with no watermark\")\n return False\n else:\n self.log.info(\"In Transaction: Non-$0 balance with no watermark\")\n return True", "def istraceback(object):\r\n return isinstance(object, types.TracebackType)", "def serialize(\n self,\n transactions: Sequence[SnubaTransaction],\n errors: Sequence[SnubaError],\n roots: Sequence[SnubaTransaction],\n warning_extra: Dict[str, str],\n event_id: Optional[str],\n detailed: bool = False,\n ) -> Sequence[FullResponse]:\n parent_map = self.construct_parent_map(transactions)\n error_map = self.construct_error_map(errors)\n parent_events: Dict[str, TraceEvent] = {}\n results_map: Dict[Optional[str], List[TraceEvent]] = defaultdict(list)\n to_check: Deque[SnubaTransaction] = deque()\n # The root of the orphan tree we're currently navigating through\n orphan_root: Optional[SnubaTransaction] = None\n if roots:\n results_map[None] = []\n for root in roots:\n root_event = TraceEvent(root, None, 0)\n parent_events[root[\"id\"]] = root_event\n results_map[None].append(root_event)\n to_check.append(root)\n\n with sentry_sdk.start_span(op=\"building.trace\", description=\"full trace\"):\n iteration = 0\n has_orphans = False\n while parent_map or to_check:\n if len(to_check) == 0:\n has_orphans = True\n # Grab any set of events from the parent map\n parent_span_id, current_events = parent_map.popitem()\n\n current_event, *siblings = current_events\n # If there were any siblings put them back\n if siblings:\n parent_map[parent_span_id] = siblings\n\n previous_event = parent_events[current_event[\"id\"]] = TraceEvent(\n current_event, None, 0\n )\n\n # Used to avoid removing the orphan from results entirely if we loop\n orphan_root = current_event\n results_map[parent_span_id].append(previous_event)\n else:\n current_event = to_check.popleft()\n previous_event = parent_events[current_event[\"id\"]]\n\n # We've found the event for the trace navigator so we can remove everything in the deque\n # As they're unrelated ancestors now\n if event_id and current_event[\"id\"] == event_id:\n # 
Remove any remaining events so we don't think they're orphans\n while to_check:\n to_remove = to_check.popleft()\n if to_remove[\"trace.parent_span\"] in parent_map:\n del parent_map[to_remove[\"trace.parent_span\"]]\n to_check = deque()\n\n # This is faster than doing a call to get_events, since get_event_by_id only makes a call to snuba\n # when non transaction events are included.\n with sentry_sdk.start_span(op=\"nodestore\", description=\"get_event_by_id\"):\n nodestore_event = eventstore.get_event_by_id(\n current_event[\"project.id\"], current_event[\"id\"]\n )\n\n previous_event.nodestore_event = nodestore_event\n\n spans: NodeSpans = nodestore_event.data.get(\"spans\", [])\n\n # Need to include the transaction as a span as well\n #\n # Important that we left pad the span id with 0s because\n # the span id is stored as an UInt64 and converted into\n # a hex string when quering. However, the conversion does\n # not ensure that the final span id is 16 chars long since\n # it's a naive base 10 to base 16 conversion.\n spans.append({\"span_id\": previous_event.event[\"trace.span\"].rjust(16, \"0\")})\n\n for child in spans:\n if child[\"span_id\"] in error_map:\n previous_event.errors.extend(\n [\n self.serialize_error(error)\n for error in error_map.pop(child[\"span_id\"])\n ]\n )\n # We need to connect back to an existing orphan trace\n if (\n has_orphans\n and\n # The child event has already been checked\n child[\"span_id\"] in results_map\n and orphan_root is not None\n and\n # In the case of a span loop popping the current root removes the orphan subtrace\n child[\"span_id\"] != orphan_root[\"trace.parent_span\"]\n ):\n orphan_subtraces = results_map.pop(child[\"span_id\"])\n for orphan_subtrace in orphan_subtraces:\n orphan_subtrace.parent_event_id = previous_event.event[\"id\"]\n previous_event.children.extend(orphan_subtraces)\n if child[\"span_id\"] not in parent_map:\n continue\n # Avoid potential span loops by popping, so we don't traverse the same nodes twice\n child_events = parent_map.pop(child[\"span_id\"])\n\n for child_event in child_events:\n parent_events[child_event[\"id\"]] = TraceEvent(\n child_event,\n current_event[\"id\"],\n previous_event.generation + 1\n if previous_event.generation is not None\n else None,\n )\n # Add this event to its parent's children\n previous_event.children.append(parent_events[child_event[\"id\"]])\n\n to_check.append(child_event)\n # Limit iterations just to be safe\n iteration += 1\n if iteration > MAX_TRACE_SIZE:\n sentry_sdk.set_tag(\"discover.trace-view.warning\", \"surpassed-trace-limit\")\n logger.warning(\n \"discover.trace-view.surpassed-trace-limit\",\n extra=warning_extra,\n )\n break\n\n root_traces: List[TraceEvent] = []\n orphans: List[TraceEvent] = []\n for index, result in enumerate(results_map.values()):\n for subtrace in result:\n self.update_children(subtrace)\n if index > 0 or len(roots) == 0:\n orphans.extend(result)\n elif len(roots) > 0:\n root_traces = result\n # We sort orphans and roots separately because we always want the root(s) as the first element(s)\n root_traces.sort(key=child_sort_key)\n orphans.sort(key=child_sort_key)\n\n if len(orphans) > 0:\n sentry_sdk.set_tag(\"discover.trace-view.contains-orphans\", \"yes\")\n logger.warning(\"discover.trace-view.contains-orphans\", extra=warning_extra)\n\n return [trace.full_dict(detailed) for trace in root_traces] + [\n orphan.full_dict(detailed) for orphan in orphans\n ]", "def traceXml(self, traceConfXml):\r\n if core.FW_conf['tracing_enabled']:\r\n 
core.FW_conf['trace'].traceActivation(traceConfXml, scriptActivation = True )", "def _is_valid_sbl_settlement(settlement):\n if not settlement.IsKindOf(acm.FSettlement):\n return False\n\n if not settlement.Trade():\n return False\n \n trade = settlement.Trade()\n acquirer = trade.Acquirer().Name()\n instrument = trade.Instrument().InsType()\n delivery_type = trade.AddInfoValue(\"SL_SWIFT\")\n if instrument != 'SecurityLoan':\n return False\n if acquirer != 'SECURITY LENDINGS DESK':\n return False\n if delivery_type != 'SWIFT':\n return False\n if settlement.Type() not in ['Security Nominal', 'End Security']:\n return False\n\n return True", "def transcription(self, sid):\r\n return recordings.Transcription(self, sid)", "def isStakeBase(tx):\n # A stake base (SSGen) must only have two transaction inputs.\n if len(tx.txIn) != 2:\n return False\n\n # The previous output of a coin base must have a max value index and\n # a zero hash, as well as null fraud proofs.\n if not isNullOutpoint(tx):\n return False\n\n if not isNullFraudProof(tx):\n return False\n\n return True", "def serialize(\n self,\n transactions: Sequence[SnubaTransaction],\n errors: Sequence[SnubaError],\n roots: Sequence[SnubaTransaction],\n warning_extra: Dict[str, str],\n event_id: Optional[str],\n detailed: bool = False,\n ) -> Sequence[LightResponse]:\n if event_id is None:\n raise ParseError(detail=\"An event_id is required for the light trace\")\n snuba_event, nodestore_event = self.get_current_transaction(transactions, errors, event_id)\n parent_map = self.construct_parent_map(transactions)\n error_map = self.construct_error_map(errors)\n trace_results: List[TraceEvent] = []\n current_generation: Optional[int] = None\n root_id: Optional[str] = None\n\n with sentry_sdk.start_span(op=\"building.trace\", description=\"light trace\"):\n # Going to nodestore is more expensive than looping twice so check if we're on the root first\n for root in roots:\n if root[\"id\"] == snuba_event[\"id\"]:\n current_generation = 0\n break\n\n if current_generation is None:\n for root in roots:\n # We might not be necessarily connected to the root if we're on an orphan event\n if root[\"id\"] != snuba_event[\"id\"]:\n # Get the root event and see if the current event's span is in the root event\n root_event = eventstore.get_event_by_id(root[\"project.id\"], root[\"id\"])\n root_spans: NodeSpans = root_event.data.get(\"spans\", [])\n root_span = find_event(\n root_spans,\n lambda item: item is not None\n and item[\"span_id\"] == snuba_event[\"trace.parent_span\"],\n )\n\n # We only know to add the root if its the direct parent\n if root_span is not None:\n # For the light response, the parent will be unknown unless it is a direct descendent of the root\n root_id = root[\"id\"]\n trace_results.append(\n TraceEvent(\n root,\n None,\n 0,\n )\n )\n current_generation = 1\n break\n\n current_event = TraceEvent(snuba_event, root_id, current_generation)\n trace_results.append(current_event)\n\n spans: NodeSpans = nodestore_event.data.get(\"spans\", [])\n # Need to include the transaction as a span as well\n #\n # Important that we left pad the span id with 0s because\n # the span id is stored as an UInt64 and converted into\n # a hex string when quering. 
However, the conversion does\n # not ensure that the final span id is 16 chars long since\n # it's a naive base 10 to base 16 conversion.\n spans.append({\"span_id\": snuba_event[\"trace.span\"].rjust(16, \"0\")})\n\n for span in spans:\n if span[\"span_id\"] in error_map:\n current_event.errors.extend(\n [self.serialize_error(error) for error in error_map.pop(span[\"span_id\"])]\n )\n if span[\"span_id\"] in parent_map:\n child_events = parent_map.pop(span[\"span_id\"])\n trace_results.extend(\n [\n TraceEvent(\n child_event,\n snuba_event[\"id\"],\n (\n current_event.generation + 1\n if current_event.generation is not None\n else None\n ),\n )\n for child_event in child_events\n ]\n )\n\n return [result.to_dict() for result in trace_results]", "def _save_or_verify_genesis(self) -> None:\n self._saving_genesis = True\n for tx in self._get_genesis_from_settings():\n try:\n assert tx.hash is not None\n tx2 = self.get_transaction(tx.hash)\n assert tx == tx2\n except TransactionDoesNotExist:\n self.save_transaction(tx)\n self.add_to_indexes(tx)\n tx2 = tx\n assert tx2.hash is not None\n self._genesis_cache[tx2.hash] = tx2\n self._saving_genesis = False", "def on_trade(self, trade: TradeData):\n pass", "def is_trashed(self):\n return self.has_label(TRASHED_LABEL)", "def is_transient_error(self):\n return self._tag == 'transient_error'", "def checkSStx(tx):\n # Check to make sure there aren't too many inputs.\n # CheckTransactionSanity already makes sure that number of inputs is\n # greater than 0, so no need to check that.\n if len(tx.txIn) > MaxInputsPerSStx:\n raise DecredError(\"SStx has too many inputs\")\n\n # Check to make sure there aren't too many outputs.\n if len(tx.txOut) > MaxOutputsPerSStx:\n raise DecredError(\"SStx has too many outputs\")\n\n # Check to make sure there are some outputs.\n if len(tx.txOut) == 0:\n raise DecredError(\"SStx has no outputs\")\n\n # Check to make sure that all output scripts are the consensus version.\n for idx, txOut in enumerate(tx.txOut):\n if txOut.version != consensusVersion:\n raise DecredError(\"invalid script version found in txOut idx %d\" % idx)\n\n # Ensure that the first output is tagged OP_SSTX.\n if getScriptClass(tx.txOut[0].version, tx.txOut[0].pkScript) != StakeSubmissionTy:\n raise DecredError(\n \"First SStx output should have been OP_SSTX tagged, but it was not\"\n )\n\n # Ensure that the number of outputs is equal to the number of inputs\n # + 1.\n if (len(tx.txIn) * 2 + 1) != len(tx.txOut):\n raise DecredError(\n \"The number of inputs in the SStx tx was not the number of outputs/2 - 1\"\n )\n\n # Ensure that the rest of the odd outputs are 28-byte OP_RETURN pushes that\n # contain putative pubkeyhashes, and that the rest of the odd outputs are\n # OP_SSTXCHANGE tagged.\n for outTxIndex in range(1, len(tx.txOut)):\n scrVersion = tx.txOut[outTxIndex].version\n rawScript = tx.txOut[outTxIndex].pkScript\n\n # Check change outputs.\n if outTxIndex % 2 == 0:\n if getScriptClass(scrVersion, rawScript) != StakeSubChangeTy:\n raise DecredError(\n \"SStx output at output index %d was not an sstx change output\",\n outTxIndex,\n )\n continue\n\n # Else (odd) check commitment outputs. 
The script should be a\n # NullDataTy output.\n if getScriptClass(scrVersion, rawScript) != NullDataTy:\n raise DecredError(\n \"SStx output at output index %d was not a NullData (OP_RETURN) push\",\n outTxIndex,\n )\n\n # The length of the output script should be between 32 and 77 bytes long.\n if len(rawScript) < SStxPKHMinOutSize or len(rawScript) > SStxPKHMaxOutSize:\n raise DecredError(\n \"SStx output at output index %d was a NullData (OP_RETURN) push\"\n \" of the wrong size\",\n outTxIndex,\n )", "def on_trade(self, trade: TradeData):\n # self.on_event(EVENT_TRADE, trade)\n # self.on_event(EVENT_TRADE + trade.vt_symbol, trade)\n pass", "def _save_to_weakref(self, tx: BaseTransaction) -> None:\n if self._tx_weakref_disabled:\n return\n assert tx.hash is not None\n tx2 = self._tx_weakref.get(tx.hash, None)\n if tx2 is None:\n self._tx_weakref[tx.hash] = tx\n else:\n assert tx is tx2, 'There are two instances of the same transaction in memory ({})'.format(tx.hash_hex)", "def has_save_strategy(self):\r\n return self.save_strategy is not None", "def isTransmitted(self) -> bool:\r\n\r\n return self.__is_transmitted", "def _handle_reference_enabled_save(self, error):\n err_id = error.get('id', None)\n if err_id is None:\n return False\n\n # this is a kind of signature of the error\n tail = filtered_error_tail(error)\n\n # check all errors in the storage\n for existing_id, existing_error in self._storage.items():\n error_tail = filtered_error_tail(existing_error)\n\n # error signature is the same\n if error_tail == tail:\n info = dict(referencedError=existing_id,\n time=error.get('time'),\n userid=error.get('userid') or 'anon')\n self._index[err_id] = info\n return True\n\n # save it\n return self._handle_save(error)", "def save_axs_table(self, tblname, calculate_zone=False):\n from axs.catalog import AxsCatalog\n AxsCatalog(None).save_axs_table(self, tblname, True, calculate_zone)", "def transaction_exists(self, transaction: \"Transaction\") -> bool:\n try:\n super().inspect_transaction(transaction=transaction)\n return True\n except grpc.RpcError as err:\n err: grpc.Call\n if err.code() == grpc.StatusCode.NOT_FOUND:\n return False\n raise err", "def verify_trace_roundtrip(\n sch: Schedule,\n mod: Union[PrimFunc, IRModule],\n *,\n debug_mask: Union[str, int] = \"all\",\n text_format: Union[str, Sequence[str]] = [\"python\", \"json\"],\n) -> Schedule:\n from tvm.script import tir as T # pylint: disable=import-outside-toplevel\n\n if not isinstance(text_format, str):\n for opt in text_format:\n new_sch = verify_trace_roundtrip(sch, mod, debug_mask=debug_mask, text_format=opt)\n return new_sch\n\n trace = sch.trace\n assert trace is not None\n\n # Step 1. Perform a round-trip through the text-format\n new_sch = Schedule(mod=mod, debug_mask=debug_mask)\n if text_format == \"json\":\n json_obj = trace.as_json()\n Trace.apply_json_to_schedule(json_obj=json_obj, sch=new_sch)\n elif text_format == \"python\":\n py_trace = \"\\n\".join(trace.as_python())\n vars_dict = {\"T\": T}\n vars_dict.update(tvm.tir.__dict__)\n exec(py_trace, vars_dict, {\"sch\": new_sch}) # pylint: disable=exec-used\n else:\n assert text_format in (\"json\", \"python\"), f\"Unknown text format: {text_format}\"\n\n # Step 2. Verify that the round-trip produced the same scheduling\n assert_structural_equal(new_sch.mod, sch.mod)\n\n # Step 3. 
Check the consistency of the text format between the old and new traces\n py_repr = \"\\n\".join(trace.as_python())\n new_py_repr = \"\\n\".join(new_sch.trace.as_python())\n assert py_repr == new_py_repr\n\n # Step 4. Return the new schedule in case it could be useful\n return new_sch", "def save(self, strm):\n raise NotImplementedError(\n '\"save\" should be implemented in the' \" class derived from SurrogateBase\"\n )", "def record_transaction(self, transaction: Transaction) -> bool:\n if self._locked:\n print('Failed to record transaction! Your account has been locked!'\n )\n return False\n\n if transaction.amount > self.bank_balance:\n print('Failed to record transaction! Not enough balance!')\n return False\n\n budget = self.budget_manager.get_budget(transaction.budget_category)\n if budget.locked:\n print('Failed to record transaction! This budget has been locked!')\n return False\n\n self.transactions.append(transaction)\n self.bank_balance -= transaction.amount\n budget.amount_spent += transaction.amount\n self._warn_and_lock_if_needed(transaction)\n return True", "def set_signal_trace(self):\n\t\t\n\t\tassert self.signal_trace_file is not None, \"Need to set \"\\\n\t\t\t\"'signal_trace_file' var before calling set_signal_trace; \"\\\n\t\t\t\"var should be set without extension, which must be .dat\"\n\t\t\n\t\tsignal_data = load_signal_trace_from_file(self.signal_trace_file)\n\t\tprint 'Signal time trace from file %s.dat loaded\\n' \\\n\t\t\t\t% self.signal_trace_file\n\t\tself.signal_trace_Tt = signal_data[:, 0]\n\t\tself.signal_trace = (signal_data[:, 1] + self.signal_trace_offset)*\\\n\t\t\t\t\t\t\t\tself.signal_trace_multiplier\n\t\t\n\t\tif self.signal_trace_file_2 is not None:\n\t\t\tsignal_data_2 = load_signal_trace_from_file(self.signal_trace_file_2)\n\t\t\tprint 'Signal time trace 2 from file %s.dat loaded\\n' \\\n\t\t\t\t% self.signal_trace_file_2\n\t\t\tassert len(self.signal_trace_Tt) == len(signal_data_2[:, 0]), \\\n\t\t\t\t\"signal_trace_file_2 must be same length as signal_trace_file\"\n\t\t\tassert sp.allclose(self.signal_trace_Tt, signal_data_2[:, 0], \n\t\t\t\t1e-6), \"signal_trace_file_2 must have same time array as \"\\\n\t\t\t\t\"signal_trace_file\"\n\t\t\tself.signal_trace_2 = (signal_data_2[:, 1] + \\\n\t\t\t\t\t\t\t\t\tself.signal_trace_offset_2)*\\\n\t\t\t\t\t\t\t\t\tself.signal_trace_multiplier_2", "def store(self, timeseries_dict: ty.Dict[str, TimeSeries]):\n raise NotImplementedError(\"read_forecast\")", "def add_transaction(self, tx_json):\n recv_tx = Transaction.from_json(tx_json)\n if not recv_tx.verify():\n raise Exception(\"New transaction failed signature verification.\")\n with self.all_tx_lock:\n if tx_json in self._all_transactions:\n print(f\"{self.name} - Transaction already exist in pool.\")\n return\n self._all_transactions.add(tx_json)", "def _handle_save(self, error):\n err_id = error.get('id', None)\n if err_id is None:\n return False\n # save each error under its id\n self._storage[err_id] = error\n # notify new error\n event.notify(NotifyTraceback(error))\n return True", "def flag_excitation_type(self, tracks: pd.DataFrame):\n # FIXME: Don't force convert to int, but raise an error (?)\n # First, the track method needs to preserve the data type of the time\n # column\n eseq = self.frame_selector.eval_seq()\n frames = tracks[\"donor\", self.columns[\"time\"]].to_numpy(dtype=int)\n et = pd.Series(eseq[frames % len(eseq)], dtype=\"category\")\n # Assignment to dataframe is done by matching indices, not line-wise\n # Thus copy index\n 
et.index = tracks.index\n tracks[\"fret\", \"exc_type\"] = et", "def symptomatic_infection(self, s):\n if self.disease_status == 0:\n self.disease_status = 1\n self.time_since_infection = 0\n s.number_of_symptomatic += 1\n s.infected.add(self.identifier)", "def is_tentative(self):\n return self.state == TrackState.Tentative", "def is_tentative(self):\n return self.state == TrackState.Tentative", "def _update_voided(self, tx: BaseTransaction) -> None:\n tx_meta = tx.get_metadata()\n assert tx.hash is not None\n assert tx_meta.voided_by\n log = self.log.new(tx=tx.hash_hex)\n log.debug('update voided')\n # remove all outputs\n for index, tx_output in enumerate(tx.outputs):\n log_it = log.new(index=index)\n if _should_skip_output(tx_output):\n log_it.debug('ignore output')\n continue\n log_it.debug('remove voided output')\n self._remove_utxo(UtxoIndexItem.from_tx_output(tx, index, tx_output))\n # re-add inputs that aren't voided\n for tx_input in tx.inputs:\n spent_tx = tx.get_spent_tx(tx_input)\n spent_tx_output = spent_tx.outputs[tx_input.index]\n log_it = log.new(tx_id=spent_tx.hash_hex, index=tx_input.index)\n if _should_skip_output(spent_tx_output):\n log_it.debug('ignore input')\n continue\n if spent_tx.get_metadata().voided_by:\n log_it.debug('do not re-add input that spend voided')\n continue\n spent_tx_meta = spent_tx.get_metadata()\n spent_by = spent_tx_meta.get_output_spent_by(tx_input.index)\n if spent_by is not None and spent_by != tx.hash:\n log_it.debug('do not re-add input that is spent by other tx', spent_by=spent_by.hex())\n continue\n log_it.debug('re-add input that became unspent')\n self._add_utxo(UtxoIndexItem.from_tx_output(spent_tx, tx_input.index, spent_tx_output))", "def _store_transaction(account, transaction):\n tr_tx = transaction['tx']\n meta = transaction.get('meta', {})\n\n if meta.get('TransactionResult') != 'tesSUCCESS':\n return\n\n amount = meta.get('delivered_amount') or tr_tx.get('Amount', {})\n\n is_unprocessed = (\n tr_tx['TransactionType'] == 'Payment' and\n tr_tx['Destination'] == account and\n isinstance(amount, dict) and\n not Transaction.objects.filter(hash=tr_tx['hash'])\n )\n if is_unprocessed:\n logger.info(\n format_log_message(\n 'Saving transaction: %s', transaction\n )\n )\n\n transaction_object = Transaction.objects.create(\n account=tr_tx['Account'],\n hash=tr_tx['hash'],\n destination=account,\n ledger_index=tr_tx['ledger_index'],\n destination_tag=tr_tx.get('DestinationTag'),\n source_tag=tr_tx.get('SourceTag'),\n status=Transaction.RECEIVED,\n currency=amount['currency'],\n issuer=amount['issuer'],\n value=amount['value']\n )\n\n logger.info(\n format_log_message(\n \"Transaction saved: %s\", transaction_object\n )\n )", "def transform_perfetto_trace_to_systrace(path_to_perfetto_trace: str,\n path_to_tmp_systrace: str) -> None:\n cmd_utils.run_command_nofail([str(TRACECONV_BIN),\n 'systrace',\n path_to_perfetto_trace,\n path_to_tmp_systrace])", "def market_snap(self, timestamp = -1):\n # refresh account states\n self.trading_acc1.sync_account_with_exh()\n self.trading_acc2.sync_account_with_exh()\n\n signal__arbitrage_delta = self.get_signal__arbitrage_delta()\n signal__gdax_has_usd = self.get_signal__gdax_has_usd()\n signal__gdax_has_eth = self.get_signal__gdax_has_eth()\n signal__cex_has_eth = self.get_signal__cex_has_eth()\n\n def mk_audit_js():\n gdax_account = self.trading_acc1.get_account()\n cex_account = self.trading_acc2.get_account()\n transaction_t = epoch.current_milli_time() if timestamp == -1 else timestamp\n\n audit_js = 
OrderedDict()\n audit_js['strategy_run_id'] = self.run_id\n audit_js['timestamp'] = epoch.to_str(transaction_t)\n audit_js['timestamp__long'] = transaction_t\n audit_js['ticker'] = self.ticker\n audit_js['strategy_info'] = self._strategy_info\n\n audit_js['signal'] = OrderedDict()\n audit_js['signal']['signal__gdax_has_usd'] = signal__gdax_has_usd\n audit_js['signal']['signal__gdax_has_eth'] = signal__gdax_has_eth\n audit_js['signal']['signal__cex_has_eth'] = signal__cex_has_eth\n audit_js['signal']['signal__arbitrage_delta'] = signal__arbitrage_delta\n\n audit_js['total_usd__num'] = gdax_account.js['usd__num'] + cex_account.js['usd__num']\n audit_js['total_eth__num'] = gdax_account.js['eth__num'] + cex_account.js['eth__num']\n audit_js['gdax_account'] = gdax_account.js\n audit_js['cex_account'] = cex_account.js\n return audit_js\n\n snap_again = False # Only repeat if we have an gdax buy action\n if signal__gdax_has_usd['signal'] and signal__arbitrage_delta['signal']:\n exec_context = self.exec_gdax_buy(timestamp)\n snap_again = True\n\n # Audit\n audit_js = mk_audit_js()\n audit_js['action'] = exec_context\n audit = AuditTradeModel.build(audit_js)\n logger.info('-----Executed GDAX Buy-----')\n logger.info(audit)\n logger.info('---------------------------')\n audit.db_save(es)\n\n if signal__gdax_has_eth['signal']:\n exec_context = self.exec_eth_transfer()\n\n # Audit\n audit_js = mk_audit_js()\n audit_js['action'] = exec_context\n audit = AuditTradeModel.build(audit_js)\n logger.info('-----Executed ETH TRANSFER-----')\n logger.info(audit)\n logger.info('---------------------------')\n audit.db_save(es)\n\n if signal__cex_has_eth['signal']:\n exec_context = self.exec_cex_sell(timestamp)\n\n # Audit\n audit_js = mk_audit_js()\n audit_js['action'] = exec_context\n audit = AuditTradeModel.build(audit_js)\n logger.info('-----Executed CEX Sell-----')\n logger.info(audit)\n logger.info('---------------------------')\n audit.db_save(es)\n\n # Extra logging\n audit_js = mk_audit_js()\n logger.info('post-snapping states: \\n' + json.dumps(audit_js, indent=2))\n\n return snap_again", "def save(self, ts):\n with open(self, 'w') as f:\n Timestamp.wrap(ts).dump(f)", "def save_transaction(record, bucket=None):\n if not isinstance(record, _TransactionRecord):\n raise TypeError(\"You can only write TransactionRecord objects \"\n \"to the ledger!\")\n\n if not record.is_null():\n if bucket is None:\n bucket = _login_to_service_account()\n\n _ObjectStore.set_object_from_json(bucket,\n Ledger.get_key(record.uid()),\n record.to_data())", "def save_scopeTraces(fileName, scope, channel, noPulses):\n scope._get_preamble(channel)\n results = utils.PickleFile(fileName, 1)\n results.add_meta_data(\"timeform_1\", scope.get_timeform(channel))\n\n #ct = scope.acquire_time_check()\n #if ct == False:\n # print 'No triggers for this data point. 
Will skip and set data to 0.'\n # results.save()\n # results.close()\n # return False\n\n t_start, loopStart = time.time(),time.time()\n for i in range(noPulses):\n try:\n ct = scope.acquire_time_check(timeout=.4)\n results.add_data(scope.get_waveform(channel), 1)\n except Exception, e:\n print \"Scope died, acquisition lost.\"\n print e\n if i % 100 == 0 and i > 0:\n print \"%d traces collected - This loop took : %1.1f s\" % (i, time.time()-loopStart)\n loopStart = time.time()\n print \"%d traces collected TOTAL - took : %1.1f s\" % (i, (time.time()-t_start))\n results.save()\n results.close()\n return True", "def writeSTH(seis, STH_Key, newSTH):\n traces = seis.traces\n for i, trace in enumerate(traces):\n trace.header.__setattr__(STH_Key, newSTH[i])", "def _before_stockpyle_deserialize(self, obj):\n \n # only merge SA objects\n if _is_sqlalchemy_object(obj):\n self.__session.merge(obj, load=False)", "def recordTransaction(self, loop, transaction):\n\n a = {}\n a['time'] = transaction.transactionTime\n a['atm'] = transaction.transactionATM.atmID\n a['transaction'] = transaction.transactionType\n a['cash'] = transaction.transactionATM.atmCash\n a['status'] = transaction.transactionStatus\n self._atmDict[loop] = a\n\n c = {}\n c['time'] = transaction.transactionTime\n c['client'] = transaction.transactionCard.cardAccount.accountClient.clientID\n c['account'] = transaction.transactionCard.cardAccount.accountNumber\n c['transaction'] = transaction.transactionType\n c['balance'] = transaction.transactionCard.cardAccount.accountBalance\n c['status'] = transaction.transactionStatus\n self._clientDict[loop] = c\n\n t = {}\n t['time'] = transaction.transactionTime\n t['transaction'] = transaction.transactionType\n t['amount'] = transaction.transactionAmount\n t['status'] = transaction.transactionStatus\n self._transactionDict[loop] = t", "def _save(self):\n if self.character:\n self.character.db_command_stack = dumps(self)", "def process_trace(self, trace):\n # type: (List[Span]) -> Optional[List[Span]]\n pass", "def test_log_track_with_transform(self):\n tf_id = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2')\n self.assertNotEqual(tf_id, 0)\n self.app.load_data()\n\n track = self.app.log_track(self.track_obj('silence.mp3'))\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(track.pk)\n self.assertNotEqual(track_row, None)\n self.assertEqual(track_row['lasttransform'], tf_id)\n self.assertEqual(track_row['artist'], 'Artist 2')\n self.assertEqual(track_row['album'], 'Album')\n self.assertEqual(track_row['title'], 'Track')\n self.assertEqual(track_row['source'], 'xmms')", "def is_stair(game_object: GameObject) -> bool:\n return CommonObjectTagUtils.has_game_tags(game_object, (\n CommonGameTag.BUILD_STAIR,\n ))", "def change_TTS_engine(self):\n\t\t\n\t\tif self.isActiveDualTTS:\n\t\t\t#dual TTS\n\t\t\tpath_to_file = '/var/lib/snips/skills/snips_app_pilldispenser/settings/dual_TTS.sh'\n\t\t\tsubprocess.call([path_to_file])\n\t\t\tprint('Dual TTS is enabled. Using Amazon Polly TTS in case of internet connection, else use offline Picotts TTS.')\n\t\t\t\n\t\telse:\n\t\t\t#go back to single offline Picotts TTS\n\t\t\tpath_to_file = '/var/lib/snips/skills/snips_app_pilldispenser/settings/single_TTS.sh'\n\t\t\tsubprocess.call([path_to_file])\n\t\t\tprint('Dual TTS is disabled. 
Using offline Picotts TTS regardless of internect connection.')", "def supports_transactions(self):\n return self._mysql_storage_engine != \"MyISAM\"", "def process_transaction(self, transaction):\n instrument = transaction.instrument\n if isinstance(instrument, Future):\n try:\n old_price = self._payout_last_sale_prices[instrument]\n except KeyError:\n self._payout_last_sale_prices[instrument] = transaction.price\n else:\n position = self.position_tracker.positions[instrument]\n amount = position.amount\n price = transaction.price\n\n self._cash_flow(\n self._calculate_payout(\n instrument.multiplier,\n amount,\n old_price,\n price,\n ),\n )\n\n if amount + transaction.amount == 0:\n del self._payout_last_sale_prices[instrument]\n else:\n self._payout_last_sale_prices[instrument] = price\n else:\n self._cash_flow(-(transaction.price * transaction.amount))\n\n self.position_tracker.execute_transaction(transaction)\n\n # we only ever want the dict form from now on\n transaction_dict = transaction.to_dict()\n try:\n self._processed_transactions[transaction.dt].append(\n transaction_dict,\n )\n except KeyError:\n self._processed_transactions[transaction.dt] = [transaction_dict]", "def test_save_unjsonable_subs_to_store(self):\r\n with self.assertRaises(NotFoundError):\r\n contentstore().find(self.content_location_unjsonable)\r\n\r\n with self.assertRaises(TypeError):\r\n transcripts_utils.save_subs_to_store(\r\n self.unjsonable_subs,\r\n self.unjsonable_subs_id,\r\n self.course)\r\n\r\n with self.assertRaises(NotFoundError):\r\n contentstore().find(self.content_location_unjsonable)", "def _is_valid_trade(self, trade):\n if not trade:\n return False\n\n if trade.Status() in VALID_TRADE_STATUSES:\n if acm.Time().AsDate(trade.TradeTime()) > self.start_date:\n return False\n print '1'\n ins_type = trade.Instrument().InsType()\n if ins_type == 'Curr':\n if trade.ValueDay() > self.start_date:\n return True\n elif ins_type == 'Combination':\n for comb_ins in trade.Instrument().Instruments():\n trades = comb_ins.Trades()\n if trades and trades[0] in VALID_TRADE_STATUSES:\n trade = trades[0]\n ins_type = trade.Instrument().InsType()\n if (self._is_basis_trade(trade) and\n ins_type in ('Swap', 'FRA')):\n return True\n elif ins_type == 'CurrSwap':\n if trade.Instrument().ExpiryDateOnly() > self.start_date:\n return True\n else:\n if trade.Instrument().ExpiryDateOnly() > self.start_date:\n if (self._is_basis_trade(trade) and\n ins_type in ('Swap', 'FRA')):\n return True\n\n return False", "def visualize_transcription(self):\n # # Test if current nwb file contains Survey table\n # if 'behavior' in self.model.nwb.processing:\n # list_surveys = [v for v in self.model.nwb.processing['behavior'].data_interfaces.values()\n # if v.neurodata_type == 'SurveyTable']\n # if len(list_surveys) > 0:\n # ShowSurveyDialog(nwbfile=self.model.nwb)\n ShowTranscriptionDialog(nwbfile=self.model.nwb)\n pass", "def _set_types(self):\n if isinstance(self.elastic_span, elasticapm.traces.Transaction):\n attributes = self.elastic_span.context.get(\"otel_attributes\", {})\n span_kind = self.elastic_span.context[\"otel_spankind\"]\n is_rpc = bool(attributes.get(\"rpc.system\"))\n is_http = bool(attributes.get(\"http.url\")) or bool(attributes.get(\"http.scheme\"))\n is_messaging = bool(attributes.get(\"messaging.system\"))\n if span_kind == SpanKind.SERVER.name and (is_rpc or is_http):\n transaction_type = \"request\"\n elif span_kind == SpanKind.CONSUMER.name and is_messaging:\n transaction_type = \"messaging\"\n else:\n 
transaction_type = \"unknown\"\n self.elastic_span.transaction_type = transaction_type\n else:\n attributes = self.elastic_span.context.get(\"otel_attributes\", {})\n span_type = None\n span_subtype = None\n resource = None\n\n def http_port_from_scheme(scheme: str):\n if scheme == \"http\":\n return 80\n elif scheme == \"https\":\n return 443\n return None\n\n def parse_net_name(url: str):\n u = urllib.parse.urlparse(url)\n if u.port:\n return u.netloc\n else:\n port = http_port_from_scheme(u.scheme)\n return u.netloc if not port else \"{}:{}\".format(u.netloc, port)\n\n net_port = attributes.get(\"net.peer.port\", -1)\n net_name = net_peer = attributes.get(\"net.peer.name\", attributes.get(\"net.peer.ip\", \"\"))\n\n if net_name and (net_port > 0):\n net_name = f\"{net_name}:{net_port}\"\n\n if attributes.get(\"db.system\"):\n span_type = \"db\"\n span_subtype = attributes.get(\"db.system\")\n resource = net_name or span_subtype\n if attributes.get(\"db.name\"):\n resource = \"{}/{}\".format(resource, attributes.get(\"db.name\"))\n elif attributes.get(\"messaging.system\"):\n span_type = \"messaging\"\n span_subtype = attributes.get(\"messaging.system\")\n if not net_name and attributes.get(\"messaging.url\"):\n net_name = parse_net_name(attributes.get(\"messaging.url\"))\n resource = net_name or span_subtype\n if attributes.get(\"messaging.destination\"):\n resource = \"{}/{}\".format(resource, attributes.get(\"messaging.destination\"))\n elif attributes.get(\"rpc.system\"):\n span_type = \"external\"\n span_subtype = attributes.get(\"rpc.system\")\n resource = net_name or span_subtype\n if attributes.get(\"rpc.service\"):\n resource = \"{}/{}\".format(resource, attributes.get(\"rpc.service\"))\n elif attributes.get(\"http.url\") or attributes.get(\"http.scheme\"):\n span_type = \"external\"\n span_subtype = \"http\"\n http_host = attributes.get(\"http.host\", net_peer)\n if http_host:\n if net_port < 0:\n net_port = http_port_from_scheme(attributes.get(\"http.scheme\"))\n resource = http_host if net_port < 0 else f\"{http_host}:{net_port}\"\n elif attributes.get(\"http.url\"):\n resource = parse_net_name(attributes[\"http.url\"])\n\n if not span_type:\n span_kind = self.elastic_span.context[\"otel_spankind\"]\n if span_kind == SpanKind.INTERNAL.name:\n span_type = \"app\"\n span_subtype = \"internal\"\n else:\n span_type = \"unknown\"\n self.elastic_span.type = span_type\n self.elastic_span.subtype = span_subtype\n if resource:\n if \"destination\" not in self.elastic_span.context:\n self.elastic_span.context[\"destination\"] = {\"service\": {\"resource\": resource}}\n elif \"service\" not in self.elastic_span.context[\"destination\"]:\n self.elastic_span.context[\"destination\"][\"service\"] = {\"resource\": resource}\n else:\n self.elastic_span.context[\"destination\"][\"service\"][\"resource\"] = resource", "def on_save_interpretation_button(self, event):\n if \"specimen_int_uT\" not in self.Data[self.s]['pars']:\n return\n if 'deleted' in self.Data[self.s]['pars']:\n self.Data[self.s]['pars'].pop('deleted')\n self.Data[self.s]['pars']['saved'] = True\n\n # collect all interpretation by sample\n sample = self.Data_hierarchy['specimens'][self.s]\n if sample not in list(self.Data_samples.keys()):\n self.Data_samples[sample] = {}\n if self.s not in list(self.Data_samples[sample].keys()):\n self.Data_samples[sample][self.s] = {}\n self.Data_samples[sample][self.s]['B'] = self.Data[self.s]['pars'][\"specimen_int_uT\"]\n\n # collect all interpretation by site\n # 
site=thellier_gui_lib.get_site_from_hierarchy(sample,self.Data_hierarchy)\n site = thellier_gui_lib.get_site_from_hierarchy(\n sample, self.Data_hierarchy)\n if site not in list(self.Data_sites.keys()):\n self.Data_sites[site] = {}\n if self.s not in list(self.Data_sites[site].keys()):\n self.Data_sites[site][self.s] = {}\n self.Data_sites[site][self.s]['B'] = self.Data[self.s]['pars'][\"specimen_int_uT\"]\n\n self.draw_sample_mean()\n self.write_sample_box()\n self.close_warning = True", "def test_serialize_sinfo(self):\n self.assert_raises(RuntimeError, self.instance.serialize,\n self.testing_options['objects'][0],\n add_serializer_info=True)", "def test_add_trace(self):\n payload = Payload()\n\n # Add falsey traces\n for val in (False, None, 0, '', [], dict()):\n payload.add_trace(val)\n self.assertEqual(payload.length, 0)\n self.assertTrue(payload.empty)\n\n # Add a single trace to the payload\n trace = [Span(self.tracer, name='root.span'), Span(self.tracer, name='child.span')]\n payload.add_trace(trace)\n\n self.assertEqual(payload.length, 1)\n self.assertFalse(payload.empty)", "def filter_trace_variable_event(self, event):\n return isinstance(event, (TraceAccess, TraceAssign, TraceDelete))", "def process_traces(st,positions):\n spti = {}\n isis = {}\n rav = {}\n\n nspti = {}\n nisis = {}\n nrav = {}\n\n for i,sti in enumerate(st.keys()):\n print('analysing trace nr %i'%i)\n cst = st[sti]\n cp = positions[sti]\n cisi = np.diff(cst)\n if len(cisi) > 0:\n spti[sti], isis[sti], positions[sti] = delete_artifacts(cst,cisi,cp)\n nspti[sti], nisis[sti] = fill_gaps(spti[sti][:-1],np.diff(spti[sti]))\n\n print('smooting traces')\n\n return smooth_traces(nspti,nisis,5), positions", "def _add_stix_object(self, stix_object: _Observable):\n if stix_object.id in self._all_objects:\n if len(stix_object.serialize()) > len(self._all_objects[stix_object.id].serialize()):\n self._all_objects[stix_object.id] = stix_object\n else:\n self._all_objects[stix_object.id] = stix_object", "def _check_duplicate_trans(self):\n transactions_set = set(self._transactions)\n return len(transactions_set) == len(self._transactions)", "def loads_trade(self, trade_id, trade_type, data, operations):\n trade = None\n\n if trade_type == StrategyTrade.TRADE_BUY_SELL:\n trade = StrategyAssetTrade(0)\n elif trade_type == StrategyTrade.TRADE_MARGIN:\n trade = StrategyMarginTrade(0)\n elif trade_type == StrategyTrade.TRADE_POSITION:\n trade = StrategyPositionTrade(0)\n elif trade_type == StrategyTrade.TRADE_IND_MARGIN:\n trade = StrategyIndMarginTrade(0)\n else:\n error_logger.error(\"During loads, usupported trade type %i\" % (trade_type,))\n return\n\n trade.loads(data, self.strategy.service)\n\n # operations\n for op in operations:\n if op['name'] in self.strategy.service.tradeops:\n try: \n operation = self.strategy.service.tradeops[op['name']]()\n operation.loads(op)\n\n if operation.check(trade):\n # append the operation to the trade\n trade.add_operation(operation)\n else:\n error_logger.error(\"During loads, operation checking error %s\" % (op_name,))\n except Exception as e:\n error_logger.error(repr(e))\n else:\n error_logger.error(\"During loads, region checking error %s\" % (r['name'],))\n\n # ignored for now because need to check assets/positions/orders\n # self.add_trade(trade)", "def create_incoming_transaction(transaction: IncomingTransactionCreate, db: Session = Depends(get_db), auth_user: User=Depends(manager)):\n try:\n transaction = transaction_service.create(db, auth_user, transaction)\n return transaction\n 
except ItensNotFound as err:\n\t raise HTTPException(status_code=404, detail=f\"Os seguintes produtos não foram encontrados no sistema: {str(err)}\")\n except ProductsNotFound as err:\n raise HTTPException(status_code=400, detail=\"A movimentação a ser registrada deve conter no minimo um produto.\")\n except ProviderNotFound as err:\n\t raise HTTPException(status_code=404, detail=f\"O fornecedor informado não foi encontrado: {str(err)}\")\n except InvalidStockQuantity as err:\n products_missing = transaction_service.make_response(db, str(err))\n raise HTTPException(status_code=400, detail={\n \"message\": \"A quantidade informada para os seguintes produtos deve ser maior do que zero.\",\n \"products_missing\": products_missing\n })", "def saveTrans(self):\n modtranDataDir = os.getenv('MODTRAN_DATADIR')\n outputfile = '{0}/{1}_final.plt'.format(\n self.outfilename, self.outfilename)\n outputpath = os.path.join(modtranDataDir, outputfile)\n with open(outputpath, 'w') as transmf:\n transmf.write('$ FINAL ATMOSPHERE TRANSMISSION\\n')\n for val in range(len(self.modtran_wl)):\n data = '\\t'.join('{0:f}'.format(self.transmittance[run][val])\n for run in range(len(self.modtran_wl)))\n line = '{0}\\t{1}\\n'.format(self.modtran_wl[val], data)\n transmf.write(line)", "def save(self):\n self.add_statements(self.triples())", "def configureThisTrace(self):\n for spec in Trace.traceSpec:\n if (spec.compiledRegex.match(self.entityName)):\n self.traceLevel = spec.level\n break\n #endIf\n #endFor", "def _is_transaction_isolation_error(self, error):\n return False", "def is_serious(self, level=ERROR):\n return self.level >= level", "def should_save(event) -> bool:\n schema = event[\"$schemaRef\"]\n if schema == \"https://eddn.edcd.io/schemas/commodity/3\":\n return True\n elif schema == \"https://eddn.edcd.io/schemas/journal/1\":\n return event[\"message\"][\"event\"] == \"Docked\"" ]
[ "0.54398495", "0.5149518", "0.51403934", "0.51361316", "0.5022585", "0.48563206", "0.48534927", "0.48511583", "0.4807828", "0.47867486", "0.46448067", "0.46145433", "0.45834467", "0.4583097", "0.45755658", "0.4567124", "0.45389777", "0.45228583", "0.45225585", "0.45150757", "0.45129964", "0.4472929", "0.44627464", "0.44578195", "0.4440597", "0.44364247", "0.44289523", "0.44144338", "0.4412158", "0.44016916", "0.43960384", "0.43888324", "0.43816006", "0.43731356", "0.4363284", "0.4361652", "0.43608597", "0.43204397", "0.43175736", "0.43163043", "0.43141267", "0.43092197", "0.43065256", "0.43032444", "0.42844808", "0.4249547", "0.42486185", "0.42334342", "0.42308417", "0.42304826", "0.4226509", "0.42261812", "0.42091802", "0.4205113", "0.42026576", "0.41993904", "0.4199355", "0.4192711", "0.4183708", "0.41762114", "0.41716656", "0.41671136", "0.4164237", "0.4164237", "0.41612288", "0.41561615", "0.4154866", "0.41522595", "0.4148224", "0.41455764", "0.4143687", "0.41236743", "0.41112763", "0.4109861", "0.41051835", "0.41039652", "0.41023162", "0.40989587", "0.40866426", "0.4084628", "0.40788648", "0.4075117", "0.407139", "0.40675253", "0.40657637", "0.4063273", "0.4062249", "0.40614104", "0.40573442", "0.405548", "0.40554017", "0.40541264", "0.40516865", "0.40495834", "0.40349567", "0.4034339", "0.4026953", "0.40243247", "0.4020469", "0.40203857" ]
0.64152426
0
Record any apdex and time metrics for the transaction as well as any errors which occurred for the transaction. If the transaction qualifies to become the slow transaction remember it for later.
def record_transaction(self, transaction):

    if not self.__settings:
        return

    settings = self.__settings

    # Record the apdex, value and time metrics generated from the
    # transaction. Whether time metrics are reported as distinct
    # metrics or into a rollup is in part controlled via settings
    # for minimum number of unique metrics to be reported and thence
    # whether over a time threshold calculated as percentage of
    # overall request time, up to a maximum number of unique
    # metrics. This is intended to limit how many metrics are
    # reported for each transaction and try and cut down on an
    # explosion of unique metric names. The limits and thresholds
    # are applied after the metrics are reverse sorted based on
    # exclusive times for each metric. This ensures that the metrics
    # with greatest exclusive time are retained over those with
    # lesser time. Such metrics get reported into the performance
    # breakdown tab for specific web transactions.

    self.record_apdex_metrics(transaction.apdex_metrics(self))

    self.merge_custom_metrics(transaction.custom_metrics.metrics())

    self.record_time_metrics(transaction.time_metrics(self))

    # Capture any errors if error collection is enabled.
    # Only retain maximum number allowed per harvest.

    error_collector = settings.error_collector

    if (error_collector.enabled and settings.collect_errors and
            len(self.__transaction_errors) <
            settings.agent_limits.errors_per_harvest):
        self.__transaction_errors.extend(transaction.error_details())

        self.__transaction_errors = self.__transaction_errors[:
                settings.agent_limits.errors_per_harvest]

    if (error_collector.capture_events and
            error_collector.enabled and
            settings.collect_error_events):
        events = transaction.error_events(self.__stats_table)
        for event in events:
            self._error_events.add(event, priority=transaction.priority)

    # Capture any sql traces if transaction tracer enabled.

    if settings.slow_sql.enabled and settings.collect_traces:
        for node in transaction.slow_sql_nodes(self):
            self.record_slow_sql_node(node)

    # Remember as slowest transaction if transaction tracer
    # is enabled, it is over the threshold and slower than
    # any existing transaction seen for this period and in
    # the historical snapshot of slow transactions, plus
    # recording of transaction trace for this transaction
    # has not been suppressed.

    transaction_tracer = settings.transaction_tracer

    if (not transaction.suppress_transaction_trace and
            transaction_tracer.enabled and settings.collect_traces):

        # Transactions saved for Synthetics transactions
        # do not depend on the transaction threshold.

        self._update_synthetics_transaction(transaction)

        threshold = transaction_tracer.transaction_threshold

        if threshold is None:
            threshold = transaction.apdex_t * 4

        if transaction.duration >= threshold:
            self._update_slow_transaction(transaction)

    # Create the transaction event and add it to the
    # appropriate "bucket." Synthetic requests are saved in one,
    # while transactions from regular requests are saved in another.

    if transaction.synthetics_resource_id:
        event = transaction.transaction_event(self.__stats_table)
        self._synthetics_events.add(event)

    elif (settings.collect_analytics_events and
            settings.transaction_events.enabled):

        event = transaction.transaction_event(self.__stats_table)
        self._transaction_events.add(event, priority=transaction.priority)

    # Merge in custom events

    if (settings.collect_custom_events and
            settings.custom_insights_events.enabled):
        self.custom_events.merge(transaction.custom_events)

    # Merge in span events

    if (settings.distributed_tracing.enabled and
            settings.span_events.enabled and settings.collect_span_events):
        if settings.infinite_tracing.enabled:
            for event in transaction.span_protos(settings):
                self._span_stream.put(event)
        elif transaction.sampled:
            for event in transaction.span_events(self.__settings):
                self._span_events.add(event, priority=transaction.priority)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_slow_transaction(self, transaction):\n\n slowest = 0\n name = transaction.path\n\n if self.__slow_transaction:\n slowest = self.__slow_transaction.duration\n if name in self.__slow_transaction_map:\n slowest = max(self.__slow_transaction_map[name], slowest)\n\n if transaction.duration > slowest:\n # We are going to replace the prior slow transaction.\n # We need to be a bit tricky here. If we are overriding\n # an existing slow transaction for a different name,\n # then we need to restore in the transaction map what\n # the previous slowest duration was for that, or remove\n # it if there wasn't one. This is so we do not incorrectly\n # suppress it given that it was never actually reported\n # as the slowest transaction.\n\n if self.__slow_transaction:\n if self.__slow_transaction.path != name:\n if self.__slow_transaction_old_duration:\n self.__slow_transaction_map[\n self.__slow_transaction.path] = (\n self.__slow_transaction_old_duration)\n else:\n del self.__slow_transaction_map[\n self.__slow_transaction.path]\n\n if name in self.__slow_transaction_map:\n self.__slow_transaction_old_duration = (\n self.__slow_transaction_map[name])\n else:\n self.__slow_transaction_old_duration = None\n\n self.__slow_transaction = transaction\n self.__slow_transaction_map[name] = transaction.duration", "def transaction_trace_data(self, connections):\n\n _logger.debug('Generating transaction trace data.')\n\n if not self.__settings:\n return []\n\n # Create a set 'traces' that is a union of slow transaction,\n # and Synthetics transactions. This ensures we don't send\n # duplicates of a transaction.\n\n traces = set()\n if self.__slow_transaction:\n traces.add(self.__slow_transaction)\n traces.update(self.__synthetics_transactions)\n\n # Return an empty list if no transactions were captured.\n\n if not traces:\n return []\n\n # We want to limit the number of explain plans we do across\n # these. So work out what were the slowest and tag them.\n # Later the explain plan will only be run on those which are\n # tagged.\n\n agent_limits = self.__settings.agent_limits\n explain_plan_limit = agent_limits.sql_explain_plans_per_harvest\n maximum_nodes = agent_limits.transaction_traces_nodes\n\n database_nodes = []\n\n if explain_plan_limit != 0:\n for trace in traces:\n for node in trace.slow_sql:\n # Make sure we clear any flag for explain plans on\n # the nodes in case a transaction trace was merged\n # in from previous harvest period.\n\n node.generate_explain_plan = False\n\n # Node should be excluded if not for an operation\n # that we can't do an explain plan on. Also should\n # not be one which would not be included in the\n # transaction trace because limit was reached.\n\n if (node.node_count < maximum_nodes and\n node.connect_params and node.statement.operation in\n node.statement.database.explain_stmts):\n database_nodes.append(node)\n\n database_nodes = sorted(database_nodes,\n key=lambda x: x.duration)[-explain_plan_limit:]\n\n for node in database_nodes:\n node.generate_explain_plan = True\n\n else:\n for trace in traces:\n for node in trace.slow_sql:\n node.generate_explain_plan = True\n database_nodes.append(node)\n\n # Now generate the transaction traces. 
We need to cap the\n # number of nodes capture to the specified limit.\n\n trace_data = []\n\n for trace in traces:\n transaction_trace = trace.transaction_trace(\n self, maximum_nodes, connections)\n\n data = [transaction_trace,\n list(trace.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n if trace.record_tt:\n force_persist = True\n else:\n force_persist = False\n\n if trace.include_transaction_trace_request_uri:\n request_uri = trace.request_uri\n else:\n request_uri = None\n\n trace_data.append([transaction_trace.start_time,\n root.end_time - root.start_time,\n trace.path,\n request_uri,\n pack_data,\n trace.guid,\n None,\n force_persist,\n None,\n trace.synthetics_resource_id, ])\n\n return trace_data", "def recordTransaction(self, loop, transaction):\n\n a = {}\n a['time'] = transaction.transactionTime\n a['atm'] = transaction.transactionATM.atmID\n a['transaction'] = transaction.transactionType\n a['cash'] = transaction.transactionATM.atmCash\n a['status'] = transaction.transactionStatus\n self._atmDict[loop] = a\n\n c = {}\n c['time'] = transaction.transactionTime\n c['client'] = transaction.transactionCard.cardAccount.accountClient.clientID\n c['account'] = transaction.transactionCard.cardAccount.accountNumber\n c['transaction'] = transaction.transactionType\n c['balance'] = transaction.transactionCard.cardAccount.accountBalance\n c['status'] = transaction.transactionStatus\n self._clientDict[loop] = c\n\n t = {}\n t['time'] = transaction.transactionTime\n t['transaction'] = transaction.transactionType\n t['amount'] = transaction.transactionAmount\n t['status'] = transaction.transactionStatus\n self._transactionDict[loop] = t", "def trackTrans(self):\n self.data_struct['_trackTrans'] = True", "def slow_transaction_data(self):\n\n # XXX This method no longer appears to be used. 
Being replaced\n # by the transaction_trace_data() method.\n\n if not self.__settings:\n return []\n\n if not self.__slow_transaction:\n return []\n\n maximum = self.__settings.agent_limits.transaction_traces_nodes\n\n transaction_trace = self.__slow_transaction.transaction_trace(\n self, maximum)\n\n data = [transaction_trace,\n list(self.__slow_transaction.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n trace_data = [[root.start_time,\n root.end_time - root.start_time,\n self.__slow_transaction.path,\n self.__slow_transaction.request_uri,\n pack_data]]\n\n return trace_data", "def transaction_time(self, transaction_time):\n\n self._transaction_time = transaction_time", "def _update_time_delivered(self, time_delivered):\n # Update db record's time_delivered field\n update = {'time_delivered': time_delivered}\n datebase.update_transaction_record(filter=self.filter, update=update)\n \n # Update db record's estimated_time field\n datebase.update_transaction_record(filter=self.filter, {estimated_time:'0'})\n \n # Update db record's transaction status to delivered\n self._update_transaction_status(transaction_status='delivered')\n \t\t self.transaction_info.update(delivery_status='delivered')\n \n # Update object\n \t\tself.transaction_info.update(time_delivered=time_delivered)\n self.transaction_info.update(estimated_time=0)\n self.transaction_info(transaction_status='delivered')\n\n \tdef _update_transaction_status(self, transaction_status, photo=None):\n \"\"\"\n Update record's transaction_status and send sms msg to update seeker\n \"\"\"\n # Send text message when status changes \n self.send_text(message_type=transaction_status)\n\n # Update db record's transaction status\n update = {'transaction_status': transaction_status}\n datebase.update_transaction_record(filter=self.filter, update=update)\n\n # Update object\n self.transaction_info.update('transaction_seeker': transaction_status)\n\n # If delivered ... TODO: do we actually want to remove from db? 
\n \t\t# if transaction_status == 'delivered':\n # datebase.delete_transaction_record()\n # return 1 \n # arguments against: we wont be able to access delivered photo if we want to do that", "def record_apdex_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_apdex_metric(metric)", "def transaction_failed_before_processing(self):", "def _post_record(\n self, ret_record_args, error, cost, start_time, end_time, record\n ):\n\n ret_record_args['main_error'] = str(error)\n ret_record_args['calls'] = record\n ret_record_args['cost'] = cost\n ret_record_args['perf'] = Perf(start_time=start_time, end_time=end_time)\n ret_record_args['app_id'] = self.app_id\n ret_record_args['tags'] = self.tags\n\n ret_record = Record(**ret_record_args)\n\n if error is not None:\n if self.feedback_mode == FeedbackMode.WITH_APP:\n self._handle_error(record=ret_record, error=error)\n\n elif self.feedback_mode in [FeedbackMode.DEFERRED,\n FeedbackMode.WITH_APP_THREAD]:\n TP().runlater(\n self._handle_error, record=ret_record, error=error\n )\n\n raise error\n\n if self.feedback_mode == FeedbackMode.WITH_APP:\n self._handle_record(record=ret_record)\n\n elif self.feedback_mode in [FeedbackMode.DEFERRED,\n FeedbackMode.WITH_APP_THREAD]:\n TP().runlater(self._handle_record, record=ret_record)\n\n return ret_record", "def record_slow_sql_node(self, node):\n\n if not self.__settings:\n return\n\n key = node.identifier\n stats = self.__sql_stats_table.get(key)\n if stats is None:\n # Only record slow SQL if not already over the limit on\n # how many can be collected in the harvest period.\n\n settings = self.__settings\n maximum = settings.agent_limits.slow_sql_data\n if len(self.__sql_stats_table) < maximum:\n stats = SlowSqlStats()\n self.__sql_stats_table[key] = stats\n\n if stats:\n stats.merge_slow_sql_node(node)\n\n return key", "def transaction_failed(self):", "async def test_all_transactions(self):\n response = await self.collect(get_request_text=self.GATLING_LOG)\n self.assert_measurement(response, value=\"2\")", "def process_transaction(self, transaction):\n instrument = transaction.instrument\n if isinstance(instrument, Future):\n try:\n old_price = self._payout_last_sale_prices[instrument]\n except KeyError:\n self._payout_last_sale_prices[instrument] = transaction.price\n else:\n position = self.position_tracker.positions[instrument]\n amount = position.amount\n price = transaction.price\n\n self._cash_flow(\n self._calculate_payout(\n instrument.multiplier,\n amount,\n old_price,\n price,\n ),\n )\n\n if amount + transaction.amount == 0:\n del self._payout_last_sale_prices[instrument]\n else:\n self._payout_last_sale_prices[instrument] = price\n else:\n self._cash_flow(-(transaction.price * transaction.amount))\n\n self.position_tracker.execute_transaction(transaction)\n\n # we only ever want the dict form from now on\n transaction_dict = transaction.to_dict()\n try:\n self._processed_transactions[transaction.dt].append(\n transaction_dict,\n )\n except KeyError:\n self._processed_transactions[transaction.dt] = [transaction_dict]", "def _log_update_time(self, *_):\n import time\n if not hasattr(self, '_time'):\n setattr(self, '_time', time.time())\n _time = time.time()\n debug('Time since last call: {:.6f}s'.format(_time - getattr(self, '_time')))\n setattr(self, '_time', _time)", "def _record_current_time(self):\n now = time.time()\n delta = now - self._last_time\n self._last_time = now\n self._timing_recorder.append(delta)", "def record_transaction(self, 
transaction: Transaction) -> bool:\n if self._locked:\n print('Failed to record transaction! Your account has been locked!'\n )\n return False\n\n if transaction.amount > self.bank_balance:\n print('Failed to record transaction! Not enough balance!')\n return False\n\n budget = self.budget_manager.get_budget(transaction.budget_category)\n if budget.locked:\n print('Failed to record transaction! This budget has been locked!')\n return False\n\n self.transactions.append(transaction)\n self.bank_balance -= transaction.amount\n budget.amount_spent += transaction.amount\n self._warn_and_lock_if_needed(transaction)\n return True", "def update_isolation(self, time: int):", "def audit(self):\n self.ping()", "def Analyze(self):\n \n self._analyzeLogs()\n for user in self._start_times:\n self._result[user] = self._zipTimes(user)", "def slow_update_duration(self):\n for i in range(len(self.data_file.sorted_data)):\n if self.data_file.sorted_data[i]['type'] == 'slow':\n slow_upd = self.data_file.sorted_data[i]['timestamp']\n Config.ANALYSIS.write(f\"slow at: {slow_upd}\\n\")\n if i == 0:\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n elif i == len(self.data_file.sorted_data) - 1:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\\n\")\n else:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\")\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n Config.ANALYSIS.write(\"\\n\\n\")", "def output_stats(self):\n elapsed = self.timer.elapsed.total_seconds()\n count = self.copied + self.errored\n total = self.total\n # Time per key in milliseconds\n avg = round(elapsed / count * 1000, 3)\n # Time remaining in seconds\n remaining = 1.0 * elapsed / count * (total - count)\n # Time remaining in minutes\n remaining = round(remaining / 60.0, 1)\n # Time taken in minutes\n elapsed = round(elapsed / 60.0, 1)\n\n self.log.info(f\"{self.prefix}: {avg}ms avg, {elapsed}min passed, \"\n f\"{remaining}min remaining. 
({count:,}/{total:,})\")", "def test_avg_database_time(self):\n time = timeit(\n '''post(\"http://127.0.0.1:5000/database?format=json&merge=0\",\\\n data='{\"RUR\": 1.0, \"EUR\": 2.0, \"USD\": 3.0}')''',\n number=1000,\n globals=globals())\n print(time/1000, end=' ', flush=True)", "def monitor_transactions(account):\n start_time = datetime.datetime.now()\n logger.info(\n format_log_message(\n 'Looking for new ripple transactions since last run'\n )\n )\n ledger_min_index = _get_min_ledger_index(account)\n marker = None\n has_results = True\n\n try:\n timeout = settings.RIPPLE_TIMEOUT\n except AttributeError:\n timeout = 5\n\n while has_results:\n try:\n response = account_tx(account,\n ledger_min_index,\n limit=PROCESS_TRANSACTIONS_LIMIT,\n marker=marker,\n timeout=timeout)\n except (RippleApiError, ConnectionError), e:\n logger.error(format_log_message(e))\n break\n\n transactions = response['transactions']\n marker = response.get('marker')\n has_results = bool(marker)\n\n for transaction in transactions:\n _store_transaction(account, transaction)\n\n transactions_timeout_reached = (\n datetime.datetime.now() - start_time >= datetime.timedelta(\n seconds=PROCESS_TRANSACTIONS_TIMEOUT\n )\n )\n\n if transactions_timeout_reached and has_results:\n has_results = False\n logger.error(\n 'Process_transactions command terminated because '\n '(%s seconds) timeout: %s',\n PROCESS_TRANSACTIONS_TIMEOUT, unicode(marker)\n )", "def record_transaction(self) -> None:\n Menu.prompt_record_transaction()\n tx_data = Transaction.prompt_record_tx()\n new_tx = Transaction.generate_new_tx(tx_data)\n\n # Convert the user budget category int input to the enum\n budget_category_int = new_tx.budget_category\n budget_category = BudgetManager.category_mapping[budget_category_int]\n\n # Retrieve the budget object using the enum as the key\n budget = self.user.budget_manager.budget_dict[budget_category]\n\n # Validate the transaction before proceeding\n validated_tx, error_msg = self.validate_transaction_record(new_tx,\n budget)\n if not validated_tx:\n print(\"\\n[red]Warning:[/red] Unable to record transaction!\")\n print(error_msg)\n print(f\"{self.user.account}\\n\")\n print(budget)\n return\n\n # User has successfully recorded a transaction\n budget.add_amount_spent(new_tx.tx_amount)\n self.user.account.add_amount_spent(new_tx.tx_amount)\n self.user.tx_manager.add_transaction(new_tx)\n self.user.update_lock_status()\n print(\"\\nSuccessfully recorded the following transaction:\")\n print(new_tx)\n print(\"\\nTransaction has been recorded under the following budget \"\n \"category:\")\n print(budget)\n\n self.user.check_and_issue_user_warnings(budget)", "def log_and_dispatch(self, state_manager, state_change):\n state_change_id = self.raiden.transaction_log.log(state_change)\n events = self.dispatch(state_manager, state_change)\n self.raiden.transaction_log.log_events(\n state_change_id,\n events,\n self.raiden.get_block_number()\n )", "def log_results(self, filename=None):\n\n self.ad_log['train_auc'] = self.diag['train']['auc'][-1]\n self.ad_log['train_accuracy'] = self.diag['train']['acc'][-1]\n self.ad_log['train_time'] = self.train_time\n\n self.ad_log['test_auc'] = self.diag['test']['auc'][-1]\n self.ad_log['test_accuracy'] = self.diag['test']['acc'][-1]\n self.ad_log['test_time'] = self.test_time\n\n self.ad_log.save_to_file(filename=filename)", "def record_time_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_time_metric(metric)", "def 
record_trace(self):\n\n tfname = str(int(time.time())) + \".obd2_reader.trace\"\n self.tf_out = open(tfname, 'a')\n self.RecordTrace = 1\n print \"Recoding trace to:\", tfname", "def _log_progress(self, t):\n\n # Run the update only 2 step before the actual logging happens in order to\n # make sure that the most recent possible values will be stored in\n # self.summary. This is a hacky workaround in order to support OffPolicyAgent\n # which runs 2 threads without coordination\n if (t+2) % self.log_freq == 0 and self.learn_started:\n episode_rewards = self.env_monitor.get_episode_rewards()\n self.episode_rewards = np.asarray(episode_rewards)\n if self.episode_rewards.size > 0:\n self.mean_ep_rew = np.mean(episode_rewards[-self.stats_n:])\n self.best_mean_ep_rew = max(self.best_mean_ep_rew, self.mean_ep_rew)\n\n if t % self.log_freq == 0 and self.learn_started:\n stats_logger.info(\"\")\n for s, lambda_v in self.log_info:\n stats_logger.info(s.format(lambda_v(t)))\n stats_logger.info(\"\")\n\n if self.summary:\n # Log with TensorBoard\n self.tb_writer.add_summary(self.summary, global_step=t)", "def _get_recordTtl(self):\n return self.__recordTtl", "def get_post_stats(self):\n stats = self.stats\n stats.results = self.job.result().get_counts(stats.iteration)\n stats.datetime = str(datetime.now())", "def _trace(self):\n self.__aceQLHttpApi.trace()", "def report(self):\n self.last_contacted = time.time()", "def report():\n _get_cached_tracker().report()", "def flush_transaction(self):\n self.flush_split()\n if not self.transaction_cancelled:\n if self.transaction_info.account is None and self.account_info and self.account_info.name:\n self.transaction_info.account = self.account_info.name\n if self.transaction_info.is_valid():\n self.transaction_infos.append(self.transaction_info)\n self.transaction_cancelled = False\n self.transaction_info = TransactionInfo()", "async def test_transaction_specific_response_time_target(self):\n self.set_source_parameter(\"transaction_specific_target_response_times\", [\"[Bb]ar:150\"])\n response = await self.collect(get_request_json_return_value=self.GATLING_JSON)\n self.assert_measurement(response, value=\"1\", entities=self.expected_entities[:1])", "def _record_result(self, action, data, tags=None):\r\n if tags is None:\r\n tags = []\r\n\r\n tags.append(u'result:{}'.format(data.get('success', False)))\r\n tags.append(u'action:{}'.format(action))\r\n dog_stats_api.increment(self._metric_name('request.count'), tags=tags)", "async def update_trade_stats(self):\n\n summary_keys = [base for base in config['min_base_volumes']] + ['global']\n summaries = {\n key: {\n 'open_count': 0,\n 'buys': 0,\n 'rebuys': 0,\n 'sells': 0,\n 'collect_sells': 0,\n 'soft_stop_sells': 0,\n 'total_profit': 0.0,\n 'total_loss': 0.0,\n 'total_fees': 0.0,\n 'balancer_refills': 0,\n 'balancer_remits': 0,\n 'balancer_stop_losses': 0,\n 'balancer_profit': 0.0,\n 'balancer_loss': 0.0,\n 'balancer_fees': 0.0,\n } for key in summary_keys\n }\n\n for pair in self.trades:\n if pair not in self.trade_stats[self.time_prefix]:\n continue\n\n base = pair.split('-', 1)[0]\n open_count = len(self.trades[pair]['open'])\n\n summaries[base]['open_count'] += open_count\n summaries[base]['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries[base]['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries[base]['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries[base]['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n 
summaries[base]['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries[base]['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries[base]['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries[base]['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries[base]['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries[base]['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries[base]['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries[base]['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries[base]['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n summaries['global']['open_count'] += open_count\n summaries['global']['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries['global']['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries['global']['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries['global']['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries['global']['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries['global']['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries['global']['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries['global']['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries['global']['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries['global']['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries['global']['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries['global']['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries['global']['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n for key in summaries:\n self.trade_stats[self.time_prefix][key]['buys'] = summaries[key]['buys']\n self.trade_stats[self.time_prefix][key]['rebuys'] = summaries[key]['rebuys']\n self.trade_stats[self.time_prefix][key]['sells'] = summaries[key]['sells']\n self.trade_stats[self.time_prefix][key]['collect_sells'] = summaries[key]['collect_sells']\n self.trade_stats[self.time_prefix][key]['soft_stop_sells'] = summaries[key]['soft_stop_sells']\n self.trade_stats[self.time_prefix][key]['total_profit'] = summaries[key]['total_profit']\n self.trade_stats[self.time_prefix][key]['total_loss'] = summaries[key]['total_loss']\n self.trade_stats[self.time_prefix][key]['total_fees'] = summaries[key]['total_fees']\n self.trade_stats[self.time_prefix][key]['balancer_refills'] = summaries[key]['balancer_refills']\n self.trade_stats[self.time_prefix][key]['balancer_remits'] = summaries[key]['balancer_remits']\n self.trade_stats[self.time_prefix][key]['balancer_profit'] = summaries[key]['balancer_profit']\n self.trade_stats[self.time_prefix][key]['balancer_loss'] = summaries[key]['balancer_loss']\n self.trade_stats[self.time_prefix][key]['balancer_fees'] = summaries[key]['balancer_fees']\n\n if summaries[key]['open_count'] > self.trade_stats[self.time_prefix][key]['most_open']:\n self.trade_stats[self.time_prefix][key]['most_open'] = summaries[key]['open_count']\n\n 
filter_items = [pair for pair in self.trades] + [base for base in config['min_base_volumes']] + ['global']\n self.save_attr('trade_stats', max_depth=2, filter_items=filter_items, filter_keys=[self.time_prefix])", "def set_cache_data(self) -> None:\n if isinstance(self.tx_storage, TransactionCacheStorage):\n hits = self.tx_storage.stats.get(\"hit\")\n misses = self.tx_storage.stats.get(\"miss\")\n if hits:\n self.transaction_cache_hits = hits\n if misses:\n self.transaction_cache_misses = misses", "def report_total_usage(self):\n work_time = 0\n if self.type == 'normal':\n work_time = self.fwk.fwk_global_time - self.start_exec_time\n elif self.type == 'sandia_work':\n self.total_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_usage = self.total_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.completed_work += self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.sim.rework_todo += self.fwk.fwk_global_time - self.start_exec_time\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_rework':\n self.total_rework_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.next_ckpt = self.sim.ckpt_interval - (self.fwk.fwk_global_time - self.start_exec_time)\n self.sim.rework_todo -= self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_ckpt':\n self.total_ckpt_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_ckpt_usage = self.total_ckpt_time * self.nproc\n if self.state == \"running\":\n # update last ckpt\n self.sim.last_ckpt = self.sim.completed_work\n elif self.state == \"failed\":\n # add work to rework\n self.sim.rework_todo += self.sim.next_ckpt\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_restart':\n print(\"time spent in rework\", self.fwk.fwk_global_time - self.start_exec_time)\n self.total_restart_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_restart_usage = self.total_restart_time * self.nproc\n #if self.state == \"running\":\n # nothing to do?\n # pass\n if self.state == \"failed\":\n # gotta try again\n self.state = \"ready\"\n self.num_faults += 1\n else:\n print(\"problems updating state in report_total_usage\")\n raise\n if self.type == 'normal':\n if self.sim.state == 'rework':\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else: # sim.state == 'work'\n if self.retry:\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else:\n self.total_time += work_time\n self.total_usage = self.total_time * self.nproc", "def record(self, time, increment):\n raise NotImplementedError(\"Abstract method not implemented.\")", "def track_duration(self):\n # raise NotImplementedError\n self.out_schema.append(\"run_duration\")\n self._track_duration = True\n # self.runner = GridExecutor.timer(self.runner)", "def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n 
\"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )", "def market_snap(self, timestamp = -1):\n # refresh account states\n self.trading_acc1.sync_account_with_exh()\n self.trading_acc2.sync_account_with_exh()\n\n signal__arbitrage_delta = self.get_signal__arbitrage_delta()\n signal__gdax_has_usd = self.get_signal__gdax_has_usd()\n signal__gdax_has_eth = self.get_signal__gdax_has_eth()\n signal__cex_has_eth = self.get_signal__cex_has_eth()\n\n def mk_audit_js():\n gdax_account = self.trading_acc1.get_account()\n cex_account = self.trading_acc2.get_account()\n transaction_t = epoch.current_milli_time() if timestamp == -1 else timestamp\n\n audit_js = OrderedDict()\n audit_js['strategy_run_id'] = self.run_id\n audit_js['timestamp'] = epoch.to_str(transaction_t)\n audit_js['timestamp__long'] = transaction_t\n audit_js['ticker'] = self.ticker\n audit_js['strategy_info'] = self._strategy_info\n\n audit_js['signal'] = OrderedDict()\n audit_js['signal']['signal__gdax_has_usd'] = signal__gdax_has_usd\n audit_js['signal']['signal__gdax_has_eth'] = signal__gdax_has_eth\n audit_js['signal']['signal__cex_has_eth'] = signal__cex_has_eth\n audit_js['signal']['signal__arbitrage_delta'] = signal__arbitrage_delta\n\n audit_js['total_usd__num'] = gdax_account.js['usd__num'] + cex_account.js['usd__num']\n audit_js['total_eth__num'] = gdax_account.js['eth__num'] + cex_account.js['eth__num']\n audit_js['gdax_account'] = gdax_account.js\n audit_js['cex_account'] = cex_account.js\n return audit_js\n\n snap_again = False # Only repeat if we have an gdax buy action\n if signal__gdax_has_usd['signal'] and signal__arbitrage_delta['signal']:\n exec_context = self.exec_gdax_buy(timestamp)\n snap_again = True\n\n # Audit\n audit_js = mk_audit_js()\n audit_js['action'] = exec_context\n audit = AuditTradeModel.build(audit_js)\n logger.info('-----Executed GDAX Buy-----')\n logger.info(audit)\n logger.info('---------------------------')\n audit.db_save(es)\n\n if signal__gdax_has_eth['signal']:\n exec_context = self.exec_eth_transfer()\n\n # Audit\n audit_js = mk_audit_js()\n audit_js['action'] = exec_context\n audit = AuditTradeModel.build(audit_js)\n logger.info('-----Executed ETH TRANSFER-----')\n logger.info(audit)\n logger.info('---------------------------')\n audit.db_save(es)\n\n if signal__cex_has_eth['signal']:\n exec_context = self.exec_cex_sell(timestamp)\n\n # Audit\n audit_js = mk_audit_js()\n audit_js['action'] = exec_context\n audit = AuditTradeModel.build(audit_js)\n logger.info('-----Executed CEX Sell-----')\n logger.info(audit)\n logger.info('---------------------------')\n audit.db_save(es)\n\n # Extra logging\n audit_js = mk_audit_js()\n logger.info('post-snapping states: \\n' + json.dumps(audit_js, indent=2))\n\n return snap_again", "def report(self, trade, is_entry):\n pass", "def store_info(self):\r\n _debug('Protocol: store_info' ) \r\n \r\n #Times\r\n if self.measure_type == '3PL':\r\n self.t_probe_p_s .append(self.t_probe) \r\n self.t_probe_m_s .append(self.t_probe) \r\n if self.measure_type == '4PL':\r\n self.t_probe_p_s .append(self.tp) \r\n self.t_probe_m_s .append(self.tm) \r\n \r\n self.t_pulseSequences_s.append(self.t_pulseSequences)\r\n self.t_process_s .append(self.t_process)\r\n #Total, accumulated, times\r\n self.t_tot_pulseSequences_s.append(self.t_tot_pulseSequences) \r\n self.t_tot_process_s .append(self.t_tot_process) \r\n #Rates\r\n self.Gp_guess_s .append(self.Gp_guess) #Mean of gamma+ \r\n self.Gm_guess_s .append(self.Gm_guess) #Mean 
of gamma- \r\n self.eGp_guess_s .append(self.eGp_guess) #Uncertainty of gamma+\r\n self.eGm_guess_s .append(self.eGm_guess) #Uncertainty of gamma- \r\n self.cov_GpGm_s .append(self.cov_GpGm) #Covariance of gamma- & gamma- \r\n #Other\r\n self.nb_iteration_s.append(self.iter)\r\n self.R_tot_s .append(self.R_tot)", "def _store_transaction(account, transaction):\n tr_tx = transaction['tx']\n meta = transaction.get('meta', {})\n\n if meta.get('TransactionResult') != 'tesSUCCESS':\n return\n\n amount = meta.get('delivered_amount') or tr_tx.get('Amount', {})\n\n is_unprocessed = (\n tr_tx['TransactionType'] == 'Payment' and\n tr_tx['Destination'] == account and\n isinstance(amount, dict) and\n not Transaction.objects.filter(hash=tr_tx['hash'])\n )\n if is_unprocessed:\n logger.info(\n format_log_message(\n 'Saving transaction: %s', transaction\n )\n )\n\n transaction_object = Transaction.objects.create(\n account=tr_tx['Account'],\n hash=tr_tx['hash'],\n destination=account,\n ledger_index=tr_tx['ledger_index'],\n destination_tag=tr_tx.get('DestinationTag'),\n source_tag=tr_tx.get('SourceTag'),\n status=Transaction.RECEIVED,\n currency=amount['currency'],\n issuer=amount['issuer'],\n value=amount['value']\n )\n\n logger.info(\n format_log_message(\n \"Transaction saved: %s\", transaction_object\n )\n )", "def tpc_finish(self, transaction):\n raise NotImplementedError", "def time_metrics(self, stats, root, parent):\n\n product = self.product\n operation = self.operation or 'other'\n target = self.target\n\n # Determine the scoped metric\n\n statement_metric_name = 'Datastore/statement/%s/%s/%s' % (product,\n target, operation)\n\n operation_metric_name = 'Datastore/operation/%s/%s' % (product,\n operation)\n\n if target:\n scoped_metric_name = statement_metric_name\n else:\n scoped_metric_name = operation_metric_name\n\n yield TimeMetric(name=scoped_metric_name, scope=root.path,\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped rollup metrics\n\n yield TimeMetric(name='Datastore/all', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/all' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n if root.type == 'WebTransaction':\n yield TimeMetric(name='Datastore/allWeb', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allWeb' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n else:\n yield TimeMetric(name='Datastore/allOther', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allOther' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped operation metric\n\n yield TimeMetric(name=operation_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped statement metric\n\n if target:\n yield TimeMetric(name=statement_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped instance Metric\n\n if self.instance_hostname and self.port_path_or_id:\n\n instance_metric_name = 'Datastore/instance/%s/%s/%s' % (product,\n self.instance_hostname, self.port_path_or_id)\n\n yield TimeMetric(name=instance_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)", "def performance_history(self, request, pk=None, **kwargs):\n # Get the goal even though we don't need it (we could just use the pk)\n # so we can ensure we have permission to do so.\n goal = self.get_object()\n\n # - Get all 
the transaction with this goal involved that are of reason 'Execution'.\n # We want the volume, ticker id, date ordered by date. [(date, {ticker: vol}, ...]\n qs = Transaction.objects.filter(Q(to_goal=goal) | Q(from_goal=goal),\n reason=Transaction.REASON_EXECUTION).order_by('executed')\n txs = qs.values_list('execution_distribution__execution__executed',\n 'execution_distribution__execution__asset__id',\n 'execution_distribution__volume')\n ts = []\n entry = (None,)\n aids = set()\n # If there were no transactions, there can be no performance\n if len(txs) == 0:\n return Response([])\n\n # Because executions are stored with timezone, but other things are just as date, we need to make datetimes\n # naive before doing date arithmetic on them.\n bd = timezone.make_naive(txs[0][0]).date()\n ed = timezone.make_naive(timezone.now()).date()\n for tx in txs:\n aids.add(tx[1])\n txd = timezone.make_naive(tx[0]).date()\n if txd == entry[0]:\n entry[1][tx[1]] += tx[2]\n else:\n if entry[0] is not None:\n ts.append(entry)\n entry = (txd, defaultdict(int))\n entry[1][tx[1]] = tx[2]\n ts.append(entry)\n\n # - Get the time-series of prices for each instrument from the first transaction date until now.\n # Fill empty dates with previous value [(date, {ticker: price}, ...]\n pqs = DailyPrice.objects.filter(date__range=(bd, ed),\n instrument_content_type=ContentType.objects.get_for_model(Ticker).id,\n instrument_object_id__in=aids)\n prices = pqs.to_timeseries(fieldnames=['price', 'date', 'instrument_object_id'],\n index='date',\n storage='long',\n pivot_columns='instrument_object_id',\n values='price')\n # Remove negative prices and fill missing values\n # We replace negs with None so they are interpolated.\n prices[prices <= 0] = None\n prices = prices.reindex(pd.date_range(bd, ed), method='ffill').fillna(method='bfill')\n\n # For each day, calculate the performance\n piter = prices.itertuples()\n res = []\n # Process the first day - it's special\n row = next(piter)\n p_m1 = row[1:]\n vols_m1 = [0] * len(prices.columns)\n tidlocs = {tid: ix for ix, tid in enumerate(prices.columns)}\n for tid, vd in ts.pop(0)[1].items():\n vols_m1[tidlocs[tid]] += vd\n res.append((dt2ed(row[0]), 0)) # First day has no performance as there wasn't a move\n # Process the rest\n for row in piter:\n # row[0] (a datetime) is a naive timestamp, so we don't need to convert it\n if ts and row[0].date() == ts[0][0]:\n vols = vols_m1.copy()\n dtrans = ts.pop(0)[1] # The transactions for the current processed day.\n for tid, vd in dtrans.items():\n vols[tidlocs[tid]] += vd\n # The exposed assets for the day. These are the assets we know for sure were exposed for the move.\n pvol = list(map(min, vols, vols_m1))\n else:\n vols = vols_m1\n pvol = vols\n pdelta = list(map(operator.sub, row[1:], p_m1)) # The change in price from yesterday\n impact = sum(map(operator.mul, pvol, pdelta)) # The total portfolio impact due to price moves for exposed assets.\n b_m1 = sum(map(operator.mul, pvol, p_m1)) # The total portfolio value yesterday for the exposed assets.\n perf = 0 if b_m1 == 0 else impact / b_m1\n # row[0] (a datetime) is a naive timestamp, so we don't need to convert it\n res.append((dt2ed(row[0]), decimal.Decimal.from_float(perf).quantize(decimal.Decimal('1.000000'))))\n p_m1 = row[1:]\n vols_m1 = vols[:]\n\n return Response(res)", "def __preprocess_transactions(self):\n\n p_bar = tqdm(range(14), desc=\"Preprocessing transactions\", leave=False)\n\n try:\n # 0. 
If optional fields not in the transactions add missing\n optional_fields = [\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n \"Fees\",\n \"Premium\",\n \"ISIN\",\n ]\n if not set(optional_fields).issubset(set(self.__transactions.columns)):\n for field in optional_fields:\n if field not in self.__transactions.columns:\n self.__transactions[field] = np.nan\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 1. Convert Date to datetime\n self.__transactions[\"Date\"] = pd.to_datetime(self.__transactions[\"Date\"])\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 2. Sort transactions by date\n self.__transactions = self.__transactions.sort_values(by=\"Date\")\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 3. Capitalize Ticker and Type [of instrument...]\n self.__transactions[\"Ticker\"] = self.__transactions[\"Ticker\"].map(\n lambda x: x.upper() if isinstance(x, str) else x\n )\n self.__transactions[\"Type\"] = self.__transactions[\"Type\"].map(\n lambda x: x.upper() if isinstance(x, str) else x\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 4. Translate side: [\"deposit\", \"buy\"] -> 1 and [\"withdrawal\", \"sell\"] -> -1\n self.__transactions[\"Signal\"] = self.__transactions[\"Side\"].map(\n lambda x: 1\n if x.lower() in [\"deposit\", \"buy\"]\n else (-1 if x.lower() in [\"withdrawal\", \"sell\"] else 0)\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 5. Convert quantity to signed integer\n self.__transactions[\"Quantity\"] = (\n abs(self.__transactions[\"Quantity\"]) * self.__transactions[\"Signal\"]\n )\n\n # Adjust quantity and price for splits\n for ticker in self.__transactions[\"Ticker\"].unique():\n try:\n splits_df = get_splits(ticker)\n if not splits_df.empty:\n splits_df = splits_df.tz_localize(tz=None)\n for split_date in splits_df.index:\n self.__transactions[\"Quantity\"] = np.where(\n (self.__transactions[\"Ticker\"] == ticker)\n & (self.__transactions[\"Date\"] < split_date),\n self.__transactions[\"Quantity\"]\n * splits_df.loc[split_date].values,\n self.__transactions[\"Quantity\"],\n )\n self.__transactions[\"Price\"] = np.where(\n (self.__transactions[\"Ticker\"] == ticker)\n & (self.__transactions[\"Date\"] < split_date),\n self.__transactions[\"Price\"]\n / splits_df.loc[split_date].values,\n self.__transactions[\"Price\"],\n )\n\n except Exception:\n console.print(\"\\nCould not get splits adjusted\")\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 6. Determining the investment/divestment value\n self.__transactions[\"Investment\"] = (\n self.__transactions[\"Quantity\"] * self.__transactions[\"Price\"]\n + self.__transactions[\"Fees\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD)\n crypto_trades = self.__transactions[self.__transactions.Type == \"CRYPTO\"]\n self.__transactions.loc[\n (self.__transactions.Type == \"CRYPTO\"), \"Ticker\"\n ] = [\n f\"{crypto}-{currency}\"\n for crypto, currency in zip(\n crypto_trades.Ticker, crypto_trades.Currency\n )\n ]\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 8. 
Reformat STOCK/ETF tickers to yfinance format if ISIN provided.\n\n # If isin not valid ticker is empty\n self.__transactions[\"yf_Ticker\"] = self.__transactions[\"ISIN\"].apply(\n lambda x: yf.utils.get_ticker_by_isin(x) if not pd.isna(x) else np.nan\n )\n\n empty_tickers = list(\n self.__transactions[\n (self.__transactions[\"yf_Ticker\"] == \"\")\n | (self.__transactions[\"yf_Ticker\"].isna())\n ][\"Ticker\"].unique()\n )\n\n # If ticker from isin is empty it is not valid in yfinance, so check if user provided ticker is supported\n removed_tickers = []\n for item in empty_tickers:\n with suppress_stdout():\n # Suppress yfinance failed download message if occurs\n valid_ticker = not (\n yf.download(\n item,\n start=datetime.datetime.now() + datetime.timedelta(days=-5),\n progress=False,\n ).empty\n )\n if valid_ticker:\n # Invalid ISIN but valid ticker\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == item, \"yf_Ticker\"\n ] = np.nan\n else:\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == item, \"yf_Ticker\"\n ] = \"\"\n removed_tickers.append(item)\n\n # Merge reformatted tickers into Ticker\n self.__transactions[\"Ticker\"] = self.__transactions[\"yf_Ticker\"].fillna(\n self.__transactions[\"Ticker\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 9. Remove unsupported ISINs that came out empty\n self.__transactions.drop(\n self.__transactions[self.__transactions[\"Ticker\"] == \"\"].index,\n inplace=True,\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 10. Create tickers dictionary with structure {'Type': [Ticker]}\n unsupported_type = self.__transactions[\n (~self.__transactions[\"Type\"].isin([\"STOCK\", \"ETF\", \"CRYPTO\"]))\n ].index\n if unsupported_type.any():\n self.__transactions.drop(unsupported_type, inplace=True)\n console.print(\n \"[red]Unsupported transaction type detected and removed. Supported types: stock, etf or crypto.[/red]\"\n )\n\n for ticker_type in set(self.__transactions[\"Type\"]):\n self.tickers[ticker_type] = list(\n set(\n self.__transactions[\n self.__transactions[\"Type\"].isin([ticker_type])\n ][\"Ticker\"]\n )\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 11. Create list with tickers except cash\n self.tickers_list = list(set(self.__transactions[\"Ticker\"]))\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 12. Save transactions inception date\n self.inception_date = self.__transactions[\"Date\"].iloc[0]\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 13. Populate fields Sector, Industry and Country\n if (\n self.__transactions.loc[\n self.__transactions[\"Type\"] == \"STOCK\",\n optional_fields,\n ]\n .isnull()\n .values.any()\n ):\n # If any fields is empty for stocks (overwrites any info there)\n self.__load_company_data()\n\n p_bar.n += 1\n p_bar.refresh()\n\n # Warn user of removed ISINs\n if removed_tickers:\n p_bar.disable = True\n console.print(\n f\"\\n[red]The following tickers are not supported and were removed: {removed_tickers}.\"\n f\"\\nManually edit the 'Ticker' field with the proper Yahoo Finance suffix or provide a valid ISIN.\"\n f\"\\nSuffix info on 'Yahoo Finance market coverage':\"\n \" https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html\"\n f\"\\nE.g. 
IWDA -> IWDA.AS[/red]\\n\"\n )\n except Exception:\n console.print(\"\\nCould not preprocess transactions.\")\n raise", "def track_transaction(self, transaction, items):\n trans = Transaction()\n trans.order_id = transaction.get('order_id', None)\n trans.total = transaction.get('total', None)\n trans.tax = transaction.get('tax', None)\n trans.affiliation = transaction.get('affiliation', None)\n trans.shipping = transaction.get('shipping', None)\n trans.city = transaction.get('city', None)\n trans.state = transaction.get('state', None)\n trans.country = transaction.get('country', None)\n\n for item in items:\n gitem = gaItem()\n gitem.sku = item.get('sku', None)\n gitem.name = item.get('name', None)\n gitem.variation = item.get('variation', None)\n gitem.price = item.get('price', None)\n gitem.quantity = item.get('quantity', 1)\n trans.add_item(gitem)\n\n self.ga_tracker.track_transaction(transaction=trans,session=self.ga_session,visitor=self.ga_visitor)", "def internal_event (self):\n self.clock_time += 1\n self.log()", "def _analyze(self):\r\n if self.value is None or self.value == self.previous:\r\n pass\r\n elif self._operation == \"add\":\r\n self._additions = self.value\r\n elif self._operation == \"remove\":\r\n self._removals = self.value\r\n elif self.previous is None:\r\n self._assignments = self.value\r\n else:\r\n # partial update time\r\n self._additions = (self.value - self.previous) or None\r\n self._removals = (self.previous - self.value) or None\r\n self._analyzed = True", "def track_tick_processing_performance(self, duration):\n\n self.parse_count += 1\n self.total_parse_time += duration\n self.mean_parse_time = self.total_parse_time / self.parse_count", "def start_transaction(self) -> None:\n pass", "def log_all(self):\n self.save_raw()\n self.log()", "def test_peformance(self):\n timedeltas = []\n for file in os.listdir(settings.ANALYSIS_REPORT_FOLDER):\n _file = open(os.path.join(settings.ANALYSIS_REPORT_FOLDER, file), \"r\")\n report = json.loads(_file.read())\n timedeltas.append(\n parse_datetime(report['finish']) - parse_datetime(report['start']))\n _file.close()\n\n # number of queue\n print('NUMBER OF QUEUE = {}'.format(len(timedeltas)))\n\n # get average time\n average_timedelta = sum(timedeltas, datetime.timedelta(0)) / len(timedeltas)\n print('AVERAGE = {}'.format(average_timedelta))\n self.assertTrue(average_timedelta < datetime.timedelta(minutes=3))\n\n # get total process time\n total = timedeltas[0]\n for delta in timedeltas[:1]:\n total += delta\n print('TOTAL = {}'.format(total))\n self.assertTrue(total < datetime.timedelta(minutes=3 * len(timedeltas)))", "def test_timeout_processing(self):\n # setup\n self.transaction_behaviour.processing_time = None\n\n # operation\n self.transaction_behaviour._timeout_processing()\n\n # after\n self.assert_quantity_in_outbox(0)", "def _save_internal_transactions(self, blocks_traces):\n docs = [\n self._preprocess_internal_transaction(transaction)\n for transaction in blocks_traces\n if transaction[\"transactionHash\"]\n ]\n if docs:\n for chunk in bulk_chunks(docs, None, BYTES_PER_CHUNK):\n self.client.bulk_index(docs=chunk, index=self.indices[\"internal_transaction\"], doc_type=\"itx\",\n id_field=\"hash\", refresh=True)", "def test_log_last_completed_datetime(self):\n initial_count = CostUsageReportStatus.objects.count()\n saver = ReportStatsDBAccessor(\"myreport\", self.manifest_id)\n saver.log_last_completed_datetime()\n self.assertIsNotNone(saver.get_last_completed_datetime())\n saver.delete()\n 
self.assertEqual(CostUsageReportStatus.objects.count(), initial_count)", "def record_failure(self, now=None) -> None:\n logging.info('Recording failure at %r', now or int(time.time()))\n self.failure_timestamp = now or int(time.time())\n self.put()", "def transaction(fn):\n @wraps(fn)\n def transaction_inner(*args, **kwargs): #1\n start = time()\n stmp_id = id_gen()\n session = operation.session\n sessionid = id(session)\n \n # set distributed transaction id to 0 for single transaction\n try:\n operation.id\n except: \n operation.id = str(uuid4())\n \n try:\n # get runtime info\n cp = current_process()\n ct = current_thread() \n \n # format request params\n params = []\n for item in args:\n params.append(unicode(item))\n for k,v in kwargs.iteritems():\n params.append(u\"'%s':'%s'\" % (k, v))\n \n # call internal function\n res = fn(*args, **kwargs)\n \n session.commit()\n elapsed = round(time() - start, 4)\n logger.debug(u'%s.%s - %s - transaction - %s - %s - OK - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n \n return res\n except ModelError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n if ex.code not in [409]:\n #logger.error(ex.desc, exc_info=1)\n logger.error(ex.desc)\n \n session.rollback()\n raise TransactionError(ex.desc, code=ex.code)\n except IntegrityError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n logger.error(ex.orig, exc_info=1)\n logger.error(ex.orig)\n\n session.rollback()\n raise TransactionError(ex.orig)\n except DBAPIError as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n #logger.error(ex.orig, exc_info=1)\n logger.error(ex.orig)\n \n session.rollback()\n raise TransactionError(ex.orig)\n \n except Exception as ex:\n elapsed = round(time() - start, 4)\n logger.error(u'%s.%s - %s - transaction - %s - %s - KO - %s' % (\n operation.id, stmp_id, sessionid, fn.__name__, \n params, elapsed))\n #logger.error(ex, exc_info=1)\n logger.error(ex)\n \n session.rollback()\n raise TransactionError(ex)\n\n return transaction_inner", "def report_store(analysis_store, helpers, timestamp_yesterday):\n case = analysis_store.get_cases()[0]\n helpers.add_analysis(\n analysis_store, case, pipeline=Pipeline.MIP_DNA, started_at=timestamp_yesterday\n )\n helpers.add_analysis(analysis_store, case, pipeline=Pipeline.MIP_DNA, started_at=datetime.now())\n # Mock sample dates to calculate processing times\n for family_sample in analysis_store.get_case_samples_by_case_id(\n case_internal_id=case.internal_id\n ):\n family_sample.sample.ordered_at = timestamp_yesterday - timedelta(days=2)\n family_sample.sample.received_at = timestamp_yesterday - timedelta(days=1)\n family_sample.sample.prepared_at = timestamp_yesterday\n family_sample.sample.sequenced_at = timestamp_yesterday\n family_sample.sample.delivered_at = datetime.now()\n return analysis_store", "def transaction(self, context: InjectionContext = None) -> \"ProfileSession\":", "def progress(transactionId, function):\n _log.info(f\"{transactionId.hex} PROGRESS {function}\")", "def log_metrics_for_packet(self, packet: \"Packet\") -> None:\n client_timestamp = packet.client_timestamp\n router_incoming_timestamp = 
packet.router_incoming_timestamp\n router_outgoing_timestamp = packet.router_outgoing_timestamp\n server_timestamp = packet.server_timestamp\n response_timestamp = time.time()\n if router_outgoing_timestamp is None:\n router_outgoing_timestamp = server_timestamp\n if router_incoming_timestamp is None:\n router_incoming_timestamp = router_outgoing_timestamp\n if client_timestamp is None:\n client_timestamp = router_incoming_timestamp\n client_to_router = max(0, router_incoming_timestamp - client_timestamp)\n router_processing = max(0, router_outgoing_timestamp - router_incoming_timestamp)\n router_to_server = max(0, server_timestamp - router_outgoing_timestamp)\n server_processing = max(0, response_timestamp - server_timestamp)\n e2e_time = max(0, response_timestamp - client_timestamp)\n E2E_PACKET_LATENCY.labels(packet_type=packet.type, stage=\"client_to_router\").observe(\n client_to_router\n )\n E2E_PACKET_LATENCY.labels(packet_type=packet.type, stage=\"router_processing\").observe(\n router_processing\n )\n E2E_PACKET_LATENCY.labels(packet_type=packet.type, stage=\"router_to_server\").observe(\n router_to_server\n )\n E2E_PACKET_LATENCY.labels(packet_type=packet.type, stage=\"server_processing\").observe(\n server_processing\n )\n E2E_PACKET_LATENCY.labels(packet_type=packet.type, stage=\"e2e_time\").observe(e2e_time)", "def internal_event(self):\n # log activity\n self.log_activity(LogEntry(\n sys_time=time(),\n logical_time=self.logical_clock,\n action=\"work\"\n ))", "def report_performance(self):\n performance = self.amygdala.visualize(self.timestep, \n self.name, \n self.log_dir)\n print('Final performance is {0:.3}'.format(performance))\n self.backup()\n return performance", "async def test_ignore_transaction(self):\n self.set_source_parameter(\"transactions_to_ignore\", [self.API2])\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"123\", entities=[])", "def rest_api_log(self):\n with self.resource_lock:\n pass", "def report_trial(self):\n pass", "def monitor_usage(records_output_csv, total_time, delay_time):\n total_time *= 60.0 # convert to seconds\n get_usage(total_time, delay_time, records_output_csv)", "def _log_performance_metrics(\n self, task: \"tasks.ClassyTask\", local_variables: Dict[str, Any]\n ) -> None:\n phase_type = task.phase_type\n batches = len(task.losses)\n\n if self.start_time is None:\n logging.warning(\"start_time not initialized\")\n else:\n # Average batch time calculation\n total_batch_time = time.time() - self.start_time\n average_batch_time = total_batch_time / batches\n logging.info(\n \"Average %s batch time (ms) for %d batches: %d\"\n % (phase_type, batches, 1000.0 * average_batch_time)\n )\n\n # Train step time breakdown\n if local_variables.get(\"perf_stats\") is None:\n logging.warning('\"perf_stats\" not set in local_variables')\n elif task.train:\n logging.info(\n \"Train step time breakdown (rank {}):\\n{}\".format(\n get_rank(), local_variables[\"perf_stats\"].report_str()\n )\n )", "async def _record_logs(self, report):\n\t\tif report.action == Frame.Report.PARSE:\n\t\t\t# Collects the tests parsing log for further writing to Test_Parser.log\n\t\t\tif report.success:\n\t\t\t\tself._parse_logs[\"success\"] += [report.log]\n\t\t\telse:\n\t\t\t\tself._parse_logs[\"failure\"] += [report.log]\n\t\telif report.action == Frame.Report.EXECUTE:\n\t\t\t# Writes a test log and dump to the results directory\n\t\t\ttest_log = (\"EXECUTE STATUS: SUCCESS\\n\\n\" if report.success 
else \"EXECUTE STATUS: FAILURE\\n\\n\") + report.log\n\t\t\tfor task in as_completed([self._event_loop.run_in_executor(self._thread_executor, FileSystem.dump_to, \n\t\t\t\t self._result_directory_name + \"/Log/\" + report.test_name + \".log\", test_log)]):\n\t\t\t\tawait task\n\t\t\tfor task in as_completed([self._event_loop.run_in_executor(self._thread_executor, TestLogger._write_test_dump, \n\t\t\t\t self._result_directory_name + \"/Dump/\" + report.test_name + \".pcap\", report.dump)]):\n\t\t\t\tawait task", "def perform_patient_audit(self):\n \n # Continuous audit\n while True:\n \n # General patient audit (put audit results in dictionary and add to audit DataFrame)\n audit = dict()\n audit['day'] = self._env.now\n audit['negative'] = len(self._pop.negative_patients)\n audit['positive'] = len(self._pop.positive_patients)\n audit['recovered'] = len(self._pop.recovered_patients)\n audit['inpatient'] = len(self._pop.inpatients)\n audit['died'] = len(self._pop.died_patients)\n audit['total'] = len(self._pop.patients)\n audit['unallocated'] = len(self._pop.unallocated_patients)\n # Add dictionary to existing DataFrame\n self.patient_audit = \\\n self.patient_audit.append(audit, ignore_index=True)\n \n # Displaced patient audit (put audit results in dictionary and add to audit DataFrame)\n audit = dict()\n # Get displaced times\n additional_time = []\n for patient in self._pop.displaced_patients:\n additional_time.append(patient.displaced_additional_time)\n audit['day'] = self._env.now\n audit['number'] = len(self._pop.displaced_patients)\n if len(additional_time) > 0:\n # Dispalced patients exist, calculate statistics\n audit['add_time_min'] = np.min(additional_time)\n audit['add_time_1Q'] = np.quantile(additional_time, 0.25)\n audit['add_time_median'] = np.quantile(additional_time, 0.50)\n audit['add_time_3Q'] = np.quantile(additional_time, 0.75)\n audit['add_time_max'] = np.max(additional_time)\n audit['add_time_total'] = np.sum(additional_time)\n else:\n # No displaced patients exist, set all statistics to zero\n audit['add_time_min'] = 0\n audit['add_time_1Q'] = 0\n audit['add_time_median'] = 0\n audit['add_time_3Q'] = 0\n audit['add_time_max'] = 0\n audit['add_time_total'] = 0\n # Add dictionary to existing DataFrame\n self.displaced_audit = \\\n self.displaced_audit.append(audit, ignore_index=True)\n \n # Trigger next audit after interval\n yield self._env.timeout(self._params.audit_interval)", "def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def _preprocess_internal_transaction(self, transaction):\n transaction = transaction.copy()\n for field in [\"action\", \"result\"]:\n if (field in transaction.keys()) and (transaction[field]):\n transaction.update(transaction[field])\n del transaction[field]\n for field in [\"value\", \"gasPrice\", \"gasUsed\"]:\n if (field in transaction.keys()) and (transaction[field]):\n value_string = transaction[field][0:2] + \"0\" + transaction[field][2:]\n transaction[field] = int(value_string, 0) / 1e18\n if \"gasUsed\" in transaction:\n transaction[\"gasUsed\"] = int(transaction[\"gasUsed\"] * 1e18)\n return 
transaction", "def _assess(self):\n # get eruptions\n with open(os.sep.join(getfile(currentframe()).split(os.sep)[:-2]+['data','eruptive_periods.txt']),'r') as fp:\n self.tes = [datetimeify(ln.rstrip()) for ln in fp.readlines()]\n # check if data file exists\n self.exists = os.path.isfile(self.file)\n if not self.exists:\n t0 = datetime(2011,1,1)\n t1 = datetime(2011,1,2)\n self.update(t0,t1)\n # check date of latest data in file\n self.df = pd.read_csv(self.file, index_col=0, parse_dates=[0,], infer_datetime_format=True)\n self.ti = self.df.index[0]\n self.tf = self.df.index[-1]", "def record_time_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Scope is forced to be empty string if None as\n # scope of None is reserved for apdex metrics.\n\n key = (metric.name, metric.scope or '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = TimeStats(call_count=1,\n total_call_time=metric.duration,\n total_exclusive_call_time=metric.exclusive,\n min_call_time=metric.duration,\n max_call_time=metric.duration,\n sum_of_squares=metric.duration ** 2)\n self.__stats_table[key] = stats\n else:\n stats.merge_time_metric(metric)\n\n return key", "def CallLoggedAndAccounted(f):\n\n @functools.wraps(f)\n def Decorator(*args, **kwargs):\n try:\n start_time = time.time()\n result = f(*args, **kwargs)\n latency = time.time() - start_time\n\n DB_REQUEST_LATENCY.RecordEvent(latency, fields=[f.__name__])\n logging.debug(\"DB request %s SUCCESS (%.3fs)\", f.__name__, latency)\n\n return result\n except db.Error as e:\n DB_REQUEST_ERRORS.Increment(fields=[f.__name__, \"grr\"])\n logging.debug(\"DB request %s GRR ERROR: %s\", f.__name__,\n utils.SmartUnicode(e))\n raise\n except Exception as e:\n DB_REQUEST_ERRORS.Increment(fields=[f.__name__, \"db\"])\n logging.debug(\"DB request %s INTERNAL DB ERROR : %s\", f.__name__,\n utils.SmartUnicode(e))\n raise\n\n return Decorator", "def trace(self, trace=...):\n ...", "def __init__(self):\n self._profiling_mode = False\n self._total_time_ms = 0.0\n self._traced_records = []\n self._statistical_results = {}", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def emit(self, record):\n self.eng.dispose()\n trys = 3\n info = {key: value for key, value in record.__dict__.items() if not key.startswith(\"__\")}\n for t in range(trys):\n # Having what i think is an issue to reflect so try a couple times and don't complain if breaks\n failed = False\n try:\n with self.eng.connect() as con:\n # formating values to convert from python or parsl to db standards\n info['task_fail_history'] = str(info.get('task_fail_history', None))\n info['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(record.created))\n run_id = info['run_id']\n\n # if workflow or task has completed, update their entries with the time.\n if 'time_completed' in info.keys() and info['time_completed'] != 'None':\n workflows = self.meta.tables['workflows']\n up = workflows.update().values(time_completed=info['time_completed']).where(workflows.c.run_id == run_id)\n con.execute(up)\n return\n if 'task_time_returned' in info.keys() and 
info['task_time_returned'] is not None:\n workflow = self.meta.tables['task']\n up = workflow.update().values(task_time_returned=info['task_time_returned']).where(workflow.c.task_id == info['task_id'])\\\n .where(workflow.c.run_id == run_id)\n con.execute(up)\n\n # create workflows table if this is a new database without one\n if 'workflows' not in self.meta.tables.keys():\n workflows = create_workflows_table(self.meta)\n self.meta.create_all(con)\n # if this is the first sight of the workflow, add it to the workflows table\n if len(con.execute(self.meta.tables['workflows'].select(self.meta.tables['workflows'].c.run_id == run_id)).fetchall()) == 0:\n workflows = self.meta.tables['workflows']\n ins = workflows.insert().values(**{k: v for k, v in info.items() if k in workflows.c})\n con.execute(ins)\n\n # if log has task counts, update the workflow entry in the workflows table\n if 'tasks_completed_count' in info.keys():\n workflows = self.meta.tables['workflows']\n up = workflows.update().values(tasks_completed_count=info['tasks_completed_count']).where(workflows.c.run_id == run_id)\n con.execute(up)\n if 'tasks_failed_count' in info.keys():\n workflows = self.meta.tables['workflows']\n up = workflows.update().values(tasks_failed_count=info['tasks_failed_count']).where(workflows.c.run_id == run_id)\n con.execute(up)\n\n # create task table if this is a new run without one\n if 'task' not in self.meta.tables.keys():\n workflow = create_task_table(self.meta)\n self.meta.create_all(con)\n\n # check to make sure it is a task log and not just a workflow overview log\n if info.get('task_id', None) is not None:\n # if this is the first sight of the task in the workflow, add it to the workflow table\n if len(con.execute(self.meta.tables['task'].select(self.meta.tables['task'].c.task_id == info['task_id'])\n .where(self.meta.tables['task'].c.run_id == run_id)).fetchall()) == 0:\n if 'psutil_process_pid' in info.keys():\n # this is the race condition that a resource log is before a status log so ignore this resource update\n return\n\n workflow = self.meta.tables['task']\n ins = workflow.insert().values(**{k: v for k, v in info.items() if k in workflow.c})\n con.execute(ins)\n\n if 'task_status' in info.keys():\n # if this is the first sight of a task, create a task_status_table to hold this task's updates\n if 'task_status' not in self.meta.tables.keys():\n task_status_table = create_task_status_table(self.meta)\n self.meta.create_all(con)\n con.execute(task_status_table.insert().values(**{k: v for k, v in info.items() if k in task_status_table.c}))\n # if this status table already exists, just insert the update\n else:\n task_status_table = self.meta.tables['task_status']\n con.execute(task_status_table.insert().values(**{k: v for k, v in info.items() if k in task_status_table.c}))\n return\n\n if 'psutil_process_pid' in info.keys():\n # if this is a task resource update then handle that, if the resource table DNE then create it\n if 'task_resources' not in self.meta.tables.keys():\n task_resource_table = create_task_resource_table(self.meta)\n self.meta.create_all(con)\n con.execute(task_resource_table.insert().values(**{k: v for k, v in info.items() if k in task_resource_table.c}))\n # if this resource table already exists, just insert the update\n else:\n task_resource_table = self.meta.tables['task_resources']\n con.execute(task_resource_table.insert().values(**{k: v for k, v in info.items() if k in task_resource_table.c}))\n return\n\n except Exception as e:\n logger.error(\"Try a couple times 
since some known issues can occur. Number of Failures: {} Error: {}\".format(t, str(e)))\n failed = True\n time.sleep(5)\n if not failed:\n return", "def test_upsert_metadata_for_rate_plan(self):\n pass", "def do_timestamp_messages(self, messages):\n timestamp = self.env.now\n self.reception_records[timestamp] = messages\n log.debug(\"{} recorded {}\".format(self, self.reception_records))", "def get_gdax_historical_data():\n \n start = None\n while not start:\n start,end,tid = getStartAndEndHistoric()\n if not start:\n time.sleep(60)\n #Todo: change this to 1min\n firsttimestamp = start\n engine = sa.create_engine(sql_address)\n products = [\"LTC-USD\",\"LTC-BTC\",\"ETH-USD\",\"ETH-BTC\",\"BTC-USD\"]\n public_client = gdax.PublicClient()\n deltat = datetime.timedelta(seconds = 200)\n timewindows = []\n while end - start > datetime.timedelta(seconds=0):\n if start + deltat > end:\n endx = end\n else:\n endx = start + deltat\n timewindows.append([start,endx])\n start += deltat\n results = []\n total = len(timewindows)\n current_idx = 0\n timeold = time.time()\n numofqueries = 0\n engine = sa.create_engine(sql_address)\n Base.metadata.bind = engine\n DBSession = sa.orm.sessionmaker()\n DBSession.bind = engine\n session = DBSession()\n for startx,endx in timewindows:\n\n current_idx += 1\n for i in products:\n repeat = True\n while repeat:\n\n #delay if ratelimts are close\n if numofqueries < 3:\n while time.time() - timeold < 1:\n time.sleep(0.05)\n \n timeold = time.time()\n numofqueries = 0\n try:\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n except:\n time.sleep(30)\n public_client = gdax.PublicClient()\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n numofqueries += 1\n\n #rate limit exceeded has 'message' as dict.\n if not 'message' in alist:\n repeat = False\n for a in alist:\n a[0] = datetime.datetime.fromtimestamp(float(a[0]))\n tmp = i.split('-')\n d = dict(coin = tmp[0], currency = tmp[1], timestamp = a[0], low=a[1], high=a[2], open=a[3], close=a[4], volume=a[5])\n results.append(d)\n lasttimestamp = a[0]\n\n #upload with batch size of 10000\n if len(results) > 10000:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n if len(results) > 0:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n\n update = 
session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n update.status='complete'\n order = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'order')).first()\n order.status='complete'\n session.commit()", "def test_log_last_started_datetime(self):\n initial_count = CostUsageReportStatus.objects.count()\n saver = ReportStatsDBAccessor(\"myreport\", self.manifest_id)\n saver.log_last_started_datetime()\n self.assertIsNotNone(saver.get_last_started_datetime())\n saver.delete()\n self.assertEqual(CostUsageReportStatus.objects.count(), initial_count)", "def commit_transaction(self) -> None:\n pass", "def exec_eth_transfer(self, timestamp=-1):\n ticker = self.ticker\n gdax_trading_account = self.trading_acc1\n cex_trading_account = self.trading_acc2\n gdax_eth_amount = gdax_trading_account.get_balance(ticker)\n cex_deposit_address = cex_trading_account.get_crypto_deposit_address(ticker)\n\n # transfer\n gdax_trading_account.transfer_crypto(ticker, cex_deposit_address, gdax_eth_amount)\n\n # audit info\n audit_info = {\n 'ticker': ticker,\n 'action_type': 'exec_eth_transfer',\n 'gdax_eth_amount': gdax_eth_amount,\n 'cex_deposit_address': cex_deposit_address\n }\n return audit_info", "def record_inference_stats(self, nms_step_time: float, inference_round_trip_time: Tuple[float, float, float], inference_step_time: float):\n\n # inference_round_trip_time is an average time needed for a step\n self.inference_times.append(inference_round_trip_time)\n # inference_step_time is the time taken to complete the step, and used to calculate the throughput\n inference_throughput = self.image_count/inference_step_time\n self.inference_throughputs.append(inference_throughput)\n\n self.nms_times.append(nms_step_time)\n\n total_step_time = inference_step_time + nms_step_time\n self.total_times.append(total_step_time)\n\n total_throughput = self.image_count/total_step_time\n self.total_throughputs.append(total_throughput)", "def trace_time(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kw):\n begin = time.time()\n logging.debug(\"begin at %s: %s()\" % (__format_time(begin), func.__name__))\n result = func(*args, **kw)\n end = time.time()\n logging.debug(\n \"end at %s, cost %.2fs: %s() -- return type: %s\"\n % (__format_time(end), end - begin, func.__name__, type(result).__name__))\n return result\n\n return wrapper", "def logging(self, function):\n avg_nms_time_per_step = sum(self.nms_times)/len(self.nms_times)\n avg_total_time_per_step = sum(self.total_times)/len(self.total_times)\n\n avg_min_latency = [x[0] for x in self.inference_times]\n avg_max_latency = [x[1] for x in self.inference_times]\n avg_latency = [x[2] for x in self.inference_times]\n\n function(\"Inference stats: image size {}x{}, batches per step {}, batch size {}, {} steps\".format(\n self.cfg.model.image_size, self.cfg.model.image_size, self.cfg.ipuopts.batches_per_step, self.cfg.model.micro_batch_size, len(self.total_times)\n ))\n function(\"--------------------------------------------------\")\n function(\"Inference\")\n function(\"Average Min Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_min_latency)/len(self.inference_times)))\n function(\"Average Max Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_max_latency)/len(self.inference_times)))\n function(\"Average Latency per Batch: {:.3f} ms\".format(1000 * 
sum(avg_latency)/len(self.inference_times)))\n function(\"Average Inference Throughput: {:.3f} img/s\".format(sum(self.inference_throughputs)/len(self.inference_throughputs)))\n function(\"--------------------------------------------------\")\n # TODO remove the NMS and end-to-end time report once NMS is on device\n function(\"End-to-end\")\n function(\"Average NMS Latency per Batch: {:.3f} ms\".format(1000 * avg_nms_time_per_step/self.cfg.ipuopts.batches_per_step))\n function(\"Average End-to-end Latency per Batch: {:.3f} ms\".format(1000 * avg_total_time_per_step/self.cfg.ipuopts.batches_per_step))\n function(\"End-to-end Throughput: {:.3f} img/s\".format(sum(self.total_throughputs)/len(self.total_throughputs)))\n function(\"==================================================\")\n\n if self.cfg.eval.metrics:\n self.compute_and_print_eval_metrics()", "def acc(ev):\n profData = getProfilingData(ev)\n if profData is not None:\n return profData.Tacc()\n return \"\"", "def cache_txn_manage(database, table, action, trans=None, **kw):\n trace = kw['trace']\n cache = server.data[database].tables['cache']\n transaction = request.get_json() if trans == None else trans\n if 'txn' in transaction:\n txn_id = transaction['txn']\n tx=None\n wait_time = 0.0 # total time waiting to commit txn \n wait_interval = txn_default_wait_in_sec # amount of time to wait between checks - if multiple txns exist \n # Get transaction from cache db\n if action == 'commit':\n while True:\n txns = cache.select('id','timestamp',\n where={'table_name': table}\n )\n if not txn_id in {tx['id'] for tx in txns}:\n return {\"message\": trace.error(f\"{txn_id} does not exist in cache\")}, 500\n if len(txns) == 1:\n if not txns[0]['id'] == txn_id:\n warning = f\"txn with id {txn_id} does not exist for {database} {table}\"\n return {'warning': trace.warning(warning)}, 500\n # txn_id is only value inside\n tx = txns[0]\n break\n # multiple pending txns - need to check timestamp to verify if this txn can be commited yet\n txns = sorted(txns, key=lambda txn: txn['timestamp'])\n for ind, txn in enumerate(txns):\n if txn['id'] == txn_id:\n if ind == 0:\n tx = txns[0]\n break\n if wait_time > txn_max_wait_time_in_sec:\n warning = f\"timeout of {wait_time} reached while waiting to commit {txn_id} for {database} {table}, waiting on {txns[:ind]}\"\n trace.warning(warning)\n trace.warning(f\"removing txn with id {txns[0]['id']} maxWaitTime of {txn_max_wait_time_in_sec} reached\")\n cache.delete(where={'id': txns[0]['id']})\n break\n break\n if tx == None:\n trace.warning(f\"txn_id {txn_id} is behind txns {txns[:ind]} - waiting {wait_time} to retry\")\n time.sleep(wait_interval)\n wait_time+=wait_interval \n # wait_interval scales up to txn_max_wait_interval_in_sec\n wait_interval+=wait_interval \n if wait_interval >= txn_max_wait_interval_in_sec:\n wait_interval = txn_max_wait_interval_in_sec\n continue\n break\n # Should not have broken out of loop here without a tx\n if tx == None:\n trace.error(\"tx is None, this should not hppen\")\n return {\"error\": \"tx was none\"}, 500\n tx = cache.select('type','txn',\n where={'id': txn_id})[0]\n try:\n r, rc = server.actions[tx['type']](database, table, tx['txn'])\n trace.warning(f\"##cache {action} response {r} rc {rc}\")\n except Exception as e:\n r, rc = trace.exception(f\"Exception when performing cache {action}\"), 500\n \n del_txn = cache.delete(\n where={'id': txn_id}\n )\n if rc == 200:\n # update last txn id\n set_params = {\n 'set': {\n 'last_txn_uuid': txn_id,\n 'last_mod_time': 
float(time.time())\n },\n 'where': {\n 'table_name': table\n }\n }\n server.data['cluster'].tables['pyql'].update(\n **set_params['set'],\n where=set_params['where']\n )\n return {\"message\": r, \"status\": rc}, rc\n if action == 'cancel':\n del_txn = cache.delete(\n where={'id': txn_id}\n )\n return {'deleted': txn_id}, 200", "def process_request(self, request):\n self.db.set_profiling_level(0)\n try:\n self.start_ts = self.db.system.profile.find()\\\n .sort(\"ts\", pymongo.DESCENDING)\\\n .limit(1)[0].get('ts')\n except IndexError:\n self.start_ts = None\n\n self.db.set_profiling_level(2)", "def startTransaction(self) -> int:\n ...", "def update_time(self):\n time_metrics = self._fetch_time_metrics_and_clear()\n self._logger.info('update_time. time_metrics = %s', build_metrics_times_data(time_metrics))" ]
[ "0.57431966", "0.56813395", "0.5386816", "0.5268158", "0.52590114", "0.5248265", "0.51790476", "0.51674783", "0.51302767", "0.5116144", "0.5102775", "0.5091443", "0.50542235", "0.5050009", "0.5020564", "0.50163", "0.50032055", "0.4916298", "0.48824197", "0.4858216", "0.4846671", "0.48407298", "0.4829507", "0.48275587", "0.481562", "0.4814692", "0.4810644", "0.48033282", "0.47969192", "0.47949752", "0.47873122", "0.47837195", "0.47808605", "0.47747633", "0.4774446", "0.47730988", "0.47151595", "0.47100452", "0.4707107", "0.4703334", "0.47032672", "0.46971434", "0.46826646", "0.46750718", "0.46651492", "0.46387458", "0.4631215", "0.4616959", "0.46131918", "0.461252", "0.46110803", "0.46048835", "0.4600367", "0.46002933", "0.45998284", "0.45987594", "0.45985317", "0.45939773", "0.45909387", "0.45868132", "0.45790964", "0.45719755", "0.4568749", "0.45644447", "0.4562327", "0.45622173", "0.45621982", "0.45553368", "0.4545873", "0.45452747", "0.45441023", "0.45418426", "0.4539685", "0.45349243", "0.4534225", "0.45304772", "0.45269358", "0.45267773", "0.45234093", "0.45226473", "0.45220056", "0.451876", "0.45021942", "0.44993764", "0.44898534", "0.44806254", "0.4477325", "0.44668064", "0.44652733", "0.44632363", "0.44623008", "0.44589537", "0.44572914", "0.44551933", "0.44494697", "0.44488522", "0.44426656", "0.44426435", "0.4440055", "0.4433733" ]
0.7942365
0
Returns a list containing the low level metric data for sending to the core application pertaining to the reporting period. This consists of tuple pairs where first is dictionary with name and scope keys with corresponding values, or integer identifier if metric had an entry in dictionary mapping metric (name, scope) as supplied from core application. The second is the list of accumulated metric data, the list always being of length 6.
def metric_data(self, normalizer=None):

        if not self.__settings:
            return []

        result = []
        normalized_stats = {}

        # Metric Renaming and Re-Aggregation. After applying the metric
        # renaming rules, the metrics are re-aggregated to collapse the
        # metrics with same names after the renaming.

        if self.__settings.debug.log_raw_metric_data:
            _logger.info('Raw metric data for harvest of %r is %r.',
                    self.__settings.app_name,
                    list(six.iteritems(self.__stats_table)))

        if normalizer is not None:
            for key, value in six.iteritems(self.__stats_table):
                key = (normalizer(key[0])[0], key[1])
                stats = normalized_stats.get(key)
                if stats is None:
                    normalized_stats[key] = copy.copy(value)
                else:
                    stats.merge_stats(value)
        else:
            normalized_stats = self.__stats_table

        if self.__settings.debug.log_normalized_metric_data:
            _logger.info('Normalized metric data for harvest of %r is %r.',
                    self.__settings.app_name,
                    list(six.iteritems(normalized_stats)))

        for key, value in six.iteritems(normalized_stats):
            key = dict(name=key[0], scope=key[1])
            result.append((key, value))

        return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_metric_list(self) -> List[str]:\n ...", "def get_all_metrics(self):\n up_time = self.uptime()\n down_time = self.downtime()\n customer_sla = self.sla()\n objective = self.slo()\n indicator = self.sli()\n avail_percentage = self.availability()\n mt_bf = self.mtbf(up_time)\n mt_tr = self.mttr(down_time)\n list_results = [up_time,down_time,customer_sla,objective,indicator,avail_percentage,mt_bf,mt_tr]\n return list_results", "def __get_metrics_list(self):\n metrics = metrics_calculator.MetricsCalculator(self.processor)\n metric_list = []\n # Populate the list\n for key in metrics.get_raw_metrics().keys():\n name = metrics.get_raw_metrics()[key][\"NAME\"]\n formula = metrics.get_raw_metrics()[key][\"FORMULA\"]\n description = metrics.get_raw_metrics()[key][\"DESCRIPTION\"]\n metric = Metric(name, formula, description)\n metric_list.append(metric)\n return metric_list", "def list_metrics(self):\n pass", "def metrics(self):\n self.metrics = []\n \n self.clients()\n\n if len(self.metrics) > 0:\n return self.metrics\n else:\n return []", "def metrics(self):\n metrics_registry = getattr(self._thread_local, \"klio_metrics\", None)\n if not metrics_registry:\n self._thread_local.klio_metrics = self._get_metrics_registry()\n return self._thread_local.klio_metrics", "def get_metrics(self) -> Dict[str, base.Number]:\n return self._metrics", "def list_definition(self):\n return self._get(path='metrics')", "def metrics(self):\n\n return six.iteritems(self.__stats_table)", "def build_metrics_gauge_data(gauge_metrics):\n return [{'name': name, 'value': value} for name, value in iteritems(gauge_metrics)]", "def get_data(self):\n data = list(IgnitionRow.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1]\n pct_flop_data = [[int(elem['pct_flop_{}'.format(key)]) for elem in two_hours] \n \tfor key in self.keys]\n pct_flop_data = [[min(elem, 100) for elem in arr] for arr in pct_flop_data] # Assume a max pot size of 2000 BBs\n return pct_flop_data", "def get_metric_variables(self) -> List[Any]:\n with self._lock:\n return self._get_metric_variables()", "def get_metrics(self):\n self.logger.debug(\"Fetching metrics.\")\n return self._api_query(\"metrics\")['metrics']", "def metrics(self) -> list[dict[str, dict[str, float | int]]]:\n return self.performance[\"performances\"]", "def metrics(self):\n\n data = requests.get(\n f\"http://{self.prometheus_host}:{self.prometheus_port}/metrics\"\n ).content.decode()\n lines = [line for line in data.split(\"\\n\") if not line.startswith(\"#\")]\n metrics = {}\n for line in lines:\n if not line:\n continue\n\n name, value = line.split(\" \")\n\n try:\n value = int(value) # type: ignore\n except ValueError:\n value = float(value) # type: ignore\n\n if \"{\" in name and \"}\" in name:\n base = name[: name.index(\"{\")]\n tags = name[name.index(\"{\") + 1 : -1]\n tags = [tag.split(\"=\") for tag in tags.split(\",\")]\n tags = [(key, val.replace('\"', \"\")) for key, val in tags]\n\n name = base + \"#\" + \",\".join(f\"{k}:{v}\" for k, v in sorted(tags))\n\n metrics[name] = value\n\n return metrics", "def metrics(self) -> list:\n my_metrics = [\n FramesMetric(\"frames\"),\n FPSMetric(\"fps\"),\n EpisodeRewardMetric('PMM:episode_rewards'),\n EpisodeRewardMetricQuantile('P09:episode_rewards', quantile=0.9),\n EpisodeRewardMetricQuantile('P01:episode_rewards', quantile=0.1),\n EpisodeLengthMetric(\"episode_length\")\n ]\n\n return my_metrics + self.algo.metrics() + self.env_roller.metrics()", "def tracked_metrics(self) -> list:\n 
metric_names = [\"loss\"]\n if self.metrics_map is not None:\n metric_names.extend([key for key in self.metrics_map.keys()])\n return metric_names", "def stats(self):\r\n\t\tdata = self._get('global/', query=None)\r\n\t\treturn data", "def metrics(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'metrics')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def metrics(self):\n if not self.df:\n return []\n\n column_metric_strings = [col.split(self.sep)[0] for col in self.df.columns]\n\n metrics = set()\n for colstring in column_metric_strings:\n try:\n metrics.add(Metric(colstring))\n except ValueError:\n continue\n\n return sorted(list(set(metrics)))", "def get_metrics(self) -> dict:\n return self.metric_dict", "def convert(report):\n M = []\n for row in report['data']['rows']:\n dimensions = row['dimensions']\n metrics = row['metrics'][0]['values']\n M.append(dimensions + metrics)\n return M", "def metrics(self):\n return self.__metrics", "def metrics(self):\n return self._metrics", "def metrics(self):\n return self._metrics", "def get_all_metrics(self):\n metrics = {}\n for item in self.list_metrics():\n metric_name = item[2]\n metric = self.get_metric(\n item,\n existing_dict=metrics.get(metric_name, None))\n metrics[metric_name] = metric\n return metrics", "def get_measurements(self):\n metrics = {}\n for key in self.fields.keys():\n metrics[key] = []\n # What's in output:\n # proc_pid date virt res shrd cpu mem power gpus_power\n while not self.queue.empty():\n data = self.queue.get().strip().split()\n for field in self.fields:\n tp = self.fields[field]['type']\n idx = self.fields[field]['index']\n count = self.fields[field]['count']\n if count == -1:\n metrics[field].append(ResourceMonitor.str_to_type(data[idx], tp))\n elif count == 0:\n metrics[field].append([ResourceMonitor.str_to_type(data[idx], tp)])\n else:\n metrics[field].append([\n ResourceMonitor.str_to_type(data[index], tp) for index in xrange(idx, idx+count)\n ])\n return metrics", "def metrics_group():", "def metrics(self) -> List[Metric]:\n return self._metrics", "def get_metrics(self):\n return None", "def get_all_metrics():\n return get_overlap_metrics() + get_distance_metrics() + get_distance_metrics()", "def list_metrics(self):\n results = []\n if self.r.exists(self.metrics_key):\n keys = self.r.smembers(self.metrics_key)\n for k in keys:\n # metric_key, metric_type, metric_name, metric_help = keys.split(\" \", 3)\n results.append(k.split(\" \", 3))\n return results", "def get_metric_info(self):\n metric_data_object = self.client.get_metric_data(\n MetricDataQueries=[\n {\n \"Id\": \"cdbdata_invocations\",\n \"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"Invocations\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n },\n {\n \"Id\": \"cdbdata_errors\",\n \"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"Errors\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n },\n {\n \"Id\": \"cdbdata_throttles\",\n \"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"Throttles\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n },\n {\n \"Id\": 
\"cdbdata_concurrentexec\",\n \"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"ConcurrentExecutions\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n }\n ],\n StartTime=self.start_timestamp,\n EndTime=self.end_timestamp,\n ScanBy='TimestampDescending'\n )\n\n metric_data_points = metric_data_object[DataPointsCollector.RESPONSE_KEY]\n\n return metric_data_points", "def getLineData(self):\n # Each view must have exactly one DateRange object\n date_range = DateRange.objects.filter(foreign_key=self.id).first()\n assert(date_range is not None)\n\n begin, end = date_range.getBeginEnd()\n return {\n \"begin\" : begin.strftime(\"%Y%m%d\"),\n \"end\" : end.strftime(\"%Y%m%d\"),\n \"data_sets\" : [\n {\n \"label\" : filter_set.label,\n \"color\" : filter_set.color,\n \"data\" : [\n {\n \"cnt\": row[\"cnt\"],\n \"date\": row[\"date\"].strftime(\"%Y%m%d\")\n } for row in filter_set.getMessageCountPerDay()\n ],\n } for filter_set in self.filterset_set.all()\n ]\n }", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def supported_metrics(cls) -> List[str]:\n ...", "def getReportMetrics(self):\n return self.__unwrapResults().reportMetrics", "def metrics(self) -> dict:\n if not self.exp_metadata.metrics:\n self.exp_metadata.metrics = {}\n return self.exp_metadata.metrics", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def get_data(self):\n data = list(IgnitionRow.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1] # The most recent two hours of data\n# print([elem['avg_pot_5'] for elem in two_hours])\n# avg_pot_data = [[float(elem['avg_pot_{}'.format(key)]) / (int(key) / 100) for elem in two_hours]\n avg_pot_data = [[float(elem['avg_pot_{}'.format(key)]) for elem in two_hours] \n \tfor key in self.keys]\n# print(avg_pot_data[0][-5:])\n avg_pot_data = [[max(min(elem, 100),0) for elem in arr] for arr in avg_pot_data] # Assume a max pot size of 2000 BBs\n avg_pot_data = [[elem if elem != 100 else 0 for elem in arr] for arr in avg_pot_data] # Assume a max pot size of 2000 BBs\n# print(avg_pot_data[0][-5:])\n return avg_pot_data", "def _compute_group_stats():\n group_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # TODO: move this to property of evaluation group or add dedicated data model.\n # GOAL: should be configurable from within the Django admin backend.\n #\n # MINIMAL: move to local_settings.py?\n #\n # The following dictionary defines the number of HITs each group should\n # have completed during the WMT16 evaluation campaign.\n \n for group in groups:\n _name = group.name\n \n _group_stats = HIT.compute_status_for_group(group)\n _total = _group_stats[0]\n \n if _total > 0 and not _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = 0\n elif _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = GROUP_HIT_REQUIREMENTS[_name]\n _delta = _total - 
_required\n _data = (_total, _required, _delta)\n \n if _data[0] > 0:\n group_stats.append((_name, _data))\n \n # Sort by number of remaining HITs.\n group_stats.sort(key=lambda x: x[1][2])\n \n # Add totals at the bottom.\n global_total = sum([x[1][0] for x in group_stats])\n global_required = sum([x[1][1] for x in group_stats])\n global_delta = global_total - global_required\n global_data = (global_total, global_required, global_delta)\n group_stats.append((\"Totals\", global_data))\n \n return group_stats", "def getMapData(self):\n return [\n {\n \"label\": filter_set.label,\n \"color\": filter_set.color,\n \"data\" : list(filter_set.getMessageCountPerCountry())\n } for filter_set in self.filterset_set.all()\n ]", "def global_stats(self):\n\n glob_stats = []\n for job in self.jobs:\n\n g_stats = job.get_global_stats()\n if g_stats:\n glob_stats.append(\n {\n \"job_name\": job.name,\n \"globals\": g_stats\n }\n )\n\n return glob_stats", "def metrics(self):\r\n if not hasattr(self, '_observable_metrics'):\r\n self._observable_metrics = Metrics()\r\n return self._observable_metrics", "def gpu_metrics(self) -> List[ClaraGpuUtilization]:\r\n return self._gpu_metrics", "def _monitor_metrics(self):\n metrics = [\"loss\"]\n try:\n m = U.metrics_from_model(self.model)\n if isinstance(m, list):\n metrics.extend(m)\n except:\n pass\n if self.val_data is not None:\n for m in metrics[:]:\n metrics.append(\"val_%s\" % (m))\n return metrics", "def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n self._logger.warning(\"Empty result from query : {0}\".format(query_spec))", "def collected_data(self) -> Iterable[Dict[str, float]]:\n return self._collected_data", "def _hydro_metrics(self) -> list:\n\n return self._minimal() + [\n 'fdc_flv', 'fdc_fhv',\n 'kge', 'kge_np', 'kge_mod', 'kge_bound', 'kgeprime_c2m', 'kgenp_bound',\n 'nse', 'nse_alpha', 'nse_beta', 'nse_mod', 'nse_bound']", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n\n if \"jobs\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"jobs\"]:\n for metric in metric_jobs():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGIN_SETTINGS[\"models\"]):\n yield metric\n\n # --------------------------------------------------------------\n # Extras Function defined in configuration.py or the Regristry\n # # --------------------------------------------------------------\n if \"extras\" in PLUGIN_SETTINGS:\n for metric in collect_extras_metric(PLUGIN_SETTINGS[\"extras\"]):\n yield metric\n\n for metric in collect_extras_metric(__REGISTRY__):\n yield metric\n\n gauge = 
GaugeMetricFamily(\n \"nautobot_app_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def get_timestamped_metric_values_as_strings(self):\n ret_list = []\n i = 0\n while i < len(self.__metric_value_list):\n ret_list.append(self.__metric_value_list[i].timestamp.strftime(\"%Y-%m-%d %H:%M:%S\") + \" \" +\n str(self.__metric_value_list[i].value) +\n \"(\" + str(self.__metric_value_list[i].metric_def_ID) + \")\")\n i += 1\n return ret_list", "def get_telemetry ():\n telemetry = OrderedDict()\n\n telemetry[\"ip_addr\"] = socket.gethostbyname(socket.gethostname())\n\n telemetry[\"mem_free\"] = psutil.virtual_memory().free\n\n telemetry[\"cpu_num\"] = psutil.NUM_CPUS\n\n x = psutil.cpu_times()\n telemetry[\"cpu_times\"] = OrderedDict([ (\"user\", x.user), (\"system\", x.system), (\"idle\", x.idle) ])\n\n x = psutil.disk_usage(\"/tmp\")\n telemetry[\"disk_usage\"] = OrderedDict([ (\"free\", x.free), (\"percent\", x.percent) ])\n\n x = psutil.disk_io_counters()\n telemetry[\"disk_io\"] = OrderedDict([ (\"read_count\", x.read_count), (\"write_count\", x.write_count), (\"read_bytes\", x.read_bytes), (\"write_bytes\", x.write_bytes), (\"read_time\", x.read_time), (\"write_time\", x.write_time) ])\n\n x = psutil.network_io_counters()\n telemetry[\"network_io\"] = OrderedDict([ (\"bytes_sent\", x.bytes_sent), (\"bytes_recv\", x.bytes_recv), (\"packets_sent\", x.packets_sent), (\"packets_recv\", x.packets_recv), (\"errin\", x.errin), (\"errout\", x.errout), (\"dropin\", x.dropin), (\"dropout\", x.dropout) ])\n\n return telemetry", "def gather_metric(self):\n device_dict = {}\n # Delete first and last line of output of adb.\n output = self._shell.run(self.COMMAND).stdout\n\n # Example Line, Device Serial Num TAB Phone Status\n # 00bd977c7f504caf\toffline\n if output:\n for line in output.split('\\n'):\n spl_line = line.split('\\t')\n # spl_line[0] is serial, [1] is status. 
See example line.\n device_dict[spl_line[0]] = spl_line[1]\n\n return {self.DEVICES: device_dict}", "def get_metric_data(config, metric_list, metric_grouping, start_time, end_time, collected_data_map):\n\n def format_data_entry(json_data_entry):\n metric_name = json_data_entry.get('metric')\n host_name = json_data_entry.get('tags', {}).get('host') or 'unknownHost'\n dps = json_data_entry.get('dps', {})\n metric_value = None\n header_field = normalize_key(metric_name) + \"[\" + host_name + \"]:\" + str(\n get_grouping_id(metric_name, metric_grouping))\n mtime = 0\n for stime, val in dps.items():\n if int(stime) > mtime:\n metric_value = val\n mtime = int(stime)\n\n epoch = mtime * 1000\n\n if epoch in collected_data_map:\n timestamp_value_map = collected_data_map[epoch]\n else:\n timestamp_value_map = {}\n\n timestamp_value_map[header_field] = str(metric_value)\n collected_data_map[epoch] = timestamp_value_map\n\n json_data = {\n \"token\": config['OPENTSDB_TOKEN'],\n \"start\": start_time,\n \"end\": end_time,\n \"queries\": map(lambda m: {\n \"aggregator\": \"avg\",\n \"downsample\": \"1m-avg\",\n \"metric\": m.encode('ascii')\n }, metric_list)\n }\n\n url = config[\"OPENTSDB_URL\"] + \"/api/query\"\n response = requests.post(url, data=json.dumps(json_data))\n if response.status_code == 200:\n rawdata_list = response.json()\n logger.debug(\"Get metric data from opentsdb: \" + str(len(rawdata_list)))\n\n # format metric and save to collected_data_map\n map(lambda d: format_data_entry(d), rawdata_list)", "def get_aggregated_values(self):\n if not self._initialized:\n raise Exception(\"To readout you must first initialize, then\"\n \"process batches!\")\n else:\n ret_vals = [q.readout() for q in self.quantities]\n return dict(zip(self.quantity_names, ret_vals))", "def build_metrics_times_data(time_metrics):\n return [{'name': name, 'latencies': latencies.get_latencies()}\n for name, latencies in iteritems(time_metrics)]", "def get_stats(self):\n return {\n \"pings_sent\" : self.ping_count,\n \"measurements\" : self.measurements,\n }", "def logs(self):\n dataTuples = list()\n try:\n with sqlite3.connect(self.db_file) as conn:\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM curves\")\n dataTuples = cur.fetchall()\n return [{\"id\": a[0], \"name\": a[1], \"units\": a[2],\n \"descr\": a[3]} for a in dataTuples]\n except:\n return dataTuples", "def collect(self) -> Metric:\n ret = self.source()\n if ret is None:\n LOGGER.warning('Statistics are not available')\n return\n gauge = GaugeMetricFamily('wemo_device_state', 'Status of Wemo device', labels=['address', 'parameter'])\n gauge.add_metric([ret.address, 'today_kwh'], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'current_power_mW'], ret.current_power,\n timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_on_time'], ret.today_on_time, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'on_for'], ret.on_for, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_standby_time'], ret.today_standby_time,\n timestamp=ret.collection_time.timestamp())\n\n yield gauge\n\n counter = CounterMetricFamily('wemo_power_usage', 'Today power consumption', labels=['address'])\n counter.add_metric([ret.address], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n yield counter", "def get_data(self):\n data = list(IgnitionRow.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1]\n #two_hours 
= data\n num_players_data = [[max(min(elem['num_players_{}'.format(key)],50),0) for elem in two_hours] for key in self.keys]\n return num_players_data", "def _aggregate_perf_data(perf_all_ordinals: List[str]):\n aggregate = {}\n\n pd = PerfData()\n for data in perf_all_ordinals:\n worker_pd = PerfData(**json.loads(data))\n if len(perf_all_ordinals) > 1:\n aggregate.setdefault(\"ordinals\", [])\n aggregate[\"ordinals\"].append(worker_pd.throughput_dict())\n\n pd.merge(worker_pd)\n\n aggregate.update(dataclasses.asdict(pd))\n return aggregate", "def get_all_stats():\n\n return get_component(CachingPackage.COMPONENT_NAME).get_all_stats()", "def get_metric_list(config):\n metric_list = []\n url = config[\"OPENTSDB_URL\"] + \"/api/suggest?type=metrics&q=\"\n response = requests.get(url)\n if response.status_code == 200:\n metric_list = response.json()\n logger.debug(\"Get metric list from opentsdb: \" + str(metric_list))\n return metric_list", "def get_metrics_list(self, project, callback=None):\n metrics = sorted((yield gen.Task(self.client.smembers,\n self.get_metrics_key(project))))\n if callback:\n callback(metrics)", "def get_data(self):\n return [10, 20, 30]", "def _get_daily_statistics(self, cr, uid, ids, field_name, arg, context=None):\n obj = self.pool['mail.mail.statistics']\n res = {}\n for mailing in self.browse(cr, uid, ids, context=context):\n res[mailing.id] = {}\n date = mailing.sent_date if mailing.sent_date else mailing.create_date\n date_begin = datetime.strptime(date, tools.DEFAULT_SERVER_DATETIME_FORMAT)\n date_end = date_begin + relativedelta.relativedelta(days=self._period_number - 1)\n date_begin_str = date_begin.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)\n date_end_str = date_end.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)\n domain = [('mass_mailing_id', '=', mailing.id), ('opened', '>=', date_begin_str), ('opened', '<=', date_end_str)]\n res[mailing.id]['opened_daily'] = json.dumps(self.__get_bar_values(cr, uid, obj, domain, ['opened'], 'opened_count', 'opened:day', date_begin, context=context))\n domain = [('mass_mailing_id', '=', mailing.id), ('replied', '>=', date_begin_str), ('replied', '<=', date_end_str)]\n res[mailing.id]['replied_daily'] = json.dumps(self.__get_bar_values(cr, uid, obj, domain, ['replied'], 'replied_count', 'replied:day', date_begin, context=context))\n return res", "def data_quartiles(self):\n data = []\n for graph in self._graphs.values():\n data += graph.data.values()\n data.sort()\n datalen = len(data)\n return(data[0], data[datalen/4], data[datalen/2],\n data[3*datalen/4], data[-1])", "def get_stats(self):\n return utils.csv_to_dict(wait(self.proto.stat()))", "def metrics_get(period):\n return flask.jsonify({\"message\": \"noop\"}), 200", "def getAllMetrics(self):\n result = self.getReportMetrics()\n result.update(self.getOptimizationMetrics())\n return result", "def build_metrics_counter_data(count_metrics):\n return [{'name': name, 'delta': delta} for name, delta in iteritems(count_metrics)]", "def metrics(self, request):\n return OtterMetrics(self.store).app.resource()", "def get_metrics(event):\n return tba_session.get(BASE_URL + '/event/%s/oprs' %event).json()", "def listmetrics(namespace: str = None, Region_name: str = None) -> List:\r\n cloudwatch = client('cloudwatch', region_name=Region_name)\r\n paginator = cloudwatch.get_paginator('list_metrics')\r\n metrics=[] # type: List\r\n if namespace is not None:\r\n page = paginator.paginate(Namespace=namespace) \r\n else:\r\n page = paginator.paginate()\r\n for response in 
page:\r\n for metric in response['Metrics']:\r\n metrics.append(metric)\r\n return metrics", "def metrics(self):\n return self.verificationRun.metrics()", "def get_metric(ms):\n\treturn '['+','.join(str(m) for m in ms)+']'", "def parse_response(response):\n data = []\n \n for report in response.get('reports', []):\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])\n rows = report.get('data', {}).get('rows', [])\n \n row_count = 0 \n for row in rows:\n #print '\\n\\n', 'ROW_COUNT: ', row_count, '\\n'\n data.append({}) \n\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n\n for header, dimension in zip(dimensionHeaders, dimensions):\n #print header + ': ' + dimension\n data[row_count][header[3:]] = dimension\n \n for i, values in enumerate(dateRangeValues):\n #print 'Date range (' + str(i) + ')'\n for metricHeader, value in zip(metricHeaders, values.get('values')):\n #print metricHeader.get('name') + ': ' + value\n data[row_count][metricHeader.get('name')[3:]] = value\n \n row_count += 1 \n \n return data", "def calculate_metrics(metrics_data: List[Tuple[Metric, DataType]]) -> List[float]:\n pass", "def collect(self):\n service = build_service('calendar', 'v3')\n data = []\n\n for time_window, suffix in zip(self._parameters['time_windows'],\n self._parameters['suffixes']):\n # TODO: Get Search prefix and calendar from configs\n events = get_calendar_entries_by_query(service, \"S-\", time_window, \"Tracking\")\n\n # If we don't have any events we set it to the window size (which is in hours)\n seconds_since = [time_window * 60 * 60]\n long_count = 0\n short_count = 0\n for event in events:\n start_time = parse(event['start'].get('dateTime'))\n end_time = parse(event['end'].get('dateTime'))\n seconds_since.append(\n (datetime.datetime.now(datetime.timezone.utc) - start_time).total_seconds())\n event_length = end_time - start_time\n if event_length.seconds <= 30*60:\n short_count += 1\n else:\n long_count += 1\n\n data.append(DataPoint(time.time(), self._base_name + 's_count.' + suffix, len(events)))\n data.append(DataPoint(time.time(), self._base_name + 's_seconds_since.' + suffix, min(seconds_since)))\n data.append(DataPoint(time.time(), self._base_name + 's_short_count.' + suffix, short_count))\n data.append(DataPoint(time.time(), self._base_name + 's_long_count.' 
+ suffix, long_count))\n return data", "def getAllMeasurement(self): \n return self.measurement", "def getFormattedJobStatistics(self):\n\t\tformatted_job_stats = [self.name]\n\t\tformatted_job_stats.append(str(self.retry_count))\n\t\tif self.site is None:\n\t\t\tformatted_job_stats.append('-')\n\t\telse:\n\t\t\tformatted_job_stats.append(self.site)\n\t\tformatted_job_stats.append(round_to_str(self.kickstart))\n\t\tformatted_job_stats.append(round_to_str(self.post))\n\t\tformatted_job_stats.append(round_to_str(self.condor_delay))\n\t\tformatted_job_stats.append(round_to_str(self.resource))\n\t\tformatted_job_stats.append(round_to_str(self.runtime))\n\t\tformatted_job_stats.append(round_to_str(self.seqexec))\n\t\tformatted_job_stats.append(round_to_str(self.seqexec_delay))\n\t\treturn formatted_job_stats", "def flush(self):\n with self._lock:\n batch = self._batch\n timestamps = self._timestamps\n\n items = []\n for identity, value in batch.items():\n metric = {}\n typ, name, tags = identity\n metric[\"name\"] = name\n if typ:\n metric[\"type\"] = typ\n else:\n metric[\"timestamp\"] = timestamps[identity]\n\n if tags:\n metric[\"attributes\"] = dict(tags)\n\n metric[\"value\"] = value\n items.append(metric)\n\n items = tuple(items)\n\n batch.clear()\n timestamps.clear()\n\n common = self._common.copy()\n common[\"timestamp\"] = self._interval_start\n now = int(time.time() * 1000.0)\n interval = now - self._interval_start\n common[\"interval.ms\"] = interval\n\n self._interval_start = now\n\n return items, common", "def metrics(_):\r\n collector = BuildsCollector()\r\n build_metrics, headers = collector.get_metrics_table()\r\n print(tabulate(build_metrics, headers=headers))", "def get_scoped_to_feature() -> Iterable[Feature] | Feature | None:\n return Feature.METRICS", "def statistics(self):\n return self.get_statistics()", "def get_general_stats() ->List[BaseStat]:\n return [PositionalTendencies(),\n SpeedTendencies(),\n ItemGoals(),\n DropshotGoals(),\n DropshotBallPhaseTimes(),\n DropshotStats()\n ]", "def init_metric_definitions():\n metric_definitions = []\n\n # add info to list in memory, one by one, following signature values\n metric_def_ID = 1\n metric_def_name = \"Recovery Time\"\n metric_def_info = \"Measures time taken by ONAP to restore a VNF\"\n metric_definitions.append(RecoveryTimeDef(metric_def_ID, metric_def_name,\n metric_def_info))\n\n metric_def_ID = 2\n metric_def_name = \"Uptime Percentage\"\n metric_def_info = \"Measures ratio of uptime to reference time, not counting planned downtime\"\n metric_definitions.append(UptimePercentageDef(metric_def_ID, metric_def_name,\n metric_def_info))\n\n\n # write list to binary file\n write_list_bin(metric_definitions, FILE_METRIC_DEFINITIONS)\n\n return metric_definitions", "def get_next_batch(self):\n\n metrics = {}\n for struct in self.metrics.values():\n metrics = {**metrics, **struct.get_next_batch()}\n\n return metrics", "def get_data(self):\n data = list(IgnitionRowPredictionOLS.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1]\n num_players_data = [[elem['num_players_{}'.format(key)] for elem in two_hours] for key in self.keys]\n return num_players_data", "def get_stats(self): \n return dict(l.split('\\t', 1) \\\n for l in wait(self.proto.stat()).splitlines() if l)", "def read_metrics_and_assertions(self) -> (\n typing.Iterable[typing.Tuple[str, float, metrics_pb2.Assertion]]):\n raise NotImplementedError", "def metrics():\n\tmetrics = pd.DataFrame(index=['accuracy', 'precision', 
'recall'],\n\t\t\t\t\t\t columns=['LogisticReg', 'NeuralNetwork'])\n\treturn metrics", "def get_values(self, names):\n r = []\n for n in names:\n if n in self.raw_metrics:\n r.append(self.raw_metrics[n])\n else:\n return None\n return r", "def core_stats():\n data = get_tsv_dataset(os.path.join(DATA_DIR, TOTALS_FILE))\n if data is None:\n return make_response(jsonify({'error': 'Data could not be read'}), 500)\n # parse up so we can manipulate things.\n dataset = [int(x) for x in data]\n annual_sightings = sum(dataset)\n # for each 'month' (selection of x4\n monthly_sightings = []\n max_sightings = 0\n max_month = 0\n\n # grab each month's data into its own list for post processing.\n # also calculate some other numbers as we go.\n for i in range(0, len(dataset), 4):\n # select 4x data points.\n this_month = dataset[i:i + 4]\n total_sightings_this_month = sum(this_month)\n monthly_sightings.append(total_sightings_this_month)\n old_max = max_sightings\n max_sightings = max(max_sightings, total_sightings_this_month)\n if old_max < max_sightings:\n # it could be the 0th month.\n max_month = len(monthly_sightings)\n\n mean_monthly_sightings = mean(monthly_sightings)\n month_name = list(calendar.month_name)[max_month]\n return make_response(jsonify({'annual_sightings': annual_sightings,\n 'max_sightings': max_sightings,\n 'max_sighting_month': month_name,\n 'mean_monthly_sightings': mean_monthly_sightings}), 200)", "def stats(self):\r\n return {}", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def get_metric_statistics(self, period, start_time, end_time, metric_name,\n statistics, dimensions=None):\n # Ugly heuristic to determine where to query a metric from\n if dimensions and dimensions.get('pid') and metric_name in self.app_metrics:\n query_type = 'application'\n index_by = 'pid'\n elif dimensions and dimensions.get('pid') and metric_name in self.host_metrics:\n query_type = 'host'\n index_by = 'pid'\n elif dimensions and dimensions.get('hostname') and metric_name in self.app_metrics:\n query_type = 'application'\n index_by = 'hostname'\n elif 'app_attributes' in metric_name:\n query_type = 'application'\n index_by = 'pid'\n else:\n query_type = 'host'\n index_by = 'hostname'\n\n if not isinstance(start_time, datetime):\n raise TypeError(\"start_time must be a datetime object\")\n if not isinstance(end_time, datetime):\n raise TypeError(\"end_time must be a datetime object\")\n\n tformat = \"%Y%m%d %H:%M\"\n interval = \"%s,%s\" % (start_time.strftime(tformat), end_time.strftime(tformat))\n time_group = int(period)\n\n if metric_name.startswith('app_attributes:'):\n metric_name, app_attribute = metric_name.split(':')\n else:\n app_attribute = ''\n if index_by == 'pid':\n query_fields = [metric_name, ]\n else:\n query_fields = [index_by, metric_name, ]\n\n script = self._build_script(query_fields, query_type, interval, time_group, dimensions)\n\n authenticate = 'basic' if self.username and self.password else None\n url = self._build_query_url(self.base_url, authenticate=authenticate,\n script=script)\n request = urllib2.Request(url)\n\n if self.username and self.password:\n auth_header = self._build_auth_header(self.username, self.password)\n request.add_header('Authorization', auth_header)\n\n reply = urllib2.urlopen(request)\n results = {}\n reader = csv.reader(reply)\n for metrics in reader:\n if metrics == []:\n continue\n\n if index_by == 'pid':\n match = 
re.search('pid=(.*?)&', metrics[0])\n if match:\n index = match.group(1)\n else:\n continue\n else:\n index = metrics.pop(0)\n if index == '':\n continue\n\n result = results.get(index, {Statistics.SERIES: []})\n for i, metric in enumerate(metrics):\n if metric_name == 'app_attributes' and app_attribute:\n metric = _extract_app_attribute(metric, app_attribute)\n\n result[Statistics.SERIES].append(metric)\n\n results[index] = result\n\n for index, metric in results.iteritems():\n series = metric[Statistics.SERIES]\n if Statistics.AVERAGE in statistics:\n try:\n metric[Statistics.AVERAGE] = sum(map(float, series)) / float(len(series))\n except (ZeroDivisionError, ValueError):\n metric[Statistics.AVERAGE] = 0.0\n if Statistics.SUM in statistics:\n metric[Statistics.SUM] = sum(map(float, series))\n if Statistics.SAMPLE_COUNT in statistics:\n metric[Statistics.SAMPLE_COUNT] = len(series)\n if Statistics.MAXIMUM in statistics:\n metric[Statistics.MAXIMUM] = max(map(float, series))\n if Statistics.MINIMUM in statistics:\n metric[Statistics.MINIMUM] = min(map(float, series))\n\n return results", "def get_metric_for_all_dates(inargs, exp_id):\n date_list = []\n # Loop over dates and collect data\n for date in h.make_timelist(inargs.date_start, inargs.date_stop,\n inargs.hours_inc):\n date_list.append(get_metric_for_one_day(inargs, exp_id, date))\n return np.array(date_list)", "def population_timeline_chart_data(matchid):\n ps = Match.query.get(matchid).populationstats.all()\n labels = []\n popcounts = []\n lowestPop = 100\n\n for snapshot in ps:\n labels.append(snapshot.time.strftime('%H:%M'))\n popcounts.append(snapshot.popcount)\n if snapshot.popcount is None or snapshot.popcount < lowestPop:\n lowestPop = snapshot.popcount\n\n return json.dumps(labels), json.dumps(popcounts), lowestPop", "def get_data(self):\n data = list(IgnitionRowPredictionCVX.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1]\n num_players_data = [[elem['num_players_{}'.format(key)] for elem in two_hours] for key in self.keys]\n return num_players_data" ]
[ "0.63188374", "0.61312807", "0.59498423", "0.59364164", "0.59249073", "0.58868825", "0.5866983", "0.5859181", "0.57975364", "0.5722001", "0.57070863", "0.57040983", "0.5697402", "0.5691452", "0.5689769", "0.5687561", "0.5686129", "0.5646755", "0.56391305", "0.5638463", "0.5635187", "0.5616114", "0.5606177", "0.5598606", "0.5598606", "0.55425334", "0.55383384", "0.5533974", "0.5528502", "0.551542", "0.5487736", "0.5486818", "0.54844344", "0.54571635", "0.54278886", "0.5421712", "0.5421639", "0.5405077", "0.53709936", "0.5367911", "0.5354709", "0.53222334", "0.53190094", "0.52956116", "0.5294608", "0.52773863", "0.52704525", "0.5257143", "0.52420276", "0.5241034", "0.5234681", "0.52256286", "0.52200747", "0.51918274", "0.5176487", "0.5160102", "0.51492894", "0.5149026", "0.51411676", "0.513769", "0.51329356", "0.5132127", "0.51149696", "0.511279", "0.5111572", "0.5111125", "0.51075345", "0.5103818", "0.5102552", "0.5097158", "0.5093842", "0.5091793", "0.5087501", "0.5086499", "0.50805986", "0.50791514", "0.50786304", "0.50695014", "0.50573736", "0.50434643", "0.50434095", "0.50417304", "0.50390923", "0.50360835", "0.50340307", "0.5033167", "0.5032438", "0.5029268", "0.5021769", "0.5021764", "0.5013847", "0.5007797", "0.50061584", "0.49977797", "0.4997243", "0.4994867", "0.49878496", "0.49873653", "0.4973891", "0.49721482" ]
0.5418441
37
Returns a count of the number of unique metrics.
def metric_data_count(self): if not self.__settings: return 0 return len(self.__stats_table)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_count(self):\n\n return len(self.__stats_table)", "def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total", "def count(self):\n return self._reduce_for_stat_function(F.count, only_numeric=False)", "def number_of_running_metrics(self):\n try:\n return len(self.get_classads(\"OSGRSV==\\\"metrics\\\"\"))\n except TypeError:\n self.rsv.log(\"ERROR\", \"Classad parsing failed, unable to count running metrics\")", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def test_task_unique_total(self):\r\n # from bookie.bcelery import tasks\r\n tasks.count_unique()\r\n\r\n stat = StatBookmark.query.first()\r\n self.assertEqual(stat.attrib, stats.UNIQUE_CT)\r\n self.assertEqual(stat.data, 3)", "def count():", "def count_unique_features(self):\n return N_UNIQUE_FEATS", "def count(self, key):\n self._metrics[key] += 1", "def count(self):\n return sum(1 for _ in self)", "def count_unique(self) -> int:\n # Binary Search Tree == empty\n if self.root is None:\n return 0\n\n #count values\n q = Queue()\n return self.count_unique_helper(self.root, q)", "def count(self):\n # TODO not implemented yet\n return 0", "def _fetch_count_metrics_and_clear(self):\n with self._count_rlock:\n count_metrics = self._count_metrics\n self._count_metrics = defaultdict(int)\n\n return count_metrics", "def count(self):\n return len([i for i in self.iteritems()])", "def count(self) -> Tuple[groupable, pdarray]:\n repMsg = generic_msg(\n cmd=\"countReduction\",\n args={\"segments\": cast(pdarray, self.segments), \"size\": self.length},\n )\n self.logger.debug(repMsg)\n return self.unique_keys, create_pdarray(repMsg)", "def get_counts(self):\n self._update_counts()\n return self.failures, self.warnings, self.infos", "def count_unique():\r\n trans = transaction.begin()\r\n StatBookmarkMgr.count_unique_bookmarks()\r\n trans.commit()", "def count_counts(self):\n count_counts = defaultdict(Counter)\n for token, followers in self._dict.items():\n for f, count in followers.items():\n count_counts[token][count] += 1\n count_counts[token][0] = len(self._dict) - sum(count_counts[token].values())\n return count_counts", "def get_number_of_unique_students(self):\n unique_students = set()\n for row in self.responses:\n unique_students.add(row.student)\n return len(unique_students)", "def count_unique(self) -> int:\n if self.root is None: # If tree is empty\n return 0\n\n q = Queue()\n return self.unique_helper(self.root, q)", "def __uniqueCounts(rows):\n results = {} #Initialize a dictionary to store the results\n for row in rows: #Iterate over all rows of data\n #The result is the last column\n r = row[-1]\n if r not in results: results[r] = 0 #Start the count for each class at zero\n results[r] += 1 #Increment the count for this row's class by 1\n return results", "def count_umls(self) -> int:\n return self._count_model(Umls)", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "async def test_nr_of_metrics(self):\n response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(\n response,\n value=str(len(self.entities)),\n total=self.expected_software_metrics,\n entities=self.entities,\n )", "def count(self):\n\n raise NotImplementedError", "def count(self):\n return self.get_count()", "def count() -> int:\n pass", "def get_count():\n 
_check_init()\n return _pypm.CountDevices()", "def count_barcodes(metrics_file):\n\n barcodes = pd.read_csv(metrics_file, sep=\"\\t\", header=0, names=[\"barcode\", \"randomer\", \"count\"])\n return Counter(dict(barcodes.groupby(\"barcode\")['count'].sum().iteritems()))", "def count(self, tokens):\n return self.counts[tokens]", "def test_counts(self):\n c = array([5,0,1,1,5,5])\n obs = counts(c)\n exp = array([1,2,0,0,0,3])\n self.assertEqual(obs, exp)\n d = array([2,2,1,0])\n obs = counts(d, obs)\n exp = array([2,3,2,0,0,3])\n self.assertEqual(obs, exp)", "def uniqueNodeCount(eval):\n if not isEvaluator(eval):\n return 0\n return eval.ReferencedUniqueNodes().Size()", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def _get_count(results):\n return len(results)", "def count(self):\n return int()", "def count(self):\n return int()", "def count(self) -> int:\n return self.__count", "def count(self, cls=None):\n return len(self.all(cls))", "def __len__(self):\n return self.data.index.get_level_values(0).to_series().nunique()", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def get_count(name, key):\n total = 0\n query = CounterShard.all().filter('name = ', name).filter('reference_key = ', key)\n for counter in query:\n total += counter.count\n \n return total", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def count(self):\n return self.size()", "def get_number_of_unique_values(local_data, attr):\n\tvalues = []\n\tfor element in local_data:\n\t\tif element[attr] not in values:\n\t\t\tvalues.extend([element[attr]])\n\treturn len(values)", "def count(self) -> float:\n return pulumi.get(self, \"count\")", "def count(self):\n nreq, nres = 0, 0\n for entry in self.__history:\n if entry.oreq is not None:\n nreq += 1\n if entry.ores is not None:\n nres += 1\n return nreq, nres", "def get_count(self):\n\n\t\treturn self.__count", "def count_unique_bookmarks():\r\n total = BmarkMgr.count(distinct=True)\r\n stat = StatBookmark(attrib=UNIQUE_CT, data=total)\r\n DBSession.add(stat)", "def count(self):\n\n if self.cluster:\n return self.cluster.count()\n\n return super().count()", "def specht(mu):\n return StandardTableaux(mu).cardinality().n()", "def tally(self):\n return self.count", "def count(self):\n \n return self._count", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n return self._total_results", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def count(self, tokens):\n return self._count.get(tuple(tokens), 0)", "def count(self, resource):\n return len(self.all(resource))", "def observation_count(self):\n if not self.can_update():\n self._handle_error(910, [self.type])\n return self.tc_requests.observation_count(\n self.api_type, self.api_branch, self.unique_id, owner=self.owner\n )", "def oneup_count(self):\n return self.oneups.filter(Oneup.state >= 0).count()", "def count(self, axis=None):\n return self.data.count(axis=axis)", "def count_benchmarks():\n return len(setup_storage().fetch_benchmark({}))", "def get_num_measured_outputs(self):\n i = 0\n for o in self.outputs:\n if 
o.is_measured_output():\n i += 1\n return i", "def Count(self):\n return self._get_attribute('count')", "def totalhashes(self):\n return np.sum(self.counts)", "def n_users(self):\n if self._n_users is None:\n self._n_users = len(self.user_unique_vals)\n return self._n_users", "def fetch_counts_for_debug(stdout):\n test_names = TEST_NAMES_DEBUG_APP_PATTERN.findall(stdout)\n test_counts = collections.Counter(test_class for test_class, _ in test_names\n if test_class not in IGNORED_CLASSES)\n\n return test_counts", "def count(self):\n return len(self)", "def test_get_virtual_machine_count_metrics1(self):\n pass", "def totalCount(self):\n return sum(self.values())", "def totalCount(self):\n return sum(self.values())", "def totalCount(self):\n return sum(self.values())", "def get_untested_count(self):\n return sum(1 for outcome in (r.outcome for r in self.values()) if outcome == Result.UNTESTED)", "def get_total_counts(self):\n ret = {}\n all_loggers_count = 0\n for logger, name_map in self.acc_map.items():\n cur_logger_count = 0\n ret[logger.name] = {}\n for name, status_map in name_map.items():\n cur_name_count = 0\n ret[logger.name][name] = {}\n for status, acc in status_map.items():\n cur_count = acc.total_count\n ret[logger.name][name][status] = cur_count\n cur_name_count += cur_count\n cur_logger_count += cur_count\n all_loggers_count += cur_count\n ret[logger.name][name]['__all__'] = cur_name_count\n ret[logger.name]['__all__'] = cur_logger_count\n ret['__all__'] = all_loggers_count\n return ret", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def count(self):\n return len(self.names)", "def snmpqosqos_sch_sessions_regulated_count(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_sch_sessions_regulated_count\n\t\texcept Exception as e:\n\t\t\traise e", "def count(self) -> int:\n return self._adapter.count()", "def Count():\n return CheckForError(lib.Generators_Get_Count())", "def test_set_count(self) -> int:\n return pulumi.get(self, \"test_set_count\")", "def num_humans(self):\n return len(self._human_list)", "def num_humans(self):\n return len(self._human_list)", "def count(self, tokens):\n return self._count[tuple(tokens)]", "def count(self):\n return self._lift(\"count\")", "def get_count(self):\r\n return self.count", "def update_count(self):\n count_metrics = self._fetch_count_metrics_and_clear()\n self._logger.info('update_count. count_metrics = %s',\n build_metrics_counter_data(count_metrics))", "def GetCount(name):\n counter = StrongCounter.get_or_insert(key_name=name)\n return counter.count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count" ]
[ "0.71416634", "0.688916", "0.6736414", "0.65794754", "0.6549508", "0.6549508", "0.6549508", "0.6549508", "0.6548206", "0.65326285", "0.64953864", "0.64506423", "0.64230627", "0.64027196", "0.6389793", "0.6381835", "0.6377822", "0.6373317", "0.6364511", "0.63238037", "0.62901294", "0.6253662", "0.62463033", "0.624508", "0.6204462", "0.6198452", "0.6198452", "0.61811167", "0.6174914", "0.6163041", "0.6151766", "0.6147231", "0.6133947", "0.60912174", "0.6071731", "0.60696566", "0.6054436", "0.60463285", "0.60330665", "0.60213363", "0.60213363", "0.6017155", "0.59825283", "0.59763074", "0.5969955", "0.5969955", "0.59678435", "0.596004", "0.595392", "0.5943759", "0.5931562", "0.59295124", "0.59247833", "0.59237444", "0.59171695", "0.59169304", "0.5916226", "0.5903465", "0.5897899", "0.58902204", "0.58902204", "0.58902204", "0.5889679", "0.58847314", "0.58793694", "0.5874305", "0.58727044", "0.5870249", "0.5857602", "0.5856956", "0.5853532", "0.5850423", "0.58494127", "0.5848211", "0.58432674", "0.58417815", "0.58417815", "0.58417815", "0.5840477", "0.58390623", "0.5838034", "0.5830424", "0.58255076", "0.5825012", "0.5819198", "0.58173656", "0.5815668", "0.5815668", "0.58103424", "0.58102286", "0.58098453", "0.58083326", "0.5800024", "0.5797845", "0.5797845", "0.5797845", "0.5797845", "0.5797845", "0.5797845", "0.5797845" ]
0.67860687
2
Returns a list containing any errors collected during the reporting period.
def error_data(self): if not self.__settings: return [] return self.__transaction_errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getErrorsList(self):\n return self.__errors", "def errors(self) -> List[Error]:", "def getErrors(self) -> java.util.Collection:\n ...", "def get_errors(self):\n return [result for result in self.values() if result.outcome == Result.ERROR]", "def errors(self) -> List[Error]:\n return self._errors_files + list(self._errors.values())", "def retrieve_error_messages(self):\n return self.errors_seen[:]", "def getParseErrors(self):\n return [x for x in self.xeps if x.parseErrors]", "def GetAll(self):\n return self._errors.copy()", "def errors(self):\n return self._errors", "def errors():\n return THE_LOGGER.errors", "def errors (self):\n return self._errors", "def errors (self):\n return self._errors", "def getErrors(self):\n return self.errors", "def get_reports(self):\r\n result = QtSql.QSqlQuery('''Select * FROM failures''')\r\n list = []\r\n while result.next():\r\n failure = Failure(unicode(result.value(0).toString()), # id\r\n unicode(result.value(1).toString()), # comment\r\n unicode(result.value(2).toString()), # indicator\r\n bool(result.value(3))) # release\r\n p = self.get_presentation(failure.talkId)\r\n r = Report(p, failure)\r\n list.append(r)\r\n return list", "def errors(self):\n return self.__errors", "def get_errors(self):\n df = self.get_events()\n return df[df.error.notnull()]", "def errors(self) -> List[Error]:\n # May have inherited errors with a different path.\n for error in self._errors.values():\n error.path = self.path\n if self.is_removed: # Mark all of our errors as non-persistant.\n error.is_persistant = False\n return list(self._errors.values())", "def all_errors(self) -> List[XMLSchemaParseError]:\n errors = []\n for comp in self.iter_components():\n if comp.errors:\n errors.extend(comp.errors)\n return errors", "def formatErrors(self):\n errorlist = []\n xepsWithErrors = sorted(\n set(self.getParseErrors() + self.getBuildErrors()),\n key=lambda x: str(x))\n if self.getErrors() or xepsWithErrors:\n if self.getErrors():\n errorlist.append(\"********** Read errors **********\")\n for error in self.getErrors():\n errorlist.append(error)\n for xep in xepsWithErrors:\n errorlist.append(\n \"********** Error report for {} **********\".format(str(xep)))\n if xep.parseErrors:\n errorlist.append(\"********** Parsing Errors **********\")\n errors = list(set(xep.parseErrors))\n for error in errors:\n errorlist.append(error)\n if xep.buildErrors:\n errorlist.append(\"********** Build Errors **********\")\n for error in xep.buildErrors:\n if len(error.splitlines()) > 4:\n error = ''.join(error.splitlines()[:4])\n errorlist.append(error)\n return '\\n'.join(errorlist)\n else:\n return None", "def Errors(self):\r\n\t\treturn self._get_attribute('errors')", "def error(self) -> list:\n return self.__err", "def getBuildErrors(self):\n return [x for x in self.xeps if x.buildErrors]", "def errors_fatal(self) -> List[Error]:", "def _filter_return_errors_list(self, url, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n if url in entry[\"request\"][\"url\"] and temp not in matches and entry[\"response\"][\"status\"] >= 400:\r\n print \"\\nRequest failed w/ \" + str(entry[\"response\"][\"status\"]) + \" error:\\n\" + entry[\"request\"][\"url\"]\r\n if entry[\"response\"][\"content\"].get(\"text\"):\r\n print \"RESPONSE: \" + str(entry[\"response\"][\"content\"][\"text\"].encode('ascii', 'ignore'))\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 
'ignore')\r\n matches.append([temp,entry[\"response\"][\"content\"].get(\"text\",\"\")])\r\n return matches", "def Errcheck(self) -> list:\n\n myError = []\n\n ErrorList = self.myFieldFox.query(\"SYST:ERR?\").split(',')\n\n Error = ErrorList[0]\n\n if int(Error) == 0:\n\n print (\"+0, No Error!\")\n\n else:\n\n while int(Error)!=0:\n\n print (\"Error #: \" + ErrorList[0])\n\n print (\"Error Description: \" + ErrorList[1])\n\n myError.append(ErrorList[0])\n\n myError.append(ErrorList[1])\n\n ErrorList = self.myFieldFox.query(\"SYST:ERR?\").split(',')\n\n Error = ErrorList[0]\n\n myError = list(myError)\n\n return myError", "def analysis_errors(self) -> str:\n errors = []\n\n # Get any callback errors\n for cid, callback in self._analysis_callbacks.items():\n if callback.status == AnalysisStatus.ERROR:\n errors.append(f\"\\n[Analysis Callback ID: {cid}]: {callback.error_msg}\")\n\n return \"\".join(errors)", "def check_errors(self):\n\n errors = []\n while True:\n err = self.values(\"SYST:ERR?\")\n if int(err[0]) != 0:\n errmsg = \"Agilent 5313xA: {0}: {1}\".format(err[0], err[1])\n log.error(errmsg + '\\n')\n errors.append(errmsg)\n else:\n break\n\n return errors", "def get_error(self) -> List[str]:\n return []", "def get_error(self) -> List[str]:\n return []", "def Errors(self):\n return self._get_attribute('errors')", "def get_validation_errors(self):\n return [err.to_dict() for err in self._schema.validator.validation_errors]", "def tasks_with_errors(self):\n errs = []\n while True:\n try:\n errs.append(self._errq.get_nowait())\n except Queue.Empty:\n break\n return errs", "def _get_errors(exc):\n if hasattr(exc, 'message'):\n errors = exc.messages\n else:\n errors = [str(exc)]\n return errors", "def _get_resends(self):\n if not self.has_error():\n return []\n\n errors = []\n i = 0\n for item in self.my_json['results']:\n if item.has_key('error') and item['error'] == 'Unavailable':\n errors.append((i, item['error']))\n i += 1\n return errors", "def refined_errors(self):\r\n errs = []\r\n for err in self.errors:\r\n if err['typo'].lower() not in self.terms:\r\n errs.append(err)\r\n return errs", "def flushErrors(exc_type: type) -> list[Exception]:\n # There is no public API for flushing logged errors if you're not\n # using one of trial's TestCase classes...\n from twisted.trial.runner import _logObserver # type: ignore[attr-defined]\n\n result = _logObserver.flushErrors(exc_type)\n assert isinstance(result, list)\n return result", "def errors(self):\n return self._properties.get(\"errors\")", "def report_transaction_error_messages(self):\n response = self.__get_transaction_response()\n\n # get response data from response object\n response_data = response.json()\n\n # get error messages\n response_error = response_data['Error']\n response_error_messages = response_error['messages']\n\n # add all error messages to the report\n error_messages_to_report = []\n for response_error_message in response_error_messages:\n error_description = response_error_message['description']\n error_messages_to_report.append(error_description)\n\n return error_messages_to_report", "def validation_errors(self):\n return self._validation_errors", "def date_errors(self):\r\n try:\r\n _date_errors = self._validate_date(self.sourceDateCol)\r\n return _date_errors\r\n except:\r\n return None", "def error_report():\n db, c = connect(DBNAME)\n c.execute(\"select to_char(time,'FMMonth DD, YYYY') as date, \"\n \"round((sum(case when status = '200 OK' \"\n \"then 0 else 1 end)::decimal / count(*)) * 100,2) \"\n \"as 
percent_error from log group by date \"\n \"having (sum(case when status = '200 OK' \"\n \"then 0 else 1 end)::decimal / count(*)) * 100 > 1\")\n error_table = c.fetchall()\n db.close()\n print \"\\nDates on Which Over 1% of Requests Led to Errors:\"\n for error in error_table:\n if __name__ == '__main__':\n print str(error[0]) + \" - \" + str(error[1]) + \"%\"", "def get_form_errors(form):\n all_errors = []\n for field in form.errors:\n all_errors += form.errors[field]\n return all_errors", "def get_all_failures(self):\n return self._get_filtered_results(success=False)", "def error_messages(self) -> List[str]:\n spatial_msgs = []\n temporal_msgs = []\n if self.spatial:\n spatial_msgs = [m for v, m in self.spatial_validations if not v(self.spatial)]\n if self.temporal:\n temporal_msgs = [m for v, m in self.temporal_validations if not v(self.temporal)]\n\n return spatial_msgs + temporal_msgs", "def errors(self) -> Tuple[MqexsErrorInfo, ...]:\n return self.__errors", "def get_error_messages(self):\n\n if len(self._sensor_results_list) == 0:\n return\n\n error_msgs = []\n\n for reading in self._sensor_results_list:\n if reading.is_error():\n error_msgs.append(reading.get_error_msg())\n\n if len(error_msgs) > 0:\n return error_msgs\n else:\n return \"No Error Readings\"", "def errors(self):\r\n if not hasattr(self, '_errors_cache'):\r\n self._errors_cache = self.form.get_field_errors(self)\r\n return self._errors_cache", "def get_diagnostics(self) -> List[Diagnostic]:\n raise NotImplementedError", "def _get_errors(sheet, row, col):\n field = _FIELDS['primary data']\n val = sheet.cell(row + field['row'], col + field['column']).value\n if not val:\n return []\n final_row = row + field['row']\n error = sheet.cell(final_row, col + field['column']).value\n while error:\n final_row += 1\n error = sheet.cell(final_row, col + field['column']).value\n return [sheet.cell(i, col + field['column']).value\n for i in range(row + field['row'], final_row)]", "def all_errata(self):\n return self._all_errata", "def get_errors(self, path: str,\n is_ancillary: bool = False,\n is_system: bool = False,\n is_removed: bool = False) -> List[str]:", "def output_errors(self) -> List[str]:\n output = list()\n for error in sorted(self.errors):\n if len(error) == 2:\n line = f\"{error[0]} - Could not find {error[1]} in map names!\"\n else:\n line = f\"{error[0]} - {error[1]}: Could not find {error[2]} in map names!\"\n logger.error(line)\n output.append(line)\n return output", "def _pydantic_errors_to_validation_results(\n errors: list[dict | Exception] | ValidationError,\n file_path: Path,\n scope: Scope,\n) -> list[ValidationResult]:\n out = []\n for e in (\n errors.errors() if isinstance(errors, ValidationError) else cast(list, errors)\n ):\n if isinstance(e, Exception):\n message = getattr(e, \"message\", str(e))\n id = \"exception\"\n scope = Scope.FILE\n else:\n id = \".\".join(\n filter(\n bool,\n (\n \"dandischema\",\n e.get(\"type\", \"UNKNOWN\"),\n \"+\".join(e.get(\"loc\", [])),\n ),\n )\n )\n message = e.get(\"message\", e.get(\"msg\", None))\n out.append(\n ValidationResult(\n origin=ValidationOrigin(\n name=\"dandischema\",\n version=dandischema.__version__,\n ),\n severity=Severity.ERROR,\n id=id,\n scope=scope,\n path=file_path,\n message=message,\n # TODO? dataset_path=dataset_path,\n # TODO? 
dandiset_path=dandiset_path,\n )\n )\n return out", "def get_errors(response):\n errors = response.get(\"error\")\n if errors:\n return [e.get(\"message\") for e in errors]\n return None", "def errors_fatal(self) -> List[Error]:\n return self._errors_fatal_files + self._errors_fatal", "def _get_retriable_errors(out: List[str]) -> List[str]:\n return [\n line for line in out\n if any(error in line for error in RETRIABLE_ERRORS)\n ]", "def get_errors(self, request):\n\n value = request._get_parameter_value(self)\n return value.errors", "def errors(self):\n raise NotImplementedError", "def recorded_messages(self):\n messages = []\n for time in sorted(self.reception_records):\n messages.extend(self.reception_records[time])\n return messages", "def warnings(self) -> List[Error]:", "def get_aggregated_exceptions(self) -> Payload:\n return Payload(aggregated_errors=list(self._aggregated_exceptions.values()))", "def get_errors(self, path: str,\n is_ancillary: bool = False,\n is_system: bool = False,\n is_removed: bool = False) -> List[str]:\n u_file = self.__api.files.get(path, is_ancillary=is_ancillary,\n is_system=is_system,\n is_removed=is_removed)\n return [e.message for e in u_file.errors]", "def check_set_errors(self):\n response = self.read()\n return [] if response == \"\" else [response]", "def getErrors(self):\n errorList = []\n\n # E0\n try:\n if not self.e0.isValid():\n errorList.append(\"Invalid first error axis in ErrorEllipse Class\")\n except (NameError, AttributeError):\n errorList.append(\"No first error axis in ErrorEllipse Class.\")\n\n # E1\n try:\n if not self.e1.isValid():\n errorList.append(\"Invalid second error axis in ErrorEllipse Class\")\n except (NameError, AttributeError):\n errorList.append(\"No second error axis in ErrorEllipse Class.\")\n\n # E2\n try:\n if not self.e2.isValid():\n errorList.append(\"Invalid third error axis in ErrorEllipse Class\")\n except (NameError, AttributeError):\n errorList.append(\"No third error axis in ErrorEllipse Class.\")\n\n # maximumHorizontalProjection\n try:\n self.maximumHorizontalProjection\n except (NameError, AttributeError):\n errorList.append(\"No MaximumHorizontalProjection in ErrorEllipse Class.\")\n\n # maximumVerticalProjection\n try:\n self.maximumVerticalProjection\n except (NameError, AttributeError):\n errorList.append(\"No MaximumVerticalProjection in ErrorEllipse Class\")\n\n # equivalentHorizontalRadius\n try:\n self.equivalentHorizontalRadius\n except (NameError, AttributeError):\n errorList.append(\"No EquivalentHorizontalRadius in ErrorEllipse class\")\n\n return errorList", "def error_wrapper(x):\n errors = list()\n for error_key, error_list in list(x.items()):\n for error in error_list:\n if error_key == 'non_field_errors':\n errors.append(error)\n else:\n errors.append(\"%s: %s\" % (error_key, error))\n return errors", "def err_rec(self):\n if len(self.dt_list) < self.err_len:\n self.err_len = len(self.dt_list)\n\n last_dt_sum = sum(self.dt_list[int(-1 * self.err_len):])\n last_dt = self.dt_list[-8]\n\n # Records the discrepancies\n self.lin_int_err.append(abs(self.lin_int_est_next[0] - last_dt_sum))\n self.lin_int_no_err.append(abs(self.lin_int_no_est_next[0] - last_dt_sum))\n self.average_int_err.append(abs(self.average_int_est_next[0] - last_dt_sum))\n self.average_int_no_err.append(abs(self.average_int_no_est_next[0] - last_dt_sum))\n\n self.lin_err.append(abs(self.lin_est_next[0] - last_dt_sum))\n self.lin_no_err.append(abs(self.lin_no_est_next[0] - last_dt_sum))\n 
self.average_err.append(abs(self.average_est_next[0] - last_dt_sum))\n self.average_no_err.append(abs(self.average_no_est_next[0] - last_dt_sum))\n\n # Removes the oldest entry\n self.lin_err.pop(0)\n self.average_err.pop(0)", "def getLogs():\n # in flux, it may be possible to provide more structured information\n # like python Failure instances", "def _filter_return_errors(self, url, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n if url in entry[\"request\"][\"url\"] and temp not in matches and entry[\"response\"][\"status\"] >= 400:\r\n print \"\\nRequest failed w/ \" + str(entry[\"response\"][\"status\"]) + \" error:\\n\" + entry[\"request\"][\"url\"]\r\n if entry[\"response\"][\"content\"].get(\"text\"):\r\n print \"RESPONSE: \" + str(entry[\"response\"][\"content\"][\"text\"].encode('ascii', 'ignore'))\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n matches.append(temp)\r\n return matches", "def get_error_days():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # dividing views of bad requests and total request to get percentage\n c.execute(\"select bad_request.time, \"\n \"(bad_request.num * 1.0 / total_request.num) as errors \"\n \"from bad_request, total_request \"\n \"where bad_request.time = total_request.time \"\n \"and (bad_request.num * 1.0 / total_request.num) > 0.01\")\n results = c.fetchall()\n text_file = open(\"text.txt\", \"a+\") # append to text file\n text_file.write(\"Day(s) where more than 1 percent of requests were errors:\"\n \"\\n\\n\")\n for time, errors in results:\n text_file.write(time.strftime('%B %d, %Y') + \" - \" +\n str(errors * 100)[:3] + \"% errors\\n\")\n text_file.write(\"\\n\")\n text_file.close()\n db.close()", "def getFatalErrors(self):\n global hadFatalErrors\n if hadFatalErrors:\n text = '\\n'.join(hadFatalErrors)\n hadFatalErrors = []\n return text", "def time_errors(self):\r\n try:\r\n _time_errors = self._validate_time(self.sourceTimeCol)\r\n return _time_errors\r\n except:\r\n return None", "def errors(self) -> pulumi.Output[Sequence['outputs.BatchAIErrorResponse']]:\n return pulumi.get(self, \"errors\")", "def filter_draft_errors(result):\n error_messages = []\n for field, msgs in result.get('messages', {}).items():\n if msgs.get('state', None) == 'error':\n for m in msgs['messages']:\n error_messages.append(dict(\n field=field,\n message=m,\n code=error_codes['validation_error'],\n ))\n return error_messages", "def _parse_file_errors(self, file):\n\n with open(file, 'r') as lines:\n errors = []\n\n for line in lines:\n if self.log_regexp.TRACEBACK.match(line):\n errors.append({})\n\n if self.log_regexp.ERROR_PLACE.match(line):\n code_line = re.search(self.log_regexp.ERROR_LINE, line)[0]\n error_file = re.search(self.log_regexp.ERROR_FILE, line)[0]\n errors[-1][self.output_params.LINE] = int(code_line.split()[1])\n errors[-1][self.output_params.ERROR_FILE] = error_file.strip('\"')\n\n if self.log_regexp.ERROR_TYPE.match(line):\n type_error = re.search(self.log_regexp.ERROR_TYPE, line)[0]\n message = line[len(type_error)+1:]\n errors[-1].update({self.output_params.TYPE: type_error})\n errors[-1].update({self.output_params.MESSAGE: message.strip()})\n\n return errors", "def failing_periods(self) -> 'outputs.DynamicThresholdFailingPeriodsResponse':\n return pulumi.get(self, \"failing_periods\")", "def error_dates():\n\n results = query_database(QUERIES[2])\n print('\\nOn which 
days did more than 1% of requests lead to errors?\\n')\n for date, rate in results:\n print(' * {} -- {:.2%}'.format(date, rate))", "def _step_errors(sequence: Sequence) -> List[TransformationMetrics]:\n errors = [TransformationMetrics.calc(t_pred, t_gt, time)\n for t_pred, t_gt, time in zip(sequence.prediction.transforms, sequence.ground_truth.transforms,\n sequence.times)]\n return errors", "def get_failed_spectra_list(diag_workspace):\n if type(diag_workspace) == str:\n diag_workspace = mtd[diag_workspace]\n\n if hasattr(diag_workspace, \"getAxis\") == False:\n raise ValueError(\"Invalid input to get_failed_spectra_list. \"\n \"A workspace handle or name is expected\")\n \n spectra_axis = diag_workspace.getAxis(1)\n failed_spectra = []\n for i in range(diag_workspace.getNumberHistograms()):\n try:\n det = diag_workspace.getDetector(i)\n except RuntimeError:\n continue\n if det.isMasked():\n failed_spectra.append(spectra_axis.spectraNumber(i))\n\n return failed_spectra", "def get_validation_errors(\n self,\n schema_version: Optional[str] = None,\n devel_debug: bool = False,\n ) -> list[ValidationResult]:\n ...", "def getFailedJobs(self):\n return self.__failedJobs", "def listErrors(individual): #TODO: add other errors implemented elsewhere\n\n results = [birthBeforeMarriage(individual), birthBeforeDeath(individual), marriageBeforeDeath(individual), \n datesBeforeCurrentDate(individual), noBigamy(individual)]\n results = [x for x in results if x is not None]\n return not all(results)", "def get_fails(self):\n return [result for result in self.values() if result.outcome == Result.FAIL]", "def parse(self, errors, explicit_ignore):\n\n error_list = []\n if errors is None:\n return error_list\n\n errors.sort(key=linter.cmp_to_key(lambda a, b: a.lineno < b.lineno))\n for error in errors:\n error_level = 'W' if not hasattr(error, 'level') else error.level\n message = error.message.capitalize()\n\n error_data = {\n 'underline_range': False,\n 'level': error_level,\n 'lineno': error.lineno,\n 'message': message,\n 'raw_error': str(error)\n }\n if hasattr(error, 'offset'):\n error_data['offset'] = error.offset\n elif hasattr(error, 'col'):\n error_data['offset'] = error.col\n\n if (isinstance(error, (linter.OffsetError))):\n error_data['underline_range'] = True\n error_list.append(error_data)\n elif (isinstance(\n error, (\n pyflakes.messages.RedefinedWhileUnused,\n pyflakes.messages.RedefinedInListComp,\n pyflakes.messages.UndefinedName,\n pyflakes.messages.UndefinedExport,\n pyflakes.messages.UndefinedLocal,\n pyflakes.messages.UnusedVariable)) and\n error.__class__.__name__ not in explicit_ignore):\n\n error_data['len'] = len(error.message_args[0])\n error_data['regex'] = (\n r'((and|or|not|if|elif|while|in)\\s+|[+\\-*^%%<>=\\(\\{{])*\\s'\n '*(?P<underline>[\\w\\.]*{0}[\\w]*)'.format(re.escape(\n error.message_args[0]\n ))\n )\n error_list.append(error_data)\n elif isinstance(error, pyflakes.messages.ImportShadowedByLoopVar):\n regex = 'for\\s+(?P<underline>[\\w]*{0}[\\w*])'.format(\n re.escape(error.message_args[0])\n )\n error_data['regex'] = regex\n error_list.append(error_data)\n elif (isinstance(\n error, (\n pyflakes.messages.UnusedImport,\n pyflakes.messages.ImportStarUsed)) and\n error.__class__.__name__ not in explicit_ignore):\n if isinstance(error, pyflakes.messages.ImportStarUsed):\n word = '*'\n else:\n word = error.message_args[0]\n\n linematch = '(from\\s+[\\w_\\.]+\\s+)?import\\s+(?P<match>[^#;]+)'\n r = '(^|\\s+|,\\s*|as\\s+)(?P<underline>[\\w]*{0}[\\w]*)'.format(\n 
re.escape(word)\n )\n error_data['regex'] = r\n error_data['linematch'] = linematch\n error_list.append(error_data)\n elif (isinstance(error, pyflakes.messages.DuplicateArgument) and\n error.__class__.__name__ not in explicit_ignore):\n regex = 'def [\\w_]+\\(.*?(?P<underline>[\\w]*{0}[\\w]*)'.format(\n re.escape(error.message_args[0])\n )\n error_data['regex'] = regex\n error_list.append(error_data)\n elif isinstance(error, pyflakes.messages.LateFutureImport):\n pass\n elif isinstance(error, linter.PythonError):\n print(error)\n else:\n print(\n 'Ooops, we missed an error type for pyflakes', type(error)\n )\n\n return error_list", "def failed_items(self) -> ItemLog:\n if self._failed_items is None:\n self._failed_items = ItemLog(self.dir / 'dropped-failed.log.csv', DROPPED_FAILED_FIELDNAMES, 'id')\n return self._failed_items", "def errors(self):\n _errors = {}\n # pylint: disable=no-member\n for name, field in self._fields.items():\n if field.errors:\n _errors[name] = field.errors.pop()\n\n return _errors", "def warnings(self) -> List[Error]:\n return self._get_warnings()", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def getErrorReport(self):\n return self.sError;", "def getErrorReport(self):\n return self.sError;", "def job_errors(self) -> str:\n errors = []\n\n # Get any job errors\n for job in self._jobs.values():\n if job and job.status() == JobStatus.ERROR:\n if hasattr(job, \"error_message\"):\n error_msg = job.error_message()\n else:\n error_msg = \"\"\n errors.append(f\"\\n[Job ID: {job.job_id()}]: {error_msg}\")\n\n # Get any job futures errors:\n for jid, fut in self._job_futures.items():\n if fut and fut.done() and fut.exception():\n ex = fut.exception()\n errors.append(\n f\"[Job ID: {jid}]\"\n \"\\n\".join(traceback.format_exception(type(ex), ex, ex.__traceback__))\n )\n return \"\".join(errors)", "def get_alerts(self):\n url = 'http://www.p2000-online.net/p2000.php?%s&nofilterform=1'\n url = url % '&'.join(['%s=1' % x for x in self.regions])\n if self.verbose:\n print time.ctime(), url\n try:\n data = urllib.urlopen(url).read()\n except IOError:\n if self.verbose:\n import traceback\n traceback.print_exc()\n return []\n\n doc = soup(data)\n alerts = []\n table = doc.body('table', {'style': 'align:center'})[0]\n for tr in table('tr'):\n if tr.td.get('class', None) == 'DT':\n alerts.append(Alert(*[x.text for x in tr('td')]))\n else:\n recipient = tr('td')[-1].text\n if recipient != '&nbsp;':\n 
alerts[-1].recipients.append(recipient)\n return alerts", "def __call__(self, errors: List[float]) -> List[float]:", "def get_errors(self):\n errors = []\n\n if not self.title:\n msg = 'Title not found: {0}'.format(self.number)\n print(msg)\n errors.append(msg)\n\n if not self.ref:\n msg = 'Ref not found: {0}'.format(self.number)\n print(msg)\n errors.append(msg)\n\n chapter_index = int(self.number) - 1\n\n # get the expected number of frames for this chapter\n expected_frame_count = chapters_and_frames.frame_counts[chapter_index]\n\n for x in range(1, expected_frame_count + 1):\n\n # frame id is formatted like '01-01'\n frame_id = '{0}-{1}'.format(self.number.zfill(2), str(x).zfill(2))\n\n # get the next frame\n frame = next((f for f in self.frames if f['id'] == frame_id), None) # type: dict\n if not frame:\n msg = 'Frame not found: {0}'.format(frame_id)\n print(msg)\n errors.append(msg)\n else:\n # check the frame img and values\n if 'img' not in frame or not frame['img']:\n msg = 'Attribute \"img\" is missing for frame {0}'.format(frame_id)\n print(msg)\n errors.append(msg)\n\n if 'text' not in frame or not frame['text']:\n msg = 'Attribute \"text\" is missing for frame {0}'.format(frame_id)\n print(msg)\n errors.append(msg)\n\n return errors", "def errors(self) -> str:\n return self.job_errors() + self.analysis_errors()", "def errDays():\n c = db.cursor()\n c.execute(\"select date, percent from avg_error\\\n where percent > 1.00;\")\n results = c.fetchall()\n c.close()\n return results", "def json(self):\n d = [err.json for err in self.errors]\n return d" ]
[ "0.7021791", "0.70215106", "0.6847071", "0.6754863", "0.6702647", "0.6692272", "0.65891427", "0.65589136", "0.65341383", "0.65235007", "0.65032333", "0.65032333", "0.6477152", "0.6440303", "0.642983", "0.6413375", "0.6399626", "0.6374042", "0.6354887", "0.6273033", "0.62713027", "0.62526685", "0.62447596", "0.62249994", "0.6223605", "0.62194806", "0.620146", "0.6165684", "0.6165684", "0.6119376", "0.6090197", "0.60822266", "0.607846", "0.6057128", "0.6053458", "0.59767246", "0.591607", "0.5914257", "0.5898345", "0.58874625", "0.5881139", "0.58664495", "0.58615905", "0.5856585", "0.5831075", "0.5809601", "0.5794592", "0.57865477", "0.5765051", "0.57592994", "0.57558554", "0.5746094", "0.57423496", "0.57266444", "0.57260984", "0.5724483", "0.5722599", "0.5718919", "0.57043016", "0.56999606", "0.567887", "0.56754166", "0.5673889", "0.5672582", "0.5659258", "0.56456894", "0.5644008", "0.56153643", "0.5607474", "0.56057703", "0.5600659", "0.5588476", "0.5578138", "0.55776983", "0.55689037", "0.5560539", "0.5550898", "0.5548416", "0.55364776", "0.55314523", "0.5522481", "0.5513594", "0.55006075", "0.5498907", "0.5474333", "0.5464376", "0.54519504", "0.54519504", "0.54519504", "0.54519504", "0.54519504", "0.54490054", "0.54490054", "0.54457843", "0.5445546", "0.54447746", "0.5437848", "0.5430879", "0.5413174", "0.54126525" ]
0.59131587
38
Returns a list of slow transaction data collected during the reporting period.
def transaction_trace_data(self, connections): _logger.debug('Generating transaction trace data.') if not self.__settings: return [] # Create a set 'traces' that is a union of slow transaction, # and Synthetics transactions. This ensures we don't send # duplicates of a transaction. traces = set() if self.__slow_transaction: traces.add(self.__slow_transaction) traces.update(self.__synthetics_transactions) # Return an empty list if no transactions were captured. if not traces: return [] # We want to limit the number of explain plans we do across # these. So work out what were the slowest and tag them. # Later the explain plan will only be run on those which are # tagged. agent_limits = self.__settings.agent_limits explain_plan_limit = agent_limits.sql_explain_plans_per_harvest maximum_nodes = agent_limits.transaction_traces_nodes database_nodes = [] if explain_plan_limit != 0: for trace in traces: for node in trace.slow_sql: # Make sure we clear any flag for explain plans on # the nodes in case a transaction trace was merged # in from previous harvest period. node.generate_explain_plan = False # Node should be excluded if not for an operation # that we can't do an explain plan on. Also should # not be one which would not be included in the # transaction trace because limit was reached. if (node.node_count < maximum_nodes and node.connect_params and node.statement.operation in node.statement.database.explain_stmts): database_nodes.append(node) database_nodes = sorted(database_nodes, key=lambda x: x.duration)[-explain_plan_limit:] for node in database_nodes: node.generate_explain_plan = True else: for trace in traces: for node in trace.slow_sql: node.generate_explain_plan = True database_nodes.append(node) # Now generate the transaction traces. We need to cap the # number of nodes capture to the specified limit. trace_data = [] for trace in traces: transaction_trace = trace.transaction_trace( self, maximum_nodes, connections) data = [transaction_trace, list(trace.string_table.values())] if self.__settings.debug.log_transaction_trace_payload: _logger.debug('Encoding slow transaction data where ' 'payload=%r.', data) json_data = json_encode(data) level = self.__settings.agent_limits.data_compression_level level = level or zlib.Z_DEFAULT_COMPRESSION zlib_data = zlib.compress(six.b(json_data), level) pack_data = base64.standard_b64encode(zlib_data) if six.PY3: pack_data = pack_data.decode('Latin-1') root = transaction_trace.root if trace.record_tt: force_persist = True else: force_persist = False if trace.include_transaction_trace_request_uri: request_uri = trace.request_uri else: request_uri = None trace_data.append([transaction_trace.start_time, root.end_time - root.start_time, trace.path, request_uri, pack_data, trace.guid, None, force_persist, None, trace.synthetics_resource_id, ]) return trace_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slow_transaction_data(self):\n\n # XXX This method no longer appears to be used. Being replaced\n # by the transaction_trace_data() method.\n\n if not self.__settings:\n return []\n\n if not self.__slow_transaction:\n return []\n\n maximum = self.__settings.agent_limits.transaction_traces_nodes\n\n transaction_trace = self.__slow_transaction.transaction_trace(\n self, maximum)\n\n data = [transaction_trace,\n list(self.__slow_transaction.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n trace_data = [[root.start_time,\n root.end_time - root.start_time,\n self.__slow_transaction.path,\n self.__slow_transaction.request_uri,\n pack_data]]\n\n return trace_data", "def return_trade_history(\n self,\n start: Timestamp,\n end: Timestamp,\n ) -> list[dict[str, Any]]:\n limit = 100\n data: list[dict[str, Any]] = []\n start_ms = start * 1000\n end_ms = end * 1000\n while True:\n new_data = self.api_query_list('/trades', {\n 'startTime': start_ms,\n 'endTime': end_ms,\n 'limit': limit,\n })\n results_length = len(new_data)\n if data == [] and results_length < limit:\n return new_data # simple case - only one query needed\n\n latest_ts_ms = start_ms\n # add results to data and prepare for next query\n existing_ids = {x['id'] for x in data}\n for trade in new_data:\n try:\n timestamp_ms = trade['createTime']\n latest_ts_ms = max(latest_ts_ms, timestamp_ms)\n # since we query again from last ts seen make sure no duplicates make it in\n if trade['id'] not in existing_ids:\n data.append(trade)\n except (DeserializationError, KeyError) as e:\n msg = str(e)\n if isinstance(e, KeyError):\n msg = f'Missing key entry for {msg}.'\n self.msg_aggregator.add_warning(\n 'Error deserializing a poloniex trade. Check the logs for details',\n )\n log.error(\n 'Error deserializing poloniex trade',\n trade=trade,\n error=msg,\n )\n continue\n\n if results_length < limit:\n break # last query has less than limit. 
We are done.\n\n # otherwise we query again from the last ts seen in the last result\n start_ms = latest_ts_ms\n continue\n\n return data", "def get_reports(self):\r\n return sorted(self._reports,\r\n key=lambda x: x['stats']['totalTimeMillis'],\r\n reverse=True)", "def get_reports(self):\n return sorted(self._reports,\n key=lambda x: x['stats']['totalTimeMillis'],\n reverse=True)", "def slow_queries(self):\n request = Request(method=\"get\", endpoint=\"/query/slow\")\n\n def response_handler(resp):\n if not resp.is_success:\n raise C8QLQueryListError(resp, request)\n return self._format_queries(resp.body)\n\n return self._execute(request, response_handler)", "def showTransactions(self):\n self.scanTransactions()\n txns = []\n\n # Summarize the stats\n for x in range(len(self._trans)):\n stats = self._trans[x]\n trans_time = 0\n remote_calls = 0\n for name, stat in stats:\n trans_time += stat.total_tt\n remote_calls += 1\n txns.append((x, trans_time, remote_calls))\n\n results = [\"TX#\\tTime\\tCalls\",\n \"=\" * 22]\n\n for item in txns:\n results.append(\"%3d\\t%4f\\t%5d\" % item)\n \n return \"\\n\".join(results)", "def fetch(self, daterange=(datetime.now() - timedelta(1), datetime.now())):\n cursor = self.conn.cursor()\n sql = 'SELECT measure_dt, ping, download, upload FROM speedlogs ' + \\\n ' WHERE measure_dt BETWEEN ? AND ?'\n cursor.execute(sql, daterange)\n return cursor.fetchall()", "def gather_data(self, *args, **kwargs):\n instrument_arg = kwargs.get('instrument', 'EUR_USD')\n granularity_arg = kwargs.get('granularity', 'M1')\n candle_format = kwargs.get('candleFormat', 'bidask')\n start_time = kwargs.get('start', '2014-10-01T00:00:00.000000Z')\n count_arg = kwargs.get('count', 5000)\n out_data = []\n data_complete = False\n while(not data_complete):\n response = self.oanda.get_history(instrument=instrument_arg,\n granularity=granularity_arg,\n candleFormat=candle_format,\n start=start_time,\n count=count_arg)\n raw_data = response['candles']\n if (len(out_data) == 0):\n out_data = out_data + raw_data\n elif (len(out_data) > 1):\n # raw_data[0] is already in out_data as raw_data[-1] from last\n # iteration\n out_data = out_data + raw_data[1:]\n start_time = raw_data[-1]['time']\n if (len(raw_data) < 5000):\n data_complete = True\n\n out_data = self._list_to_df(out_data)\n return out_data", "def get_gdax_historical_data():\n \n start = None\n while not start:\n start,end,tid = getStartAndEndHistoric()\n if not start:\n time.sleep(60)\n #Todo: change this to 1min\n firsttimestamp = start\n engine = sa.create_engine(sql_address)\n products = [\"LTC-USD\",\"LTC-BTC\",\"ETH-USD\",\"ETH-BTC\",\"BTC-USD\"]\n public_client = gdax.PublicClient()\n deltat = datetime.timedelta(seconds = 200)\n timewindows = []\n while end - start > datetime.timedelta(seconds=0):\n if start + deltat > end:\n endx = end\n else:\n endx = start + deltat\n timewindows.append([start,endx])\n start += deltat\n results = []\n total = len(timewindows)\n current_idx = 0\n timeold = time.time()\n numofqueries = 0\n engine = sa.create_engine(sql_address)\n Base.metadata.bind = engine\n DBSession = sa.orm.sessionmaker()\n DBSession.bind = engine\n session = DBSession()\n for startx,endx in timewindows:\n\n current_idx += 1\n for i in products:\n repeat = True\n while repeat:\n\n #delay if ratelimts are close\n if numofqueries < 3:\n while time.time() - timeold < 1:\n time.sleep(0.05)\n \n timeold = time.time()\n numofqueries = 0\n try:\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, 
granularity = 1)\n except:\n time.sleep(30)\n public_client = gdax.PublicClient()\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n numofqueries += 1\n\n #rate limit exceeded has 'message' as dict.\n if not 'message' in alist:\n repeat = False\n for a in alist:\n a[0] = datetime.datetime.fromtimestamp(float(a[0]))\n tmp = i.split('-')\n d = dict(coin = tmp[0], currency = tmp[1], timestamp = a[0], low=a[1], high=a[2], open=a[3], close=a[4], volume=a[5])\n results.append(d)\n lasttimestamp = a[0]\n\n #upload with batch size of 10000\n if len(results) > 10000:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n if len(results) > 0:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n\n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n update.status='complete'\n order = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'order')).first()\n order.status='complete'\n session.commit()", "def timings(self):\r\n return self._timings", "def _get_meas_times_from_db(self):\n meas_times = []\n if self._data['report_save_historical_instances_ind'] != 'Y':\n # for non historical reports take measurement time from saved dataset\n dataset = self._jfile.get_current_stored_dataset()\n try:\n meas_time = datetime.datetime.strptime(dataset['meas_time'], '%Y-%m-%d %H:%M:%S')\n except ValueError:\n raise Exception(\"Cannot unformat string %s to datetime\" % dataset['meas_time'])\n meas_times.append(meas_time)\n\n else:\n # for historical reports take measurement times from db datasets\n where_sql = ''\n where_sql_list = list()\n params = [self._id, self._segment_value_id]\n\n if self._process_dataset_ids:\n for dataset_id in self._process_dataset_ids:\n if type(dataset_id) == list:\n where_sql_list.append(\"(report_data_set_instance_id >= %s AND report_data_set_instance_id <= %s)\")\n if dataset_id[0] < dataset_id[1]:\n params.append(dataset_id[0])\n params.append(dataset_id[1])\n else:\n params.append(dataset_id[1])\n params.append(dataset_id[0])\n else:\n where_sql_list.append(\"report_data_set_instance_id = %s\")\n params.append(dataset_id)\n where_sql = ' AND (%s)' % ' OR '.join(where_sql_list)\n\n self._db.Query(\"\"\"SELECT measurement_time\n 
FROM report_data_set_instance\n WHERE\n `element_id`= %%s\n AND segment_value_id = %%s\n %s\n ORDER BY measurement_time ASC\"\"\" % where_sql, tuple(params))\n meas_times = [item['measurement_time'] for item in self._db.record]\n\n return meas_times", "def getTransferListSummary(self):\n p_ids_and_prices = {}\n players = self.getAllPlayerInfoTransferlist()\n\n # Get IDs of all players\n log_event(self.queue, \"Gathering player prices... \")\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n # removed Filter for unlisted / expired players\n if p_id not in p_ids_and_prices:\n p_sellprice = self.getPlayerSellPrice(p_id)\n # If sell price returns 0, need to fetch from Futbin\n if p_sellprice == 0:\n p_sellprice = self.getFutbinPrice_opentab(p_id)\n self.sleep_approx(5) # Delay iteration to not anger futbin\n # Add player ID and price to dict\n p_ids_and_prices[p_id] = p_sellprice\n\n for p_id in p_ids_and_prices:\n p_price = p_ids_and_prices[p_id]\n p_name = self.getPlayerCardName(p_id)\n log_event(self.queue, str(p_name) + \" - #\" +\n str(p_id) + \" Price \" + str(p_price))\n\n num_p_sold = 0\n num_p_expired = 0\n num_p_unlisted = 0\n num_p_listed = 0\n\n sold_p_value = 0\n expired_p_value = 0\n unlisted_p_value = 0\n listed_p_value = 0\n\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n p_soldprice = p[5] # is 0 if unlisted\n p_sellprice = int(p_ids_and_prices[p_id])\n\n if \"won\" in p_bidstatus:\n num_p_sold += 1\n sold_p_value += p_soldprice\n if \"expired\" in p_bidstatus:\n num_p_expired += 1\n expired_p_value += p_sellprice\n if (p_bidstatus == \"listFUTItem\"):\n num_p_unlisted += 1\n unlisted_p_value += p_sellprice\n if (p_bidstatus == \"listFUTItem has-auction-data\"):\n num_p_listed += 1\n listed_p_value += p_sellprice\n\n log_event(self.queue, \"Players sold: \" + str(num_p_sold))\n log_event(self.queue, \"Players expired: \" + str(num_p_expired))\n log_event(self.queue, \"Players listed: \" + str(num_p_listed))\n log_event(self.queue, \"Players unlisted: \" + str(num_p_unlisted))\n log_event(self.queue, \" - - - \")\n log_event(self.queue, \"Sold players value: \" + str(sold_p_value))\n log_event(self.queue, \"Expired players value: \" +\n str(expired_p_value))\n log_event(self.queue, \"Unlisted players value: \" +\n str(unlisted_p_value))\n log_event(self.queue, \"Listed players value: \" + str(listed_p_value))\n\n # TODO subtract bought price\n self.user_players_won += int(num_p_unlisted)\n self.p_ids_and_prices = p_ids_and_prices\n intel = [p_ids_and_prices, num_p_sold, num_p_expired, num_p_unlisted,\n num_p_listed, sold_p_value, expired_p_value, unlisted_p_value, listed_p_value]\n return intel", "def describe_slow_log_records(\n self,\n request: dds_20151201_models.DescribeSlowLogRecordsRequest,\n ) -> dds_20151201_models.DescribeSlowLogRecordsResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_slow_log_records_with_options(request, runtime)", "def time_list(self):\n return (self.N_T * (np.arange(self.N_itr) + 1) /\n self.N_itr * 1000 * self.DT)", "def get_new_data(self):\n\n # record bar parse performance\n self.logger.debug(\"Started parsing new ticks.\")\n start_parse = time.time()\n for exchange in self.exchanges:\n exchange.parse_ticks()\n end_parse = time.time()\n duration = round(end_parse - start_parse, 5)\n\n self.logger.debug(\n \"Parsed \" + str(self.total_instruments) +\n \" instruments' ticks in \" + str(duration) + \" seconds.\")\n self.track_tick_processing_performance(duration)\n\n # wrap new 1 min bars in market events\n 
new_market_events = []\n for exchange in self.exchanges:\n bars = exchange.get_new_bars()\n for symbol in exchange.get_symbols():\n for bar in bars[symbol]:\n event = MarketEvent(exchange.get_name(), bar)\n new_market_events.append(event)\n # add bars to save-to-db-later queue\n # TODO: store new bars concurrently with a processpool\n self.bars_save_to_db.put(event)\n return new_market_events", "def timeseries_report(self):\n report = pd.DataFrame(index=self.price.index)\n report.loc[:, \"FR Energy Throughput (kWh)\"] = self.ene_results['ene']\n report.loc[:, \"FR Energy Throughput Up (Charging) (kWh)\"] = self.variables['regu_c']*self.krd_avg*self.dt*self.storage.rte\n report.loc[:, \"FR Energy Throughput Up (Discharging) (kWh)\"] = self.variables['regu_d']*self.krd_avg*self.dt\n report.loc[:, \"FR Energy Throughput Down (Charging) (kWh)\"] = self.variables['regd_c']*self.krd_avg*self.dt*self.storage.rte\n report.loc[:, \"FR Energy Throughput Down (Discharging) (kWh)\"] = self.variables['regd_d']*self.krd_avg*self.dt\n report.loc[:, \"FR Energy Settlement Price Signal ($/kWh)\"] = self.price\n report.loc[:, 'Regulation Up (Charging) (kW)'] = self.variables['regu_c']\n report.loc[:, 'Regulation Up (Discharging) (kW)'] = self.variables['regu_d']\n report.loc[:, 'Regulation Down (Charging) (kW)'] = self.variables['regd_c']\n report.loc[:, 'Regulation Down (Discharging) (kW)'] = self.variables['regd_d']\n report.loc[:, \"Regulation Up Price Signal ($/kW)\"] = self.p_regu\n report.loc[:, \"Regulation Down Price Signal ($/kW)\"] = self.p_regd\n\n return report", "async def get_trades(self) -> List[TradeRequest]:\n data = j.dumps({\n 'startindex': 0,\n 'statustype': 'inbound'\n })\n r = await self.request.request(url='https://www.roblox.com/my/money.aspx/getmyitemtrades', data=data, method='POST')\n data = json.loads(r.json()['d'])[\"Data\"]\n trades = []\n for trade in data:\n t = json.loads(trade)\n trades.append(TradeRequest(self.request, t['Date'], t['Expires'], t['TradePartner'], t['TradePartnerID'], t['Status'], t['TradeSessionID']))\n return trades", "def tobs():\n temps = engine.execute(\"SELECT date, tobs FROM Measurement WHERE date BETWEEN '2016-08-23' AND '2017-08-23'\").fetchall()\n\n # Convert list of tuples into normal list\n temps_list = list(np.ravel(temps))\n\n return jsonify(temps_list)", "def get_pending_transactions():\n\n return History.get_pending().get()", "def get_all_coins_history(self, end_date=None, start_date=None, verbose=True):\n infos = []\n for coin in self.get_coins():\n if verbose:\n print(\"Collecting data for >> \" + coin)\n if start_date:\n start_date = start_date\n else:\n start_date = '20130428'\n if end_date:\n end_date = end_date\n else:\n now = str(datetime.now().date()).replace('-', '')\n end_date = now\n coin_url = self.coins[coin]\n coin_url = coin_url + '/historical-data/?start=' + start_date + '&end=' + end_date\n content = urlopen(coin_url).read()\n soup = BeautifulSoup(content, 'html.parser')\n results = soup.find_all(\"tr\", class_=\"text-right\")\n\n for result in results:\n date = result.find_all('td')[0].text\n\n open_val = result.find_all('td')[1].text\n if open_val == '-':\n open_val = None\n else:\n open_val = float(result.find_all('td')[1].text.replace(',', ''))\n\n high_val = result.find_all('td')[2].text\n if high_val == '-':\n high_val = None\n else:\n high_val = float(result.find_all('td')[2].text.replace(',', ''))\n\n low_val = result.find_all('td')[3].text\n if low_val == '-':\n low_val = None\n else:\n low_val = 
float(result.find_all('td')[3].text.replace(',', ''))\n\n close_val = result.find_all('td')[4].text\n if close_val == '-':\n close_val = None\n else:\n close_val = float(result.find_all('td')[4].text.replace(',', ''))\n\n volume = result.find_all('td')[5].text\n if volume == '-':\n volume = None\n else:\n volume = float(result.find_all('td')[5].text.replace(',', ''))\n\n market_cap = result.find_all('td')[6].text\n if market_cap == '-':\n market_cap = None\n else:\n market_cap = float(result.find_all('td')[6].text.replace(',', ''))\n temp = {\n \"coin\": coin, # soup.title.text.split()[0],\n \"date\": date,\n \"symbol\": soup.title.text.split()[1].replace('(', '').replace(')', ''),\n \"open_val\": open_val,\n \"high_val\": high_val,\n \"low_val\": low_val,\n \"close_val\": close_val,\n \"volume\": volume,\n \"market_cap\": market_cap\n }\n infos.append(temp)\n df_all = pd.DataFrame.from_dict(infos)\n df_all['middle_val'] = (df_all.high_val + df_all.low_val) / 2\n df_all['datetime'] = pd.to_datetime(df_all['date'])\n df_all = df_all.sort_values(by='datetime')\n self.coins_history = df_all", "def get_report_list(jtl_file):\n df = None\n try:\n df = pd.read_csv(jtl_file,\n low_memory=False,\n error_bad_lines=False,\n quoting=csv.QUOTE_NONE,\n encoding='utf-8')\n except Exception as e:\n err_msg = 'read jtl file error. detail:{e}'.format(e=e)\n LOGGER.error(err_msg)\n if df is None:\n return\n threads = int(jtl_file.split(os.sep)[-1].split('_')[0])\n success, elapsed, latency, sent_bytes, receive_bytes = [df.get(x) for x in\n ['success', 'elapsed', 'Latency', 'sentBytes', 'bytes']]\n samples = df.shape[0]\n error_count = success.value_counts().get(False)\n if not error_count:\n error_count = 0\n error_rate = str(float(error_count / samples) * 100) + '%'\n label = df.loc[0, 'label']\n start_time = df.iat[0, 0]\n end_time = df.iloc[-1, 0]\n last_req_time = df.iat[-1, 1]\n\n # 如果最后一行数据无效,则取上一行\n i = 1\n while not len(str(end_time)) == 13 and not re.findall('[\\d]{13}', str(end_time)):\n i += 1\n end_time = df.iloc[-i, 0]\n last_req_time = df.iat[-i, 1]\n samples -= 1\n\n if isinstance(start_time, str):\n start_time = int(start_time)\n if isinstance(end_time, str):\n end_time = int(end_time)\n\n local_start_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time / 1000))\n local_end_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time / 1000))\n\n durations = (end_time + last_req_time - start_time) / 1000\n throughput = samples / durations\n\n report_list = [label, local_start_time, local_end_time, durations, threads, throughput, error_rate,\n elapsed.min(), elapsed.max(), elapsed.mean(), samples, sent_bytes.mean(), receive_bytes.mean(),\n latency.mean(), latency.min(), latency.max()]\n\n return report_list", "def TOBS():\n session = Session(engine)\n # Query all passengers\n\n TOBS = session.query(Measurement.date,Measurement.tobs).filter(Measurement.date >= '2010-08-23').all()\n\n # Convert list of tuples into normal list\n all_TOBS = list(np.ravel(TOBS))\n\n return jsonify(all_TOBS)", "def get_trades_history(self, symbol, start_time, end_time, limit=1000):\n payload = {'symbol': symbol, 'start': start_time, 'end': end_time, 'limit': limit}\n return self.public_request('GET', '/api/v1/trades', **payload)", "def get_data(symbol_id='BTC', period_id='1DAY', request_limit=1000, tdelta=30):\n now = datetime.utcnow()\n month = timedelta(days=tdelta)\n past_month = (now - month).isoformat()\n\n parameters = {'symbol_id': symbol_id, 'period_id': period_id, 'time_start': 
past_month[:-3], 'limit':request_limit}\n response = requests.get(HISTORY_URL, params=parameters, headers=header)\n\n while response.status_code != 200:\n time.sleep(5)\n response = requests.get(HISTORY_URL, params=parameters, headers=header)\n \n data = response.json()\n \n # this is a commnet\n csv_headers = ['time_period_start', 'time_period_end', 'price_high', 'price_low', 'price_close', 'price_open', 'trades_count', \n 'volume_traded', 'time_open', 'time_close']\n\n\n with open(str(datafolder / f'{symbol_id}_{tdelta}_day.csv'), 'w', newline='') as f:\n writer = csv.DictWriter(f, csv_headers)\n writer.writeheader()\n for item in data:\n writer.writerow(item)", "def returnTradeHistory(self, time=1 * 60 * 60, limit=100):\n assert limit <= 100, \"'limit' has to be smaller than 100\"\n return self.dpay.rpc.get_trade_history(\n transactions.formatTimeFromNow(-time),\n transactions.formatTimeFromNow(),\n limit,\n api=\"market_history\"\n )", "def timeseries_report(self):\n try:\n n = self.n.value\n except AttributeError:\n n = self.n\n results = pd.DataFrame(index=self.variables.index)\n results['ICE Generation (kW)'] = self.variables['ice_gen']\n results['ICE On (y/n)'] = self.variables['on_ice']\n results['ICE P_min (kW)'] = self.p_min\n results['ICE Genset P_max (kW)'] = self.rated_power * n\n return results", "def read_daily_messages_report(self):\n from itertools import repeat\n\n self.ID_TWEET_ORANGE_FLOW = kpi_from_db_config.ID_TWEET_ORANGE_FLOW\n self.ID_PROCESSING_MESSAGES = kpi_from_db_config.ID_PROCESSING_MESSAGES\n self.ID_CANDIDATES_PROCESSED = kpi_from_db_config.ID_CANDIDATES_PROCESSED\n\n list_id = [self.ID_TWEET_ORANGE_FLOW, \n self.ID_PROCESSING_MESSAGES, \n self.ID_CANDIDATES_PROCESSED]\n len_need_list = [7, 8, 2]\n list_result = [[] for i in repeat(None,len(list_id))]\n\n for i in range(len(list_id)):\n self.cursor.execute('''\n SELECT value\n FROM public.kpi_report\n WHERE id = %s\n ORDER BY created_at DESC\n LIMIT %s\n ''', [list_id[i], len_need_list[i]])\n rows_count = self.cursor.rowcount\n\n if (rows_count == len_need_list[i]): # If rows_count as expected \n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n elif (rows_count > 0 and rows_count < len_need_list[i]):\n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n list_result[i] = list_result[i] + [0] * (len_need_list[i] - rows_count) \n else:\n list_result[i] = [0] * len_need_list[i]\n\n return list_result", "def get_tiingo_prices(tickers, start, end, api_key=None):\n\n all_results = []\n if api_key is None:\n api_key = os.getenv('TIINGO_API_KEY')\n # Sort tickers so that error logging can be used to identify progress\n tickers = sorted(tickers)\n\n for i, ticker in enumerate(tickers):\n try:\n df = web.DataReader(name=ticker,\n data_source='tiingo',\n start=start,\n end=end,\n api_key=api_key)\n df = df[['adjClose']]\n except KeyError as e:\n if e.args[0] == 'date':\n # Patch to handle issue in pandas_datareader\n # where empty results cause a KeyError\n print(f'Got empty df for i={i}, ticker={tickers[i]}')\n df = pd.DataFrame()\n except Exception as e:\n print('Received an unexpected error:', e)\n print(f'Only fetched up to {i-1} inclusive. Returning.')\n return pd.concat(all_results)\n\n if (i % 50 == 0) and i > 0:\n # Sleep to avoid timeouts. 
Empirically found 20s to be sufficient\n time.sleep(20)\n\n all_results.append(df)\n return pd.concat(all_results)", "def temps(): \n \n # Create session and save reference to table\n session = Session(engine)\n Measurement = Base.classes.measurement\n\n # Query\n tobs_query = session.query(Measurement.date, func.avg(Measurement.tobs).label('tobs'))\\\n .filter(Measurement.date >= '2016-08-23').group_by(Measurement.date)\n \n tobs_list = []\n for row in tobs_query:\n tobs_list.append(row._asdict())\n \n return jsonify(tobs_list)\n\n session.close()", "def tobs ():\n # Query the last 12 months \n # session.query(func.max (Measurement.date)).all()f \n # temperature observation data for this station \n\n last = session.query(func.max (Measurement.date)).all()\n prev = dt.date(last) - dt.timedelta(days=365)\n\n\n #make a query that goes back 12 months before that date\n results = session.query(Measurement.date, Measurement.tobs).\\\n filter(Measurement.date >= prev).all()\n\n all_Tobs = list(np.ravel(results))\n\n return jsonify(all_Tobs)", "def determineUnitHistory():\n\tunitTracker = Unitiser()\n\t\n\timport transactions\n\ttrades = transactions.allTransactions()\n\t\n\thistory = dict()\n\t\n\tfor date in timeline():\n\t\t#print(\"\\ntimelime:\", date.strftime('%Y-%m-%d %H:%M:%S'))\n\t\timport valuator\n\t\tvalue = valuator.getPortfolioValueAt(date)\n\t\tif date in trades:\n\t\t\tprior = getPortfolioBefore(date)\n\t\t\tprior_value = valuator.getPortfolioValueAt(date, portfolio = prior)\n\n\t\t\tinvested = Decimal('0.0')\n\t\t\tfor equity in trades[date]:\n\t\t\t\ttrade = trades[date][equity]\n\t\t\t\t#print(equity, trade)\n\t\t\t\tif trade['action'] == 'buy':\n\t\t\t\t\tinvested = invested + Decimal(trade['value'])\n\t\t\t\telif trade['action'] == 'sell':\n\t\t\t\t\tinvested = invested - Decimal(trade['value'])\n\n\t\t\tsince = getPortfolioAt(date)\n\t\t\tsince_value = valuator.getPortfolioValueAt(date, portfolio = since)\n\n\t\t\t#print(\"change amount is\", invested)\n\t\t\tif invested > 0:\n\t\t\t\tunitTracker.invest(invested, prior_value)\n\t\t\telif invested < 0:\n\t\t\t\tunitTracker.divest(abs(invested), prior_value)\n\n\t\thistory[date] = {\n\t\t\t 'date' : date,\n\t\t\t 'value' : value.quantize(TWOPLACES),\n\t\t\t 'units' : unitTracker.numberOfUnits().quantize(TWOPLACES),\n\t\t\t 'price' : unitTracker.pricePerUnit(value).quantize(TWOPLACES),\n\t\t\t 'invested' : unitTracker.invested\n\t\t\t }\n\t\n\treturn history", "def get_transactions(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get transactions from database\n cur.execute(\"SELECT * FROM transactions\")\n transactions_data = cur.fetchall()\n\n # convert into a dict of values.\n transactions_list = []\n [transactions_list.append({'transaction_id': transaction[0],\n 'date': transaction[1],\n 'payee_id': transaction[2],\n 'description': transaction[3],\n 'amount': transaction[4]})\n for transaction in transactions_data]\n\n # close the cursor\n self.close_cursor()\n\n return transactions_list", "def performance_history(self, request, pk=None, **kwargs):\n # Get the goal even though we don't need it (we could just use the pk)\n # so we can ensure we have permission to do so.\n goal = self.get_object()\n\n # - Get all the transaction with this goal involved that are of reason 'Execution'.\n # We want the volume, ticker id, date ordered by date. 
[(date, {ticker: vol}, ...]\n qs = Transaction.objects.filter(Q(to_goal=goal) | Q(from_goal=goal),\n reason=Transaction.REASON_EXECUTION).order_by('executed')\n txs = qs.values_list('execution_distribution__execution__executed',\n 'execution_distribution__execution__asset__id',\n 'execution_distribution__volume')\n ts = []\n entry = (None,)\n aids = set()\n # If there were no transactions, there can be no performance\n if len(txs) == 0:\n return Response([])\n\n # Because executions are stored with timezone, but other things are just as date, we need to make datetimes\n # naive before doing date arithmetic on them.\n bd = timezone.make_naive(txs[0][0]).date()\n ed = timezone.make_naive(timezone.now()).date()\n for tx in txs:\n aids.add(tx[1])\n txd = timezone.make_naive(tx[0]).date()\n if txd == entry[0]:\n entry[1][tx[1]] += tx[2]\n else:\n if entry[0] is not None:\n ts.append(entry)\n entry = (txd, defaultdict(int))\n entry[1][tx[1]] = tx[2]\n ts.append(entry)\n\n # - Get the time-series of prices for each instrument from the first transaction date until now.\n # Fill empty dates with previous value [(date, {ticker: price}, ...]\n pqs = DailyPrice.objects.filter(date__range=(bd, ed),\n instrument_content_type=ContentType.objects.get_for_model(Ticker).id,\n instrument_object_id__in=aids)\n prices = pqs.to_timeseries(fieldnames=['price', 'date', 'instrument_object_id'],\n index='date',\n storage='long',\n pivot_columns='instrument_object_id',\n values='price')\n # Remove negative prices and fill missing values\n # We replace negs with None so they are interpolated.\n prices[prices <= 0] = None\n prices = prices.reindex(pd.date_range(bd, ed), method='ffill').fillna(method='bfill')\n\n # For each day, calculate the performance\n piter = prices.itertuples()\n res = []\n # Process the first day - it's special\n row = next(piter)\n p_m1 = row[1:]\n vols_m1 = [0] * len(prices.columns)\n tidlocs = {tid: ix for ix, tid in enumerate(prices.columns)}\n for tid, vd in ts.pop(0)[1].items():\n vols_m1[tidlocs[tid]] += vd\n res.append((dt2ed(row[0]), 0)) # First day has no performance as there wasn't a move\n # Process the rest\n for row in piter:\n # row[0] (a datetime) is a naive timestamp, so we don't need to convert it\n if ts and row[0].date() == ts[0][0]:\n vols = vols_m1.copy()\n dtrans = ts.pop(0)[1] # The transactions for the current processed day.\n for tid, vd in dtrans.items():\n vols[tidlocs[tid]] += vd\n # The exposed assets for the day. These are the assets we know for sure were exposed for the move.\n pvol = list(map(min, vols, vols_m1))\n else:\n vols = vols_m1\n pvol = vols\n pdelta = list(map(operator.sub, row[1:], p_m1)) # The change in price from yesterday\n impact = sum(map(operator.mul, pvol, pdelta)) # The total portfolio impact due to price moves for exposed assets.\n b_m1 = sum(map(operator.mul, pvol, p_m1)) # The total portfolio value yesterday for the exposed assets.\n perf = 0 if b_m1 == 0 else impact / b_m1\n # row[0] (a datetime) is a naive timestamp, so we don't need to convert it\n res.append((dt2ed(row[0]), decimal.Decimal.from_float(perf).quantize(decimal.Decimal('1.000000'))))\n p_m1 = row[1:]\n vols_m1 = vols[:]\n\n return Response(res)", "def get_stock_price(df_excld):\n\n ts = TimeSeries(os.environ['ALPHA_VANTAGE_KEY'])\n\n info = []\n symbols = []\n counter = 0\n\n for t in df_excld['Ticker']:\n\n if counter % 5 == 0:\n time.sleep(65)\n\n i, m = ts.get_daily(symbol=t, outputsize='full')\n info.append(i)\n symbols.append(m['2. 
Symbol'])\n counter += 1\n\n return info, symbols", "async def get_blacklist_hist(self, search_time, limit=1000):\n\n start = search_time[0][0]\n end = search_time[0][1]\n\n url = f'https://{self.__api}/v3/blacklist/history'\n continuation = None\n full_resp = {}\n flag = True\n body = {\"filter[clientid]\": self.clientid, \"filter[start_time]\": start, \"filter[end_time]\": end,\n \"limit\": limit, \"continuation\": continuation}\n while True:\n with requests.get(url, params=body,\n headers={'X-WallarmAPI-UUID': self.__uuid,\n 'X-WallarmAPI-Secret': self.__secret}) as response:\n if response.status not in [200, 201, 202, 204, 304]:\n raise NonSuccessResponse(response.status, await response.text)\n continuation = response.json().get('body').get('continuation')\n\n if flag:\n full_resp = response.json()\n\n if continuation is not None:\n body['continuation'] = continuation\n if not flag:\n full_resp['body']['objects'].extend(response.json().get('body').get('objects'))\n else:\n break\n flag = False\n return full_resp", "async def test_all_transactions(self):\n response = await self.collect(get_request_text=self.GATLING_LOG)\n self.assert_measurement(response, value=\"2\")", "def slow_update_duration(self):\n for i in range(len(self.data_file.sorted_data)):\n if self.data_file.sorted_data[i]['type'] == 'slow':\n slow_upd = self.data_file.sorted_data[i]['timestamp']\n Config.ANALYSIS.write(f\"slow at: {slow_upd}\\n\")\n if i == 0:\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n elif i == len(self.data_file.sorted_data) - 1:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\\n\")\n else:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\")\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n Config.ANALYSIS.write(\"\\n\\n\")", "def _get_meas_times(self, last_meas_time):\n meas_times = list()\n data = None\n \n if self._process_type == 'soft_gen':\n meas_times = self._get_meas_times_from_db()\n else:\n if self._data['data_fetch_method'] == 'sql':\n # get from outer sql db\n data = self._get_meas_times_sql(last_meas_time)\n elif self._data['data_fetch_method'] == 'web service':\n # get from web service\n data = self._get_meas_times_web_service(last_meas_time)\n\n\n if data:\n clear_data = [row[0] for row in data['data']]\n # check if we have values in list of datetime type\n if clear_data:\n if type(clear_data[0]) == datetime.datetime:\n meas_times = clear_data\n else:\n # it's a date type\n meas_times = [datetime.datetime.combine(d, datetime.time.min) for d in clear_data]\n\n \n\n\n # sort measurement times if they weren't sorted before\n meas_times.sort()\n # if do not save history, take only last element\n if self._data['report_save_historical_instances_ind'] != 'Y':\n if len(meas_times) > 1:\n del meas_times[:-1]\n \n return meas_times", "def tobs():\n # calculate year ago from latest date in database\n latest_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n year_ago = dt.datetime.strptime(latest_date[0], \"%Y-%m-%d\") - dt.timedelta(days=366)\n\n # retrieve temp observations and 
convert to list\n temps = list(session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= year_ago).all())\n return jsonify(temps)", "def tobs():\n # Query all all dates and temperature observations from the last year\n tobs_list = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= \"2016-08-23\").filter(Measurement.station == 'USC00519281').order_by(Measurement.date).all()\n # Convert list of tuples into normal list\n all_tobs = list(np.ravel(tobs_list))\n\n return jsonify(all_tobs)", "def aggregate_timetable_details(logger):\n df_array = []\n for page in range(0, MAX_PAGE+1): # MAX_PAGE inclusive\n filename = dirname + 'TimeTablePage_{}'.format(page) + '.csv'\n if os.exists(filename):\n # When database is ready, this info can be recorded there\n logger.info('File[{file}] already downloaded. Reading it'.format(file=filename))\n url = filename\n else:\n count = page * 20\n url = TIMETABLE_URL + '&count={count}&page={page}'.format(count=count, page=page)\n logger.info('Fetch TimeTableDetails from URL[{url}] page[{page}] into {directory}'.format(url=url, page=page, directory=dirname))\n \n try:\n df = pd.read_html(url)[1] # the table of interest from the list\n except Exception as e:\n logger.error('Exception when reading HTML:[{exception}]'.format(exception=e))\n exit(0)\n\n logger.info('Writing to [{file}]'.format(file=filename))\n df.to_csv(filename, index=False)\n df_array.append(df)\n\n df = pd.concat(df_array)\n print(df.head())\n print(df.tail())\n\n return 0", "def temporal_database():\n return TimeHistory()", "def trades_for(self, symbol, from_id=None, timestamp=None, limit=None):\n\t\tif self._session:\n\t\t\tdata = {'symbol': symbol}\n\n\t\t\tif from_id:\n\t\t\t\tdata['fromId'] = from_id\n\n\t\t\tif timestamp:\n\t\t\t\tdata['startTime'] = int(timestamp * 1000.0)\n\n\t\t\tif limit:\n\t\t\t\tdata['limit'] = limit\n\n\t\t\treturn self._session.get_my_trades(**data)\n\n\t\treturn []", "def get_chartdata():\n callback = bottle.request.query.get('callback')\n y_axis = bottle.request.query.get('y_axis').strip()\n w_acts = [\"action='%s'\" % act for act in bottle.request.query.get('actions').strip().split(',')]\n w_acts = 'AND (%s)' % ' OR '.join(w_acts) if w_acts else ''\n f_value = 'AVG(latency)' if y_axis.startswith('avg') else 'COUNT(timestamp)'\n atomic = 1 if y_axis in ['aops', 'avgl'] else 0\n\n db_conn = tools.get_db_conn('%s.db' % bottle.request.query.test_run_id)\n sql = 'SELECT test_run_status, timestamp_started, timestamp_completed FROM info LIMIT 1'\n status, started, finished = tools.db_query(db_conn, sql)[1][0]\n progress = int(float(finished) - float(started)) if finished \\\n else int(tools.get_timestamp() - float(started))\n\n sql = 'SELECT substr(timestamp, 0, 11), code, %s FROM recs ' % f_value + \\\n 'WHERE atomic=%s %s GROUP BY code, substr(timestamp, 0, 11) ' % (atomic, w_acts) + \\\n 'ORDER BY id DESC LIMIT 3600' # last 1 hour activity\n\n result = tools.db_query(db_conn, sql)[1] if finished else tools.db_query(db_conn, sql)[1][:-1]\n result = list(reversed(result))\n results = {str(abs(int(item[0]) - int(float(started)))):\n {'failed': 0, 'passed': 0, 'incomplete': 0} for item in result}\n for item in result: # item[0] - timestamp, item[1] - code (None if incomplete), item[2] - value\n timestamp = str(int(item[0]) - int(float(started)))\n value = item[2] or 0\n results[timestamp]['failed'] += value if item[1] and item[1] != 200 else 0\n results[timestamp]['passed'] += value if item[1] == 200 else 0\n 
results[timestamp]['incomplete'] += value if item[1] == None else 0\n results = [{'timestamp': key, 'failed': value['failed'], 'passed': value['passed'],\n 'incomplete': value['incomplete']} for key, value in results.items()]\n result = {bottle.request.query.slave: results, 'status': status,\n 'started': started, 'finished': finished or '(not finished)', 'progress': progress}\n return '{0}({1})'.format(callback, result)", "def tobs():\n\n # Query date and tobs values from a year ago\n one_year_from_last_results = session.query(Measurement.date, Measurement.tobs)\\\n .filter(Measurement.date >= '2016-08-23').all()\n\n # Create a all_last_year_tobs dictionary and adding a list of date and tob values\n all_last_year_tobs = []\n for result in one_year_from_last_results:\n tobs_result_dict = {}\n tobs_result_dict['date'] = result.date\n tobs_result_dict['tobs'] = result.tobs\n all_last_year_tobs.append(tobs_result_dict)\n\n return jsonify(all_last_year_tobs)", "def get_all_files_to_instrument_for_live_session():\n sql=\"SELECT * FROM files WHERE should_instrument=1 AND is_history=0\"\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql)\n results=c.fetchall()\n conn.close()\n return results", "def get_meter_data_for_time_slice(apt_no, start_time, end_time):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n\n logger.debug(\"sMap: Getting meter data for %s between %s and %s\", apt_no, start_time, end_time)\n\n query = (\"select data in ('\" + str(start_time) + \"','\" + str(end_time) + \"') \"\n \"limit 200000 \"\n \"where Metadata/LoadLocation/FlatNumber ='\" + str(apt_no) + \"' and \"\n \"Metadata/Extra/PhysicalParameter='Power'\")\n\n r = requests.post(url, data=query)\n # logger.debug (\"%s\",r)\n payload = r.json()\n # logger.debug(\"Payload:%s\", payload)\n\n if apt_no in ['102A', 102]:\n apt_no = 102\n meters = retrieve_meter_info(apt_no)\n logger.debug(\"Meters: %s\", meters)\n\n streams = []\n meter_type = []\n l_meters = range(0, len(meters))\n for i in l_meters:\n uuid = payload[i]['uuid']\n\n # Get meter type based on uuid\n for meter in meters:\n if meter['uuid'] == uuid:\n m_type = meter['type']\n # logger.debug (uuid, m_type)\n\n meter_type.append(m_type)\n streams.append(np.array(payload[i]['Readings']))\n # logger.debug(\"Streams: %s\", streams)\n\n if len(streams[0]) > 0:\n\n df = [pd.DataFrame({'time': readings[:, 0] / 1000, 'power': readings[:, 1],\n 'type': [meter_type[i]] * len(readings)},\n columns=['time', 'power', 'type']) for i, readings in enumerate(streams)]\n else:\n df = []\n\n return df", "def get_market_inf_more_data(start=None, end=None, index=None, retry_count=3, pause=0.001):\n # data to be sent to post request\n data = {'startDate': start,\n 'endDate': end,\n 'searchRecentMarket': 'Search Recent Market'}\n\n for _ in range(retry_count):\n time.sleep(pause)\n try:\n r = requests.post(\n url=vs.DSE_URL+vs.DSE_MARKET_INF_MORE_URL, data=data)\n except Exception as e:\n print(e)\n else:\n #soup = BeautifulSoup(r.text, 'html.parser')\n soup = BeautifulSoup(r.content, 'html5lib')\n\n quotes = [] # a list to store quotes\n\n table = soup.find('table', attrs={\n 'class': 'table table-bordered background-white text-center'})\n\n for row in table.find_all('tr')[1:]:\n cols = row.find_all('td')\n quotes.append({'Date': cols[0].text.strip().replace(\",\", \"\"),\n 'Total Trade': int(cols[1].text.strip().replace(\",\", \"\")),\n 'Total Volume': int(cols[2].text.strip().replace(\",\", \"\")),\n 'Total Value in Taka(mn)': 
float(cols[3].text.strip().replace(\",\", \"\")),\n 'Total Market Cap. in Taka(mn)': float(cols[4].text.strip().replace(\",\", \"\")),\n 'DSEX Index': float(cols[5].text.strip().replace(\",\", \"\")),\n 'DSES Index': float(cols[6].text.strip().replace(\",\", \"\")),\n 'DS30 Index': float(cols[7].text.strip().replace(\",\", \"\")),\n 'DGEN Index': float(cols[8].text.strip().replace(\"-\", \"0\"))\n })\n df = pd.DataFrame(quotes)\n if 'date' in df.columns:\n if (index == 'date'):\n df = df.set_index('date')\n df = df.sort_index(ascending=True)\n df = df.sort_index(ascending=True)\n else:\n print('No data found')\n return df", "def _historical_user_data_for_decisions(user: User, days: int) -> List:\n data = []\n for begin, end in _time_range_list(days):\n db_clicks_statements = DBDiscussionSession.query(ClickedStatement).filter(\n ClickedStatement.author_uid == user.uid,\n ClickedStatement.timestamp >= begin,\n ClickedStatement.timestamp < end).all()\n db_clicks_arguments = DBDiscussionSession.query(ClickedArgument).filter(\n ClickedArgument.author_uid == user.uid,\n ClickedArgument.timestamp >= begin,\n ClickedArgument.timestamp < end).all()\n clicks = len(db_clicks_statements) + len(db_clicks_arguments)\n data.append(clicks)\n\n return data", "def get_timeseries_data(self, table, datetime_start, datetime_end, timechunk=datetime.timedelta(hours=1)):\n table_schema = LMTDB_TABLES.get(table.upper())\n if table_schema is None:\n raise KeyError(\"Table '%s' is not valid\" % table)\n else:\n result_columns = ['TIMESTAMP'] + table_schema['columns']\n format_dict = {\n 'schema': ', '.join(result_columns).replace(\"TS_ID,\", \"TIMESTAMP_INFO.TS_ID,\"),\n 'table': table,\n }\n\n index0 = len(self.saved_results.get(table, {'rows': []})['rows'])\n chunk_start = datetime_start\n while chunk_start < datetime_end:\n if timechunk is None:\n chunk_end = datetime_end\n else:\n chunk_end = chunk_start + timechunk\n if chunk_end > datetime_end:\n chunk_end = datetime_end\n start_stamp = chunk_start.strftime(\"%Y-%m-%d %H:%M:%S\")\n end_stamp = chunk_end.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n query_str = \"\"\"SELECT\n %(schema)s\n FROM\n %(table)s\n INNER JOIN TIMESTAMP_INFO ON TIMESTAMP_INFO.TS_ID = %(table)s.TS_ID\n WHERE\n TIMESTAMP_INFO.TIMESTAMP >= %%(ps)s\n AND TIMESTAMP_INFO.TIMESTAMP < %%(ps)s\n \"\"\" % format_dict\n self.query(query_str, (start_stamp, end_stamp), table=table, table_schema=table_schema)\n if timechunk is not None:\n chunk_start += timechunk\n\n return self.saved_results[table]['rows'][index0:], result_columns", "def returnOrderTrades(self, order_number):", "def temp_monthly():\n # Calculate the date 1 year ago from last date in database\n prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n results = session.query(Measurement.tobs).\\\n filter(Measurement.station == 'USC00519281').\\\n filter(Measurement.date >= prev_year).all()\n # Unravel results into a ID array and convert to a list\n temps = list(np.ravel(results))\n \n # Return the results\n return jsonify(temps)", "def probe_times(self):\r\n probe_times = []\r\n for probe in self.__probes.values():\r\n if probe.complete():\r\n if probe.round_trip_time() > 20:\r\n \"Long probe: %s \" %self.__id\r\n probe_times.append(probe.round_trip_time())\r\n return probe_times", "def transaction_data(self):\n return list(map(lambda transaction:transaction.to_json(), self.transaction_map.values()))", "def get_latest_transactions(self):\n first_run = False\n if not self._transactions:\n first_run = True\n transactions = []\n for account in 
self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n self._logger.debug('Caching %s transactions', len(transactions))\n self._transactions.extend(transactions)\n if first_run:\n self._logger.info('First run detected, discarding transactions until now')\n return []\n return transactions", "def temp():\n \n #Query temp from a year from last data point\n query = session.query(func.max(Measurement.date)).first()\n maxDate = dt.datetime.strptime(query[0],'%Y-%m-%d')\n year_ago = maxDate - dt.timedelta(days=365)\n\n results = session.query(Measurement.date,Measurement.tobs).filter(Measurement.date>=year_ago).all()\n\n # convert list of tuples into normal list\n all_dates = (result[0] for result in results)\n all_tobs = (result[1] for result in results)\n\n # Convert result to dictionary using date as key and tobs as value\n tobs_dict = dict(zip(all_dates,all_tobs))\n\n # Return on webpage\n return jsonify(tobs_dict)", "def get_timeseries():\n\n # generate the result files\n name = os.path.join(project.output_folder, project.scenario+'.sww')\n log.debug('get_timeseries: input SWW file=%s' % name)\n log.debug('get_timeseries: gauge file=%s' % project.gauge_file)\n anuga.sww2csv_gauges(name, project.gauge_file, quantities=project.layers_list,\n verbose=False)\n\n # since ANUGA code doesn't return a list of generated files,\n # look in output directory for 'gauge_*.csv' files.\n glob_mask = os.path.join(project.output_folder, 'gauge_*.csv')\n return glob.glob(glob_mask)", "def report_data(self):\n report = [donor_obj.data for donor_obj in self.donor_list]\n return report", "def hourly_data():\n return generate_df_for_tests(freq=\"H\", periods=24 * 500)", "def get_target_timestamps(self):\n times=[]\n curr = self.begin_ts\n while curr<=self.end_ts:\n times.append(curr)\n curr = curr + 24 * 60 * 60\n return times", "def spending_over_time_test_data():\n for i in range(30):\n # Define some values that are calculated and used multiple times\n transaction_id = i\n award_id = i + 1000\n awarding_agency_id = i + 2000\n toptier_awarding_agency_id = i + 3000\n subtier_awarding_agency_id = i + 4000\n funding_agency_id = i + 5000\n toptier_funding_agency_id = i + 6000\n subtier_funding_agency_id = i + 7000\n federal_action_obligation = i + 8000\n total_obligation = i + 9000\n federal_account_id = i + 10000\n treasury_account_id = i + 11000\n\n action_date = f\"20{i % 10 + 10}-{i % 9 + 1}-{i % 28 + 1}\"\n action_date_obj = datetime.datetime.strptime(action_date, \"%Y-%m-%d\")\n fiscal_month = generate_fiscal_month(action_date_obj)\n fiscal_year = generate_fiscal_year(action_date_obj)\n fiscal_action_date = f\"{fiscal_year}-{fiscal_month}-{i % 28 + 1}\"\n contract_award_type = [\"A\", \"B\", \"C\", \"D\"][i % 4]\n grant_award_type = [\"02\", \"03\", \"04\", \"05\"][i % 4]\n is_fpds = i % 2 == 0\n\n # Award\n baker.make(\n \"search.AwardSearch\",\n award_id=award_id,\n fain=f\"fain_{transaction_id}\" if not is_fpds else None,\n is_fpds=is_fpds,\n latest_transaction_id=transaction_id,\n piid=f\"piid_{transaction_id}\" if is_fpds else None,\n total_obligation=total_obligation,\n type=contract_award_type if is_fpds else grant_award_type,\n action_date=\"2020-01-01\",\n )\n\n # Federal, Treasury, and Financial Accounts\n baker.make(\n \"accounts.FederalAccount\",\n id=federal_account_id,\n 
parent_toptier_agency_id=toptier_awarding_agency_id,\n account_title=f\"federal_account_title_{transaction_id}\",\n federal_account_code=f\"federal_account_code_{transaction_id}\",\n )\n baker.make(\n \"accounts.TreasuryAppropriationAccount\",\n agency_id=f\"taa_aid_{transaction_id}\",\n allocation_transfer_agency_id=f\"taa_ata_{transaction_id}\",\n availability_type_code=f\"taa_a_{transaction_id}\",\n beginning_period_of_availability=f\"taa_bpoa_{transaction_id}\",\n ending_period_of_availability=f\"taa_epoa_{transaction_id}\",\n federal_account_id=federal_account_id,\n main_account_code=f\"taa_main_{transaction_id}\",\n sub_account_code=f\"taa_sub_{transaction_id}\",\n treasury_account_identifier=treasury_account_id,\n )\n tas_components = [\n f\"aid=taa_aid_{transaction_id}\"\n f\"main=taa_main_{transaction_id}\"\n f\"ata=taa_ata_{transaction_id}\"\n f\"sub=taa_sub_{transaction_id}\"\n f\"bpoa=taa_bpoa_{transaction_id}\"\n f\"epoa=taa_epoa_{transaction_id}\"\n f\"a=taa_a_{transaction_id}\"\n ]\n baker.make(\"awards.FinancialAccountsByAwards\", award_id=award_id, treasury_account_id=treasury_account_id)\n\n # Awarding Agency\n baker.make(\n \"references.Agency\",\n id=awarding_agency_id,\n subtier_agency_id=subtier_awarding_agency_id,\n toptier_agency_id=toptier_awarding_agency_id,\n )\n baker.make(\n \"references.ToptierAgency\",\n abbreviation=f\"toptier_awarding_agency_abbreviation_{transaction_id}\",\n name=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n toptier_agency_id=toptier_awarding_agency_id,\n toptier_code=f\"toptier_awarding_agency_code_{transaction_id}\",\n )\n baker.make(\n \"references.SubtierAgency\",\n abbreviation=f\"subtier_awarding_agency_abbreviation_{transaction_id}\",\n name=f\"subtier_awarding_agency_agency_name_{transaction_id}\",\n subtier_agency_id=subtier_awarding_agency_id,\n subtier_code=f\"subtier_awarding_agency_code_{transaction_id}\",\n )\n\n # Funding Agency\n baker.make(\n \"references.Agency\",\n id=funding_agency_id,\n subtier_agency_id=subtier_funding_agency_id,\n toptier_agency_id=toptier_funding_agency_id,\n )\n baker.make(\n \"references.ToptierAgency\",\n abbreviation=f\"toptier_funding_agency_abbreviation_{transaction_id}\",\n name=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n toptier_agency_id=toptier_funding_agency_id,\n toptier_code=f\"toptier_funding_agency_code_{transaction_id}\",\n )\n baker.make(\n \"references.SubtierAgency\",\n abbreviation=f\"subtier_funding_agency_abbreviation_{transaction_id}\",\n name=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n subtier_agency_id=subtier_funding_agency_id,\n subtier_code=f\"subtier_funding_agency_code_{transaction_id}\",\n )\n\n # Ref Country Code\n baker.make(\"references.RefCountryCode\", country_code=\"USA\", country_name=\"UNITED STATES\")\n\n # FPDS / FABS\n if is_fpds:\n baker.make(\n \"search.TransactionSearch\",\n transaction_id=transaction_id,\n is_fpds=is_fpds,\n action_date=action_date,\n fiscal_year=fiscal_year,\n fiscal_action_date=fiscal_action_date,\n award_id=award_id,\n awarding_agency_id=awarding_agency_id,\n business_categories=[f\"business_category_1_{transaction_id}\", f\"business_category_2_{transaction_id}\"],\n transaction_description=f\"This is a test description {transaction_id}\"\n if transaction_id % 2 == 0\n else None,\n federal_action_obligation=federal_action_obligation,\n generated_pragmatic_obligation=federal_action_obligation,\n award_amount=total_obligation,\n funding_agency_id=funding_agency_id,\n 
type=contract_award_type if is_fpds else grant_award_type,\n awarding_agency_code=f\"toptier_awarding_agency_code_{transaction_id}\",\n awarding_toptier_agency_name=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n awarding_toptier_agency_abbreviation=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n funding_agency_code=f\"toptier_funding_agency_code_{transaction_id}\",\n funding_toptier_agency_name=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n funding_toptier_agency_abbreviation=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n awarding_sub_tier_agency_c=f\"subtier_awarding_agency_code_{transaction_id}\",\n awarding_subtier_agency_name=f\"subtier_awarding_agency_agency_name_{transaction_id}\",\n funding_sub_tier_agency_co=f\"subtier_funding_agency_code_{transaction_id}\",\n funding_subtier_agency_name=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n funding_subtier_agency_abbreviation=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n recipient_name=f\"recipient_name_{transaction_id}\",\n recipient_unique_id=f\"{transaction_id:09d}\",\n recipient_hash=\"c687823d-10af-701b-1bad-650c6e680190\" if transaction_id == 21 else None,\n recipient_levels=[\"R\"] if i == 21 else [],\n extent_competed=f\"extent_competed_{transaction_id}\",\n recipient_location_country_code=\"USA\",\n recipient_location_country_name=\"USA\",\n recipient_location_state_code=f\"LE_STATE_CODE_{transaction_id}\",\n recipient_location_county_code=f\"{transaction_id:03d}\",\n recipient_location_county_name=f\"LE_COUNTY_NAME_{transaction_id}\",\n recipient_location_congressional_code=f\"{transaction_id:02d}\",\n recipient_location_zip5=f\"LE_ZIP5_{transaction_id}\",\n recipient_location_city_name=f\"LE_CITY_NAME_{transaction_id}\",\n naics_code=f\"{transaction_id}{transaction_id}\",\n naics_description=f\"naics_description_{transaction_id}\",\n piid=f\"piid_{transaction_id}\",\n pop_country_code=\"USA\",\n pop_country_name=\"UNITED STATES\",\n pop_state_code=f\"POP_STATE_CODE_{transaction_id}\",\n pop_county_code=f\"{transaction_id:03d}\",\n pop_county_name=f\"POP_COUNTY_NAME_{transaction_id}\",\n pop_zip5=f\"POP_ZIP5_{transaction_id}\",\n pop_congressional_code=f\"{transaction_id:02d}\",\n pop_city_name=f\"POP_CITY_NAME_{transaction_id}\",\n product_or_service_code=str(transaction_id).zfill(4),\n product_or_service_description=f\"psc_description_{transaction_id}\",\n type_of_contract_pricing=f\"type_of_contract_pricing_{transaction_id}\",\n type_set_aside=f\"type_set_aside_{transaction_id}\",\n tas_components=tas_components,\n )\n baker.make(\n \"references.NAICS\",\n code=f\"{transaction_id}\",\n description=f\"naics_description_{transaction_id}\",\n )\n baker.make(\n \"references.PSC\", code=str(transaction_id).zfill(4), description=f\"psc_description_{transaction_id}\"\n )\n else:\n baker.make(\n \"search.TransactionSearch\",\n transaction_id=transaction_id,\n is_fpds=is_fpds,\n action_date=action_date,\n fiscal_year=fiscal_year,\n fiscal_action_date=fiscal_action_date,\n award_id=award_id,\n awarding_agency_id=awarding_agency_id,\n business_categories=[f\"business_category_1_{transaction_id}\", f\"business_category_2_{transaction_id}\"],\n transaction_description=f\"This is a test description {transaction_id}\"\n if transaction_id % 2 == 0\n else None,\n federal_action_obligation=federal_action_obligation,\n generated_pragmatic_obligation=federal_action_obligation,\n award_amount=total_obligation,\n funding_agency_id=funding_agency_id,\n 
type=contract_award_type if is_fpds else grant_award_type,\n awarding_agency_code=f\"toptier_awarding_agency_code_{transaction_id}\",\n awarding_toptier_agency_name=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n awarding_toptier_agency_abbreviation=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n funding_agency_code=f\"toptier_funding_agency_code_{transaction_id}\",\n funding_toptier_agency_name=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n funding_toptier_agency_abbreviation=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n awarding_sub_tier_agency_c=f\"subtier_awarding_agency_code_{transaction_id}\",\n awarding_subtier_agency_name=f\"subtier_awarding_agency_agency_name_{transaction_id}\",\n funding_sub_tier_agency_co=f\"subtier_funding_agency_code_{transaction_id}\",\n funding_subtier_agency_name=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n funding_subtier_agency_abbreviation=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n recipient_name=f\"recipient_name_{transaction_id}\",\n recipient_unique_id=f\"{transaction_id:09d}\",\n recipient_hash=\"c687823d-10af-701b-1bad-650c6e680190\" if transaction_id == 21 else None,\n recipient_levels=[\"R\"] if i == 21 else [],\n cfda_number=f\"cfda_number_{transaction_id}\",\n fain=f\"fain_{transaction_id}\",\n recipient_location_country_code=\"USA\",\n recipient_location_country_name=\"USA\",\n recipient_location_state_code=f\"LE_STATE_CODE_{transaction_id}\",\n recipient_location_county_code=f\"{transaction_id:03d}\",\n recipient_location_county_name=f\"LE_COUNTY_NAME_{transaction_id}\",\n recipient_location_congressional_code=f\"{transaction_id:02d}\",\n recipient_location_zip5=f\"LE_ZIP5_{transaction_id}\",\n recipient_location_city_name=f\"LE_CITY_NAME_{transaction_id}\",\n pop_country_code=\"USA\",\n pop_country_name=\"UNITED STATES\",\n pop_state_code=f\"POP_STATE_CODE_{transaction_id}\",\n pop_county_code=f\"{transaction_id:03d}\",\n pop_county_name=f\"POP_COUNTY_NAME_{transaction_id}\",\n pop_zip5=f\"POP_ZIP5_{transaction_id}\",\n pop_congressional_code=f\"{transaction_id:02d}\",\n pop_city_name=f\"POP_CITY_NAME{transaction_id}\",\n tas_components=tas_components,\n )", "def temperatures():\n hi_act= session.query(measurements.tobs,measurements.date,measurements.station).\\\n filter(measurements.station == 'USC00519281').\\\n filter(measurements.date >last_12).\\\n order_by(measurements.date).all()\n hi_act_df=pd.DataFrame(hi_act).set_index('date')\n hi_act_dict=hi_act_df.to_dict()\n return jsonify(hi_act_dict)", "def frame(self):\n microseconds = np.array(self.results['times']) * 1e6\n return pd.DataFrame(self.results, index=microseconds)", "def query_tickets(self):\n return self._call_txtrader_api('query_tickets', {})", "async def describe_slow_log_records_async(\n self,\n request: dds_20151201_models.DescribeSlowLogRecordsRequest,\n ) -> dds_20151201_models.DescribeSlowLogRecordsResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_slow_log_records_with_options_async(request, runtime)", "def wem_market_value_all():\n __query = \"\"\"\n select\n date_trunc('month', wfs.trading_interval) AS trading_day,\n sum(wfs.eoi_quantity * wbs.price) as energy_interval,\n wf.fueltech_id\n from wem_facility_scada wfs\n left join wem_facility wf on wfs.facility_id = wf.code\n join wem_balancing_summary wbs on wfs.trading_interval = wbs.trading_interval\n where\n wf.fueltech_id is not null\n group by 1, wf.fueltech_id\n order by 1 desc, 2 asc\n \"\"\"\n\n 
query = __query.format()\n\n json_envelope = {}\n\n with engine.connect() as c:\n rows = c.execute(query)\n\n current_tech = None\n\n for row in rows:\n\n current_tech = row[2]\n\n if current_tech not in json_envelope.keys():\n json_envelope[current_tech] = {\n \"id\": f\"wem.fuel_tech.{current_tech}.market_value\",\n \"fuel_tech\": current_tech,\n \"region\": \"wa\",\n \"type\": \"market_value\",\n \"units\": \"AUD\",\n \"history\": {\n \"interval\": \"1M\",\n \"start\": None,\n \"last\": None,\n \"data\": [],\n },\n }\n\n if (\n json_envelope[current_tech][\"history\"][\"start\"] == None\n or row[0] < json_envelope[current_tech][\"history\"][\"start\"]\n ):\n json_envelope[current_tech][\"history\"][\"start\"] = row[0]\n\n if (\n json_envelope[current_tech][\"history\"][\"last\"] == None\n or row[0] > json_envelope[current_tech][\"history\"][\"last\"]\n ):\n json_envelope[current_tech][\"history\"][\"last\"] = row[0]\n\n json_envelope[current_tech][\"history\"][\"data\"].append(row[1])\n\n return [json_envelope[i] for i in json_envelope.keys()]", "def tobs():\n # Create our session (link) from Python to the DB.\n session = Session(engine)\n\n # Calculate the date 1 year ago from the last data point in the database.\n last_measurement_data_point_tuple = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n (latest_date, ) = last_measurement_data_point_tuple\n latest_date = dt.datetime.strptime(latest_date, '%Y-%m-%d')\n latest_date = latest_date.date()\n date_year_ago = latest_date - relativedelta(years=1)\n\n # Perform a query to retrieve the data and temperature scores.\n data_from_last_year = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= date_year_ago).all()\n\n session.close()\n\n # Convert results to a dictionary \n all_temperatures = []\n for date, temp in data_from_last_year:\n if temp != None:\n temp_dict = {}\n temp_dict[date] = temp\n all_temperatures.append(temp_dict)\n # Return JSON\n return jsonify(all_temperatures)", "def test_get_events_history_filtering_by_timestamp(rotkehlchen_api_server: 'APIServer'):\n tx_hex = deserialize_evm_tx_hash('0xb226ddb8cbb286a7a998a35263ad258110eed5f923488f03a8d890572cd4608e') # noqa: E501\n ethereum_inquirer = rotkehlchen_api_server.rest_api.rotkehlchen.chains_aggregator.ethereum.node_inquirer # noqa: E501\n database = rotkehlchen_api_server.rest_api.rotkehlchen.data.db\n get_decoded_events_of_transaction(\n evm_inquirer=ethereum_inquirer,\n database=database,\n tx_hash=tx_hex,\n )\n # Call time range\n from_timestamp = 1627401169\n to_timestamp = 1627401170\n async_query = random.choice([False, True])\n with patch(\n 'rotkehlchen.chain.ethereum.modules.sushiswap.sushiswap.Sushiswap.get_balances',\n side_effect=lambda _: {},\n ):\n response = requests.get(\n api_url_for(\n rotkehlchen_api_server,\n 'modulestatsresource',\n module='sushiswap',\n ),\n json={\n 'async_query': async_query,\n 'from_timestamp': from_timestamp,\n 'to_timestamp': to_timestamp,\n },\n )\n if async_query:\n task_id = assert_ok_async_response(response)\n outcome = wait_for_async_task(rotkehlchen_api_server, task_id, timeout=120)\n assert outcome['message'] == ''\n result = outcome['result']\n else:\n result = assert_proper_response_with_result(response)\n\n events_balances = result[TEST_EVENTS_ADDRESS_1]\n\n assert len(events_balances) == 1", "def get_stats(evts_perigee) -> Table:\n rows = []\n\n for evt in reversed(evts_perigee):\n rows.append(evt.info)\n\n out = Table(rows=rows)\n return out", "def 
get_all(self):\r\n return [{ 'label': x[0], 'timing': x[1], 'is_tool': x[0] in self._tool_labels}\r\n for x in sorted(self._timings_by_path.items(), key=lambda x: x[1], reverse=True)]", "def getDataWithTimeIndex(self, t):\n\n return self.sensorDf.iloc[t,:self.sensorChannels].values", "def unbalanced(self):\n # TODO: Find a way to make a sql query to return all unbalanced transactions\n return []", "def get_non_indexed_results(self):\n\n with sqlite3.connect(self.db_path) as conn:\n cursor = conn.cursor()\n\n cursor.execute(\"SELECT status_code, file_count, start_time, end_time, website_id\"\n \" FROM TaskResult WHERE indexed_time IS NULL\")\n db_result = cursor.fetchall()\n\n cursor.execute(\"UPDATE TaskResult SET indexed_time=CURRENT_TIMESTAMP WHERE indexed_time IS NULL\")\n conn.commit()\n\n return [TaskResult(r[0], r[1], r[2], r[3], r[4]) for r in db_result]", "def tobs():\n # Query all tobs for the previous year\n results = session.query(Measurement.date, Measurement.tobs).\\\n filter(Measurement.date >= '2016-10-01').all()\n\n all_tobs = []\n\n for result in results:\n tobs_dict = {}\n tobs_dict[\"date\"] = result[0]\n tobs_dict[\"tobs\"] = result[1]\n all_tobs.append(tobs_dict)\n return jsonify(all_tobs)", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def get_reports(self):\r\n result = QtSql.QSqlQuery('''Select * FROM failures''')\r\n list = []\r\n while result.next():\r\n failure = Failure(unicode(result.value(0).toString()), # id\r\n unicode(result.value(1).toString()), # comment\r\n unicode(result.value(2).toString()), # indicator\r\n bool(result.value(3))) # release\r\n p = self.get_presentation(failure.talkId)\r\n r = Report(p, failure)\r\n list.append(r)\r\n return list", "def fetch_all_tx(self):\n transactions = []\n for block in self.chain:\n transactions.append(block.data)\n return transactions", "def read_daily_qualified_report(self):\n from itertools import repeat\n\n self.ID_TOTAL_CANDIDATES = kpi_from_db_config.ID_TOTAL_CANDIDATES\n self.ID_TOTAL_PROCESSED = kpi_from_db_config.ID_TOTAL_PROCESSED\n self.ID_TOTAL_EXPORTED = kpi_from_db_config.ID_TOTAL_EXPORTED\n self.ID_TOTAL_CLASSIFIED = kpi_from_db_config.ID_TOTAL_CLASSIFIED\n self.ID_TOTAL_QUALIFIED = kpi_from_db_config.ID_TOTAL_QUALIFIED\n self.ID_TOTAL_DISQUALIFIED = kpi_from_db_config.ID_TOTAL_DISQUALIFIED\n\n list_id = [self.ID_TOTAL_CANDIDATES, \n self.ID_TOTAL_PROCESSED, \n self.ID_TOTAL_EXPORTED, \n self.ID_TOTAL_CLASSIFIED, \n self.ID_TOTAL_QUALIFIED, \n self.ID_TOTAL_DISQUALIFIED]\n list_result = [[] for i in repeat(None,len(list_id))]\n\n for i in range(len(list_id)):\n self.cursor.execute('''\n SELECT value\n FROM public.kpi_report\n WHERE id = %s\n ORDER BY created_at DESC\n LIMIT 2\n ''', [list_id[i]])\n\n rows_count = self.cursor.rowcount\n if (rows_count == 2):\n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n elif (rows_count == 1):\n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n list_result[i] = list_result[i] + [0]\n else:\n list_result[i] = [0] * 2 \n\n# print \"TESTING .... 
{}\".format(list_result)\n return list_result", "def logs(self):\n dataTuples = list()\n try:\n with sqlite3.connect(self.db_file) as conn:\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM curves\")\n dataTuples = cur.fetchall()\n return [{\"id\": a[0], \"name\": a[1], \"units\": a[2],\n \"descr\": a[3]} for a in dataTuples]\n except:\n return dataTuples", "def trades(self) -> List[ClosedTrade]:\n return store.completed_trades.trades", "def tobs():\n \n # Obtain the current year from the date and using that date determine the previous year appending 01-01 and 12-31\n compare_date = dt.date.today()\n start_date = f\"{compare_date.year - 1}-01-01\"\n end_date = f\"{compare_date.year - 1}-12-31\"\n \n tobs_result = session.query(Measurement.tobs).filter((Measurement.date >= start_date) & (Measurement.date <= end_date)\n ).order_by(Measurement.date).all()\n \n tobs = []\n tobs = list(np.ravel(tobs_result))\n return jsonify(tobs)", "def gather_stock_data(tickers, save=True):\n prices = pd.DataFrame()\n ts = TimeSeries(key='EY2QBMV6MD9FX9CP', output_format='pandas')\n\n for ticker in tickers:\n successful_grab = False\n ticker_daily_adj = None\n\n while successful_grab is not True:\n try:\n ticker_daily_adj = ts.get_daily_adjusted(ticker, outputsize='full')[0]\n successful_grab = True\n except ValueError:\n print('Waiting for API to let me in')\n time.sleep(10)\n\n ticker_daily_adj.loc[:, '0. ticker'] = ticker\n ticker_daily_adj = ticker_daily_adj[sorted(ticker_daily_adj.columns)]\n\n prices = pd.concat([prices, ticker_daily_adj])\n\n prices.sort_index(inplace=True)\n prices.reset_index(inplace=True)\n prices['date'] = pd.to_datetime(prices['date'])\n if save:\n prices.to_csv('stockdata.csv', index=True)\n\n return prices", "def test_get_settled_batch_list(self):\n self.trans_details.get_settled_batch_list(\n include_statistics = True,\n )\n\n self.trans_details.get_settled_batch_list(\n first_settlement_date=u\"2011-01-01T01:00:00\",\n )\n\n self.trans_details.get_settled_batch_list(\n last_settlement_date=u\"2011-01-01T01:00:00\",\n )\n\n # all three together\n self.trans_details.get_settled_batch_list(\n include_statistics = True,\n first_settlement_date=u\"2011-01-01T01:00:00\",\n last_settlement_date=u\"2011-01-02T01:00:00\"\n )", "def tobsdata():\n # query for the dates and temperature observations from a year from the last data point.\n # * Return a JSON list of Temperature Observations (tobs) for the previous year.\n # as this should be a list, I'm just grabbing the station name\n session = Session(engine)\n results = session.query(Measurement.tobs).filter(Measurement.date>='2016-08-23').all()\n session.close()\n tobs_list = list(np.ravel(results))\n\n # * Return the JSON representation of your dictionary.\n return jsonify(tobs_list)", "def get_data(self):\n data = list(IgnitionRow.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1]\n pct_flop_data = [[int(elem['pct_flop_{}'.format(key)]) for elem in two_hours] \n \tfor key in self.keys]\n pct_flop_data = [[min(elem, 100) for elem in arr] for arr in pct_flop_data] # Assume a max pot size of 2000 BBs\n return pct_flop_data", "def get_data(self,tickers,alpha,max_retries = 5):\n data = []\n skipped = []\n for ticker in tqdm(tickers,desc = \"Acquiring data\"):\n try:\n company = Company(ticker,alpha = alpha)\n data.append(company)\n except Exception as e:\n time.sleep(10)\n skipped.append(ticker)\n\n for i in range(max_retries):\n new_data,skipped = self.get_data(skipped,alpha = alpha,max_retries = 0)\n 
data.extend(new_data)\n if len(skipped) == 0:\n break\n\n return data,skipped", "def timestamps(self):\n return self.source.timestamps[self._time_keep]", "def timers(self):\n return self['timers']", "def get_all_metrics(self):\n up_time = self.uptime()\n down_time = self.downtime()\n customer_sla = self.sla()\n objective = self.slo()\n indicator = self.sli()\n avail_percentage = self.availability()\n mt_bf = self.mtbf(up_time)\n mt_tr = self.mttr(down_time)\n list_results = [up_time,down_time,customer_sla,objective,indicator,avail_percentage,mt_bf,mt_tr]\n return list_results", "def _get_trades(self):\n\n trade_url = self.trade_url % (self.date, self.instrument, self.exchange)\n self.trades = pd.read_csv(trade_url, parse_dates=[0],\n date_parser=lambda t: pd.to_datetime(str(t), format='%Y%m%dT%H%M%S'))\n\n self.trades.fillna(np.nan)\n self.trades.index = pd.to_datetime(self.trades.time, unit='s')\n self.trades.time = pd.to_datetime(self.trades.time, unit='s')\n self.trades.columns = ['time', 'price', 'volume', 'source', 'buyer', 'seller', 'initiator']\n # del self.trades['time']\n\n if self.exclude_derivative:\n self.trades = self.trades[(self.trades.source != 'Derivatives trade') & (self.trades.source != 'Official')]", "def report(self):\n log = self._array.state()\n result = []\n for record in log:\n result.append(f\"{record.worker_name()}\\t${record.task_payment()}\")\n return \"\\n\".join(result)", "def get_transactions(self):\n\n df = self.__transactions[\n [\n \"Date\",\n \"Type\",\n \"Ticker\",\n \"Side\",\n \"Price\",\n \"Quantity\",\n \"Fees\",\n \"Investment\",\n \"Currency\",\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n ]\n ]\n df = df.replace(np.nan, \"-\")\n df[\"Date\"] = df[\"Date\"].dt.strftime(\"%Y-%m-%d\")\n df.sort_values(by=\"Date\", ascending=False, inplace=True)\n return df", "def tobs():\n # query for the last day\n\n # Create our session (link) from Python to the DB\n session = Session(engine)\n \n last_day = session.query(Measurement.date).order_by(Measurement.date.desc()).first()[0]\n len_months = 12\n # convert result to datetime format\n last_day = datetime.datetime.strptime(last_day, \"%Y-%m-%d\")\n # calculate start day\n start_day = last_day - datetime.timedelta(days=365)\n start_day = \"{:%Y-%m-%d}\".format(start_day)\n\n # Design a query to retrieve the last 12 months of temperature data and plot the results\n results = session.query(Measurement.date, Measurement.tobs, Measurement.station).\\\n filter(Measurement.date >= start_day ).\\\n order_by(Measurement.date).all()\n\n session.close()\n \n temps = []\n for result in results:\n temp_dict = {}\n temp_dict[\"date\"] = result.date\n temp_dict[\"tobs\"] = result.tobs\n temp_dict[\"station\"] = result.station\n temps.append(temp_dict)\n \n return jsonify(temps)", "def _get_unit_records(self, start_time):\r\n\r\n if self.optMTTF.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN \\\r\n ( \\\r\n SELECT DISTINCT MIN(fld_unit, fld_request_date), \\\r\n fld_incident_id, fld_request_date, \\\r\n fld_unit, fld_hardware_id \\\r\n FROM rtk_incident \\\r\n GROUP BY fld_unit \\\r\n ) AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n ORDER BY t2.fld_unit 
ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n elif self.optMTBBD.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN \\\r\n ( \\\r\n SELECT fld_incident_id, fld_request_date, fld_unit, \\\r\n fld_hardware_id \\\r\n FROM rtk_incident \\\r\n GROUP BY fld_unit, fld_request_date \\\r\n ) AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n GROUP BY t2.fld_unit, t1.fld_age_at_incident \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n elif self.optMTBF.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN rtk_incident AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n (_results, _error_code, __) = self._dao.execute(_query, commit=False)\r\n\r\n return(_results, _error_code)", "def get_stats(self, epg_dn):\n # Apic saves up to 95 different objects with statistic information\n traffic_list = []\n for i in range(10, -1, -1):\n traffic = self.moDir.lookupByDn(epg_dn + '/HDl2IngrBytesAg15min-%s' % str(i))\n if traffic is not None:\n traffic_list.append(traffic)\n return traffic_list", "def process_ticket_data():\r\n c = conn.cursor()\r\n c.execute('Select Count(*) from raw_ticket_data')\r\n totalleft = c.fetchone()[0]\r\n print('{} total rows required'.format(totalleft))\r\n np.random.seed(1)\r\n df_total = pd.read_sql_query('Select Ticketnumber, TickIssueDate, TickIssueTime, ViolationDesc, '\r\n ' VehMake, TickRPPlate, TickStreetNo, TickMeter, Agency, TickBadgeIssued, '\r\n 'TickStreetName , TotalPaid, TotalAmtDue from raw_ticket_data ', conn)\r\n columnlist = df_total.columns.tolist()\r\n df_total.sort_values(by = 'TickIssueDate', inplace = True)\r\n n = 500000\r\n totalsize = df_total.shape[0]\r\n indexes = [i for i in range(0,totalsize, n)]\r\n columnlist = df_total.columns.tolist()\r\n columnlist.append('address')\r\n tqdm.pandas()\r\n j = 1\r\n for i in indexes:\r\n df = df_total[i:i+n]\r\n print('Iteration {} started at {}. 
{} records left'.format(j, dt.datetime.now().strftime(\"%H:%M\"), totalsize))\r\n df['TickStreetNo'] = df['TickStreetNo'].apply(return_num)\r\n df['ViolationDesc'] = df['ViolationDesc'].apply(lambda x: x.replace('METER DTN','MTR OUT DT'))\r\n df['TickStreetName'] = df['TickStreetName'].apply(replace_street)\r\n df['TickStreetName'] = df['TickStreetName'].apply(return_street)\r\n df['TotalPaid'] = df['TotalPaid'].apply(return_cost)\r\n df['TotalAmtDue'] = df['TotalAmtDue'].apply(lambda x: re.sub('[^1-9]', '', str(x)))\r\n df['TickRPPlate'] = df['TickRPPlate'].apply(lambda x: 'None' if len(re.findall('[\\w+]', str(x))) == 0 else str(x).replace('[^\\w+]', ''))\r\n df['Tdelt'] = df['TickIssueTime'].apply(return_time_delta)\r\n\r\n\r\n df_1 = df.merge(single_address, left_on = ['TickStreetNo', 'TickStreetName'], right_on = ['number', 'streetname'])\r\n df_2 = df.merge(double_address, left_on = ['TickStreetNo', 'TickStreetName'], right_on = ['number', 'streetname'])\r\n\r\n df_2 = df_2.merge(df_1, how = 'left', left_on = ['TickIssueDate', 'TickBadgeIssued', 'nhood'], right_on = ['TickIssueDate', 'TickBadgeIssued', 'nhood'])\r\n df_3 = df_2[pd.isnull(df_2['Tdelt_y'])]\r\n df_2.dropna(subset = ['Tdelt_y'], inplace = True)\r\n df_2['timedelta'] = df_2.apply(lambda x: np.abs(x['Tdelt_y'] - x['Tdelt_x']), axis = 1)\r\n df_2.sort_values(by = 'timedelta', inplace = True)\r\n\r\n df_2.columns = [col.replace('_x', '') for col in df_2.columns]\r\n df_3.columns = [col.replace('_x', '') for col in df_3.columns]\r\n df_2.drop_duplicates(subset = 'TicketNumber', inplace = True)\r\n print(\"Searching for unmatchable addresses\")\r\n df_3['address'] = df_3.progress_apply(return_address, axis = 1)\r\n\r\n df = df_1.append(df_2)\r\n df = df.append(df_3)\r\n df['TickIssueDate'] = df.apply(Time, axis = 1)\r\n df = df[columnlist]\r\n\r\n if i == 0:\r\n df.to_sql('ticket_data', if_exists = 'replace',con = conn)\r\n else:\r\n df.to_sql('ticket_data', if_exists = 'append',con = conn)\r\n\r\n totalsize -= n\r\n j+=1\r\n\r\n del c\r\n\r\n return", "def get_all(self):\n total_expense_reports = []\n get_count = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'RECORDID',\n 'WHENCREATED',\n 'WHENPOSTED',\n 'TOTALENTERED',\n 'STATE',\n 'TOTALDUE',\n 'DESCRIPTION',\n 'CURRENCY',\n 'BASECURR',\n 'MEMO'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n expense_reports = self.format_and_send_request(data)['data']['EEXPENSES']\n total_expense_reports = total_expense_reports + expense_reports\n offset = offset + pagesize\n return total_expense_reports", "def get_ticker_details(self):\n\n ticker_data = sequential_data_scrape(\n scrape.download_ticker_details,\n [\n f\"https://finviz.com/quote.ashx?&t={row.get('Ticker')}\"\n for row in self.data\n ],\n self._user_agent,\n )\n\n for entry in ticker_data:\n for key, value in entry.items():\n for ticker_generic in self.data:\n if ticker_generic.get(\"Ticker\") == key:\n if \"Sales\" not in self.headers:\n self.headers.extend(list(value[0].keys()))\n\n ticker_generic.update(value[0])\n self.analysis.extend(value[1])\n\n return self.data", "def get_send_statistics(self):\r\n return self._make_request('GetSendStatistics')", "def GenerateTimeBlockData(self, 
time_ranges):\r\n # Reset the class data for the next log parsing.\r\n for time_range in time_ranges:\r\n time_range.Reset()\r\n\r\n self.ParseLog(time_ranges)\r\n\r\n daily_data = {}\r\n html_string = \"\"\r\n\r\n html_string += \"Summary: %s <br>\\n\" % self.station\r\n html_string += \"<table border='1'>\\n\"\r\n html_string += \"<tr><th></th><th>Completed</th><th>Canceled</th>\"\r\n html_string += \"<th>Errors</th>\"\r\n html_string += \"<th>Average Rig Scan Time</th>\"\r\n html_string += \"<th>Average Operator Scan Time</th>\"\r\n html_string += \"<th>Average Time Between Scans</th>\"\r\n html_string += \"<th>Total Idle Time</th></tr>\"\r\n\r\n # Filter the log data into different timeblocks\r\n for time_range in time_ranges:\r\n daily_data[str(time_range)] = time_range.total\r\n daily_data[str(time_range) + \"_canceled\"] = time_range.canceled\r\n daily_data[str(time_range) + \"_error\"] = time_range.errors\r\n\r\n html_string += self.GenerateTimeblockString(time_range)\r\n\r\n # Special case for total\r\n daily_data[\"Total\"] = self.total_time_range.total\r\n daily_data[\"Total_canceled\"] = self.total_time_range.canceled\r\n daily_data[\"Total_error\"] = self.total_time_range.errors\r\n\r\n html_string += self.GenerateTimeblockString(self.total_time_range)\r\n\r\n html_string += \"</table><br>\\n\"\r\n\r\n daily_data[\"html_string\"] = html_string\r\n\r\n return daily_data" ]
[ "0.7311632", "0.5812507", "0.571646", "0.57079303", "0.5684339", "0.55696833", "0.55493486", "0.553089", "0.5528401", "0.5502346", "0.548358", "0.544936", "0.544525", "0.54149175", "0.5411108", "0.54054636", "0.5394847", "0.5392003", "0.53741133", "0.5357601", "0.5319892", "0.5315574", "0.5306587", "0.5294445", "0.5268564", "0.5268471", "0.52652085", "0.5246509", "0.5229982", "0.5213194", "0.52094626", "0.5193845", "0.51731986", "0.51707786", "0.5167521", "0.5164851", "0.5157847", "0.5154265", "0.51487666", "0.51421916", "0.5126859", "0.51257807", "0.51206225", "0.5117641", "0.5113356", "0.5107234", "0.51052964", "0.51018983", "0.51013494", "0.50983304", "0.5089383", "0.5087617", "0.50871193", "0.5077727", "0.5075697", "0.50666744", "0.50664437", "0.5065563", "0.5059428", "0.50571084", "0.50521255", "0.50519127", "0.50451815", "0.50408965", "0.5040734", "0.5034725", "0.50309044", "0.5028216", "0.5027254", "0.5022196", "0.5015484", "0.5015203", "0.50140107", "0.50135046", "0.501185", "0.5005963", "0.5002926", "0.500187", "0.49993232", "0.4993229", "0.49838415", "0.4979748", "0.4978617", "0.49767402", "0.49660423", "0.49655992", "0.49651027", "0.4963297", "0.4958102", "0.4958062", "0.49552363", "0.4948401", "0.49470198", "0.4946902", "0.49459326", "0.49453542", "0.49379748", "0.49336943", "0.49332044", "0.4929667" ]
0.6642906
1
Returns a list containing any slow transaction data collected during the reporting period. NOTE Currently only the slowest transaction for the reporting period is retained.
def slow_transaction_data(self):

        # XXX This method no longer appears to be used. Being replaced
        # by the transaction_trace_data() method.

        if not self.__settings:
            return []

        if not self.__slow_transaction:
            return []

        maximum = self.__settings.agent_limits.transaction_traces_nodes

        transaction_trace = self.__slow_transaction.transaction_trace(
                self, maximum)

        data = [transaction_trace,
                list(self.__slow_transaction.string_table.values())]

        if self.__settings.debug.log_transaction_trace_payload:
            _logger.debug('Encoding slow transaction data where '
                    'payload=%r.', data)

        json_data = json_encode(data)

        level = self.__settings.agent_limits.data_compression_level
        level = level or zlib.Z_DEFAULT_COMPRESSION

        zlib_data = zlib.compress(six.b(json_data), level)

        pack_data = base64.standard_b64encode(zlib_data)

        if six.PY3:
            pack_data = pack_data.decode('Latin-1')

        root = transaction_trace.root

        trace_data = [[root.start_time,
                root.end_time - root.start_time,
                self.__slow_transaction.path,
                self.__slow_transaction.request_uri,
                pack_data]]

        return trace_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transaction_trace_data(self, connections):\n\n _logger.debug('Generating transaction trace data.')\n\n if not self.__settings:\n return []\n\n # Create a set 'traces' that is a union of slow transaction,\n # and Synthetics transactions. This ensures we don't send\n # duplicates of a transaction.\n\n traces = set()\n if self.__slow_transaction:\n traces.add(self.__slow_transaction)\n traces.update(self.__synthetics_transactions)\n\n # Return an empty list if no transactions were captured.\n\n if not traces:\n return []\n\n # We want to limit the number of explain plans we do across\n # these. So work out what were the slowest and tag them.\n # Later the explain plan will only be run on those which are\n # tagged.\n\n agent_limits = self.__settings.agent_limits\n explain_plan_limit = agent_limits.sql_explain_plans_per_harvest\n maximum_nodes = agent_limits.transaction_traces_nodes\n\n database_nodes = []\n\n if explain_plan_limit != 0:\n for trace in traces:\n for node in trace.slow_sql:\n # Make sure we clear any flag for explain plans on\n # the nodes in case a transaction trace was merged\n # in from previous harvest period.\n\n node.generate_explain_plan = False\n\n # Node should be excluded if not for an operation\n # that we can't do an explain plan on. Also should\n # not be one which would not be included in the\n # transaction trace because limit was reached.\n\n if (node.node_count < maximum_nodes and\n node.connect_params and node.statement.operation in\n node.statement.database.explain_stmts):\n database_nodes.append(node)\n\n database_nodes = sorted(database_nodes,\n key=lambda x: x.duration)[-explain_plan_limit:]\n\n for node in database_nodes:\n node.generate_explain_plan = True\n\n else:\n for trace in traces:\n for node in trace.slow_sql:\n node.generate_explain_plan = True\n database_nodes.append(node)\n\n # Now generate the transaction traces. 
We need to cap the\n # number of nodes capture to the specified limit.\n\n trace_data = []\n\n for trace in traces:\n transaction_trace = trace.transaction_trace(\n self, maximum_nodes, connections)\n\n data = [transaction_trace,\n list(trace.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n if trace.record_tt:\n force_persist = True\n else:\n force_persist = False\n\n if trace.include_transaction_trace_request_uri:\n request_uri = trace.request_uri\n else:\n request_uri = None\n\n trace_data.append([transaction_trace.start_time,\n root.end_time - root.start_time,\n trace.path,\n request_uri,\n pack_data,\n trace.guid,\n None,\n force_persist,\n None,\n trace.synthetics_resource_id, ])\n\n return trace_data", "def get_reports(self):\r\n return sorted(self._reports,\r\n key=lambda x: x['stats']['totalTimeMillis'],\r\n reverse=True)", "def get_reports(self):\n return sorted(self._reports,\n key=lambda x: x['stats']['totalTimeMillis'],\n reverse=True)", "def return_trade_history(\n self,\n start: Timestamp,\n end: Timestamp,\n ) -> list[dict[str, Any]]:\n limit = 100\n data: list[dict[str, Any]] = []\n start_ms = start * 1000\n end_ms = end * 1000\n while True:\n new_data = self.api_query_list('/trades', {\n 'startTime': start_ms,\n 'endTime': end_ms,\n 'limit': limit,\n })\n results_length = len(new_data)\n if data == [] and results_length < limit:\n return new_data # simple case - only one query needed\n\n latest_ts_ms = start_ms\n # add results to data and prepare for next query\n existing_ids = {x['id'] for x in data}\n for trade in new_data:\n try:\n timestamp_ms = trade['createTime']\n latest_ts_ms = max(latest_ts_ms, timestamp_ms)\n # since we query again from last ts seen make sure no duplicates make it in\n if trade['id'] not in existing_ids:\n data.append(trade)\n except (DeserializationError, KeyError) as e:\n msg = str(e)\n if isinstance(e, KeyError):\n msg = f'Missing key entry for {msg}.'\n self.msg_aggregator.add_warning(\n 'Error deserializing a poloniex trade. Check the logs for details',\n )\n log.error(\n 'Error deserializing poloniex trade',\n trade=trade,\n error=msg,\n )\n continue\n\n if results_length < limit:\n break # last query has less than limit. 
We are done.\n\n # otherwise we query again from the last ts seen in the last result\n start_ms = latest_ts_ms\n continue\n\n return data", "def get_latest_transactions(self):\n first_run = False\n if not self._transactions:\n first_run = True\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n self._logger.debug('Caching %s transactions', len(transactions))\n self._transactions.extend(transactions)\n if first_run:\n self._logger.info('First run detected, discarding transactions until now')\n return []\n return transactions", "def slow_queries(self):\n request = Request(method=\"get\", endpoint=\"/query/slow\")\n\n def response_handler(resp):\n if not resp.is_success:\n raise C8QLQueryListError(resp, request)\n return self._format_queries(resp.body)\n\n return self._execute(request, response_handler)", "def timings(self):\r\n return self._timings", "def get_pending_transactions():\n\n return History.get_pending().get()", "def returnTradeHistory(self, time=1 * 60 * 60, limit=100):\n assert limit <= 100, \"'limit' has to be smaller than 100\"\n return self.dpay.rpc.get_trade_history(\n transactions.formatTimeFromNow(-time),\n transactions.formatTimeFromNow(),\n limit,\n api=\"market_history\"\n )", "def unbalanced(self):\n # TODO: Find a way to make a sql query to return all unbalanced transactions\n return []", "def trades(self) -> List[ClosedTrade]:\n return store.completed_trades.trades", "def get_all_latest_transactions(self):\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n return transactions", "def showTransactions(self):\n self.scanTransactions()\n txns = []\n\n # Summarize the stats\n for x in range(len(self._trans)):\n stats = self._trans[x]\n trans_time = 0\n remote_calls = 0\n for name, stat in stats:\n trans_time += stat.total_tt\n remote_calls += 1\n txns.append((x, trans_time, remote_calls))\n\n results = [\"TX#\\tTime\\tCalls\",\n \"=\" * 22]\n\n for item in txns:\n results.append(\"%3d\\t%4f\\t%5d\" % item)\n \n return \"\\n\".join(results)", "def getTransferListSummary(self):\n p_ids_and_prices = {}\n players = self.getAllPlayerInfoTransferlist()\n\n # Get IDs of all players\n log_event(self.queue, \"Gathering player prices... 
\")\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n # removed Filter for unlisted / expired players\n if p_id not in p_ids_and_prices:\n p_sellprice = self.getPlayerSellPrice(p_id)\n # If sell price returns 0, need to fetch from Futbin\n if p_sellprice == 0:\n p_sellprice = self.getFutbinPrice_opentab(p_id)\n self.sleep_approx(5) # Delay iteration to not anger futbin\n # Add player ID and price to dict\n p_ids_and_prices[p_id] = p_sellprice\n\n for p_id in p_ids_and_prices:\n p_price = p_ids_and_prices[p_id]\n p_name = self.getPlayerCardName(p_id)\n log_event(self.queue, str(p_name) + \" - #\" +\n str(p_id) + \" Price \" + str(p_price))\n\n num_p_sold = 0\n num_p_expired = 0\n num_p_unlisted = 0\n num_p_listed = 0\n\n sold_p_value = 0\n expired_p_value = 0\n unlisted_p_value = 0\n listed_p_value = 0\n\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n p_soldprice = p[5] # is 0 if unlisted\n p_sellprice = int(p_ids_and_prices[p_id])\n\n if \"won\" in p_bidstatus:\n num_p_sold += 1\n sold_p_value += p_soldprice\n if \"expired\" in p_bidstatus:\n num_p_expired += 1\n expired_p_value += p_sellprice\n if (p_bidstatus == \"listFUTItem\"):\n num_p_unlisted += 1\n unlisted_p_value += p_sellprice\n if (p_bidstatus == \"listFUTItem has-auction-data\"):\n num_p_listed += 1\n listed_p_value += p_sellprice\n\n log_event(self.queue, \"Players sold: \" + str(num_p_sold))\n log_event(self.queue, \"Players expired: \" + str(num_p_expired))\n log_event(self.queue, \"Players listed: \" + str(num_p_listed))\n log_event(self.queue, \"Players unlisted: \" + str(num_p_unlisted))\n log_event(self.queue, \" - - - \")\n log_event(self.queue, \"Sold players value: \" + str(sold_p_value))\n log_event(self.queue, \"Expired players value: \" +\n str(expired_p_value))\n log_event(self.queue, \"Unlisted players value: \" +\n str(unlisted_p_value))\n log_event(self.queue, \"Listed players value: \" + str(listed_p_value))\n\n # TODO subtract bought price\n self.user_players_won += int(num_p_unlisted)\n self.p_ids_and_prices = p_ids_and_prices\n intel = [p_ids_and_prices, num_p_sold, num_p_expired, num_p_unlisted,\n num_p_listed, sold_p_value, expired_p_value, unlisted_p_value, listed_p_value]\n return intel", "def time_list(self):\n return (self.N_T * (np.arange(self.N_itr) + 1) /\n self.N_itr * 1000 * self.DT)", "def _get_meas_times_from_db(self):\n meas_times = []\n if self._data['report_save_historical_instances_ind'] != 'Y':\n # for non historical reports take measurement time from saved dataset\n dataset = self._jfile.get_current_stored_dataset()\n try:\n meas_time = datetime.datetime.strptime(dataset['meas_time'], '%Y-%m-%d %H:%M:%S')\n except ValueError:\n raise Exception(\"Cannot unformat string %s to datetime\" % dataset['meas_time'])\n meas_times.append(meas_time)\n\n else:\n # for historical reports take measurement times from db datasets\n where_sql = ''\n where_sql_list = list()\n params = [self._id, self._segment_value_id]\n\n if self._process_dataset_ids:\n for dataset_id in self._process_dataset_ids:\n if type(dataset_id) == list:\n where_sql_list.append(\"(report_data_set_instance_id >= %s AND report_data_set_instance_id <= %s)\")\n if dataset_id[0] < dataset_id[1]:\n params.append(dataset_id[0])\n params.append(dataset_id[1])\n else:\n params.append(dataset_id[1])\n params.append(dataset_id[0])\n else:\n where_sql_list.append(\"report_data_set_instance_id = %s\")\n params.append(dataset_id)\n where_sql = ' AND (%s)' % ' OR '.join(where_sql_list)\n\n self._db.Query(\"\"\"SELECT 
measurement_time\n FROM report_data_set_instance\n WHERE\n `element_id`= %%s\n AND segment_value_id = %%s\n %s\n ORDER BY measurement_time ASC\"\"\" % where_sql, tuple(params))\n meas_times = [item['measurement_time'] for item in self._db.record]\n\n return meas_times", "def fetch(self, daterange=(datetime.now() - timedelta(1), datetime.now())):\n cursor = self.conn.cursor()\n sql = 'SELECT measure_dt, ping, download, upload FROM speedlogs ' + \\\n ' WHERE measure_dt BETWEEN ? AND ?'\n cursor.execute(sql, daterange)\n return cursor.fetchall()", "async def get_trades(self) -> List[TradeRequest]:\n data = j.dumps({\n 'startindex': 0,\n 'statustype': 'inbound'\n })\n r = await self.request.request(url='https://www.roblox.com/my/money.aspx/getmyitemtrades', data=data, method='POST')\n data = json.loads(r.json()['d'])[\"Data\"]\n trades = []\n for trade in data:\n t = json.loads(trade)\n trades.append(TradeRequest(self.request, t['Date'], t['Expires'], t['TradePartner'], t['TradePartnerID'], t['Status'], t['TradeSessionID']))\n return trades", "def get_trades_for_symbol(self, symbol, time_range=5):\n return [trade for trade in self\n if trade.symbol == symbol and (trade.timestamp > (int(time.time())-time_range*60))]", "def get_new_data(self):\n\n # record bar parse performance\n self.logger.debug(\"Started parsing new ticks.\")\n start_parse = time.time()\n for exchange in self.exchanges:\n exchange.parse_ticks()\n end_parse = time.time()\n duration = round(end_parse - start_parse, 5)\n\n self.logger.debug(\n \"Parsed \" + str(self.total_instruments) +\n \" instruments' ticks in \" + str(duration) + \" seconds.\")\n self.track_tick_processing_performance(duration)\n\n # wrap new 1 min bars in market events\n new_market_events = []\n for exchange in self.exchanges:\n bars = exchange.get_new_bars()\n for symbol in exchange.get_symbols():\n for bar in bars[symbol]:\n event = MarketEvent(exchange.get_name(), bar)\n new_market_events.append(event)\n # add bars to save-to-db-later queue\n # TODO: store new bars concurrently with a processpool\n self.bars_save_to_db.put(event)\n return new_market_events", "def get_gdax_historical_data():\n \n start = None\n while not start:\n start,end,tid = getStartAndEndHistoric()\n if not start:\n time.sleep(60)\n #Todo: change this to 1min\n firsttimestamp = start\n engine = sa.create_engine(sql_address)\n products = [\"LTC-USD\",\"LTC-BTC\",\"ETH-USD\",\"ETH-BTC\",\"BTC-USD\"]\n public_client = gdax.PublicClient()\n deltat = datetime.timedelta(seconds = 200)\n timewindows = []\n while end - start > datetime.timedelta(seconds=0):\n if start + deltat > end:\n endx = end\n else:\n endx = start + deltat\n timewindows.append([start,endx])\n start += deltat\n results = []\n total = len(timewindows)\n current_idx = 0\n timeold = time.time()\n numofqueries = 0\n engine = sa.create_engine(sql_address)\n Base.metadata.bind = engine\n DBSession = sa.orm.sessionmaker()\n DBSession.bind = engine\n session = DBSession()\n for startx,endx in timewindows:\n\n current_idx += 1\n for i in products:\n repeat = True\n while repeat:\n\n #delay if ratelimts are close\n if numofqueries < 3:\n while time.time() - timeold < 1:\n time.sleep(0.05)\n \n timeold = time.time()\n numofqueries = 0\n try:\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n except:\n time.sleep(30)\n public_client = gdax.PublicClient()\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n alist = 
public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n numofqueries += 1\n\n #rate limit exceeded has 'message' as dict.\n if not 'message' in alist:\n repeat = False\n for a in alist:\n a[0] = datetime.datetime.fromtimestamp(float(a[0]))\n tmp = i.split('-')\n d = dict(coin = tmp[0], currency = tmp[1], timestamp = a[0], low=a[1], high=a[2], open=a[3], close=a[4], volume=a[5])\n results.append(d)\n lasttimestamp = a[0]\n\n #upload with batch size of 10000\n if len(results) > 10000:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n if len(results) > 0:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n\n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n update.status='complete'\n order = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'order')).first()\n order.status='complete'\n session.commit()", "def pending_transactions(self):\n return self._call_account_method(\n 'pendingTransactions'\n )", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def get_trades_history(self, symbol, start_time, end_time, limit=1000):\n payload = {'symbol': symbol, 'start': start_time, 'end': end_time, 'limit': limit}\n return self.public_request('GET', '/api/v1/trades', **payload)", "def get_transactions(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get transactions from database\n cur.execute(\"SELECT * FROM transactions\")\n transactions_data = cur.fetchall()\n\n # convert into a dict of values.\n transactions_list = []\n [transactions_list.append({'transaction_id': transaction[0],\n 'date': transaction[1],\n 'payee_id': transaction[2],\n 'description': transaction[3],\n 'amount': transaction[4]})\n for transaction in transactions_data]\n\n # close the cursor\n self.close_cursor()\n\n return transactions_list", "def describe_slow_log_records(\n self,\n request: dds_20151201_models.DescribeSlowLogRecordsRequest,\n ) -> 
dds_20151201_models.DescribeSlowLogRecordsResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_slow_log_records_with_options(request, runtime)", "async def get_blacklist_hist(self, search_time, limit=1000):\n\n start = search_time[0][0]\n end = search_time[0][1]\n\n url = f'https://{self.__api}/v3/blacklist/history'\n continuation = None\n full_resp = {}\n flag = True\n body = {\"filter[clientid]\": self.clientid, \"filter[start_time]\": start, \"filter[end_time]\": end,\n \"limit\": limit, \"continuation\": continuation}\n while True:\n with requests.get(url, params=body,\n headers={'X-WallarmAPI-UUID': self.__uuid,\n 'X-WallarmAPI-Secret': self.__secret}) as response:\n if response.status not in [200, 201, 202, 204, 304]:\n raise NonSuccessResponse(response.status, await response.text)\n continuation = response.json().get('body').get('continuation')\n\n if flag:\n full_resp = response.json()\n\n if continuation is not None:\n body['continuation'] = continuation\n if not flag:\n full_resp['body']['objects'].extend(response.json().get('body').get('objects'))\n else:\n break\n flag = False\n return full_resp", "def _get_meas_times(self, last_meas_time):\n meas_times = list()\n data = None\n \n if self._process_type == 'soft_gen':\n meas_times = self._get_meas_times_from_db()\n else:\n if self._data['data_fetch_method'] == 'sql':\n # get from outer sql db\n data = self._get_meas_times_sql(last_meas_time)\n elif self._data['data_fetch_method'] == 'web service':\n # get from web service\n data = self._get_meas_times_web_service(last_meas_time)\n\n\n if data:\n clear_data = [row[0] for row in data['data']]\n # check if we have values in list of datetime type\n if clear_data:\n if type(clear_data[0]) == datetime.datetime:\n meas_times = clear_data\n else:\n # it's a date type\n meas_times = [datetime.datetime.combine(d, datetime.time.min) for d in clear_data]\n\n \n\n\n # sort measurement times if they weren't sorted before\n meas_times.sort()\n # if do not save history, take only last element\n if self._data['report_save_historical_instances_ind'] != 'Y':\n if len(meas_times) > 1:\n del meas_times[:-1]\n \n return meas_times", "def transactions(self, dt=None):\n if dt is None:\n # flatten the by-day transactions\n return [\n txn\n for by_day in itervalues(self._processed_transactions)\n for txn in by_day\n ]\n\n return self._processed_transactions.get(dt, [])", "def fetch_all_tx(self):\n transactions = []\n for block in self.chain:\n transactions.append(block.data)\n return transactions", "def gather_data(self, *args, **kwargs):\n instrument_arg = kwargs.get('instrument', 'EUR_USD')\n granularity_arg = kwargs.get('granularity', 'M1')\n candle_format = kwargs.get('candleFormat', 'bidask')\n start_time = kwargs.get('start', '2014-10-01T00:00:00.000000Z')\n count_arg = kwargs.get('count', 5000)\n out_data = []\n data_complete = False\n while(not data_complete):\n response = self.oanda.get_history(instrument=instrument_arg,\n granularity=granularity_arg,\n candleFormat=candle_format,\n start=start_time,\n count=count_arg)\n raw_data = response['candles']\n if (len(out_data) == 0):\n out_data = out_data + raw_data\n elif (len(out_data) > 1):\n # raw_data[0] is already in out_data as raw_data[-1] from last\n # iteration\n out_data = out_data + raw_data[1:]\n start_time = raw_data[-1]['time']\n if (len(raw_data) < 5000):\n data_complete = True\n\n out_data = self._list_to_df(out_data)\n return out_data", "def timestamps(self):\n return self.source.timestamps[self._time_keep]", 
"def timings(self):\n if self._C_timings is None:\n raise RuntimeError(\"Cannot extract timings with non-finalized Profiler.\")\n return {field: max(getattr(self._C_timings, field), 10**-6)\n for field, _ in self._C_timings._fields_}", "def query_tickets(self):\n return self._call_txtrader_api('query_tickets', {})", "def probe_times(self):\r\n probe_times = []\r\n for probe in self.__probes.values():\r\n if probe.complete():\r\n if probe.round_trip_time() > 20:\r\n \"Long probe: %s \" %self.__id\r\n probe_times.append(probe.round_trip_time())\r\n return probe_times", "def get_non_indexed_results(self):\n\n with sqlite3.connect(self.db_path) as conn:\n cursor = conn.cursor()\n\n cursor.execute(\"SELECT status_code, file_count, start_time, end_time, website_id\"\n \" FROM TaskResult WHERE indexed_time IS NULL\")\n db_result = cursor.fetchall()\n\n cursor.execute(\"UPDATE TaskResult SET indexed_time=CURRENT_TIMESTAMP WHERE indexed_time IS NULL\")\n conn.commit()\n\n return [TaskResult(r[0], r[1], r[2], r[3], r[4]) for r in db_result]", "def timeseries_report(self):\n report = pd.DataFrame(index=self.price.index)\n report.loc[:, \"FR Energy Throughput (kWh)\"] = self.ene_results['ene']\n report.loc[:, \"FR Energy Throughput Up (Charging) (kWh)\"] = self.variables['regu_c']*self.krd_avg*self.dt*self.storage.rte\n report.loc[:, \"FR Energy Throughput Up (Discharging) (kWh)\"] = self.variables['regu_d']*self.krd_avg*self.dt\n report.loc[:, \"FR Energy Throughput Down (Charging) (kWh)\"] = self.variables['regd_c']*self.krd_avg*self.dt*self.storage.rte\n report.loc[:, \"FR Energy Throughput Down (Discharging) (kWh)\"] = self.variables['regd_d']*self.krd_avg*self.dt\n report.loc[:, \"FR Energy Settlement Price Signal ($/kWh)\"] = self.price\n report.loc[:, 'Regulation Up (Charging) (kW)'] = self.variables['regu_c']\n report.loc[:, 'Regulation Up (Discharging) (kW)'] = self.variables['regu_d']\n report.loc[:, 'Regulation Down (Charging) (kW)'] = self.variables['regd_c']\n report.loc[:, 'Regulation Down (Discharging) (kW)'] = self.variables['regd_d']\n report.loc[:, \"Regulation Up Price Signal ($/kW)\"] = self.p_regu\n report.loc[:, \"Regulation Down Price Signal ($/kW)\"] = self.p_regd\n\n return report", "def read_daily_messages_report(self):\n from itertools import repeat\n\n self.ID_TWEET_ORANGE_FLOW = kpi_from_db_config.ID_TWEET_ORANGE_FLOW\n self.ID_PROCESSING_MESSAGES = kpi_from_db_config.ID_PROCESSING_MESSAGES\n self.ID_CANDIDATES_PROCESSED = kpi_from_db_config.ID_CANDIDATES_PROCESSED\n\n list_id = [self.ID_TWEET_ORANGE_FLOW, \n self.ID_PROCESSING_MESSAGES, \n self.ID_CANDIDATES_PROCESSED]\n len_need_list = [7, 8, 2]\n list_result = [[] for i in repeat(None,len(list_id))]\n\n for i in range(len(list_id)):\n self.cursor.execute('''\n SELECT value\n FROM public.kpi_report\n WHERE id = %s\n ORDER BY created_at DESC\n LIMIT %s\n ''', [list_id[i], len_need_list[i]])\n rows_count = self.cursor.rowcount\n\n if (rows_count == len_need_list[i]): # If rows_count as expected \n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n elif (rows_count > 0 and rows_count < len_need_list[i]):\n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n list_result[i] = list_result[i] + [0] * (len_need_list[i] - rows_count) \n else:\n list_result[i] = [0] * len_need_list[i]\n\n return list_result", "def getScanList(self):\n \n scanList = []\n for row in self._calData:\n if str(row.calType()) == 'CAL_DELAY':\n scanList.append(row.scanSet()[0])\n return scanList", "def get_tiingo_prices(tickers, 
start, end, api_key=None):\n\n all_results = []\n if api_key is None:\n api_key = os.getenv('TIINGO_API_KEY')\n # Sort tickers so that error logging can be used to identify progress\n tickers = sorted(tickers)\n\n for i, ticker in enumerate(tickers):\n try:\n df = web.DataReader(name=ticker,\n data_source='tiingo',\n start=start,\n end=end,\n api_key=api_key)\n df = df[['adjClose']]\n except KeyError as e:\n if e.args[0] == 'date':\n # Patch to handle issue in pandas_datareader\n # where empty results cause a KeyError\n print(f'Got empty df for i={i}, ticker={tickers[i]}')\n df = pd.DataFrame()\n except Exception as e:\n print('Received an unexpected error:', e)\n print(f'Only fetched up to {i-1} inclusive. Returning.')\n return pd.concat(all_results)\n\n if (i % 50 == 0) and i > 0:\n # Sleep to avoid timeouts. Empirically found 20s to be sufficient\n time.sleep(20)\n\n all_results.append(df)\n return pd.concat(all_results)", "def trades(self) -> list[TradeOffer]:\n return self._connection.trades", "def get_target_timestamps(self):\n times=[]\n curr = self.begin_ts\n while curr<=self.end_ts:\n times.append(curr)\n curr = curr + 24 * 60 * 60\n return times", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.budget.id}/accounts/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def related_reports(self):\n return Report.objects.exclude(contact_email__isnull=True).filter(contact_email__iexact=self.contact_email).order_by('status', '-create_date')[:1000]", "def determineUnitHistory():\n\tunitTracker = Unitiser()\n\t\n\timport transactions\n\ttrades = transactions.allTransactions()\n\t\n\thistory = dict()\n\t\n\tfor date in timeline():\n\t\t#print(\"\\ntimelime:\", date.strftime('%Y-%m-%d %H:%M:%S'))\n\t\timport valuator\n\t\tvalue = valuator.getPortfolioValueAt(date)\n\t\tif date in trades:\n\t\t\tprior = getPortfolioBefore(date)\n\t\t\tprior_value = valuator.getPortfolioValueAt(date, portfolio = prior)\n\n\t\t\tinvested = Decimal('0.0')\n\t\t\tfor equity in trades[date]:\n\t\t\t\ttrade = trades[date][equity]\n\t\t\t\t#print(equity, trade)\n\t\t\t\tif trade['action'] == 'buy':\n\t\t\t\t\tinvested = invested + Decimal(trade['value'])\n\t\t\t\telif trade['action'] == 'sell':\n\t\t\t\t\tinvested = invested - Decimal(trade['value'])\n\n\t\t\tsince = getPortfolioAt(date)\n\t\t\tsince_value = valuator.getPortfolioValueAt(date, portfolio = since)\n\n\t\t\t#print(\"change amount is\", invested)\n\t\t\tif invested > 0:\n\t\t\t\tunitTracker.invest(invested, prior_value)\n\t\t\telif invested < 0:\n\t\t\t\tunitTracker.divest(abs(invested), prior_value)\n\n\t\thistory[date] = {\n\t\t\t 'date' : date,\n\t\t\t 'value' : value.quantize(TWOPLACES),\n\t\t\t 'units' : unitTracker.numberOfUnits().quantize(TWOPLACES),\n\t\t\t 'price' : unitTracker.pricePerUnit(value).quantize(TWOPLACES),\n\t\t\t 'invested' : unitTracker.invested\n\t\t\t }\n\t\n\treturn history", "def getTempBanIps(self):\n banned = []\n q = \"\"\"SELECT clients.ip AS target_ip FROM penalties INNER JOIN clients ON penalties.client_id = clients.id\n WHERE penalties.type = 'TempBan' AND penalties.inactive = 0 AND penalties.time_expire > %s\n GROUP BY clients.ip\"\"\" % int(time())\n cursor = self.query(q)\n if cursor:\n 
while not cursor.EOF:\n banned.append(cursor.getValue('target_ip'))\n cursor.moveNext()\n cursor.close()\n return banned", "def timers(self):\n return self['timers']", "def get_stock_price(df_excld):\n\n ts = TimeSeries(os.environ['ALPHA_VANTAGE_KEY'])\n\n info = []\n symbols = []\n counter = 0\n\n for t in df_excld['Ticker']:\n\n if counter % 5 == 0:\n time.sleep(65)\n\n i, m = ts.get_daily(symbol=t, outputsize='full')\n info.append(i)\n symbols.append(m['2. Symbol'])\n counter += 1\n\n return info, symbols", "def get_all_coins_history(self, end_date=None, start_date=None, verbose=True):\n infos = []\n for coin in self.get_coins():\n if verbose:\n print(\"Collecting data for >> \" + coin)\n if start_date:\n start_date = start_date\n else:\n start_date = '20130428'\n if end_date:\n end_date = end_date\n else:\n now = str(datetime.now().date()).replace('-', '')\n end_date = now\n coin_url = self.coins[coin]\n coin_url = coin_url + '/historical-data/?start=' + start_date + '&end=' + end_date\n content = urlopen(coin_url).read()\n soup = BeautifulSoup(content, 'html.parser')\n results = soup.find_all(\"tr\", class_=\"text-right\")\n\n for result in results:\n date = result.find_all('td')[0].text\n\n open_val = result.find_all('td')[1].text\n if open_val == '-':\n open_val = None\n else:\n open_val = float(result.find_all('td')[1].text.replace(',', ''))\n\n high_val = result.find_all('td')[2].text\n if high_val == '-':\n high_val = None\n else:\n high_val = float(result.find_all('td')[2].text.replace(',', ''))\n\n low_val = result.find_all('td')[3].text\n if low_val == '-':\n low_val = None\n else:\n low_val = float(result.find_all('td')[3].text.replace(',', ''))\n\n close_val = result.find_all('td')[4].text\n if close_val == '-':\n close_val = None\n else:\n close_val = float(result.find_all('td')[4].text.replace(',', ''))\n\n volume = result.find_all('td')[5].text\n if volume == '-':\n volume = None\n else:\n volume = float(result.find_all('td')[5].text.replace(',', ''))\n\n market_cap = result.find_all('td')[6].text\n if market_cap == '-':\n market_cap = None\n else:\n market_cap = float(result.find_all('td')[6].text.replace(',', ''))\n temp = {\n \"coin\": coin, # soup.title.text.split()[0],\n \"date\": date,\n \"symbol\": soup.title.text.split()[1].replace('(', '').replace(')', ''),\n \"open_val\": open_val,\n \"high_val\": high_val,\n \"low_val\": low_val,\n \"close_val\": close_val,\n \"volume\": volume,\n \"market_cap\": market_cap\n }\n infos.append(temp)\n df_all = pd.DataFrame.from_dict(infos)\n df_all['middle_val'] = (df_all.high_val + df_all.low_val) / 2\n df_all['datetime'] = pd.to_datetime(df_all['date'])\n df_all = df_all.sort_values(by='datetime')\n self.coins_history = df_all", "def slow_update_duration(self):\n for i in range(len(self.data_file.sorted_data)):\n if self.data_file.sorted_data[i]['type'] == 'slow':\n slow_upd = self.data_file.sorted_data[i]['timestamp']\n Config.ANALYSIS.write(f\"slow at: {slow_upd}\\n\")\n if i == 0:\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n elif i == len(self.data_file.sorted_data) - 1:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\\n\")\n else:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n 
Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\")\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n Config.ANALYSIS.write(\"\\n\\n\")", "def get_consumption_transactions(self, exclude_inferred_receipts=False):\n from casexml.apps.stock.const import (\n TRANSACTION_TYPE_STOCKONHAND,\n TRANSACTION_TYPE_RECEIPTS,\n TRANSACTION_TYPE_CONSUMPTION\n )\n transactions = [\n ConsumptionTransaction(\n TRANSACTION_TYPE_RECEIPTS if self.delta > 0 else TRANSACTION_TYPE_CONSUMPTION,\n abs(self.delta),\n self.report_date\n )\n ]\n if self.type == LedgerTransaction.TYPE_BALANCE:\n if self.delta > 0 and exclude_inferred_receipts:\n transactions = []\n\n transactions.append(\n ConsumptionTransaction(\n TRANSACTION_TYPE_STOCKONHAND,\n self.updated_balance,\n self.report_date\n )\n )\n return transactions", "def trades_for(self, symbol, from_id=None, timestamp=None, limit=None):\n\t\tif self._session:\n\t\t\tdata = {'symbol': symbol}\n\n\t\t\tif from_id:\n\t\t\t\tdata['fromId'] = from_id\n\n\t\t\tif timestamp:\n\t\t\t\tdata['startTime'] = int(timestamp * 1000.0)\n\n\t\t\tif limit:\n\t\t\t\tdata['limit'] = limit\n\n\t\t\treturn self._session.get_my_trades(**data)\n\n\t\treturn []", "def deposits_for_period(self):\n cash_transaction = CashTransaction(self.user)\n limits = self.user.limits\n deposits = 0\n if limits.exists():\n deposit_limit = self.user.limits.get(type=Limit.DEPOSIT)\n deposits = \\\n cash_transaction.get_all_deposits(date_range=deposit_limit.time_period_boundaries)[\n 'amount__sum']\n return deposits", "def get_reports(self):\r\n result = QtSql.QSqlQuery('''Select * FROM failures''')\r\n list = []\r\n while result.next():\r\n failure = Failure(unicode(result.value(0).toString()), # id\r\n unicode(result.value(1).toString()), # comment\r\n unicode(result.value(2).toString()), # indicator\r\n bool(result.value(3))) # release\r\n p = self.get_presentation(failure.talkId)\r\n r = Report(p, failure)\r\n list.append(r)\r\n return list", "def get_all(self):\r\n return [{ 'label': x[0], 'timing': x[1], 'is_tool': x[0] in self._tool_labels}\r\n for x in sorted(self._timings_by_path.items(), key=lambda x: x[1], reverse=True)]", "def get_report_list(jtl_file):\n df = None\n try:\n df = pd.read_csv(jtl_file,\n low_memory=False,\n error_bad_lines=False,\n quoting=csv.QUOTE_NONE,\n encoding='utf-8')\n except Exception as e:\n err_msg = 'read jtl file error. 
detail:{e}'.format(e=e)\n LOGGER.error(err_msg)\n if df is None:\n return\n threads = int(jtl_file.split(os.sep)[-1].split('_')[0])\n success, elapsed, latency, sent_bytes, receive_bytes = [df.get(x) for x in\n ['success', 'elapsed', 'Latency', 'sentBytes', 'bytes']]\n samples = df.shape[0]\n error_count = success.value_counts().get(False)\n if not error_count:\n error_count = 0\n error_rate = str(float(error_count / samples) * 100) + '%'\n label = df.loc[0, 'label']\n start_time = df.iat[0, 0]\n end_time = df.iloc[-1, 0]\n last_req_time = df.iat[-1, 1]\n\n # 如果最后一行数据无效,则取上一行\n i = 1\n while not len(str(end_time)) == 13 and not re.findall('[\\d]{13}', str(end_time)):\n i += 1\n end_time = df.iloc[-i, 0]\n last_req_time = df.iat[-i, 1]\n samples -= 1\n\n if isinstance(start_time, str):\n start_time = int(start_time)\n if isinstance(end_time, str):\n end_time = int(end_time)\n\n local_start_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time / 1000))\n local_end_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time / 1000))\n\n durations = (end_time + last_req_time - start_time) / 1000\n throughput = samples / durations\n\n report_list = [label, local_start_time, local_end_time, durations, threads, throughput, error_rate,\n elapsed.min(), elapsed.max(), elapsed.mean(), samples, sent_bytes.mean(), receive_bytes.mean(),\n latency.mean(), latency.min(), latency.max()]\n\n return report_list", "def get_ser_spktimes(self):\n\n spktimes = []\n for sweep_no in range(self.get_no_sweeps()):\n spktimes_singlesweep = []\n for cell_no in range(self.get_no_ser_neurons()):\n spktimes_singlesweep.append(\n np.where(self.ser_spktrains[sweep_no, cell_no, :] > 0.5)[0]\n * self.get_dt()\n )\n spktimes.append(spktimes_singlesweep)\n return spktimes", "def get_all_files_to_instrument_for_live_session():\n sql=\"SELECT * FROM files WHERE should_instrument=1 AND is_history=0\"\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql)\n results=c.fetchall()\n conn.close()\n return results", "def transaction_data(self):\n return list(map(lambda transaction:transaction.to_json(), self.transaction_map.values()))", "def timers(self):\n return self.client.call('GET', self.name + '/timers')", "def get_send_statistics(self):\r\n return self._make_request('GetSendStatistics')", "def get_all(self):\n total_expense_reports = []\n get_count = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'RECORDID',\n 'WHENCREATED',\n 'WHENPOSTED',\n 'TOTALENTERED',\n 'STATE',\n 'TOTALDUE',\n 'DESCRIPTION',\n 'CURRENCY',\n 'BASECURR',\n 'MEMO'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n expense_reports = self.format_and_send_request(data)['data']['EEXPENSES']\n total_expense_reports = total_expense_reports + expense_reports\n offset = offset + pagesize\n return total_expense_reports", "def performance_history(self, request, pk=None, **kwargs):\n # Get the goal even though we don't need it (we could just use the pk)\n # so we can ensure we have permission to do so.\n goal = self.get_object()\n\n # - Get all the transaction with this goal involved that are of reason 'Execution'.\n # We want the volume, ticker id, date ordered by date. 
[(date, {ticker: vol}, ...]\n qs = Transaction.objects.filter(Q(to_goal=goal) | Q(from_goal=goal),\n reason=Transaction.REASON_EXECUTION).order_by('executed')\n txs = qs.values_list('execution_distribution__execution__executed',\n 'execution_distribution__execution__asset__id',\n 'execution_distribution__volume')\n ts = []\n entry = (None,)\n aids = set()\n # If there were no transactions, there can be no performance\n if len(txs) == 0:\n return Response([])\n\n # Because executions are stored with timezone, but other things are just as date, we need to make datetimes\n # naive before doing date arithmetic on them.\n bd = timezone.make_naive(txs[0][0]).date()\n ed = timezone.make_naive(timezone.now()).date()\n for tx in txs:\n aids.add(tx[1])\n txd = timezone.make_naive(tx[0]).date()\n if txd == entry[0]:\n entry[1][tx[1]] += tx[2]\n else:\n if entry[0] is not None:\n ts.append(entry)\n entry = (txd, defaultdict(int))\n entry[1][tx[1]] = tx[2]\n ts.append(entry)\n\n # - Get the time-series of prices for each instrument from the first transaction date until now.\n # Fill empty dates with previous value [(date, {ticker: price}, ...]\n pqs = DailyPrice.objects.filter(date__range=(bd, ed),\n instrument_content_type=ContentType.objects.get_for_model(Ticker).id,\n instrument_object_id__in=aids)\n prices = pqs.to_timeseries(fieldnames=['price', 'date', 'instrument_object_id'],\n index='date',\n storage='long',\n pivot_columns='instrument_object_id',\n values='price')\n # Remove negative prices and fill missing values\n # We replace negs with None so they are interpolated.\n prices[prices <= 0] = None\n prices = prices.reindex(pd.date_range(bd, ed), method='ffill').fillna(method='bfill')\n\n # For each day, calculate the performance\n piter = prices.itertuples()\n res = []\n # Process the first day - it's special\n row = next(piter)\n p_m1 = row[1:]\n vols_m1 = [0] * len(prices.columns)\n tidlocs = {tid: ix for ix, tid in enumerate(prices.columns)}\n for tid, vd in ts.pop(0)[1].items():\n vols_m1[tidlocs[tid]] += vd\n res.append((dt2ed(row[0]), 0)) # First day has no performance as there wasn't a move\n # Process the rest\n for row in piter:\n # row[0] (a datetime) is a naive timestamp, so we don't need to convert it\n if ts and row[0].date() == ts[0][0]:\n vols = vols_m1.copy()\n dtrans = ts.pop(0)[1] # The transactions for the current processed day.\n for tid, vd in dtrans.items():\n vols[tidlocs[tid]] += vd\n # The exposed assets for the day. 
These are the assets we know for sure were exposed for the move.\n pvol = list(map(min, vols, vols_m1))\n else:\n vols = vols_m1\n pvol = vols\n pdelta = list(map(operator.sub, row[1:], p_m1)) # The change in price from yesterday\n impact = sum(map(operator.mul, pvol, pdelta)) # The total portfolio impact due to price moves for exposed assets.\n b_m1 = sum(map(operator.mul, pvol, p_m1)) # The total portfolio value yesterday for the exposed assets.\n perf = 0 if b_m1 == 0 else impact / b_m1\n # row[0] (a datetime) is a naive timestamp, so we don't need to convert it\n res.append((dt2ed(row[0]), decimal.Decimal.from_float(perf).quantize(decimal.Decimal('1.000000'))))\n p_m1 = row[1:]\n vols_m1 = vols[:]\n\n return Response(res)", "def get_staking_transaction_history(address, page=0, page_size=1000, include_full_tx=False, tx_type='ALL',\n order='ASC', endpoint=_default_endpoint, timeout=_default_timeout\n ) -> list:\n params = [\n {\n 'address': address,\n 'pageIndex': page,\n 'pageSize': page_size,\n 'fullTx': include_full_tx,\n 'txType': tx_type,\n 'order': order\n }\n ]\n # Using v2 API, because getStakingTransactionHistory not implemented in v1\n method = 'hmyv2_getStakingTransactionsHistory'\n stx_history = rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result']\n try:\n return stx_history['staking_transactions']\n except KeyError as e:\n raise InvalidRPCReplyError(method, endpoint) from e", "def get_extension_list(self, timespan, db_lookup):\n\n if (self.search_list_extension != None):\n return [self.search_list_extension]\n \n t1 = time.time()\n\n # Get UTC offset\n stop_struct = time.localtime(timespan.stop)\n utc_offset = (calendar.timegm(stop_struct) - calendar.timegm(time.gmtime(time.mktime(stop_struct))))/60\n\n # Get our start time, numDays days ago but aligned with start of day\n # first get the start of today\n _ts = startOfDay(timespan.stop)\n # then go back numDays days\n _ts_dt = datetime.datetime.fromtimestamp(_ts)\n _start_dt = _ts_dt - datetime.timedelta(days=self.numDays)\n _start_ts = time.mktime(_start_dt.timetuple())\n\n if (self.numDays == 3):\n _start_ts = time.mktime((_ts_dt - datetime.timedelta(days=0)).timetuple())\n _end_ts = time.mktime((_ts_dt + datetime.timedelta(days=3)).timetuple())\n timespan = TimeSpan(_start_ts, _end_ts)\n \n # Get our temperature vector\n (time_start_vt, time_stop_vt, outTemp_vt) = db_lookup().getSqlVectors(TimeSpan(_start_ts, timespan.stop),'outTemp')\n # Convert our temperature vector\n outTemp_vt = self.generator.converter.convert(outTemp_vt)\n # Can't use ValueHelper so round our results manually\n # Get the number of decimal points\n tempRound = int(self.generator.skin_dict['Units']['StringFormats'].get(outTemp_vt[1], \"1f\")[-2])\n # Do the rounding\n outTempRound_vt = [roundNone(x,tempRound) for x in outTemp_vt[0]]\n # Get our time vector in ms (w34highcharts requirement)\n # Need to do it for each getSqlVectors result as they might be different\n outTemp_time_ms = [time_stop_vt[0][0] if (x == 0) else time_stop_vt[0][x] - time_stop_vt[0][0] for x in range(len(time_stop_vt[0]))]\n \n # Get our dewpoint vector\n (time_start_vt, time_stop_vt, dewpoint_vt) = db_lookup().getSqlVectors(TimeSpan(_start_ts, timespan.stop),'dewpoint')\n dewpoint_vt = self.generator.converter.convert(dewpoint_vt)\n # Can't use ValueHelper so round our results manually\n # Get the number of decimal points\n dewpointRound = int(self.generator.skin_dict['Units']['StringFormats'].get(dewpoint_vt[1], \"1f\")[-2])\n # Do the rounding\n 
dewpointRound_vt = [roundNone(x,dewpointRound) for x in dewpoint_vt[0]]\n # Get our time vector in ms (w34highcharts requirement)\n # Need to do it for each getSqlVectors result as they might be different\n dewpoint_time_ms = [time_stop_vt[0][0] if (x == 0) else time_stop_vt[0][x] - time_stop_vt[0][0] for x in range(len(time_stop_vt[0]))]\n\n # Get our humidity vector\n (time_start_vt, time_stop_vt, outHumidity_vt) = db_lookup().getSqlVectors(TimeSpan(_start_ts, timespan.stop),'outHumidity')\n # Can't use ValueHelper so round our results manually\n # Get the number of decimal points\n outHumidityRound = int(self.generator.skin_dict['Units']['StringFormats'].get(outHumidity_vt[1], \"1f\")[-2])\n # Do the rounding\n outHumidityRound_vt = [roundNone(x,outHumidityRound) for x in outHumidity_vt[0]]\n # Get our time vector in ms (w34highcharts requirement)\n # Need to do it for each getSqlVectors result as they might be different\n outHumidity_time_ms = [time_stop_vt[0][0] if (x == 0) else time_stop_vt[0][x] - time_stop_vt[0][0] for x in range(len(time_stop_vt[0]))]\n # Format our vectors in json format. Need the zip() to get time/value pairs\n # Assumes all vectors have the same number of elements\n outTemp_json = json.dumps(zip(outTemp_time_ms, outTempRound_vt))\n dewpoint_json = json.dumps(zip(dewpoint_time_ms, dewpointRound_vt))\n outHumidity_json = json.dumps(zip(outHumidity_time_ms, outHumidityRound_vt))\n # Put into a dictionary to return\n self.search_list_extension = {\n 'outTempWeekjson' : outTemp_json,\n 'dewpointWeekjson' : dewpoint_json,\n 'outHumidityWeekjson' : outHumidity_json,\n 'utcOffset': utc_offset,\n 'weekPlotStart' : _start_ts * 1000,\n 'weekPlotEnd' : timespan.stop * 1000}\n t2 = time.time()\n logdbg2(\"w34highcharts_temp_week SLE executed in %0.3f seconds\" % (t2 - t1))\n\n # Return our json data\n return [self.search_list_extension]", "def TOBS():\n session = Session(engine)\n # Query all passengers\n\n TOBS = session.query(Measurement.date,Measurement.tobs).filter(Measurement.date >= '2010-08-23').all()\n\n # Convert list of tuples into normal list\n all_TOBS = list(np.ravel(TOBS))\n\n return jsonify(all_TOBS)", "def recorded_messages(self):\n messages = []\n for time in sorted(self.reception_records):\n messages.extend(self.reception_records[time])\n return messages", "def returnTradeHistory(self,\n currency_pair=\"all\",\n start=datetime.now() - timedelta(days=1),\n end=datetime.now()):\n pass", "def returnTradeHistory(self,\n currency_pair=\"all\",\n start=datetime.now() - timedelta(days=1),\n end=datetime.now()):\n pass", "def aggregated_results(self, limit=2000) -> List[dict]:\n stored_events = []\n for events in self._iter_events():\n stored_events.extend(events)\n if len(stored_events) >= limit:\n return stored_events[:limit]\n return stored_events", "def get_transactions(self):\n\n df = self.__transactions[\n [\n \"Date\",\n \"Type\",\n \"Ticker\",\n \"Side\",\n \"Price\",\n \"Quantity\",\n \"Fees\",\n \"Investment\",\n \"Currency\",\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n ]\n ]\n df = df.replace(np.nan, \"-\")\n df[\"Date\"] = df[\"Date\"].dt.strftime(\"%Y-%m-%d\")\n df.sort_values(by=\"Date\", ascending=False, inplace=True)\n return df", "def getDataWithTimeIndex(self, t):\n\n return self.sensorDf.iloc[t,:self.sensorChannels].values", "def aggregate_timetable_details(logger):\n df_array = []\n for page in range(0, MAX_PAGE+1): # MAX_PAGE inclusive\n filename = dirname + 'TimeTablePage_{}'.format(page) + '.csv'\n if os.exists(filename):\n 
# When database is ready, this info can be recorded there\n logger.info('File[{file}] already downloaded. Reading it'.format(file=filename))\n url = filename\n else:\n count = page * 20\n url = TIMETABLE_URL + '&count={count}&page={page}'.format(count=count, page=page)\n logger.info('Fetch TimeTableDetails from URL[{url}] page[{page}] into {directory}'.format(url=url, page=page, directory=dirname))\n \n try:\n df = pd.read_html(url)[1] # the table of interest from the list\n except Exception as e:\n logger.error('Exception when reading HTML:[{exception}]'.format(exception=e))\n exit(0)\n\n logger.info('Writing to [{file}]'.format(file=filename))\n df.to_csv(filename, index=False)\n df_array.append(df)\n\n df = pd.concat(df_array)\n print(df.head())\n print(df.tail())\n\n return 0", "def find_all(self) -> List[Trade]:\n\n pass # pragma: no cover", "def service_times(self):\r\n service_times = []\r\n for task in self.__tasks.values():\r\n if task.complete():\r\n x = task.service_time()\r\n service_times.append(task.service_time())\r\n return service_times", "def get_data(symbol_id='BTC', period_id='1DAY', request_limit=1000, tdelta=30):\n now = datetime.utcnow()\n month = timedelta(days=tdelta)\n past_month = (now - month).isoformat()\n\n parameters = {'symbol_id': symbol_id, 'period_id': period_id, 'time_start': past_month[:-3], 'limit':request_limit}\n response = requests.get(HISTORY_URL, params=parameters, headers=header)\n\n while response.status_code != 200:\n time.sleep(5)\n response = requests.get(HISTORY_URL, params=parameters, headers=header)\n \n data = response.json()\n \n # this is a commnet\n csv_headers = ['time_period_start', 'time_period_end', 'price_high', 'price_low', 'price_close', 'price_open', 'trades_count', \n 'volume_traded', 'time_open', 'time_close']\n\n\n with open(str(datafolder / f'{symbol_id}_{tdelta}_day.csv'), 'w', newline='') as f:\n writer = csv.DictWriter(f, csv_headers)\n writer.writeheader()\n for item in data:\n writer.writerow(item)", "def get_timeseries():\n\n # generate the result files\n name = os.path.join(project.output_folder, project.scenario+'.sww')\n log.debug('get_timeseries: input SWW file=%s' % name)\n log.debug('get_timeseries: gauge file=%s' % project.gauge_file)\n anuga.sww2csv_gauges(name, project.gauge_file, quantities=project.layers_list,\n verbose=False)\n\n # since ANUGA code doesn't return a list of generated files,\n # look in output directory for 'gauge_*.csv' files.\n glob_mask = os.path.join(project.output_folder, 'gauge_*.csv')\n return glob.glob(glob_mask)", "def to_exclude(self):\n reporting_period = self.message_number - 3\n one_day_ago = now() - datetime.timedelta(hours=24)\n\n return PollingReport.objects.filter(\n period_number=reporting_period,\n creation_date__gte=one_day_ago,\n ).values_list('phone_number', flat=True)", "def _parse_transactions_file(self, path_to_transactions_file: str) -> List[Transaction]:\n ticker_params_to_ticker = {\n (ticker.name, ticker.security_type, ticker.point_value): ticker for ticker in self.tickers\n }\n\n def get_matching_ticker(row: QFSeries) -> Ticker:\n \"\"\" Returns the matching specific ticker. In case if the ticker does not belong to the list of tickers\n passed as the parameter, the transaction is excluded. 
\"\"\"\n ticker_str = row.loc[\"Contract symbol\"]\n name = row.loc[\"Asset Name\"]\n sec_type = SecurityType(row.loc[\"Security type\"])\n point_value = row.loc[\"Contract size\"]\n ticker = ticker_params_to_ticker.get((name, sec_type, point_value), None)\n if isinstance(ticker, FutureTicker):\n ticker_type = ticker.supported_ticker_type()\n ticker = ticker_type(ticker_str, sec_type, point_value)\n return ticker\n\n transactions_df = pd.read_csv(path_to_transactions_file)\n transactions = [Transaction(pd.to_datetime(row.loc[\"Timestamp\"]),\n get_matching_ticker(row),\n row.loc[\"Quantity\"],\n row.loc[\"Price\"],\n row.loc[\"Commission\"]) for _, row in transactions_df.iterrows()]\n transactions = [t for t in transactions if t.ticker is not None]\n return transactions", "def temporal_database():\n return TimeHistory()", "def get_relevant_reports(self):\r\n split_reports = [x.split(' ') for x in self.get_directory_list()]\r\n formatted_reps = [list(filter(None, line)) for line in split_reports]\r\n recent_reports = [line for line in formatted_reps if self.is_report_recent(line)]\r\n return recent_reports", "def fetch_bank_transactions(self):\n return self.fetch('/bank_transactions')", "def scrape_validator_withdrawals(\n validator_index: int,\n last_known_timestamp: Timestamp,\n) -> list[tuple[Timestamp, ChecksumEvmAddress, FVal]]:\n withdrawals = []\n now = ts_now()\n start = 0\n page_length = 10\n stop_iterating = False\n\n while True:\n url = f'{BEACONCHAIN_ROOT_URL}/validator/{validator_index}/withdrawals?draw=1&columns%5B0%5D%5Bdata%5D=0&columns%5B0%5D%5Bname%5D=&columns%5B0%5D%5Bsearchable%5D=true&columns%5B0%5D%5Borderable%5D=true&columns%5B0%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B0%5D%5Bsearch%5D%5Bregex%5D=false&columns%5B1%5D%5Bdata%5D=1&columns%5B1%5D%5Bname%5D=&columns%5B1%5D%5Bsearchable%5D=true&columns%5B1%5D%5Borderable%5D=true&columns%5B1%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B1%5D%5Bsearch%5D%5Bregex%5D=false&columns%5B2%5D%5Bdata%5D=2&columns%5B2%5D%5Bname%5D=&columns%5B2%5D%5Bsearchable%5D=true&columns%5B2%5D%5Borderable%5D=true&columns%5B2%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B2%5D%5Bsearch%5D%5Bregex%5D=false&columns%5B3%5D%5Bdata%5D=3&columns%5B3%5D%5Bname%5D=&columns%5B3%5D%5Bsearchable%5D=true&columns%5B3%5D%5Borderable%5D=true&columns%5B3%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B3%5D%5Bsearch%5D%5Bregex%5D=false&columns%5B4%5D%5Bdata%5D=4&columns%5B4%5D%5Bname%5D=&columns%5B4%5D%5Bsearchable%5D=true&columns%5B4%5D%5Borderable%5D=true&columns%5B4%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B4%5D%5Bsearch%5D%5Bregex%5D=false&order%5B0%5D%5Bcolumn%5D=0&order%5B0%5D%5Bdir%5D=desc&start={start}&length={page_length}&search%5Bvalue%5D=&search%5Bregex%5D=false&_={now}' # noqa: E501\n response = _query_page(url, 'withdrawals')\n try:\n result = response.json()\n except json.JSONDecodeError as e:\n raise RemoteError(f'Could not parse {response.text} from beaconchain as json') from e\n\n for entry in result['data']: # data appears in descending time\n epoch_match = EPOCH_PARSE_REGEX.match(entry[0])\n if epoch_match is None:\n log.error(f'Failed to match epoch regex for {entry[0]}')\n raise RemoteError('Failed to parse withdrawals response from beaconchain. Check logs') # noqa: E501\n groups = epoch_match.groups()\n if len(groups) != 1:\n log.error(f'Failed to match epoch regex for {entry[0]}')\n raise RemoteError('Failed to parse withdrawals response from beaconchain. 
Check logs') # noqa: E501\n epoch = deserialize_int_from_str(groups[0], location='beaconchain epoch')\n timestamp = epoch_to_timestamp(epoch)\n if timestamp <= last_known_timestamp:\n stop_iterating = True\n break # we already know about this withdrawal\n\n address_match = ADDRESS_PARSE_REGEX.match(entry[3])\n if address_match is None:\n log.error(f'Failed to match address regex for {entry[3]}')\n raise RemoteError('Failed to parse withdrawals response from beaconchain. Check logs') # noqa: E501\n groups = address_match.groups()\n if len(groups) != 1:\n log.error(f'Failed to match address regex for {entry[3]}')\n raise RemoteError('Failed to parse withdrawals response from beaconchain. Check logs') # noqa: E501\n address = deserialize_evm_address(groups[0])\n\n eth_match = ETH_PARSE_REGEX.match(entry[4])\n if eth_match is None:\n log.error(f'Failed to match eth regex for {entry[4]}')\n raise RemoteError('Failed to parse withdrawals response from beaconchain. Check logs') # noqa: E501\n groups = eth_match.groups()\n if len(groups) != 1:\n log.error(f'Failed to match eth regex for {entry[4]}')\n raise RemoteError('Failed to parse withdrawals response from beaconchain. Check logs') # noqa: E501\n eth_amount = deserialize_fval(groups[0], name='withdrawal ETH', location='beaconchain query') # noqa: E501\n\n withdrawals.append((timestamp, address, eth_amount))\n\n if stop_iterating or len(withdrawals) >= result['recordsTotal']:\n break # reached the end\n start += page_length\n\n return withdrawals", "def get_df_transactions():\n\n _, res = DBX.files_download(c.io.FILE_TRANSACTIONS)\n return pd.read_excel(io.BytesIO(res.content), index_col=0)", "def wem_market_value_all():\n __query = \"\"\"\n select\n date_trunc('month', wfs.trading_interval) AS trading_day,\n sum(wfs.eoi_quantity * wbs.price) as energy_interval,\n wf.fueltech_id\n from wem_facility_scada wfs\n left join wem_facility wf on wfs.facility_id = wf.code\n join wem_balancing_summary wbs on wfs.trading_interval = wbs.trading_interval\n where\n wf.fueltech_id is not null\n group by 1, wf.fueltech_id\n order by 1 desc, 2 asc\n \"\"\"\n\n query = __query.format()\n\n json_envelope = {}\n\n with engine.connect() as c:\n rows = c.execute(query)\n\n current_tech = None\n\n for row in rows:\n\n current_tech = row[2]\n\n if current_tech not in json_envelope.keys():\n json_envelope[current_tech] = {\n \"id\": f\"wem.fuel_tech.{current_tech}.market_value\",\n \"fuel_tech\": current_tech,\n \"region\": \"wa\",\n \"type\": \"market_value\",\n \"units\": \"AUD\",\n \"history\": {\n \"interval\": \"1M\",\n \"start\": None,\n \"last\": None,\n \"data\": [],\n },\n }\n\n if (\n json_envelope[current_tech][\"history\"][\"start\"] == None\n or row[0] < json_envelope[current_tech][\"history\"][\"start\"]\n ):\n json_envelope[current_tech][\"history\"][\"start\"] = row[0]\n\n if (\n json_envelope[current_tech][\"history\"][\"last\"] == None\n or row[0] > json_envelope[current_tech][\"history\"][\"last\"]\n ):\n json_envelope[current_tech][\"history\"][\"last\"] = row[0]\n\n json_envelope[current_tech][\"history\"][\"data\"].append(row[1])\n\n return [json_envelope[i] for i in json_envelope.keys()]", "def timeseries_report(self):\n try:\n n = self.n.value\n except AttributeError:\n n = self.n\n results = pd.DataFrame(index=self.variables.index)\n results['ICE Generation (kW)'] = self.variables['ice_gen']\n results['ICE On (y/n)'] = self.variables['on_ice']\n results['ICE P_min (kW)'] = self.p_min\n results['ICE Genset P_max (kW)'] = self.rated_power * n\n 
return results", "def timestamps(self) -> List[T]:\n return self._timestamps", "def get_pnl_trades(self):\n\n if self._pnl_trades is None:\n tsc = TimeSeriesCalcs()\n self._pnl_trades = tsc.calculate_individual_trade_gains(self._signal, self._pnl)\n\n return self._pnl_trades", "def get_all_stats():\n\n return get_component(CachingPackage.COMPONENT_NAME).get_all_stats()", "def listunspent(self, minconf=1, maxconf=999999):\n return [TransactionInfo(**tx) for tx in\n self.proxy.listunspent(minconf, maxconf)]", "def get_stats(self, epg_dn):\n # Apic saves up to 95 different objects with statistic information\n traffic_list = []\n for i in range(10, -1, -1):\n traffic = self.moDir.lookupByDn(epg_dn + '/HDl2IngrBytesAg15min-%s' % str(i))\n if traffic is not None:\n traffic_list.append(traffic)\n return traffic_list", "def main_looper(df: pd.DataFrame()) -> list:\n for_sale_dictionary = {}\n sell_dictionary = {}\n repeat_sale = {}\n unrecorded_sale = []\n for idx, row in df.iterrows():\n if row['function_call']['function'] == 'createSaleAuction':\n # TODO - change for different NFT\n # entry into dict is kitty ID (unique to cryptokitties)\n # checks if already posted to be sold, continues if this is a reentry to sell, recording it\n if row['function_call']['_kittyId'] in for_sale_dictionary:\n if row['function_call']['_kittyId'] in repeat_sale:\n repeat_sale[row['function_call']['_kittyId']] += 1\n continue\n else:\n repeat_sale[row['function_call']['_kittyId']] = 1 \n continue\n for_sale_dictionary.update({str(row['function_call']['_kittyId']) : row['block_timestamp']})\n \n if row['function_call']['function'] == 'transfer':\n # must have posted for sale in an earlier block, cant find interval\n if not row['function_call']['_tokenId'] in for_sale_dictionary:\n unrecorded_sale.append([row['hash'], row['function_call']['_tokenId']])\n # print(f\"token not recoreded for sale: {row['function_call']['_tokenId']}\")\n # print(row['hash'])\n continue\n if for_sale_dictionary[row['function_call']['_tokenId']] == -1:\n print('missed for sale block, somehow??')\n continue\n # TODO find a smarter way to track times transfered!!\n sell_entry = str(row['function_call']['_tokenId']) + '_' + str(row['block_number'])\n print('')\n print('')\n print('')\n print(row['block_timestamp'])\n print(for_sale_dictionary['_tokenId'])\n time_interval = row['block_timestamp'] - for_sale_dictionary['_tokenId']\n for_sale_dictionary[row['function_call']['_tokenId']] = -1\n print(time_interval)\n sell_dictionary.update({sell_entry : row['block_timestamp']})\n print(len(unrecorded_sale))\n if len(unrecorded_sale) > 0:\n rng = min(7, len(unrecorded_sale))\n for ii in range(rng):\n print(unrecorded_sale[ii])\n for jj in range(rng):\n print(unrecorded_sale[len(unrecorded_sale)-jj-1])\n \n\n # print(f'Token IDs posted for sale:')\n # for key in for_sale_dictionary:\n # print(f'{key} : {for_sale_dictionary[key]}')", "def _get_current_session_tiling_list(self) -> List:\n return self._data[-1][History._TILINGS]", "def get_open_transactions(self):\n return self.__open_transactions[:]", "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = 
api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. {len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def _historical_user_data_for_decisions(user: User, days: int) -> List:\n data = []\n for begin, end in _time_range_list(days):\n db_clicks_statements = DBDiscussionSession.query(ClickedStatement).filter(\n ClickedStatement.author_uid == user.uid,\n ClickedStatement.timestamp >= begin,\n ClickedStatement.timestamp < end).all()\n db_clicks_arguments = DBDiscussionSession.query(ClickedArgument).filter(\n ClickedArgument.author_uid == user.uid,\n ClickedArgument.timestamp >= begin,\n ClickedArgument.timestamp < end).all()\n clicks = len(db_clicks_statements) + len(db_clicks_arguments)\n data.append(clicks)\n\n return data", "def get_blocking_times(individuals):\n ambulance_patients_times = []\n other_patients_times = []\n patients_still_in_system = []\n\n for ind in individuals:\n if ind.data_records[0].node == 1 and len(ind.data_records) == 2:\n ambulance_patients_times.append(\n ind.data_records[0].time_blocked + ind.data_records[1].time_blocked\n )\n elif ind.data_records[0].node == 2 and len(ind.data_records) == 1:\n other_patients_times.append(ind.data_records[0].time_blocked)\n else:\n patients_still_in_system.append(ind)\n return [ambulance_patients_times, other_patients_times, patients_still_in_system]", "def get_extension_list(self, timespan, db_lookup):\n\n if (self.search_list_extension != None):\n return [self.search_list_extension]\n \n t1 = time.time()\n\n # Get UTC offset\n stop_struct = time.localtime(timespan.stop)\n utc_offset = (calendar.timegm(stop_struct) - calendar.timegm(time.gmtime(time.mktime(stop_struct))))/60\n\n # Get our start time, numDays days ago but aligned with start of day\n # first get the start of today\n _ts = startOfDay(timespan.stop)\n # then go back numDays days\n _ts_dt = datetime.datetime.fromtimestamp(_ts)\n _start_dt = _ts_dt - datetime.timedelta(days=self.numDays)\n _start_ts = time.mktime(_start_dt.timetuple())\n\n if (self.numDays == 3):\n _start_ts = time.mktime((_ts_dt - datetime.timedelta(days=0)).timetuple())\n _end_ts = time.mktime((_ts_dt + datetime.timedelta(days=3)).timetuple())\n timespan = TimeSpan(_start_ts, _end_ts)\n \n # Get our barometer vector\n (time_start_vt, time_stop_vt, barometer_vt) = db_lookup().getSqlVectors(TimeSpan(_start_ts, timespan.stop),\n 'barometer')\n barometer_vt = self.generator.converter.convert(barometer_vt)\n # Can't use ValueHelper so round our results manually\n # Get the number of decimal points\n barometerRound = int(self.generator.skin_dict['Units']['StringFormats'].get(barometer_vt[1], \"1f\")[-2])\n # Do the rounding\n barometerRound_vt = [roundNone(x,barometerRound) for x in barometer_vt[0]]\n # Get our time vector in ms (w34highcharts requirement)\n # Need to do it for each getSqlVectors result as they might be different\n barometer_time_ms = [time_stop_vt[0][0] if (x == 0) else time_stop_vt[0][x] - time_stop_vt[0][0] for x in range(len(time_stop_vt[0]))]\n\n # Get our rain vector, need to sum over the hour\n (time_start_vt, time_stop_vt, rain_vt) = db_lookup().getSqlVectors(TimeSpan(_start_ts, timespan.stop),'rain')\n (time_start_vt, time_stop_vt, rainRate_vt) = db_lookup().getSqlVectors(TimeSpan(_start_ts, 
timespan.stop),'rainRate')\n # Check if we have a partial hour at the end\n # If we do then set the last time in the time vector to the hour\n # Avoids display issues with the column chart\n # Need to make sure we have at least 2 records though\n #if len(time_stop_vt[0]) > 1:\n # if time_stop_vt[0][-1] < time_stop_vt[0][-2] + 3600:\n # time_stop_vt[0][-1] = time_stop_vt[0][-2] + 3600\n # Convert our rain vector\n rain_vt = self.generator.converter.convert(rain_vt)\n rainRate_vt = self.generator.converter.convert(rainRate_vt)\n # Can't use ValueHelper so round our results manually\n # Get the number of decimal points\n rainRound = int(self.generator.skin_dict['Units']['StringFormats'].get(rain_vt[1], \"1f\")[-2])\n rainRateRound = int(self.generator.skin_dict['Units']['StringFormats'].get(rainRate_vt[1], \"1f\")[-2])\n # Do the rounding\n rainRound_vt = [roundNone(x,rainRound) for x in rain_vt[0]]\n rainRateRound_vt = [roundNone(x,rainRateRound) for x in rainRate_vt[0]]\n # Get our time vector in ms (w34highcharts requirement)\n # Need to do it for each getSqlVectors result as they might be different\n timeRain_ms = [time_stop_vt[0][0] if (x == 0) else time_stop_vt[0][x] - time_stop_vt[0][0] for x in range(len(time_stop_vt[0]))]\n\n # Format our vectors in json format. Need the zip() to get time/value pairs\n # Assumes all vectors have the same number of elements\n barometer_json = json.dumps(zip(barometer_time_ms, barometerRound_vt))\n rain_json = json.dumps(zip(timeRain_ms, rainRound_vt))\n rainRate_json = json.dumps(zip(timeRain_ms, rainRateRound_vt))\n\n # Put into a dictionary to return\n self.search_list_extension = {\n 'barometerWeekjson' : barometer_json,\n 'rainWeekjson' : rain_json,\n 'rainRateWeekjson' : rainRate_json,\n 'utcOffset': utc_offset,\n 'weekPlotStart' : _start_ts * 1000,\n 'weekPlotEnd' : timespan.stop * 1000}\n\n t2 = time.time()\n logdbg2(\"w34highcharts_bar_rain_week SLE executed in %0.3f seconds\" % (t2 - t1))\n\n # Return our json data\n return [self.search_list_extension]", "def report_data(self):\n report = [donor_obj.data for donor_obj in self.donor_list]\n return report", "def test_get_events_history_filtering_by_timestamp(rotkehlchen_api_server: 'APIServer'):\n tx_hex = deserialize_evm_tx_hash('0xb226ddb8cbb286a7a998a35263ad258110eed5f923488f03a8d890572cd4608e') # noqa: E501\n ethereum_inquirer = rotkehlchen_api_server.rest_api.rotkehlchen.chains_aggregator.ethereum.node_inquirer # noqa: E501\n database = rotkehlchen_api_server.rest_api.rotkehlchen.data.db\n get_decoded_events_of_transaction(\n evm_inquirer=ethereum_inquirer,\n database=database,\n tx_hash=tx_hex,\n )\n # Call time range\n from_timestamp = 1627401169\n to_timestamp = 1627401170\n async_query = random.choice([False, True])\n with patch(\n 'rotkehlchen.chain.ethereum.modules.sushiswap.sushiswap.Sushiswap.get_balances',\n side_effect=lambda _: {},\n ):\n response = requests.get(\n api_url_for(\n rotkehlchen_api_server,\n 'modulestatsresource',\n module='sushiswap',\n ),\n json={\n 'async_query': async_query,\n 'from_timestamp': from_timestamp,\n 'to_timestamp': to_timestamp,\n },\n )\n if async_query:\n task_id = assert_ok_async_response(response)\n outcome = wait_for_async_task(rotkehlchen_api_server, task_id, timeout=120)\n assert outcome['message'] == ''\n result = outcome['result']\n else:\n result = assert_proper_response_with_result(response)\n\n events_balances = result[TEST_EVENTS_ADDRESS_1]\n\n assert len(events_balances) == 1" ]
[ "0.6520807", "0.5953189", "0.5941957", "0.5698872", "0.56704205", "0.55845326", "0.55000305", "0.54975384", "0.53581554", "0.5356301", "0.5327412", "0.53010184", "0.5276863", "0.52720934", "0.5257284", "0.52468723", "0.5164559", "0.5162055", "0.51554185", "0.5149581", "0.5126841", "0.51238465", "0.512099", "0.5117233", "0.5102619", "0.5096786", "0.5094895", "0.50867134", "0.50841445", "0.5080421", "0.50669205", "0.5043826", "0.5013088", "0.5007789", "0.5005582", "0.5003696", "0.499864", "0.4992441", "0.499195", "0.49833015", "0.4983003", "0.4982164", "0.497292", "0.49664688", "0.49629444", "0.49527597", "0.49433863", "0.49418467", "0.49315026", "0.49312994", "0.49193215", "0.49047247", "0.49017963", "0.489988", "0.48939842", "0.4880181", "0.48619732", "0.48574516", "0.4830393", "0.48298204", "0.48180303", "0.48159268", "0.48088557", "0.48060074", "0.48047212", "0.4799049", "0.47962448", "0.4793121", "0.4793121", "0.4793023", "0.47926268", "0.47911507", "0.47899738", "0.4788828", "0.47856945", "0.4781686", "0.477621", "0.47705534", "0.47657394", "0.4764844", "0.47592068", "0.47548464", "0.47517413", "0.4749733", "0.47467735", "0.4745751", "0.47418064", "0.47402418", "0.47398", "0.4738336", "0.47336483", "0.47329706", "0.47326845", "0.4732064", "0.47302482", "0.47302148", "0.47300956", "0.4726414", "0.4725063", "0.47225544" ]
0.72200763
0
Resets the accumulated statistics back to initial state and associates the application settings object with the stats engine. This should be called when the application is first activated and combined application settings incorporating server-side settings are available. It would also be called on any forced restart of the agent or a reconnection due to loss of connection.
def reset_stats(self, settings, reset_stream=False):
    self.__settings = settings
    self.__stats_table = {}
    self.__sql_stats_table = {}
    self.__slow_transaction = None
    self.__slow_transaction_map = {}
    self.__slow_transaction_old_duration = None
    self.__transaction_errors = []
    self.__synthetics_transactions = []
    self.reset_transaction_events()
    self.reset_error_events()
    self.reset_custom_events()
    self.reset_span_events()
    self.reset_synthetics_events()
    # streams are never reset after instantiation
    if reset_stream:
        self._span_stream = StreamBuffer(
            settings.infinite_tracing.span_queue_size)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stats_reset(self):\n self.stats.reset()", "def stats_reset(self):\n self.stats.reset()", "def reset_settings():\n settings = Settings()\n settings.reset()\n settings.save()", "def initialize(self):\n super(Stats, self).initialize()\n if not hasattr(self.application, 'rabbitmq'):\n setattr(self.application, 'rabbitmq', dict())\n if not hasattr(self.application, 'host'):\n setattr(self.application, 'host',\n socket.gethostname())", "def update(self, settings):\n self.settings.cache_clear()\n self._settings = settings\n log.info(\"Updated settings to %s\", self._settings)", "def reset(self):\n self.stats = {}", "def reset(self):\n reset_system_health_series()", "def update_settings(self):\n\n self.sim.account.set_balance(int(self.balance_str.get()))\n\n self.sim.config.set_base_bet(int(self.base_bet_str.get()))\n self.sim.config.set_payout(float(self.payout_str.get()))\n self.sim.config.set_iterations(int(self.iterations_str.get()))\n self.sim.config.set_loss_adder(int(self.loss_adder_str.get()))", "def reset_metric_stats(self):\n\n self.__stats_table = {}", "async def _reset_settings(self, ctx):\n data = await self.get_data(ctx)\n await data.Settings.clear()\n msg = (\"{0.name} ({0.id}) reset all \"\n \"casino settings.\").format(ctx.author)\n await ctx.send(msg)", "async def _reset_all_settings(self, ctx):\n await self._reset_settings(ctx)\n await self._reset_memberships(ctx)\n await self._reset_games(ctx)\n await self._reset_cooldowns(ctx)", "def reset_metric_stats(self):\n self.__stats_table = {}", "def reset(self):\n self.manager.delete_all()\n for name, val in DEFAULT_SETTINGS.items():\n val['name'] = name\n val['default_value'] = val['value']\n self.manager.from_dict(val)", "def resetSettings(self):\n\n # it does this 4 times because for some reason it would not grab everything one time through. Investigate\n for i in range(4):\n\n networkNode = self.returnNetworkNode\n attrs = cmds.listAttr(networkNode, ud=True)\n\n for attr in attrs:\n attrType = str(cmds.getAttr(networkNode + \".\" + attr, type=True))\n\n if attrType == \"double\":\n cmds.setAttr(networkNode + \".\" + attr, lock=False)\n cmds.setAttr(networkNode + \".\" + attr, 0, lock=True)\n\n if attrType == \"bool\":\n cmds.setAttr(networkNode + \".\" + attr, lock=False)\n cmds.setAttr(networkNode + \".\" + attr, True, lock=True)\n\n if attrType == \"enum\":\n cmds.setAttr(networkNode + \".\" + attr, lock=False)\n cmds.setAttr(networkNode + \".\" + attr, 0, lock=True)\n\n # relaunch the UI\n self.updateSettingsUI()\n self.applyModuleChanges(self)", "def initial_config(self, server_id):\n\n if server_id not in self.settings:\n self.settings[server_id] = {'inactive': True,\n 'output': [],\n 'cleanup': False,\n 'usercache': [],\n 'multiout': False\n }\n self.save_json()", "def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()", "def __load_settings(self):\n\n self.app_settings = sublime.load_settings(self.SETTINGS_FILE)\n self.__refresh_settings(True)\n\n # The settings may change during execution so we need to listen for changes\n self.app_settings.add_on_change(self.SETTINGS_CALLBACK_KEY, self.__refresh_settings)", "def set_unhandled_settings(self):\n # Set any custom settings\n # which where not setted (ex. 
on some widget's state changed)\n\n # Save all settings\n settings.save()", "def clear_settings(site_name): # untested - do I need/want this?\n return update_settings(site_name, {})", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def on_settings(self):\n\n # Pull the current app state from the relay Observer object\n status, interval, ntfc_status, ntfc_state = settings_state.get_state()\n\n # Pass it to the Observable object in order to render the Settings window\n settings_changed, update_interval, ntfc_changed, ntfc_selected = render_settings_window(\n status, interval, ntfc_status, ntfc_state, settings_state)\n\n # Register any state changes\n settings_state.update_state(settings_changed, update_interval, ntfc_changed, ntfc_selected)\n\n # If the interval has changed, reprogram scheduler to run at the new interval\n if settings_state.intrvl_change_trig:\n modify_scheduler(JOB_ID, settings_state.settings_interval)\n\n if settings_state.notification_change_trig:\n NewsIndicator.notifications = False if not settings_state.notification_state else True", "def reset(self) -> None:\n self.statistics = defaultdict(int)", "async def reset(self, ctx):\n await self.config.clear_all_guilds()\n await ctx.send(\"Reset all settings to default values.\")", "def __init__(self, settings):\n self._settings = settings\n self._stats = None", "def _create_config_and_reset_app(cls):\n cls.server_configuration = cls.create_configuration()\n utils.copy_resources(cls.server_configuration.file_server_root)\n server.reset_app(cls.server_configuration)\n\n cls._set_hash_mechanism_to_plaintext()", "def merge_settings(self, settings):\n self.settings.merge_settings(settings)", "def refresh_config(self):\n self._user_config = UserConfig(None)", "def reset(self) -> None:\n self.statistics = defaultdict(float)", "def add_settings_early(self):\n pass", "def load_settings(self):\n # Set the default settings. In case in a later version of this script the settings change, new default variables will be added automatically\n self.settings = {\n # Connection settings to OBS Studio websockets plugin\n \"host\": \"localhost\",\n \"port\": 4444,\n \"password\": \"\",\n \"update_frequency\": 1, # seconds, how often the script loads the SC2 UI location\n }\n if os.path.isfile(self.settings_path):\n with open(self.settings_path) as f:\n self.settings.update(json.load(f))", "def open_settings(self, event):\n settings_dialog = cfg.SettingsDialog(parent=self, exclude=['window'])\n res = settings_dialog.ShowModal()\n if res == wx.ID_OK:\n # Reload relevant parts of app\n restart_monitor_timer = False\n restart_gui_timer = False\n reload_correlations = False\n reload_logger = False\n reload_graph = False\n\n for setting in settings_dialog.changed_settings:\n # If any 'monitor.' settings except 'monitor.divergence_threshold have changed then restart\n # monitoring timer with new settings.\n # If 'monitor.interval has changed then restart gui timer.\n # If 'monitor.monitoring_threshold' has changed, then refresh correlation data.\n # If any 'logging.' 
settings have changed, then reload logger config.\n if setting.startswith('monitor.') and setting != 'monitor.divergence_threshold':\n restart_monitor_timer = True\n if setting == 'monitor.interval':\n restart_gui_timer = True\n if setting == 'monitor.monitoring_threshold':\n reload_correlations = True\n if setting.startswith('logging.'):\n reload_logger = True\n if setting.startswith('monitor.calculations'):\n reload_graph = True\n\n # Now perform the actions\n if restart_monitor_timer:\n self.__log.info(\"Settings updated. Reloading monitoring timer.\")\n self.__cor.stop_monitor()\n\n # Build calculation params and start monitor\n calculation_params = [self.__config.get('monitor.calculations.long'),\n self.__config.get('monitor.calculations.medium'),\n self.__config.get('monitor.calculations.short')]\n\n self.__cor.start_monitor(interval=self.__config.get('monitor.interval'),\n calculation_params=calculation_params,\n cache_time=self.__config.get('monitor.tick_cache_time'),\n autosave=self.__config.get('monitor.autosave'),\n filename=self.__opened_filename)\n\n if restart_gui_timer:\n self.__log.info(\"Settings updated. Restarting gui timer.\")\n self.timer.Stop()\n self.timer.Start(self.__config.get('monitor.interval') * 1000)\n\n if reload_correlations:\n self.__log.info(\"Settings updated. Updating monitoring threshold and reloading grid.\")\n self.__cor.monitoring_threshold = self.__config.get(\"monitor.monitoring_threshold\")\n self.__refresh_grid()\n\n if reload_logger:\n self.__log.info(\"Settings updated. Reloading logger.\")\n log_config = cfg.Config().get('logging')\n logging.config.dictConfig(log_config)\n\n if reload_graph:\n self.__log.info(\"Settings updated. Reloading graph.\")\n if len(self.__selected_correlation) == 2:\n self.show_graph(symbol1=self.__selected_correlation[0], symbol2=self.__selected_correlation[1])", "def reset_active_settings(self):\n self.compute = yacman.YacAttMap()\n return True", "def clearAllSettings(self) -> None:\n ...", "def init_settings(self):\n self.app.config.setdefault('SIMPLE_DOMAINS', [])\n self.app.config.setdefault('AWS_ACCESS_KEY_ID', environ.get('AWS_ACCESS_KEY_ID'))\n self.app.config.setdefault('AWS_SECRET_ACCESS_KEY', environ.get('AWS_SECRET_ACCESS_KEY'))\n self.app.config.setdefault('AWS_REGION', environ.get('AWS_REGION', self.DEFAULT_REGION))", "def reset( self ):\n self.conf = self.defaults", "def refresh(self) -> None:\n self.data = {}\n self.load_settings_file(self.default_settings_path / \"settings.yaml\", file_key=\"internal\")\n self.load_systems(self.default_settings_path / \"systems\")\n self.load_settings_file(self.personal_dir / \"settings.yaml\", file_key=\"user\")\n self.load_systems(self.personal_dir / \"systems\")", "def reset_stats(self):\n self.ships_left = self.ai_settings.ship_limit\n self.score = 0\n # Never reset the high score!\n self.high_score = self.read_score()\n self.level = 1", "def settings(self, settings):\n\n self._settings = settings", "def override_appsettings(**settings):\n\n def _dec(func):\n @wraps(func)\n def _inner(*args, **kwargs):\n # Apply new settings, backup old, clear caches\n old_values = {}\n for key, new_value in settings.items():\n old_values[key] = getattr(appsettings, key)\n setattr(appsettings, key, new_value)\n _reset_setting_caches()\n\n func(*args, **kwargs)\n for key, old_value in old_values.items():\n setattr(appsettings, key, old_value)\n\n # reset caches\n _reset_setting_caches()\n\n return _inner\n\n return _dec", "def reset(self):\n self.start_times = {}\n self.stats = 
defaultdict(OnlineMeter) # float defaults to 0", "def _post_initialisations(self):\n # Init the settings module.\n self.dummy_for_settings = SectionConfig(self.app.id, self.__class__.__name__)\n global settings\n settings = self.dummy_for_settings\n\n self.dummy_for_options = OptionConfig(self.app.id)\n global options\n options = self.dummy_for_options\n\n # Bind message boxes.\n self.MessageBox = MessageBox(self)\n self.msg = self.MessageBox.Message\n self.are_you_sure = self.MessageBox.are_you_sure\n\n # Set previous size and state.\n width = settings.get('width', 350)\n height = settings.get('height', 350)\n self.set_title(self.app.localizedname)\n self.resize(width, height)\n if settings.get_bool('maximized', False):\n self.maximize()\n # Load any other settings here.\n self.load_xinput_devices()", "def add_settings_early(self):\n\n # config settings\n config = {\n # some generic settings for every site, to point to location of some stuff\n mconst.DEF_SETTINGNAME_pkgdirimps_sitempacks: [pkgdirimp_sitempacks],\n mconst.DEF_SETTINGNAME_controllerroot: pkgdirimp_controllers,\n mconst.DEF_SETTINGNAME_sitefilepath: misc.calc_modulefiledirpath(__file__),\n # should we also load mewlo site installed setuptools plugins\n mconst.DEF_SETTINGNAME_flag_importsetuptoolspacks: True,\n mconst.DEF_SETTINGNAME_replaceshadowpath: '${sitefilepath}/replaceshadow',\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n # config settings\n config = {\n # Name of site\n mconst.DEF_SETTINGNAME_sitename: 'Mewlo',\n # Specify where this site serves from\n # these siteurls should not end in / so if you are serving a site at root just use relative of '' and absolute of 'http://sitename.com'\n mconst.DEF_SETTINGNAME_siteurl_relative: '',\n mconst.DEF_SETTINGNAME_siteurl_absolute: 'http://127.0.0.1:8080',\n #mconst.DEF_SETTINGNAME_siteurl_relative: '/public/publicity',\n #mconst.DEF_SETTINGNAME_siteurl_absolute: 'http://127.0.0.1:8080/public/publicity',\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n # config settings\n config = {\n # online status information\n mconst.DEF_SETTINGNAME_isenabled: True,\n mconst.DEF_SETTINGNAME_isonline: True,\n mconst.DEF_SETTINGNAME_offline_mode: 'maintenance',\n mconst.DEF_SETTINGNAME_offline_message: 'We are down for leap-year maintenance; we will be back soon.',\n mconst.DEF_SETTINGNAME_offline_allowadmin: False,\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n\n\n # extension pack config -- we need to explicitly enable plugins\n packconfig = {\n 'mouser.mewlotestplug' : {\n 'isenabled': False,\n },\n 'mouser.testpack' : {\n 'isenabled': False,\n },\n 'mewlo.siteaddon.account' : {\n 'isenabled': True,\n },\n 'mewlo.siteaddon.group' : {\n 'isenabled': True,\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_packs, packconfig)\n\n\n # database config\n databaseconfig = {\n 'settings' : {\n 'sqlalchemy_loglevel' : logging.NOTSET,\n #'sqlalchemy_loglevel' : logging.INFO,\n },\n 'default' : {\n 'url' : 'sqlite:///${dbfilepath}/mewlo_testsite1.sqlite',\n #'tablename_prefix': 'mewlo_',\n 'flag_echologging' : False,\n },\n 'mysql_unused' : {\n # Sample configuration for mysql\n 'url' : 'mysql://mewlo_user:mewlo_pass@localhost:3306/mewlo_testsite1',\n 'tablename_prefix': 'mewlo_'\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_database, databaseconfig)\n self.settings.listappend_settings_key(mconst.DEF_SETTINGSEC_make_dirs, '${dbfilepath}')\n\n # email config 
settings\n mailconfig = {\n # online status information\n 'smtp_host': self.get_configval('mail_smtp_host'),\n 'smtp_login': self.get_configval('mail_smtp_login'),\n 'smtp_port': self.get_configval('mail_smtp_port'),\n 'smtp_mode': self.get_configval('mail_smtp_mode'),\n 'smtp_password': self.get_configval('mail_smtp_password'),\n 'mail_from' : self.get_configval('mail_from'),\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_mail, mailconfig)\n\n\n # account siteaddon settings\n siteaddonconfig = {\n # online status information\n 'registration_mode': 'immediate',\n 'flag_require_email_verified_before_login': False,\n }\n self.settings.merge_settings_key('siteaddon_account', siteaddonconfig)\n\n\n\n # ATTN: UNFINISHED\n # asset mounts config\n if (False):\n assetmountconfig = {\n 'default' : {\n # an internal assetmount just needs a url route\n 'type': 'internal',\n 'routeid': 'static_files',\n },\n 'external' : {\n 'type': 'external',\n 'filepath': '${mewlofilepath}/public_assets',\n 'urlpath': 'http://127.0.0.1/mewlo/public_assets',\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_asset_mounts, assetmountconfig)\n\n\n\n\n\n #print \"TESTING CONFIG1:\"\n #self.run_configfunc('sayhello',1,2,3)\n #print \"TESTING CONFIG2:\"\n #self.run_allconfigfuncs('sayhello',1,2,3)", "def reload_settings(self):\n importlib.reload(sys.modules['micromasters.settings'])\n # Restore settings to original settings after test\n self.addCleanup(importlib.reload, sys.modules['micromasters.settings'])\n return vars(sys.modules['micromasters.settings'])", "def _reset_server_settings(self, server_id):\n\t\tself._remove_cfg_from_list(server_id)\n\t\tself._remove_settings_file(server_id)", "def set_config(self): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['ambient_min'] = self.ambient_min.get()\n self.settings['soil_1'] = self.smc1.get()\n self.settings['soil_2'] = self.smc2.get()\n self.settings['soil_3'] = self.smc3.get()\n self.settings['soil_4'] = self.smc4.get()\n self.settings['overhead_level'] = self.overhead_level.get()\n\n # Save settings to config file in case of reboot / power-loss\n print \"UPDATING SETTINGS FILE\"\n with open(self.settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!", "def action_settings(self):\n\n cur_datadir = self.config.starbound_data_dir\n settings = SettingsDialog(self)\n settings.exec()\n new_datadir = self.config.starbound_data_dir\n if new_datadir:\n if cur_datadir != new_datadir:\n self.load_data()\n self.scene.refresh(self.data)\n else:\n self.close_world()\n\n # Make sure our menus are enabled/disabled as appropriate\n self.enforce_menu_state()\n\n # Re-focus the main window\n self.activateWindow()", "def reset_stats(self):\r\n self.ships_left = self.ai_settings.ship_limit\r\n self.score = 0\r\n self.level = 1", "def reset_stats(self):\n self.ships_left= self.settings.ship_limit\n self.score = 0\n self.level = 1", "def reload(self):\n self.load_config()\n # Seems we need to explicitly refresh this\n if self.main_instance:\n self.main_instance.config = self.config", "def _setup_stats(self) -> None:\n\n # Save statistics\n self.mass = np.array([0])\n self.mass_balance = np.array([0])\n self.mass_balance_trend = np.array([0])", "def merge_onto(cls, settings):\r\n for key, value in cls.SETTINGS.iteritems():\r\n setattr(settings, key, value)", "def 
_soft_reset(self):\n self._reset_specific_envs(self.episodes_done)\n self._update_other_info()", "def clear_stats(self):\n self._stats = None", "def reset_metric_variables(self) -> None:\n with self._lock:\n self._reset_metric_variables()", "def reset_stats(self):\n print(\"Reseting stats\")\n self.player_lives = self.ai_stts.player_lives\n self.score = 0\n self.level = 1", "def user_settings(self, user_settings):\n\n self._user_settings = user_settings", "def reset_stats(self):\n self.lives_left = self.game_settings.lives\n self.score = 0\n self.level = 1", "def reset(self):\n self.settings = None\n self.sublime_settings = None\n self.settings_base = \"Javatar.sublime-settings\"\n self.sublime_base = \"Preferences.sublime-settings\"", "def onSettings(self):\n pass", "def _clear_gui_settings(self):\n self._settings.clear()\n self._settings.sync()\n self.gui_settings_clear_button.setEnabled(False)\n self.cleared = True", "def __setSettingsToStorage(value):\n AccountSettings.setSettings(NEW_SETTINGS_COUNTER, value)", "def set_app_defaults(self):\n self.curve_render = 0\n self.image_render = 0\n self.image_height = 200\n self.image_data = []\n self.auto_scale = True\n\n self.create_actions()\n self.setup_signals()\n self.reset_graph()\n\n self.fps = utils.SimpleFPS()\n\n # Click the live button\n self.ui.actionContinue_Live_Updates.trigger()", "def reset_stats() -> None:\n STATS[\"cleaned\"] = 0\n STATS[\"null\"] = 0\n STATS[\"unknown\"] = 0", "def reset_hl_stats(self):\n\n self.ships_left = self.settings.ship_limit\n self.score = 0\n self.level = 1", "def update(self):\n registry = getUtility(IRegistry)\n site_settings = registry.forInterface(\n ISiteSchema, prefix=\"plone\", check=False)\n try:\n if site_settings.webstats_js:\n self.webstats_js = site_settings.webstats_js\n except AttributeError:\n pass", "def add_earlydefault_settings(self):\n self.add_default_settings_config()\n self.add_default_settings_aliases()", "def reset_stats(self):\r\n self.ship_left = self.settings.ship_limit\r\n self.score = 0\r\n self.level = 1", "def merge_into_settings(self, settings):\n if not self._meta_dict:\n self._load_from_file()\n\n settings.chat_name = self._meta_dict[DumpMetadata.CHAT_NAME]\n settings.last_message_id = self._meta_dict[DumpMetadata.LAST_MESSAGE_ID]\n settings.exporter = self._meta_dict[DumpMetadata.EXPORTER]", "def _apply_settings(self):\n if 'fixed_delta_seconds' in self.dict_settings:\n self.settings.fixed_delta_seconds = self.dict_settings['fixed_delta_seconds']\n\n self.world.apply_settings(self.settings)", "def reset(self):\n self._config = Config()\n self._router = Router(())\n self._middleware = []\n self._start_response = None", "def reset(cls):\n cls._options = None\n cls._scoped_instances = {}", "def refresh_configuration(self):\n pass", "def _onSettings(self, event):\n dialog = sc.SettingsDialog(self)\n if dialog.ShowModal() == wx.ID_OK:\n dialog.saveSettings()\n dialog.Destroy()", "def reset(frequency=None):\n assert state.profile_level == 0, b\"Can't reset() while statprof is running\"\n CodeSite.cache.clear()\n state.reset(frequency)", "def reset_stats(self):\n self.ships_left = self.sett.ship_limit\n self.score = 0\n self.level = 1", "async def reset_config(self):\n self.config = {\"enable_auto_gen\": False, \"enable_world_barrier\": False}\n await shared.event_handler.call_async(\"world:reset_config\")\n self.gamerule_handler = mcpython.common.world.GameRule.GameRuleHandler(self)", "def reload_settings():\n global settings, cancel_thread\n\n # cancel the thread if the 
settings say so\n if cancel_thread is None:\n if settings.get('disabled') is False:\n cancel_thread = start_thread()\n else:\n if settings.get('disabled') is True:\n light_scheme_set = None\n current_timeout = 0\n cancel_thread()\n cancel_thread = None", "def reset_el_stats(self):\n\n # Number of available ships.\n self.ships_left = self.settings.ship_limit\n self.score = 0 # Score.\n self.level = 1 # Level.", "def resetSelfWithDefaults( self ):\n self.__dict__.update( self._defDict )", "def reset_stats(self):\r\n self.pepes_left = self.ai_settings.pepe_limit\r\n self.score = 0\r\n self.level = 1", "def __init__(self) -> None:\n self._settings = {}\n\n # Load values from global_settings (only uppercase)\n self.filter_and_set(global_settings)\n\n settings_env_value: str = os.environ.get(SETTINGS_ENV)\n if settings_env_value:\n # Load values from custom settings\n try:\n module = importlib.import_module(settings_env_value)\n except ModuleNotFoundError:\n msg = \"Can't import custom settings. Is it under PYTHONPATH?\"\n raise ModuleError(msg)\n self.filter_and_set(module)", "def reset_env(env, num_active_adv=0):\n if hasattr(env, 'domain_randomization'):\n env.domain_randomization = False\n if num_active_adv > 0:\n env.adversary_range = env.advs_per_strength * env.num_adv_strengths", "def gatherActiveDataStats(self, config):\n gatherWMDataMiningStats(config.wmstats_url, config.reqmgrdb_url, \n config.wmdatamining_url, False, log = cherrypy.log)\n return", "def on_actionSettings_triggered(self):\n self.start_app(SettingsApp)", "def kill_all(self):\n self.settings['lights_on'] = 12\n self.settings['lights_off'] = 12\n self.settings['overhead_level'] = 0\n self.settings['soil_1'] = 0\n self.settings['soil_2'] = 0\n self.settings['soil_3'] = 0\n self.settings['soil_4'] = 0\n self.scale_overhead_level.set(self.settings['overhead_level'])\n self.scale_smc1.set(self.settings['soil_1'])\n self.scale_smc2.set(self.settings['soil_2'])\n self.scale_smc3.set(self.settings['soil_3'])\n self.scale_smc4.set(self.settings['soil_4'])\n self.active_changes = True # (flag) Once changes are retrieved, we assume that they will be sent to the controller", "def requestConfig(self):\n self.radioConfig = None\n self.channels = None\n self.partialChannels = [] # We keep our channels in a temp array until finished\n\n self._requestSettings()", "def reset(self):\n\n for value in self.__dict__.itervalues():\n if isinstance(value, EnvParm):\n value._value = 'use_env'\n getattr(value, 'value')", "def load_rule_settings():\n logger.info('Loading the default rule settings')\n with create_app().app_context():\n sess = GlobalDB.db().session\n # Clearing the current defaults before reloading them\n sess.query(RuleSetting).filter(RuleSetting.agency_code.is_(None)).delete(synchronize_session=False)\n sess.commit()\n load_default_rule_settings(sess)", "def set_settings_devices(self):\n self.set_thermostat, self.set_humidifier, self.set_sprinklers, self.set_ventilation = self.settings[3:]", "def init(self):\n\n if self.has_settings:\n print(\n TERM.bold_red('Error:'),\n 'Settings file already exists. 
Doing nothing.'\n )\n return\n\n new_settings = {\n 'strategy': self.ns.strategy,\n 'branch': self.ns.branch,\n 'scoring': self.ns.scoring,\n }\n\n with open(self.settings, 'w') as f:\n f.write(yaml.dump(new_settings, default_flow_style=False))\n\n print(\n TERM.bold_green('Yay!'),\n 'Wrote settings file {0}'.format(self.settings)\n )", "def afterSetUp(self):\n self.load_config = {}\n self.load_config['monitor_interval'] = 1\n self.load_config['limit_number_request'] = 100\n self.load_config['limit_memory_used'] = 500", "def save_settings(self):\n logger.info(f'Saving settings: {self.settings_dict}')\n for k, section in self.settings_dict.items():\n for setting_name in section.keys():\n value = self.get_control_value(setting_name)\n if value is not None:\n section[setting_name] = value\n\n write_settings(self.settings_dict)", "def tearDownConfig(self):\n print time.ctime(), 'enter tearDownConfig'\n\n self.site1 = self.globalCfg['site1']\n self.site2 = self.globalCfg['site2']\n self.site3 = self.globalCfg['site3']\n\n self.site1.databaseLandscapeInfo()\n self.site2.databaseLandscapeInfo()\n self.site3.databaseLandscapeInfo()\n self.site1.systemReplicationStatus()\n\n if self.globalCfg['sync_mode'] == 'sync' and self.site1.fullSync:\n try:\n self.site1.srDisableFullSync(self.site1.getHost(\"WORKER1\"))\n self.site1.fullSync = False\n except Exception, e:\n print 'disable full_sync in tearDownConfig failed: %s' % e\n\n for h in range(1, self.site1.getHostNo()):\n self.site1.setConfigParameter(h, \"daemon.ini\", \"ConfigMgrPy.HOST\", \"indexserver.c\", \"instanceids\", None)\n for h in range(1, self.site2.getHostNo()):\n self.site2.setConfigParameter(h, \"daemon.ini\", \"ConfigMgrPy.HOST\", \"indexserver.c\", \"instanceids\", None)\n for h in range(1, self.site3.getHostNo()):\n self.site3.setConfigParameter(h, \"daemon.ini\", \"ConfigMgrPy.HOST\", \"indexserver.c\", \"instanceids\", None)\n\n self.site1.resetStatXSToMaster(self.globalCfg['multiDB'])\n self.site2.resetStatXSToMaster(self.globalCfg['multiDB'])\n self.site3.resetStatXSToMaster(self.globalCfg['multiDB'])\n\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"sr_nameserver\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"sr_dataaccess\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"sr_log_retention\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"pitrestart\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"warm_upper\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"sr_spcoordinator\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"persistencelayer\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"nameserver.ini\", \"nameserver\", None)\n\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"sr_nameserver\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"sr_dataaccess\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"sr_log_retention\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"pitrestart\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"warm_upper\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"sr_spcoordinator\", None)\n 
self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"persistencelayer\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"nameserver.ini\", \"nameserver\", None)\n\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"sr_nameserver\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"sr_dataaccess\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"sr_log_retention\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"pitrestart\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"warm_upper\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"sr_spcoordinator\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"persistencelayer\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"nameserver.ini\", \"nameserver\", None)\n\n # for normal tear down(unregister/disable), the steps should be in order\n # the primary cannot be disabled if there's secondary attached\n # so there's no need to use multi-thread\n # executing here means the landscape has been resorded to site1--(sync/syncmem)--site2--(async)--site3\n #pdb.set_trace()\n self.site3.tearDown()\n self.site2.tearDown()\n self.site1.tearDown()", "async def admin_reset(self, ctx: commands.Context):\n await self.config.clear_all()\n await self.initialize_internals()\n await ctx.send('Global team management factory reset complete.')", "def updateSettings(self):\n self.parser.read(self.file)\n self.showTicker = self.parser.getboolean('Settings', 'showTicker')\n self.verbose = self.parser.getboolean('Settings', 'verbose')\n self.sleepTime = self.parser.getint('Settings', 'sleeptime')\n self.saveGraph = self.parser.getboolean('Settings', 'saveGraph')\n self.graphDPI = self.parser.getint('Settings', 'graphDPI')", "def reset():\n global HEALTH_AGGREGATOR\n\n HEALTH_AGGREGATOR.cleanup()\n HEALTH_AGGREGATOR = HealthAggregator()\n\n return jsonify({}), 200", "def _reset(cls):\r\n cls._CONFIGURED = False\r\n cls._ENABLED = {}", "def reset_cfg(self, server):\n\t\tserver = valid_server(server)\n\t\tself._reset_server_settings(server)" ]
[ "0.5851397", "0.5851397", "0.5782384", "0.57592905", "0.56791097", "0.5615239", "0.5549352", "0.5518156", "0.5513039", "0.54954475", "0.54496795", "0.54446083", "0.54427564", "0.54313743", "0.5414981", "0.5411828", "0.5359997", "0.5353914", "0.5333628", "0.53336173", "0.53336173", "0.53336173", "0.5309062", "0.52863175", "0.5286004", "0.52740467", "0.526341", "0.52472097", "0.5238809", "0.5224572", "0.52174425", "0.5216299", "0.51908445", "0.5179103", "0.5168972", "0.51673394", "0.51620424", "0.5158321", "0.5149918", "0.51469815", "0.5140568", "0.51253647", "0.512178", "0.5106764", "0.5105461", "0.5102273", "0.50912786", "0.50823903", "0.50636727", "0.5056766", "0.5053086", "0.5043886", "0.5032688", "0.5025051", "0.5023266", "0.5014213", "0.50134873", "0.50077343", "0.5001891", "0.50008655", "0.49986854", "0.49972528", "0.49856833", "0.49840286", "0.49810112", "0.49803767", "0.4970735", "0.49671605", "0.49631295", "0.49618486", "0.49591592", "0.4952905", "0.49467528", "0.49416956", "0.49380693", "0.49368733", "0.49245253", "0.49237898", "0.4918992", "0.49160004", "0.4915216", "0.4908733", "0.4905474", "0.4896978", "0.48875263", "0.488396", "0.4880871", "0.48739615", "0.48630226", "0.485475", "0.4845176", "0.48421398", "0.48334652", "0.48331612", "0.48301736", "0.48291224", "0.48269537", "0.4826309", "0.48174787", "0.48165002" ]
0.5501668
9
Resets the accumulated statistics back to initial state for metric data.
def reset_metric_stats(self):
    self.__stats_table = {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()", "def reset_metric_stats(self):\n self.__stats_table = {}", "def stats_reset(self):\n self.stats.reset()", "def stats_reset(self):\n self.stats.reset()", "def reset(self) -> None:\n self.statistics = defaultdict(float)", "def reset(self) -> None:\n self.statistics = defaultdict(int)", "def reset(self):\n self.stats = {}", "def reset(self):\n self.num_inst = 0\n self.sum_metric = 0.0", "def reset_state(self):\n for name in self.metrics:\n self.metrics[name].reset_state()", "def reset_stats() -> None:\n STATS[\"cleaned\"] = 0\n STATS[\"null\"] = 0\n STATS[\"unknown\"] = 0", "def reset(self):\n self.start_times = {}\n self.stats = defaultdict(OnlineMeter) # float defaults to 0", "def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0", "def reset_metric_variables(self) -> None:\n with self._lock:\n self._reset_metric_variables()", "def clear_stats(self):\n self._stats = None", "def reset(self):\n self.accumulation = None", "def reset(self):\n self.accumulation = None", "def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0", "def reset_metrics(self):\n self.metrics['loss'] = 0.0\n self.metrics['num_tokens'] = 0\n self.metrics['correct_tokens'] = 0\n self.metrics['correct_pred'] = 0\n self.metrics['pred_count'] = 0", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self):\n self.sum = [0.] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.] 
* len(self.topk)", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._is_ddp = get_rank() > -1", "def reset(self):\n self._total_value = 0.0\n self._count = 0", "def reset_all(self) -> None:\n for metric in self:\n metric.reset()", "def reset(self):\n self.ref_value = 0.0\n self._average = 0.0\n self.num_samples = 0", "def reset(self):\n for i in range(0, len(self.__counts)):\n self.__counts[i] = 0\n self.__overflow = 0\n self.__total_count = 0\n self.__total_values = 0\n self.__min = None\n self.__max = None", "def reset(self):\n self._value_estimates[:] = self.prior\n self.action_attempts[:] = 0\n self.last_action = None\n self.t = 0", "def reset(self):\n self._accumulated_time.clear()\n self._hit_count.clear()", "def reset(self):\n super().reset()\n self.m_n = 1\n self.m_num_errors = 0\n self.m_d = 0\n self.m_lastd = 0\n self.m_mean = 0.0\n self.m_std_temp = 0.0\n self.m_m2s_max = 0.0\n self.estimation = 0.0", "def _reset(self):\n\n # Checking one attribute is enough, because they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.n_samples_seen_\n del self.mean_\n del self.var_", "def reset_turn_stats(self):\n\n # Set the attribute value to 0\n self._current_score = 0", "def reset() -> None:\n Stat._cache = SortedDict()", "def reset(self):\n\n self.scaler = None\n self.isFitted = False\n self.__create_scaler()", "def reset(self):\n self.reset_count += 1\n self._init_data()", "def reset(self):\n super().reset()\n self.sample_count = 1\n self.miss_prob = 1.0\n self.miss_std = 0.0\n self.miss_prob_sd_min = float(\"inf\")\n self.miss_prob_min = float(\"inf\")\n self.miss_sd_min = float(\"inf\")", "def reset(self):\n self.current_exposure = None\n self.scores = {}", "def reset(self):\n reset_system_health_series()", "def reset_average(self):\n self._total_time = 0\n self._average_time = 0\n self._calls = 0", "def reset(self):\n self.damage_dealt = 0\n self.kills = 0\n self.got_killed = False\n self.fitness = 0", "def reset(self):\n self.values.clear()\n\n self.on_reset()", "def reset(self):\n self.loss = 0\n self.cnt = 0", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.mean_\n del self.var_", "def clear(self):\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator.clear()\n self.global_ap_calculator.clear()\n self.num_examples = 0", "def reset_memory_statistics(sender, **kwargs): # pylint: disable=unused-argument\n MemoryUsageData.start_counting()", "def reset(self):\n self.m = normalize(self.m0)\n self.t = 0.0", "def reset(self, complete=False):\n self.sum = 0\n self.n = 0\n if complete:\n self.running_avg = []", "def reset(self):\n previous_solution_values = tf.constant(np.tile((self._action_lower_bound + self._action_upper_bound) / 2,\n [self._planning_horizon * self._num_agents, 1]), dtype=tf.float32)\n previous_solution_values = tf.reshape(previous_solution_values, [-1])\n solution_variance_values = tf.constant(\n np.tile(np.square(self._action_lower_bound - self._action_upper_bound) / 16,\n [self._planning_horizon * self._num_agents, 1]), dtype=tf.float32)\n solution_variance_values = tf.reshape(solution_variance_values, [-1])\n self._m.assign(previous_solution_values)\n self._sigma.assign(tf.math.sqrt(solution_variance_values))", "def clear(self):\r\n\r\n\t\tself.ITerm = 0.0\r\n\t\tself.DTerm = 0.0\r\n\t\tself.last_error = 
0.0\r\n\r\n\t\tself.state_history = []\r\n\t\tself.error_history = []\r\n\t\tself.output_history = []\r\n\t\tself.sample_times = []\r\n\t\t\r\n\t\tself.OutputValue = 0.0", "def reset(self):\n self.total_pulls = 0\n self.total_score = 0\n self.npulls = np.zeros(self.k)\n self.score = np.zeros(self.k)", "def reset(self):\n self.temp_data.clear()", "def _reset(self):\n self._values = {}", "def reset(self):\n self.acc_loss = 0\n self.norm_term = 0", "def reset(self):\n if self.monotonic_energy is not None:\n self.monotonic_energy.reset()\n if self.chunk_energy is not None:\n self.chunk_energy.reset()\n self.bd_L_prev = 0\n self.key_tail = None", "def reset(self):\n\n self.results = []\n self._plot()", "def reset(self):\n self.observation = None\n self.history.clear()\n for i in range(len(self.answers)):\n self.answers[i] = None\n self.reset_metrics()", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.score = None\n self.true = None\n self.meta = None", "def reset_data_recorder(self):\n\n self.t_values = []\n self.x_values = []\n self.tau_values = []", "def reset(self):\n self.data = {}\n self.is_bound = False\n self._errors = None", "def reset(self):\n self.error_p = 0.0\n self.error_i = 0.0\n self.error_d = 0.0\n self.errors = [ 0.0 ] * self.samples\n if callable(self.debug_callback):\n self.debug_callback(\"reset\")", "def reset_stats(self):\n self.ships_left = self.sett.ship_limit\n self.score = 0\n self.level = 1", "def reset(self):\n self.table[:, :] = 0\n self.counts[:] = 0\n self.names = []\n self.hashesperid.resize(0)\n self.dirty = True", "def reset(self):\r\n self.state = copy.copy(self.mu)", "def reset(self):\n self.reset_image_estimate()\n self.init_m_aux()\n self.reset_hessian_and_bias()\n self.reset_adadelta_variables()", "def reset(self):\n self.visited = False\n self.calculated = False\n self.past_value = self.value\n self.value = 0", "def reset(self):\n self.pred_classes.clear()\n self.gold_classes.clear()\n self.pred_probas.clear()\n self.gold_probas.clear()\n self.loss = 0\n self.nb_batches = 0\n self.prec_rec_f1 = None\n self.acc = None\n self.mcc = None", "def reset(self):\n self.data = self._defaults", "def reset(self):\n self.mode = 0\n self.graphs = [[], [], []]\n self.coefficients = []\n self.sample = []", "def reset_wm(self):\n\n self.plan = []\n self.hist = []", "def reset(self):\n\t\tself._initial = None\n\t\tself._start = None\n\t\tself._time = 0\n\t\tself._total = 0\n\t\treturn self", "def reset(self):\n self._proportional = 0\n self._integral = 0\n self._derivative = 0\n\n self._last_time = self._current_time()\n self._last_output = None\n self._last_input = None", "def reset(self):\n self.__sets = []\n self._computed = False", "def reset(self):\n self._stat = CardMeta()", "def reset(self):\n self._hist.reset()\n return self", "def reset(self):\n self._clusters = {}\n self._clusters_val = {}\n self._centroids = {}\n self.store()", "def reset_stats(self):\n print(\"Reseting stats\")\n self.player_lives = 
self.ai_stts.player_lives\n self.score = 0\n self.level = 1", "def reset(self):\n self.edges = None\n self.chi = None\n self.k = None\n self.n_bins = None\n self.classes = None\n self.n_params = None", "def clear_all_accumulators(self):\n self._require_state(\"INITIALIZING\")\n for mi,accum in self._accums.items():\n accum.clear()", "def reset(self):\n self.work_state = work_state[\"Measuring\"]\n self.report_mode = report_mode[\"Initiative\"]\n self.duty_cycle = 0\n self.logger.info(\"{}: sensor resetted.\".format(self.sensor_name))", "def reset(self):\n self.x_prev = np.zeros_like(self.mu)", "def reset(self):\n self.baseline = None\n self.cut = None\n self.manual_push = 0", "def reset(self):\n self.head_pos = 0\n self.left_expands = 0\n self.memory = np.zeros(\n (\n self.max_memory if self.fixed_size else 1,\n self.memory_unit_size\n )\n )\n self.previous_read = np.zeros(self.memory_unit_size)\n if self.history is not None:\n self.history = defaultdict(list)", "def __reset(self):\n\t\tself.__highest = -float('inf')\n\t\tself.__lowest = float('inf')\n\t\tself.__total = 0\n\t\tself.__steps = 0\n\t\tself.__cold_days = 0", "def reset(self):\n self.reserve.reset()\n self.revenue.reset()\n self.transfers.reset()\n self.missions.reset()\n self.debt.reset()\n self.genfund.reset()\n self.macro.reset(self.pop[self.start_yr],self.eco_first)\n self.summary = pd.DataFrame(index=self.names,columns=[t for t in range(self.start_yr,self.stop_yr)])\n self.year = self.start_yr", "def reset(self):\n self.values = None\n self.keys = None\n self.mask = None", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_", "def reset(self):\n self.train_loss.reset_states()\n self.train_accuracy.reset_states()\n self.val_loss.reset_states()\n self.val_accuracy.reset_states()\n self.train_mIoU.reset_states()\n self.val_mIoU.reset_states()", "def reset(self):\n # Attempt to reset data loader\n self.data_loader_iter = iter(self.data_loader)\n self.num_batches = 0\n\n # Make sure calibrator will check the cache again when reset.\n self.cache_contents = None" ]
[ "0.8448358", "0.83412653", "0.83299094", "0.83299094", "0.8270503", "0.8153123", "0.79735655", "0.7772603", "0.77270293", "0.77003264", "0.76665866", "0.7614711", "0.75709176", "0.7540221", "0.75036174", "0.75036174", "0.7490715", "0.7471787", "0.74488425", "0.74488425", "0.74488425", "0.7399214", "0.7371752", "0.7350859", "0.73405546", "0.73228246", "0.7312936", "0.72774065", "0.7229525", "0.71553284", "0.7075319", "0.706154", "0.7059099", "0.70363504", "0.702773", "0.70184267", "0.70123726", "0.70105654", "0.70089495", "0.6993825", "0.69876575", "0.6979957", "0.69488895", "0.69401044", "0.68768877", "0.686644", "0.6866367", "0.6860591", "0.68563175", "0.6852236", "0.68487674", "0.6847574", "0.68278927", "0.6826811", "0.6824579", "0.6819496", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6812596", "0.6810124", "0.6801448", "0.6796431", "0.6786861", "0.6774378", "0.676991", "0.6768513", "0.6748076", "0.674789", "0.6747709", "0.6743537", "0.67379457", "0.67296934", "0.67281467", "0.6725452", "0.67183644", "0.67178744", "0.6709824", "0.6704403", "0.6679279", "0.6678569", "0.66781825", "0.6674983", "0.66681176", "0.6668089", "0.6666285", "0.66623515", "0.6658285", "0.66577923", "0.66570085", "0.66570085", "0.66526616", "0.665174" ]
0.84143066
1
Resets the accumulated statistics back to initial state for sample analytics data.
def reset_transaction_events(self): if self.__settings is not None: self._transaction_events = SampledDataSet( self.__settings.event_harvest_config. harvest_limits.analytic_event_data) else: self._transaction_events = SampledDataSet()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stats_reset(self):\n self.stats.reset()", "def stats_reset(self):\n self.stats.reset()", "def reset(self) -> None:\n self.statistics = defaultdict(float)", "def reset(self) -> None:\n self.statistics = defaultdict(int)", "def reset(self):\n self.stats = {}", "def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()", "def reset_metric_stats(self):\n\n self.__stats_table = {}", "def reset_metric_stats(self):\n self.__stats_table = {}", "def clear_stats(self):\n self._stats = None", "def reset_stats() -> None:\n STATS[\"cleaned\"] = 0\n STATS[\"null\"] = 0\n STATS[\"unknown\"] = 0", "def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0", "def reset(self):\n self.start_times = {}\n self.stats = defaultdict(OnlineMeter) # float defaults to 0", "def reset(self):\n self.ref_value = 0.0\n self._average = 0.0\n self.num_samples = 0", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self):\n self.num_inst = 0\n self.sum_metric = 0.0", "def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._is_ddp = get_rank() > -1", "def reset(self):\n super().reset()\n self.sample_count = 1\n self.miss_prob = 1.0\n self.miss_std = 0.0\n self.miss_prob_sd_min = float(\"inf\")\n self.miss_prob_min = float(\"inf\")\n self.miss_sd_min = float(\"inf\")", "def reset(self):\n self.accumulation = None", "def reset(self):\n self.accumulation = None", "def reset(self):\n self._total_value = 0.0\n self._count = 0", "def reset(self):\n for i in range(0, len(self.__counts)):\n self.__counts[i] = 0\n self.__overflow = 0\n self.__total_count = 0\n self.__total_values = 0\n self.__min = None\n self.__max = None", "def reset(self):\n self._accumulated_time.clear()\n self._hit_count.clear()", "def reset(self):\n reset_system_health_series()", "def reset_data_recorder(self):\n\n self.t_values = []\n self.x_values = []\n self.tau_values = []", "def reset_average(self):\n self._total_time = 0\n self._average_time = 0\n self._calls = 0", "def reset(self):\n self.sum = [0.] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.] * len(self.topk)", "def reset(self):\n self._value_estimates[:] = self.prior\n self.action_attempts[:] = 0\n self.last_action = None\n self.t = 0", "def clear(self):\r\n\r\n\t\tself.state_history = []\r\n\t\tself.error_history = []\r\n\t\tself.output_history = []\r\n\t\tself.sample_times = []\r\n\t\t\r\n\t\tself.OutputValue = 0.0", "def reset_stats(self):\n print(\"Reseting stats\")\n self.player_lives = self.ai_stts.player_lives\n self.score = 0\n self.level = 1", "def reset(self):\n self.current_exposure = None\n self.scores = {}", "def clear(self):\r\n\r\n\t\tself.state_history = []\r\n\t\tself.error_history = []\r\n\t\tself.output_history = []\r\n\t\tself.sample_times = []\r\n\t\t\r\n\t\tself.LastOutputValue = 0.0\r\n\t\tself.OutputValue = 0.0", "def clear(self):\r\n\r\n\t\tself.state_history = []\r\n\t\tself.error_history = []\r\n\t\tself.output_history = []\r\n\t\tself.sample_times = []\r\n\t\t\r\n\t\tself.LastOutputValue = 0.0\r\n\t\tself.OutputValue = 0.0", "def reinit(self):\n self.logger.info(\"Reinit called. 
Clear the population.\")\n self.set_init_population([], perf_name=None)\n self._gt_rollouts = []\n self._gt_scores = []", "def reset(self):\n self.reset_count += 1\n self._init_data()", "def clear(self):\r\n\r\n\t\tself.ITerm = 0.0\r\n\t\tself.DTerm = 0.0\r\n\t\tself.last_error = 0.0\r\n\r\n\t\tself.state_history = []\r\n\t\tself.error_history = []\r\n\t\tself.output_history = []\r\n\t\tself.sample_times = []\r\n\t\t\r\n\t\tself.OutputValue = 0.0", "def reset_state(self):\n for name in self.metrics:\n self.metrics[name].reset_state()", "def reset(self):\n self.error_p = 0.0\n self.error_i = 0.0\n self.error_d = 0.0\n self.errors = [ 0.0 ] * self.samples\n if callable(self.debug_callback):\n self.debug_callback(\"reset\")", "def reset(self):\n super().reset()\n self.m_n = 1\n self.m_num_errors = 0\n self.m_d = 0\n self.m_lastd = 0\n self.m_mean = 0.0\n self.m_std_temp = 0.0\n self.m_m2s_max = 0.0\n self.estimation = 0.0", "def clear(self):\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator.clear()\n self.global_ap_calculator.clear()\n self.num_examples = 0", "def reset_turn_stats(self):\n\n # Set the attribute value to 0\n self._current_score = 0", "def clearstats(self, clearstats) :\n\t\ttry :\n\t\t\tself._clearstats = clearstats\n\t\texcept Exception as e:\n\t\t\traise e", "def clearstats(self, clearstats) :\n\t\ttry :\n\t\t\tself._clearstats = clearstats\n\t\texcept Exception as e:\n\t\t\traise e", "def clearstats(self, clearstats) :\n\t\ttry :\n\t\t\tself._clearstats = clearstats\n\t\texcept Exception as e:\n\t\t\traise e", "def clearstats(self, clearstats) :\n\t\ttry :\n\t\t\tself._clearstats = clearstats\n\t\texcept Exception as e:\n\t\t\traise e", "def reset(self):\n self.curr_idx = 0\n # shuffle data in each bucket\n random.shuffle(self.idx)\n for i, buck in enumerate(self.utterances):\n self.indices[i], self.utterances[i], self.intents[i] = shuffle(self.indices[i],\n self.utterances[i],\n self.intents[i])\n self.ndindex = []\n self.ndsent = []\n self.ndlabel = []\n\n # append the lists with an array\n for i, buck in enumerate(self.utterances):\n self.ndindex.append(ndarray.array(self.indices[i], dtype=self.dtype))\n self.ndsent.append(ndarray.array(self.utterances[i], dtype=self.dtype))\n self.ndlabel.append(ndarray.array(self.intents[i], dtype=self.dtype))", "def _reset(self):\n\n # Checking one attribute is enough, because they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.n_samples_seen_\n del self.mean_\n del self.var_", "def reset_stats(self, settings, reset_stream=False):\n\n self.__settings = settings\n self.__stats_table = {}\n self.__sql_stats_table = {}\n self.__slow_transaction = None\n self.__slow_transaction_map = {}\n self.__slow_transaction_old_duration = None\n self.__transaction_errors = []\n self.__synthetics_transactions = []\n\n self.reset_transaction_events()\n self.reset_error_events()\n self.reset_custom_events()\n self.reset_span_events()\n self.reset_synthetics_events()\n # streams are never reset after instantiation\n if reset_stream:\n self._span_stream = StreamBuffer(\n settings.infinite_tracing.span_queue_size)", "def reset(self):\n self.mode = 0\n self.graphs = [[], [], []]\n self.coefficients = []\n self.sample = []", "def reset(self):\n self.st = segment_tree.SegmentTreeSampler(self.n, np.ones(self.n) * self.reg, self.random_state)", "def reset(self):\r\n self.buffer = np.zeros(self.nBins)\r\n self.counter = 0", "def reset(self):\n self.algo_state = {}\n 
self.actual_repetitions = 0\n self.next_session = -1\n self.last_session = -1\n self.past_quality = []", "def reset(self):\n self.temp_data.clear()", "def reset(self, complete=False):\n self.sum = 0\n self.n = 0\n if complete:\n self.running_avg = []", "def reset(self):\n self.correct_count = 0\n self.total_count = 0", "def reset_metrics(self):\n self.metrics['loss'] = 0.0\n self.metrics['num_tokens'] = 0\n self.metrics['correct_tokens'] = 0\n self.metrics['correct_pred'] = 0\n self.metrics['pred_count'] = 0", "def _reset_histories(self):\n self.train_loss_history = []\n self.train_pos_dist_history = []\n self.train_neg_dist_history = []\n self.val_loss_history = []\n self.val_pos_dist_history = []\n self.val_neg_dist_history = []", "def reset(self):\n # Sample random state from initial state distribution\n self._cur_state = self._sample_state(self._mdp.I)\n self._prev_state = self._cur_state", "def reset(self):\n\n self.results = []\n self._plot()", "def reset(self):\n self.test = 0\n self.hit = 0", "def reset(self):\n self.test = 0\n self.hit = 0", "def reset(self):\n self.test = 0\n self.hit = 0", "def reset() -> None:\n Stat._cache = SortedDict()", "def reset(self):\n\n self.scaler = None\n self.isFitted = False\n self.__create_scaler()", "def _reset_histories(self):\n\t\tself.train_loss_history = []\n\t\tself.train_acc_history = []\n\t\tself.val_acc_history = []\n\t\tself.val_loss_history = []", "def reset(self):\n self.observation = None\n self.history.clear()\n for i in range(len(self.answers)):\n self.answers[i] = None\n self.reset_metrics()", "def reset_memory_statistics(sender, **kwargs): # pylint: disable=unused-argument\n MemoryUsageData.start_counting()", "def reset_stats(self):\n self.lives_left = self.game_settings.lives\n self.score = 0\n self.level = 1", "def clear(self):\n self.__indexclusters[:] = []\n self.__sample_size = 0\n self.__samples[:] = []\n self.__simifunc = None", "def reset(self):\n self.damage_dealt = 0\n self.kills = 0\n self.got_killed = False\n self.fitness = 0", "def reset(self):\n if not self._data_writer.is_ran_last():\n if not self._data_writer.is_ran_ever():\n logger.error(\"Ignoring the reset before the run\")\n else:\n logger.error(\"Ignoring the repeated reset call\")\n return\n for population in self._data_writer.iterate_populations():\n population._cache_data() # pylint: disable=protected-access\n\n # Call superclass implementation\n AbstractSpinnakerBase.reset(self)", "def reset(self):\n self.reserve.reset()\n self.revenue.reset()\n self.transfers.reset()\n self.missions.reset()\n self.debt.reset()\n self.genfund.reset()\n self.macro.reset(self.pop[self.start_yr],self.eco_first)\n self.summary = pd.DataFrame(index=self.names,columns=[t for t in range(self.start_yr,self.stop_yr)])\n self.year = self.start_yr", "def reset(self):\n self._proportional = 0\n self._integral = 0\n self._derivative = 0\n\n self._last_time = self._current_time()\n self._last_output = None\n self._last_input = None", "def _reset(self):\n self._values = {}", "def reset(self):\n if self.monotonic_energy is not None:\n self.monotonic_energy.reset()\n if self.chunk_energy is not None:\n self.chunk_energy.reset()\n self.bd_L_prev = 0\n self.key_tail = None", "def reset(self):\n self.score = None\n self.true = None\n self.meta = None", "def reset(self):\n self.curr_idx = 0\n #shuffle data in each bucket\n random.shuffle(self.idx)\n for i, buck in enumerate(self.sentences):\n self.indices[i], self.sentences[i], self.characters[i], self.label[i] = shuffle(self.indices[i],\n 
self.sentences[i],\n self.characters[i],\n self.label[i])\n\n self.ndindex = []\n self.ndsent = []\n self.ndchar = []\n self.ndlabel = []\n\n #for each bucket of data\n for i, buck in enumerate(self.sentences):\n #append the lists with an array\n self.ndindex.append(ndarray.array(self.indices[i], dtype=self.dtype))\n self.ndsent.append(ndarray.array(self.sentences[i], dtype=self.dtype))\n self.ndchar.append(ndarray.array(self.characters[i], dtype=self.dtype))\n self.ndlabel.append(ndarray.array(self.label[i], dtype=self.dtype))", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.mean_\n del self.var_", "def reset(self):\n self.x_prev = np.zeros_like(self.mu)", "def reset_stats(self):\r\n self.pepes_left = self.ai_settings.pepe_limit\r\n self.score = 0\r\n self.level = 1", "def reset(self):\n self.total_pulls = 0\n self.total_score = 0\n self.npulls = np.zeros(self.k)\n self.score = np.zeros(self.k)", "def clear(self):\n self.counts = [0] * len(self.values)\n if HAS_NUMPY:\n self.counts = numpy.array(self.counts)", "def _reset(self):\n self._set(\"_n_init_features\", None)\n self._set(\"_n_output_features\", None)\n self._set(\"_n_intervals\", None)\n self._set(\"_mapper\", {})\n self._set(\"_cpp_preprocessor\", None)\n self._set(\"_fitted\", False)", "def reset_results_arrays(self):\n raw_dim0 = self._dataset.raw_dims[0]\n \n if len(self.data) != raw_dim0:\n self.freq_current = np.zeros(raw_dim0, complex)", "def reset(self):\n\n self.results = {}", "def reset(self):\n self.baseline = None\n self.cut = None\n self.manual_push = 0", "def reset_metric_variables(self) -> None:\n with self._lock:\n self._reset_metric_variables()", "def reset(self):\n self.values.clear()\n\n self.on_reset()", "def reset(self):\n self.tot = 0\n self.cnt = [0.0 for _ in range( self.alpha.getLen() )]", "def reset(self):\n self.reset_image_estimate()\n self.init_m_aux()\n self.reset_hessian_and_bias()\n self.reset_adadelta_variables()", "def reset(self):\n self.table[:, :] = 0\n self.counts[:] = 0\n self.names = []\n self.hashesperid.resize(0)\n self.dirty = True", "def reset(self):\n self.loss = 0\n self.cnt = 0", "def restore_auxiliary_stats(self):\n self.rms.restore_rms()", "def test_reset(self):\n # Set tracked variables to some non-zero values\n base_gcmc_sampler.n_accepted = 1\n base_gcmc_sampler.n_moves = 1\n base_gcmc_sampler.Ns = [1]\n\n # Reset base_gcmc_sampler\n base_gcmc_sampler.reset()\n\n # Check that the values have been reset\n assert base_gcmc_sampler.n_accepted == 0\n assert base_gcmc_sampler.n_moves == 0\n assert len(base_gcmc_sampler.Ns) == 0\n\n return None", "def reset_for_new_run(\n self,\n state: State\n ):\n\n super().reset_for_new_run(state)\n\n if self.Q is None:\n self.Q = {\n a: IncrementalSampleAverager(\n initial_value=self.initial_q_value,\n alpha=self.alpha\n )\n for a in self.most_recent_state.AA\n }\n else:\n for averager in self.Q.values():\n averager.reset()", "def reset(self):\n self.__sets = []\n self._computed = False", "def reset(self):\n self.edges = None\n self.chi = None\n self.k = None\n self.n_bins = None\n self.classes = None\n self.n_params = None", "def reset(self) -> None:\n self.current = 0\n self.num_cuts = 0", "def reset(self):\n self._hist.reset()\n return self" ]
[ "0.8175733", "0.8175733", "0.81559753", "0.81235486", "0.78766066", "0.78731924", "0.77236235", "0.76826066", "0.7648331", "0.7643053", "0.75607705", "0.7420023", "0.73855245", "0.735026", "0.735026", "0.735026", "0.7306659", "0.7305569", "0.72605497", "0.72255266", "0.7146157", "0.7146157", "0.71064705", "0.70541924", "0.70354277", "0.7002376", "0.6995929", "0.698548", "0.69429284", "0.69302046", "0.69075143", "0.6902323", "0.6900659", "0.6899984", "0.6899984", "0.68919635", "0.6878506", "0.6862571", "0.68519914", "0.6809108", "0.67935514", "0.67851716", "0.67811215", "0.6769244", "0.6769244", "0.6769244", "0.6769244", "0.67565143", "0.67392087", "0.67041886", "0.67033416", "0.6702633", "0.6696533", "0.66853565", "0.66830385", "0.66795826", "0.66770935", "0.667698", "0.66607165", "0.66598594", "0.6652083", "0.66517764", "0.66517764", "0.66517764", "0.6633372", "0.6620441", "0.66171205", "0.66151804", "0.66147643", "0.6590698", "0.6590537", "0.65893716", "0.65869385", "0.6565601", "0.6563439", "0.65514684", "0.6551448", "0.65482515", "0.6542173", "0.6540907", "0.6536822", "0.65345055", "0.6529401", "0.6519701", "0.6516039", "0.6505992", "0.6504654", "0.6504634", "0.65002054", "0.6497957", "0.6494268", "0.6486475", "0.64846194", "0.64806986", "0.6479931", "0.646618", "0.64621705", "0.64560664", "0.6455684", "0.6452345", "0.64394677" ]
0.0
-1
Resets the accumulated statistics back to initial state for Synthetics events data.
def reset_synthetics_events(self): if self.__settings is not None: self._synthetics_events = LimitedDataSet( self.__settings.agent_limits.synthetics_events) else: self._synthetics_events = LimitedDataSet()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self) -> None:\n self.statistics = defaultdict(float)", "def stats_reset(self):\n self.stats.reset()", "def stats_reset(self):\n self.stats.reset()", "def reset(self) -> None:\n self.statistics = defaultdict(int)", "def reset(self):\n self.stats = {}", "def reset_stats() -> None:\n STATS[\"cleaned\"] = 0\n STATS[\"null\"] = 0\n STATS[\"unknown\"] = 0", "def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()", "def reset(self):\n self.start_times = {}\n self.stats = defaultdict(OnlineMeter) # float defaults to 0", "def reset_metric_stats(self):\n\n self.__stats_table = {}", "def reset_metric_stats(self):\n self.__stats_table = {}", "def reset(self):\n reset_system_health_series()", "def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0", "def clear_stats(self):\n self._stats = None", "def reset(self):\n self.sum = [0.] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.] * len(self.topk)", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self):\n self.num_inst = 0\n self.sum_metric = 0.0", "def reset(self):\n self.ref_value = 0.0\n self._average = 0.0\n self.num_samples = 0", "def reset_average(self):\n self._total_time = 0\n self._average_time = 0\n self._calls = 0", "def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0", "def reset_data_recorder(self):\n\n self.t_values = []\n self.x_values = []\n self.tau_values = []", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._is_ddp = get_rank() > -1", "def reset(self):\n super().reset()\n self.m_n = 1\n self.m_num_errors = 0\n self.m_d = 0\n self.m_lastd = 0\n self.m_mean = 0.0\n self.m_std_temp = 0.0\n self.m_m2s_max = 0.0\n self.estimation = 0.0", "def reset(self):\n self.accumulation = None", "def reset(self):\n self.accumulation = None", "def reset(self):\n self._accumulated_time.clear()\n self._hit_count.clear()", "def reset(self):\n self.reset_count += 1\n self._init_data()", "def reset(self):\n super().reset()\n self.sample_count = 1\n self.miss_prob = 1.0\n self.miss_std = 0.0\n self.miss_prob_sd_min = float(\"inf\")\n self.miss_prob_min = float(\"inf\")\n self.miss_sd_min = float(\"inf\")", "def reset(self):\n self.error_p = 0.0\n self.error_i = 0.0\n self.error_d = 0.0\n self.errors = [ 0.0 ] * self.samples\n if callable(self.debug_callback):\n self.debug_callback(\"reset\")", "def reset(self):\n self.current_exposure = None\n self.scores = {}", "def reset_state(self):\n for name in self.metrics:\n self.metrics[name].reset_state()", "def reset(self):\n self.work_state = work_state[\"Measuring\"]\n self.report_mode = report_mode[\"Initiative\"]\n self.duty_cycle = 0\n self.logger.info(\"{}: sensor resetted.\".format(self.sensor_name))", "def reset_stats(self, settings, reset_stream=False):\n\n self.__settings = settings\n self.__stats_table = {}\n self.__sql_stats_table = {}\n self.__slow_transaction = None\n self.__slow_transaction_map = {}\n self.__slow_transaction_old_duration = None\n self.__transaction_errors = []\n self.__synthetics_transactions = []\n\n self.reset_transaction_events()\n self.reset_error_events()\n self.reset_custom_events()\n self.reset_span_events()\n self.reset_synthetics_events()\n # streams are never reset 
after instantiation\n if reset_stream:\n self._span_stream = StreamBuffer(\n settings.infinite_tracing.span_queue_size)", "def resetSimulatedData(self):\n self.space = 0*self.space # numpy array that represents the physical space\n self.susceptible = self.space.size # the current number of healthy people in the population\n self.infected = 0 # the current number of infected people in the population\n self.recovered = 0 # the current number of infected people in the population\n self.snapshots = [] # this will contain numpy arrays that represent each time step\n self.SIR = np.zeros((3,1)) #this contains a time series of the number of susceptible, infected, recovered\n self.time = np.zeros(1) # this array will be the time array\n self.total_infections = 0 # set the number of total infections back to zero", "def reset(self):\n for i in range(0, len(self.__counts)):\n self.__counts[i] = 0\n self.__overflow = 0\n self.__total_count = 0\n self.__total_values = 0\n self.__min = None\n self.__max = None", "def reset(self):\n\n self.scaler = None\n self.isFitted = False\n self.__create_scaler()", "def reset(self):\n self._total_value = 0.0\n self._count = 0", "def reset(self):\n self.temp_data.clear()", "def reset(self,prepare=True):\n if self.lowmem:\n self.load(self.filename,prepare=prepare)\n else:\n self.data = np.copy(self.data_orig)\n self.weights = np.copy(self.weights_orig)\n self.durations = self.getSubintinfo('TSUBINT')\n #if prepare:\n # self.scrunch()", "def reset_state(self):\n self.s = np.copy(self.s_i)", "def reset(self):\n self.data = {}\n self.is_bound = False\n self._errors = None", "def reset(self):\n self.baseline = None\n self.cut = None\n self.manual_push = 0", "def reset(self):\n self.dims.clear()\n self.xlabels.clear()\n self.annotators.clear()\n self._figTitle = None\n self.tbmTitle = None\n self._isSubplot = False\n self._universal_xlabel = False\n self._plotter = None\n self.Nsp = 0", "def reset(self):\n for k,v in self.events.items():\n self.events[k] = None", "def reset(self):\n if self.monotonic_energy is not None:\n self.monotonic_energy.reset()\n if self.chunk_energy is not None:\n self.chunk_energy.reset()\n self.bd_L_prev = 0\n self.key_tail = None", "def _reset(self):\n self._values = {}", "def reset(self):\n\n # Ending variables\n self.time_idle = 0\n self.time_episode = 0\n self.done_time_idle = False\n self.done_falling = False\n self.done_time_episode = False\n\n # hero variables\n self.last_location = None\n self.last_velocity = 0\n\n # Sensor stack\n self.prev_image_0 = None\n self.prev_image_1 = None\n self.prev_image_2 = None\n\n self.last_heading_deviation = 0", "def reset(self):\n self.values.clear()\n\n self.on_reset()", "def reset(self):\n\n self.results = []\n self._plot()", "def reset(self):\n\t\tself._initial = None\n\t\tself._start = None\n\t\tself._time = 0\n\t\tself._total = 0\n\t\treturn self", "def reset(self):\n self.m = normalize(self.m0)\n self.t = 0.0", "def reset(self):\n self._stat = CardMeta()", "def reset(self):\n self.mode = 0\n self.graphs = [[], [], []]\n self.coefficients = []\n self.sample = []", "def clear(self):\r\n\r\n\t\tself.ITerm = 0.0\r\n\t\tself.DTerm = 0.0\r\n\t\tself.last_error = 0.0\r\n\r\n\t\tself.state_history = []\r\n\t\tself.error_history = []\r\n\t\tself.output_history = []\r\n\t\tself.sample_times = []\r\n\t\t\r\n\t\tself.OutputValue = 0.0", "def clear(self):\r\n\r\n\t\tself.state_history = []\r\n\t\tself.error_history = []\r\n\t\tself.output_history = []\r\n\t\tself.sample_times = 
[]\r\n\t\t\r\n\t\tself.OutputValue = 0.0", "def reset(self):\n self._proportional = 0\n self._integral = 0\n self._derivative = 0\n\n self._last_time = self._current_time()\n self._last_output = None\n self._last_input = None", "def clear(self):\r\n\r\n\t\tself.state_history = []\r\n\t\tself.error_history = []\r\n\t\tself.output_history = []\r\n\t\tself.sample_times = []\r\n\t\t\r\n\t\tself.LastOutputValue = 0.0\r\n\t\tself.OutputValue = 0.0", "def clear(self):\r\n\r\n\t\tself.state_history = []\r\n\t\tself.error_history = []\r\n\t\tself.output_history = []\r\n\t\tself.sample_times = []\r\n\t\t\r\n\t\tself.LastOutputValue = 0.0\r\n\t\tself.OutputValue = 0.0", "def reset_stats(self):\n print(\"Reseting stats\")\n self.player_lives = self.ai_stts.player_lives\n self.score = 0\n self.level = 1", "def _reset(self):\n\n # Checking one attribute is enough, because they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.n_samples_seen_\n del self.mean_\n del self.var_", "def clearstats(self, clearstats) :\n\t\ttry :\n\t\t\tself._clearstats = clearstats\n\t\texcept Exception as e:\n\t\t\traise e", "def clearstats(self, clearstats) :\n\t\ttry :\n\t\t\tself._clearstats = clearstats\n\t\texcept Exception as e:\n\t\t\traise e", "def clearstats(self, clearstats) :\n\t\ttry :\n\t\t\tself._clearstats = clearstats\n\t\texcept Exception as e:\n\t\t\traise e", "def clearstats(self, clearstats) :\n\t\ttry :\n\t\t\tself._clearstats = clearstats\n\t\texcept Exception as e:\n\t\t\traise e", "def reset(self):\n self.score = None\n self.true = None\n self.meta = None", "def reset(self):\n self.algo_state = {}\n self.actual_repetitions = 0\n self.next_session = -1\n self.last_session = -1\n self.past_quality = []", "def reset(self):\n self._data = []", "def reset(self):\n self.reserve.reset()\n self.revenue.reset()\n self.transfers.reset()\n self.missions.reset()\n self.debt.reset()\n self.genfund.reset()\n self.macro.reset(self.pop[self.start_yr],self.eco_first)\n self.summary = pd.DataFrame(index=self.names,columns=[t for t in range(self.start_yr,self.stop_yr)])\n self.year = self.start_yr", "def reset(self):\n self.data = self._defaults", "def reset(self):\n self.control_counter = 0\n self.last_position_error = np.zeros(3)\n self.integral_position_error = np.zeros(3)\n self.last_attitude_error = np.zeros(3)\n self.integral_attitude_error = np.zeros(3)", "def reset(self):\n self.source_data = self.get_dict_from_range(None, None)\n self.selection_bounds = None\n self.selection = []\n for c in self.callbacks[\"reset_data\"]:\n c()\n if self.context is not None:\n self.context.doc.add_next_tick_callback(self.update_source)", "def reset_states(self):\n self.mean_makespan_baseline.assign(0)\n self.mean_makespan_train.assign(0)\n self.step.assign(0)", "def reset_metric_variables(self) -> None:\n with self._lock:\n self._reset_metric_variables()", "def reset_global(self):\n self.T = 0\n self.ep = 0\n self.t = 0\n self.G = 0.0\n self._ep_starttime = time.time()", "def reset(self):\n self.damage_dealt = 0\n self.kills = 0\n self.got_killed = False\n self.fitness = 0", "def reset(self):\n self.game_status = game_status.GameStatus(copy.deepcopy(self.initial_rosters),\n self.game_status.game_date)\n self._current_event_ndx = 0", "def reset_sgr(self):\n self.intensity = 0\n self.italic = False\n self.bold = False\n self.underline = False\n self.foreground_color = None\n self.background_color = None", "def reset() -> None:\n Stat._cache = SortedDict()", "def 
reset_metrics(self):\n self.metrics['loss'] = 0.0\n self.metrics['num_tokens'] = 0\n self.metrics['correct_tokens'] = 0\n self.metrics['correct_pred'] = 0\n self.metrics['pred_count'] = 0", "def reset(self):\n self.N = self.N[0:2]\n self.t = 2\n self.harvest_available = 0\n self.harvest_record = [0,0]", "def reset(self):\n self.x_prev = np.zeros_like(self.mu)", "def reset_turn_stats(self):\n\n # Set the attribute value to 0\n self._current_score = 0", "def reset(self):\n self._value_estimates[:] = self.prior\n self.action_attempts[:] = 0\n self.last_action = None\n self.t = 0", "def __reset(self):\n\t\tself.__highest = -float('inf')\n\t\tself.__lowest = float('inf')\n\t\tself.__total = 0\n\t\tself.__steps = 0\n\t\tself.__cold_days = 0", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.mean_\n del self.var_", "def reset(self):\r\n self.myOutputs = list()\r\n self.myPlots = list()\r\n self.pause = 0\r\n self.doMPL = False\r\n self.graphLabelsX = []\r\n self.graphLabelsY = []\r\n for i in self.xData.iterkeys():\r\n self.xData[i] = []\r\n self.yData[i] = []\r\n self.xyData[i] = []\r\n self.graphs[i] = Gnuplot(debug=0)\r\n self.figures[i] = 0\r\n self.mplFigCount = 0", "def reset_results_arrays(self):\n raw_dims = self._dataset.raw_dims\n raw_dims = list(raw_dims[::-1])\n \n if len(self.data) != raw_dims:\n self.freq_all = np.zeros(raw_dims, complex)\n self.time_all = np.zeros(raw_dims, complex)", "def reinit(self):\n self.logger.info(\"Reinit called. Clear the population.\")\n self.set_init_population([], perf_name=None)\n self._gt_rollouts = []\n self._gt_scores = []", "def reset(self) -> None:\n self.current = 0\n self.num_cuts = 0", "def _reset_histories(self):\n self.train_loss_history = []\n self.train_pos_dist_history = []\n self.train_neg_dist_history = []\n self.val_loss_history = []\n self.val_pos_dist_history = []\n self.val_neg_dist_history = []", "def reset(self):\n self.tracker.reset()\n self.total_max_q = 0.0\n self.episode_step = 0\n self.episode += 1", "def reset(self):\n self.__sets = []\n self._computed = False", "def reset_results_arrays(self):\n raw_dim0 = self._dataset.raw_dims[0]\n \n if len(self.data) != raw_dim0:\n self.freq_current = np.zeros(raw_dim0, complex)", "def reset(self):\n\n self.elapsed_time = 0", "def _reset_histories(self):\n\t\tself.train_loss_history = []\n\t\tself.train_acc_history = []\n\t\tself.val_acc_history = []\n\t\tself.val_loss_history = []", "def reset_transaction_events(self):\n\n if self.__settings is not None:\n self._transaction_events = SampledDataSet(\n self.__settings.event_harvest_config.\n harvest_limits.analytic_event_data)\n else:\n self._transaction_events = SampledDataSet()", "def reset():", "def reset():", "def reset():" ]
[ "0.7742906", "0.76873755", "0.76873755", "0.7634818", "0.75161463", "0.7410027", "0.7406162", "0.7288068", "0.72181994", "0.7202865", "0.7152267", "0.7146272", "0.69828737", "0.6976172", "0.6946784", "0.6946784", "0.6946784", "0.6941798", "0.69132906", "0.6901035", "0.68861943", "0.6865253", "0.68582", "0.68410826", "0.68407196", "0.68407196", "0.68193007", "0.6816973", "0.68089724", "0.67746085", "0.67536366", "0.6739397", "0.67226845", "0.67193407", "0.6700165", "0.6682633", "0.66578156", "0.665363", "0.66438955", "0.661865", "0.6606193", "0.65978545", "0.6582929", "0.65576357", "0.6550307", "0.6543814", "0.6543017", "0.65373474", "0.6536049", "0.653503", "0.65348774", "0.6529204", "0.6526249", "0.65160036", "0.6513358", "0.65102786", "0.6509054", "0.6502991", "0.6502991", "0.6495653", "0.6491463", "0.64601403", "0.64601403", "0.64601403", "0.64601403", "0.64558244", "0.64521027", "0.6446963", "0.6431929", "0.642925", "0.6427681", "0.64273185", "0.64247394", "0.6422794", "0.6422269", "0.64195305", "0.6416748", "0.6406664", "0.6394978", "0.63949555", "0.63794404", "0.6379068", "0.63702387", "0.63695616", "0.6369141", "0.6368345", "0.6358609", "0.6358318", "0.6352675", "0.63474905", "0.63444775", "0.63426757", "0.634069", "0.6338464", "0.6336864", "0.6329956", "0.6321943", "0.6317886", "0.6317886", "0.6317886" ]
0.71011585
12
Creates a snapshot of the accumulated statistics, error details and slow transaction and returns it. This is a shallow copy, only copying the top level objects. The originals are then reset back to being empty, with the exception of the dictionary mapping metric (name, scope) to the integer identifiers received from the core application. The latter is retained as should carry forward to subsequent runs. This method would be called to snapshot the data when doing the harvest.
def harvest_snapshot(self, flexible=False): snapshot = self._snapshot() # Data types only appear in one place, so during a snapshot it must be # represented in either the snapshot or in the current stats object. # # If we're in flexible harvest, the goal is to have everything in the # whitelist appear in the snapshot. This means, we must remove the # whitelist data types from the current stats object. # # If we're not in flexible harvest, everything excluded from the # whitelist appears in the snapshot and is removed from the current # stats object. if flexible: whitelist_stats, other_stats = self, snapshot snapshot.reset_non_event_types() else: whitelist_stats, other_stats = snapshot, self self.reset_non_event_types() event_harvest_whitelist = \ self.__settings.event_harvest_config.whitelist # Iterate through harvest types. If they are in the list of types to # harvest reset them on stats_engine otherwise remove them from the # snapshot. for nr_method, stats_methods in EVENT_HARVEST_METHODS.items(): for stats_method in stats_methods: if nr_method in event_harvest_whitelist: reset = getattr(whitelist_stats, stats_method) else: reset = getattr(other_stats, stats_method) reset() return snapshot
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_flat_results(self):\n test_results, error_dict, framestats = self.get_results()\n test_results = self._merge_test_results(test_results, error_dict)\n\n results = copy.deepcopy(test_results)\n results.update(framestats)\n\n return results", "def __copy__(self) :\n return self.build(self.scope.copy(), self.scope_vids.copy(), np.array(self.table))", "def copy(self):\n\n return BenchmarkObj(self.__df_timings.copy(), dtype=self.dtype, multivar=self.multivar, multiindex=self.multiindex)", "def copy(self):\n\t\tassert ltrace_func(TRACE_BASE)\n\t\ttemp = self.__class__()\n\t\ttemp.copy_from(self)\n\t\treturn temp", "def get_pure_data_copy(self):\n import copy\n data=copy.copy(self)\n data.xp = data.xp.get_pure_data_copy()\n data.timetable = data.timetable.get_pure_data_copy() \n return data", "def get_snapshot(self):\n data = {\n \"t\": self.sim.t,\n \"time\": self.time,\n \"vehicles\": self.sim.vehicles,\n \"stations\": self.sim.stations,\n \"state\": self.state,\n \"done\": self.is_done}\n return copy.deepcopy(data)", "def create_workarea(self):\n\n stats = copy.copy(self)\n stats.reset_stats(self.__settings)\n\n return stats", "def snapshot(self) -> dict:\n snapshot: Dict[str, Any] = {\"status\": self.state.name, \"reals\": {}}\n for real in self.children:\n snapshot[\"reals\"][real.id_] = {\"status\": real.state.name, \"steps\": {}}\n if real.state.data:\n snapshot[\"reals\"][real.id_][\"data\"] = real.state.data\n\n for step in real.children:\n step_d: Dict[str, Any] = {\n \"status\": step.state.name,\n \"jobs\": {},\n }\n if step.state.data:\n step_d[\"data\"] = step.state.data\n snapshot[\"reals\"][real.id_][\"steps\"][step.id_] = step_d\n for job in step.children:\n job_d: Dict[str, Any] = {\"status\": job.state.name}\n if job.state.data:\n job_d[\"data\"] = job.state.data\n snapshot[\"reals\"][real.id_][\"steps\"][step.id_][\"jobs\"][\n job.id_\n ] = job_d\n return snapshot", "def CreateSnapshot(self) -> Any:\r\n\r\n return (\r\n id(self),\r\n self.normalized_iter.Clone(),\r\n len(self.results),\r\n self._ignore_whitespace_ctr,\r\n )", "def copy(self):\n copy = GeneSet(dict())\n\n copy.gene_sets = deepcopy(self.gene_sets)\n copy.gene_set_names = deepcopy(self.gene_set_names)\n copy.gene_set_size = deepcopy(self.gene_set_size)\n copy.interactors = deepcopy(self.interactors)\n copy.n_curated = deepcopy(self.n_curated)\n copy.n_interactors = deepcopy(self.n_interactors)\n\n return copy", "def fresh_copy(self):\n return OrderedGraph()", "def copy(self):\n return self.update({})", "def copy(self):\n return self.as_dataframe(self.data.copy())", "def __deepcopy__(self, memo):\n\t\tcopy_paster = Log()\n\t\tcopy_paster.__dict__.update(self.__dict__)\n\t\tcopy_paster.cur_tensor = self.cur_tensor.clone()\n\t\treturn copy_paster", "def take_snapshot(self):\r\n self.snapshot = self.max_gain, self.__dup_array(), copy.copy(self.free_cell_list)", "def __init__(self, aggregation_depth, include_bytes=True):\n\n self._prev_stats = {}\n self._aggregation_depth = aggregation_depth\n self._include_bytes = include_bytes\n\n self.init_cur_stats()", "def fresh_copy(self):\n return OrderedMultiGraph()", "def snapshot(self):\n pass", "def copy(self):\n return self.from_dict(self.to_dict(True))", "def copy(self):\n new = MetaResult(self.estimator,\n self.masker,\n copy.deepcopy(self.maps))\n return new", "def __copy__(self):\n cls = self.__class__\n result = cls.__new__(cls)\n to_copy = {\"_cache\", \"_buffers\", \"_parameters\", \"_modules\"}\n result.__dict__.update(\n {k: v.copy() if k in to_copy else 
v for k, v in self.__dict__.items()}\n )\n return result", "def copy(self):\n s = MemoryData(self.address, self.size, self.sort, self.irsb, self.irsb_addr, self.stmt, self.stmt_idx,\n pointer_addr=self.pointer_addr, max_size=self.max_size, insn_addr=self.insn_addr\n )\n s.refs = self.refs.copy()\n\n return s", "def clean_copy(self) -> \"StorableObject\":\n return StorableObject(\n id=self.id, data=self.data, tags=self.tags, description=self.description\n )", "def copy(self) -> \"SampleMetadata\":\n return type(self)(\n samples=self.__internal_samples.copy(),\n axis=0,\n metadata=self.metadata,\n name=self.name,\n )", "def copy(self):\n return self.__class__(dict(self))", "def _shallow_clone_dataset(self: TAvalancheDataset) -> TAvalancheDataset:\n dataset_copy = copy.copy(self)\n dataset_copy._flat_data = self._flat_data._shallow_clone_dataset()\n return dataset_copy", "def reset(self):\n self.stats = {}", "def deepcopy(self):\n return self.copy()", "def copy(self):\n cls = self.__class__\n new_graph = cls.__new__(cls)\n new_graph._nodes = self._nodes[:]\n new_graph._node_wip = self._node_wip[:]\n new_graph._edges = self._edges[:]\n if self._sorted_nodes:\n new_graph._sorted_nodes = self._sorted_nodes[:]\n else:\n new_graph._sorted_nodes = None\n new_graph.predecessors = {}\n for key, val in self.predecessors.items():\n new_graph.predecessors[key] = self.predecessors[key][:]\n new_graph.successors = {}\n for key, val in self.successors.items():\n new_graph.successors[key] = self.successors[key][:]\n return new_graph", "def deepcopy(self):\n return copy.deepcopy(self)", "def copy(self) -> \"Memory\":\n return Memory(values={dataclasses.replace(k): v for k, v in self.values.items()})", "def get_immutable_clone(self) -> Snapshot:\n return Snapshot(\n signers=frozenset(self.signers),\n block_hash=self.block_hash,\n votes=frozenset(self.votes),\n tallies=self.tallies.copy()\n )", "def deep_copy(self):\n return self.__class__(self.inputs, self.outputs, self.middle)", "def copy(self, copy_results: bool = True) -> \"ExperimentData\":\n new_instance = ExperimentData(\n backend=self.backend,\n service=self.service,\n parent_id=self.parent_id,\n job_ids=self.job_ids,\n child_data=list(self._child_data.values()),\n verbose=self.verbose,\n )\n new_instance._db_data = self._db_data.copy()\n new_instance._db_data.experiment_id = str(\n uuid.uuid4()\n ) # different id for copied experiment\n if self.experiment is None:\n new_instance._experiment = None\n else:\n new_instance._experiment = self.experiment.copy()\n\n LOG.debug(\n \"Copying experiment data [Experiment ID: %s]: %s\",\n self.experiment_id,\n new_instance.experiment_id,\n )\n\n # Copy basic properties and metadata\n\n new_instance._jobs = self._jobs.copy_object()\n new_instance._auto_save = self._auto_save\n new_instance._extra_data = self._extra_data\n\n # Copy circuit result data and jobs\n with self._result_data.lock: # Hold the lock so no new data can be added.\n new_instance._result_data = self._result_data.copy_object()\n for jid, fut in self._job_futures.items():\n if not fut.done():\n new_instance._add_job_future(new_instance._jobs[jid])\n\n # If not copying results return the object\n if not copy_results:\n return new_instance\n\n # Copy results and figures.\n # This requires analysis callbacks to finish\n self._wait_for_futures(self._analysis_futures.values(), name=\"analysis\")\n with self._analysis_results.lock:\n new_instance._analysis_results = ThreadSafeOrderedDict()\n new_instance.add_analysis_results([result.copy() for result in 
self.analysis_results()])\n with self._figures.lock:\n new_instance._figures = ThreadSafeOrderedDict()\n new_instance.add_figures(self._figures.values())\n\n # Recursively copy child data\n child_data = [data.copy(copy_results=copy_results) for data in self.child_data()]\n new_instance._set_child_data(child_data)\n return new_instance", "def take_snapshot(self):\r\n self.snapshot = self.name, self.size, copy.copy(self.cells)\r\n self.bucket_array.take_snapshot()", "def take_snapshot(self):\r\n self.snapshot = self.blockA, self.blockB, self.blockA_locked, self.blockB_locked, self.blockA_free, \\\r\n self.blockB_free, copy.copy(self.blockA_cells), copy.copy(self.blockB_cells), self.cut", "def __copy__(self):\n\t\tcopy_paster = Log()\n\t\tcopy_paster.__dict__.update(self.__dict__)\n\t\tcopy_paster.cur_tensor = self.cur_tensor.clone()\n\t\treturn copy_paster", "def fresh_copy(self):\n return OrderedMultiDiGraph()", "def __deepcopy__(self, memo):\n copy = self.__class__()\n copy.wvalues = self.wvalues\n return copy", "def __deepcopy__(self, memo):\n obj = self.__class__()\n for k, v in self.__dict__.items():\n if k == \"_result_cache\":\n obj.__dict__[k] = None\n else:\n obj.__dict__[k] = copy.deepcopy(v, memo)\n return obj", "def __deepcopy__(self, memo):\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n for k, v in self.__dict__.items():\n if k not in ['viewer', 'automatic_rendering_callback']:\n setattr(result, k, copy.deepcopy(v, memo))\n else:\n setattr(result, k, None)\n return result", "def fresh_copy(self):\n return OrderedDiGraph()", "def duplicate(self):\r\n duplicate = Profile()\r\n \r\n for i in self.__dict__:\r\n if type(getattr(self, i)) is dict:\r\n setattr(duplicate, i, getattr(self, i).copy())\r\n else:\r\n setattr(duplicate, i, getattr(self, i))\r\n\r\n return duplicate", "def __deepcopy__(self, memo):\n obj = self.__class__()\n for k, v in self.__dict__.items():\n if k == '_result_cache':\n obj.__dict__[k] = None\n else:\n obj.__dict__[k] = copy.deepcopy(v, memo)\n return obj", "def copy(self):\n return self.__class__(\n self.xs.copy(), self.ys.copy(),\n self.gauge_length,\n self.sample_width,\n self.sample_thickness,\n self.name\n )", "def copy(self):\n\t\tassert ltrace_func(TRACE_BASE)\n\t\treturn NamedObject.copy(self)", "def __deepcopy__(self, memo):\n obj = self.__class__()\n for k, v in self.__dict__.items():\n if k in ('_iter', '_result_cache'):\n obj.__dict__[k] = None\n else:\n obj.__dict__[k] = copy.deepcopy(v, memo)\n return obj", "def deepcopy(self):\n return copymod.deepcopy(self)", "def main_data(self) -> gpd.GeoDataFrame:\n if self._cached_data is None:\n data = self._get_main_data_dict()\n geometry = self.shot_geolocations\n\n self._cached_data = gpd.GeoDataFrame(data, geometry=geometry, crs=WGS84)\n\n return self._cached_data", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return 
copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def to_dataframe(self, copy=False):\n\n if not copy:\n return self.__df_timings\n else:\n return self.__df_timings.copy()", "def Clone(self):\n st = PunterGameState()\n st.fullGraph = self.fullGraph\n st.score = self.score\n st.playerJustMoved = self.playerJustMoved\n st.pathes = copy.deepcopy(self.pathes)\n st.scores = copy.deepcopy(self.scores)\n st.endpoints = self.endpoints[:]\n return st", "def __init__(self):\n self.stats = {}\n self.stats['hits'] = 0\n self.stats['operations'] = {}\n self.stats['operations']['GetCapabilities'] = {}\n self.stats['operations']['GetCapabilities']['hits'] = 0\n self.stats['operations']['POST'] = {}\n self.stats['operations']['POST']['hits'] = 0", "def reset_metric_stats(self):\n self.__stats_table = {}", "def reset_metric_stats(self):\n\n self.__stats_table = {}", "def snapshot(self):\n return {\"unit_name\": self.unit_name}", "def copy(self):\n return Dataset(self._data.copy(), self.gene_meta.copy(), self.n_genes)", "def copy(self):\r\n return copy.deepcopy(self)", "def __deepcopy__(self, memo):\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n for k, v in self.__dict__.items():\n if k not in ['viewer', '_record_video_wrapper']:\n setattr(result, k, copy.deepcopy(v, memo))\n else:\n setattr(result, k, None)\n return result", "def clone(self):\n memo = dict()\n c = self._clone(memo)\n c._clone_rip(memo)\n return c", "def get_historical_data_copy(self):\n return copy.deepcopy(self._historical_data)", "def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())", "def init_cur_stats(self):\n self._cur_stats = defaultdict(lambda: defaultdict(int))\n self._cur_stats[\"writes\"][\"/\"] = 0\n self._cur_stats[\"reads\"][\"/\"] = 0\n self._cur_stats[\"total\"][\"/writes\"] = 0\n self._cur_stats[\"total\"][\"/reads\"] = 0\n\n if self._include_bytes:\n self._cur_stats[\"writesBytes\"][\"/\"] = 0\n self._cur_stats[\"readsBytes\"][\"/\"] = 0\n self._cur_stats[\"total\"][\"/writeBytes\"] = 0\n self._cur_stats[\"total\"][\"/readBytes\"] = 0", "def _transfer(self, dfnew):\n newobj = copy.deepcopy(self) #This looks like None, but is it type (MetaPandasObject, just __union__ prints None\n newobj._frame = dfnew\n \n # THESE ARE NEVER TRANSFERED AT DF LEVEL, JUST CREATED NEW. 
TRY\n # df.loc\n # a = df*50\n # a._loc ---> Will be None\n #newobj._loc = self._loc\n #newobj._iloc = self._iloc\n #newobj._ix = self._ix \n return newobj", "def derive_newrelic_qcache(self):\n # Query Cache\n vals = self.get_values([\"status/qcache_hits\", \"status/com_select\", \"status/qcache_free_blocks\",\n \"status/qcache_total_blocks\", \"status/qcache_inserts\", \"status/qcache_not_cached\"])\n if vals:\n qc_hits, reads, free, total, inserts, not_cached = vals\n\n self.update_metric(\"newrelic/query_cache_hits\", qc_hits)\n self.update_metric(\"newrelic/query_cache_misses\", inserts)\n self.update_metric(\"newrelic/query_cache_not_cached\", not_cached)\n\n pct_query_cache_hit_utilization = 0.0\n if (qc_hits + reads) > 0:\n pct_query_cache_hit_utilization = (qc_hits / (qc_hits + reads)) * 100.0\n\n self.update_metric(\"newrelic/pct_query_cache_hit_utilization\", pct_query_cache_hit_utilization)\n\n pct_query_cache_memory_in_use = 0.0\n if total > 0:\n pct_query_cache_memory_in_use = 100.0 - ((free / total) * 100.0)\n\n self.update_metric(\"newrelic/pct_query_cache_memory_in_use\", pct_query_cache_memory_in_use)\n\n # Temp Table\n vals = self.get_values([\"status/created_tmp_tables\", \"status/created_tmp_disk_tables\"])\n if vals:\n tmp_tables, tmp_tables_disk = vals\n\n pct_tmp_tables_written_to_disk = 0.0\n if tmp_tables > 0:\n pct_tmp_tables_written_to_disk = (tmp_tables_disk / tmp_tables) * 100.0\n\n self.update_metric(\"newrelic/pct_tmp_tables_written_to_disk\", pct_tmp_tables_written_to_disk)", "def copy(self):\n return Level(repr=self.as_dict())", "def copy(self):\n cpy = self._copyBase(MapEvalTree(self[:]))\n cpy.cachesize = self.cachesize # member specific to MapEvalTree\n return cpy", "def copy(self):\n cls = type(self)\n new = cls()\n new.default = deepcopy(self.default)\n new.current = deepcopy(self.current)\n new.stepnames = deepcopy(self.stepnames)\n return new", "def _copy_(self):\n return copy.copy(self)", "def __deepcopy__(self, memo):\n return Quantity(copy.deepcopy(self._value, memo), self.unit)", "def updated_object(self):\n o = deepcopy(self.object)\n o[\"name\"] += \"-copy\"\n return o", "def copy(self):\n return super().copy()", "def _shallow_clone_dataset(self: TDataWTransform) -> TDataWTransform:\n dataset_copy = copy.copy(self)\n dataset_copy._transform_groups = copy.copy(dataset_copy._transform_groups)\n dataset_copy._frozen_transform_groups = copy.copy(\n dataset_copy._frozen_transform_groups\n )\n return dataset_copy", "def derive_newrelic_stats(self):\n self.logger.debug(\"Collecting stats for newrelic\")\n self.derive_newrelic_volume()\n self.derive_newrelic_throughput()\n self.derive_newrelic_innodb()\n self.derive_newrelic_qcache()\n self.derive_newrelic_slaves()", "def take_snapshot(self):\r\n self.snapshot = self.gain, self.block, self.locked, self.bucket_num", "def copy(self):\n import copy as pcopy\n return pcopy.deepcopy(self)", "def copy(self):\n copy = TemporalGraph(self._start, self._end)\n copy.update(self)\n return copy", "def copy(self):\n return Result(super().copy())", "def get_state(self, deepcopy: bool = True):\n s = self.cache_ if hasattr(self, \"cache_\") else {}\n return copy.deepcopy(s) if deepcopy else s", "def _clone(self):\n c = self.__class__(\n model=self.model,\n query=self.query.chain(),\n using=self._db,\n hints=self._hints,\n )\n c._sticky_filter = self._sticky_filter\n c._for_write = self._for_write\n c._prefetch_related_lookups = self._prefetch_related_lookups[:]\n c._known_related_objects = 
self._known_related_objects\n c._iterable_class = self._iterable_class\n c._fields = self._fields\n return c", "def copy(self):\n return self.mutate().simple_copy()", "def __deepcopy__(self, memo):\r\n new_inst = super().__deepcopy__(memo)\r\n new_inst.road_width = self.road_width\r\n new_inst.road_length = self.road_length\r\n new_inst.surface = self.surface\r\n \r\n return new_inst", "def copy(self):\n new = self.__class__(integration=None, data=None)\n for attribute, value in self.__dict__.items():\n if attribute in self.referenced_attributes:\n setattr(new, attribute, value)\n elif hasattr(value, 'copy'):\n setattr(new, attribute, value.copy())\n else:\n setattr(new, attribute, deepcopy(value))\n return new", "def copy(self):\n return pdict(dict.copy(self))", "def __deepcopy__(self, memo: dict[Any, Any]) -> geom:\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n old = self.__dict__\n new = result.__dict__\n\n # don't make a deepcopy of data, or environment\n shallow = {\"data\", \"_kwargs\", \"environment\"}\n for key, item in old.items():\n if key in shallow:\n new[key] = old[key]\n memo[id(new[key])] = new[key]\n else:\n new[key] = deepcopy(old[key], memo)\n\n return result", "def snapshot(self):\n snapshot = super(VirtualMachineDAO, self).snapshot()\n for entry in snapshot:\n vm = entry.get(VirtualMachineDAO.INNER_OBJ)\n vm['network'] = VMNetworkDAO(self.session, vm.get(VirtualMachineDAO.FOREIGN_KEY)).snapshot()\n return snapshot" ]
[ "0.5934949", "0.58573073", "0.5802806", "0.5787985", "0.5758199", "0.5758142", "0.57447433", "0.57192504", "0.56812185", "0.56527025", "0.5649071", "0.56446093", "0.5642422", "0.5546954", "0.5542068", "0.5531035", "0.5524276", "0.55112106", "0.54917693", "0.54811484", "0.54652274", "0.54537374", "0.5438235", "0.54345053", "0.54282206", "0.5424229", "0.5424176", "0.54232955", "0.5418356", "0.54171884", "0.54165465", "0.5414276", "0.5410124", "0.5392498", "0.5387318", "0.5377655", "0.53765255", "0.5370712", "0.5362439", "0.53606987", "0.5359754", "0.53553754", "0.53531265", "0.5347806", "0.532736", "0.5310748", "0.5309094", "0.53088975", "0.5307617", "0.5303066", "0.5303066", "0.5303066", "0.5303066", "0.5303066", "0.5303066", "0.5303066", "0.5303066", "0.5303066", "0.5303066", "0.5303066", "0.5303066", "0.5303066", "0.5303066", "0.5303066", "0.5303066", "0.5300905", "0.5287608", "0.52860683", "0.5285083", "0.5276645", "0.52746713", "0.52724516", "0.5260488", "0.5257124", "0.52552867", "0.52547115", "0.5252767", "0.52497095", "0.5237924", "0.52359235", "0.5228307", "0.52232224", "0.5220752", "0.5211702", "0.5210698", "0.52106315", "0.5210178", "0.5204689", "0.51973915", "0.5195693", "0.51934046", "0.5191894", "0.51897335", "0.5188902", "0.5187907", "0.5186611", "0.5184845", "0.51817137", "0.51781636", "0.5169009", "0.51677275" ]
0.0
-1
Creates and returns a new empty stats engine object. This would be used to distill stats from a single web transaction before then merging it back into the parent under a thread lock.
def create_workarea(self): stats = copy.copy(self) stats.reset_stats(self.__settings) return stats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create( ):\n \n uuid = generate_uuid( 10 )\n\n stats = Stats( key_name = uuid,\n uuid = uuid,\n )\n stats.put()\n return stats", "def empty_instance():\n from weighted_graph import Graph\n return Graph()", "def __init__(self, stats, keep=False):\n self.stats = stats if keep else None\n self.market_price_usd = stats.get('market_price_usd')\n self.hash_rate = stats.get('hash_rate')\n self.total_fees_btc = stats.get('total_fees_btc')\n self.n_btc_mined = stats.get('n_btc_mined')\n self.n_tx = stats.get('n_tx')\n self.n_blocks_mined = stats.get('n_blocks_mined')\n self.minutes_between_blocks = stats.get('minutes_between_blocks')\n self.total_bc = stats.get('totalbc')\n self.n_blocks_total = stats.get('n_blocks_total')\n self.estimated_transaction_volume_usd = stats.get('estimated_transaction_volume_usd')\n self.blocks_size = stats.get('blocks_size')\n self.miners_revenue_usd = stats.get('miners_revenue_usd')\n self.next_retarget = stats.get('nextretarget')\n self.difficulty = stats.get('difficulty')\n self.estimated_btc_sent = stats.get('estimated_btc_sent')\n self.miners_revenue_btc = stats.get('miners_revenue_btc')\n self.total_btc_sent = stats.get('total_btc_sent')\n self.trade_volume_btc = stats.get('trade_volume_btc')\n self.trade_volume_usd = stats.get('trade_volume_usd')\n self.timestamp = stats.get('timestamp')", "def start(cls, stats, keep=False):\n return cls(stats, keep)", "def Get() -> stats_collector.StatsCollector:\n if _stats_singleton is None:\n raise StatsNotInitializedError()\n return _stats_singleton", "def newStatGraph():\n e = ET.Element('stats')\n n = newElement(e, 'ok')\n n = newElement(e, 'not_ok')\n return ET.ElementTree(e)", "def __init__(self):\n self.stats = None\n self.ticker = None", "def __init__(self):\n self.stats = {}\n self.stats['hits'] = 0\n self.stats['operations'] = {}\n self.stats['operations']['GetCapabilities'] = {}\n self.stats['operations']['GetCapabilities']['hits'] = 0\n self.stats['operations']['POST'] = {}\n self.stats['operations']['POST']['hits'] = 0", "def __init__(self):\n self._workload = None\n self._engine = Engine()", "def __init__(self, aggregation_depth, include_bytes=True):\n\n self._prev_stats = {}\n self._aggregation_depth = aggregation_depth\n self._include_bytes = include_bytes\n\n self.init_cur_stats()", "def reset_stats(self, settings, reset_stream=False):\n\n self.__settings = settings\n self.__stats_table = {}\n self.__sql_stats_table = {}\n self.__slow_transaction = None\n self.__slow_transaction_map = {}\n self.__slow_transaction_old_duration = None\n self.__transaction_errors = []\n self.__synthetics_transactions = []\n\n self.reset_transaction_events()\n self.reset_error_events()\n self.reset_custom_events()\n self.reset_span_events()\n self.reset_synthetics_events()\n # streams are never reset after instantiation\n if reset_stream:\n self._span_stream = StreamBuffer(\n settings.infinite_tracing.span_queue_size)", "def _create(self, context, values):\n # initialize load stats from existing instances:\n compute_node = db.compute_node_create(context, values)\n return compute_node", "def __init__(self, settings):\n self._settings = settings\n self._stats = None", "def __init__(self, stats):\n self._meta = stats['meta'].item()\n self._stats = stats['data'].item()\n self._moving_average_cache = {}", "def __enter__(self):\r\n if not self._engine:\r\n self.restart()\r\n return self", "def statsWorker():\n logger.info('STATS: Starting. 
Will report out every {0:.1g} hours'.format(\n config.STATS_HOURS))\n while True:\n gevent.sleep(timedelta(hours=config.STATS_HOURS).total_seconds())\n logger.info('STATS: {0}'.format(stats))\n stats.resetStats()\n\n return", "def get_engine(settings: dict) -> sqlalchemy.engine.base.Engine:\n engine = create_engine(settings['sqlalchemy.url'], pool_recycle=3600)\n return engine", "def __init__(self):\n self.ts = dict()\n self.cache = dict()", "def _init_local_node_stats_publisher(self):\n stats = self._local_node_stats\n # Init cache\n stats.cache = StatsCache(stats.cache_size)\n # Init source\n stats_source = NodeStatsSource()\n # Configure stats publishing\n stats.publisher = StatsPublisher(stats_source, stats.update_interval)\n stats.publisher.subscribe(stats.cache)\n self._publishers.append(stats.publisher)\n # Configure handlers\n self._routes['/stats/local/node/cache'] = HandlerInfo(\n handler_class=CachedStatsHandler,\n init_kwargs=dict(stats_cache=stats.cache)\n )\n self._routes['/stats/local/node/current'] = HandlerInfo(\n handler_class=CurrentStatsHandler,\n init_kwargs=dict(stats_source=stats_source)\n )", "def __init__(self):\n super(MultiProcessEngine, self).__init__()\n self._debug_output = False\n self._name = 'Main'\n self._last_worker_number = 0\n self._log_filename = None\n self._pid = os.getpid()\n self._process_information = process_info.ProcessInfo(self._pid)\n self._process_information_per_pid = {}\n self._processes_per_pid = {}\n self._quiet_mode = False\n self._rpc_clients_per_pid = {}\n self._rpc_errors_per_pid = {}\n self._status_update_active = False\n self._status_update_thread = None\n self._storage_writer = None\n self._worker_memory_limit = definitions.DEFAULT_WORKER_MEMORY_LIMIT", "def __enter__(self):\n self.__init__()\n return self", "def _new_instance(self):\n return self.__class__(self._vmodule)", "def __init__(self, name='TemporaryStorage'):\n\n BaseStorage.__init__(self, name)\n\n self._index = {}\n self._referenceCount = {}\n self._oreferences = {}\n self._opickle = {}\n self._tmp = []\n self._conflict_cache = {}\n self._last_cache_gc = 0\n self._recently_gc_oids = [None for x in range(RECENTLY_GC_OIDS_LEN)]\n self._oid = z64\n self._ltid = z64\n\n # Alow overrides for testing.\n self._conflict_cache_gcevery = CONFLICT_CACHE_GCEVERY\n self._conflict_cache_maxage = CONFLICT_CACHE_MAXAGE", "def __init__(self, router): # Promotion constructor :)\r\n # TODO: Use __bases__ to do this instead?\r\n self.__dict__ = router.__dict__\r\n self.reset()\r\n # StatsRouters should not be destroyed when Tor forgets about them\r\n # Give them an extra refcount:\r\n self.refcount += 1\r\n plog(\"DEBUG\", \"Stats refcount \"+str(self.refcount)+\" for \"+self.idhex)", "def StartProfiling(self):\n hp = hpy()\n hp.setrelheap()\n return hp", "def _create_empty(cls):\n self = object.__new__(cls)\n self.avatar_hash = 0\n self.avatar_type = IconType.none\n self.boosts_since = None\n self.flags = GuildProfileFlag()\n self.joined_at = None\n self.nick = None\n self.pending = False\n self.role_ids = None\n self.timed_out_until = None\n return self", "def __init__(self):\n self.stats = Statblock(parent=self)\n self.compiled_circ = None\n self.backend = None\n self.job_id = None\n self.transpiler_config = None\n self.circuit = None\n\n # if isinstance(circuit, QuantumCircuit):\n # self.circuit = circuit\n # elif circuit is not None:\n # raise TypeError(f'Circuit must be a QuantumCircuit, or Premade. 
Was given type: {type(circuit)}')", "def __new__(cls, *args, **kwargs):\n if GraphData._instance is None:\n with GraphData._instance_lock:\n if GraphData._instance is None:\n GraphData._instance = object.__new__(cls)\n return GraphData._instance", "def __init__(self) -> None:\n self.metrics = {}\n self.current = None\n self.run = None", "def __init__(self):\n self._maxTime = 0\n self._activeHeap = []\n self._activeDict = {}\n self._graph = _Graph()", "def __enter__(self):\n self.initialize()\n return self", "def _create_load_stats(self, context, instance=None):\n values = {}\n\n if instance:\n instances = [instance]\n else:\n self.stats.clear() # re-generating all, so clear old stats\n\n # grab all instances that are not yet DELETED\n filters = {'host': self.host, 'deleted': False}\n instances = db.instance_get_all_by_filters(context,\n {'host': self.host})\n\n for instance in instances:\n self.stats.add_stats_for_instance(instance)\n\n values['current_workload'] = self.stats.calculate_workload()\n values['running_vms'] = self.stats.num_instances\n values['vcpus_used'] = self.stats.num_vcpus_used\n values['stats'] = self.stats\n return values", "def init_cur_stats(self):\n self._cur_stats = defaultdict(lambda: defaultdict(int))\n self._cur_stats[\"writes\"][\"/\"] = 0\n self._cur_stats[\"reads\"][\"/\"] = 0\n self._cur_stats[\"total\"][\"/writes\"] = 0\n self._cur_stats[\"total\"][\"/reads\"] = 0\n\n if self._include_bytes:\n self._cur_stats[\"writesBytes\"][\"/\"] = 0\n self._cur_stats[\"readsBytes\"][\"/\"] = 0\n self._cur_stats[\"total\"][\"/writeBytes\"] = 0\n self._cur_stats[\"total\"][\"/readBytes\"] = 0", "def create_instance(min_bds, max_bds, debug=True):\n if not debug:\n return StatsTrackerDoNothing()\n\n try:\n return StatsTrackerBase(min_bds, max_bds)\n except:\n return StatsTrackerArray(min_bds, max_bds)", "def get_statistics(self):\n\t\treturn Job(SDK.PrlSrv_GetStatistics(self.handle)[0])", "def create_statistics(self):\n now = datetime.now()\n min_timestamp = Statistic.objects.all().aggregate(Max('timestamp_end'))[\"timestamp_end__max\"]\n max_timestamp = (now + ((datetime.min - now) % timedelta(minutes=60)) - timedelta(minutes=60)).replace(tzinfo=pytz.UTC)\n\n if min_timestamp is None:\n min_timestamp = datetime(2000, 1, 1, tzinfo=timezone('UTC'))\n\n aggregated_measurements = MeasurementService.get_aggregate_measurements(min_timestamp,max_timestamp)\n StatisticService.create_statistics(aggregated_measurements)", "def __init__(self):\n # 保存用户推特数据\n self.user_pool = defaultdict(UserInfo)\n self.twitter_pool = defaultdict(list)\n self.time = 0", "def __init__(self):\n OWSReport.__init__(self)\n self.stats['type'] = 'OGC:WMS'\n self.stats['operations']['GetMap'] = {}\n self.stats['operations']['GetMap']['hits'] = 0\n self.stats['operations']['GetMap']['resource'] = {}\n self.stats['operations']['GetMap']['resource']['param'] = 'layers'\n self.stats['operations']['GetMap']['resource']['list'] = {}\n self.stats['operations']['GetFeatureInfo'] = {}\n self.stats['operations']['GetFeatureInfo']['hits'] = 0\n self.stats['operations']['GetLegendGraphic'] = {}\n self.stats['operations']['GetLegendGraphic']['hits'] = 0\n self.stats['operations']['GetStyles'] = {}\n self.stats['operations']['GetStyles']['hits'] = 0\n self.stats['operations']['DescribeLayer'] = {}\n self.stats['operations']['DescribeLayer']['hits'] = 0", "def __new__(cls):\n game_engine = get_gameengine()\n if game_engine is not None:\n return game_engine\n else:\n return super(GameEngine, cls).__new__(cls)\n # end if", 
"def __init__(self):\n OWSReport.__init__(self)\n self.stats['type'] = 'OGC:WFS'\n self.stats['operations']['GetFeature'] = {}\n self.stats['operations']['GetFeature']['hits'] = 0\n self.stats['operations']['GetFeature']['resource'] = {}\n self.stats['operations']['GetFeature']['resource']['param'] = 'typename'\n self.stats['operations']['GetFeature']['resource']['list'] = {}\n self.stats['operations']['DescribeFeatureType'] = {}\n self.stats['operations']['DescribeFeatureType']['hits'] = 0", "def get_stats(ns_profnum, clear=False, **kwargs):\n global SLOCK, STATS\n SLOCK.acquire()\n st = STATS\n if clear:\n STATS['ntotal'] = 0\n STATS['rtotal'] = 0\n STATS['oktotal'] = 0\n STATS['ertotal'] = 0\n STATS['ettotal'] = 0.0\n STATS['ethigh'] = 0.0\n STATS['etlow'] = 0.0\n SLOCK.release()\n #_LOGGER.info('get_stats(): %d %f %d', st['ntotal'], st['ettotal'], st['rtotal'])\n return st", "def __enter__(self):\n self.new_session()\n return self", "def __init__(self, accessor, settings, name=None):\n assert accessor\n self._lock = threading.Lock()\n self._accessor_lock = threading.Lock()\n self._accessor = accessor\n # _json_cache associates unparsed json to metadata instances.\n # The idea is that there are very few configs in use in a given\n # cluster so the few same strings will show up over and over.\n self._json_cache_lock = threading.Lock()\n self._json_cache = {}\n\n if name is None:\n name = str(hash(self))\n self.name = name\n\n self._size = CACHE_SIZE.labels(self.TYPE, name)\n self._size.set_function(lambda: self.stats()[\"size\"])\n self._max_size = CACHE_MAX_SIZE.labels(self.TYPE, name)\n self._hits = CACHE_HITS.labels(self.TYPE, name)\n self._misses = CACHE_MISSES.labels(self.TYPE, name)", "def reset(self):\n self.stats = {}", "def make_es_worker(search_conn, es_index, es_doc_type, class_name):\n new_esbase = copy.copy(search_conn)\n new_esbase.es_index = es_index\n new_esbase.doc_type = es_doc_type\n log.info(\"Indexing '%s' into ES index '%s' doctype '%s'\",\n class_name.pyuri,\n es_index,\n es_doc_type)\n return new_esbase", "def __init__(self):\n OWSReport.__init__(self)\n self.stats['type'] = 'OGC:SOS'\n self.stats['operations']['GetObservation'] = {}\n self.stats['operations']['GetObservation']['hits'] = 0\n self.stats['operations']['GetObservation']['resource'] = {}\n self.stats['operations']['GetObservation']['resource']['param'] = 'observedproperty'\n self.stats['operations']['GetObservation']['resource']['list'] = {}\n self.stats['operations']['DescribeSensor'] = {}\n self.stats['operations']['DescribeSensor']['hits'] = 0", "def __init__(self, name):\n super(Engine, self).__init__(name)\n self._outbuffer = []", "def __init__(self):\n OWSReport.__init__(self)\n self.stats['type'] = 'OGC:WCS'\n self.stats['operations']['GetCoverage'] = {}\n self.stats['operations']['GetCoverage']['hits'] = 0\n self.stats['operations']['GetCoverage']['resource'] = {}\n self.stats['operations']['GetCoverage']['resource']['param'] = 'coverage'\n self.stats['operations']['GetCoverage']['resource']['list'] = {}\n self.stats['operations']['DescribeCoverage'] = {}\n self.stats['operations']['DescribeCoverage']['hits'] = 0", "def tc_stat_wrapper():\n\n # Default, empty TcStatWrapper with some configuration values set\n # to /path/to:\n conf = metplus_config()\n return TcStatWrapper(conf, None)", "def _new_instance(self):\n return self.__class__(self._vmodule, self._tensor_rank)", "def generate_statistics():\r\n statistics = cache.get('statistics')\r\n if statistics is None:\r\n statistics = {}\r\n 
statistics['nr_hashtags'] = ('Number of Hashtags',\r\n get_number_hashtags())\r\n statistics['nr_tokens'] = ('Number of Tokens', get_token_count())\r\n statistics['media_storage_size'] = ('Storage Folder Size (MB)',\r\n str(get_folder_size(\r\n cfg['media_storage'])))\r\n\r\n cache.set('statistics', statistics,\r\n cfg['flask_cache_timeout'] * 60)\r\n\r\n return statistics", "async def _get_connection_pool(self) -> aiopg.sa.Engine:\n if self._engine is None:\n self._engine = await aiopg.sa.create_engine(_CONNECTION_STRING)\n return self._engine", "def __init__(self, name=None):\n super(TTSEngine, self).__init__()", "def __init__(self):\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'\n .format(os.environ['HBNB_MYSQL_USER'],\n os.environ['HBNB_MYSQL_PWD'],\n os.environ['HBNB_MYSQL_HOST'],\n os.environ['HBNB_MYSQL_DB']),\n pool_pre_ping=True)\n try:\n if os.environ['HBNB_ENV'] == 'test':\n Base.metadata.drop_all(self.__engine)\n except KeyError:\n pass", "def new(self, Entity_ID = None):\n if Entity_ID is None:\n tempID = len(self.pool[\"default\"])+1\n else:\n if self.exists(Entity_ID): return None\n tempID = Entity_ID\n\n tempent = Entity(tempID)\n self.pool[\"default\"].append(tempent)\n\n return self.pool[\"default\"][-1]", "def __new__(cls):\n self = object.__new__(cls)\n self.acquired = False\n return self", "def __init__(self, config, refresh='No'):\n logging.debug(\"Initializing Neostore object\")\n self.config = config\n self.graph = self._connect2db()\n if refresh == 'Yes':\n self._delete_all()\n self.calendar = GregorianCalendar(self.graph)\n return", "def __new__(cls, *args, **kwargs):\n if cls.__instance is None:\n cls.__instance = super(CacheManagerSingleton, cls).__new__(cls)\n # Generate all ground truth data files from hard-coded data\n CacheManagerSingleton.export_all_ground_truth_data()\n return cls.__instance", "def __init__(self):\n self._data = dict() # stores the data in the context\n self._last_context = None # stores previous context if the current one is used in a with block\n self._thread_id = self._get_thread_id() # the ID of the thread that context lives in", "def get_cache_instance ( ):\n cache_strategy_instance = SimpleCacheStrategy ( )\n cache_strategy_instance.apply_cache_strategy ( threshold=50, default_timeout=100 )\n return cache_strategy_instance", "def create():\n # for clean test cases, first the available databases will be flushed\n get_redis().flushdb()\n graph = FileStructureProcessor()\n return graph.get_graph()", "def __init__(self, lifetime: int = 60):\n # maps transaction ID -> result, insertion time\n # can be made more efficient but we focus on that later\n # e.g. 
list ordered by insertion time, etc.\n self._lifetime = lifetime\n self._cache: dict[int, tuple[Union[bytes, Exception], float]] = {}", "def new_session(self):\n return self.Session()", "def __new__(cls, ctx):\n return cls.__run(cls, ctx)", "def __init__(self):\r\n\t\t\r\n\t\tself.redis = redis.Redis()\r\n\t\tself.info_to_get = ['text', 'created_at', 'user']\r\n\t\tself.search_results = {}\r\n\t\tself.raw_data_directory_name = \"raw_mining_data\"\r\n\t\tself.filtered_data_directory_name = \"filtered_mining_data\"\r\n\t\tenglish_file = pjoin( sys.path[0], \"sentiment_word_files\", \"Nielsen2010Responsible_english.csv\")\r\n\t\tself.analyzeEnglish = dict(map(lambda (w,e): (w, int(e)), \\\r\n\t\t\t\t\t\t\t\t\t[ line.strip().lower().split('\\t') for line in open(english_file) ]))\r\n\t\tself.tweets_count = 0", "def create(model: TModel, dataset: Dataset) -> aggregator.StatisticsAggregator:\n model_backend = get_backend(model)\n if model_backend == BackendType.ONNX:\n from nncf.onnx.statistics.aggregator import ONNXStatisticsAggregator\n\n return ONNXStatisticsAggregator(dataset)\n if model_backend == BackendType.OPENVINO:\n from nncf.openvino.statistics.aggregator import OVStatisticsAggregator\n\n return OVStatisticsAggregator(dataset)\n if model_backend == BackendType.TORCH:\n from nncf.torch.statistics.aggregator import PTStatisticsAggregator\n\n return PTStatisticsAggregator(dataset)\n raise RuntimeError(\n \"Cannot create backend-specific statistics aggregator because {} is not supported!\".format(model_backend)\n )", "def __enter__(self):\n self.start = time.time()\n return self", "def stat():\n from .Stat import Stat\n\n return Stat", "def grid_stat_wrapper():\n\n conf = metplus_config()\n return GridStatWrapper(conf, None)", "def __init__(self, **kwargs):\n creator = kwargs.pop(\"creator\", None)\n if not creator:\n import MySQLdb\n creator = MySQLdb\n mincached = kwargs.pop(\"mincached\", 2)\n maxcached = kwargs.pop(\"maxcached\", 10)\n maxshared = kwargs.pop(\"maxshared\", 10)\n maxconnections = kwargs.pop(\"maxconnections\", 20)\n blocking = kwargs.pop(\"blocking\", 0)\n reset = kwargs.pop(\"reset\", True)\n maxusage = kwargs.pop(\"maxusage\", 0)\n setsession = kwargs.pop(\"setsession\", [\"set autocommit = 0\"])\n ping = kwargs.pop(\"ping\", 1)\n\n self._pool = PooledDB(creator=creator, mincached=mincached, maxcached=maxcached,\n maxshared=maxshared, maxconnections=maxconnections,\n blocking=blocking, maxusage=maxusage,reset=reset,\n setsession=setsession, ping=ping, **kwargs)", "def reset():\n global HEALTH_AGGREGATOR\n\n HEALTH_AGGREGATOR.cleanup()\n HEALTH_AGGREGATOR = HealthAggregator()\n\n return jsonify({}), 200", "def __init__(self, publish_interval, health_stat_plugin):\n Thread.__init__(self)\n self.log = LogFactory().get_log(__name__)\n self.publish_interval = publish_interval\n \"\"\":type : int\"\"\"\n self.terminated = False\n self.publisher = HealthStatisticsPublisher()\n \"\"\":type : HealthStatisticsPublisher\"\"\"\n # If there are no health stat reader plugins, create the default reader instance\n self.stats_reader = health_stat_plugin if health_stat_plugin is not None else DefaultHealthStatisticsReader()", "def Empty():\n return Container(name='(empty)',\n metadata={},\n section_sizes={},\n metrics_by_file={})", "def single_threaded_session():\n return make_session(num_cpu=1)", "def __enter__(self) -> \"AbstractMetricsPusher\":\n self.start()\n return self", "def __init__(self, context, laststate=False):\n self.context = self.clone_context(context)\n 
self.global_filter = None\n self._rebalancetime = None\n\n self._swarm = None\n self._swarm_stats = None\n self._swarm_inposition = None\n\n self._picked_swarm = None\n self._picked_inposition = None\n self._picked_exposure = None\n\n self._last_date = None\n self._last_exoquote = None\n self._last_exposure = None\n self._last_members_list = None\n self._last_rebalance_date = None\n self._last_delta = None\n self._last_prev_exposure = None\n self._max_exposure = None\n\n self._swarm_series = None\n self._delta = None\n\n self._islast_state = laststate\n\n strategy_settings = self.context['strategy']\n # Initialize strategy class\n self.strategy = strategy_settings['class'](self.context)\n\n self._swarm_avg = None\n self._swarm = None\n self._swarm_exposure = None", "def __enter__(self):\n if self._transaction_count == 0:\n self._db_copy = self.db._read()\n self._transaction_count += 1\n return self", "def new(cls):\n return cls()", "def __init__(self):\n self._inst = {}", "def new_session(self):\n return self._SessionLocal()", "def __new__(cls, *args, **kwargs):\n if not cls.__instances__:\n cls.__instances__ = super().__new__(cls, *args, **kwargs)\n cls._thread_runing = True\n cls._thread = cls._run_monitor_thread()\n return cls.__instances__", "def __new__(cls):\n self = object.__new__(cls)\n self.synced = False\n self.value = None\n return self", "def _generate_trading_instances(self):\n print(\"Creating DataHandler, Strategy, Portfolio, and ExecutionHandler for\")\n\n # Set internal data members equal to the classes we passed in earlier, along with necessary parameters.\n # https://softwareengineering.stackexchange.com/questions/131403/what-is-the-name-of-in-python/131415\n self.data_handler = self.data_handler_class(self.events, self.csv_dir, self.symbol_list)\n self.strategy = self.strategy_class(self.data_handler, self.events)\n self.portfolio = self.portfolio_class(self.data_handler, self.events, self.start_date, self.initial_capital)\n self.execution_handler = self.execution_handler_class(self.events) # The Event Queue sent to ExecutionHandler", "def __enter__(self):\n\n self.create()\n return super().__enter__()", "def _init_local_processes_stats_publisher(self):\n stats = self._local_processes_stats\n # Init cache\n stats.cache = StatsCache(stats.cache_size)\n # Init source\n stats_source = ProcessesStatsSource()\n # Configure stats publishing\n stats.publisher = StatsPublisher(stats_source, stats.update_interval)\n stats.publisher.subscribe(stats.cache)\n self._publishers.append(stats.publisher)\n # Configure handlers\n self._routes['/stats/local/processes/cache'] = HandlerInfo(\n handler_class=CachedStatsHandler,\n init_kwargs=dict(stats_cache=stats.cache)\n )\n self._routes['/stats/local/processes/current'] = HandlerInfo(\n handler_class=CurrentStatsHandler,\n init_kwargs=dict(stats_source=stats_source)\n )", "def _init_pool(self, cfg: dict):\n pool = PyMysqlPoolBase(**cfg)\n return pool", "def open(self):\n super(MemoryCache, self).open()\n\n def _timer():\n # Use a custom timer to try to spread expirations. 
Within one instance it\n # won't change anything but it will be better if you run multiple instances.\n return time.time() + self.__ttl * random.uniform(-0.25, 0.25)\n\n self.__cache = cachetools.TTLCache(\n maxsize=self.__size, ttl=self.__ttl, timer=_timer\n )", "def _createModuleObj(self):\n ModuleTimeHistory.__init__(self)", "def __init__(self, create_index=True, online=True):\n self.online = online\n index_exists = self.index_exists()\n if create_index and not index_exists:\n self.create_index()", "def _new_instance(self):\n return self.__class__(self._fmodule, self._tensor_rank)", "def stats(self):\r\n return {}", "def __enter__(self):\n return self._get_storage().__enter__()", "def get_table(base, engine):\n class w1_temp_table(base):\n __tablename__ = 'w1_temp'\n __table_args__ = {\"useexisting\": True}\n\n id = sa.Column(sa.types.Integer, primary_key=True)\n logger_id = sa.Column(sa.types.Integer)\n value = sa.Column(sa.types.String)\n datetime = sa.Column(sa.types.DateTime)\n return w1_temp_table", "def __init__(self):\n super(_SerializedEventHeap, self).__init__()\n self._heap = []\n self.data_size = 0", "def __initStats(self):\n players = self.teamparser.getPlayers()\n try:\n stats = players[(self.team, self.position)]\n except KeyError, err:\n stats = (0, 0, 0, 0, 0, 0)\n raise TypeError, \"Invalid Team/Position: \" + self.team\n self.max = int(stats[0]) #maximum\n self.ma = int(stats[1]) #movement\n self.st = int(stats[2]) #strength\n self.ag = int(stats[3]) #agility\n self.av = int(stats[4]) #armor value\n self.costs = int(stats[5]) #costs\n self.injury = 0 #injury\n self.ssp = 0 #starplayerpoints\n self.touchdowns = 0 #touchdown\n self.completions = 0 #completions\n self.interceptions = 0 #interceptions\n self.casualties = 0 #casualties\n self.mvpawards = 0 #most valuable player awards", "def create_run_tracker(info_dir=None):\r\n # TODO(John Sirois): Rework uses around a context manager for cleanup of the info_dir in a more\r\n # disciplined manner\r\n info_dir = info_dir or safe_mkdtemp()\r\n run_tracker = RunTracker(info_dir)\r\n report = Report()\r\n run_tracker.start(report)\r\n return run_tracker", "def initial_meter_statistics(apps, schema_editor):\n MeterStatistics = apps.get_model('dsmr_datalogger', 'MeterStatistics')\n DsmrReading = apps.get_model('dsmr_datalogger', 'DsmrReading')\n\n # We can't (and shouldn't) use Solo here.\n stats = MeterStatistics.objects.create() # All fields are NULL in database, by design.\n assert MeterStatistics.objects.exists()\n\n try:\n # Just use the latest DSMR reading, if any.\n latest_reading = DsmrReading.objects.all().order_by('-timestamp')[0]\n except IndexError:\n return\n\n stats.electricity_tariff = latest_reading.electricity_tariff\n stats.power_failure_count = latest_reading.power_failure_count\n stats.long_power_failure_count = latest_reading.long_power_failure_count\n stats.voltage_sag_count_l1 = latest_reading.voltage_sag_count_l1\n stats.voltage_sag_count_l2 = latest_reading.voltage_sag_count_l2\n stats.voltage_sag_count_l3 = latest_reading.voltage_sag_count_l3\n stats.voltage_swell_count_l1 = latest_reading.voltage_swell_count_l1\n stats.voltage_swell_count_l2 = latest_reading.voltage_swell_count_l2\n stats.voltage_swell_count_l3 = latest_reading.voltage_swell_count_l3\n stats.save()", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def open(self):\r\n self._init_stats()\r\n return self.__trans.open()" ]
[ "0.6012898", "0.54610676", "0.5434459", "0.5365949", "0.5345452", "0.53324264", "0.5254955", "0.5204557", "0.5093632", "0.50626856", "0.5004467", "0.498278", "0.49784213", "0.4975857", "0.49731496", "0.49413025", "0.49334928", "0.49183187", "0.49155334", "0.48889562", "0.48528484", "0.48501238", "0.48281246", "0.48213986", "0.48183307", "0.4807528", "0.48038778", "0.47972417", "0.47818562", "0.47612146", "0.47572654", "0.4754027", "0.4751779", "0.4746518", "0.4746509", "0.47376403", "0.47287735", "0.4727484", "0.47271544", "0.47113094", "0.4710821", "0.47056285", "0.46858755", "0.46821922", "0.46600673", "0.46520022", "0.46483654", "0.46482888", "0.4647542", "0.46469098", "0.46374536", "0.4632999", "0.4632792", "0.46295866", "0.46284005", "0.46269456", "0.46265864", "0.46236548", "0.46183944", "0.46167243", "0.46141404", "0.46110973", "0.46074614", "0.459869", "0.4594149", "0.45720103", "0.45667917", "0.45557636", "0.45546347", "0.45536262", "0.4550131", "0.4544091", "0.45414013", "0.45355815", "0.45200574", "0.45192948", "0.45170444", "0.45016652", "0.45014402", "0.45014146", "0.45009196", "0.44961432", "0.44957238", "0.44913545", "0.4489568", "0.44846746", "0.44842175", "0.44842076", "0.4479024", "0.44771558", "0.4476004", "0.44755167", "0.44703248", "0.44579223", "0.44554898", "0.44524682", "0.4448781", "0.44460458", "0.44460458", "0.44418684" ]
0.53285325
6