query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
as reduce2lat_old, but uses the averager module for greater capabilities | def reduce2lat( mv, vid=None ):
if vid==None: # Note that the averager function returns a variable with meaningless id.
vid = 'reduced_'+mv.id
axes = allAxes( mv )
axis_names = [ a.id for a in axes if a.id!='lat' ]
axes_string = '('+')('.join(axis_names)+')'
avmv = averager( mv, axis=axes_string )
avmv.id = vid
avmv.units = mv.units
return avmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reduce2lat_old( mv, vid=None ):\n # >>> For now, I'm assuming that the only axes are time,lat,lon\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n # If I base another reduction function on this one, it's important to note that an average\n # in the lat direction will unavoidably need weights, because of the geometry.\n\n if vid==None:\n vid = 'reduced_'+mv.id\n time_axis, lat_axis, lon_axis = tllAxes( mv )\n\n mvta = timeave_old( mv )\n\n zm = numpy.zeros( mvta.shape[0] )\n for i in range(len(lat_axis)):\n for j in range(len(lon_axis)):\n zm[i] += mvta[i,j]\n zm[i] /= len(lon_axis)\n zmv = cdms2.createVariable( zm, axes=[lat_axis], id=vid )\n return zmv",
"def reduce2latlon( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' ]\n axes_string = '('+')('.join(axis_names)+')'\n for ax in axes:\n # The averager insists on bounds. Sometimes they don't exist, especially for obs.\n if ax.id!='lat' and ax.id!='lon' and not hasattr( ax, 'bounds' ):\n ax.setBounds( ax.genGenericBounds() )\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def reduce2scalar_zonal_old( mv, latmin=-90, latmax=90, vid=None ):\n # For now, I'm assuming that the only axes are time,lat,lon - so that zm is a scalar.\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n if vid==None:\n vid = 'reduced_'+mv.id\n time,lat,lon = tllAxes(mv)\n if hasattr(mv.parent,'variables'):\n fil = mv.parent # mv is a fileVariable and fil is a file.\n lat_bnds = fil[lat.bounds]\n else:\n lataxis = latAxis(mv) # mv is a TransientVariable\n lat_bnds = lataxis._bounds_\n\n mvta = timeave_old( mv )\n\n # In computing the average, we use area weighting.\n # Sometimes the area is available in cell_measures, but for now I'll just use the backup method:\n # The area of a lonlat cell is R^2*delta(lon)*delta(sin(lat)).\n # With equally spaced lon, we don't need delta(lon) for weights.\n # I'll assume that lat,lon are in degrees, which is the only way I've ever seen them.\n wgtsum = 0\n zm = 0\n for i,lati in enumerate(lat):\n # The following test could be sped up a lot, because lat[i] is ordered...\n # >>> to do: partial overlaps\n if latmin<=lati and lati<latmax:\n latlo = lat_bnds[i,0]\n lathi = lat_bnds[i,1]\n wgti = sin(radians(lathi))-sin(radians(latlo))\n zi = 0.0\n for j in range(len(lon)):\n zi += mvta[i,j]\n zi *= wgti\n wgtsum += wgti*len(lon)\n zm += zi\n zm /= wgtsum\n # zm is a scalar, so createVariable gets no axes argument:\n zmv = cdms2.createVariable( zm, id=vid )\n return zmv",
"def reduce2levlat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def reduce2scalar_zonal( mv, latmin=-90, latmax=90, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n ilat = None\n for i,ax in enumerate(axes):\n if ax.id=='lat': ilat = i\n # reduce size of lat axis to (latmin,latmax)\n # Let's home a direct search will be fast enough:\n lataxis = latAxis( mv )\n lmin = -1\n lmax = len(lataxis)\n if lataxis[0]>=latmin: lmin = 0\n if lataxis[-1]<=latmax: lmax = len(lataxis)-1\n if lmin==-1 or lmax==len(lataxis):\n for l,ax in enumerate(lataxis):\n if lmin==-1 and ax>=latmin: lmin = max( 0, l )\n if lmax==len(lataxis) and ax>=latmax: lmax = min( l, len(lataxis) )\n lataxis_shrunk = lataxis.subaxis(lmin,lmax)\n mv2shape = list(mv.shape)\n mv2shape[ilat] = lmax-lmin+1\n axes[ilat] = lataxis_shrunk\n mvd1 = numpy.delete( mv, slice(0,lmin), ilat )\n mvdata = numpy.delete( mvd1, slice(lmax-lmin,len(lataxis)-lmin), ilat )\n mv2 = cdms2.createVariable( mvdata, axes=axes )\n\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n avmv = averager( mv2, axis=axes_string )\n avmv.id = vid # Note that the averager function returns a variable with meaningless id.\n ammv.units = mv.units\n\n return avmv",
"def reduce2latlon_seasonal( mv, seasons=seasonsyr, vid=None ):\n # This differs from reduce2lat_seasonal only in the line \"axis_names =\"....\n # I need to think about how to structure the code so there's less cut-and-paste!\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n mvseas = seasons.climatology(mv)\n \n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n for axis in mvseas.getAxisList():\n if axis.getBounds() is None:\n axis._bounds_ = axis.genGenericBounds()\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n if avmv is None: return avmv\n avmv.id = vid\n if hasattr(mv,'units'): avmv.units = mv.units\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def gavg(idata):\n\t\n\twgt1=np.cos(np.deg2rad(idata.lat))*(idata*0+1)\n\tga=(wgt1*idata).sum(dim=['lat','lon'])/wgt1.sum(dim=['lat','lon'])\n\n\treturn ga",
"def geo_m_v2(data_array):\n r = 6378.137 #promien ziemi w km\n delta = np.zeros(data_array.size//7-1)\n alo = data_array[0][1]\n ala = data_array[0][2]\n count = 0\n for row in data_array[1:]:\n dLat = (row[2] - ala) * math.pi/180.0\n dLon = (row[1] - alo) * math.pi/180.0\n a = math.sin(dLat/2.0)**2 + math.cos(ala * math.pi/180.0) * math.cos(row[2] * math.pi/180.0)\\\n * math.sin(dLon/2.0)**2\n delta[count] = r * 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))#w km\n count += 1\n alo = row[1]\n ala = row[2]\n return delta",
"def reduce2levlat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n\n if timeax.units=='months':\n # Special check necessary for LEGATES obs data, because\n # climatology() won't accept this incomplete specification\n timeax.units = 'months since 0001-01-01'\n mvseas = seasons.climatology(mv)\n\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n avmv.id = vid\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n\n return avmv",
"def coarsen_byavg(invar,lat,lon,deg,tol,latweight=True,verbose=True,ignorenan=False):\n\n # Make new Arrays\n lon5 = np.arange(0,360+deg,deg)\n lat5 = np.arange(-90,90+deg,deg)\n \n \n # Set up latitude weights\n if latweight:\n _,Y = np.meshgrid(lon,lat)\n wgt = np.cos(np.radians(Y)) # [lat x lon]\n invar *= wgt[None,:,:] # Multiply by latitude weight\n \n # Get time dimension and preallocate\n nt = invar.shape[0]\n outvar = np.zeros((nt,len(lat5),len(lon5)))\n \n # Loop and regrid\n i=0\n for o in range(len(lon5)):\n for a in range(len(lat5)):\n lonf = lon5[o]\n latf = lat5[a]\n \n lons = np.where((lon >= lonf-tol) & (lon <= lonf+tol))[0]\n lats = np.where((lat >= latf-tol) & (lat <= latf+tol))[0]\n \n varf = invar[:,lats[:,None],lons[None,:]]\n \n if latweight:\n wgtbox = wgt[lats[:,None],lons[None,:]]\n if ignorenan:\n varf = np.nansum(varf/np.nansum(wgtbox,(0,1)),(1,2)) # Divide by the total weight for the box\n else:\n varf = np.sum(varf/np.sum(wgtbox,(0,1)),(1,2)) # Divide by the total weight for the box\n \n \n else:\n if ignorenan: \n varf = np.nanmean(varf,axis=(1,2))\n else:\n varf = varf.mean((1,2))\n \n outvar[:,a,o] = varf.copy()\n i+= 1\n msg=\"\\rCompleted %i of %i\"% (i,len(lon5)*len(lat5))\n print(msg,end=\"\\r\",flush=True)\n return outvar,lat5,lon5",
"def coldaverage( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n avenames = names # create an output list to average\n\n# assume only a limited range of galactic latitudes are available\n# not range above +/-60.\n use60Range = False\n minGlat = 90. # initialize to extremea\n maxGlat = -90.\n maxEl = -90.\n minEl = 90.\n ncold = 0\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'AST': # speed up by only looking at astronomy files\n continue\n \n rs.read_spec_ast(filename) # An observation, read values\n\n if rs.telel < 0: # only working with observations, skip elevation <= 0.\n continue\n\n maxGlat = max( rs.gallat, maxGlat)\n minGlat = min( rs.gallat, minGlat)\n maxEl = max( rs.telel, maxEl)\n minEl = min( rs.telel, minEl)\n # end for all files loop, looking for max el and latitude ranges\n\n # if any high galactic latitudes, use only above +/-60d \n if minGlat < -60. or maxGlat > 60.:\n minGlat = -60.\n maxGlat = 60.\n else: # else no high galactic latitude data\n # use highest galactic latitudes - +/-5.degrees\n if -minGlat > maxGlat: # if negative latitudes higher\n minGlat = minGlat + 5.\n maxGlat = 90.\n else: # else positive latitudes higher\n maxGlat = maxGlat - 5.\n minGlat = -90.\n\n # only use the elevations above 60 degrees, if any\n if maxEl > 60.:\n maxEl = 60.\n else:\n maxEl = maxEl - 10. #else must use highest elevations available\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if rs.telel < maxEl:\n continue\n\n if rs.gallat > maxGlat or rs.gallat < minGlat:\n avenames[ncold] = filename\n ncold = ncold + 1\n # end of for all files loop\n\n ncold, cold = average( avenames[0:ncold]) # now use generic program for averages\n if ncold < 1:\n print 'No Cold load files; can not calibrate!'\n exit()\n\n return ncold, cold, minEl, maxEl",
"def reduce2lat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n for ax in mv.getAxisList():\n if ax.getBounds() is None:\n ax._bounds_ = ax.genGenericBounds()\n timeax = timeAxis(mv)\n if timeax.units=='months':\n # Special check necessary for LEGATES obs data, because\n # climatology() won't accept this incomplete specification\n timeax.units = 'months since 0001-01-01'\n mvseas = seasons.climatology(mv)\n if mvseas is None:\n # Among other cases, this can happen if mv has all missing values.\n return None\n \n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n avmv.id = vid\n\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def _sum_over_lat_lon(arr):\n return arr.sum(internal_names.LAT_STR).sum(internal_names.LON_STR)",
"def geo_transform(self):\n pass",
"def fAvg(H, r):\n p = r[['start_lat', 'start_lng']]\n p.columns = ['lat', 'lng']\n d = r[['end_lat', 'end_lng']]\n d.columns = ['lat', 'lng']\n\n return f(H, p, d).sum()",
"def geo_m_accumulate(data_array):\n dst = DataOperation.geo_m(data_array)\n sum = 0\n count = 0\n data = np.zeros(dst.size)\n for d in dst:\n sum += d\n data[count] = sum\n count += 1\n return data",
"def avg_equivlat(in_field, pv_field, n_lon, n_lat):\n # constants\n PI = np.pi\n\n # grid characteristics\n n_grid = int(n_lon)*int(n_lat)\n phi = PI/n_lat\n phih = 0.5*PI - phi*np.arange(n_lat+1)\n\n area_field = np.zeros([n_lon, n_lat])\n for j in range(n_lat):\n area_field[:, j] = 2*PI*(np.sin(phih[j]) - np.sin(phih[j+1]))/n_lon\n\n # reorder the fields\n ord_ind = np.argsort(pv_field, axis=None)[::-1]\n infield_ordered = in_field.flatten()[ord_ind]\n pv_ordered = pv_field.flatten()[ord_ind]\n area_ordered = area_field.flatten()[ord_ind]\n\n # areas of equivalent latitude bands for output\n # sum area along latitude bands\n area_band = np.sum(area_field, axis = 0)\n infield_eq = np.zeros(n_lat)\n\n ll = 0\n area_now = 0.0\n infield_tot = 0.0\n\n # loop to average in equivalent latitude bands\n for nn in range(n_grid):\n area_now += area_ordered[nn]\n infield_tot += area_ordered[nn]*infield_ordered[nn]\n if (area_now >= area_band[ll] or (nn == n_grid-1)):\n infield_tot -= (area_now - area_band[ll])*infield_ordered[nn]\n infield_eq[ll] = infield_tot/area_band[ll]\n infield_tot = (area_now - area_band[ll])*infield_ordered[nn]\n area_now -= area_band[ll]\n ll += 1\n \n # in field is averaged along eq. latitude bands from 90N - 90S\n # legacy from times when we were mostly interested in NH \n lat = PI/2 - np.arange(n_lat)*phi \n return (lat, infield_eq)",
"def _get_lat_avg(self, report):\n match = re.search(\"\\s*lat\\s*\\((\\w+)\\).*avg\\=\\s*(\\d+\\.{0,1}\\d*)\",\n report)\n if match:\n unit = match.group(1)\n value = float(match.group(2))\n if unit.lower() == \"usec\":\n value = value / 1000\n return value",
"def weighted_loc2(df, lat, lon, by, val2avg):\n import pandas as pd\n import swifter\n df_use = df.loc[:, [(lat), (lon), (by), val2avg]]\n df_use.loc[:, 'lat_wt'] = df_use.swifter.apply(lambda y: y[lat] * y[val2avg], axis=1).copy()\n df_use.loc[:, 'lon_wt'] = df_use.swifter.apply(lambda y: y[lon] * y[val2avg], axis=1).copy()\n\n sumwts = pd.DataFrame(df_use.copy().groupby(str(by)).apply(lambda y: sum_values(y[str(val2avg)])), columns={'totwts'})\n sumwts.loc[:, 'min_reads'] = sumwts.copy().index\n sumwts = sumwts.reset_index(drop=True).rename(columns={\"min_reads\": str(by)})\n totlats = pd.DataFrame(df_use.groupby(str(by)).apply(lambda y: sum_values(y['lat_wt'])), columns=['totlats'])\n totlats['min_reads'] = totlats.index.copy()\n totlats = totlats.reset_index(drop=True)\n totlats = totlats.rename(columns={\"min_reads\": str(by)})\n totlons = pd.DataFrame(df_use.groupby(str(by)).apply(lambda y: sum_values(y['lon_wt'])), columns=['totlons'])\n totlons['min_reads'] = totlons.index.copy()\n totlons = totlons.reset_index(drop=True)\n totlons = totlons.rename(columns={\"min_reads\": str(by)})\n df_use = pd.merge(totlats, df_use, on=str(by))\n df_use = pd.merge(totlons, df_use, on=str(by))\n df_use = pd.merge(sumwts, df_use, on=str(by))\n df_use.loc[:, 'overall_LON'] = df_use.swifter.apply(lambda y: y['totlons'] / y['totwts'], axis=1)\n df_use.loc[:, 'overall_LAT'] = df_use.swifter.apply(lambda y: y['totlats'] / y['totwts'], axis=1)\n return (df_use.loc[:, [(str(by)), ('overall_LON'), ('overall_LAT')]].drop_duplicates().rename(\n columns={'overall_LON': str(lon), 'overall_LAT': str(lat)}))",
"def winter_avg(var_nc,lat_slice=None,lon_slice=None): \n #\n # accumulate in shape [plev,lat,lon]\n #\n # use the whole array if slice objects are missing\n #\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n print \"in winter avg: \",lat_slice,lon_slice\n else:\n num_lats=lat_slice.stop - lat_slice.start\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n #\n # year 0 is special case since it doesn't have a december\n #\n djf0=np.array([0,1],dtype=np.int32) #january and feburary\n the_slice=var_nc[djf0,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0) #average over the two months\n accumulate+=the_slice\n num_years=var_nc.shape[0]//12\n #\n # now year 1 has year 0's december\n #\n djf=np.array([11,12,13],dtype=np.int32)\n #\n # iterate one year less because we've alread\n # done year zero as a special case\n #\n for the_year in np.arange(0,num_years-1):\n the_slice=var_nc[djf,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n djf=djf+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg",
"def geo_m(data_array):\n earth_r = 12756.490 #srednica Ziemi na rowniku [km]\n delta = np.zeros(data_array.size//7-1)\n alo = data_array[0][1]\n ala = data_array[0][2]\n count = 0\n for row in data_array[1:]:\n a = (row[1] - alo) * math.cos(ala*math.pi/180.0)\n b = (row[2] - ala)\n delta[count] = math.sqrt(a*a + b*b)*math.pi*earth_r/36.0*100# wynik w m\n count += 1\n alo = row[1]\n ala = row[2]\n return delta",
"def aggregate(map_to_latent: str, latent_features: Union[GVec, torch.Tensor]) -> GVec:\n if map_to_latent.lower() == \"sum\":\n return GVec(\n {\n weight: torch.sum(value, dim=-3, keepdim=True).unsqueeze(dim=-3)\n for weight, value in latent_features.items()\n }\n )\n\n elif map_to_latent.lower() in (\"mean\", \"average\"):\n return GVec(\n {\n weight: torch.mean(value, dim=-3, keepdim=True)\n for weight, value in latent_features.items()\n }\n )\n\n elif map_to_latent.lower() == \"max\":\n p4 = latent_features[(1, 1)]\n return GVec(\n {\n weight: get_max_features(value)\n for weight, value in latent_features.items()\n }\n )\n\n elif map_to_latent.lower() == \"min\":\n p4 = latent_features[(1, 1)]\n return GVec(\n {\n weight: get_min_features(value)\n for weight, value in latent_features.items()\n }\n )\n\n elif map_to_latent.lower() == \"mix\": # will be processed in the next step\n return latent_features\n\n # simply add different latent features\n # TODO: learnable parameters based on Lorentz scalars\n elif \"+\" in map_to_latent.lower():\n if \"mix\" in map_to_latent.lower():\n raise NotImplementedError(\n \"Adding with mix aggregation not implemented yet.\"\n )\n methods = map_to_latent.split(\"+\")\n if len(methods) < 1:\n raise ValueError(f\"No aggregation method specified: {map_to_latent}.\")\n weights = latent_features.keys()\n features = [aggregate(method, latent_features) for method in methods]\n\n return GVec(\n {\n weight: sum([feature[weight] for feature in features]) / len(methods)\n for weight in weights\n }\n )\n\n elif \"&\" in map_to_latent:\n if \"mix\" in map_to_latent.lower():\n raise NotImplementedError(\n \"Concatenating with mix aggregation not implemented yet.\"\n )\n methods = map_to_latent.split(\"&\")\n if len(methods) < 1:\n raise ValueError(f\"No aggregation method specified: {map_to_latent}.\")\n weights = latent_features.keys()\n features = [aggregate(method, latent_features) for method in methods]\n return GVec(\n {\n weight: torch.cat([feature[weight] for feature in features], dim=3)\n for weight in weights\n }\n )\n\n else:\n raise NotImplementedError(f\"{map_to_latent} is not implemented.\")",
"def restrict_lat( mv, latmin, latmax ):\n if latmin==-90: latmin = -91 # just to make sure\n if latmax==90: latmax = 91\n\n # axes\n latax,idx = latAxis2(mv)\n if latax is None: return None\n imin = min( [i for i in range(len(latax)) if latax[i]>=latmin and latax[i]<=latmax ] )\n imax = max( [i for i in range(len(latax)) if latax[i]>=latmin and latax[i]<=latmax ] )\n newlatax = latax.subaxis( imin, imax+1 )\n # TO DO: use latax.bounds (if present) for newlatax.bounds\n # At the moment, I'm working with data for which latax.bounds doesn't exist.\n # At the moment, we don't need bounds. This would get us through if necessary:\n # newlatax.bounds = newlatax.genGenericBounds()\n newaxes = list( allAxes(mv) ) # shallow copy\n newaxes[idx] = newlatax\n\n # shrink the data to match the shrunk lat axis\n newmv_shape = list( mv.shape )\n newmv_shape[idx] = imax+1 - imin\n if imin>0:\n nd = numpy.delete( mv.data, slice(0,imin), idx ) # doesn't change mv\n else:\n nd = mv\n lenidx = nd.shape[idx]\n if lenidx > newmv_shape[idx]:\n newdata = numpy.delete( nd.data, slice(imax+1-imin,lenidx), idx )\n else:\n newdata = nd\n\n # new variable\n newmv = cdms2.createVariable( newdata, copy=True, axes=newaxes, id=mv.id )\n newmv.units = mv.units\n return newmv",
"def _maping(x,y,l,a):\n newx = (x**2 *(l* ((x**2 + y**2)**(a/2) - 1) + 2) - l * y**2 *((x**2 + y**2)**(a/2) - 1))/(x**2 + y**2) \n newy = (2 * x* y *(l* ((x**2 + y**2)**(a/2) - 1) + 1))/(x**2 + y**2)\n return newx, newy",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def extract_loc(ref_lon, ref_lat, tlon, tlat, var):\n\n if var.ndim == 3: # 3D variable\n zmax, imax, jmax = var.shape\n threeD = True\n elif var.ndim == 2: # 2D variable\n imax, jmax = var.shape\n threeD = False\n else:\n print 'extract_loc: check variable dimensions'\n return\n\n # find the indices of the 4 model grid points around the location\n Ilist, Jlist = find_stn_idx(ref_lon, ref_lat, tlon, tlat)\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n dist[dist==0] = 1.e-15 # avoid division by zero\n\n # arrays to store weights and data to be averaged\n if threeD: # 3D variable\n wghts = MA.zeros((zmax,len(Ilist)*len(Jlist)),float)\n data = MA.zeros((zmax,len(Ilist)*len(Jlist)),float)\n if MA.isMA(var): # mask weights\n dist_m = MA.array(N.resize(dist,var.shape),mask=var.mask)\n else:\n dist_m = N.array(N.resize(dist,var.shape))\n else: # 2D variable\n wghts = MA.zeros((len(Ilist)*len(Jlist)),float)\n data = MA.zeros((len(Ilist)*len(Jlist)),float)\n if MA.isMA(var):\n dist_m = MA.array(dist,mask=var.mask) # mask weights\n else:\n dist_m = N.array(dist)\n\n # get the 4 model grid points and compute weights\n n = 0\n for i in Ilist:\n for j in Jlist:\n wghts[...,n] = 1./dist_m[...,i,j]\n data[...,n] = var[...,i,j]\n n += 1\n\n # compute weighted average\n wavg = MA.average(data,axis=-1,weights=wghts)\n return wavg",
"def geo_average(self, returns):\r\n return (1 + returns).prod() ** (self.day / len(returns)) - 1",
"def make_average(self, arr):\n\n if not self.degen:\n self.get_degen()\n\n nkpt, nband = arr.shape[-2:]\n \n for ikpt in range(nkpt):\n for group in self.degen[ikpt]:\n average = copy(arr[...,ikpt,group[0][1]])\n for ispin, iband in group[1:]:\n average += arr[...,ikpt,iband]\n \n average /= len(group)\n for ispin, iband in group:\n arr[...,ikpt,iband] = average\n \n return arr",
"def zonal_avg(data,Log=False):\n print 'computing zonal average'\n # get lat and lon for new regular grid\n# fpin = Nio.open_file('/home/ivan/Python/data/lat_t.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/lat_t.nc','r')\n lat_t = fpin.variables['lat_t'][:]\n lat_t_edges = fpin.variables['lat_t_edges'][:]\n fpin.close()\n# fpin = Nio.open_file('/home/ivan/Python/data/gx3v5.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/gx3v5.nc','r')\n lon_t = N.sort(fpin.variables['TLONG'][0,:])\n ulon = N.sort(fpin.variables['ULONG'][0,:])\n lon_t_edges = N.concatenate((ulon,ulon[0,N.newaxis]+360.),0)\n # get gx3v5 lat and lon\n tlon = fpin.variables['TLONG'][:]\n tlat = fpin.variables['TLAT'][:]\n fpin.close()\n\n # compute area of cells in new regular grid\n area = grid_area(lon_t_edges,lat_t_edges)\n\n nlat = lat_t.shape[0]\n nlon = lon_t.shape[0]\n\n if data.ndim == 3:\n new_data = MA.zeros((data.shape[0],nlat,nlon),dtype=float)\n elif data.ndim == 2:\n new_data = MA.zeros((nlat,nlon),dtype=float)\n else:\n print 'Check field dimensions'\n sys.exit()\n\n # geometric mean?\n if Log:\n work = MA.log(data)\n else:\n work = data\n\n # remap data to new regular grid\n for i in range(nlat):\n #print 'lat = %.2f'%(lat_t[i])\n for j in range(nlon):\n new_data[:,i,j] = extract_loc(lon_t[j],lat_t[i],tlon,tlat,work)\n\n # compute zonal average\n if Log:\n za_data = (MA.exp(MA.average(new_data,axis=-1,\n weights=N.resize(area,new_data.shape))))\n else:\n za_data = (MA.average(new_data,axis=-1,\n weights=N.resize(area,new_data.shape)))\n\n return za_data, lat_t"
] | [
"0.71746695",
"0.7159011",
"0.693063",
"0.6655468",
"0.6433842",
"0.60125995",
"0.59771603",
"0.5941421",
"0.5817719",
"0.57721925",
"0.57491624",
"0.57213616",
"0.5685925",
"0.5673032",
"0.559728",
"0.5517831",
"0.544977",
"0.5412728",
"0.53823274",
"0.53628594",
"0.5176197",
"0.51366454",
"0.51361156",
"0.5086621",
"0.5076991",
"0.5076991",
"0.50383455",
"0.502188",
"0.5001558",
"0.4986504"
] | 0.7389955 | 0 |
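The `document_rank` column appears to count how many negatives outscore the positive document: here the best negative (0.71746695) stays below the document score (0.7389955), giving rank 0, while in the next row one negative edges past the document, giving rank 1. The sketch below reproduces that reading for a single row; the interpretation is inferred from the scores shown rather than from a documented schema, and the abbreviated row dict is only illustrative.

```python
# Minimal sketch: recompute document_rank under the assumption that it counts
# negatives whose score is strictly higher than the positive document's score.
# Scores are stored as strings in the dump above, so convert before comparing.

def document_rank(document_score, negative_scores):
    doc = float(document_score)
    return sum(1 for s in negative_scores if float(s) > doc)

row = {  # abbreviated stand-in for the row above
    "document_score": "0.7389955",
    "negative_scores": ["0.71746695", "0.7159011", "0.693063"],  # truncated
}

print(document_rank(row["document_score"], row["negative_scores"]))  # -> 0
```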
as reduce2lat, but averaging reduces coordinates to (lev,lat) | def reduce2levlat( mv, vid=None ):
if vid==None: # Note that the averager function returns a variable with meaningless id.
vid = 'reduced_'+mv.id
if levAxis(mv) is None: return None
if latAxis(mv) is None: return None
axes = allAxes( mv )
timeax = timeAxis(mv)
if timeax.getBounds()==None:
timeax._bounds_ = timeax.genGenericBounds()
axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' ]
axes_string = '('+')('.join(axis_names)+')'
avmv = averager( mv, axis=axes_string )
avmv.id = vid
avmv.units = mv.units
return avmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reduce2lat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def gavg(idata):\n\t\n\twgt1=np.cos(np.deg2rad(idata.lat))*(idata*0+1)\n\tga=(wgt1*idata).sum(dim=['lat','lon'])/wgt1.sum(dim=['lat','lon'])\n\n\treturn ga",
"def reduce2latlon( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' ]\n axes_string = '('+')('.join(axis_names)+')'\n for ax in axes:\n # The averager insists on bounds. Sometimes they don't exist, especially for obs.\n if ax.id!='lat' and ax.id!='lon' and not hasattr( ax, 'bounds' ):\n ax.setBounds( ax.genGenericBounds() )\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def reduce2lat_old( mv, vid=None ):\n # >>> For now, I'm assuming that the only axes are time,lat,lon\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n # If I base another reduction function on this one, it's important to note that an average\n # in the lat direction will unavoidably need weights, because of the geometry.\n\n if vid==None:\n vid = 'reduced_'+mv.id\n time_axis, lat_axis, lon_axis = tllAxes( mv )\n\n mvta = timeave_old( mv )\n\n zm = numpy.zeros( mvta.shape[0] )\n for i in range(len(lat_axis)):\n for j in range(len(lon_axis)):\n zm[i] += mvta[i,j]\n zm[i] /= len(lon_axis)\n zmv = cdms2.createVariable( zm, axes=[lat_axis], id=vid )\n return zmv",
"def _get_lat_avg(self, report):\n match = re.search(\"\\s*lat\\s*\\((\\w+)\\).*avg\\=\\s*(\\d+\\.{0,1}\\d*)\",\n report)\n if match:\n unit = match.group(1)\n value = float(match.group(2))\n if unit.lower() == \"usec\":\n value = value / 1000\n return value",
"def _sum_over_lat_lon(arr):\n return arr.sum(internal_names.LAT_STR).sum(internal_names.LON_STR)",
"def reduce2scalar_zonal_old( mv, latmin=-90, latmax=90, vid=None ):\n # For now, I'm assuming that the only axes are time,lat,lon - so that zm is a scalar.\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n if vid==None:\n vid = 'reduced_'+mv.id\n time,lat,lon = tllAxes(mv)\n if hasattr(mv.parent,'variables'):\n fil = mv.parent # mv is a fileVariable and fil is a file.\n lat_bnds = fil[lat.bounds]\n else:\n lataxis = latAxis(mv) # mv is a TransientVariable\n lat_bnds = lataxis._bounds_\n\n mvta = timeave_old( mv )\n\n # In computing the average, we use area weighting.\n # Sometimes the area is available in cell_measures, but for now I'll just use the backup method:\n # The area of a lonlat cell is R^2*delta(lon)*delta(sin(lat)).\n # With equally spaced lon, we don't need delta(lon) for weights.\n # I'll assume that lat,lon are in degrees, which is the only way I've ever seen them.\n wgtsum = 0\n zm = 0\n for i,lati in enumerate(lat):\n # The following test could be sped up a lot, because lat[i] is ordered...\n # >>> to do: partial overlaps\n if latmin<=lati and lati<latmax:\n latlo = lat_bnds[i,0]\n lathi = lat_bnds[i,1]\n wgti = sin(radians(lathi))-sin(radians(latlo))\n zi = 0.0\n for j in range(len(lon)):\n zi += mvta[i,j]\n zi *= wgti\n wgtsum += wgti*len(lon)\n zm += zi\n zm /= wgtsum\n # zm is a scalar, so createVariable gets no axes argument:\n zmv = cdms2.createVariable( zm, id=vid )\n return zmv",
"def fAvg(H, r):\n p = r[['start_lat', 'start_lng']]\n p.columns = ['lat', 'lng']\n d = r[['end_lat', 'end_lng']]\n d.columns = ['lat', 'lng']\n\n return f(H, p, d).sum()",
"def average_coords_nt(all_profile_nt: namedtuple) -> tuple:\n \"\"\"Param: all_profile_nt: Named tuple containing all profiles\"\"\"\n x, y = sum(map(lambda t: t[0], map(lambda v: v[4], all_profile_nt)))/len(all_profile_nt), sum(\n map(lambda t: t[1], map(lambda v: v[4], all_profile_nt)))/len(all_profile_nt)\n return x, y",
"def reduce2scalar_zonal( mv, latmin=-90, latmax=90, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n ilat = None\n for i,ax in enumerate(axes):\n if ax.id=='lat': ilat = i\n # reduce size of lat axis to (latmin,latmax)\n # Let's home a direct search will be fast enough:\n lataxis = latAxis( mv )\n lmin = -1\n lmax = len(lataxis)\n if lataxis[0]>=latmin: lmin = 0\n if lataxis[-1]<=latmax: lmax = len(lataxis)-1\n if lmin==-1 or lmax==len(lataxis):\n for l,ax in enumerate(lataxis):\n if lmin==-1 and ax>=latmin: lmin = max( 0, l )\n if lmax==len(lataxis) and ax>=latmax: lmax = min( l, len(lataxis) )\n lataxis_shrunk = lataxis.subaxis(lmin,lmax)\n mv2shape = list(mv.shape)\n mv2shape[ilat] = lmax-lmin+1\n axes[ilat] = lataxis_shrunk\n mvd1 = numpy.delete( mv, slice(0,lmin), ilat )\n mvdata = numpy.delete( mvd1, slice(lmax-lmin,len(lataxis)-lmin), ilat )\n mv2 = cdms2.createVariable( mvdata, axes=axes )\n\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n avmv = averager( mv2, axis=axes_string )\n avmv.id = vid # Note that the averager function returns a variable with meaningless id.\n ammv.units = mv.units\n\n return avmv",
"def average(coords):\n x = 0\n y = 0\n for coord in coords:\n x += coord[0]\n y += coord[1]\n count = len(coords)\n return (x/count, y/count)",
"def geo_average(self, returns):\r\n return (1 + returns).prod() ** (self.day / len(returns)) - 1",
"def geo_m_v2(data_array):\n r = 6378.137 #promien ziemi w km\n delta = np.zeros(data_array.size//7-1)\n alo = data_array[0][1]\n ala = data_array[0][2]\n count = 0\n for row in data_array[1:]:\n dLat = (row[2] - ala) * math.pi/180.0\n dLon = (row[1] - alo) * math.pi/180.0\n a = math.sin(dLat/2.0)**2 + math.cos(ala * math.pi/180.0) * math.cos(row[2] * math.pi/180.0)\\\n * math.sin(dLon/2.0)**2\n delta[count] = r * 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))#w km\n count += 1\n alo = row[1]\n ala = row[2]\n return delta",
"def meanHaversineDistance(lat_sub, lon_sub, lat_real, lon_real):\n return np.mean(HaversineDistance(lat_sub, lon_sub, lat_real, lon_real))",
"def coarsen_byavg(invar,lat,lon,deg,tol,latweight=True,verbose=True,ignorenan=False):\n\n # Make new Arrays\n lon5 = np.arange(0,360+deg,deg)\n lat5 = np.arange(-90,90+deg,deg)\n \n \n # Set up latitude weights\n if latweight:\n _,Y = np.meshgrid(lon,lat)\n wgt = np.cos(np.radians(Y)) # [lat x lon]\n invar *= wgt[None,:,:] # Multiply by latitude weight\n \n # Get time dimension and preallocate\n nt = invar.shape[0]\n outvar = np.zeros((nt,len(lat5),len(lon5)))\n \n # Loop and regrid\n i=0\n for o in range(len(lon5)):\n for a in range(len(lat5)):\n lonf = lon5[o]\n latf = lat5[a]\n \n lons = np.where((lon >= lonf-tol) & (lon <= lonf+tol))[0]\n lats = np.where((lat >= latf-tol) & (lat <= latf+tol))[0]\n \n varf = invar[:,lats[:,None],lons[None,:]]\n \n if latweight:\n wgtbox = wgt[lats[:,None],lons[None,:]]\n if ignorenan:\n varf = np.nansum(varf/np.nansum(wgtbox,(0,1)),(1,2)) # Divide by the total weight for the box\n else:\n varf = np.sum(varf/np.sum(wgtbox,(0,1)),(1,2)) # Divide by the total weight for the box\n \n \n else:\n if ignorenan: \n varf = np.nanmean(varf,axis=(1,2))\n else:\n varf = varf.mean((1,2))\n \n outvar[:,a,o] = varf.copy()\n i+= 1\n msg=\"\\rCompleted %i of %i\"% (i,len(lon5)*len(lat5))\n print(msg,end=\"\\r\",flush=True)\n return outvar,lat5,lon5",
"def average_coords_dc(all_profile_dict: dict) -> tuple:\n \"\"\"Param:all_profile_dc: dictionary containing all profiles\"\"\"\n x, y = sum(map(lambda t: t[0], map(lambda v: v['current_location'], all_profile_dict.values()))) / len(all_profile_dict.values(\n )), sum(map(lambda t: t[1], map(lambda v: v['current_location'], all_profile_dict.values()))) / len(all_profile_dict.values())\n return x, y",
"def reduce2levlat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n\n if timeax.units=='months':\n # Special check necessary for LEGATES obs data, because\n # climatology() won't accept this incomplete specification\n timeax.units = 'months since 0001-01-01'\n mvseas = seasons.climatology(mv)\n\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n avmv.id = vid\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n\n return avmv",
"def avg_equivlat(in_field, pv_field, n_lon, n_lat):\n # constants\n PI = np.pi\n\n # grid characteristics\n n_grid = int(n_lon)*int(n_lat)\n phi = PI/n_lat\n phih = 0.5*PI - phi*np.arange(n_lat+1)\n\n area_field = np.zeros([n_lon, n_lat])\n for j in range(n_lat):\n area_field[:, j] = 2*PI*(np.sin(phih[j]) - np.sin(phih[j+1]))/n_lon\n\n # reorder the fields\n ord_ind = np.argsort(pv_field, axis=None)[::-1]\n infield_ordered = in_field.flatten()[ord_ind]\n pv_ordered = pv_field.flatten()[ord_ind]\n area_ordered = area_field.flatten()[ord_ind]\n\n # areas of equivalent latitude bands for output\n # sum area along latitude bands\n area_band = np.sum(area_field, axis = 0)\n infield_eq = np.zeros(n_lat)\n\n ll = 0\n area_now = 0.0\n infield_tot = 0.0\n\n # loop to average in equivalent latitude bands\n for nn in range(n_grid):\n area_now += area_ordered[nn]\n infield_tot += area_ordered[nn]*infield_ordered[nn]\n if (area_now >= area_band[ll] or (nn == n_grid-1)):\n infield_tot -= (area_now - area_band[ll])*infield_ordered[nn]\n infield_eq[ll] = infield_tot/area_band[ll]\n infield_tot = (area_now - area_band[ll])*infield_ordered[nn]\n area_now -= area_band[ll]\n ll += 1\n \n # in field is averaged along eq. latitude bands from 90N - 90S\n # legacy from times when we were mostly interested in NH \n lat = PI/2 - np.arange(n_lat)*phi \n return (lat, infield_eq)",
"def calcApproxDist(lon1, lat1, lon2, lat2):\n\n import math\n from shapely.geometry import Point\n\n if lat1 == lat2 and lon1 == lon2:\n return 0.0\n\n point1 = Point(lon1,lat1)\n point2 = Point(lon2, lat2)\n\n return math.acos(math.sin(math.radians(point1.y))*math.sin(math.radians(point2.y))+math.cos(math.radians(\n point1.y))*math.cos(math.radians(point2.y))*math.cos(math.radians(point2.x)-math.radians(point1.x)))*6371",
"def find_slope(lat1,lon1,lat2,lon2):\n return (lon2-lon1)/(lat2-lat1)",
"def weighted_loc2(df, lat, lon, by, val2avg):\n import pandas as pd\n import swifter\n df_use = df.loc[:, [(lat), (lon), (by), val2avg]]\n df_use.loc[:, 'lat_wt'] = df_use.swifter.apply(lambda y: y[lat] * y[val2avg], axis=1).copy()\n df_use.loc[:, 'lon_wt'] = df_use.swifter.apply(lambda y: y[lon] * y[val2avg], axis=1).copy()\n\n sumwts = pd.DataFrame(df_use.copy().groupby(str(by)).apply(lambda y: sum_values(y[str(val2avg)])), columns={'totwts'})\n sumwts.loc[:, 'min_reads'] = sumwts.copy().index\n sumwts = sumwts.reset_index(drop=True).rename(columns={\"min_reads\": str(by)})\n totlats = pd.DataFrame(df_use.groupby(str(by)).apply(lambda y: sum_values(y['lat_wt'])), columns=['totlats'])\n totlats['min_reads'] = totlats.index.copy()\n totlats = totlats.reset_index(drop=True)\n totlats = totlats.rename(columns={\"min_reads\": str(by)})\n totlons = pd.DataFrame(df_use.groupby(str(by)).apply(lambda y: sum_values(y['lon_wt'])), columns=['totlons'])\n totlons['min_reads'] = totlons.index.copy()\n totlons = totlons.reset_index(drop=True)\n totlons = totlons.rename(columns={\"min_reads\": str(by)})\n df_use = pd.merge(totlats, df_use, on=str(by))\n df_use = pd.merge(totlons, df_use, on=str(by))\n df_use = pd.merge(sumwts, df_use, on=str(by))\n df_use.loc[:, 'overall_LON'] = df_use.swifter.apply(lambda y: y['totlons'] / y['totwts'], axis=1)\n df_use.loc[:, 'overall_LAT'] = df_use.swifter.apply(lambda y: y['totlats'] / y['totwts'], axis=1)\n return (df_use.loc[:, [(str(by)), ('overall_LON'), ('overall_LAT')]].drop_duplicates().rename(\n columns={'overall_LON': str(lon), 'overall_LAT': str(lat)}))",
"def geo_mean(num_list):\n np_array = np.array(num_list)\n return np_array.prod() ** (1.0 / len(np_array))",
"def mean_average_position():\n pass",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def project(self, (lng, lat)):\n x = lng * DEG_TO_RAD\n lat = max(min(MAX_LATITUDE, lat), -MAX_LATITUDE)\n y = lat * DEG_TO_RAD\n y = math.log(math.tan((math.pi / 4) + (y / 2)))\n return (x*EARTH_RADIUS, y*EARTH_RADIUS)",
"def average_point(self, *points):\n length = len(points)\n sum_x = reduce(lambda total, point: total + point[0], points, 0)\n sum_y = reduce(lambda total, point: total + point[1], points, 0)\n return (sum_x/length, sum_y/length)",
"def geo_mean(array):\n logsum = sum([np.log(each) for each in array])/len(array)\n return np.exp(logsum)",
"def average_double_pts(elev_list, dist_list, minimum_point_distance=0.5):\n import numpy as np\n\n dist_array = np.array(dist_list)\n\n diffs = dist_array[1:] - dist_array[:-1]\n bad_pts = diffs < minimum_point_distance\n bad_indices = [i for i in range(len(bad_pts)) if bad_pts[i]]\n\n new_elev_array = np.array(elev_list)\n\n for i in bad_indices:\n mean_elevation = (elev_list[i] + elev_list[i+1])/2\n new_elev_array[i] = mean_elevation\n new_elev_array[i+1] = mean_elevation\n\n new_dist_array = dist_array[0:-1]\n new_dist_array = new_dist_array[np.logical_not(bad_pts)]\n\n new_elev_array = np.array(new_elev_array[0:-1], copy=True)\n new_elev_array = new_elev_array[np.logical_not(bad_pts)]\n\n new_dist_list = list(new_dist_array)\n new_dist_list.append(dist_list[-1])\n new_elev_list = list(new_elev_array)\n new_elev_list.append(elev_list[-1])\n\n return new_elev_list, new_dist_list",
"def getAltitudeFromLatLon(self, lat, lon):\r\n # print \"-----\\nFromLatLon\", lon, lat\r\n lat -= self.lat\r\n lon -= self.lon\r\n # print \"lon, lat\", lon, lat\r\n if lat < 0.0 or lat >= 1.0 or lon < 0.0 or lon >= 1.0:\r\n raise WrongTileError(self.lat, self.lon, self.lat+lat, self.lon+lon)\r\n x = lon * (self.size - 1)\r\n y = lat * (self.size - 1)\r\n # print \"x,y\", x, y\r\n x_int = int(x)\r\n x_frac = x - int(x)\r\n y_int = int(y)\r\n y_frac = y - int(y)\r\n # print \"frac\", x_int, x_frac, y_int, y_frac\r\n value00 = self.getPixelValue(x_int, y_int)\r\n value10 = self.getPixelValue(x_int+1, y_int)\r\n value01 = self.getPixelValue(x_int, y_int+1)\r\n value11 = self.getPixelValue(x_int+1, y_int+1)\r\n value1 = self._avg(value00, value10, x_frac)\r\n value2 = self._avg(value01, value11, x_frac)\r\n value = self._avg(value1, value2, y_frac)\r\n # print \"%4d %4d | %4d\\n%4d %4d | %4d\\n-------------\\n%4d\" % (\r\n # value00, value10, value1, value01, value11, value2, value)\r\n return value"
] | [
"0.69857925",
"0.6807853",
"0.6773293",
"0.6564294",
"0.6501866",
"0.64869905",
"0.6404557",
"0.6294769",
"0.59781444",
"0.595413",
"0.5908495",
"0.5811801",
"0.5792106",
"0.577163",
"0.57321805",
"0.5651219",
"0.55763006",
"0.5573147",
"0.5571079",
"0.5546693",
"0.55466807",
"0.5537763",
"0.5528725",
"0.54972893",
"0.54972893",
"0.54854965",
"0.5469255",
"0.54673415",
"0.54614747",
"0.54517657"
] | 0.6961548 | 1 |
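The positive documents in these rows all follow the same reduction pattern: collect every axis id except the ones to keep, join them into an axis string like `(time)(lon)`, and hand that to `averager`. For readers without the cdms2/cdutil stack, here is a rough xarray analogue of `reduce2levlat`; it illustrates the idea (average over every dimension except `lev` and `lat`) but is not the code the dataset documents.

```python
# Rough xarray analogue of reduce2levlat: average over every dimension except
# 'lev' and 'lat'. Illustration only; the dataset's code uses cdms2/cdutil.
import xarray as xr

def reduce2levlat_xr(da, vid=None):
    if "lev" not in da.dims or "lat" not in da.dims:
        return None  # mirrors the early returns in the cdms2 version
    reduce_dims = [d for d in da.dims if d not in ("lev", "lat")]
    out = da.mean(dim=reduce_dims, keep_attrs=True)  # plain mean, no area weights
    out.name = vid if vid is not None else f"reduced_{da.name}"
    return out
```

A faithful port would also need the bounds-based latitude/area weighting that cdutil's averager applies; the plain `mean` above treats every grid point equally.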
as reduce2levlat, but data is averaged only for time restricted to the specified season; as in reduce2lat_seasonal. | def reduce2levlat_seasonal( mv, seasons=seasonsyr, vid=None ):
if vid==None: # Note that the averager function returns a variable with meaningless id.
vid = 'reduced_'+mv.id
if levAxis(mv) is None: return None
if latAxis(mv) is None: return None
axes = allAxes( mv )
timeax = timeAxis(mv)
if timeax.getBounds()==None:
timeax._bounds_ = timeax.genGenericBounds()
if timeax.units=='months':
# Special check necessary for LEGATES obs data, because
# climatology() won't accept this incomplete specification
timeax.units = 'months since 0001-01-01'
mvseas = seasons.climatology(mv)
axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' and a.id!='time']
axes_string = '('+')('.join(axis_names)+')'
if len(axes_string)>2:
avmv = averager( mvseas, axis=axes_string )
else:
avmv = mvseas
avmv.id = vid
avmv = delete_singleton_axis( avmv, vid='time' )
avmv.units = mv.units
return avmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reduce2latlon_seasonal( mv, seasons=seasonsyr, vid=None ):\n # This differs from reduce2lat_seasonal only in the line \"axis_names =\"....\n # I need to think about how to structure the code so there's less cut-and-paste!\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n mvseas = seasons.climatology(mv)\n \n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n for axis in mvseas.getAxisList():\n if axis.getBounds() is None:\n axis._bounds_ = axis.genGenericBounds()\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n if avmv is None: return avmv\n avmv.id = vid\n if hasattr(mv,'units'): avmv.units = mv.units\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def reduce2lat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n for ax in mv.getAxisList():\n if ax.getBounds() is None:\n ax._bounds_ = ax.genGenericBounds()\n timeax = timeAxis(mv)\n if timeax.units=='months':\n # Special check necessary for LEGATES obs data, because\n # climatology() won't accept this incomplete specification\n timeax.units = 'months since 0001-01-01'\n mvseas = seasons.climatology(mv)\n if mvseas is None:\n # Among other cases, this can happen if mv has all missing values.\n return None\n \n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n avmv.id = vid\n\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def seasonal_avg(var_nc,the_season,lat_slice=None,lon_slice=None): \n the_season=np.array(the_season,dtype=np.int32)\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n else:\n if lat_slice.stop is None:\n num_lats=var_nc.shape[2]\n else:\n num_lats=lat_slice.stop - lat_slice.start\n if lon_slice.stop is None:\n num_lons=var_nc.shape[3]\n else:\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n num_years=var_nc.shape[0]//12\n\n for the_year in np.arange(0,num_years):\n the_slice=var_nc[the_season,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n the_season=the_season+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg",
"def interpolateseasons(self):\n\n remainder = self.season - self.startseason\n f1 = 1.0 - remainder\n self.data = (self.startdata * f1) + (self.stopdata * remainder)",
"def reduce2levlat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def winter_avg(var_nc,lat_slice=None,lon_slice=None): \n #\n # accumulate in shape [plev,lat,lon]\n #\n # use the whole array if slice objects are missing\n #\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n print \"in winter avg: \",lat_slice,lon_slice\n else:\n num_lats=lat_slice.stop - lat_slice.start\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n #\n # year 0 is special case since it doesn't have a december\n #\n djf0=np.array([0,1],dtype=np.int32) #january and feburary\n the_slice=var_nc[djf0,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0) #average over the two months\n accumulate+=the_slice\n num_years=var_nc.shape[0]//12\n #\n # now year 1 has year 0's december\n #\n djf=np.array([11,12,13],dtype=np.int32)\n #\n # iterate one year less because we've alread\n # done year zero as a special case\n #\n for the_year in np.arange(0,num_years-1):\n the_slice=var_nc[djf,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n djf=djf+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg",
"def reduce2scalar_zonal_old( mv, latmin=-90, latmax=90, vid=None ):\n # For now, I'm assuming that the only axes are time,lat,lon - so that zm is a scalar.\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n if vid==None:\n vid = 'reduced_'+mv.id\n time,lat,lon = tllAxes(mv)\n if hasattr(mv.parent,'variables'):\n fil = mv.parent # mv is a fileVariable and fil is a file.\n lat_bnds = fil[lat.bounds]\n else:\n lataxis = latAxis(mv) # mv is a TransientVariable\n lat_bnds = lataxis._bounds_\n\n mvta = timeave_old( mv )\n\n # In computing the average, we use area weighting.\n # Sometimes the area is available in cell_measures, but for now I'll just use the backup method:\n # The area of a lonlat cell is R^2*delta(lon)*delta(sin(lat)).\n # With equally spaced lon, we don't need delta(lon) for weights.\n # I'll assume that lat,lon are in degrees, which is the only way I've ever seen them.\n wgtsum = 0\n zm = 0\n for i,lati in enumerate(lat):\n # The following test could be sped up a lot, because lat[i] is ordered...\n # >>> to do: partial overlaps\n if latmin<=lati and lati<latmax:\n latlo = lat_bnds[i,0]\n lathi = lat_bnds[i,1]\n wgti = sin(radians(lathi))-sin(radians(latlo))\n zi = 0.0\n for j in range(len(lon)):\n zi += mvta[i,j]\n zi *= wgti\n wgtsum += wgti*len(lon)\n zm += zi\n zm /= wgtsum\n # zm is a scalar, so createVariable gets no axes argument:\n zmv = cdms2.createVariable( zm, id=vid )\n return zmv",
"def manipulate_data(ds, var, predef_clim, predef_trnd, trn_yrs, all_yrs, \n apply_latw=True, apply_detrending=True, dropna=True):\n\n \n if((var=='SD')|(var=='sd')|(var=='snowc')): \n ds[var] = ds[var].where(ds[var]>=0, other=0.0)\n ds[var] = ds[var].where(ds[var]==0, other=1.0)\n #ds[var].values = Gauss_filter(ds[var].values, (0,3,3))\n \n \"\"\"\n if((var=='hgt')|(var=='z')|(var=='GPT')):\n months = ds.time.to_index().month; ssn_ends = (months==2)|(months==5)|(months==8)|(months==11)\n ds = ds.sel(time=ssn_ends)\n else: \n ds = ds.resample(time='3M').mean()\n \"\"\"\n \n ds = ds.resample(time='3M').mean()\n\n ds = ds.sel(time=slice(str(all_yrs[0])+'-01-01', str(all_yrs[-1])+'-12-31')) \n \n try: \n clim = predef_clim\n ds = ds.groupby('time.season') - clim\n print('Predefined climatology used')\n except:\n clim = ds.sel(time=slice(str(trn_yrs[0])+'-01-01', str(trn_yrs[-1])+'-12-31')).groupby('time.season').mean('time')\n ds = ds.groupby('time.season') - clim\n print('Climatology calculated from data')\n \n if(apply_latw): ds[var].values = lat_weighting(ds[var].values, \n ds.lat, ds.lon)\n if(dropna):\n ds = ds.stack(gridcell=('lat', 'lon')).dropna(dim='gridcell',how='any')\n else: \n ds = ds.stack(gridcell=('lat', 'lon')).fillna(0)\n \n \n trend_models = { }\n if(apply_detrending): \n ds = ds.load()\n for ssn in ('DJF', 'MAM', 'JJA', 'SON'):\n #ssn_idx = ds['time.season'] == ssn\n \n trn_idx = bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], trn_yrs))\n all_idx = bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], all_yrs))\n \n trn_x = np.array(ds.time[trn_idx].values.tolist()).reshape(-1,1)\n all_x = np.array(ds.time[all_idx].values.tolist()).reshape(-1,1)\n try:\n trend = predef_trnd[ssn].predict(all_x)\n trend_models[ssn] = predef_trnd[ssn]\n print('Predefined trend model used')\n except:\n #_, trend_model = define_trends(ds[var][trn_idx], trn_x)\n _, trend_model = define_trends(ds[var][all_idx], all_x)\n trend = trend_model.predict(all_x)\n trend_models[ssn] = trend_model\n print('Trends calculated from data')\n \n ds[var][all_idx] = ds[var][all_idx] - trend\n \n\n \n return ds, clim, trend_models",
"def reduce2lat_old( mv, vid=None ):\n # >>> For now, I'm assuming that the only axes are time,lat,lon\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n # If I base another reduction function on this one, it's important to note that an average\n # in the lat direction will unavoidably need weights, because of the geometry.\n\n if vid==None:\n vid = 'reduced_'+mv.id\n time_axis, lat_axis, lon_axis = tllAxes( mv )\n\n mvta = timeave_old( mv )\n\n zm = numpy.zeros( mvta.shape[0] )\n for i in range(len(lat_axis)):\n for j in range(len(lon_axis)):\n zm[i] += mvta[i,j]\n zm[i] /= len(lon_axis)\n zmv = cdms2.createVariable( zm, axes=[lat_axis], id=vid )\n return zmv",
"def reduce2lat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def reduce_time_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n timeax = timeAxis(mv)\n if timeax is None:\n print \"WARNING- no time axis in\",mv.id\n return None\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n mvseas = seasons.climatology(mv)\n if mvseas is None:\n print \"WARNING- cannot compute climatology for\",mv.id,seasons.seasons\n print \"...probably there is no data for times in the requested season.\"\n return None\n avmv = mvseas\n avmv.id = vid\n if hasattr(mv,'units'): avmv.units = mv.units\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def reduce2latlon( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' ]\n axes_string = '('+')('.join(axis_names)+')'\n for ax in axes:\n # The averager insists on bounds. Sometimes they don't exist, especially for obs.\n if ax.id!='lat' and ax.id!='lon' and not hasattr( ax, 'bounds' ):\n ax.setBounds( ax.genGenericBounds() )\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def ann_mean(ds, season=None, time_bnds_varname='time_bnds', time_centered=True, n_req=None):\n \n ds = ds.copy() #deep=True)\n\n if n_req is None:\n if season is not None:\n n_req = 2\n else:\n n_req = 8\n \n if time_bnds_varname is None and not time_centered:\n raise NotImplementedError('time_bnds_varname cannot be \"None\" if time_centered=False')\n \n if not time_centered:\n time_units = ds.time.encoding['units']\n time_calendar = ds.time.encoding['calendar']\n\n # compute time bounds array\n time_bound_data = cftime.date2num(\n ds[time_bnds_varname].data, \n units=time_units, \n calendar=time_calendar) \n\n # center time\n time_centered = cftime.num2date(\n time_bound_data.mean(axis=1),\n units=time_units, \n calendar=time_calendar\n ) \n time_attrs = ds.time.attrs\n time_encoding = ds.time.encoding\n\n ds['time'] = xr.DataArray(\n time_centered,\n dims=('time')\n ) \n \n ones = xr.DataArray(\n np.ones((len(ds.time))), \n dims=('time'), \n coords={'time': ds.time},\n )\n time_mask = xr.DataArray(\n np.ones((len(ds.time))), \n dims=('time'), \n coords={'time': ds.time},\n )\n\n group_by_year = 'time.year'\n rename = {'year': 'time'}\n \n if season is not None:\n season = season.upper()\n if season not in ['DJF', 'MAM', 'JJA', 'SON']:\n raise ValueError(f'unknown season: {season}') \n\n ds['austral_year'] = xr.where(ds['time.month'] > 6, ds['time.year'] + 1, ds['time.year'])\n ds = ds.set_coords('austral_year')\n ones = ones.assign_coords({'austral_year': ds.austral_year})\n time_mask = time_mask.assign_coords({'austral_year': ds.austral_year})\n time_mask = time_mask.where(ds['time.season'] == season).fillna(0)\n \n if season == 'DJF':\n group_by_year = 'austral_year'\n rename = {'austral_year': 'time'}\n \n if time_bnds_varname is not None:\n time_wgt = ds[time_bnds_varname].diff(dim=ds[time_bnds_varname].dims[1])\n if time_wgt.dtype == '<m8[ns]':\n time_wgt = time_wgt / np.timedelta64(1, 'D')\n else: \n time_wgt = xr.DataArray(\n np.ones((len(ds.time))), \n dims=('time'), \n coords={'time': ds.time},\n )\n time_wgt = time_wgt.assign_coords(\n {c: da for c, da in ds.coords.items() if 'time' in da.dims}\n )\n \n time_wgt = time_wgt.where(time_mask==1) #.fillna(0.)\n\n ones = ones.where(time_mask==1)\n time_wgt_grouped = time_wgt.groupby(group_by_year, restore_coord_dims=False)\n time_wgt = time_wgt_grouped / time_wgt_grouped.sum(dim=xr.ALL_DIMS)\n\n nyr = len(time_wgt_grouped.groups)\n \n time_wgt = time_wgt.squeeze()\n\n idx_not_nans = ~np.isnan(time_wgt)\n sum_wgt = time_wgt.groupby(group_by_year).sum(dim=xr.ALL_DIMS)\n idx_not_nans = (sum_wgt > 0)\n\n np.testing.assert_almost_equal(\n sum_wgt[idx_not_nans], \n np.ones(idx_not_nans.sum().values)\n )\n\n nontime_vars = set([v for v in ds.variables if 'time' not in ds[v].dims]) - set(ds.coords)\n dsop = ds.drop_vars(nontime_vars)\n\n if time_bnds_varname is not None:\n dsop = dsop.drop_vars(time_bnds_varname) \n \n def weighted_mean_arr(darr, wgts=None):\n # if NaN are present, we need to use individual weights\n cond = darr.isnull()\n ones = xr.where(cond, 0.0, 1.0)\n if season is None:\n mask = (\n darr.resample({'time': 'A'}, restore_coord_dims=False).mean(dim='time').notnull()\n )\n da_sum = (\n (darr * wgts).resample({'time': 'A'}, restore_coord_dims=False).sum(dim='time')\n )\n ones_out = (\n (ones * wgts).resample({'time': 'A'}, restore_coord_dims=False).sum(dim='time')\n )\n count = (\n (ones * wgts.notnull()).resample({'time': 'A'}, restore_coord_dims=False).sum(dim='time')\n )\n else:\n mask = (\n darr.groupby(group_by_year, 
restore_coord_dims=False).mean(dim='time').notnull()\n ).rename(rename)\n \n da_sum = (\n (darr * wgts).groupby(group_by_year, restore_coord_dims=False).sum(dim='time')\n ).rename(rename)\n \n ones_out = (\n (ones * wgts).groupby(group_by_year, restore_coord_dims=False).sum(dim='time')\n ).rename(rename)\n \n count = (\n (ones * wgts.notnull()).groupby(group_by_year, restore_coord_dims=False).sum(dim='time')\n ).rename(rename)\n\n ones_out = ones_out.where(ones_out > 0.0)\n da_weighted_mean = da_sum / ones_out\n\n return da_weighted_mean.where(mask).where(count >= n_req) \n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n ds_ann = dsop.map(weighted_mean_arr, wgts=time_wgt)\n\n # copy attrs\n for v in ds_ann:\n ds_ann[v].attrs = ds[v].attrs\n\n # restore coords\n ds_ann = xr.merge((ds_ann, ds[list(nontime_vars)]))\n\n # eliminate partials\n ndx = (time_wgt_grouped.count(dim=xr.ALL_DIMS) >= n_req).values\n if not ndx.all():\n ds_ann = ds_ann.isel(time=ndx)\n\n return ds_ann",
"def reduce2scalar_zonal( mv, latmin=-90, latmax=90, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n ilat = None\n for i,ax in enumerate(axes):\n if ax.id=='lat': ilat = i\n # reduce size of lat axis to (latmin,latmax)\n # Let's home a direct search will be fast enough:\n lataxis = latAxis( mv )\n lmin = -1\n lmax = len(lataxis)\n if lataxis[0]>=latmin: lmin = 0\n if lataxis[-1]<=latmax: lmax = len(lataxis)-1\n if lmin==-1 or lmax==len(lataxis):\n for l,ax in enumerate(lataxis):\n if lmin==-1 and ax>=latmin: lmin = max( 0, l )\n if lmax==len(lataxis) and ax>=latmax: lmax = min( l, len(lataxis) )\n lataxis_shrunk = lataxis.subaxis(lmin,lmax)\n mv2shape = list(mv.shape)\n mv2shape[ilat] = lmax-lmin+1\n axes[ilat] = lataxis_shrunk\n mvd1 = numpy.delete( mv, slice(0,lmin), ilat )\n mvdata = numpy.delete( mvd1, slice(lmax-lmin,len(lataxis)-lmin), ilat )\n mv2 = cdms2.createVariable( mvdata, axes=axes )\n\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n avmv = averager( mv2, axis=axes_string )\n avmv.id = vid # Note that the averager function returns a variable with meaningless id.\n ammv.units = mv.units\n\n return avmv",
"def coldaverage( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n avenames = names # create an output list to average\n\n# assume only a limited range of galactic latitudes are available\n# not range above +/-60.\n use60Range = False\n minGlat = 90. # initialize to extremea\n maxGlat = -90.\n maxEl = -90.\n minEl = 90.\n ncold = 0\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'AST': # speed up by only looking at astronomy files\n continue\n \n rs.read_spec_ast(filename) # An observation, read values\n\n if rs.telel < 0: # only working with observations, skip elevation <= 0.\n continue\n\n maxGlat = max( rs.gallat, maxGlat)\n minGlat = min( rs.gallat, minGlat)\n maxEl = max( rs.telel, maxEl)\n minEl = min( rs.telel, minEl)\n # end for all files loop, looking for max el and latitude ranges\n\n # if any high galactic latitudes, use only above +/-60d \n if minGlat < -60. or maxGlat > 60.:\n minGlat = -60.\n maxGlat = 60.\n else: # else no high galactic latitude data\n # use highest galactic latitudes - +/-5.degrees\n if -minGlat > maxGlat: # if negative latitudes higher\n minGlat = minGlat + 5.\n maxGlat = 90.\n else: # else positive latitudes higher\n maxGlat = maxGlat - 5.\n minGlat = -90.\n\n # only use the elevations above 60 degrees, if any\n if maxEl > 60.:\n maxEl = 60.\n else:\n maxEl = maxEl - 10. #else must use highest elevations available\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if rs.telel < maxEl:\n continue\n\n if rs.gallat > maxGlat or rs.gallat < minGlat:\n avenames[ncold] = filename\n ncold = ncold + 1\n # end of for all files loop\n\n ncold, cold = average( avenames[0:ncold]) # now use generic program for averages\n if ncold < 1:\n print 'No Cold load files; can not calibrate!'\n exit()\n\n return ncold, cold, minEl, maxEl",
"def meanRegion(thk_s,thk_p,thk_diff):\n meanp = np.nanmean(np.nanmean(thk_p,axis=1),axis=1)\n means = np.nanmean(np.nanmean(thk_s,axis=1),axis=1)\n \n print '\\n --- [[%s to %s N, %s to %s E]] ---' % (latmin,latmax,lonmin,lonmax)\n print 'Average Thickness (Satellite) == %s meters' % np.nanmean(means)\n print 'Average Thickness (PIOMAS) == %s meters' % np.nanmean(meanp)\n print 'Average Difference == %s meters' % (np.nanmean(means)-np.nanmean(meanp))\n \n yearmin = 2004\n yearmax = 2015\n years = np.arange(yearmin,yearmax+1,1)\n years = np.setdiff1d(years,[2010]) ### no satellite data in 2010\n \n fig = plt.figure()\n ax = plt.subplot(111)\n \n ### Call parameters\n plt.rcParams['text.usetex']=True\n plt.rcParams['font.family'] = 'sans-serif'\n plt.rcParams['font.sans-serif'] = 'Avant Garde'\n \n plt.plot(meanp,color='darkred',linewidth=2,linestyle='-',\n label=r'PIOMAS')\n plt.plot(means,color='forestgreen',linewidth=2,linestyle='-',\n label=r'Satellite')\n plt.axvline(6,color='k',linewidth=3,linestyle='-')\n \n labelsy = map(str,np.arange(0,6,1))\n labelsx = map(str,years)\n plt.xticks(np.arange(len(years)),labelsx)\n plt.yticks(np.arange(0,6,1),labelsy)\n plt.ylabel(r'\\textbf{Thickness (meters)}',fontsize=13)\n \n ### Adjust axes in time series plots \n def adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 10))\n else:\n spine.set_color('none') \n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n \n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([]) \n \n ### Adjust axes spines\n adjust_spines(ax, ['left', 'bottom'])\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n plt.grid(color='b',zorder=1,alpha=0.3)\n \n plt.legend(shadow=False,fontsize=11,loc='upper right',\n fancybox=True)\n \n plt.text(2,-0.8,r'\\textbf{ICESat}',fontsize=13)\n plt.text(7.3,-0.8,r'\\textbf{PIOMAS}',fontsize=13)\n \n fig.suptitle(r'\\textbf{SIT Difference [Satellite - PIOMAS]}',fontsize=16)\n plt.savefig(directoryfigure + 'test5_difftseries.png',dpi=300)",
"def seasonal_calc(t, y, func, edges=None):\n ts, ys = seasonal_series(t, y, edges=edges)\n t_means = [t.jyear.mean() for t in ts]\n t_means = astropy.time.Time(t_means, format='jyear', scale=t.scale)\n f_y = np.array([func(y) for y in ys])\n return t_means, f_y",
"def timeave_seasonal( mv, seasons=seasonsyr ):\n return seasons.climatology(mv)",
"def gavg(idata):\n\t\n\twgt1=np.cos(np.deg2rad(idata.lat))*(idata*0+1)\n\tga=(wgt1*idata).sum(dim=['lat','lon'])/wgt1.sum(dim=['lat','lon'])\n\n\treturn ga",
"def extract_seasonal_component(original_ts, ppy):\n \"\"\"\n # === get in-sample data\n original_ts = original_ts[:-out_of_sample]\n \"\"\"\n if seasonality_test(original_ts, ppy):\n # print(\"seasonal\")\n # ==== get moving averages\n ma_ts = moving_averages(original_ts, ppy)\n\n # ==== get seasonality indices\n le_ts = original_ts * 100 / ma_ts\n le_ts = np.hstack((le_ts, np.full((ppy - (len(le_ts) % ppy)), np.nan)))\n le_ts = np.reshape(le_ts, (-1, ppy))\n si = np.nanmean(le_ts, 0)\n norm = np.sum(si) / (ppy * 100)\n si = si / norm\n else:\n # print(\"NOT seasonal\")\n si = np.full(ppy, 100)\n return si",
"def coarsen_byavg(invar,lat,lon,deg,tol,latweight=True,verbose=True,ignorenan=False):\n\n # Make new Arrays\n lon5 = np.arange(0,360+deg,deg)\n lat5 = np.arange(-90,90+deg,deg)\n \n \n # Set up latitude weights\n if latweight:\n _,Y = np.meshgrid(lon,lat)\n wgt = np.cos(np.radians(Y)) # [lat x lon]\n invar *= wgt[None,:,:] # Multiply by latitude weight\n \n # Get time dimension and preallocate\n nt = invar.shape[0]\n outvar = np.zeros((nt,len(lat5),len(lon5)))\n \n # Loop and regrid\n i=0\n for o in range(len(lon5)):\n for a in range(len(lat5)):\n lonf = lon5[o]\n latf = lat5[a]\n \n lons = np.where((lon >= lonf-tol) & (lon <= lonf+tol))[0]\n lats = np.where((lat >= latf-tol) & (lat <= latf+tol))[0]\n \n varf = invar[:,lats[:,None],lons[None,:]]\n \n if latweight:\n wgtbox = wgt[lats[:,None],lons[None,:]]\n if ignorenan:\n varf = np.nansum(varf/np.nansum(wgtbox,(0,1)),(1,2)) # Divide by the total weight for the box\n else:\n varf = np.sum(varf/np.sum(wgtbox,(0,1)),(1,2)) # Divide by the total weight for the box\n \n \n else:\n if ignorenan: \n varf = np.nanmean(varf,axis=(1,2))\n else:\n varf = varf.mean((1,2))\n \n outvar[:,a,o] = varf.copy()\n i+= 1\n msg=\"\\rCompleted %i of %i\"% (i,len(lon5)*len(lat5))\n print(msg,end=\"\\r\",flush=True)\n return outvar,lat5,lon5",
"def get_gas_by_month(self, year, month, deseasonalize=False):\n df = self.conc_trend if deseasonalize else self.conc_seasonal\n ts = pd.Timestamp(year, month, 1)\n info_dict = {'latency': df.latency[ts]}\n return df.dmf_mean[ts], info_dict",
"def extract_loc(ref_lon, ref_lat, tlon, tlat, var):\n\n if var.ndim == 3: # 3D variable\n zmax, imax, jmax = var.shape\n threeD = True\n elif var.ndim == 2: # 2D variable\n imax, jmax = var.shape\n threeD = False\n else:\n print 'extract_loc: check variable dimensions'\n return\n\n # find the indices of the 4 model grid points around the location\n Ilist, Jlist = find_stn_idx(ref_lon, ref_lat, tlon, tlat)\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n dist[dist==0] = 1.e-15 # avoid division by zero\n\n # arrays to store weights and data to be averaged\n if threeD: # 3D variable\n wghts = MA.zeros((zmax,len(Ilist)*len(Jlist)),float)\n data = MA.zeros((zmax,len(Ilist)*len(Jlist)),float)\n if MA.isMA(var): # mask weights\n dist_m = MA.array(N.resize(dist,var.shape),mask=var.mask)\n else:\n dist_m = N.array(N.resize(dist,var.shape))\n else: # 2D variable\n wghts = MA.zeros((len(Ilist)*len(Jlist)),float)\n data = MA.zeros((len(Ilist)*len(Jlist)),float)\n if MA.isMA(var):\n dist_m = MA.array(dist,mask=var.mask) # mask weights\n else:\n dist_m = N.array(dist)\n\n # get the 4 model grid points and compute weights\n n = 0\n for i in Ilist:\n for j in Jlist:\n wghts[...,n] = 1./dist_m[...,i,j]\n data[...,n] = var[...,i,j]\n n += 1\n\n # compute weighted average\n wavg = MA.average(data,axis=-1,weights=wghts)\n return wavg",
"def slg_average(df,start_year,end_year,bat_met,player_name):\n base_fields = ['AB','HR','X3B','X2B','SLG']\n emp_list = check_base_fields(df,base_fields)\n\n if not emp_list:\n df['X1B'] = round(df['SLG']*df['AB'] - (4*df['HR'] + 3*df['X3B'] + 2*df['X2B']),0)\n return round((df['X1B'].sum(axis = 0) + df['X2B'].sum(axis = 0) * 2 + df['X3B'].sum(axis = 0) * 3 + df['HR'].sum(axis = 0) * 4) / df['AB'].sum(axis = 0),3)\n\n else:\n df = original_dataframe(start_year,end_year,bat_met+emp_list,player_name)\n df['X1B'] = round(df['SLG']*df['AB'] - (4*df['HR'] + 3*df['X3B'] + 2*df['X2B']),0)\n SLG = round((df['X1B'].sum(axis = 0) + df['X2B'].sum(axis = 0) * 2 + df['X3B'].sum(axis = 0) * 3 + df['HR'].sum(axis = 0) * 4) / df['AB'].sum(axis = 0),3)\n del df['X1B']\n return SLG",
"def compute_stage4(lon, lat, year):\n nc = netCDF4.Dataset(\"/mesonet/data/stage4/%s_stage4_hourly.nc\" % (year,))\n lons = nc.variables[\"lon\"][:]\n lats = nc.variables[\"lat\"][:]\n dist = ((lons - lon) ** 2 + (lats - lat) ** 2) ** 0.5\n (yidx, xidx) = np.unravel_index(dist.argmin(), dist.shape)\n print(\n (\"Computed stage4 nclon:%.2f nclat:%.2f yidx:%s xidx:%s \")\n % (lons[yidx, xidx], lats[yidx, xidx], yidx, xidx)\n )\n p01i = mm2inch(nc.variables[\"p01m\"][:, yidx, xidx])\n nc.close()\n df = pd.DataFrame(\n {\"precip\": 0.0},\n index=pd.date_range(\n \"%s-01-01\" % (year,), \"%s-12-31\" % (year,), tz=\"America/Chicago\"\n ),\n )\n for date in df.index.values:\n date2 = datetime.datetime.utcfromtimestamp(date.tolist() / 1e9)\n ts = datetime.datetime(date2.year, date2.month, date2.day, 6)\n ts = ts.replace(tzinfo=pytz.utc)\n ts = ts.astimezone(pytz.timezone(\"America/Chicago\"))\n ts = ts.replace(hour=0)\n ts = ts.astimezone(pytz.utc)\n tidx = hourly_offset(ts)\n # values are in the rears\n val = np.ma.sum(p01i[tidx + 1 : tidx + 25])\n if val > 0:\n df.at[date, \"precip\"] = val # close enough\n return df",
"def seasonal_mean(args_file):\n product, start_date, end_date, variable_name, shape_file = Utility.read_yml_params(args_file)\n stat = Statistic.Mean\n time = TimePeriod.Seasonal\n\n ds = get_data_set(product, shape_file)\n\n result = Utility.Apply_stat(ds, start_date, end_date, variable_name, stat, time)\n return result",
"def _parse_station(station) -> WeatherStation:\n with open(core_season_file_path) as file_handle:\n core_seasons = json.load(file_handle)\n ecodivisions = geopandas.read_file(ecodiv_shape_file_path)\n station_coord = Point(\n float(station['longitude']), float(station['latitude']))\n\n # hacky fix for station 447 (WATSON LAKE FS), which is in the Yukon\n # so ecodivision name has to be hard-coded\n if station['stationCode'] == '447':\n ecodiv_name = \"SUB-ARCTIC HIGHLANDS\"\n else:\n for index, row in ecodivisions.iterrows(): # pylint: disable=redefined-outer-name, unused-variable\n geom = row['geometry']\n if station_coord.within(geom):\n ecodiv_name = row['CDVSNNM']\n break\n return WeatherStation(\n code=station['stationCode'],\n name=station['displayLabel'],\n lat=station['latitude'],\n long=station['longitude'],\n ecodivision_name=ecodiv_name,\n core_season=core_seasons[ecodiv_name]['core_season'])",
"def map_season(x, figsize=(8, 6), **kwargs):\n\n from pycmbs.mapping import map_plot\n\n nvals = len(x.data)\n if nvals == 12:\n year = True\n elif nvals == 4:\n year = False\n else:\n raise ValueError('Only data for 4-seasons or monthly data is supported!')\n\n #/// checks ///\n if x.data.ndim != 3:\n print x.data.ndim\n raise ValueError('only 3D data supported')\n\n if 'vmin' not in kwargs.keys():\n raise ValueError('vmin argument is obligatory for map_seasons()')\n if 'vmax' not in kwargs.keys():\n raise ValueError('vmax argument is obligatory for map_seasons()')\n\n if kwargs['vmin'] is None:\n raise ValueError('vmin MUST NOT be None!')\n if kwargs['vmax'] is None:\n raise ValueError('vmax MUST NOT be None!')\n\n #/// figure and axes\n if 'figure' in kwargs:\n f = kwargs['figure']\n else:\n f = plt.figure(figsize=figsize)\n\n if 'title' in kwargs:\n tit = kwargs.pop('title')\n else:\n tit = x.label\n\n if 'drawparallels' in kwargs:\n drawparallels = kwargs.pop('drawparallels')\n else:\n drawparallels = False\n\n if 'savefile' in kwargs:\n savefile = kwargs.pop('savefile')\n if '.nc' in savefile:\n savefile = savefile[:-3]\n else:\n savefile = None\n\n # plot\n if year:\n labels = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']\n else:\n labels = ['DJF', 'MAM', 'JJA', 'SON']\n\n # check dates\n if year:\n mo = 1\n for t in x.time:\n if x.num2date(t).month != mo:\n print x.num2date(t), mo\n raise ValueError('Invalid monthly sequence! Can not plot results!')\n mo += 1\n\n #/// in case that an overlay is provided, this needs to be processed for each timestep individually\n if 'overlay' in kwargs.keys():\n overlays = kwargs.pop('overlay')\n else:\n overlays = None\n\n for i in range(nvals):\n if year:\n ax = f.add_subplot(4, 3, i + 1)\n #if i % 3 == 2:\n if i > 8:\n show_colorbar = True\n else:\n show_colorbar = False\n else:\n ax = f.add_subplot(2, 2, i + 1)\n if 'show_colorbar' in kwargs:\n show_colorbar = kwargs.pop('show_colorbar')\n else:\n show_colorbar = True\n\n d = x.copy()\n d.data = x.data[i, :, :]\n d.label = labels[i]\n\n if overlays is None:\n overlay = None\n else:\n overlay = overlays[i, :, :]\n\n if savefile is not None:\n tmpoutname = savefile + '_' + labels[i]\n else:\n tmpoutname = None\n\n map_plot(d, ax=ax, show_colorbar=show_colorbar, overlay=overlay,\n savefile=tmpoutname, colorbar_orientation='horizontal',\n drawparallels=drawparallels, **kwargs)\n del d\n f.suptitle(tit, size=16)\n return f",
"def calcSeason(ra, time):\n # Reference RA and equinox to anchor ra/season reference - RA = 0 is overhead at autumnal equinox\n # autumn equinox 2014 happened on september 23 --> equinox MJD\n Equinox = 2456923.5 - 2400000.5\n # convert ra into 'days'\n dayRA = ra / 360 * 365.25\n firstSeasonBegan = Equinox + dayRA - 0.5 * 365.25\n seasons = (time - firstSeasonBegan) / 365.25\n # Set first season to 0\n seasons = seasons - np.floor(np.min(seasons))\n return seasons",
"def satReader(directory,month,latmin,latmax,lonmin,lonmax):\n \n ### Enter filename\n filename = 'cs2icesat_regrid_mar_20042015.nc' \n \n ### Month/Years extracted\n dateyr = now.year \n datemo = datetime.date(dateyr,month+1,1).strftime('%B')\n \n ### Retrieve data\n data = Dataset(directory + filename)\n lat = data.variables['lat'][:]\n lon = data.variables['lon'][:]\n thkn = data.variables['thick'][:]\n data.close()\n \n ### Calculate lat/lon region\n xmask = (lat > latmin) & (lat < latmax)\n ymask = (lon > lonmin) & (lon < lonmax)\n \n mask = xmask[:] & ymask[:]\n latvals = np.where(mask == True)[0]\n lonvals = np.where(mask == True)[1]\n latvals = np.unique(latvals)\n lonvals = np.unique(lonvals)\n \n thk = thkn[:,latvals,:]\n thk = thk[:,:,lonvals]\n \n lat = lat[latvals,:]\n lat = lat[:,lonvals]\n lon = lon[latvals,:]\n lon = lon[:,lonvals]\n\n grid = '---> [[%s to %s N, %s to %s E]]' % (latmin,latmax,lonmin,lonmax)\n print 'Completed: Satellite data read (%s)!' % datemo, grid\n \n return lat,lon,thk"
] | [
"0.75936365",
"0.74594027",
"0.688166",
"0.6238695",
"0.61847365",
"0.6133317",
"0.60225564",
"0.5992159",
"0.58987415",
"0.5852854",
"0.5766803",
"0.5747153",
"0.555478",
"0.54478663",
"0.5436463",
"0.5383689",
"0.53339547",
"0.53088355",
"0.5280703",
"0.5271107",
"0.5259503",
"0.514486",
"0.5133364",
"0.5097927",
"0.507667",
"0.50266653",
"0.50261164",
"0.5022141",
"0.49701214",
"0.49551564"
] | 0.74663657 | 1 |
as reduce2lat, but only data from times within the specified season is used. The season is specified as an object of type cdutil.times.Seasons, and defaults to the whole year. The returned variable will still have a time axis, with one value per season specified. | def reduce2lat_seasonal( mv, seasons=seasonsyr, vid=None ):
if vid==None:
vid = 'reduced_'+mv.id
# Note that the averager function returns a variable with meaningless id.
# The climatology function returns the same id as mv, which we also don't want.
# The slicers in time.py require getBounds() to work.
# If it doesn't, we'll have to give it one.
# Setting the _bounds_ attribute will do it.
for ax in mv.getAxisList():
if ax.getBounds() is None:
ax._bounds_ = ax.genGenericBounds()
    timeax = timeAxis(mv)
    if timeax is None:
        # No time axis, so no seasonal reduction is possible.
        return None
    if timeax.units=='months':
# Special check necessary for LEGATES obs data, because
# climatology() won't accept this incomplete specification
timeax.units = 'months since 0001-01-01'
mvseas = seasons.climatology(mv)
if mvseas is None:
# Among other cases, this can happen if mv has all missing values.
return None
axes = allAxes( mv )
axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='time']
axes_string = '('+')('.join(axis_names)+')'
if len(axes_string)>2:
avmv = averager( mvseas, axis=axes_string )
else:
avmv = mvseas
avmv.id = vid
avmv = delete_singleton_axis( avmv, vid='time' )
    if hasattr(mv,'units'): avmv.units = mv.units
return avmv | {
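
For illustration, a minimal usage sketch of reduce2lat_seasonal (not part of the original source): it assumes cdms2 and cdutil are installed, that the helpers used above (timeAxis, allAxes, averager, delete_singleton_axis) are in scope, and that the file name 'sample.nc' and variable name 'TS' are hypothetical placeholders for a (time, lat, lon) field.

import cdms2
import cdutil

f = cdms2.open('sample.nc')             # hypothetical file name
ts = f('TS')                            # hypothetical variable with (time, lat, lon) axes
djf = cdutil.times.Seasons('DJF')       # restrict the climatology to December-February
zm_djf = reduce2lat_seasonal(ts, seasons=djf)
print(zm_djf.shape)                     # one value per latitude; the singleton time axis is removed
f.close()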
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reduce2latlon_seasonal( mv, seasons=seasonsyr, vid=None ):\n # This differs from reduce2lat_seasonal only in the line \"axis_names =\"....\n # I need to think about how to structure the code so there's less cut-and-paste!\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n mvseas = seasons.climatology(mv)\n \n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n for axis in mvseas.getAxisList():\n if axis.getBounds() is None:\n axis._bounds_ = axis.genGenericBounds()\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n if avmv is None: return avmv\n avmv.id = vid\n if hasattr(mv,'units'): avmv.units = mv.units\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def reduce2levlat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n\n if timeax.units=='months':\n # Special check necessary for LEGATES obs data, because\n # climatology() won't accept this incomplete specification\n timeax.units = 'months since 0001-01-01'\n mvseas = seasons.climatology(mv)\n\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n avmv.id = vid\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n\n return avmv",
"def interpolateseasons(self):\n\n remainder = self.season - self.startseason\n f1 = 1.0 - remainder\n self.data = (self.startdata * f1) + (self.stopdata * remainder)",
"def warren_time_series(lat, lon, dates, variable='snow_depth'):\n\n my_func = {'snow_depth': snow_depth,\n 'swe': swe}\n\n #if not all([isinstance(d, dt.datetime) for d in dates]):\n # print ('Expects datetime objects')\n\n # If lat, lon are vectors, generate 2d grids\n # Need to add code to make sure x and y are DataArrays\n if (count_dims(lat) == 1):\n x, y = np.meshgrid(lon, lat)\n else:\n x, y = lon, lat\n \n if dates.size == 1:\n cube = my_func[variable](x,y,dates.dt.month)\n da = xr.DataArray(cube,\n coords={'lat': x, 'lon': y},\n dims=['lat', 'lon'])\n else:\n cube = [my_func[variable](x, y, m) for m in dates.dt.month.values]\n da = xr.concat(cube, dim='time')\n da['time'] = dates\n\n return da",
"def map_season(x, figsize=(8, 6), **kwargs):\n\n from pycmbs.mapping import map_plot\n\n nvals = len(x.data)\n if nvals == 12:\n year = True\n elif nvals == 4:\n year = False\n else:\n raise ValueError('Only data for 4-seasons or monthly data is supported!')\n\n #/// checks ///\n if x.data.ndim != 3:\n print x.data.ndim\n raise ValueError('only 3D data supported')\n\n if 'vmin' not in kwargs.keys():\n raise ValueError('vmin argument is obligatory for map_seasons()')\n if 'vmax' not in kwargs.keys():\n raise ValueError('vmax argument is obligatory for map_seasons()')\n\n if kwargs['vmin'] is None:\n raise ValueError('vmin MUST NOT be None!')\n if kwargs['vmax'] is None:\n raise ValueError('vmax MUST NOT be None!')\n\n #/// figure and axes\n if 'figure' in kwargs:\n f = kwargs['figure']\n else:\n f = plt.figure(figsize=figsize)\n\n if 'title' in kwargs:\n tit = kwargs.pop('title')\n else:\n tit = x.label\n\n if 'drawparallels' in kwargs:\n drawparallels = kwargs.pop('drawparallels')\n else:\n drawparallels = False\n\n if 'savefile' in kwargs:\n savefile = kwargs.pop('savefile')\n if '.nc' in savefile:\n savefile = savefile[:-3]\n else:\n savefile = None\n\n # plot\n if year:\n labels = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']\n else:\n labels = ['DJF', 'MAM', 'JJA', 'SON']\n\n # check dates\n if year:\n mo = 1\n for t in x.time:\n if x.num2date(t).month != mo:\n print x.num2date(t), mo\n raise ValueError('Invalid monthly sequence! Can not plot results!')\n mo += 1\n\n #/// in case that an overlay is provided, this needs to be processed for each timestep individually\n if 'overlay' in kwargs.keys():\n overlays = kwargs.pop('overlay')\n else:\n overlays = None\n\n for i in range(nvals):\n if year:\n ax = f.add_subplot(4, 3, i + 1)\n #if i % 3 == 2:\n if i > 8:\n show_colorbar = True\n else:\n show_colorbar = False\n else:\n ax = f.add_subplot(2, 2, i + 1)\n if 'show_colorbar' in kwargs:\n show_colorbar = kwargs.pop('show_colorbar')\n else:\n show_colorbar = True\n\n d = x.copy()\n d.data = x.data[i, :, :]\n d.label = labels[i]\n\n if overlays is None:\n overlay = None\n else:\n overlay = overlays[i, :, :]\n\n if savefile is not None:\n tmpoutname = savefile + '_' + labels[i]\n else:\n tmpoutname = None\n\n map_plot(d, ax=ax, show_colorbar=show_colorbar, overlay=overlay,\n savefile=tmpoutname, colorbar_orientation='horizontal',\n drawparallels=drawparallels, **kwargs)\n del d\n f.suptitle(tit, size=16)\n return f",
"def season_edges(t):\n offset = season_offset(t)\n yr_min = t.datetime.min().year\n left_frac = t.jyear.min() % yr_min\n if left_frac < offset:\n ex_left = 1\n else:\n ex_left = 0\n edges = np.arange(yr_min - ex_left + offset, t.jyear.max() + 1, 1.0)\n return astropy.time.Time(edges, format='jyear')",
"def reduce_time_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n timeax = timeAxis(mv)\n if timeax is None:\n print \"WARNING- no time axis in\",mv.id\n return None\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n mvseas = seasons.climatology(mv)\n if mvseas is None:\n print \"WARNING- cannot compute climatology for\",mv.id,seasons.seasons\n print \"...probably there is no data for times in the requested season.\"\n return None\n avmv = mvseas\n avmv.id = vid\n if hasattr(mv,'units'): avmv.units = mv.units\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def seasonal_avg(var_nc,the_season,lat_slice=None,lon_slice=None): \n the_season=np.array(the_season,dtype=np.int32)\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n else:\n if lat_slice.stop is None:\n num_lats=var_nc.shape[2]\n else:\n num_lats=lat_slice.stop - lat_slice.start\n if lon_slice.stop is None:\n num_lons=var_nc.shape[3]\n else:\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n num_years=var_nc.shape[0]//12\n\n for the_year in np.arange(0,num_years):\n the_slice=var_nc[the_season,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n the_season=the_season+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg",
"def convert_season(row): \n if row[\"month\"] >= 8:\n return int(row[\"season\"][:4])\n else:\n return int(row[\"season\"][-4:])",
"def replace_season(data,season):\n if not SeasonCharts.matchseason(season):\n raise SeasonCharts.SeasonError\n ## Check data format\n if test_rawdata(data):\n for cat,shows in data.items():\n for show in shows: show['season'] = season\n else:\n for show in data: show['season'] = season",
"def getseason(data):\n ## Season key is the most reliable\n season = data.get(\"season\")\n if season:\n ## Season key is an integer formatted \"YYS\" and is 2000-based (i.e.- 171 == 2017-Winter)\n season = str(season)\n year = int(f\"20{season[:2]}\")\n ## Anichart Season key is 1-indexed\n season = int(season[2]) - 1\n ## This should normally pass; if it consistently does not, we'll have to investigate why\n try: return SeasonCharts.buildseason(season,year)\n ## If something goes wrong, we'll try another method\n except: print(f\"Failed to parse season: {data['season']}\")\n ## Next, we'll iterate over rankings to try to determine the season/year\n ## There are multiple types of rankings based on season, year, and both combined,\n ## so we'll piece it together based on whatever we come across first\n season,year = None,None\n for ranking in data.get(\"rankings\",list()):\n ## Quicker exit (without just making this loop its own function)\n if season and year: continue\n ## We'll ignore stuff we've already gotten and assume that nothing in\n ## rankings contradicts eachother\n if not season:\n ## Defaults to None one way or another if it's not supplied\n season = ranking.get(\"season\")\n if not year: year = ranking.get(\"year\")\n ## Check if we made it\n if season and year:\n ## As above, this should always work out-of-the-box\n try: return SeasonCharts.buildseason(season,year)\n except: print(season,year)\n ## Welp, we're stumped...\n return None",
"def season(obs, season_gap=80., mjdCol='observationStartMJD'):\n\n # check wether season has already been estimated\n if 'season' in obs.dtype.names:\n return obs\n\n obs.sort(order=mjdCol)\n\n \"\"\"\n if len(obs) == 1:\n obs = np.atleast_1d(obs)\n obs = rf.append_fields([obs], 'season', [1.])\n return obs\n diff = obs[mjdCol][1:]-obs[mjdCol][:-1]\n\n flag = np.argwhere(diff > season_gap)\n if len(flag) > 0:\n seas = np.zeros((len(obs),), dtype=int)\n flag += 1\n seas[0:flag[0][0]] = 1\n for iflag in range(len(flag)-1):\n seas[flag[iflag][0]:flag[iflag+1][0]] = iflag+2\n seas[flag[-1][0]:] = len(flag)+1\n obs = rf.append_fields(obs, 'season', seas)\n else:\n obs = rf.append_fields(obs, 'season', [1]*len(obs))\n \"\"\"\n seasoncalc = np.ones(obs.size, dtype=int)\n\n if len(obs) > 1:\n diff = np.diff(obs[mjdCol])\n flag = np.where(diff > season_gap)[0]\n\n if len(flag) > 0:\n for i, indx in enumerate(flag):\n seasoncalc[indx+1:] = i+2\n\n obs = rf.append_fields(obs, 'season', seasoncalc)\n return obs",
"def manipulate_data(ds, var, predef_clim, predef_trnd, trn_yrs, all_yrs, \n apply_latw=True, apply_detrending=True, dropna=True):\n\n \n if((var=='SD')|(var=='sd')|(var=='snowc')): \n ds[var] = ds[var].where(ds[var]>=0, other=0.0)\n ds[var] = ds[var].where(ds[var]==0, other=1.0)\n #ds[var].values = Gauss_filter(ds[var].values, (0,3,3))\n \n \"\"\"\n if((var=='hgt')|(var=='z')|(var=='GPT')):\n months = ds.time.to_index().month; ssn_ends = (months==2)|(months==5)|(months==8)|(months==11)\n ds = ds.sel(time=ssn_ends)\n else: \n ds = ds.resample(time='3M').mean()\n \"\"\"\n \n ds = ds.resample(time='3M').mean()\n\n ds = ds.sel(time=slice(str(all_yrs[0])+'-01-01', str(all_yrs[-1])+'-12-31')) \n \n try: \n clim = predef_clim\n ds = ds.groupby('time.season') - clim\n print('Predefined climatology used')\n except:\n clim = ds.sel(time=slice(str(trn_yrs[0])+'-01-01', str(trn_yrs[-1])+'-12-31')).groupby('time.season').mean('time')\n ds = ds.groupby('time.season') - clim\n print('Climatology calculated from data')\n \n if(apply_latw): ds[var].values = lat_weighting(ds[var].values, \n ds.lat, ds.lon)\n if(dropna):\n ds = ds.stack(gridcell=('lat', 'lon')).dropna(dim='gridcell',how='any')\n else: \n ds = ds.stack(gridcell=('lat', 'lon')).fillna(0)\n \n \n trend_models = { }\n if(apply_detrending): \n ds = ds.load()\n for ssn in ('DJF', 'MAM', 'JJA', 'SON'):\n #ssn_idx = ds['time.season'] == ssn\n \n trn_idx = bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], trn_yrs))\n all_idx = bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], all_yrs))\n \n trn_x = np.array(ds.time[trn_idx].values.tolist()).reshape(-1,1)\n all_x = np.array(ds.time[all_idx].values.tolist()).reshape(-1,1)\n try:\n trend = predef_trnd[ssn].predict(all_x)\n trend_models[ssn] = predef_trnd[ssn]\n print('Predefined trend model used')\n except:\n #_, trend_model = define_trends(ds[var][trn_idx], trn_x)\n _, trend_model = define_trends(ds[var][all_idx], all_x)\n trend = trend_model.predict(all_x)\n trend_models[ssn] = trend_model\n print('Trends calculated from data')\n \n ds[var][all_idx] = ds[var][all_idx] - trend\n \n\n \n return ds, clim, trend_models",
"def set_season_time(season): \n if season == '2021-22':\n startdate = time.strptime('13-08-2021', '%d-%m-%Y')\n startdate = datetime.fromtimestamp(mktime(startdate))\n enddate = time.strptime('08-10-2021', '%d-%m-%Y')\n enddate = datetime.fromtimestamp(mktime(enddate))\n if season == '2020-21':\n startdate = time.strptime('12-08-2020', '%d-%m-%Y')\n startdate = datetime.fromtimestamp(mktime(startdate))\n enddate = time.strptime('26-07-2021', '%d-%m-%Y')\n enddate = datetime.fromtimestamp(mktime(enddate))\n if season == '2019-20':\n startdate = time.strptime('09-08-2019', '%d-%m-%Y')\n startdate = datetime.fromtimestamp(mktime(startdate))\n enddate = time.strptime('26-07-2020', '%d-%m-%Y')\n enddate = datetime.fromtimestamp(mktime(enddate))\n return startdate, enddate",
"def calcSeason(ra, time):\n # Reference RA and equinox to anchor ra/season reference - RA = 0 is overhead at autumnal equinox\n # autumn equinox 2014 happened on september 23 --> equinox MJD\n Equinox = 2456923.5 - 2400000.5\n # convert ra into 'days'\n dayRA = ra / 360 * 365.25\n firstSeasonBegan = Equinox + dayRA - 0.5 * 365.25\n seasons = (time - firstSeasonBegan) / 365.25\n # Set first season to 0\n seasons = seasons - np.floor(np.min(seasons))\n return seasons",
"def seasonal_series(t, y, edges=None, hard=False):\n t = ensure_tarray(t)\n if len(t) == 1:\n return [t], [y]\n season_ixs = season_indices(t, edges=edges, hard=hard)\n ts = []\n ys = []\n for season in season_ixs:\n ts.append(astropy.time.Time(t.jyear[season], format='jyear', scale=t.scale))\n ys.append(y[season])\n return ts, ys",
"def reduce2lat_old( mv, vid=None ):\n # >>> For now, I'm assuming that the only axes are time,lat,lon\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n # If I base another reduction function on this one, it's important to note that an average\n # in the lat direction will unavoidably need weights, because of the geometry.\n\n if vid==None:\n vid = 'reduced_'+mv.id\n time_axis, lat_axis, lon_axis = tllAxes( mv )\n\n mvta = timeave_old( mv )\n\n zm = numpy.zeros( mvta.shape[0] )\n for i in range(len(lat_axis)):\n for j in range(len(lon_axis)):\n zm[i] += mvta[i,j]\n zm[i] /= len(lon_axis)\n zmv = cdms2.createVariable( zm, axes=[lat_axis], id=vid )\n return zmv",
"def season_offset(t):\n t_sort = np.sort(t) # sorted copy\n delta = t_sort[-1] - t_sort[0]\n seconds_in_year = 365.25 * 86400\n Nyears = int(delta.sec / seconds_in_year)\n f = np.vectorize(lambda x: x.sec) # function to turn TimeDelta into float seconds\n dt = f(t_sort[1:] - t_sort[0:-1]) # ... use the above\n gaps = np.sort(dt)[-Nyears:] # use sorted copy\n median_gap = np.median(gaps)\n offset = median_gap / 2 # half-width of gap in seconds\n # Find index of gap closest to mean gap\n min_diff = np.inf\n i_median_gap = -1\n for i in range(dt.size):\n diff = np.abs(dt[i] - median_gap)\n if diff < min_diff:\n min_diff = diff\n i_median_gap = i\n before_gap = t_sort[i_median_gap]\n offset_frac = (before_gap.jyear + offset/seconds_in_year) % 1\n return offset_frac",
"def timeave_seasonal( mv, seasons=seasonsyr ):\n return seasons.climatology(mv)",
"def set_season(self, season):\n self.set_date_range(dt.date(season, 1, 1),\n dt.date(season, 12, 31))",
"def getSeason(date):\n\n date = validate.timestamp(date)\n day = date.dayofyear\n leap_year = int(date.is_leap_year)\n\n spring = numpy.arange(80, 172) + leap_year\n summer = numpy.arange(172, 264) + leap_year\n autumn = numpy.arange(264, 355) + leap_year\n\n if day in spring:\n season = \"spring\"\n elif day in summer:\n season = \"summer\"\n elif day in autumn:\n season = \"autumn\"\n else:\n season = \"winter\"\n\n return season",
"def set_season(date_obj):\n date_year = date_obj.year\n\n for key, val in SEASONS.items():\n start = datetime(year=date_year, month=val['start']['month'], day=val['start']['day'])\n end = datetime(year=date_year, month=val['end']['month'], day=val['end']['day'])\n if key == 'Winter':\n start_year = date_year - 1 if date_obj.month in [1, 2, 3] else date_year\n end_year = date_year + 1 if date_obj.month == 12 else date_year\n start = datetime(year=start_year, month=val['start']['month'], day=val['start']['day'])\n end = datetime(year=end_year, month=val['end']['month'], day=val['end']['day'])\n\n if start <= date_obj <= end:\n return key",
"def _parse_station(station) -> WeatherStation:\n with open(core_season_file_path) as file_handle:\n core_seasons = json.load(file_handle)\n ecodivisions = geopandas.read_file(ecodiv_shape_file_path)\n station_coord = Point(\n float(station['longitude']), float(station['latitude']))\n\n # hacky fix for station 447 (WATSON LAKE FS), which is in the Yukon\n # so ecodivision name has to be hard-coded\n if station['stationCode'] == '447':\n ecodiv_name = \"SUB-ARCTIC HIGHLANDS\"\n else:\n for index, row in ecodivisions.iterrows(): # pylint: disable=redefined-outer-name, unused-variable\n geom = row['geometry']\n if station_coord.within(geom):\n ecodiv_name = row['CDVSNNM']\n break\n return WeatherStation(\n code=station['stationCode'],\n name=station['displayLabel'],\n lat=station['latitude'],\n long=station['longitude'],\n ecodivision_name=ecodiv_name,\n core_season=core_seasons[ecodiv_name]['core_season'])",
"def seasonal_calc(t, y, func, edges=None):\n ts, ys = seasonal_series(t, y, edges=edges)\n t_means = [t.jyear.mean() for t in ts]\n t_means = astropy.time.Time(t_means, format='jyear', scale=t.scale)\n f_y = np.array([func(y) for y in ys])\n return t_means, f_y",
"def get_season_dates(date, season):\n start_date_start = date\n start_date_end = date\n if season == \"Spring\":\n start_date_start = date.replace(month=4)\n start_date_end = date.replace(month=6, day=30)\n elif season == \"Summer\":\n start_date_start = date.replace(month=7)\n start_date_end = date.replace(month=9, day=30)\n elif season == \"Fall\":\n start_date_start = date.replace(month=10)\n start_date_end = date.replace(month=12, day=31)\n elif season == \"Winter\":\n start_date_start = date.replace(month=1)\n start_date_end = date.replace(month=3, day=31)\n return start_date_start, start_date_end",
"def distributeSeason(self):\n i = 1\n for day in self.daylist:\n if i >= monthbeg[5] and i < monthbeg[9]: #june through SEpt as per SCE\n day.season = 'summer' #https://www.sce.com/residential/rates/Time-Of-Use-Residential-Rate-Plans\n i = i + 1\n else:\n day.season = 'winter'\n i = i+1",
"def speed_setting_season(self):\n if self.season == \"spring\":\n self.grid.speed_values[self.path_color] = 4\n self.grid.speed_values[(0, 0, 255)] = 0.1\n elif self.season == \"winter\":\n self.grid.speed_values[self.path_color] = 3\n self.grid.speed_values[(0, 0, 255)] = 0.1\n elif self.season == \"fall\":\n self.grid.speed_values[self.path_color] = 6\n elif self.season == \"summer\":\n pass",
"def segment_by_season(self, dt, winter = None, summer = None):\n if winter == None:\n winter = [10, 11, 12, 1, 2, 3]\n if summer == None:\n summer = [4, 5, 6, 7, 8, 9]\n\n if dt.month in winter:\n ind = []\n for date in self.historic_data.index:\n if date.month in winter:\n ind.append(date)\n segmented_data = self.historic_data.reindex(ind)\n else:\n ind = []\n for date in self.historic_data.index:\n if date.month in summer:\n ind.append(date)\n segmented_data = self.historic_data.reindex(ind)\n\n return RollingWindow(self.name, segmented_data, self.source_type,\n self.dayahead_data)",
"def season_folder(cls, season):\r\n\r\n\t\t'''# Google Drive downloads replace these characters automatically\r\n\t\t# I'm implementing this in the code as well for convenience\r\n\t\tseason = season.replace(\"&\", \"_\")\r\n\t\tseason = season.replace(\"'\", \"_\")'''\r\n\r\n\t\t# Folder names are ANSI versions of the season name\r\n\t\t# This is important in names like \"Lé Unicorn\" which get\r\n\t\t# converted incorrectly as folder names\r\n\t\tseason = season.encode(encoding=\"utf-8\")\r\n\t\tseason = season.decode(encoding=\"cp1252\", errors=\"ignore\")\r\n\r\n\t\treturn season",
"def seasonality(df):\n df_datetime = pd.DatetimeIndex(df.date_time)\n df[\"month\"] = df_datetime.month\n df = drop_cols(df, [\"date_time\"])\n\n return df"
] | [
"0.67101383",
"0.61974657",
"0.6165893",
"0.57036895",
"0.54976517",
"0.5443406",
"0.5429207",
"0.5275951",
"0.5232346",
"0.52280706",
"0.5203124",
"0.5159468",
"0.515358",
"0.51510376",
"0.5138082",
"0.50527817",
"0.50496626",
"0.5037915",
"0.49941966",
"0.4983268",
"0.4960697",
"0.4960681",
"0.49505866",
"0.49404982",
"0.4915507",
"0.49084416",
"0.48996848",
"0.48994577",
"0.48565385",
"0.48502243"
] | 0.6925929 | 0 |
as reduce2lat_seasonal, but both lat and lon axes are retained. | def reduce2latlon_seasonal( mv, seasons=seasonsyr, vid=None ):
# This differs from reduce2lat_seasonal only in the line "axis_names ="....
# I need to think about how to structure the code so there's less cut-and-paste!
if vid==None:
vid = 'reduced_'+mv.id
# Note that the averager function returns a variable with meaningless id.
# The climatology function returns the same id as mv, which we also don't want.
# The slicers in time.py require getBounds() to work.
# If it doesn't, we'll have to give it one.
# Setting the _bounds_ attribute will do it.
timeax = timeAxis(mv)
if timeax.getBounds()==None:
timeax._bounds_ = timeax.genGenericBounds()
mvseas = seasons.climatology(mv)
axes = allAxes( mv )
axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' and a.id!='time']
axes_string = '('+')('.join(axis_names)+')'
if len(axes_string)>2:
for axis in mvseas.getAxisList():
if axis.getBounds() is None:
axis._bounds_ = axis.genGenericBounds()
avmv = averager( mvseas, axis=axes_string )
else:
avmv = mvseas
if avmv is None: return avmv
avmv.id = vid
    avmv = delete_singleton_axis( avmv, vid='time' )
    if hasattr(mv,'units'): avmv.units = mv.units
return avmv | {
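
Similarly, a hedged sketch of calling reduce2latlon_seasonal with several seasons (file and variable names again hypothetical): with more than one season the climatology's time axis is not a singleton, so it should remain in the result alongside lat and lon.

import cdms2
import cdutil

f = cdms2.open('sample.nc')                               # hypothetical file name
ts = f('TS')                                              # hypothetical (time, lat, lon) variable
four_seasons = cdutil.times.Seasons(['DJF', 'MAM', 'JJA', 'SON'])
clim = reduce2latlon_seasonal(ts, seasons=four_seasons)
print(clim.shape)                                         # expected: (4, len(lat), len(lon))
f.close()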
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reduce2lat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n for ax in mv.getAxisList():\n if ax.getBounds() is None:\n ax._bounds_ = ax.genGenericBounds()\n timeax = timeAxis(mv)\n if timeax.units=='months':\n # Special check necessary for LEGATES obs data, because\n # climatology() won't accept this incomplete specification\n timeax.units = 'months since 0001-01-01'\n mvseas = seasons.climatology(mv)\n if mvseas is None:\n # Among other cases, this can happen if mv has all missing values.\n return None\n \n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n avmv.id = vid\n\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def reduce2levlat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n\n if timeax.units=='months':\n # Special check necessary for LEGATES obs data, because\n # climatology() won't accept this incomplete specification\n timeax.units = 'months since 0001-01-01'\n mvseas = seasons.climatology(mv)\n\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n avmv.id = vid\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n\n return avmv",
"def reduce2latlon( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' ]\n axes_string = '('+')('.join(axis_names)+')'\n for ax in axes:\n # The averager insists on bounds. Sometimes they don't exist, especially for obs.\n if ax.id!='lat' and ax.id!='lon' and not hasattr( ax, 'bounds' ):\n ax.setBounds( ax.genGenericBounds() )\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def correct_lon(ds):\n ds = ds.copy()\n x = ds['x'].data\n ds['x'].data = np.where(x < 0 , 360 + x, x)\n\n lon = ds['lon'].data\n ds['lon'].data = np.where(lon < 0 , 360 + lon, lon)\n \n ds = ds.sortby('x')\n return ds",
"def reduce2lat_old( mv, vid=None ):\n # >>> For now, I'm assuming that the only axes are time,lat,lon\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n # If I base another reduction function on this one, it's important to note that an average\n # in the lat direction will unavoidably need weights, because of the geometry.\n\n if vid==None:\n vid = 'reduced_'+mv.id\n time_axis, lat_axis, lon_axis = tllAxes( mv )\n\n mvta = timeave_old( mv )\n\n zm = numpy.zeros( mvta.shape[0] )\n for i in range(len(lat_axis)):\n for j in range(len(lon_axis)):\n zm[i] += mvta[i,j]\n zm[i] /= len(lon_axis)\n zmv = cdms2.createVariable( zm, axes=[lat_axis], id=vid )\n return zmv",
"def reduce2scalar_zonal_old( mv, latmin=-90, latmax=90, vid=None ):\n # For now, I'm assuming that the only axes are time,lat,lon - so that zm is a scalar.\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n if vid==None:\n vid = 'reduced_'+mv.id\n time,lat,lon = tllAxes(mv)\n if hasattr(mv.parent,'variables'):\n fil = mv.parent # mv is a fileVariable and fil is a file.\n lat_bnds = fil[lat.bounds]\n else:\n lataxis = latAxis(mv) # mv is a TransientVariable\n lat_bnds = lataxis._bounds_\n\n mvta = timeave_old( mv )\n\n # In computing the average, we use area weighting.\n # Sometimes the area is available in cell_measures, but for now I'll just use the backup method:\n # The area of a lonlat cell is R^2*delta(lon)*delta(sin(lat)).\n # With equally spaced lon, we don't need delta(lon) for weights.\n # I'll assume that lat,lon are in degrees, which is the only way I've ever seen them.\n wgtsum = 0\n zm = 0\n for i,lati in enumerate(lat):\n # The following test could be sped up a lot, because lat[i] is ordered...\n # >>> to do: partial overlaps\n if latmin<=lati and lati<latmax:\n latlo = lat_bnds[i,0]\n lathi = lat_bnds[i,1]\n wgti = sin(radians(lathi))-sin(radians(latlo))\n zi = 0.0\n for j in range(len(lon)):\n zi += mvta[i,j]\n zi *= wgti\n wgtsum += wgti*len(lon)\n zm += zi\n zm /= wgtsum\n # zm is a scalar, so createVariable gets no axes argument:\n zmv = cdms2.createVariable( zm, id=vid )\n return zmv",
"def reduce2scalar_zonal( mv, latmin=-90, latmax=90, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n ilat = None\n for i,ax in enumerate(axes):\n if ax.id=='lat': ilat = i\n # reduce size of lat axis to (latmin,latmax)\n # Let's home a direct search will be fast enough:\n lataxis = latAxis( mv )\n lmin = -1\n lmax = len(lataxis)\n if lataxis[0]>=latmin: lmin = 0\n if lataxis[-1]<=latmax: lmax = len(lataxis)-1\n if lmin==-1 or lmax==len(lataxis):\n for l,ax in enumerate(lataxis):\n if lmin==-1 and ax>=latmin: lmin = max( 0, l )\n if lmax==len(lataxis) and ax>=latmax: lmax = min( l, len(lataxis) )\n lataxis_shrunk = lataxis.subaxis(lmin,lmax)\n mv2shape = list(mv.shape)\n mv2shape[ilat] = lmax-lmin+1\n axes[ilat] = lataxis_shrunk\n mvd1 = numpy.delete( mv, slice(0,lmin), ilat )\n mvdata = numpy.delete( mvd1, slice(lmax-lmin,len(lataxis)-lmin), ilat )\n mv2 = cdms2.createVariable( mvdata, axes=axes )\n\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n avmv = averager( mv2, axis=axes_string )\n avmv.id = vid # Note that the averager function returns a variable with meaningless id.\n ammv.units = mv.units\n\n return avmv",
"def broadcast_lonlat(ds, verbose=True):\n if 'lon' not in ds.variables:\n ds.coords['lon'] = ds['x']\n if 'lat' not in ds.variables:\n ds.coords['lat'] = ds['y']\n \n if len(ds['lon'].dims) < 2:\n ds.coords[\"lon\"] = ds[\"lon\"] * xr.ones_like(ds[\"lat\"])\n if len(ds['lat'].dims) < 2:\n ds.coords[\"lat\"] = xr.ones_like(ds[\"lon\"]) * ds[\"lat\"]\n return ds",
"def interpolateseasons(self):\n\n remainder = self.season - self.startseason\n f1 = 1.0 - remainder\n self.data = (self.startdata * f1) + (self.stopdata * remainder)",
"def warren_time_series(lat, lon, dates, variable='snow_depth'):\n\n my_func = {'snow_depth': snow_depth,\n 'swe': swe}\n\n #if not all([isinstance(d, dt.datetime) for d in dates]):\n # print ('Expects datetime objects')\n\n # If lat, lon are vectors, generate 2d grids\n # Need to add code to make sure x and y are DataArrays\n if (count_dims(lat) == 1):\n x, y = np.meshgrid(lon, lat)\n else:\n x, y = lon, lat\n \n if dates.size == 1:\n cube = my_func[variable](x,y,dates.dt.month)\n da = xr.DataArray(cube,\n coords={'lat': x, 'lon': y},\n dims=['lat', 'lon'])\n else:\n cube = [my_func[variable](x, y, m) for m in dates.dt.month.values]\n da = xr.concat(cube, dim='time')\n da['time'] = dates\n\n return da",
"def switch_lons(ds, lon_name='lon'):\n ds = ds.copy()\n with dask.config.set(**{'array.slicing.split_large_chunks': True}):\n if np.any(ds.coords[lon_name] < 0): # if current coords are -180 to 180\n ds.coords[lon_name] = (ds.coords[lon_name] + 360) % 360\n else:\n ds.coords[lon_name] = (ds.coords[lon_name] + 180) % 360 - 180\n return ds.sortby(ds[lon_name])",
"def reduce2lat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def restrict_lat( mv, latmin, latmax ):\n if latmin==-90: latmin = -91 # just to make sure\n if latmax==90: latmax = 91\n\n # axes\n latax,idx = latAxis2(mv)\n if latax is None: return None\n imin = min( [i for i in range(len(latax)) if latax[i]>=latmin and latax[i]<=latmax ] )\n imax = max( [i for i in range(len(latax)) if latax[i]>=latmin and latax[i]<=latmax ] )\n newlatax = latax.subaxis( imin, imax+1 )\n # TO DO: use latax.bounds (if present) for newlatax.bounds\n # At the moment, I'm working with data for which latax.bounds doesn't exist.\n # At the moment, we don't need bounds. This would get us through if necessary:\n # newlatax.bounds = newlatax.genGenericBounds()\n newaxes = list( allAxes(mv) ) # shallow copy\n newaxes[idx] = newlatax\n\n # shrink the data to match the shrunk lat axis\n newmv_shape = list( mv.shape )\n newmv_shape[idx] = imax+1 - imin\n if imin>0:\n nd = numpy.delete( mv.data, slice(0,imin), idx ) # doesn't change mv\n else:\n nd = mv\n lenidx = nd.shape[idx]\n if lenidx > newmv_shape[idx]:\n newdata = numpy.delete( nd.data, slice(imax+1-imin,lenidx), idx )\n else:\n newdata = nd\n\n # new variable\n newmv = cdms2.createVariable( newdata, copy=True, axes=newaxes, id=mv.id )\n newmv.units = mv.units\n return newmv",
"def _fix_coords(x, y):\n if x.ndim != 1 or all(x < x[0]): # skip 2D arrays and monotonic backwards data\n return x, y\n lon1 = x[0]\n filter_ = x < lon1\n while filter_.sum():\n filter_ = x < lon1\n x[filter_] += 360\n return x, y",
"def normalize_longitude(lon: np.ndarray,\n lon_min: Optional[float] = -180.0) -> np.ndarray:\n return ((lon - lon_min) % 360) + lon_min",
"def msl_nn_srtm_interp(self, lon_lat):\n tiles = self.find_srtm_tiles(lon_lat)\n lon_lat_msl = np.zeros((lon_lat.shape[0], 3))\n lon_lat_msl[:, 0:2] = lon_lat\n for tile in set(tiles):\n otile = rasterio.open(tile, 'r')\n oimg = otile.read(1)\n idx = np.where(np.array(tiles) == tile)[0]\n pix = feat.geo_to_pix(\n otile.affine, lon_lat[idx, 0], lon_lat[idx, 1])\n pix = np.round(pix).astype(np.int)\n lon_lat_msl[idx, 2] = oimg[pix[:, 1], pix[:, 0]]\n otile.close()\n nan_mask = lon_lat_msl[:, 2] == -32768\n lon_lat_msl[nan_mask, 2] = np.NaN\n return lon_lat_msl",
"def replace_x_y_nominal_lat_lon(ds):\n ds = ds.copy()\n if 'x' in ds.dims and 'y' in ds.dims:\n \n nominal_y = ds.lat.mean('x')\n # extract the equatorial lat and take those lon values as nominal lon\n eq_ind = abs(ds.lat.mean('x')).load().argmin().data\n nominal_x = ds.lon.isel(y=eq_ind)\n ds.coords['x'].data = nominal_x.data\n ds.coords['y'].data = nominal_y.data\n\n ds = ds.sortby('x')\n ds = ds.sortby('y')\n \n else:\n warnings.warn('No x and y found in dimensions for source_id:%s. This likely means that you forgot to rename the dataset or this is the German unstructured model' %ds.attrs['source_id'])\n return ds",
"def shifts_projection(sc, clean):\n def shifts_projected(clean, axis):\n projected = clean.map(lambda x: x.mean(axis=axis)[:, :, np.newaxis])\n target = getTarget(projected, 30, 1)\n shifts = registerByPlane(sc, projected, target[:, :, np.newaxis], 10, False)\n return shifts[:, :, 0]\n\n # shifts_xy = shifts_projected(clean, 2)\n shifts_xz = shifts_projected(clean, 1)\n shifts_yz = shifts_projected(clean, 0)\n\n # x_shifts = np.mean(np.stack((shifts_xz[:, 0], shifts_xy[:, 0])), axis=0)\n z_shifts = np.mean(np.stack((shifts_xz[:, 1], shifts_yz[:, 1])), axis=0)\n # y_shifts = np.mean(np.stack((shifts_yz[:, 0], shifts_xy[:, 1])), axis=0)\n plt.figure()\n plt.plot(shifts_xz[:, 1])\n plt.plot(shifts_yz[:, 1])\n plt.plot(z_shifts)\n plt.title('Z')\n # plt.figure()\n # plt.plot(shifts_xz[:, 0])\n # plt.plot(shifts_xy[:, 0])\n # plt.plot(x_shifts)\n # plt.title('X')\n # plt.figure()\n # plt.plot(shifts_yz[:, 0])\n # plt.plot(shifts_xy[:, 1])\n # plt.plot(y_shifts)\n # plt.title('Y')\n # shifts_all = np.stack((x_shifts, y_shifts, z_shifts))\n\n def initReg(kv):\n from scipy.ndimage.interpolation import shift\n index, volume = kv\n current_shift = (0, 0, -1 * z_shifts[int(index[0])])\n shifted = shift(volume, current_shift)\n return shifted.astype(np.int16)\n\n reg = clean.map(initReg, with_keys=True, value_shape=clean.shape[1:], dtype=np.int16)\n reg.cache()\n reg.count()\n return reg",
"def proj(self,lon,lat):\n x, y = self(np.atleast_1d(lon),np.atleast_1d(lat))\n x[x > 1e29] = None\n y[y > 1e29] = None\n #return np.ma.array(x,mask=x>1e2),np.ma.array(y,mask=y>1e2)\n return x, y",
"def map_season(x, figsize=(8, 6), **kwargs):\n\n from pycmbs.mapping import map_plot\n\n nvals = len(x.data)\n if nvals == 12:\n year = True\n elif nvals == 4:\n year = False\n else:\n raise ValueError('Only data for 4-seasons or monthly data is supported!')\n\n #/// checks ///\n if x.data.ndim != 3:\n print x.data.ndim\n raise ValueError('only 3D data supported')\n\n if 'vmin' not in kwargs.keys():\n raise ValueError('vmin argument is obligatory for map_seasons()')\n if 'vmax' not in kwargs.keys():\n raise ValueError('vmax argument is obligatory for map_seasons()')\n\n if kwargs['vmin'] is None:\n raise ValueError('vmin MUST NOT be None!')\n if kwargs['vmax'] is None:\n raise ValueError('vmax MUST NOT be None!')\n\n #/// figure and axes\n if 'figure' in kwargs:\n f = kwargs['figure']\n else:\n f = plt.figure(figsize=figsize)\n\n if 'title' in kwargs:\n tit = kwargs.pop('title')\n else:\n tit = x.label\n\n if 'drawparallels' in kwargs:\n drawparallels = kwargs.pop('drawparallels')\n else:\n drawparallels = False\n\n if 'savefile' in kwargs:\n savefile = kwargs.pop('savefile')\n if '.nc' in savefile:\n savefile = savefile[:-3]\n else:\n savefile = None\n\n # plot\n if year:\n labels = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']\n else:\n labels = ['DJF', 'MAM', 'JJA', 'SON']\n\n # check dates\n if year:\n mo = 1\n for t in x.time:\n if x.num2date(t).month != mo:\n print x.num2date(t), mo\n raise ValueError('Invalid monthly sequence! Can not plot results!')\n mo += 1\n\n #/// in case that an overlay is provided, this needs to be processed for each timestep individually\n if 'overlay' in kwargs.keys():\n overlays = kwargs.pop('overlay')\n else:\n overlays = None\n\n for i in range(nvals):\n if year:\n ax = f.add_subplot(4, 3, i + 1)\n #if i % 3 == 2:\n if i > 8:\n show_colorbar = True\n else:\n show_colorbar = False\n else:\n ax = f.add_subplot(2, 2, i + 1)\n if 'show_colorbar' in kwargs:\n show_colorbar = kwargs.pop('show_colorbar')\n else:\n show_colorbar = True\n\n d = x.copy()\n d.data = x.data[i, :, :]\n d.label = labels[i]\n\n if overlays is None:\n overlay = None\n else:\n overlay = overlays[i, :, :]\n\n if savefile is not None:\n tmpoutname = savefile + '_' + labels[i]\n else:\n tmpoutname = None\n\n map_plot(d, ax=ax, show_colorbar=show_colorbar, overlay=overlay,\n savefile=tmpoutname, colorbar_orientation='horizontal',\n drawparallels=drawparallels, **kwargs)\n del d\n f.suptitle(tit, size=16)\n return f",
"def wgs84_to_mercator(df, lon, lat):\n k = 6378137\n df[\"x\"] = df[lon] * (k * np.pi/180.0)\n df[\"y\"] = np.log(np.tan((90 + df[lat]) * np.pi/360.0)) * k\n return df",
"def safeProj(proj, lon, lat):\n x, y = proj(np.atleast_1d(lon),np.atleast_1d(lat))\n x[x > 1e29] = None\n y[y > 1e29] = None\n #return np.ma.array(x,mask=x>1e2),np.ma.array(y,mask=y>1e2)\n return x, y",
"def undo_mercator_project(x,y):\n lon = y*np.pi\n ex = np.exp(4*np.pi*x)\n lat = np.arcsin((ex - 1)/(ex +1 ))\n lon = lon*360/2/np.pi\n lat = lat*360 /2/np.pi\n return lon, lat",
"def manipulate_data(ds, var, predef_clim, predef_trnd, trn_yrs, all_yrs, \n apply_latw=True, apply_detrending=True, dropna=True):\n\n \n if((var=='SD')|(var=='sd')|(var=='snowc')): \n ds[var] = ds[var].where(ds[var]>=0, other=0.0)\n ds[var] = ds[var].where(ds[var]==0, other=1.0)\n #ds[var].values = Gauss_filter(ds[var].values, (0,3,3))\n \n \"\"\"\n if((var=='hgt')|(var=='z')|(var=='GPT')):\n months = ds.time.to_index().month; ssn_ends = (months==2)|(months==5)|(months==8)|(months==11)\n ds = ds.sel(time=ssn_ends)\n else: \n ds = ds.resample(time='3M').mean()\n \"\"\"\n \n ds = ds.resample(time='3M').mean()\n\n ds = ds.sel(time=slice(str(all_yrs[0])+'-01-01', str(all_yrs[-1])+'-12-31')) \n \n try: \n clim = predef_clim\n ds = ds.groupby('time.season') - clim\n print('Predefined climatology used')\n except:\n clim = ds.sel(time=slice(str(trn_yrs[0])+'-01-01', str(trn_yrs[-1])+'-12-31')).groupby('time.season').mean('time')\n ds = ds.groupby('time.season') - clim\n print('Climatology calculated from data')\n \n if(apply_latw): ds[var].values = lat_weighting(ds[var].values, \n ds.lat, ds.lon)\n if(dropna):\n ds = ds.stack(gridcell=('lat', 'lon')).dropna(dim='gridcell',how='any')\n else: \n ds = ds.stack(gridcell=('lat', 'lon')).fillna(0)\n \n \n trend_models = { }\n if(apply_detrending): \n ds = ds.load()\n for ssn in ('DJF', 'MAM', 'JJA', 'SON'):\n #ssn_idx = ds['time.season'] == ssn\n \n trn_idx = bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], trn_yrs))\n all_idx = bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], all_yrs))\n \n trn_x = np.array(ds.time[trn_idx].values.tolist()).reshape(-1,1)\n all_x = np.array(ds.time[all_idx].values.tolist()).reshape(-1,1)\n try:\n trend = predef_trnd[ssn].predict(all_x)\n trend_models[ssn] = predef_trnd[ssn]\n print('Predefined trend model used')\n except:\n #_, trend_model = define_trends(ds[var][trn_idx], trn_x)\n _, trend_model = define_trends(ds[var][all_idx], all_x)\n trend = trend_model.predict(all_x)\n trend_models[ssn] = trend_model\n print('Trends calculated from data')\n \n ds[var][all_idx] = ds[var][all_idx] - trend\n \n\n \n return ds, clim, trend_models",
"def seasonal_avg(var_nc,the_season,lat_slice=None,lon_slice=None): \n the_season=np.array(the_season,dtype=np.int32)\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n else:\n if lat_slice.stop is None:\n num_lats=var_nc.shape[2]\n else:\n num_lats=lat_slice.stop - lat_slice.start\n if lon_slice.stop is None:\n num_lons=var_nc.shape[3]\n else:\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n num_years=var_nc.shape[0]//12\n\n for the_year in np.arange(0,num_years):\n the_slice=var_nc[the_season,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n the_season=the_season+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg",
"def reduce2levlat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def satReader(directory,month,latmin,latmax,lonmin,lonmax):\n \n ### Enter filename\n filename = 'cs2icesat_regrid_mar_20042015.nc' \n \n ### Month/Years extracted\n dateyr = now.year \n datemo = datetime.date(dateyr,month+1,1).strftime('%B')\n \n ### Retrieve data\n data = Dataset(directory + filename)\n lat = data.variables['lat'][:]\n lon = data.variables['lon'][:]\n thkn = data.variables['thick'][:]\n data.close()\n \n ### Calculate lat/lon region\n xmask = (lat > latmin) & (lat < latmax)\n ymask = (lon > lonmin) & (lon < lonmax)\n \n mask = xmask[:] & ymask[:]\n latvals = np.where(mask == True)[0]\n lonvals = np.where(mask == True)[1]\n latvals = np.unique(latvals)\n lonvals = np.unique(lonvals)\n \n thk = thkn[:,latvals,:]\n thk = thk[:,:,lonvals]\n \n lat = lat[latvals,:]\n lat = lat[:,lonvals]\n lon = lon[latvals,:]\n lon = lon[:,lonvals]\n\n grid = '---> [[%s to %s N, %s to %s E]]' % (latmin,latmax,lonmin,lonmax)\n print 'Completed: Satellite data read (%s)!' % datemo, grid\n \n return lat,lon,thk",
"def extract_seasonal_component(original_ts, ppy):\n \"\"\"\n # === get in-sample data\n original_ts = original_ts[:-out_of_sample]\n \"\"\"\n if seasonality_test(original_ts, ppy):\n # print(\"seasonal\")\n # ==== get moving averages\n ma_ts = moving_averages(original_ts, ppy)\n\n # ==== get seasonality indices\n le_ts = original_ts * 100 / ma_ts\n le_ts = np.hstack((le_ts, np.full((ppy - (len(le_ts) % ppy)), np.nan)))\n le_ts = np.reshape(le_ts, (-1, ppy))\n si = np.nanmean(le_ts, 0)\n norm = np.sum(si) / (ppy * 100)\n si = si / norm\n else:\n # print(\"NOT seasonal\")\n si = np.full(ppy, 100)\n return si",
"def reverse(self, lon, lat):",
"def timeave_seasonal( mv, seasons=seasonsyr ):\n return seasons.climatology(mv)"
] | [
"0.7120988",
"0.65913016",
"0.5912345",
"0.58565855",
"0.5846651",
"0.5827005",
"0.5757517",
"0.5742788",
"0.56260955",
"0.5573889",
"0.5551024",
"0.5517785",
"0.5454073",
"0.5447219",
"0.5380435",
"0.5367206",
"0.53613245",
"0.5329884",
"0.52797866",
"0.527883",
"0.5270271",
"0.52385974",
"0.52133644",
"0.52067035",
"0.51973885",
"0.5185496",
"0.5161347",
"0.5072558",
"0.5060495",
"0.50463194"
] | 0.7579458 | 0 |
Input is a level-dependent variable mv and a level slev to select. slev is an instance of udunits, thus it has a value and a units attribute. This function will create and return a new variable mvs without a level axis. The values of mvs correspond to the values of mv with level set to slev. Interpolation isn't done yet, but is planned! | def select_lev( mv, slev ):
levax = levAxis(mv)
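# levAxis picks out the vertical (level) axis of mv.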
# Get ig, the first index for which levax[ig]>slev
# Assume that levax values are monotonic.
dummy,slev = reconcile_units( levax, slev ) # new slev has same units as levax
if levax[0]<=levax[-1]:
ids = numpy.where( levax[:]>=slev.value ) # assumes levax values are monotonic increasing
else:
ids = numpy.where( levax[:]<=slev.value ) # assumes levax values are monotonic decreasing
if ids is None or len(ids[0])==0: # numpy.where returns a tuple of index arrays; an empty first array means no level matched
ig = len(levax)-1
else:
ig = ids[0][0]
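# ig is the selected index along the level axis; the corresponding slab of mv is extracted below.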
# Crude first cut: don't interpolate, just return a value
if levax == mv.getAxisList()[0]:
mvs = cdms2.createVariable( mv[ig:ig+1,...], copy=1 ) # why ig:ig+1 rather than ig? bug workaround.
elif levax == mv.getAxisList()[1]:
mvs = cdms2.createVariable( mv[:,ig:ig+1,...], copy=1 )
else:
print "ERROR, select_lev() does not support level axis except as first or second dimentions"
return None
return mvs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def levvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lev_axis = levAxis(mv)\n #levmv = mv.clone() # good if mv has only a lev axis\n #levmv[:] = lev_axis[:]\n levmv = cdms2.createVariable( lev_axis[:], axes=[lev_axis], id='lev',\n attributes={'units':lev_axis.units},\n copy=True )\n return levmv",
"def levvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lev_axis1 = levAxis(mv1)\n lev_axis2 = levAxis(mv2)\n if len(lev_axis1)<=len(lev_axis2):\n lev_axis = lev_axis1\n mv = mv1\n else:\n lev_axis = lev_axis2\n mv = mv2\n levmv = cdms2.createVariable( lev_axis[:], axes=[lev_axis], id='levels',\n attributes={'units':lev_axis.units} )\n return levmv",
"def uvsChangeLevel(mot, oldlev, newlev):\n\n ####################\n # TODO - part 3\n diff = abs(oldlev - newlev)\n if oldlev > newlev:\n factor = diff * 2\n elif oldlev < newlev:\n factor = 1 / (diff * 2)\n else:\n factor = 1\n\n nmot = mot * factor\n ####################\n return nmot",
"def reduce2levlat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def scale_u_and_v(u, v, level, pyr):\n # TODO: Your code here\n image = pyr[level-1]\n expanded_u = ps4.expand_image(u)\n expanded_v = ps4.expand_image(v)\n scaled_u = expanded_u * 2\n scaled_v = expanded_v * 2\n if image.shape[0] == scaled_u.shape[0] - 1:\n scaled_u = scaled_u[:-1, :]\n if image.shape[1] == scaled_u.shape[1] - 1:\n scaled_u = scaled_u[:, :-1]\n if image.shape[0] == scaled_v.shape[0] - 1:\n scaled_v = scaled_v[:-1, :]\n if image.shape[1] == scaled_v.shape[1] - 1:\n scaled_v = scaled_v[:, :-1]\n return scaled_u, scaled_v",
"def heightvar( mv ):\n if mv is None: return None\n lev_axis = levAxis(mv)\n heights = 0.001 * press2alt.press2alt( pressures_in_mb(lev_axis) ) # 1000 m = 1 km\n heightmv = cdms2.createVariable( heights, axes=[lev_axis], id=mv.id,\n attributes={'units':\"km\"} )\n return heightmv",
"def _msqrd_v_l_l(s, t, model: SingleRhNeutrinoModel, ml: float):\n mx = model.mx\n u = 0.5 * np.tan(2 * model.theta)\n return (\n -2\n * u**2\n * GF**2\n * (\n 2 * ml**4 * (1 + 4 * SW**2 + 8 * SW**4)\n + 2 * ml**2 * (mx**2 - s - 2 * (1 + 4 * SW**2 + 8 * SW**4) * t)\n + (1 + 4 * SW**2 + 8 * SW**4)\n * (s**2 + 2 * s * t + 2 * t**2 - mx**2 * (s + 2 * t))\n )\n )",
"def reduce2levlat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n\n if timeax.units=='months':\n # Special check necessary for LEGATES obs data, because\n # climatology() won't accept this incomplete specification\n timeax.units = 'months since 0001-01-01'\n mvseas = seasons.climatology(mv)\n\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n avmv.id = vid\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n\n return avmv",
"def latvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lat_axis = latAxis(mv)\n #latmv = mv.clone() # good if mv has only a lat axis\n #latmv[:] = lat_axis[:]\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units},\n copy=True )\n return latmv",
"def lonvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lon_axis = lonAxis(mv)\n latmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units},\n copy=True )\n return latmv",
"def test_set_uv(self):\n s = State(substance=\"water\")\n s.uv = Q_(1013250.0, \"J/kg\"), Q_(0.4772010021515822, \"m**3/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.uv[0], Q_(1013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.uv[1], Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore",
"def vol_from_var(self, level: float = 0.95, interpolation: str = 'lower') -> float:\n return float(-np.sqrt(self.periods_in_a_year) *\n self.var_down_func(level, interpolation=interpolation) / ss.norm.ppf(level))",
"def vol_from_var_func(self, level: float = 0.95, months_from_last: int = None,\n from_date: dt.date = None, to_date: dt.date = None, interpolation: str = 'lower',\n drift_adjust: bool = False, periods_in_a_year_fixed: int = None) -> float:\n earlier, later = self.calc_range(months_from_last, from_date, to_date)\n if periods_in_a_year_fixed:\n time_factor = periods_in_a_year_fixed\n else:\n fraction = (later - earlier).days / 365.25\n how_many = self.tsdf.loc[earlier:later].count(numeric_only=True)\n time_factor = how_many / fraction\n if drift_adjust:\n return float((-np.sqrt(time_factor) / ss.norm.ppf(level)) *\n (self.var_down_func(level, months_from_last, from_date, to_date, interpolation) -\n self.tsdf.loc[earlier:later].pct_change().sum() /\n len(self.tsdf.loc[earlier:later].pct_change())))\n else:\n return float(-np.sqrt(time_factor) *\n self.var_down_func(level, months_from_last, from_date, to_date,\n interpolation) / ss.norm.ppf(level))",
"def test_set_vu(self):\n s = State(substance=\"water\")\n s.vu = Q_(0.4772010021515822, \"m**3/kg\"), Q_(1013250.0, \"J/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.vu[0], Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.vu[1], Q_(1013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore",
"def test_set_vs(self):\n s = State(substance=\"water\")\n s.vs = Q_(0.4772010021515822, \"m**3/kg\"), Q_(3028.9867985920914, \"J/(kg*K)\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.vs[0], Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.vs[1], Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore",
"async def uv_level(self, uvi):\n\n if uvi is None:\n return \"no-data\"\n\n if uvi >= 10.5:\n return self._translations[\"uv\"][\"extreme\"]\n if uvi >= 7.5:\n return self._translations[\"uv\"][\"very-high\"]\n if uvi >= 5.5:\n return self._translations[\"uv\"][\"high\"]\n if uvi >= 2.5:\n return self._translations[\"uv\"][\"moderate\"]\n if uvi > 0:\n return self._translations[\"uv\"][\"low\"]\n \n return self._translations[\"uv\"][\"none\"]",
"def _newMV(self, newValue=None, *, dtype: np.dtype = None) -> 'MultiVector':\n if newValue is None and dtype is None:\n raise TypeError(\"Must specify either a type or value\")\n\n return self.__class__(self.layout, newValue, dtype=dtype)",
"def volume(mid, vols):\n bt = mid.ticks_per_beat\n trk = MidiTrack()\n trk.name = \"Volume variation\"\n trk.append(Message(\"control_change\",\n control=7,\n time=0,\n value=vols[0]))\n\n for i, vol in enumerate(vols):\n trk.append(Message(\"control_change\",\n control=7,\n time=bt,\n value=vol))\n\n mid.tracks.append(trk)\n return mid",
"def update_units(self):\n unit_var_value = self.view.vars['unit'].get()\n if unit_var_value == 'm3ph':\n self.minran_u_label.config(text='m³/h')\n self.maxran_u_label.config(text='m³/h')\n self.points_tview.heading('vflow', text='Przepływ [m³/h]', anchor=tk.CENTER)\n elif unit_var_value == 'lps':\n self.minran_u_label.config(text='l/s')\n self.maxran_u_label.config(text='l/s')\n self.points_tview.heading('vflow', text='Przepływ [l/s]', anchor=tk.CENTER)\n self.view.vars['pump_eff_min'].convert_unit(unit_var_value)\n self.view.vars['pump_eff_max'].convert_unit(unit_var_value)\n self.view.vars['pump_characteristic'].convert_unit(unit_var_value)",
"def rkm_MS_pathvar(models, s_span, X):\n W_dst_var=np.ndarray(models.shape[0],np.float64)\n for i in range(models.shape[0]):\n W = models[i,:,:]\n res = W[1:,:]-W[0:-1,:]\n W_dst=np.linalg.norm(res, axis=1)\n W_dst_var[i] = np.var(W_dst)\n\n return W_dst_var",
"def interp_vel(z, u, v, sva, zi, pPsva, return_diagnostics=False):\n\n svac, ir, ic, w1, w2 = interp_quantity(z, sva, zi, pPsva, True)\n\n um = u[ir, ic]\n vm = v[ir, ic]\n svam = sva[ir, ic]\n\n theta = np.arctan2(vm[:, 0] - vm[:, 1], um[:, 0] - um[:, 1])\n\n ur = np.empty_like(um)\n ur[:, 0] = um[:, 0]*np.cos(theta) + vm[:, 0]*np.sin(theta)\n ur[:, 1] = um[:, 1]*np.cos(theta) + vm[:, 1]*np.sin(theta)\n vr = -um[:, 0]*np.sin(theta) + vm[:, 0]*np.cos(theta)\n\n sc = (ur[:, 0]*(svac - svam[:, 1]) + ur[:, 1]*(svam[:, 0] - svac))\n sc /= (svam[:, 0] - svam[:, 1])\n\n uc = sc*np.cos(theta) - vr*np.sin(theta)\n vc = sc*np.sin(theta) + vr*np.cos(theta)\n\n if return_diagnostics:\n return uc, vc, ir, ic, w1, w2\n else:\n return uc, vc",
"def _msqrd_l_u_d(s, t, model: SingleRhNeutrinoModel, mu, md, ml, ckm):\n mx = model.mx\n u = 0.5 * np.tan(2 * model.theta)\n return (\n 16.0\n * GF**2\n * (ml**2 + mx**2 - s - t)\n * (s + t - mu**2 - md**2)\n * u**2\n * abs(ckm) ** 2\n )",
"def _v_to_lbs(slope,offset,V):\n return V # without slope and offset data, keep this a stub",
"def duv(self, u, v):\n return np.column_stack(\n [_.ev(u, v, dx=1, dy=1) for _ in (self.splz, self.sply, self.splx)])",
"def test_set_sv(self):\n s = State(substance=\"water\")\n s.sv = Q_(3028.9867985920914, \"J/(kg*K)\"), Q_(0.4772010021515822, \"m**3/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.sv[0], Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.sv[1], Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore",
"def randomMV(\n layout, min=-2.0, max=2.0, grades=None, mvClass=MultiVector,\n uniform=None, n=1, normed=False):\n\n if n > 1:\n # return many multivectors\n return [randomMV(layout=layout, min=min, max=max, grades=grades,\n mvClass=mvClass, uniform=uniform, n=1,\n normed=normed) for k in range(n)]\n\n if uniform is None:\n uniform = np.random.uniform\n\n if grades is None:\n mv = mvClass(layout, uniform(min, max, (layout.gaDims,)))\n else:\n if isinstance(grades, int):\n grades = [grades]\n newValue = np.zeros((layout.gaDims,))\n for i in range(layout.gaDims):\n if layout.gradeList[i] in grades:\n newValue[i] = uniform(min, max)\n mv = mvClass(layout, newValue)\n\n if normed:\n mv = mv.normal()\n\n return mv",
"def voltmeter_settings(self, scalefactor, offset):\n if scalefactor is not None and offset is not None:\n if self._request('SM', str(scalefactor), str(offset))[0]:\n return scalefactor, offset\n else:\n done, data = self._request('GM')\n if done:\n return int(data[0]), int(data[1])\n\n raise EvseError",
"def modality(v):\n \n s = st.skew(vel, bias=False)\n k = st.kurtosis(vel, bias=False)\n m = (1+s**2)/(3+k**2)\n return s, k, m",
"def lonvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lon_axis1 = lonAxis(mv1)\n lon_axis2 = lonAxis(mv2)\n if len(lon_axis1)<=len(lon_axis2):\n lon_axis = lon_axis1\n mv = mv1\n else:\n lon_axis = lon_axis2\n mv = mv2\n lonmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units} )\n return lonmv",
"def import_velocity(self,fname,units='m'):\n velmod=pd.read_csv(fname,sep=' ',names=['depth','vp','vs','rho'])\n \n if units =='m':\n \n velmod.depth=velmod.depth/1000\n velmod.vp=velmod.vp/1000\n velmod.vs=velmod.vs/1000\n velmod.rho=velmod.rho/1000 \n \n self.velocity_model['depth']=velmod.depth.values\n self.velocity_model['vp']=velmod.vp.values\n self.velocity_model['vs']=velmod.vs.values\n self.velocity_model['rho']=velmod.rho.values"
] | [
"0.67565304",
"0.6339013",
"0.5813282",
"0.55218333",
"0.55123705",
"0.54566956",
"0.54150814",
"0.5345006",
"0.53043866",
"0.52986836",
"0.5298433",
"0.5282433",
"0.5152977",
"0.5144597",
"0.50879806",
"0.50478345",
"0.50315887",
"0.5008072",
"0.5002643",
"0.49997583",
"0.49930182",
"0.49737152",
"0.4970707",
"0.4952324",
"0.49502742",
"0.49151593",
"0.49150982",
"0.49031606",
"0.48963678",
"0.48896614"
] | 0.77553666 | 0 |
returns a transient variable which is dimensioned along the lat axis but whose values are the latitudes | def latvar( mv ):
# First get the axis. This is probably not as general as we'll need...
if mv is None: return None
lat_axis = latAxis(mv)
#latmv = mv.clone() # good if mv has only a lat axis
#latmv[:] = lat_axis[:]
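# Build a transient variable on the lat axis whose data are the latitude values themselves.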
latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',
attributes={'units':lat_axis.units},
copy=True )
return latmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def latlons(self):\n\t\t\n\t\t# First check we have a grid feature type\n\t\tif self.featuretype in ['Grid', 'GridSeries']:\n\n\t\t\tlatvar = self.latitude_variable\n\t\t\tlonvar = self.longitude_variable\n\n\t\t\tlatdims = self.coordinates_mapping['latitude']['map']\n\t\t\tlondims = self.coordinates_mapping['longitude']['map']\n\n\t\t\t# Create latitude and longitude subset slices from the field subset slices\n\t\t\tlat_subset = []\n\t\t\tfor dim in latdims:\n\t\t\t\tlat_subset.append(self._subset[dim])\n\t\t\t\n\t\t\tlon_subset = []\n\t\t\tfor dim in londims:\n\t\t\t\tlon_subset.append(self._subset[dim])\n\n\t\t\t# Then check if latitude and longitude variables are 1D\n\t\t\tif len(latvar.shape) == 1 and len(lonvar.shape) == 1:\n\t\t\t\tlatvar_2d = latvar[lat_subset].reshape((-1,1)).repeat(lonvar.shape[0], axis=1)\n\t\t\t\tlonvar_2d = lonvar[lon_subset].reshape((-1,1)).transpose().repeat(latvar.shape[0], axis=0)\n\t\t\t\treturn (latvar_2d, lonvar_2d)\n\t\t\t\n\t\t\t# for 2D variables its easy, just return the variable data\n\t\t\telif len(latvar.shape) >= 2 and len(lonvar.shape) >= 2:\n\t\t\t\t\n\t\t\t\t# Handle the WRF case where lat/lon variables are 3D with time as first dimension\n\t\t\t\tif len(latvar.shape) == 3 and len(lonvar.shape) == 3:\n\t\t\t\t\treturn (latvar[0,lat_subset], lonvar[0,lon_subset])\n\t\t\t\telse:\n\t\t\t\t\treturn (latvar[lat_subset], lonvar[lon_subset])\n\t\t\t\n\t\t\t# otherwise, we can't do it!\n\t\t\telse:\n\t\t\t\treturn (None, None)\n\t\t\n\t\telif self.featuretype == 'PointSeries':\n\t\t\treturn (self.latitude_variable[:], self.longitude_variable[:])",
"def lat_lons(self):",
"def vector(self):\n return np.array([self.lat, self.lng])",
"def spatial_var(map_):\n expx, expy = spatial_expval(map_)\n map_ = map_ / np.sum(map_)\n x, y = np.meshgrid(np.arange(map_.shape[1]), np.arange(map_.shape[0]))\n\n return np.sum(map_ * ((x - expx) ** 2 + (y - expy) ** 2))",
"def lonvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lon_axis = lonAxis(mv)\n latmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units},\n copy=True )\n return latmv",
"def get_latitude0(self):\n return self.B",
"def geo(self):\n return vec2geo_linear_signed(self)",
"def lat(self):\n return self['lat']",
"def latvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lat_axis1 = latAxis(mv1)\n lat_axis2 = latAxis(mv2)\n if len(lat_axis1)<=len(lat_axis2):\n lat_axis = lat_axis1\n mv = mv1\n else:\n lat_axis = lat_axis2\n mv = mv2\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units} )\n return latmv",
"def get_lats(self, variables):\n\n return self._get_variable(variables, self.LATS_VARIABLE)",
"def spatial(self):",
"def lat(self):\n return self._grid.lat[self._lat_indices]",
"def _getlats(self):\n lats = 90. - np.degrees(self.zeros)\n return lats",
"def spatial(self):\n return self.spatial_x, self.spatial_y, self.spatial_data",
"def location(self):\n return np.array((self.latitude, self.longitude))",
"def location(self):\n return np.array((self.latitude, self.longitude))",
"def proj(self,lon,lat):\n x, y = self(np.atleast_1d(lon),np.atleast_1d(lat))\n x[x > 1e29] = None\n y[y > 1e29] = None\n #return np.ma.array(x,mask=x>1e2),np.ma.array(y,mask=y>1e2)\n return x, y",
"def latitude(self):\n return self.coordinates[0]",
"def spatial(self):\n return self._spatial",
"def latitude(self, lat):\n data = float(lat[1:])\n if lat[0] == \"N\":\n return data\n else:\n return -data",
"def project(self, (lng, lat)):\n x = lng * DEG_TO_RAD\n lat = max(min(MAX_LATITUDE, lat), -MAX_LATITUDE)\n y = lat * DEG_TO_RAD\n y = math.log(math.tan((math.pi / 4) + (y / 2)))\n return (x*EARTH_RADIUS, y*EARTH_RADIUS)",
"def ns(self):\n return self.lat",
"def map_to_latent_space(data_sample, model) -> np.ndarray: # [N x Z]\n\n latent_coords = []\n\n for batch in data_sample:\n # run encoder\n coords = model.encoder(batch)\n latent_coords.append(coords)\n\n # return latent (per jet?)\n return np.concatenate(latent_coords, axis=0)",
"def lat(self):\n return self._lat",
"def get_latitude(self):\n return self.B + self.dB",
"def get_ecmwf_lat_lon(nc_file):\n from netCDF4 import Dataset\n \n fh = Dataset(nc_file, mode='r')\n\n latitude_ecmwf = fh.variables['latitude_ecmwf'][:]\n longitude_ecmwf = fh.variables['longitude_ecmwf'][:]\n\n lonmesh_ecmwf,latmesh_ecmwf = np.meshgrid(longitude_ecmwf,latitude_ecmwf)\n\n print('latitude_ecmwf: ', latitude_ecmwf.shape)\n print('longitude_ecmwf: ', longitude_ecmwf.shape)\n \n return latitude_ecmwf, longitude_ecmwf, latmesh_ecmwf, lonmesh_ecmwf;",
"def spatialData(self):\n return self.__spatial_data",
"def broadcast_lonlat(ds, verbose=True):\n if 'lon' not in ds.variables:\n ds.coords['lon'] = ds['x']\n if 'lat' not in ds.variables:\n ds.coords['lat'] = ds['y']\n \n if len(ds['lon'].dims) < 2:\n ds.coords[\"lon\"] = ds[\"lon\"] * xr.ones_like(ds[\"lat\"])\n if len(ds['lat'].dims) < 2:\n ds.coords[\"lat\"] = xr.ones_like(ds[\"lon\"]) * ds[\"lat\"]\n return ds",
"def effective_latitude_xr(self):\n\n grid_areas_ddf = self.grid_area_xr.to_dataframe().reset_index()\n grid_areas_ddf = grid_areas_ddf[\n ['temp_bucket', 'cdf_eff_lat_deg', 'time']\n ]\n\n merge_ddf = (\n self.data_array_dask_df\n .reset_index(drop=True)\n #.repartition(npartitions=100)\n .merge(grid_areas_ddf,\n on=['time', 'temp_bucket'],\n how='left')\n )\n\n eff_lat_xr = self.dask_data_to_xarray(merge_ddf,\n var='cdf_eff_lat_deg')\n\n eff_lat_xr.name = 'effective_latitude'\n\n return eff_lat_xr",
"def __init__(self, lat, long):\n\n self.lat = float(lat)\n self.long = float(long)\n self.cartesian = None"
] | [
"0.67513454",
"0.66928744",
"0.6395381",
"0.63260347",
"0.61951125",
"0.61491525",
"0.613318",
"0.6064862",
"0.6063187",
"0.6053654",
"0.6050005",
"0.6028488",
"0.5952476",
"0.58669955",
"0.5862181",
"0.5862181",
"0.58513904",
"0.58510435",
"0.5826054",
"0.58009094",
"0.58002454",
"0.57809854",
"0.57540536",
"0.5703555",
"0.56965405",
"0.56725276",
"0.5664976",
"0.563764",
"0.56285477",
"0.56011355"
] | 0.74743557 | 0 |
returns a transient variable which is dimensioned along the lon axis but whose values are the longitudes | def lonvar( mv ):
# First get the axis. This is probably not as general as we'll need...
if mv is None: return None
lon_axis = lonAxis(mv)
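# Same pattern as latvar: the returned variable's data are the longitude values, on the lon axis.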
lonmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',
attributes={'units':lon_axis.units},
copy=True )
return lonmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def latvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lat_axis = latAxis(mv)\n #latmv = mv.clone() # good if mv has only a lat axis\n #latmv[:] = lat_axis[:]\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units},\n copy=True )\n return latmv",
"def lat_lons(self):",
"def lon(self):\n return self['lon']",
"def find_longitude_var(nc,name):\n var_obj = nc.variables[name] # Find the corresponding variable object \n dimens = var_obj.dimensions # Find the names of the dimensions of variable\n for i in range(len(dimens)):\n # For each dimension find the corresponding variable\n var_dim = nc.variables[dimens[i]]\n if is_longitude_var(var_dim) == True:\n return var_obj # If longitude exists, return the variable object\n \n return None",
"def latlons(self):\n\t\t\n\t\t# First check we have a grid feature type\n\t\tif self.featuretype in ['Grid', 'GridSeries']:\n\n\t\t\tlatvar = self.latitude_variable\n\t\t\tlonvar = self.longitude_variable\n\n\t\t\tlatdims = self.coordinates_mapping['latitude']['map']\n\t\t\tlondims = self.coordinates_mapping['longitude']['map']\n\n\t\t\t# Create latitude and longitude subset slices from the field subset slices\n\t\t\tlat_subset = []\n\t\t\tfor dim in latdims:\n\t\t\t\tlat_subset.append(self._subset[dim])\n\t\t\t\n\t\t\tlon_subset = []\n\t\t\tfor dim in londims:\n\t\t\t\tlon_subset.append(self._subset[dim])\n\n\t\t\t# Then check if latitude and longitude variables are 1D\n\t\t\tif len(latvar.shape) == 1 and len(lonvar.shape) == 1:\n\t\t\t\tlatvar_2d = latvar[lat_subset].reshape((-1,1)).repeat(lonvar.shape[0], axis=1)\n\t\t\t\tlonvar_2d = lonvar[lon_subset].reshape((-1,1)).transpose().repeat(latvar.shape[0], axis=0)\n\t\t\t\treturn (latvar_2d, lonvar_2d)\n\t\t\t\n\t\t\t# for 2D variables its easy, just return the variable data\n\t\t\telif len(latvar.shape) >= 2 and len(lonvar.shape) >= 2:\n\t\t\t\t\n\t\t\t\t# Handle the WRF case where lat/lon variables are 3D with time as first dimension\n\t\t\t\tif len(latvar.shape) == 3 and len(lonvar.shape) == 3:\n\t\t\t\t\treturn (latvar[0,lat_subset], lonvar[0,lon_subset])\n\t\t\t\telse:\n\t\t\t\t\treturn (latvar[lat_subset], lonvar[lon_subset])\n\t\t\t\n\t\t\t# otherwise, we can't do it!\n\t\t\telse:\n\t\t\t\treturn (None, None)\n\t\t\n\t\telif self.featuretype == 'PointSeries':\n\t\t\treturn (self.latitude_variable[:], self.longitude_variable[:])",
"def lon(self):\n return self._lon",
"def proj(self,lon,lat):\n x, y = self(np.atleast_1d(lon),np.atleast_1d(lat))\n x[x > 1e29] = None\n y[y > 1e29] = None\n #return np.ma.array(x,mask=x>1e2),np.ma.array(y,mask=y>1e2)\n return x, y",
"def spatial_var(map_):\n expx, expy = spatial_expval(map_)\n map_ = map_ / np.sum(map_)\n x, y = np.meshgrid(np.arange(map_.shape[1]), np.arange(map_.shape[0]))\n\n return np.sum(map_ * ((x - expx) ** 2 + (y - expy) ** 2))",
"def readDataLatLon(self, varName, lat, lon, **kwargs):\n # TODO: interpolate between lat and lon if necessary\n \n retList = []\n \n records = self._readRecords(varName, **kwargs)\n\n # get the index of the (lat,lon) pair\n lats, lons = records[0].latlons() \n latIdx, lonIdx = self._getIndex(lats, lons, lat, lon)\n\n for record in records:\n # We only care about the pressure levels\n if record.typeOfLevel == 'isobaricInhPa':\n # Append this value as a 2D 1x1 grid, NOTE: right now, just a value\n retList.append((record.level, record.values[latIdx][lonIdx]))\n\n retList.sort()\n\n presVec = np.ndarray((len(retList)))\n dataVec = np.ndarray((len(retList)))\n for i in range(len(retList)):\n presVec[i] = retList[i][0]\n dataVec[i] = retList[i][1]\n\n return presVec, dataVec",
"def get_ecmwf_lat_lon(nc_file):\n from netCDF4 import Dataset\n \n fh = Dataset(nc_file, mode='r')\n\n latitude_ecmwf = fh.variables['latitude_ecmwf'][:]\n longitude_ecmwf = fh.variables['longitude_ecmwf'][:]\n\n lonmesh_ecmwf,latmesh_ecmwf = np.meshgrid(longitude_ecmwf,latitude_ecmwf)\n\n print('latitude_ecmwf: ', latitude_ecmwf.shape)\n print('longitude_ecmwf: ', longitude_ecmwf.shape)\n \n return latitude_ecmwf, longitude_ecmwf, latmesh_ecmwf, lonmesh_ecmwf;",
"def _getlons(self):\n dlon = 360. / self.nlon\n lons = np.linspace(0. + dlon / 2., 360. - dlon / 2., self.nlon)\n return lons",
"def _getXYZ ( lon, lat ):\n d2r = pi / 180.\n rlon, rlat = ( d2r * lon, d2r * lat )\n x = cos(rlat) * cos(rlon)\n y = cos(rlat) * sin(rlon)\n z = sin(rlat)\n return (x,y,z)",
"def vector(self):\n return np.array([self.lat, self.lng])",
"def load_2D_netCDF(filename, var_name, lat_name, lon_name):\n data = Dataset(filename, 'r')\n var = data[var_name][:]\n lats = data[lat_name][:]\n lons = data[lon_name][:]\n data.close()\n return var, lats, lons",
"def get_latlon():\n\t\n iss.compute() # Get the lat/long values from ephem\n long_value = [float(i) for i in str(iss.sublong).split(\":\")]\n if long_value[0] < 0:\n long_value[0] = abs(long_value[0])\n cam.exif_tags['GPS.GPSLongitudeRef'] = \"W\"\n else:\n cam.exif_tags['GPS.GPSLongitudeRef'] = \"E\"\n cam.exif_tags['GPS.GPSLongitude'] = '%d/1,%d/1,%d/10' % (long_value[0], long_value[1], long_value[2]*10)\n lat_value = [float(i) for i in str(iss.sublat).split(\":\")]\n if lat_value[0] < 0:\n lat_value[0] = abs(lat_value[0])\n cam.exif_tags['GPS.GPSLatitudeRef'] = \"S\"\n else:\n cam.exif_tags['GPS.GPSLatitudeRef'] = \"N\"\n cam.exif_tags['GPS.GPSLatitude'] = '%d/1,%d/1,%d/10' % (lat_value[0], lat_value[1], lat_value[2]*10)\n return (iss.sublat / degree, iss.sublong / degree)",
"def longitude(self, lon):\n data = float(lon[1:])\n if lon[0] == \"E\":\n return data\n else:\n return -data",
"def merc_x(lon):\n r_major = 6378137.000\n x = r_major * math.radians(lon)\n return x",
"def getlatlon(self):\n lat = np.pi/2.0 - self._th\n time = self.gettime()\n lon = self._phi - 2*np.pi*time/86164.09164\n return lat, lon",
"def project(self, (lng, lat)):\n x = lng * DEG_TO_RAD\n lat = max(min(MAX_LATITUDE, lat), -MAX_LATITUDE)\n y = lat * DEG_TO_RAD\n y = math.log(math.tan((math.pi / 4) + (y / 2)))\n return (x*EARTH_RADIUS, y*EARTH_RADIUS)",
"def northing(self):\r\n x, y = self.lonlat2xy(self.longitude, self.latitude)\r\n return y",
"def lons(self):\n return self._origin.lon",
"def lons(self):\n return self._origin.lon",
"def lonvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lon_axis1 = lonAxis(mv1)\n lon_axis2 = lonAxis(mv2)\n if len(lon_axis1)<=len(lon_axis2):\n lon_axis = lon_axis1\n mv = mv1\n else:\n lon_axis = lon_axis2\n mv = mv2\n lonmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units} )\n return lonmv",
"def broadcast_lonlat(ds, verbose=True):\n if 'lon' not in ds.variables:\n ds.coords['lon'] = ds['x']\n if 'lat' not in ds.variables:\n ds.coords['lat'] = ds['y']\n \n if len(ds['lon'].dims) < 2:\n ds.coords[\"lon\"] = ds[\"lon\"] * xr.ones_like(ds[\"lat\"])\n if len(ds['lat'].dims) < 2:\n ds.coords[\"lat\"] = xr.ones_like(ds[\"lon\"]) * ds[\"lat\"]\n return ds",
"def solar_longitude(cls, tee):\n return cls.true_position(tee, cls.SIDEREAL_YEAR, 14/360, cls.ANOMALISTIC_YEAR, 1/42)",
"def get_xyz_coord(path):\r\n\tlabels = loadmat(path)\r\n\tanno_xyz = []\r\n\tfor index in range(0, 1500):\r\n\t\tanno_xyz.append([])\r\n\t\tfor i in range(0, 21):\r\n\t\t\tx = labels['handPara'][0][i][index]\r\n\t\t\ty = labels['handPara'][1][i][index]\r\n\t\t\tz = labels['handPara'][2][i][index]\r\n\t\t\tanno_xyz[-1].append([x, y, z])\r\n\tanno_xyz = np.array(anno_xyz)\r\n\t# anno_xyz = np.reshape(labels['handPara'], (1500, 21, 3))\r\n\treturn anno_xyz",
"def nancay():\n return coord.EarthLocation(lat=47.376511*u.deg, lon=2.1924002*u.deg)",
"def lat_lon(self):\n if self._lat_lon is None:\n if 'coordinates' in self:\n self._lat_lon = self.coordinates\n else:\n self._lat_lon = self.meta\n lat_lon_cols = ['latitude', 'longitude']\n for c in self.meta.columns:\n if c.lower().startswith('lat'):\n lat_lon_cols[0] = c\n elif c.lower().startswith('lon'):\n lat_lon_cols[1] = c\n\n self._lat_lon = self._lat_lon[lat_lon_cols].values\n\n return self._lat_lon",
"def geo_topo_vector(longitude, latitude, elevation, jd):\n from astropy.coordinates import GCRS, EarthLocation\n from astropy.time import Time\n import numpy as np\n\n loc = EarthLocation(longitude, latitude, elevation)\n\n time = Time(jd, scale=\"utc\", format=\"jd\")\n itrs = loc.get_itrs(obstime=time)\n gcrs = itrs.transform_to(GCRS(obstime=time))\n\n r = gcrs.cartesian\n\n # convert from m to km\n x = r.x.value / 1000.0\n y = r.y.value / 1000.0\n z = r.z.value / 1000.0\n\n return np.array([x, y, z])",
"def get_map_values(self, lons, lats, ibin=None):\n theta = np.pi / 2. - np.radians(lats)\n phi = np.radians(lons)\n\n pix = hp.ang2pix(self.hpx.nside, theta, phi, nest=self.hpx.nest)\n\n if self.data.ndim == 2:\n return self.data[:, pix] if ibin is None else self.data[ibin, pix]\n else:\n return self.data[pix]"
] | [
"0.67902434",
"0.6771255",
"0.67498714",
"0.6621353",
"0.6568897",
"0.6417901",
"0.617368",
"0.61442626",
"0.6087874",
"0.60350233",
"0.5941551",
"0.59397936",
"0.5933728",
"0.5912952",
"0.5898972",
"0.58715075",
"0.5850327",
"0.57997036",
"0.579611",
"0.5794126",
"0.57769763",
"0.57769763",
"0.5775408",
"0.5766215",
"0.57597625",
"0.57468134",
"0.57133734",
"0.5712144",
"0.5705799",
"0.5692999"
] | 0.7568185 | 0 |
returns a transient variable which is dimensioned along the lev (level) axis but whose values are the levels | def levvar( mv ):
# First get the axis. This is probably not as general as we'll need...
if mv is None: return None
lev_axis = levAxis(mv)
#levmv = mv.clone() # good if mv has only a lev axis
#levmv[:] = lev_axis[:]
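# Build a transient variable on the level axis whose data are the level values themselves.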
levmv = cdms2.createVariable( lev_axis[:], axes=[lev_axis], id='lev',
attributes={'units':lev_axis.units},
copy=True )
return levmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def select_lev( mv, slev ):\n levax = levAxis(mv)\n # Get ig, the first index for which levax[ig]>slev\n # Assume that levax values are monotonic.\n dummy,slev = reconcile_units( levax, slev ) # new slev has same units as levax\n if levax[0]<=levax[-1]:\n ids = numpy.where( levax[:]>=slev.value ) # assumes levax values are monotonic increasing\n else:\n ids = numpy.where( levax[:]<=slev.value ) # assumes levax values are monotonic decreasing\n if ids is None or len(ids)==0:\n ig = len(levax)-1\n else:\n ig = ids[0][0]\n # Crude fist cut: don't interpolate, just return a value\n if levax == mv.getAxisList()[0]:\n mvs = cdms2.createVariable( mv[ig:ig+1,...], copy=1 ) # why ig:ig+1 rather than ig? bug workaround.\n elif levax == mv.getAxisList()[1]:\n mvs = cdms2.createVariable( mv[:,ig:ig+1,...], copy=1 )\n else:\n print \"ERROR, select_lev() does not support level axis except as first or second dimentions\"\n return None\n return mvs",
"def _get_level_values(self, level: int, unique: bool = False) -> Index:\n lev = self.levels[level]\n level_codes = self.codes[level]\n name = self._names[level]\n if unique:\n level_codes = algos.unique(level_codes)\n filled = algos.take_nd(lev._values, level_codes, fill_value=lev._na_value)\n return lev._shallow_copy(filled, name=name)",
"def levvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lev_axis1 = levAxis(mv1)\n lev_axis2 = levAxis(mv2)\n if len(lev_axis1)<=len(lev_axis2):\n lev_axis = lev_axis1\n mv = mv1\n else:\n lev_axis = lev_axis2\n mv = mv2\n levmv = cdms2.createVariable( lev_axis[:], axes=[lev_axis], id='levels',\n attributes={'units':lev_axis.units} )\n return levmv",
"def var(self,i): # TODO: change to property to access (read only?) X?\n return Var(i,self.dims[i])",
"def levels(self):\n return np.array(self._levels()).T",
"def levshape(self) -> Shape:\n return tuple(len(x) for x in self.levels)",
"def latvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lat_axis = latAxis(mv)\n #latmv = mv.clone() # good if mv has only a lat axis\n #latmv[:] = lat_axis[:]\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units},\n copy=True )\n return latmv",
"def getLevel(unique_name):",
"def getLevels():",
"def get_var_level_index(sample_size, threshold):\n if sample_size <= 0:\n raise ValueError(\"Sample size cannot be non-positive:\", sample_size)\n if threshold <= 0.0:\n return 0\n if threshold >= 1.0:\n return sample_size - 1\n return int(math.floor(sample_size * threshold))",
"def enforce(self, wave, variables, parameters):\n return np.hstack([variables[0] - self.level])",
"def est_maxlevel(dims,bandwidth):\n lev = math.floor((math.log(min(dims))/math.log(2)-2)/bandwidth)\n lev=int(lev)\n return lev",
"def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))",
"def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))",
"def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))",
"def map_lvar(self, *args):\n return _ida_hexrays.vdui_t_map_lvar(self, *args)",
"def __getitem__(self, *args):\n return _ida_hexrays.qvector_lvar_t___getitem__(self, *args)",
"def add_extra_level(self, variable, xlevel):\n \n if variable not in [\"geopotential\", \"temperature\"]:\n raise Exception(\"variable should be one of [geopotential,temperature]\")\n \n if variable == \"geopotential\":\n # geopotential \n A = self.z.z[:, -1, :, :].to_dataset() # copy lowest pressure level\n A[\"level\"] = xlevel\n self.z = (xarray.concat([self.z, A], dim=\"level\"))\n \n # convert pressure to geopotential\n self.z.z[0, -1, :, :] = pres2alt(xlevel * 100) * g\n \n else: \n # temperature\n A = self.t.t[:, -1, :, :].to_dataset() # copy lowest pressure level\n A[\"level\"] = xlevel\n self.t = (xarray.concat([self.t, A], dim=\"level\"))",
"def mult_var_by_prior(self, x_scaled):\n model_var = self.likelihood(x_scaled)[1]\n tensor_log_prior = self.log_prior(x_scaled)\n return tf.reshape(model_var, shape=tensor_log_prior.shape) * tf.math.exp(tensor_log_prior)",
"def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)",
"def level(self):\n return self.init_v[2]",
"def vol_from_var(self, level: float = 0.95, interpolation: str = 'lower') -> float:\n return float(-np.sqrt(self.periods_in_a_year) *\n self.var_down_func(level, interpolation=interpolation) / ss.norm.ppf(level))",
"def variance_kl(var: base.Array, pred_log_var: base.Array) -> base.Array:\n log_var = jnp.log(var)\n pred_var = jnp.exp(pred_log_var)\n return 0.5 * (pred_log_var - log_var + var / pred_var - 1)",
"def GetLinearDimension(dimension):\r\n pass",
"def get_levels(self):\n return self.levels[self.game]",
"def get_ground_vector(self, label):\n\n if ':' in label:\n # This is an identifier\n\n parent, _, specifier = label.rpartition(':')\n top, _, _ = parent.partition(':')\n parentvec = self.get_ground_vector(parent)\n\n rs = np.random.RandomState(\n zlib.adler32(\n (str(self.hrr_size)+label).encode('utf-8')\n ) & 0xffffffff)\n rs.randint(2)\n\n specifier_vec = normalize(\n self.specifier_variances['var_{}_{}'.format(top, parent.count(':'))] *\n torch.tensor(rs.standard_normal(self.hrr_size)).float())\n\n newvec = normalize(\n self.ground_vec_merge_ratios['ground_{}_{}'.format(top, parent.count(':'))] @\n torch.cat([\n parentvec,\n specifier_vec,\n ]).reshape(-1, self.hrr_size))\n\n return newvec\n else:\n # Top level terms are fixed encodings\n\n return normalize(self.fixed_encodings[label])",
"def get_ground_vector(self, label):\n\n if ':' in label:\n # This is an identifier\n\n parent, _, specifier = label.rpartition(':')\n top, _, _ = parent.partition(':')\n parentvec = self.get_ground_vector(parent)\n\n rs = np.random.RandomState(\n zlib.adler32(\n (str(self.hrr_size)+label).encode('utf-8')\n ) & 0xffffffff)\n rs.randint(2)\n\n specifier_vec = normalize_comp(\n self.specifier_variances['var_{}_{}'.format(top, parent.count(':'))] *\n torch.tensor(rs.standard_normal((2, self.hrr_size))).float())\n\n newvec = normalize_comp(\n self.ground_vec_merge_ratios['ground_{}_{}'.format(top, parent.count(':'))] @\n torch.cat([\n parentvec,\n specifier_vec,\n ]).reshape(-1, 2 * self.hrr_size)).reshape(2, self.hrr_size)\n\n return newvec\n else:\n # Top level terms are fixed encodings\n\n return normalize_comp(self.fixed_encodings[label])",
"def lonvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lon_axis = lonAxis(mv)\n latmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units},\n copy=True )\n return latmv",
"def lvec(self):\n lv = ROOT.TLorentzVector()\n# if self.pt < 0 or abs(self.eta) > 6:\n# raise Exception(\"Invalid values for TLorentzVector\")\n lv.SetPtEtaPhiM(self.pt, self.eta, self.phi, self.mass)\n# if abs(lv.Pt()) > 100000 or abs(lv.Eta()) > 100000:\n# raise Exception(\"Invalid values for TLorentzVector\")\n return lv",
"def at(self, *args):\n return _ida_hexrays.qvector_lvar_t_at(self, *args)"
] | [
"0.61772287",
"0.60954696",
"0.6019012",
"0.5921437",
"0.57173896",
"0.56935555",
"0.55191696",
"0.55090266",
"0.5501754",
"0.5488749",
"0.5447241",
"0.5427211",
"0.54218185",
"0.54218185",
"0.54218185",
"0.5372012",
"0.5331837",
"0.52983266",
"0.52873015",
"0.5285758",
"0.5281006",
"0.52809644",
"0.52152973",
"0.5198821",
"0.5191172",
"0.51868975",
"0.51668894",
"0.51548123",
"0.5153946",
"0.5131576"
] | 0.7218306 | 0 |
From a variable or axis of pressures, this function converts to millibars, and returns the result as a numpy array. | def pressures_in_mb( pressures ):
if not hasattr( pressures, 'units' ): return None
if pressures.units=='mb':
pressures.units = 'mbar' # udunits uses mb for something else
return pressures[:]
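# Otherwise udunits supplies a linear map (slope s, intercept i) from the input units to mbar.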
tmp = udunits(1.0,pressures.units)
s,i = tmp.how('mbar')
pressmb = s*pressures[:] + i
return pressmb | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scale_mag_1(x):\n return np.array([np.true_divide(ui, mag(x)) for ui in x])",
"def cm2inch(x: Union[float, Sequence[float], NDArray]) -> Sequence[float]:\n return list(np.array(x) / 2.54)",
"def convertUnits(self, varname, arr):\n if varname == \"SPDQ\" or varname == \"PHQ\":\n return arr*2.5e6/1000.\n return arr",
"def _convert_bar_width(x, width=1, ncols=1):\n # WARNING: This will fail for non-numeric non-datetime64 singleton\n # datatypes but this is good enough for vast majority of cases.\n x_test = np.atleast_1d(_to_ndarray(x))\n if len(x_test) >= 2:\n x_step = x_test[1:] - x_test[:-1]\n x_step = np.concatenate((x_step, x_step[-1:]))\n elif x_test.dtype == np.datetime64:\n x_step = np.timedelta64(1, 'D')\n else:\n x_step = np.array(0.5)\n if np.issubdtype(x_test.dtype, np.datetime64):\n # Avoid integer timedelta truncation\n x_step = x_step.astype('timedelta64[ns]')\n return width * x_step / ncols",
"def signal_xs(mass, width_frac):\n width = mass*width_frac/100.\n return ROOT.getHiggsXS(mass, width)",
"async def infer_shape_scalar_to_array(track, x):\n return ()",
"def fluxes_to_labels(fluxes: np.ndarray) -> np.ndarray:\n return ((1 - fluxes) // 2).astype(np.int8)",
"def convert_wave_to_units(self, wave):\n return [self.convert_point_to_units(i) for i in wave]",
"def load_nifty_volume_as_array(filename):\n img = sitk.ReadImage(filename)\n img_arr = sitk.GetArrayFromImage(img)\n return img_arr",
"def rescale_toa(arr, dtype=np.float32):\n # First look at raw value dists along bands\n\n arr_trans = np.subtract(arr, arr.min(axis=(1, 2))[:, np.newaxis, np.newaxis])\n arr_rs = np.divide(arr_trans, arr_trans.max(axis=(1, 2))[:, np.newaxis, np.newaxis])\n if dtype == np.uint8:\n arr_rs = np.array(arr_rs*255, dtype=np.uint8)\n return arr_rs",
"def get_time_series_1d(data, bin_width):\n return np.array([x for x in zip(*(data[i:] for i in range(bin_width)))])",
"def _make_array(x):\n try:\n x = np.asfarray(x).squeeze()\n except ValueError:\n pass\n return x",
"def to_volume(self, verbose=True):\n images = self.load_all_dicom_images(verbose=verbose)\n\n volume = np.stack(\n [\n x.pixel_array * x.RescaleSlope + x.RescaleIntercept\n for x in images\n ],\n axis=-1,\n ).astype(np.int16)\n return volume",
"def convert_units(array, in_units=\"None\"):\n if in_units == \"None\" or in_units is None:\n return array\n else:\n raise Exception(\"Unsupported unit for bispectrum descriptors.\")",
"def scalebox(self, b):\n return [int(b[0]*self.video_w/self.detection_image_size[0]),\n int(b[1]*self.video_h/self.detection_image_size[1]),\n int(b[2]*self.video_w/self.detection_image_size[0]),\n int(b[3]*self.video_h/self.detection_image_size[1])]",
"def array_converter(roodataobject,obs_name):\n try:\n from numpy import array\n except ImportError:\n from array import array as array\n\n # Create the histogram with respect the observable\n histo = roodataobject.createHistogram(obs_name)\n # Normalize\n histo.Scale(1.0/histo.Integral())\n _provlist = []\n for i in xrange(1,histo.GetNbinsX()+1):\n _provlist.append(histo.GetBinContent(i))\n\n # the output array\n try:\n harray = array([ x for x in _provlist ],dtype='d')\n except TypeError:\n harray = array('d',[ x for x in _provlist ])\n return harray",
"def convert(value, unit, axis):\n converted_value = value\n if isinstance(unit, str) or isinstance(unit, Unit):\n unit = (unit,)\n if isinstance(value, (unyt_array, unyt_quantity)):\n converted_value = value.to(*unit)\n else:\n value_type = type(value)\n converted_value = []\n for obj in value:\n converted_value.append(obj.to(*unit))\n converted_value = value_type(converted_value)\n return converted_value",
"def auto_convert(boxes: Type[Union[Tensor, np.ndarray]], w: int, h: int):\n\n if boxes.max() < 2:\n # to pixel coordinates\n boxes[:, 0::2] *= w\n boxes[:, 1::2] *= h\n else:\n # to normalized 0-1\n boxes[:, 0::2] /= w\n boxes[:, 1::2] /= h\n return boxes",
"def convert(report):\n M = []\n for row in report['data']['rows']:\n dimensions = row['dimensions']\n metrics = row['metrics'][0]['values']\n M.append(dimensions + metrics)\n return M",
"def to_unitless(value, new_unit=None):\n integer_one = 1\n if new_unit is None:\n new_unit = pq.dimensionless\n\n if isinstance(value, (list, tuple)):\n return np.array([to_unitless(elem, new_unit) for elem in value])\n elif isinstance(value, np.ndarray) and not hasattr(value, \"rescale\"):\n if is_unitless(new_unit) and new_unit == 1 and value.dtype != object:\n return value\n return np.array([to_unitless(elem, new_unit) for elem in value])\n elif isinstance(value, dict):\n new_value = dict(value.items()) # value.copy()\n for k in value:\n new_value[k] = to_unitless(value[k], new_unit)\n return new_value\n elif (\n isinstance(value, (int, float)) and new_unit is integer_one or new_unit is None\n ):\n return value\n elif isinstance(value, str):\n raise ValueError(\"str not supported\")\n else:\n try:\n try:\n mag = magnitude(value)\n unt = unit_of(value)\n conv = rescale(unt/new_unit, pq.dimensionless)\n result = np.array(mag)*conv\n except AttributeError:\n if new_unit == pq.dimensionless:\n return value\n else:\n raise\n else:\n if result.ndim == 0:\n return float(result)\n else:\n return np.asarray(result)\n except TypeError:\n return np.array([to_unitless(elem, new_unit) for elem in value])",
"def tiles_to_volume(self, tiles: list) -> np.ndarray:\n if not self.ascending:\n tiles = tiles[::-1]\n volume = np.stack(tiles, axis=-1).transpose((1, 0, 2))\n return np.flip(volume, axis=1)",
"def get_metric(ms):\n\treturn '['+','.join(str(m) for m in ms)+']'",
"def reformat(dataset):\n x = dataset[:, 1] \n x = np.stack(x) # reshape to (n, mel bands, timesteps)\n x = np.expand_dims(np.moveaxis(x, 1, -1), axis=3) # reformat x to (n, timesteps, mel bands, 1) \n y = dataset[:, 2] \n y = np.moveaxis(np.stack(y), 1, -1) # reformat y to (n, timesteps, 8)\n return x, y",
"def aspect2bytes(aspect):\n return (0xFF * (aspect/pi + 1)/2).astype(numpy.uint8)",
"def _process_quantiles(x, dim):\r\n x = np.asarray(x, dtype=float)\r\n\r\n if x.ndim == 0:\r\n x = x[np.newaxis]\r\n elif x.ndim == 1:\r\n if dim == 1:\r\n x = x[:, np.newaxis]\r\n else:\r\n x = x[np.newaxis, :]\r\n\r\n return x",
"def get_signal_gwgds1072au(a_signal_packed: bytes, a_scale : float ) -> list:\n the_return = None\n the_signal_packed=a_signal_packed\n the_scale=a_scale\n the_signal_sequence=[]\n the_signal=0.0 #TODO reminder check this before allowing it\n the_info=[]\n n=4\n bla=0\n blb=bla+n\n print(the_signal_packed)\n JX=unpack('>%sh' % 2 ,the_signal_packed[bla:blb])\n for ii in range(0,2003):\n the_info.append(unpack('>%sh' % 2 ,the_signal_packed[bla:blb])[0])\n bla=bla+n\n blb=blb+n\n #TODO get the potential scale\n #TODO get the offset\n #TODO get the time scale\n\n return the_info",
"def waveVector_deBroglie(momentum=1, units=SI):\n\n var = sy.var('p hbar')\n par = momentum, units['hbar']\n\n y = p / hbar\n return dic_result(var,par,y)",
"def le_binario_mgbq(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)",
"def scale01(arr):\r\n walk_arr_01 = numpy.interp(arr, (numpy.amin(arr), numpy.amax(arr)), (-1, +1)) # linear scaling\r\n return walk_arr_01 #return the scaled array\r",
"def preprocess_xarray(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n args = tuple(a.metpy.unit_array if isinstance(a, xr.DataArray) else a for a in args)\n kwargs = {name: (v.metpy.unit_array if isinstance(v, xr.DataArray) else v)\n for name, v in kwargs.items()}\n return func(*args, **kwargs)\n return wrapper"
] | [
"0.50821847",
"0.50635743",
"0.5063201",
"0.5052912",
"0.50040376",
"0.49670303",
"0.48602846",
"0.47888026",
"0.47871676",
"0.47724292",
"0.47648123",
"0.47488585",
"0.46842295",
"0.46627557",
"0.46367168",
"0.46319687",
"0.4622999",
"0.46144953",
"0.46112272",
"0.45830104",
"0.45763472",
"0.4559005",
"0.45513022",
"0.45487088",
"0.4547772",
"0.45354503",
"0.45192635",
"0.45098084",
"0.4496422",
"0.44706327"
] | 0.5553397 | 0 |
returns a transient variable which is dimensioned along the lev (level) axis and whose values are the heights corresponding to the pressure levels found on the lev axis of mv. Levels are first converted to millibars. Heights are returned in km | def heightvar( mv ):
if mv is None: return None
lev_axis = levAxis(mv)
heights = 0.001 * press2alt.press2alt( pressures_in_mb(lev_axis) ) # 1000 m = 1 km
heightmv = cdms2.createVariable( heights, axes=[lev_axis], id=mv.id,
attributes={'units':"km"} )
return heightmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def levvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lev_axis = levAxis(mv)\n #levmv = mv.clone() # good if mv has only a lev axis\n #levmv[:] = lev_axis[:]\n levmv = cdms2.createVariable( lev_axis[:], axes=[lev_axis], id='lev',\n attributes={'units':lev_axis.units},\n copy=True )\n return levmv",
"def select_lev( mv, slev ):\n levax = levAxis(mv)\n # Get ig, the first index for which levax[ig]>slev\n # Assume that levax values are monotonic.\n dummy,slev = reconcile_units( levax, slev ) # new slev has same units as levax\n if levax[0]<=levax[-1]:\n ids = numpy.where( levax[:]>=slev.value ) # assumes levax values are monotonic increasing\n else:\n ids = numpy.where( levax[:]<=slev.value ) # assumes levax values are monotonic decreasing\n if ids is None or len(ids)==0:\n ig = len(levax)-1\n else:\n ig = ids[0][0]\n # Crude fist cut: don't interpolate, just return a value\n if levax == mv.getAxisList()[0]:\n mvs = cdms2.createVariable( mv[ig:ig+1,...], copy=1 ) # why ig:ig+1 rather than ig? bug workaround.\n elif levax == mv.getAxisList()[1]:\n mvs = cdms2.createVariable( mv[:,ig:ig+1,...], copy=1 )\n else:\n print \"ERROR, select_lev() does not support level axis except as first or second dimentions\"\n return None\n return mvs",
"def levvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lev_axis1 = levAxis(mv1)\n lev_axis2 = levAxis(mv2)\n if len(lev_axis1)<=len(lev_axis2):\n lev_axis = lev_axis1\n mv = mv1\n else:\n lev_axis = lev_axis2\n mv = mv2\n levmv = cdms2.createVariable( lev_axis[:], axes=[lev_axis], id='levels',\n attributes={'units':lev_axis.units} )\n return levmv",
"def lonvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lon_axis = lonAxis(mv)\n latmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units},\n copy=True )\n return latmv",
"def calculate_tumor(filename, verbose = False):\n img = nibabel.load(filename)\n data = img.get_data()\n pixdim = img.header['pixdim']\n xyzt_units = img.header['xyzt_units']\n #pixdim[1],pixdim[2],pixdim[3] stores width, depth and height\n volume_per_pix = pixdim[1]*pixdim[2]*pixdim[3]\n\n volumes = {}\n volumes['total vasogenic edema volume'] = round(sum(data[data ==2 ])*volume_per_pix/1000, 3)\n volumes['enhancing portion'] = round(sum(data[data == 4]) * volume_per_pix/1000, 3)\n volumes['non enhancing portion'] = round(sum(data[data == 1]) * volume_per_pix/1000, 3)\n volumes['total tumor volume'] = round(volumes['enhancing portion'] + volumes['non enhancing portion'], 3)\n if xyzt_units == 1:\n volumes['unit'] = 'L'\n if xyzt_units == 2:\n volumes['unit'] = 'ML'\n if xyzt_units == 3:\n volumes['unit'] = 'UL'\n\n return volumes",
"def latvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lat_axis = latAxis(mv)\n #latmv = mv.clone() # good if mv has only a lat axis\n #latmv[:] = lat_axis[:]\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units},\n copy=True )\n return latmv",
"def get_level_size(slide, level):\n return slide.level_dimensions[level]",
"def v(self):\n\n # TODO This translation formula works, but needs simplified.\n\n # PWM duration can go from 0 to 4095 with 4095 representing max rpm\n# print(\"MuleBot.v MuleBot.dcMotorPWMDurationLeft:\", MuleBot.dcMotorPWMDurationLeft)\n speed_percentage = float(MuleBot.dcMotorPWMDurationLeft) / 4095.0\n# print(\"speed_percentage: \", speed_percentage)\n\n rpm = speed_percentage * self.motorMaxRPM\n# print(\"rpm: \", rpm)\n\n secondsPerMinute = 60\n revs_per_second = rpm / secondsPerMinute\n# print(\"--revs_per_second\", revs_per_second)\n\n inches_per_rev = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n INCHES_PER_METER = 39.3701\n meters_per_rev = inches_per_rev / INCHES_PER_METER\n# print(\"--meters_per_rev\", meters_per_rev)\n\n meters_per_second = meters_per_rev * revs_per_second\n\n# print(\"--meters_per_second: \", meters_per_second)\n return meters_per_second",
"def width_v_phi(model: SingleRhNeutrinoModel):\n params = _neutrino_vector_meson_constants[\"phi\"]\n k, g = params[\"k\"], params[\"g\"]\n return _width_v_hv(model, MPHI, k, g)",
"def get_level_mag(slide, level):\n return level_mags(slide)[level]",
"def est_maxlevel(dims,bandwidth):\n lev = math.floor((math.log(min(dims))/math.log(2)-2)/bandwidth)\n lev=int(lev)\n return lev",
"def get_dimensional_measurements():\n return Global_Module.global_dimensional_measurements",
"def model_onelayer_pert(r):\n\n\t#- march through the various depth levels -----------------------------------------------------\n\n\tif (r > 6361000.0):\n\t\trho = 2.7\n\t\tvpv = 5.8\n\t\tvph = vpv\n\t\tvsv = 2.0 + 0.02\n\t\tvsh = vsv \n\t\teta = 1.0\n\n\telse:\n\t\trho = 3.1\n\t\tvpv = 7.8\n\t\tvph = vpv\n\t\tvsv = 3.0\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- convert to elastic parameters --------------------------------------------------------------\n\n\trho = 1000.0 * rho\n\tvpv = 1000.0 * vpv\n\tvph = 1000.0 * vph\n\tvsv = 1000.0 * vsv\n\tvsh = 1000.0 * vsh\n\n\tA = rho * vph**2\n\tC = rho * vpv**2\n\tN = rho * vsh**2\n\tL = rho * vsv**2\n\tF = eta * (A - 2 * L)\n\n\treturn rho, A, C, F, L, N",
"def vp_from_ke(m):\n return (m[0, 0]/m[2,0], m[1,0]/m[2,0])",
"def calc_thickness(self):\n s = \"::: calculating z-varying thickness :::\"\n print_text(s, cls=self)\n #H = project(self.S - self.x[2], self.Q, annotate=False)\n H = self.vert_integrate(Constant(1.0), d='down')\n Hv = H.vector()\n Hv[Hv < 0] = 0.0\n print_min_max(H, 'H', cls=self)\n return H",
"def vol_from_var(self, level: float = 0.95, interpolation: str = 'lower') -> float:\n return float(-np.sqrt(self.periods_in_a_year) *\n self.var_down_func(level, interpolation=interpolation) / ss.norm.ppf(level))",
"def dimension_pv(self):\n return self._dimension_pv",
"def mTV(self):\n distance = abs(self.vertPosT - self.vertPosW) # distance between htp and vortex shred plane,\n # approximated with the wing root chordplane\n return distance / (self.spanW / 2)",
"def get_properties(self):\n assert self.kekulize, '#ERROR: u need to get explicit BOs for amon generation'\n self.vs = np.array([ ai.GetTotalValence() for ai in self.m0.GetAtoms() ], np.int)\n #self.update_bom()\n self.ias_heav = self.ias[ self.zs > 1 ]\n bom_heav = self.bom[ self.ias_heav, : ][ :, self.ias_heav ]\n self.vs_heav = bom_heav.sum(axis=0)\n self.cns_heav = ( bom_heav > 0 ).sum(axis=0)\n self.nhs = self.vs[:self.nheav] - self.vs_heav - self.chgs[:self.nheav]\n self.dvs = self.vs_heav - self.cns_heav\n self.hybs = np.array([ _hyb[ai.GetHybridization()] for ai in self.m.GetAtoms() ])",
"def vol(x):\r\n return pi*(topdia(x)/2000.)**2 * length (x)",
"def reduce2levlat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def calc_lhv(self):\n hf = {}\n hf['hydrogen'] = 0\n hf['methane'] = -74.85\n hf['ethane'] = -84.68\n hf['propane'] = -103.8\n hf['butane'] = -124.51\n hf['O2'] = 0\n hf['CO2'] = -393.5\n # water (gaseous)\n hf['H2O'] = -241.8\n\n lhv = 0\n\n for f, x in self.fuel.val.items():\n molar_masses[f] = CP.PropsSI('M', f)\n fl = set(list(hf.keys())).intersection(\n set([a.replace(' ', '') for a in CP.get_aliases(f)]))\n if len(fl) == 0:\n continue\n\n if list(fl)[0] in self.fuels():\n structure = fluid_structure(f)\n\n n = {}\n for el in ['C', 'H', 'O']:\n if el in structure:\n n[el] = structure[el]\n else:\n n[el] = 0\n\n lhv += (-(n['H'] / 2 * hf['H2O'] + n['C'] * hf['CO2'] -\n ((n['C'] + n['H'] / 4) * hf['O2'] +\n hf[list(fl)[0]])) / molar_masses[f] * 1000) * x\n\n return lhv",
"def bv_data():\n heights = [1000., 1500., 2000., 2500.] * units('m')\n potential_temperatures = [[290., 290., 290., 290.],\n [292., 293., 293., 292.],\n [294., 296., 293., 293.],\n [296., 295., 293., 296.]] * units('K')\n return heights, potential_temperatures",
"def velocity(self,level='cell'):\r\n\r\n # 每个section中总是储存t+1时刻的volume,t到t+1的flow,即一个仿真步长(step)过程中的流量和仿真步长结束时的元胞中车辆数\r\n # 但计算速度需要用到仿真步长开始时的元胞密度,因此要对应时刻的元胞中车辆数vol_t = Vol_t+1 + outflow_t - inflow_t \r\n vels = []\r\n vols = self.last_sim_step_volume()\r\n \r\n if level=='cell':\r\n # 计算第一个元胞\r\n vol = vols[0]\r\n outflow = self.flows[0]\r\n if vol == 0 :\r\n vels.append(0)\r\n else :\r\n vel = outflow*3600/(vol/self.cell_length)\r\n vels.append(round(vel,2))\r\n \r\n # 计算中间元胞\r\n for i in range(1,self.cells_number-1):\r\n vol = vols[i]\r\n outflow = self.flows[i]\r\n if vol == 0 :\r\n vels.append(0)\r\n else:\r\n vel = outflow*3600/(vol/self.cell_length)\r\n vels.append(round(vel,2))\r\n\r\n # 计算最后一个元胞\r\n vol = vols[-1]\r\n outflow = self.outflow\r\n if vol==0:\r\n vels.append(0)\r\n else:\r\n vel = outflow*3600/(vol/self.cell_length)\r\n vels.append(round(vel,2))\r\n \r\n return vels\r\n \r\n elif level=='section': \r\n # 先计算每一个元胞的再按照volume计算加权平均\r\n \r\n # 计算第一个元胞\r\n vol = vols[0]\r\n outflow = self.flows[0]\r\n if vol == 0 :\r\n vels.append(0)\r\n else :\r\n vel = outflow*3600/(vol/self.cell_length)\r\n vels.append(round(vel,2))\r\n \r\n # 计算中间元胞\r\n for i in range(1,self.cells_number-1):\r\n vol = vols[i]\r\n outflow = self.flows[i]\r\n if vol == 0 :\r\n vels.append(0)\r\n else:\r\n vel = outflow*3600/(vol/self.cell_length)\r\n vels.append(round(vel,2))\r\n\r\n # 计算最后一个元胞\r\n vol = vols[-1]\r\n outflow = self.outflow\r\n if vol==0:\r\n vels.append(0)\r\n else:\r\n vel = outflow*3600/(vol/self.cell_length)\r\n vels.append(round(vel,2)) \r\n\r\n \r\n # 将速度按照volume加权平均\r\n weighted_vels = [vel*vol for vel, vol in zip(vels,vols)]\r\n sum_vol = sum(vols)\r\n if sum_vol == 0:\r\n avg_vel = 0\r\n else:\r\n avg_vel = round(sum(weighted_vels)/sum_vol,2)\r\n \r\n return avg_vel\r\n\r\n\r\n else :\r\n raise ValueError('no such level for collecting data')",
"def loading(self, pressure):\n kp = self.params[\"K\"] * pressure\n return self.params[\"n_m\"] * kp / (1.0 + kp)",
"def getComponentVolume(self):\n lengthO = self.getDimension(\"lengthOuter\")\n widthO = self.getDimension(\"widthOuter\")\n heightO = self.getDimension(\"heightOuter\")\n lengthI = self.getDimension(\"lengthInner\")\n widthI = self.getDimension(\"widthInner\")\n heightI = self.getDimension(\"heightInner\")\n mult = self.getDimension(\"mult\")\n vol = mult * (lengthO * widthO * heightO - lengthI * widthI * heightI)\n return vol",
"def width_v_v_v(model: SingleRhNeutrinoModel, genv: Generation):\n mx = model.mx\n u = 0.5 * np.tan(2 * model.theta)\n w = parameters.GF**2 * mx**5 / (768 * np.pi**3) * u**2\n pre = 2 if genv == model.gen else 1.0\n return pre * w",
"def MH(self):\n\n #return math.log10(self.glb[user_params_index[\"Zs\"]]*constants.solar_x/(self.glb[user_params_index[\"Xs\"]]*constants.solar_z))\n return math.log10(self.glb[iz0]*constants.solar_x/(self.glb[ix0]*constants.solar_z))",
"def height(self, x):\n\t\treturn np.interp(x, self.x, self.z)",
"def peak_height(self, logM, k = [], pk = []):\n # Checks\n pk=np.atleast_2d(pk)\n assert len(pk[0])==len(k), \"Length of scales is different from power spectra\"\n sigma2 = self.mass_variance(logM,k,pk)\n nu = self.delta_sc/sigma2**.5\n return nu"
] | [
"0.6776988",
"0.6006147",
"0.590526",
"0.5898939",
"0.55466783",
"0.54994154",
"0.547498",
"0.5466537",
"0.5447086",
"0.54124725",
"0.5391253",
"0.5389002",
"0.53862095",
"0.5358909",
"0.5290569",
"0.5286262",
"0.5192268",
"0.51763445",
"0.5174741",
"0.51733345",
"0.5168547",
"0.51670426",
"0.51535904",
"0.5152418",
"0.5145301",
"0.51448774",
"0.5137595",
"0.51373607",
"0.5126466",
"0.51196504"
] | 0.77257425 | 0 |
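A minimal usage sketch for the heightvar() record above (not part of the dataset rows): it assumes cdms2 and the press2alt / pressures_in_mb / levAxis helpers used by heightvar() are importable, and that 'sample.nc' (a placeholder name) holds a variable 'T' with a pressure level axis.

import cdms2
f = cdms2.open('sample.nc')   # placeholder file name
T = f('T')                    # TransientVariable with a lev axis in pressure units
z = heightvar(T)              # 1-D variable on the same lev axis, values in km
print(z.shape)                # same length as the lev axis
print(z.units)                # 'km'
f.close()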
returns a transient variable which is dimensioned along the latitude axis of whichever of mv1, mv2 has fewer latitude points, and whose values are those latitudes | def latvar_min( mv1, mv2 ):
if mv1 is None: return None
if mv2 is None: return None
lat_axis1 = latAxis(mv1)
lat_axis2 = latAxis(mv2)
if len(lat_axis1)<=len(lat_axis2):
lat_axis = lat_axis1
mv = mv1
else:
lat_axis = lat_axis2
mv = mv2
latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',
attributes={'units':lat_axis.units} )
return latmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lonvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lon_axis1 = lonAxis(mv1)\n lon_axis2 = lonAxis(mv2)\n if len(lon_axis1)<=len(lon_axis2):\n lon_axis = lon_axis1\n mv = mv1\n else:\n lon_axis = lon_axis2\n mv = mv2\n lonmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units} )\n return lonmv",
"def reduce2scalar_zonal( mv, latmin=-90, latmax=90, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n ilat = None\n for i,ax in enumerate(axes):\n if ax.id=='lat': ilat = i\n # reduce size of lat axis to (latmin,latmax)\n # Let's home a direct search will be fast enough:\n lataxis = latAxis( mv )\n lmin = -1\n lmax = len(lataxis)\n if lataxis[0]>=latmin: lmin = 0\n if lataxis[-1]<=latmax: lmax = len(lataxis)-1\n if lmin==-1 or lmax==len(lataxis):\n for l,ax in enumerate(lataxis):\n if lmin==-1 and ax>=latmin: lmin = max( 0, l )\n if lmax==len(lataxis) and ax>=latmax: lmax = min( l, len(lataxis) )\n lataxis_shrunk = lataxis.subaxis(lmin,lmax)\n mv2shape = list(mv.shape)\n mv2shape[ilat] = lmax-lmin+1\n axes[ilat] = lataxis_shrunk\n mvd1 = numpy.delete( mv, slice(0,lmin), ilat )\n mvdata = numpy.delete( mvd1, slice(lmax-lmin,len(lataxis)-lmin), ilat )\n mv2 = cdms2.createVariable( mvdata, axes=axes )\n\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n avmv = averager( mv2, axis=axes_string )\n avmv.id = vid # Note that the averager function returns a variable with meaningless id.\n ammv.units = mv.units\n\n return avmv",
"def latvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lat_axis = latAxis(mv)\n #latmv = mv.clone() # good if mv has only a lat axis\n #latmv[:] = lat_axis[:]\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units},\n copy=True )\n return latmv",
"def lonvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lon_axis = lonAxis(mv)\n latmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units},\n copy=True )\n return latmv",
"def spatial_var(map_):\n expx, expy = spatial_expval(map_)\n map_ = map_ / np.sum(map_)\n x, y = np.meshgrid(np.arange(map_.shape[1]), np.arange(map_.shape[0]))\n\n return np.sum(map_ * ((x - expx) ** 2 + (y - expy) ** 2))",
"def restrict_lat( mv, latmin, latmax ):\n if latmin==-90: latmin = -91 # just to make sure\n if latmax==90: latmax = 91\n\n # axes\n latax,idx = latAxis2(mv)\n if latax is None: return None\n imin = min( [i for i in range(len(latax)) if latax[i]>=latmin and latax[i]<=latmax ] )\n imax = max( [i for i in range(len(latax)) if latax[i]>=latmin and latax[i]<=latmax ] )\n newlatax = latax.subaxis( imin, imax+1 )\n # TO DO: use latax.bounds (if present) for newlatax.bounds\n # At the moment, I'm working with data for which latax.bounds doesn't exist.\n # At the moment, we don't need bounds. This would get us through if necessary:\n # newlatax.bounds = newlatax.genGenericBounds()\n newaxes = list( allAxes(mv) ) # shallow copy\n newaxes[idx] = newlatax\n\n # shrink the data to match the shrunk lat axis\n newmv_shape = list( mv.shape )\n newmv_shape[idx] = imax+1 - imin\n if imin>0:\n nd = numpy.delete( mv.data, slice(0,imin), idx ) # doesn't change mv\n else:\n nd = mv\n lenidx = nd.shape[idx]\n if lenidx > newmv_shape[idx]:\n newdata = numpy.delete( nd.data, slice(imax+1-imin,lenidx), idx )\n else:\n newdata = nd\n\n # new variable\n newmv = cdms2.createVariable( newdata, copy=True, axes=newaxes, id=mv.id )\n newmv.units = mv.units\n return newmv",
"def reduce2scalar_zonal_old( mv, latmin=-90, latmax=90, vid=None ):\n # For now, I'm assuming that the only axes are time,lat,lon - so that zm is a scalar.\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n if vid==None:\n vid = 'reduced_'+mv.id\n time,lat,lon = tllAxes(mv)\n if hasattr(mv.parent,'variables'):\n fil = mv.parent # mv is a fileVariable and fil is a file.\n lat_bnds = fil[lat.bounds]\n else:\n lataxis = latAxis(mv) # mv is a TransientVariable\n lat_bnds = lataxis._bounds_\n\n mvta = timeave_old( mv )\n\n # In computing the average, we use area weighting.\n # Sometimes the area is available in cell_measures, but for now I'll just use the backup method:\n # The area of a lonlat cell is R^2*delta(lon)*delta(sin(lat)).\n # With equally spaced lon, we don't need delta(lon) for weights.\n # I'll assume that lat,lon are in degrees, which is the only way I've ever seen them.\n wgtsum = 0\n zm = 0\n for i,lati in enumerate(lat):\n # The following test could be sped up a lot, because lat[i] is ordered...\n # >>> to do: partial overlaps\n if latmin<=lati and lati<latmax:\n latlo = lat_bnds[i,0]\n lathi = lat_bnds[i,1]\n wgti = sin(radians(lathi))-sin(radians(latlo))\n zi = 0.0\n for j in range(len(lon)):\n zi += mvta[i,j]\n zi *= wgti\n wgtsum += wgti*len(lon)\n zm += zi\n zm /= wgtsum\n # zm is a scalar, so createVariable gets no axes argument:\n zmv = cdms2.createVariable( zm, id=vid )\n return zmv",
"def latlons(self):\n\t\t\n\t\t# First check we have a grid feature type\n\t\tif self.featuretype in ['Grid', 'GridSeries']:\n\n\t\t\tlatvar = self.latitude_variable\n\t\t\tlonvar = self.longitude_variable\n\n\t\t\tlatdims = self.coordinates_mapping['latitude']['map']\n\t\t\tlondims = self.coordinates_mapping['longitude']['map']\n\n\t\t\t# Create latitude and longitude subset slices from the field subset slices\n\t\t\tlat_subset = []\n\t\t\tfor dim in latdims:\n\t\t\t\tlat_subset.append(self._subset[dim])\n\t\t\t\n\t\t\tlon_subset = []\n\t\t\tfor dim in londims:\n\t\t\t\tlon_subset.append(self._subset[dim])\n\n\t\t\t# Then check if latitude and longitude variables are 1D\n\t\t\tif len(latvar.shape) == 1 and len(lonvar.shape) == 1:\n\t\t\t\tlatvar_2d = latvar[lat_subset].reshape((-1,1)).repeat(lonvar.shape[0], axis=1)\n\t\t\t\tlonvar_2d = lonvar[lon_subset].reshape((-1,1)).transpose().repeat(latvar.shape[0], axis=0)\n\t\t\t\treturn (latvar_2d, lonvar_2d)\n\t\t\t\n\t\t\t# for 2D variables its easy, just return the variable data\n\t\t\telif len(latvar.shape) >= 2 and len(lonvar.shape) >= 2:\n\t\t\t\t\n\t\t\t\t# Handle the WRF case where lat/lon variables are 3D with time as first dimension\n\t\t\t\tif len(latvar.shape) == 3 and len(lonvar.shape) == 3:\n\t\t\t\t\treturn (latvar[0,lat_subset], lonvar[0,lon_subset])\n\t\t\t\telse:\n\t\t\t\t\treturn (latvar[lat_subset], lonvar[lon_subset])\n\t\t\t\n\t\t\t# otherwise, we can't do it!\n\t\t\telse:\n\t\t\t\treturn (None, None)\n\t\t\n\t\telif self.featuretype == 'PointSeries':\n\t\t\treturn (self.latitude_variable[:], self.longitude_variable[:])",
"def best_coords(self):\n lat, lon = None, None\n for term in self.terms:\n # print(term)\n # print(term['weight'])\n geo = term.get(\"geo\")\n if geo:\n osm = geo['osm']\n gm = geo['gm']\n geo_data = None\n if osm:\n geo_data = osm\n elif gm:\n geo_data = gm\n if geo_data:\n g = geo_data[0]\n lat, lon = g['latitude'], g['longitude']\n break\n return lat, lon, self.region",
"def coordExtrema(a):\n # Extreme values of longitude and latitude in the survey.\n longiMin = sp.inf\n latMin = sp.inf\n longiMax = -sp.inf\n latMax = -sp.inf\n for t in range(len(a)):\n if a[t].pktCount > 0:\n arraMin = sp.amin(a[t].longi)\n if arraMin < longiMin:\n longiMin = sp.amin(a[t].longi)\n arraMin = sp.amin(a[t].lat)\n if arraMin < latMin:\n latMin = arraMin\n arraMax = sp.amax(a[t].longi)\n if arraMax > longiMax:\n longiMax = arraMax\n arraMax = sp.amax(a[t].lat)\n if arraMax > latMax:\n latMax = arraMax\n\n ext = cs.emptyClass()\n ext.longiMin = longiMin\n ext.longiMax = longiMax\n ext.latMin = latMin\n ext.latMax = latMax\n return ext",
"def levvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lev_axis1 = levAxis(mv1)\n lev_axis2 = levAxis(mv2)\n if len(lev_axis1)<=len(lev_axis2):\n lev_axis = lev_axis1\n mv = mv1\n else:\n lev_axis = lev_axis2\n mv = mv2\n levmv = cdms2.createVariable( lev_axis[:], axes=[lev_axis], id='levels',\n attributes={'units':lev_axis.units} )\n return levmv",
"def find_area(self):\n min_lat_point = self.latitude_min\n max_lat_point = self.latitude_max\n min_lon_point = self.longitude_min\n max_lon_point = self.longitude_max\n self.rename_latitude()\n self.rename_longitude()\n all_lat_bounds = self.cube.coord('latitude').bounds\n all_lon_bounds = self.cube.coord('longitude').bounds\n # print(all_lat_bounds)\n # print(all_lon_bounds)\n for i, lat in enumerate(all_lat_bounds):\n for j, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= min_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= min_lon_point < lon_bounds[1]:\n nlat_min = i\n nlon_min = j\n else:\n pass\n else:\n pass\n\n for k, lat in enumerate(all_lat_bounds):\n for l, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= max_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= max_lon_point < lon_bounds[1]:\n nlat_max = k\n nlon_max = l\n else:\n pass\n else:\n pass\n\n area_subset = self.cube[:, nlat_min:nlat_max+1, nlon_min:nlon_max+1]\n # print(area_subset.coord('latitude').points)\n # print(area_subset.coord('longitude').points)\n area_mean = area_subset.collapsed(['latitude', 'longitude'],\n iris.analysis.MEAN)\n\n return area_mean",
"def extract_loc(ref_lon, ref_lat, tlon, tlat, var):\n\n if var.ndim == 3: # 3D variable\n zmax, imax, jmax = var.shape\n threeD = True\n elif var.ndim == 2: # 2D variable\n imax, jmax = var.shape\n threeD = False\n else:\n print 'extract_loc: check variable dimensions'\n return\n\n # find the indices of the 4 model grid points around the location\n Ilist, Jlist = find_stn_idx(ref_lon, ref_lat, tlon, tlat)\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n dist[dist==0] = 1.e-15 # avoid division by zero\n\n # arrays to store weights and data to be averaged\n if threeD: # 3D variable\n wghts = MA.zeros((zmax,len(Ilist)*len(Jlist)),float)\n data = MA.zeros((zmax,len(Ilist)*len(Jlist)),float)\n if MA.isMA(var): # mask weights\n dist_m = MA.array(N.resize(dist,var.shape),mask=var.mask)\n else:\n dist_m = N.array(N.resize(dist,var.shape))\n else: # 2D variable\n wghts = MA.zeros((len(Ilist)*len(Jlist)),float)\n data = MA.zeros((len(Ilist)*len(Jlist)),float)\n if MA.isMA(var):\n dist_m = MA.array(dist,mask=var.mask) # mask weights\n else:\n dist_m = N.array(dist)\n\n # get the 4 model grid points and compute weights\n n = 0\n for i in Ilist:\n for j in Jlist:\n wghts[...,n] = 1./dist_m[...,i,j]\n data[...,n] = var[...,i,j]\n n += 1\n\n # compute weighted average\n wavg = MA.average(data,axis=-1,weights=wghts)\n return wavg",
"def spatial(self):",
"def get_geo_extents(nc, possible_units, std_name, axis_name, short_name):\n\n geo_extent_vars = {}\n geo_extent_units = []\n\n # variables must have units\n for var in nc.get_variables_by_attributes(units=lambda x: x is not None):\n \n geo_extent_vars[var.name] = 0\n # units in this set\n if var.units in possible_units:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n \n\n # standard name\n if hasattr(var, 'standard_name') and var.standard_name == std_name:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n \n # axis of \"X\"\n if hasattr(var, 'axis') and var.axis == axis_name:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n \n\n if var.name == std_name or var.name == short_name:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n\n if len(geo_extent_vars) == 0:\n return\n\n # filter out any zero scores\n geo_extent_vars = dict(filter(lambda x: x[1]>0, geo_extent_vars.items()))\n\n # sort by criteria passed\n final_geo_vars = sorted(geo_extent_vars, key=lambda x: geo_extent_vars[x], reverse=True)\n\n obs_mins = [np.nanmin(nc.variables[var]) for var in final_geo_vars if not np.isnan(nc.variables[var]).all()]\n obs_maxs = [np.nanmax(nc.variables[var]) for var in final_geo_vars if not np.isnan(nc.variables[var]).all()]\n\n # Let's just pick one\n geo_vals = nc.variables[final_geo_vars[0][:]]\n if geo_vals.size == 1:\n obs_res = [0.0]\n else:\n obs_res = [np.nanmean(np.diff(nc.variables[var])) for var in final_geo_vars if not np.isnan(nc.variables[var]).all()]\n\n geo_min = round(float(min(obs_mins)), 5)\n geo_max = round(float(max(obs_maxs)), 5)\n geo_extent_units = [nc.variables[k].units for k, v in geo_extent_vars.items()][0]\n geo_res = \"{} {}\".format(round(float(abs(np.mean(obs_res))), 5), geo_extent_units)\n\n print('<attribute name=\"geospatial_{}_min\" value=\"{}\" />'.format(short_name, geo_min))\n print('<attribute name=\"geospatial_{}_max\" value=\"{}\" />'.format(short_name, geo_max))\n print('<attribute name=\"geospatial_{}_resolution\" value=\"{}\" />'.format(short_name, geo_res))\n print('<attribute name=\"geospatial_{}_units\" value=\"{}\" />'.format(short_name, geo_extent_units))",
"def lat_lons(self):",
"def reduce2lat_old( mv, vid=None ):\n # >>> For now, I'm assuming that the only axes are time,lat,lon\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n # If I base another reduction function on this one, it's important to note that an average\n # in the lat direction will unavoidably need weights, because of the geometry.\n\n if vid==None:\n vid = 'reduced_'+mv.id\n time_axis, lat_axis, lon_axis = tllAxes( mv )\n\n mvta = timeave_old( mv )\n\n zm = numpy.zeros( mvta.shape[0] )\n for i in range(len(lat_axis)):\n for j in range(len(lon_axis)):\n zm[i] += mvta[i,j]\n zm[i] /= len(lon_axis)\n zmv = cdms2.createVariable( zm, axes=[lat_axis], id=vid )\n return zmv",
"def mme_geo(samples, moment=1):\n samples = samples ** moment\n k = len(samples)\n return ( k / np.sum(samples))",
"def spatial_dimension(self):\r\n pass",
"def nspatials(self):\n return int(len(self)/2)",
"def get_min_max(self):\n\n mr = np.sqrt(2 * np.log(1/self.mth)) * self.ms\n mr[:] = np.max(mr)\n\n mxmin = self.mx - mr\n mxmax = self.mx + mr\n mymin = self.my - mr\n mymax = self.my + mr\n mzmin = self.mz - mr\n mzmax = self.mz + mr\n\n mb_xmin_idx = np.argmin(mxmin[self.ma > 0])\n mb_xmax_idx = np.argmax(mxmax[self.ma > 0])\n mb_ymin_idx = np.argmin(mymin[self.ma > 0])\n mb_ymax_idx = np.argmax(mymax[self.ma > 0])\n mb_zmin_idx = np.argmin(mzmin[self.ma > 0])\n mb_zmax_idx = np.argmax(mzmax[self.ma > 0])\n\n xmin0 = self.mx[mb_xmin_idx] - mr[mb_xmin_idx]\n xmax0 = self.mx[mb_xmax_idx] + mr[mb_xmax_idx]\n ymin0 = self.my[mb_ymin_idx] - mr[mb_ymin_idx]\n ymax0 = self.my[mb_ymax_idx] + mr[mb_ymax_idx]\n zmin0 = self.mz[mb_zmin_idx] - mr[mb_zmin_idx]\n zmax0 = self.mz[mb_zmax_idx] + mr[mb_zmax_idx]\n\n xmin = xmin0 - (xmax0 - xmin0) * 0.25\n xmax = xmax0 + (xmax0 - xmin0) * 0.25\n ymin = ymin0 - (ymax0 - ymin0) * 0.25\n ymax = ymax0 + (ymax0 - ymin0) * 0.25\n zmin = zmin0 - (zmax0 - zmin0) * 0.25\n zmax = zmax0 + (zmax0 - zmin0) * 0.25\n\n return xmin, xmax, ymin, ymax, zmin, zmax",
"def probaContagius(lat1,lon1,lat2,lon2,M):\n GAMMA = 0.02\n dlon = abs(lon2 - lon1) * 10000\n dlat = abs(lat2 - lat1) * 10000\n distance_Y = int(round(dlon, 0))\n distance_X = int(round(dlat, 0))\n proba = 0\n if ( (distance_X>=0 and distance_X<300) and (distance_Y>=0 and distance_Y<300) ):\n proba = GAMMA * M[distance_X][distance_Y]\n return proba",
"def getMinMax(self,arr):\n # not implemented for Template SED yet\n return arr[\"z\"], arr[\"z\"]",
"def findSubsetIndices(min_lat,max_lat,min_lon,max_lon,lats,lons):\n res=np.zeros((4),dtype=np.float64)\n minLon=min_lon; maxLon=max_lon\n\n distances1 = []; distances2 = []\n indices=[]; index=1\n\n for point in lats:\n s1 = max_lat-point # (vector subtract)\n s2 = min_lat-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n\n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n\n distances1 = []; distances2 = []; index=1\n\n for point in lons:\n s1 = maxLon-point # (vector subtract)\n s2 = minLon-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n\n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n minJ=indices[1][2]\n maxJ=indices[0][2]\n minI=indices[3][2]\n maxI=indices[2][2]\n\n res[0]=minI; res[1]=maxI; res[2]=minJ; res[3]=maxJ\n return res",
"def avg_variables(ds1, ds2, lat, lon, z, p):\r\n \r\n T1 = ds1.temp.mean(dim='time').mean(dim='lon')\r\n T2 = ds2.temp.mean(dim='time').mean(dim='lon')\r\n T_avg = average(T1, T2, z, lat, 'lat', 'pfull', 'K')\r\n \r\n uz1 = ds1.ucomp.mean(dim='time').mean(dim='lon')\r\n uz2 = ds2.ucomp.mean(dim='time').mean(dim='lon')\r\n uz_avg = average(uz1, uz2, z, lat, 'lat', 'pfull', 'm/s')\r\n \r\n msf1 = v(ds1, p, lat)\r\n msf2 = v(ds2, p, lat)\r\n msf_avg = average(msf1, msf2, z, lat, 'lat', 'pfull', 'kg/s')\r\n \r\n return T_avg, uz_avg, msf_avg",
"def nearlonlat_zl(lon,lat,lonp,latp): # needed for the next function get_FVCOM_bottom_temp \r\n # approximation for small distance \r\n cp=np.cos(latp*np.pi/180.) \r\n dx=(lon-lonp)*cp\r\n dy=lat-latp \r\n xi=np.argmin(abs(dx)) \r\n yi=np.argmin(abs(dy))\r\n min_dist=111*np.sqrt(dx[xi]**2+dy[yi]**2)\r\n return xi,yi,min_dist",
"def get_min_max(self) -> tuple:\r\n\r\n minimum = float(\"inf\")\r\n maximum = float(\"-inf\")\r\n\r\n for name, data in self.scatters_data.items():\r\n mapping = self.scatters[name][\"mapping\"]\r\n min_x = float(\"inf\")\r\n min_y = float(\"inf\")\r\n min_z = float(\"inf\")\r\n max_x = float(\"-inf\")\r\n max_y = float(\"-inf\")\r\n max_z = float(\"-inf\")\r\n\r\n if mapping[\"x\"] in data:\r\n min_x = min(data[mapping[\"x\"]])\r\n max_x = max(data[mapping[\"x\"]])\r\n\r\n if mapping[\"y\"] in data:\r\n min_y = min(data[mapping[\"y\"]])\r\n max_y = max(data[mapping[\"y\"]])\r\n\r\n if mapping[\"z\"] in data:\r\n min_z = min(data[mapping[\"z\"]])\r\n max_z = max(data[mapping[\"z\"]])\r\n\r\n minimum = min(minimum, min([min_x, min_y, min_z]))\r\n maximum = max(maximum, max([max_x, max_y, max_z]))\r\n\r\n for name, data in self.trees_data.items():\r\n if self.trees[name][\"point_helper\"] is None:\r\n mapping = self.trees[name][\"mapping\"]\r\n min_x = float(\"inf\")\r\n min_y = float(\"inf\")\r\n min_z = float(\"inf\")\r\n max_x = float(\"-inf\")\r\n max_y = float(\"-inf\")\r\n max_z = float(\"-inf\")\r\n\r\n if mapping[\"x\"] in data:\r\n min_x = min(data[mapping[\"x\"]])\r\n max_x = max(data[mapping[\"x\"]])\r\n\r\n if mapping[\"y\"] in data:\r\n min_y = min(data[mapping[\"y\"]])\r\n max_y = max(data[mapping[\"y\"]])\r\n\r\n if mapping[\"z\"] in data:\r\n min_z = min(data[mapping[\"z\"]])\r\n max_z = max(data[mapping[\"z\"]])\r\n\r\n minimum = min(minimum, min([min_x, min_y, min_z]))\r\n maximum = max(maximum, max([max_x, max_y, max_z]))\r\n\r\n return minimum, maximum",
"def find_longitude_var(nc,name):\n var_obj = nc.variables[name] # Find the corresponding variable object \n dimens = var_obj.dimensions # Find the names of the dimensions of variable\n for i in range(len(dimens)):\n # For each dimension find the corresponding variable\n var_dim = nc.variables[dimens[i]]\n if is_longitude_var(var_dim) == True:\n return var_obj # If longitude exists, return the variable object\n \n return None",
"def proj(self,lon,lat):\n x, y = self(np.atleast_1d(lon),np.atleast_1d(lat))\n x[x > 1e29] = None\n y[y > 1e29] = None\n #return np.ma.array(x,mask=x>1e2),np.ma.array(y,mask=y>1e2)\n return x, y",
"def getShortestCoordinate (analyzer,startLat, startLon, endLat, endLon):\n estacionOrigen=model.getCloserStation (analyzer, startLat, startLon)\n estacionDestino=model.getCloserStation (analyzer, endLat, endLon)\n ruta,tiempo=model.getShortestCoordinate(analyzer,estacionOrigen, estacionDestino)\n return (estacionOrigen,estacionDestino,ruta,tiempo)"
] | [
"0.69351184",
"0.62349105",
"0.6094042",
"0.5838326",
"0.5824384",
"0.58051234",
"0.57359993",
"0.5527261",
"0.5517576",
"0.5458443",
"0.54356056",
"0.5415327",
"0.54134643",
"0.54001105",
"0.5361201",
"0.52343124",
"0.52035445",
"0.51936215",
"0.5187855",
"0.51732856",
"0.5172442",
"0.51652145",
"0.51572615",
"0.51517236",
"0.514668",
"0.51264614",
"0.5120579",
"0.51170605",
"0.5116207",
"0.5109469"
] | 0.73494667 | 0 |
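A minimal usage sketch for the latvar_min() record above (not part of the dataset rows): file and variable names are placeholders, and it assumes cdms2 plus the latAxis() helper used by latvar_min() are available.

import cdms2
ts_model = cdms2.open('model.nc')('TS')   # placeholder file/variable names
ts_obs = cdms2.open('obs.nc')('TS')
lat = latvar_min(ts_model, ts_obs)        # latitudes of whichever input has fewer lat points
print(len(lat))                           # length of the coarser latitude axis
print(lat.id)                             # 'lat'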
returns a transient variable which is dimensioned along the longitude axis of whichever of mv1, mv2 has fewer longitude points, and whose values are those longitudes | def lonvar_min( mv1, mv2 ):
if mv1 is None: return None
if mv2 is None: return None
lon_axis1 = lonAxis(mv1)
lon_axis2 = lonAxis(mv2)
if len(lon_axis1)<=len(lon_axis2):
lon_axis = lon_axis1
mv = mv1
else:
lon_axis = lon_axis2
mv = mv2
lonmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',
attributes={'units':lon_axis.units} )
return lonmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def latvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lat_axis1 = latAxis(mv1)\n lat_axis2 = latAxis(mv2)\n if len(lat_axis1)<=len(lat_axis2):\n lat_axis = lat_axis1\n mv = mv1\n else:\n lat_axis = lat_axis2\n mv = mv2\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units} )\n return latmv",
"def lonvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lon_axis = lonAxis(mv)\n latmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units},\n copy=True )\n return latmv",
"def reduce2scalar_zonal( mv, latmin=-90, latmax=90, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n ilat = None\n for i,ax in enumerate(axes):\n if ax.id=='lat': ilat = i\n # reduce size of lat axis to (latmin,latmax)\n # Let's home a direct search will be fast enough:\n lataxis = latAxis( mv )\n lmin = -1\n lmax = len(lataxis)\n if lataxis[0]>=latmin: lmin = 0\n if lataxis[-1]<=latmax: lmax = len(lataxis)-1\n if lmin==-1 or lmax==len(lataxis):\n for l,ax in enumerate(lataxis):\n if lmin==-1 and ax>=latmin: lmin = max( 0, l )\n if lmax==len(lataxis) and ax>=latmax: lmax = min( l, len(lataxis) )\n lataxis_shrunk = lataxis.subaxis(lmin,lmax)\n mv2shape = list(mv.shape)\n mv2shape[ilat] = lmax-lmin+1\n axes[ilat] = lataxis_shrunk\n mvd1 = numpy.delete( mv, slice(0,lmin), ilat )\n mvdata = numpy.delete( mvd1, slice(lmax-lmin,len(lataxis)-lmin), ilat )\n mv2 = cdms2.createVariable( mvdata, axes=axes )\n\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n avmv = averager( mv2, axis=axes_string )\n avmv.id = vid # Note that the averager function returns a variable with meaningless id.\n ammv.units = mv.units\n\n return avmv",
"def find_longitude_var(nc,name):\n var_obj = nc.variables[name] # Find the corresponding variable object \n dimens = var_obj.dimensions # Find the names of the dimensions of variable\n for i in range(len(dimens)):\n # For each dimension find the corresponding variable\n var_dim = nc.variables[dimens[i]]\n if is_longitude_var(var_dim) == True:\n return var_obj # If longitude exists, return the variable object\n \n return None",
"def latvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lat_axis = latAxis(mv)\n #latmv = mv.clone() # good if mv has only a lat axis\n #latmv[:] = lat_axis[:]\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units},\n copy=True )\n return latmv",
"def spatial_var(map_):\n expx, expy = spatial_expval(map_)\n map_ = map_ / np.sum(map_)\n x, y = np.meshgrid(np.arange(map_.shape[1]), np.arange(map_.shape[0]))\n\n return np.sum(map_ * ((x - expx) ** 2 + (y - expy) ** 2))",
"def reduce2scalar_zonal_old( mv, latmin=-90, latmax=90, vid=None ):\n # For now, I'm assuming that the only axes are time,lat,lon - so that zm is a scalar.\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n if vid==None:\n vid = 'reduced_'+mv.id\n time,lat,lon = tllAxes(mv)\n if hasattr(mv.parent,'variables'):\n fil = mv.parent # mv is a fileVariable and fil is a file.\n lat_bnds = fil[lat.bounds]\n else:\n lataxis = latAxis(mv) # mv is a TransientVariable\n lat_bnds = lataxis._bounds_\n\n mvta = timeave_old( mv )\n\n # In computing the average, we use area weighting.\n # Sometimes the area is available in cell_measures, but for now I'll just use the backup method:\n # The area of a lonlat cell is R^2*delta(lon)*delta(sin(lat)).\n # With equally spaced lon, we don't need delta(lon) for weights.\n # I'll assume that lat,lon are in degrees, which is the only way I've ever seen them.\n wgtsum = 0\n zm = 0\n for i,lati in enumerate(lat):\n # The following test could be sped up a lot, because lat[i] is ordered...\n # >>> to do: partial overlaps\n if latmin<=lati and lati<latmax:\n latlo = lat_bnds[i,0]\n lathi = lat_bnds[i,1]\n wgti = sin(radians(lathi))-sin(radians(latlo))\n zi = 0.0\n for j in range(len(lon)):\n zi += mvta[i,j]\n zi *= wgti\n wgtsum += wgti*len(lon)\n zm += zi\n zm /= wgtsum\n # zm is a scalar, so createVariable gets no axes argument:\n zmv = cdms2.createVariable( zm, id=vid )\n return zmv",
"def find_area(self):\n min_lat_point = self.latitude_min\n max_lat_point = self.latitude_max\n min_lon_point = self.longitude_min\n max_lon_point = self.longitude_max\n self.rename_latitude()\n self.rename_longitude()\n all_lat_bounds = self.cube.coord('latitude').bounds\n all_lon_bounds = self.cube.coord('longitude').bounds\n # print(all_lat_bounds)\n # print(all_lon_bounds)\n for i, lat in enumerate(all_lat_bounds):\n for j, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= min_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= min_lon_point < lon_bounds[1]:\n nlat_min = i\n nlon_min = j\n else:\n pass\n else:\n pass\n\n for k, lat in enumerate(all_lat_bounds):\n for l, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= max_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= max_lon_point < lon_bounds[1]:\n nlat_max = k\n nlon_max = l\n else:\n pass\n else:\n pass\n\n area_subset = self.cube[:, nlat_min:nlat_max+1, nlon_min:nlon_max+1]\n # print(area_subset.coord('latitude').points)\n # print(area_subset.coord('longitude').points)\n area_mean = area_subset.collapsed(['latitude', 'longitude'],\n iris.analysis.MEAN)\n\n return area_mean",
"def latlons(self):\n\t\t\n\t\t# First check we have a grid feature type\n\t\tif self.featuretype in ['Grid', 'GridSeries']:\n\n\t\t\tlatvar = self.latitude_variable\n\t\t\tlonvar = self.longitude_variable\n\n\t\t\tlatdims = self.coordinates_mapping['latitude']['map']\n\t\t\tlondims = self.coordinates_mapping['longitude']['map']\n\n\t\t\t# Create latitude and longitude subset slices from the field subset slices\n\t\t\tlat_subset = []\n\t\t\tfor dim in latdims:\n\t\t\t\tlat_subset.append(self._subset[dim])\n\t\t\t\n\t\t\tlon_subset = []\n\t\t\tfor dim in londims:\n\t\t\t\tlon_subset.append(self._subset[dim])\n\n\t\t\t# Then check if latitude and longitude variables are 1D\n\t\t\tif len(latvar.shape) == 1 and len(lonvar.shape) == 1:\n\t\t\t\tlatvar_2d = latvar[lat_subset].reshape((-1,1)).repeat(lonvar.shape[0], axis=1)\n\t\t\t\tlonvar_2d = lonvar[lon_subset].reshape((-1,1)).transpose().repeat(latvar.shape[0], axis=0)\n\t\t\t\treturn (latvar_2d, lonvar_2d)\n\t\t\t\n\t\t\t# for 2D variables its easy, just return the variable data\n\t\t\telif len(latvar.shape) >= 2 and len(lonvar.shape) >= 2:\n\t\t\t\t\n\t\t\t\t# Handle the WRF case where lat/lon variables are 3D with time as first dimension\n\t\t\t\tif len(latvar.shape) == 3 and len(lonvar.shape) == 3:\n\t\t\t\t\treturn (latvar[0,lat_subset], lonvar[0,lon_subset])\n\t\t\t\telse:\n\t\t\t\t\treturn (latvar[lat_subset], lonvar[lon_subset])\n\t\t\t\n\t\t\t# otherwise, we can't do it!\n\t\t\telse:\n\t\t\t\treturn (None, None)\n\t\t\n\t\telif self.featuretype == 'PointSeries':\n\t\t\treturn (self.latitude_variable[:], self.longitude_variable[:])",
"def lat_lons(self):",
"def restrict_lat( mv, latmin, latmax ):\n if latmin==-90: latmin = -91 # just to make sure\n if latmax==90: latmax = 91\n\n # axes\n latax,idx = latAxis2(mv)\n if latax is None: return None\n imin = min( [i for i in range(len(latax)) if latax[i]>=latmin and latax[i]<=latmax ] )\n imax = max( [i for i in range(len(latax)) if latax[i]>=latmin and latax[i]<=latmax ] )\n newlatax = latax.subaxis( imin, imax+1 )\n # TO DO: use latax.bounds (if present) for newlatax.bounds\n # At the moment, I'm working with data for which latax.bounds doesn't exist.\n # At the moment, we don't need bounds. This would get us through if necessary:\n # newlatax.bounds = newlatax.genGenericBounds()\n newaxes = list( allAxes(mv) ) # shallow copy\n newaxes[idx] = newlatax\n\n # shrink the data to match the shrunk lat axis\n newmv_shape = list( mv.shape )\n newmv_shape[idx] = imax+1 - imin\n if imin>0:\n nd = numpy.delete( mv.data, slice(0,imin), idx ) # doesn't change mv\n else:\n nd = mv\n lenidx = nd.shape[idx]\n if lenidx > newmv_shape[idx]:\n newdata = numpy.delete( nd.data, slice(imax+1-imin,lenidx), idx )\n else:\n newdata = nd\n\n # new variable\n newmv = cdms2.createVariable( newdata, copy=True, axes=newaxes, id=mv.id )\n newmv.units = mv.units\n return newmv",
"def best_coords(self):\n lat, lon = None, None\n for term in self.terms:\n # print(term)\n # print(term['weight'])\n geo = term.get(\"geo\")\n if geo:\n osm = geo['osm']\n gm = geo['gm']\n geo_data = None\n if osm:\n geo_data = osm\n elif gm:\n geo_data = gm\n if geo_data:\n g = geo_data[0]\n lat, lon = g['latitude'], g['longitude']\n break\n return lat, lon, self.region",
"def coordExtrema(a):\n # Extreme values of longitude and latitude in the survey.\n longiMin = sp.inf\n latMin = sp.inf\n longiMax = -sp.inf\n latMax = -sp.inf\n for t in range(len(a)):\n if a[t].pktCount > 0:\n arraMin = sp.amin(a[t].longi)\n if arraMin < longiMin:\n longiMin = sp.amin(a[t].longi)\n arraMin = sp.amin(a[t].lat)\n if arraMin < latMin:\n latMin = arraMin\n arraMax = sp.amax(a[t].longi)\n if arraMax > longiMax:\n longiMax = arraMax\n arraMax = sp.amax(a[t].lat)\n if arraMax > latMax:\n latMax = arraMax\n\n ext = cs.emptyClass()\n ext.longiMin = longiMin\n ext.longiMax = longiMax\n ext.latMin = latMin\n ext.latMax = latMax\n return ext",
"def levvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lev_axis1 = levAxis(mv1)\n lev_axis2 = levAxis(mv2)\n if len(lev_axis1)<=len(lev_axis2):\n lev_axis = lev_axis1\n mv = mv1\n else:\n lev_axis = lev_axis2\n mv = mv2\n levmv = cdms2.createVariable( lev_axis[:], axes=[lev_axis], id='levels',\n attributes={'units':lev_axis.units} )\n return levmv",
"def local_maxima_3D(data, order=3):\n size = 1 + 2 * order\n footprint = np.ones((size, size, size))\n footprint[order, order, order] = 0\n\n filtered = ndi.maximum_filter(data, footprint=footprint)\n mask_local_maxima = data > filtered\n coords = np.asarray(np.where(mask_local_maxima)).T\n values = data[mask_local_maxima]\n\n return coords, values",
"def extract_loc(ref_lon, ref_lat, tlon, tlat, var):\n\n if var.ndim == 3: # 3D variable\n zmax, imax, jmax = var.shape\n threeD = True\n elif var.ndim == 2: # 2D variable\n imax, jmax = var.shape\n threeD = False\n else:\n print 'extract_loc: check variable dimensions'\n return\n\n # find the indices of the 4 model grid points around the location\n Ilist, Jlist = find_stn_idx(ref_lon, ref_lat, tlon, tlat)\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n dist[dist==0] = 1.e-15 # avoid division by zero\n\n # arrays to store weights and data to be averaged\n if threeD: # 3D variable\n wghts = MA.zeros((zmax,len(Ilist)*len(Jlist)),float)\n data = MA.zeros((zmax,len(Ilist)*len(Jlist)),float)\n if MA.isMA(var): # mask weights\n dist_m = MA.array(N.resize(dist,var.shape),mask=var.mask)\n else:\n dist_m = N.array(N.resize(dist,var.shape))\n else: # 2D variable\n wghts = MA.zeros((len(Ilist)*len(Jlist)),float)\n data = MA.zeros((len(Ilist)*len(Jlist)),float)\n if MA.isMA(var):\n dist_m = MA.array(dist,mask=var.mask) # mask weights\n else:\n dist_m = N.array(dist)\n\n # get the 4 model grid points and compute weights\n n = 0\n for i in Ilist:\n for j in Jlist:\n wghts[...,n] = 1./dist_m[...,i,j]\n data[...,n] = var[...,i,j]\n n += 1\n\n # compute weighted average\n wavg = MA.average(data,axis=-1,weights=wghts)\n return wavg",
"def get_scale_local_maximas_vectorized(cube_coordinates, laplacian_cube):\n x, y, z = [ cube_coordinates[:, ind] for ind in range(3) ]\n \n point_responses = laplacian_cube[x, y, z]\n lowers = point_responses.copy()\n uppers = point_responses.copy()\n not_layer_0 = z > 0\n lower_responses = laplacian_cube[x[not_layer_0], y[not_layer_0], z[not_layer_0]-1]\n lowers[not_layer_0] = lower_responses \n \n not_max_layer = z < (laplacian_cube.shape[2] - 1)\n upper_responses = laplacian_cube[x[not_max_layer], y[not_max_layer], z[not_max_layer]+1]\n uppers[not_max_layer] = upper_responses\n \n lo_check = np.ones(z.shape, dtype=np.bool)\n lo_check[not_layer_0] = (point_responses > lowers)[not_layer_0]\n hi_check = np.ones(z.shape, dtype=np.bool)\n hi_check[not_max_layer] = (point_responses > uppers)[not_max_layer]\n \n return cube_coordinates[lo_check & hi_check]",
"def get_bounds(self):\n\n northing=self.f.variables['y']\n easting=self.f.variables['x']\n\n lat1,lon1 = utm.to_latlon(np.min(easting),np.min(northing),11,northern=True)\n lat2,lon2 = utm.to_latlon(np.max(easting),np.max(northing),11,northern=True)\n\n return (lon1,lon2,lat1,lat2)",
"def proj(self,lon,lat):\n x, y = self(np.atleast_1d(lon),np.atleast_1d(lat))\n x[x > 1e29] = None\n y[y > 1e29] = None\n #return np.ma.array(x,mask=x>1e2),np.ma.array(y,mask=y>1e2)\n return x, y",
"def findSubsetIndices(min_lat,max_lat,min_lon,max_lon,lats,lons):\n res=np.zeros((4),dtype=np.float64)\n minLon=min_lon; maxLon=max_lon\n\n distances1 = []; distances2 = []\n indices=[]; index=1\n\n for point in lats:\n s1 = max_lat-point # (vector subtract)\n s2 = min_lat-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n\n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n\n distances1 = []; distances2 = []; index=1\n\n for point in lons:\n s1 = maxLon-point # (vector subtract)\n s2 = minLon-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n\n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n minJ=indices[1][2]\n maxJ=indices[0][2]\n minI=indices[3][2]\n maxI=indices[2][2]\n\n res[0]=minI; res[1]=maxI; res[2]=minJ; res[3]=maxJ\n return res",
"def moonlongitude(time):\n B0 = 481267.8809\n C0 = 218.3162\n # fmt: off\n A = np.array([62888.e-4, 12740.e-4, 6583.e-4, 2136.e-4, 1851.e-4, \\\n 1144.e-4, 588.e-4, 571.e-4, 533.e-4, 458.e-4, 409.e-4, \\\n 347.e-4, 304.e-4, 154.e-4, 125.e-4, 110.e-4, 107.e-4, \\\n 100.e-4, 85.e-4, 79.e-4, 68.e-4, 52.e-4, 50.e-4, 40.e-4, \\\n 40.e-4, 40.e-4, 38.e-4, 37.e-4, 28.e-4, 27.e-4, 26.e-4, \\\n 24.e-4, 23.e-4, 22.e-4, 21.e-4, 21.e-4, 21.e-4, 18.e-4, \\\n 16.e-4, 12.e-4, 11.e-4, 9.e-4, 8.e-4, 7.e-4, 7.e-4, \\\n 7.e-4, 7.e-4, 6.e-4, 6.e-4, 5.e-4, 5.e-4, 5.e-4, \\\n 4.e-4, 4.e-4, 3.e-4, 3.e-4, 3.e-4, 3.e-4, 3.e-4, \\\n 3.e-4, 3.e-4])\n B = np.array([477198.868, 413335.35, 890534.22, 954397.74, \\\n 35999.05, 966404.0, 63863.5, 377336.3, \\\n 1367733.1, 854535.2, 441199.8, 445267.1, \\\n 513197.9, 75870, 1443603, 489205, 1303870, \\\n 1431597, 826671, 449334, 926533, 31932, \\\n 481266, 1331734, 1844932, 133, 1781068, \\\n 541062, 1934, 918399, 1379739, 99863, \\\n 922466, 818536, 990397, 71998, 341337, \\\n 401329, 1856938, 1267871, 1920802, 858602, \\\n 1403732, 790672, 405201, 485333, 27864, \\\n 111869, 2258267, 1908795, 1745069, 509131, \\\n 39871, 12006, 958465, 381404, 349472, \\\n 1808933, 549197, 4067, 2322131.])\n C = np.array([44.963, 10.74, 145.70, 179.93, 87.53, 276.5, \\\n 124.2, 13.2, 280.7, 148.2, 47.4, 27.9, 222.5, \\\n 41, 52, 142, 246, 315, 111, 188, \\\n 323, 107, 205, 283, 56, 29, 21, \\\n 259, 145, 182, 17, 122, 163, 151, \\\n 357, 85, 16, 274, 152, 249, 186, \\\n 129, 98, 114, 50, 186, 127, 38, \\\n 156, 90, 24, 242, 223, 187, 340, \\\n 354, 337, 58, 220, 70, 191])\n # fmt: on\n RAD = 0.0174532925199433\n tempb = (B * time + C) * RAD\n amp = A * np.cos(tempb)\n moonlon = np.sum(amp)\n moonlon = (moonlon + B0 * time + C0) * RAD\n return moonlon",
"def geo_m_v2(data_array):\n r = 6378.137 #promien ziemi w km\n delta = np.zeros(data_array.size//7-1)\n alo = data_array[0][1]\n ala = data_array[0][2]\n count = 0\n for row in data_array[1:]:\n dLat = (row[2] - ala) * math.pi/180.0\n dLon = (row[1] - alo) * math.pi/180.0\n a = math.sin(dLat/2.0)**2 + math.cos(ala * math.pi/180.0) * math.cos(row[2] * math.pi/180.0)\\\n * math.sin(dLon/2.0)**2\n delta[count] = r * 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))#w km\n count += 1\n alo = row[1]\n ala = row[2]\n return delta",
"def merge_maps(self, map_2d):\n x = map_2d.data.max(0, keepdim=True)[0]\n y = map_2d.data.max(1, keepdim=True)[0]\n return x, y",
"def mlat_finder(self, lat1, lat0, pole = \"north\"):\n\n if pole == \"both\":\n lowerA = np.abs(self.mlatA) < lat1\n higherA = np.abs(self.mlatA) > lat0\n is_poleA = lowerA * higherA\n\n elif pole == \"north\":\n lowerA = (self.mlatA) < lat1\n higherA = (self.mlatA) > lat0\n is_poleA = lowerA * higherA\n\n elif pole == \"south\":\n lowerA = (self.mlatA) > lat1\n higherA = (self.mlatA) < lat0\n is_poleA = lowerA * higherA\n\n high_lat_A = np.where(is_poleA == 1)\n low_lat_A = np.where(is_poleA == 0)\n indsA = [low_lat_A, high_lat_A]\n\n return indsA",
"def get_geo_extents(nc, possible_units, std_name, axis_name, short_name):\n\n geo_extent_vars = {}\n geo_extent_units = []\n\n # variables must have units\n for var in nc.get_variables_by_attributes(units=lambda x: x is not None):\n \n geo_extent_vars[var.name] = 0\n # units in this set\n if var.units in possible_units:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n \n\n # standard name\n if hasattr(var, 'standard_name') and var.standard_name == std_name:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n \n # axis of \"X\"\n if hasattr(var, 'axis') and var.axis == axis_name:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n \n\n if var.name == std_name or var.name == short_name:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n\n if len(geo_extent_vars) == 0:\n return\n\n # filter out any zero scores\n geo_extent_vars = dict(filter(lambda x: x[1]>0, geo_extent_vars.items()))\n\n # sort by criteria passed\n final_geo_vars = sorted(geo_extent_vars, key=lambda x: geo_extent_vars[x], reverse=True)\n\n obs_mins = [np.nanmin(nc.variables[var]) for var in final_geo_vars if not np.isnan(nc.variables[var]).all()]\n obs_maxs = [np.nanmax(nc.variables[var]) for var in final_geo_vars if not np.isnan(nc.variables[var]).all()]\n\n # Let's just pick one\n geo_vals = nc.variables[final_geo_vars[0][:]]\n if geo_vals.size == 1:\n obs_res = [0.0]\n else:\n obs_res = [np.nanmean(np.diff(nc.variables[var])) for var in final_geo_vars if not np.isnan(nc.variables[var]).all()]\n\n geo_min = round(float(min(obs_mins)), 5)\n geo_max = round(float(max(obs_maxs)), 5)\n geo_extent_units = [nc.variables[k].units for k, v in geo_extent_vars.items()][0]\n geo_res = \"{} {}\".format(round(float(abs(np.mean(obs_res))), 5), geo_extent_units)\n\n print('<attribute name=\"geospatial_{}_min\" value=\"{}\" />'.format(short_name, geo_min))\n print('<attribute name=\"geospatial_{}_max\" value=\"{}\" />'.format(short_name, geo_max))\n print('<attribute name=\"geospatial_{}_resolution\" value=\"{}\" />'.format(short_name, geo_res))\n print('<attribute name=\"geospatial_{}_units\" value=\"{}\" />'.format(short_name, geo_extent_units))",
"def get_location_metres(original_location, dNorth, dEast): \n [r_center, r_level] = eclipse_compensate(original_location)\n \n # coordinate offsets in radians\n dLat = dNorth / r_center\n dLon = dEast / r_level\n \n # new position in decimal degrees\n newlat = original_location.lat + math.degrees(dLat)\n newlon = original_location.lon + math.degrees(dLon)\n \n # return according to the input coordinate Class\n if isinstance(original_location,LocationGlobal):\n targetlocation = LocationGlobal(newlat, newlon,original_location.alt)\n \n elif isinstance(original_location,LocationGlobalRelative):\n targetlocation = LocationGlobalRelative(newlat, newlon,original_location.alt)\n \n else:\n raise Exception(\"Invalid Location object passed\")\n \n return targetlocation",
"def test_two_pop_known_var_ind(data1_: tuple, data2_: tuple):\n x_bar = cls.get_mean(data1_)\n y_bar = cls.get_mean(data2_)\n var_x = cls.get_var(data1_, is_population=True)\n var_y = cls.get_var(data2_, is_population=True)\n n_x = cls.get_n(data1_)\n n_y = cls.get_n(data2_)\n return (x_bar - y_bar) / sqrt(var_x / n_x + var_y / n_y)",
"def find_local_maxima(tens):\n return tf.squeeze(tf.where(tf.equal(label_local_extrema(tens), 'P')))",
"def get_min_max(self):\n\n mr = np.sqrt(2 * np.log(1/self.mth)) * self.ms\n mr[:] = np.max(mr)\n\n mxmin = self.mx - mr\n mxmax = self.mx + mr\n mymin = self.my - mr\n mymax = self.my + mr\n mzmin = self.mz - mr\n mzmax = self.mz + mr\n\n mb_xmin_idx = np.argmin(mxmin[self.ma > 0])\n mb_xmax_idx = np.argmax(mxmax[self.ma > 0])\n mb_ymin_idx = np.argmin(mymin[self.ma > 0])\n mb_ymax_idx = np.argmax(mymax[self.ma > 0])\n mb_zmin_idx = np.argmin(mzmin[self.ma > 0])\n mb_zmax_idx = np.argmax(mzmax[self.ma > 0])\n\n xmin0 = self.mx[mb_xmin_idx] - mr[mb_xmin_idx]\n xmax0 = self.mx[mb_xmax_idx] + mr[mb_xmax_idx]\n ymin0 = self.my[mb_ymin_idx] - mr[mb_ymin_idx]\n ymax0 = self.my[mb_ymax_idx] + mr[mb_ymax_idx]\n zmin0 = self.mz[mb_zmin_idx] - mr[mb_zmin_idx]\n zmax0 = self.mz[mb_zmax_idx] + mr[mb_zmax_idx]\n\n xmin = xmin0 - (xmax0 - xmin0) * 0.25\n xmax = xmax0 + (xmax0 - xmin0) * 0.25\n ymin = ymin0 - (ymax0 - ymin0) * 0.25\n ymax = ymax0 + (ymax0 - ymin0) * 0.25\n zmin = zmin0 - (zmax0 - zmin0) * 0.25\n zmax = zmax0 + (zmax0 - zmin0) * 0.25\n\n return xmin, xmax, ymin, ymax, zmin, zmax",
"def mme_geo(samples, moment=1):\n samples = samples ** moment\n k = len(samples)\n return ( k / np.sum(samples))"
] | [
"0.68968886",
"0.66709334",
"0.6106323",
"0.59900844",
"0.5892058",
"0.57636064",
"0.57299185",
"0.5693455",
"0.5671543",
"0.56326985",
"0.5609396",
"0.5597439",
"0.5474902",
"0.5423528",
"0.5386049",
"0.53539693",
"0.5313379",
"0.5313333",
"0.53117704",
"0.53086036",
"0.530855",
"0.53082246",
"0.5274431",
"0.526225",
"0.5235767",
"0.522743",
"0.5209887",
"0.52091694",
"0.52030355",
"0.5142041"
] | 0.7268381 | 0 |
returns a transient variable which is dimensioned as whichever of mv1, mv2 has the fewest level points but whose values are the levels | def levvar_min( mv1, mv2 ):
if mv1 is None: return None
if mv2 is None: return None
lev_axis1 = levAxis(mv1)
lev_axis2 = levAxis(mv2)
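    # use whichever variable has the coarser (shorter) level axis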
if len(lev_axis1)<=len(lev_axis2):
lev_axis = lev_axis1
mv = mv1
else:
lev_axis = lev_axis2
mv = mv2
levmv = cdms2.createVariable( lev_axis[:], axes=[lev_axis], id='levels',
attributes={'units':lev_axis.units} )
return levmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def select_lev( mv, slev ):\n levax = levAxis(mv)\n # Get ig, the first index for which levax[ig]>slev\n # Assume that levax values are monotonic.\n dummy,slev = reconcile_units( levax, slev ) # new slev has same units as levax\n if levax[0]<=levax[-1]:\n ids = numpy.where( levax[:]>=slev.value ) # assumes levax values are monotonic increasing\n else:\n ids = numpy.where( levax[:]<=slev.value ) # assumes levax values are monotonic decreasing\n if ids is None or len(ids)==0:\n ig = len(levax)-1\n else:\n ig = ids[0][0]\n # Crude fist cut: don't interpolate, just return a value\n if levax == mv.getAxisList()[0]:\n mvs = cdms2.createVariable( mv[ig:ig+1,...], copy=1 ) # why ig:ig+1 rather than ig? bug workaround.\n elif levax == mv.getAxisList()[1]:\n mvs = cdms2.createVariable( mv[:,ig:ig+1,...], copy=1 )\n else:\n print \"ERROR, select_lev() does not support level axis except as first or second dimentions\"\n return None\n return mvs",
"def lonvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lon_axis1 = lonAxis(mv1)\n lon_axis2 = lonAxis(mv2)\n if len(lon_axis1)<=len(lon_axis2):\n lon_axis = lon_axis1\n mv = mv1\n else:\n lon_axis = lon_axis2\n mv = mv2\n lonmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units} )\n return lonmv",
"def dim2():\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", \"2\", \"3\", \"4\")\n categories = OrderedDict(zip(categories, probs))\n dim2 = Categorical(\"yolo2\", categories, default_value=\"2\")\n return dim2",
"def levvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lev_axis = levAxis(mv)\n #levmv = mv.clone() # good if mv has only a lev axis\n #levmv[:] = lev_axis[:]\n levmv = cdms2.createVariable( lev_axis[:], axes=[lev_axis], id='lev',\n attributes={'units':lev_axis.units},\n copy=True )\n return levmv",
"def getLevel(unique_name):",
"def getLevels():",
"def main_trees_quick(df):\n groups = df.groupby(['tree', 'scale'])\n try:\n mmp = groups.mvir.transform(np.max) == df.mvir\n except (KeyError, AttributeError):\n mmp = groups.Mvir.transform(np.max) == df.Mvir\n return mmp",
"def est_maxlevel(dims,bandwidth):\n lev = math.floor((math.log(min(dims))/math.log(2)-2)/bandwidth)\n lev=int(lev)\n return lev",
"def latvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lat_axis1 = latAxis(mv1)\n lat_axis2 = latAxis(mv2)\n if len(lat_axis1)<=len(lat_axis2):\n lat_axis = lat_axis1\n mv = mv1\n else:\n lat_axis = lat_axis2\n mv = mv2\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units} )\n return latmv",
"def resolution(self, level):\n return 2 ** (level - 1)",
"def _determine_level(levels, points):\n import operator\n level = None\n sorted_levels = sorted(levels.iteritems(), key=operator.itemgetter(1))\n for el in sorted_levels:\n if points <= el[1]:\n level = el[0]\n break\n\n max_level = max(levels.iterkeys(), key=lambda threshold: levels[threshold])\n if points >= levels[max_level]:\n level = max_level\n return level",
"def get_level_mag(slide, level):\n return level_mags(slide)[level]",
"def levshape(self) -> Shape:\n return tuple(len(x) for x in self.levels)",
"def max_diaphragmatic_level(levels):\n return [max(x) for x in levels]",
"def zoomlevels(self):\n return self._bboxes[0][1] #TODO: merge all coverages",
"def map_roi_levels(self, rois, num_levels):\r\n scale = torch.sqrt(\r\n (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))\r\n target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))\r\n target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()\r\n return target_lvls",
"def get_var_level_index(sample_size, threshold):\n if sample_size <= 0:\n raise ValueError(\"Sample size cannot be non-positive:\", sample_size)\n if threshold <= 0.0:\n return 0\n if threshold >= 1.0:\n return sample_size - 1\n return int(math.floor(sample_size * threshold))",
"def enforce(self, wave, variables, parameters):\n return np.hstack([variables[0] - self.level])",
"def test_lfc_ml2():\n levels = np.array([1024.95703125, 1016.61474609, 1005.33056641, 991.08544922, 973.4163208,\n 951.3381958, 924.82836914, 898.25482178, 873.46124268, 848.69830322,\n 823.92553711, 788.49304199, 743.44580078, 700.50970459, 659.62017822,\n 620.70861816, 583.69421387, 548.49719238, 515.03826904, 483.24401855,\n 453.0418396, 424.36477661, 397.1505127, 371.33441162, 346.85922241,\n 323.66995239, 301.70935059, 280.92651367, 261.27053833, 242.69168091,\n 225.14237976, 208.57781982, 192.95333862, 178.22599792, 164.39630127,\n 151.54336548, 139.68635559, 128.74923706, 118.6588974, 109.35111237,\n 100.76405334, 92.84288025, 85.53556824, 78.79430389, 72.57549286,\n 66.83885193, 61.54678726, 56.66480637, 52.16108322]) * units.mbar\n temperatures = np.array([6.00750732, 5.14892578, 4.177948, 3.00268555, 1.55535889,\n -0.25527954, -1.93988037, -3.57766724, -4.40600586, -4.19238281,\n -3.71185303, -4.47943115, -6.81280518, -8.08685303, -8.41287231,\n -10.79302979, -14.13262939, -16.85784912, -19.51675415,\n -22.28689575, -24.99938965, -27.79664612, -30.90414429,\n -34.49435425, -38.438797, -42.27981567, -45.99230957,\n -49.75340271, -53.58230591, -57.30686951, -60.76026917,\n -63.92070007, -66.72470093, -68.97846985, -70.4264679,\n -71.16407776, -71.53797913, -71.64375305, -71.52735901,\n -71.53523254, -71.61097717, -71.92687988, -72.68682861,\n -74.129776, -76.02471924, -76.88977051, -76.26008606,\n -75.90351868, -76.15809631]) * units.celsius\n dewpoints = np.array([4.50012302, 3.42483997, 2.78102994, 2.24474645, 1.593485, -0.9440815,\n -3.8044982, -3.55629468, -9.7376976, -10.2950449, -9.67498302,\n -10.30486488, -8.70559597, -8.71669006, -12.66509628, -18.6697197,\n -23.00351334, -29.46240425, -36.82178497, -41.68824768, -44.50320816,\n -48.54426575, -52.50753403, -51.09564209, -48.92690659, -49.97380829,\n -51.57516098, -52.62096405, -54.24332809, -57.09109879, -60.5596199,\n -63.93486404, -67.07530212, -70.01263428, -72.9258728, -76.12271881,\n -79.49847412, -82.2350769, -83.91127014, -84.95665741, -85.61238861,\n -86.16391754, -86.7653656, -87.34436035, -87.87495422, -88.34281921,\n -88.74453735, -89.04680634, -89.26436615]) * units.celsius\n __, t_mixed, td_mixed = mixed_parcel(levels, temperatures, dewpoints)\n mixed_parcel_prof = parcel_profile(levels, t_mixed, td_mixed)\n lfc_pressure, lfc_temp = lfc(levels, temperatures, dewpoints, mixed_parcel_prof, td_mixed)\n assert_almost_equal(lfc_pressure, 962.34 * units.mbar, 2)\n assert_almost_equal(lfc_temp, 0.767 * units.degC, 2)",
"def level(self):\n return self.init_v[2]",
"def dims(self):\n return tuple(d for d in (v.states for v in self.__vars)) if len(self.__vars) else (1,)",
"def get_map_size(level):\n if level < 5:\n return 5, 5\n if level < 70:\n return 10, 10\n if level < 150:\n return 25, 25\n return 50, 50",
"def estimate_var(sample, threshold):\n sample_size = len(sample)\n index_at = get_var_level_index(sample_size, threshold)\n sample.sort()\n return sample[index_at]",
"def _get_level_values(self, level: int, unique: bool = False) -> Index:\n lev = self.levels[level]\n level_codes = self.codes[level]\n name = self._names[level]\n if unique:\n level_codes = algos.unique(level_codes)\n filled = algos.take_nd(lev._values, level_codes, fill_value=lev._na_value)\n return lev._shallow_copy(filled, name=name)",
"def zernike_Double_Index(nlevels):\n \n\t \n if not (nlevels>=0):\n print('Input parameter nlevels must be >= 0')\n raise AssertionError()\n \n if (nlevels == 0):\n \n m = 0\n n = 0\n \n return n, m\n \n else:\n \n # ++++ Defining layout for row number n and colunmn number m ++++++++\n\n row_n = nlevels+1\n col_m = 2*nlevels +1\n x = np.arange(row_n)\n y = np.arange(-(col_m-1)//2, (col_m+1)//2,1)\n Q = [(i,j) for i in x for j in y]\n #\n\n\n nm_index = []\n \n top = (col_m + 1)/2\n leftside = row_n*col_m - col_m + 1\n rightside = row_n*col_m \n\n k1 = 0; k2 = 0\n\n for i in xrange(top,row_n*col_m+1, 2*col_m):\n\n nm_index.append(Q[i-1])\n s1 = i + col_m + 1\n s2 = i + col_m - 1 \n jj1 = k1\n jj2 = k2\n\n\n while (s2 <= leftside): \n\n nm_index.append(Q[s2-1])\n s2 +=col_m - 1\n jj1 += 1\n jj2 -= 1\n\n leftside +=2\n\n jj1 = k1\n jj2 = k2\n\n while (s1 <= rightside): \n\n # \n nm_index.append(Q[s1-1])\n s1 +=col_m + 1\n jj1 += 1\n jj2 += 1\n\n rightside -=2\n k1 = 0; k2 += 2\n\n n = np.array(nm_index)[:,0]\n m = np.array(nm_index)[:,1]\n\n return n, m",
"def mi(self, lhs, rhs, cond=None):\n\t\tbins = np.amax(data, axis=0) # read levels for each variable\n\t\tif len(bins) == 1:\n\t\t\thist,_ = np.histogramdd(data, bins=(bins)) # frequency counts\n\t\t\tPx = hist/hist.sum()\n\t\t\tMI = -1 * np.sum( Px * np.log( Px ) )\n\t\t\treturn round(MI, 4)\n\t\t\t\n\t\tif len(bins) == 2:\n\t\t\thist,_ = np.histogramdd(data, bins=bins[0:2]) # frequency counts\n\n\t\t\tPxy = hist / hist.sum()# joint probability distribution over X,Y,Z\n\t\t\tPx = np.sum(Pxy, axis = 1) # P(X,Z)\n\t\t\tPy = np.sum(Pxy, axis = 0) # P(Y,Z)\t\n\n\t\t\tPxPy = np.outer(Px,Py)\n\t\t\tPxy += 1e-7\n\t\t\tPxPy += 1e-7\n\t\t\tMI = np.sum(Pxy * np.log(Pxy / (PxPy)))\n\t\t\treturn round(MI,4)\n\t\telif len(bins) > 2 and conditional==True:\n\t\t\t# CHECK FOR > 3 COLUMNS -> concatenate Z into one column\n\t\t\tif len(bins) > 3:\n\t\t\t\tdata = data.astype('str')\n\t\t\t\tncols = len(bins)\n\t\t\t\tfor i in range(len(data)):\n\t\t\t\t\tdata[i,2] = ''.join(data[i,2:ncols])\n\t\t\t\tdata = data.astype('int')[:,0:3]\n\n\t\t\tbins = np.amax(data,axis=0)\n\t\t\thist,_ = np.histogramdd(data, bins=bins) # frequency counts\n\n\t\t\tPxyz = hist / hist.sum()# joint probability distribution over X,Y,Z\n\t\t\tPz = np.sum(Pxyz, axis = (0,1)) # P(Z)\n\t\t\tPxz = np.sum(Pxyz, axis = 1) # P(X,Z)\n\t\t\tPyz = np.sum(Pxyz, axis = 0) # P(Y,Z)\t\n\n\t\t\tPxy_z = Pxyz / (Pz+1e-7) # P(X,Y | Z) = P(X,Y,Z) / P(Z)\n\t\t\tPx_z = Pxz / (Pz+1e-7) # P(X | Z) = P(X,Z) / P(Z)\t\n\t\t\tPy_z = Pyz / (Pz+1e-7) # P(Y | Z) = P(Y,Z) / P(Z)\n\n\t\t\tPx_y_z = np.empty((Pxy_z.shape)) # P(X|Z)P(Y|Z)\n\t\t\tfor i in range(bins[0]):\n\t\t\t\tfor j in range(bins[1]):\n\t\t\t\t\tfor k in range(bins[2]):\n\t\t\t\t\t\tPx_y_z[i][j][k] = Px_z[i][k]*Py_z[j][k]\n\t\t\tPxyz += 1e-7\n\t\t\tPxy_z += 1e-7\n\t\t\tPx_y_z += 1e-7\n\t\t\tMI = np.sum(Pxyz * np.log(Pxy_z / (Px_y_z)))\n\t\t\t\n\t\t\treturn round(MI,4)\n\t\telif len(bins) > 2 and conditional == False:\n\t\t\tdata = data.astype('str')\n\t\t\tncols = len(bins)\n\t\t\tfor i in range(len(data)):\n\t\t\t\tdata[i,1] = ''.join(data[i,1:ncols])\n\t\t\tdata = data.astype('int')[:,0:2]\n\n\t\t\thist,_ = np.histogramdd(data, bins=bins[0:2]) # frequency counts\n\n\t\t\tPxy = hist / hist.sum()# joint probability distribution over X,Y,Z\n\t\t\tPx = np.sum(Pxy, axis = 1) # P(X,Z)\n\t\t\tPy = np.sum(Pxy, axis = 0) # P(Y,Z)\t\n\n\t\t\tPxPy = np.outer(Px,Py)\n\t\t\tPxy += 1e-7\n\t\t\tPxPy += 1e-7\n\t\t\tMI = np.sum(Pxy * np.log(Pxy / (PxPy)))\n\t\t\treturn round(MI,4)",
"def model_onelayer_pert(r):\n\n\t#- march through the various depth levels -----------------------------------------------------\n\n\tif (r > 6361000.0):\n\t\trho = 2.7\n\t\tvpv = 5.8\n\t\tvph = vpv\n\t\tvsv = 2.0 + 0.02\n\t\tvsh = vsv \n\t\teta = 1.0\n\n\telse:\n\t\trho = 3.1\n\t\tvpv = 7.8\n\t\tvph = vpv\n\t\tvsv = 3.0\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- convert to elastic parameters --------------------------------------------------------------\n\n\trho = 1000.0 * rho\n\tvpv = 1000.0 * vpv\n\tvph = 1000.0 * vph\n\tvsv = 1000.0 * vsv\n\tvsh = 1000.0 * vsh\n\n\tA = rho * vph**2\n\tC = rho * vpv**2\n\tN = rho * vsh**2\n\tL = rho * vsv**2\n\tF = eta * (A - 2 * L)\n\n\treturn rho, A, C, F, L, N",
"def scale_u_and_v(u, v, level, pyr):\n # TODO: Your code here\n image = pyr[level-1]\n expanded_u = ps4.expand_image(u)\n expanded_v = ps4.expand_image(v)\n scaled_u = expanded_u * 2\n scaled_v = expanded_v * 2\n if image.shape[0] == scaled_u.shape[0] - 1:\n scaled_u = scaled_u[:-1, :]\n if image.shape[1] == scaled_u.shape[1] - 1:\n scaled_u = scaled_u[:, :-1]\n if image.shape[0] == scaled_v.shape[0] - 1:\n scaled_v = scaled_v[:-1, :]\n if image.shape[1] == scaled_v.shape[1] - 1:\n scaled_v = scaled_v[:, :-1]\n return scaled_u, scaled_v",
"def assign_to_levels(boxes, im_size, num_levels=6, finest_stride=3):\n im_area = im_size[0] * im_size[1]\n widths = boxes[:, 2] - boxes[:, 0] + 1.0\n heights = boxes[:, 3] - boxes[:, 1] + 1.0\n areas = widths * heights\n # if e.g. the finest level has a stride of 4, we want all boxes\n # at 1/4 image resolution to map to the coarsest level (k = 0)\n k = np.round(np.log2(math.sqrt(im_area) / np.sqrt(areas)) - math.log2(finest_stride))\n k = k.astype(np.int32)\n inds = np.where(k < 0)[0]\n k[inds] = 0\n inds = np.where(k > num_levels)[0]\n k[inds] = num_levels\n return k",
"def read_vs_1d(vname, depth): \n with open(vname, 'r') as f:\n lines = f.readlines() \n line0=lines[0].split()\n n_col = int(line0[0])\n \n data = []\n\n for line in lines[1:]:\n data.append([float(val) for val in line.split()])\n\n data = np.concatenate(data) \n v_mod = data.reshape([n_col,6])\n \n depth_ref = 0\n for i in range(0, n_col):\n depth_ref = depth_ref+v_mod[i,0]\n #print(depth_ref)\n if(depth_ref>depth):\n vs_1d = v_mod[i-1,2]\n rho_1d = v_mod[i-1,3] \n break\n \n return vs_1d, rho_1d"
] | [
"0.60400754",
"0.5711261",
"0.53916866",
"0.5316189",
"0.5314503",
"0.5262245",
"0.5258733",
"0.5256473",
"0.52382195",
"0.5228566",
"0.5206168",
"0.51694137",
"0.51330495",
"0.51280445",
"0.51269424",
"0.5126678",
"0.5115695",
"0.51080126",
"0.50934094",
"0.5042593",
"0.50391614",
"0.5038767",
"0.50226116",
"0.49956325",
"0.49911064",
"0.49690497",
"0.4966342",
"0.49567708",
"0.49533004",
"0.4944632"
] | 0.65224934 | 0 |
interpolates a variable mv along its second axis, normally latitude, so as to match the new axis (which should be coarser, i.e. have fewer points), and returns a numpy array of the interpolated values. The first axis is normally levels, and isn't expected to be very large (usually <20; surely <50). There shall be no more than two axes. | def interp2( newaxis1, mv ):
missing = mv.get_fill_value()
axes = allAxes(mv)
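    # if the new axis is finer (more points) than mv's second axis, no interpolation is done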
if len(newaxis1[:])>len(axes[1][:]): return mv
new_vals = numpy.ma.masked_all( ( len(axes[0]), len(newaxis1[:]) ) )
for i in range(len( axes[0] )):
new_vals[i,:] = numpy.interp( newaxis1[:], axes[1][:], mv[i,:], left=missing, right=missing )
# numpy.interp loses the mask, and I want to propagate it! But we can separately interpolate
# the mask - which numpy.interp treats False as 0, True as 1:
new_vals.mask[i,:] = ( numpy.interp( newaxis1[:], axes[1][:], mv.mask[i,:], left=missing,
right=missing ) )>0
return new_vals | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reduce2lat_old( mv, vid=None ):\n # >>> For now, I'm assuming that the only axes are time,lat,lon\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n # If I base another reduction function on this one, it's important to note that an average\n # in the lat direction will unavoidably need weights, because of the geometry.\n\n if vid==None:\n vid = 'reduced_'+mv.id\n time_axis, lat_axis, lon_axis = tllAxes( mv )\n\n mvta = timeave_old( mv )\n\n zm = numpy.zeros( mvta.shape[0] )\n for i in range(len(lat_axis)):\n for j in range(len(lon_axis)):\n zm[i] += mvta[i,j]\n zm[i] /= len(lon_axis)\n zmv = cdms2.createVariable( zm, axes=[lat_axis], id=vid )\n return zmv",
"def reduce2lat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def reduce2latlon( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' ]\n axes_string = '('+')('.join(axis_names)+')'\n for ax in axes:\n # The averager insists on bounds. Sometimes they don't exist, especially for obs.\n if ax.id!='lat' and ax.id!='lon' and not hasattr( ax, 'bounds' ):\n ax.setBounds( ax.genGenericBounds() )\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def interpolate(m):\n \n x1 = m[0]\n x2 = m[1]\n x3 = m[2]\n y1 = m[3]\n y2 = m[4]\n y3 = m[5]\n denom = (x1 - x2)*(x1 - x3)*(x2 - x3)\n A = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / denom\n B = (x3**2 * (y1 - y2) + x2**2 * (y3 - y1) + x1**2 * (y2 - y3)) / denom\n C = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / denom\n xext = -B/(2*A)\n yext = A*xext**2 + B*xext + C\n \n return(np.array([xext,yext]))",
"def reduce2levlat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def interpolation_matrix(m):\n return np.nanmean(m,axis=1)",
"def lonvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lon_axis = lonAxis(mv)\n latmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units},\n copy=True )\n return latmv",
"def reduce2scalar_zonal( mv, latmin=-90, latmax=90, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n ilat = None\n for i,ax in enumerate(axes):\n if ax.id=='lat': ilat = i\n # reduce size of lat axis to (latmin,latmax)\n # Let's home a direct search will be fast enough:\n lataxis = latAxis( mv )\n lmin = -1\n lmax = len(lataxis)\n if lataxis[0]>=latmin: lmin = 0\n if lataxis[-1]<=latmax: lmax = len(lataxis)-1\n if lmin==-1 or lmax==len(lataxis):\n for l,ax in enumerate(lataxis):\n if lmin==-1 and ax>=latmin: lmin = max( 0, l )\n if lmax==len(lataxis) and ax>=latmax: lmax = min( l, len(lataxis) )\n lataxis_shrunk = lataxis.subaxis(lmin,lmax)\n mv2shape = list(mv.shape)\n mv2shape[ilat] = lmax-lmin+1\n axes[ilat] = lataxis_shrunk\n mvd1 = numpy.delete( mv, slice(0,lmin), ilat )\n mvdata = numpy.delete( mvd1, slice(lmax-lmin,len(lataxis)-lmin), ilat )\n mv2 = cdms2.createVariable( mvdata, axes=axes )\n\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n avmv = averager( mv2, axis=axes_string )\n avmv.id = vid # Note that the averager function returns a variable with meaningless id.\n ammv.units = mv.units\n\n return avmv",
"def latvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lat_axis = latAxis(mv)\n #latmv = mv.clone() # good if mv has only a lat axis\n #latmv[:] = lat_axis[:]\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units},\n copy=True )\n return latmv",
"def myinterp2d(x, y, z, xnew, ynew, method='linear'):\n x = np.ravel(x)\n y = np.ravel(y)\n z = np.ravel(z)\n znew = griddata((x, y), z, (xnew, ynew), method=method, fill_value=0.)\n return znew",
"def interpolate_matrix(matrix):",
"def loc_massmet(mass):\n return np.interp(mass, massmet[:, 0], massmet[:, 1])",
"def interpolate_vertical(ml_file, inter_file, new_vertical_axis):\n with xr.load_dataset(inter_file) as interpolated:\n reference = [variable for variable in interpolated.variables if len(interpolated[variable].shape) == 4][0]\n with xr.open_dataset(ml_file) as ml:\n for variable in [variable for variable in ml.variables if variable not in interpolated.variables\n and len(ml[variable].dims) == 4\n and \"lev_2\" in ml[variable].dims]:\n try:\n x = np.array(ml[new_vertical_axis].data)\n y = np.array(ml[variable].data)\n interpolated_data = interpolate_1d(interpolated[\"lev\"].data, x, y, axis=1)\n attributes = ml[variable].attrs\n\n interpolated[variable] = interpolated[reference].copy(data=interpolated_data)\n interpolated[variable].attrs = ml[variable].attrs\n except Exception as e:\n print(variable, e)\n interpolated.to_netcdf(inter_file)",
"def coord_interp(parameter, interval):\r\n epoch = _np.linspace(1800, 12600 , int(10800/interval)+1) # 3h validity interval within 4h\r\n time = _np.array([epoch**deg for deg in range(len(parameter)-1,-1,-1)])\r\n return _np.matmul(parameter,time)",
"def reduce2scalar_zonal_old( mv, latmin=-90, latmax=90, vid=None ):\n # For now, I'm assuming that the only axes are time,lat,lon - so that zm is a scalar.\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n if vid==None:\n vid = 'reduced_'+mv.id\n time,lat,lon = tllAxes(mv)\n if hasattr(mv.parent,'variables'):\n fil = mv.parent # mv is a fileVariable and fil is a file.\n lat_bnds = fil[lat.bounds]\n else:\n lataxis = latAxis(mv) # mv is a TransientVariable\n lat_bnds = lataxis._bounds_\n\n mvta = timeave_old( mv )\n\n # In computing the average, we use area weighting.\n # Sometimes the area is available in cell_measures, but for now I'll just use the backup method:\n # The area of a lonlat cell is R^2*delta(lon)*delta(sin(lat)).\n # With equally spaced lon, we don't need delta(lon) for weights.\n # I'll assume that lat,lon are in degrees, which is the only way I've ever seen them.\n wgtsum = 0\n zm = 0\n for i,lati in enumerate(lat):\n # The following test could be sped up a lot, because lat[i] is ordered...\n # >>> to do: partial overlaps\n if latmin<=lati and lati<latmax:\n latlo = lat_bnds[i,0]\n lathi = lat_bnds[i,1]\n wgti = sin(radians(lathi))-sin(radians(latlo))\n zi = 0.0\n for j in range(len(lon)):\n zi += mvta[i,j]\n zi *= wgti\n wgtsum += wgti*len(lon)\n zm += zi\n zm /= wgtsum\n # zm is a scalar, so createVariable gets no axes argument:\n zmv = cdms2.createVariable( zm, id=vid )\n return zmv",
"def reduce2levlat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n\n if timeax.units=='months':\n # Special check necessary for LEGATES obs data, because\n # climatology() won't accept this incomplete specification\n timeax.units = 'months since 0001-01-01'\n mvseas = seasons.climatology(mv)\n\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n avmv.id = vid\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n\n return avmv",
"def interpolation(self, arr, factor):\n\t x = arr[:, 0]\n\t y = arr[:, 1]\n\t z = arr[:, 2]\n\t t = np.linspace(0,x.shape[0],num=x.shape[0])\n\t to_expand = [x, y, z]\n\t for i in range(len(to_expand)):\n\t spl = interp1d(t, np.ravel(to_expand[i]))\n\t to_expand[i] = spl(np.linspace(0,len(t), len(t)*factor))\n\t new_matrix = np.matrix(np.r_[0:len(t):1.0/factor])\n\t for i in to_expand:\n\t new_matrix = np.concatenate((new_matrix, np.matrix(i)), axis = 0)\n\t return new_matrix.T[:,1:]",
"def lonvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lon_axis1 = lonAxis(mv1)\n lon_axis2 = lonAxis(mv2)\n if len(lon_axis1)<=len(lon_axis2):\n lon_axis = lon_axis1\n mv = mv1\n else:\n lon_axis = lon_axis2\n mv = mv2\n lonmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units} )\n return lonmv",
"def frechet_var_approx(dist_proj):\n return torch.mean(dist_proj ** 2).item()",
"def latvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lat_axis1 = latAxis(mv1)\n lat_axis2 = latAxis(mv2)\n if len(lat_axis1)<=len(lat_axis2):\n lat_axis = lat_axis1\n mv = mv1\n else:\n lat_axis = lat_axis2\n mv = mv2\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units} )\n return latmv",
"def interpolate2D(uv, img):\n\n x, y = uv.T\n\n # Get closest points\n x1, y1 = np.floor(uv).T.astype(np.int)\n x2, y2 = np.ceil(uv).T.astype(np.int)\n\n # Evaluate color function at closest points\n Q11 = img[y1, x1]\n Q12 = img[y1, x2]\n Q21 = img[y2, x1]\n Q22 = img[y2, x2]\n\n # Interpolate on x\n\n n = x2-x1\n alpha, beta = ((x2-x)/n)[:, None], ((x-x1)/n)[:, None]\n\n fxy1 = alpha*Q11 + beta*Q21\n fxy2 = alpha*Q12 + beta*Q22\n\n # Interpolate on y\n\n n = y2-y1\n alpha, beta = ((y2-y)/n)[:, None], ((y-y1)/n)[:, None]\n\n return alpha*fxy1 + beta*fxy2",
"def reduce2lat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n for ax in mv.getAxisList():\n if ax.getBounds() is None:\n ax._bounds_ = ax.genGenericBounds()\n timeax = timeAxis(mv)\n if timeax.units=='months':\n # Special check necessary for LEGATES obs data, because\n # climatology() won't accept this incomplete specification\n timeax.units = 'months since 0001-01-01'\n mvseas = seasons.climatology(mv)\n if mvseas is None:\n # Among other cases, this can happen if mv has all missing values.\n return None\n \n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n avmv.id = vid\n\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def interpolate_meridional(self, *interp1d_args, **interp1d_kwargs):\n return self.interp1d_meridional(*interp1d_args, **interp1d_kwargs)(self.lat)",
"def reduce2latlon_seasonal( mv, seasons=seasonsyr, vid=None ):\n # This differs from reduce2lat_seasonal only in the line \"axis_names =\"....\n # I need to think about how to structure the code so there's less cut-and-paste!\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n mvseas = seasons.climatology(mv)\n \n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n for axis in mvseas.getAxisList():\n if axis.getBounds() is None:\n axis._bounds_ = axis.genGenericBounds()\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n if avmv is None: return avmv\n avmv.id = vid\n if hasattr(mv,'units'): avmv.units = mv.units\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def focal_projection(m, dx=.02, return_grid=False):\n dy = dx # grid loop\n x = np.arange(-1, 1, dx)[None, ...]\n y = np.arange(-1, 1, dy)[..., None]\n\n nx = len(x)\n ny = len(y) # vectorization of previous code begins here\n\n x2 = x.repeat(ny, axis=0)\n y2 = y.repeat(ny, axis=1)\n\n r2 = x2 * x2 + y2 * y2\n trend = np.arctan2(y2, x2)\n plunge = np.pi / 2 - 2 * np.arcsin(np.sqrt(r2 / 2)) # equal area projection\n\n vij1 = np.cos(trend) * np.cos(plunge) # set up local vector grids\n vij2 = np.sin(trend) * np.cos(plunge)\n vij3 = np.sin(plunge)\n\n m = np.array(m)\n if len(m.ravel()) == 9:\n m = moment_convert(m)\n\n u1 = (vij1 * m[0] + vij2 * m[5] + vij3 * m[4]) * vij1\n u2 = (vij1 * m[5] + vij2 * m[1] + vij3 * m[3]) * vij2\n u3 = (vij1 * m[4] + vij2 * m[3] + vij3 * m[2]) * vij3\n u = u1 + u2 + u3\n u[r2 > 1] = np.nan\n if return_grid:\n return u, vij1, vij2, vij3\n return u",
"def interpolate_2d(x, y, z, x_new, y_new):\n fun = RectBivariateSpline(x, y, z, kx=1, ky=1) # linear interpolation\n return fun(x_new, y_new)",
"def vp_from_ke(m):\n return (m[0, 0]/m[2,0], m[1,0]/m[2,0])",
"def reduce2scalar( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def dmsp_map_interpolate_NN_smooth(X_dmsp, Y_dmsp, X_map, Y_map, Obs_map, k = 5, tol = 3):\n #reshape to N by 2 array where each row is (X, Y)\n dmsp_points = np.hstack((X_dmsp.flatten().reshape(-1,1),Y_dmsp.flatten().reshape(-1,1)))\n map_points = np.hstack((X_map.flatten().reshape(-1,1), Y_map.flatten().reshape(-1,1)))\n N_points = dmsp_points.shape[0]\n obs_val = Obs_map.flatten()\n model = sklearn.neighbors.BallTree(map_points,leaf_size = 40 )\n dists, inds = model.query(dmsp_points, k=k) \n\n obs_interp = np.empty(N_points)\n for i in range(N_points):\n norm = LA.norm(dists[i])\n if (norm > tol):\n obs_interp[i] = np.nan\n else:\n# weights = dists[i]/norm\n\n weights = dists[i]/np.nansum(dists[i])\n obs_interp[i] = np.nansum( obs_val[inds[i]] * weights )\n\n return obs_interp",
"def linear_interp2d(z, map_lower, map_higher, comoving_dist=False, NewProjected=False):\n with h5py.File(map_lower, \"r\") as ds1, h5py.File(map_higher, \"r\") as ds2:\n if NewProjected:\n dm_name = \"map\"\n header_name = \"Header\"\n else:\n dm_name = \"DM\"\n header_name = \"HEADER\"\n\n y2 = ds2[dm_name][:]\n y1 = ds1[dm_name][:]\n\n if comoving_dist:\n x2 = z_to_mpc(ds2[header_name].attrs[\"Redshift\"])\n x1 = z_to_mpc(ds1[header_name].attrs[\"Redshift\"])\n dist = z_to_mpc(z) - x1\n else:\n x2 = ds2[header_name].attrs[\"Redshift\"]\n x1 = ds1[header_name].attrs[\"Redshift\"]\n dist = z - x1\n\n grad = (y2 - y1)/ (x2 - x1)\n\n return grad * dist + y1"
] | [
"0.6127793",
"0.61027855",
"0.6082101",
"0.59283215",
"0.5838112",
"0.57578194",
"0.5623196",
"0.5599504",
"0.55420876",
"0.54633516",
"0.5410154",
"0.5350282",
"0.5312561",
"0.52844816",
"0.52626956",
"0.5179751",
"0.5162937",
"0.5137714",
"0.5093342",
"0.5058199",
"0.5038663",
"0.50160366",
"0.50040245",
"0.49906573",
"0.49896732",
"0.49162564",
"0.49052754",
"0.49019223",
"0.48935536",
"0.48931697"
] | 0.62990654 | 0 |
returns mv1[0,]-mv2[0,]; they should be dimensioned alike. Attributes will be fixed up where I know how. | def aminusb0( mv1, mv2 ):
mv = mv1[0,] - mv2[0,]
if hasattr(mv,'long_name'):
if mv.long_name==mv1.long_name: # They're different, shouldn't have the same long_name
mv.long_name = ''
return mv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _gu_matvec(x1, x2):\n return (x1 @ x2[..., np.newaxis])[..., 0]",
"def get_molecular_matrix_and_vector(single_body, two_body):\n x, y = single_body.shape\n func = np.vectorize(round_custom)\n _new_dim = x * y\n single_one_dim = func(single_body.reshape(_new_dim, 1))\n two_body_two_dim = func(two_body.reshape(_new_dim, _new_dim))\n\n return single_one_dim, two_body_two_dim",
"def _match_dims(poly1, poly2, copy=None):\r\n if copy is None:\r\n copy = True\r\n\r\n if copy:\r\n p1 = deepcopy(poly1)\r\n p2 = deepcopy(poly2)\r\n else:\r\n p1 = poly1\r\n p2 = poly2\r\n\r\n dim1 = poly1.multi_index.spatial_dimension\r\n dim2 = poly2.multi_index.spatial_dimension\r\n if dim1 >= dim2:\r\n poly2.expand_dim(dim1)\r\n else:\r\n poly1.expand_dim(dim2)\r\n return poly1, poly2",
"def match_dimension(p0, p1):\n\n\tif p0.shape != p1.shape:\n\t\tnxmax = max(p0.shape[0], p1.shape[0])\n\t\tnymax = max(p0.shape[1], p1.shape[1])\n\n\t\tp0 = pad_edge_to_shape(p0, int(nxmax), int(nymax))\n\t\tp1 = pad_edge_to_shape(p1, int(nxmax), int(nymax))\n\n\treturn p0, p1",
"def aminusb_ax2( mv1, mv2 ):\n if hasattr(mv1,'units') and hasattr(mv2,'units') and mv1.units!=mv2.units:\n print \"WARING: aminusb_ax2 is subtracting variables with different units!\",mv1,mv1\n axes1 = allAxes(mv1)\n axes2 = allAxes(mv2)\n # TO DO: convert, interpolate, etc. as needed to accomodate differing first axes.\n # But for now, we'll just check a bit ...\n ax1=axes1[0]\n ax2=axes2[0]\n if ax1.shape!=ax2.shape:\n print \"ERROR aminusb_ax2 requires same axes, but shape differs:\",ax1.shape,ax2.shape\n print \"ax1,ax2\"\n return None\n if hasattr(ax1,'units') and hasattr(ax2,'units') and ax1.units!=ax2.units:\n if ax1.units=='mb':\n ax1.units = 'mbar' # udunits uses mb for something else\n if ax2.units=='mb':\n ax2.units = 'mbar' # udunits uses mb for something else\n tmp = udunits(1.0,ax2.units)\n s,i = tmp.how(ax1.units) # will raise an exception if conversion not possible\n # crude substitute for a real units library:\n #if not (ax1.units=='mb' and ax2.units=='millibars') and\\\n # not (ax1.units=='millibars' and ax2.units=='mb'):\n # print \"ERROR aminusb_ax2 requires same axes, but units differ:\",ax1.units,ax2,units\n # print \"ax1,ax2\"\n # return None\n ab_axes = [ax1]\n if len(axes1[1])<=len(axes2[1]):\n a = mv1\n b = interp2( axes1[1], mv2 )\n ab_axes.append(axes1[1])\n else:\n a = interp2( axes2[1], mv1 )\n b = mv2\n ab_axes.append(axes2[1])\n aminusb = a - b\n aminusb.id = mv1.id\n aminusb.initDomain( ab_axes )\n return aminusb",
"def Mxform(x1,y1,x2,y2):\n return Jones.toMueller([[np.dot(x2,x1), np.dot(x2, y1)], [np.dot(y2,x1), np.dot(y2,y1)]])",
"def get_m2(self):\n pass",
"def aminusb_1ax( mv1, mv2 ):\n mv1, mv2 = reconcile_units( mv1, mv2 )\n if hasattr(mv1,'units') and hasattr(mv2,'units') and mv1.units!=mv2.units:\n print \"WARNING: aminusb_1ax1 is subtracting variables with different units!\",mv1,mv1\n if mv1 is None or mv2 is None: return None\n missing = mv1.get_fill_value()\n axis1 = allAxes(mv1)[0]\n axis2 = allAxes(mv2)[0]\n if len(axis1)<=len(axis2):\n a = mv1\n b = numpy.interp( axis1[:], axis2[:], mv2[:], left=missing, right=missing )\n else:\n a = numpy.interp( axis2[:], axis1[:], mv1[:], left=missing, right=missing )\n b = mv2\n aminusb = a - b\n aminusb.id = mv1.id\n return aminusb",
"def Rt(X):\n return X[:2,:2], X[:2, 2]",
"def computemeta(src, dst):\n srcy, srcx = src\n canvasx, canvasy = srcx.start, srcy.start\n width, height = srcx.stop - srcx.start, srcy.stop - srcy.start\n posy, posx = dst\n return [posx, posy, width, height, canvasx, canvasy]",
"def reshape(self):\n qx = self.tunx[1:].reshape(self.nsigma, self.nangles)\n qy = self.tuny[1:].reshape(self.nsigma, self.nangles)\n return qx, qy",
"def _mps_AA(self, A1, A2):\n Dl, d1, _ = A1.shape\n _, d2, Dr = A2.shape\n return np.reshape(np.tensordot(A1, A2, axes=(2, 0)), [Dl, d1 * d2, Dr])",
"def v1Mv2(v1, M, v2):\r\n return v1[:, None] * M * v2[None, :]",
"def getDist(ind1,ind2,distMat):\n return distMat[ind1,ind2]",
"def pd(self, other):\n return Matriz([self]).T() * Matriz([other])",
"def mate(self, p1, p2):\n return (p1, p2)",
"def attributes_picker(index1, index2):\r\n # Adds indexes and + 1 for data types\r\n positions = {-1}\r\n positions.add(2 * index1)\r\n positions.add((2 * index1) + 1)\r\n positions.add(2 * index2)\r\n positions.add((2 * index2) + 1)\r\n return positions",
"def mult(m1, m2):\n assert np.shape(m1) == (2, 3)\n assert np.shape(m2) == (2, 3)\n\n m1_temp = np.vstack((m1, [0, 0, 1]))\n m2_temp = np.vstack((m2, [0, 0, 1]))\n result = m1_temp * m2_temp\n\n return result[:2, :]",
"def common_axes( mv1, mv2 ):\n axes1 = [a[0] for a in mv1.getDomain()]\n axes2 = [a[0] for a in mv2.getDomain()]\n if len(axes1)!=len(axes2):\n print \"ERROR. common_axes requires same number of axes in\",mv1,\" and\",mv2\n return None\n axes3 = []\n for i in range(len(axes1)):\n axes3.append(common_axis( axes1[i], axes2[i] ))\n return axes3",
"def position(self):\n return self.atoms.reshape((1,-1))",
"def mainIndices(self):\n return self.i1, self.i2",
"def _addMats(X1,X2):\n _checkSize(X1,X2)\n return [ _addVectors(X1[i],X2[i]) for i in range(len(X1))]",
"def combine_M_X1_X2(M, X1, X2):\n Nm = M.shape[0]\n Nx1 = X1.shape[0]\n Nx2 = X2.shape[0]\n Nt = Nm + Nx1 + Nx2\n logging.debug(\"combine_M_X1_X2: The total number of agents in the network are:\" + str(Nt))\n W = np.zeros([Nt, Nt], dtype=int)\n\n for i in range(0, Nm):\n for j in range(0, Nm):\n W[i][j] = M[i][j]\n\n x_x1 = 0\n y_x1 = 0\n for x in range(Nm, Nm + Nx1):\n for y in range(Nm, Nm + Nx1):\n W[x][y] = X1[x_x1][y_x1]\n y_x1 = y_x1 + 1\n y_x1 = 0\n x_x1 = x_x1 + 1\n\n x_x2 = 0\n y_x2 = 0\n for x in range(Nm + Nx1, Nt):\n for y in range(Nm + Nx1, Nt):\n W[x][y] = X2[x_x2][y_x2]\n y_x2 = y_x2 + 1\n y_x2 = 0\n x_x2 = x_x2 + 1\n\n return W",
"def _broadcast(self, v1, v2):\n v1, v2 = np.array(v1), np.array(v2)\n if len(v1.shape) < len(v2.shape):\n idx = tuple(slice(None) for i in range(len(v1.shape)))\n idx = idx + (None,) * (len(v2.shape) - len(v1.shape))\n return v1[idx], v2\n elif len(v1.shape) > len(v2.shape):\n idx = tuple(slice(None) for i in range(len(v2.shape)))\n idx = idx + (None,) * (len(v1.shape) - len(v2.shape))\n return v1, v2[idx]\n else:\n return v1, v2",
"def cross_link_attribute(self, attribute_name, node_list1, node_list2):\n W = self.link_attribute(attribute_name)\n return W[node_list1, :][:, node_list2]",
"def test_populator_alternate_attribute():\n o1, o2 = MediaBag(iid=1), MediaBag(iid=2)\n with build_multi_get(2) as multi_get:\n media.build_populator('iid', multi_get)([o1, o2])\n assert (o1.media, o2.media) == (1, 2)",
"def classical(m1,m2):\n \n n = m1.shape\n result = np.zeros(n, dtype = int)\n\n for i in range(n[0]):\n for j in range(n[0]):\n for k in range(n[0]):\n result[i][j] += m1[i][k] * m2[k][j]\n return result",
"def _reverse_numeric_op(self, other, attr_name):\n l = reshape_append_ones(self, other)\n return getattr(numpy.ndarray, attr_name)(l[0], l[1])",
"def single_crossover(self, original1, original2):\n point=self.r.uniform(0.1,0.6)\n cut1=int(point*len(original1))\n cut2=int(point*len(original2))\n child1=original1[:cut1]+original2[cut2:]\n child2=original2[:cut2]+original1[cut1:]\n return child1, child2",
"def nm2m(self):\n return self._nm2m"
] | [
"0.5995994",
"0.5515784",
"0.5483453",
"0.5478269",
"0.5441555",
"0.5402159",
"0.5384988",
"0.5359179",
"0.52837175",
"0.5270582",
"0.5260968",
"0.5260383",
"0.525911",
"0.5228595",
"0.5210489",
"0.5195151",
"0.5183675",
"0.51530695",
"0.51313037",
"0.51037055",
"0.5097388",
"0.5086019",
"0.50781524",
"0.5074742",
"0.5043666",
"0.50326777",
"0.5027884",
"0.5015499",
"0.50100946",
"0.50042593"
] | 0.56507003 | 1 |
returns a transient variable representing mv1-mv2, where mv1 and mv2 are variables, normally transient variables, which are required to depend on only one axis. To perform the subtraction, one of the variables is linearly interpolated to the axis of the other. The axis used will be the coarsest (fewest points) of the two axes. Note that if mv1 _or_ mv2 has a missing value at index i, then the return value (mv1-mv2) will also have a missing value at index i. | def aminusb_1ax( mv1, mv2 ):
mv1, mv2 = reconcile_units( mv1, mv2 )
if hasattr(mv1,'units') and hasattr(mv2,'units') and mv1.units!=mv2.units:
print "WARNING: aminusb_1ax1 is subtracting variables with different units!",mv1,mv1
if mv1 is None or mv2 is None: return None
missing = mv1.get_fill_value()
axis1 = allAxes(mv1)[0]
axis2 = allAxes(mv2)[0]
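    # interpolate whichever variable has the finer axis onto the coarser of the two axes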
if len(axis1)<=len(axis2):
a = mv1
b = numpy.interp( axis1[:], axis2[:], mv2[:], left=missing, right=missing )
else:
a = numpy.interp( axis2[:], axis1[:], mv1[:], left=missing, right=missing )
b = mv2
aminusb = a - b
aminusb.id = mv1.id
return aminusb | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def basic_sub(mv1, mv2):\n obj = expand(mv1.obj - mv2.obj)\n return MV(obj)",
"def displacement(cls, v1, v2):\n return np.array([v2 - v1])",
"def aminusb_ax2( mv1, mv2 ):\n if hasattr(mv1,'units') and hasattr(mv2,'units') and mv1.units!=mv2.units:\n print \"WARING: aminusb_ax2 is subtracting variables with different units!\",mv1,mv1\n axes1 = allAxes(mv1)\n axes2 = allAxes(mv2)\n # TO DO: convert, interpolate, etc. as needed to accomodate differing first axes.\n # But for now, we'll just check a bit ...\n ax1=axes1[0]\n ax2=axes2[0]\n if ax1.shape!=ax2.shape:\n print \"ERROR aminusb_ax2 requires same axes, but shape differs:\",ax1.shape,ax2.shape\n print \"ax1,ax2\"\n return None\n if hasattr(ax1,'units') and hasattr(ax2,'units') and ax1.units!=ax2.units:\n if ax1.units=='mb':\n ax1.units = 'mbar' # udunits uses mb for something else\n if ax2.units=='mb':\n ax2.units = 'mbar' # udunits uses mb for something else\n tmp = udunits(1.0,ax2.units)\n s,i = tmp.how(ax1.units) # will raise an exception if conversion not possible\n # crude substitute for a real units library:\n #if not (ax1.units=='mb' and ax2.units=='millibars') and\\\n # not (ax1.units=='millibars' and ax2.units=='mb'):\n # print \"ERROR aminusb_ax2 requires same axes, but units differ:\",ax1.units,ax2,units\n # print \"ax1,ax2\"\n # return None\n ab_axes = [ax1]\n if len(axes1[1])<=len(axes2[1]):\n a = mv1\n b = interp2( axes1[1], mv2 )\n ab_axes.append(axes1[1])\n else:\n a = interp2( axes2[1], mv1 )\n b = mv2\n ab_axes.append(axes2[1])\n aminusb = a - b\n aminusb.id = mv1.id\n aminusb.initDomain( ab_axes )\n return aminusb",
"def __rsub__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other)\n if not mv:\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return other - obj\n newValue = other.value - self.value\n\n return self._newMV(newValue)",
"def displacement(cls, v1, v2):\n return (v2 - v1).copy()",
"def vector_sub(v1,v2):\n return Vector(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z)",
"def subm(f1, f2):\n from numpy import array, clip\n\n if type(f2) is array:\n assert f1.dtype == f2.dtype, 'Cannot have different datatypes:'\n bottom,top=limits(f1)\n y = clip(f1.astype('d') - f2, bottom, top)\n y = y.astype(f1.dtype)\n return y",
"def vars_subtract ( self , var1 , var2 , name = '' , title = '' ) :\n\n f1 = isinstance ( var1 , num_types )\n f2 = isinstance ( var2 , num_types )\n\n if f1 and f2 :\n ##\n res = float ( var1 ) - float ( var2 )\n return ROOT.RooRealConstant.value ( res ) \n elif f1 :\n ## \n var1 = ROOT.RooRealConstant.value ( var1 ) \n return self.vars_subtract ( var1 , var2 , name , title )\n elif f2 :\n ## shortcut \n if 0 == var2 : return var1 ## SHORTCUT\n #\n var2 = ROOT.RooRealConstant.value ( var2 ) \n return self.vars_subtract ( var1 , var2 , name , title )\n\n self.aux_keep.append ( var1 )\n self.aux_keep.append ( var2 )\n\n result = Ostap.MoreRooFit.Subtraction ( var1 , var2 )\n self.aux_keep.append ( result )\n \n return result",
"def lonvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lon_axis1 = lonAxis(mv1)\n lon_axis2 = lonAxis(mv2)\n if len(lon_axis1)<=len(lon_axis2):\n lon_axis = lon_axis1\n mv = mv1\n else:\n lon_axis = lon_axis2\n mv = mv2\n lonmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units} )\n return lonmv",
"def vd(v2,v1):\n return v2-v1",
"def v1Mv2(v1, M, v2):\r\n return v1[:, None] * M * v2[None, :]",
"def sub(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return FreeCAD.Vector(first.x-other.x, first.y-other.y, first.z-other.z)",
"def __sub__(self, other: TranslationType):\n return Translation(\n self.x - other.x,\n self.y - other.y,\n self.z - other.z)",
"def interp2( newaxis1, mv ):\n missing = mv.get_fill_value()\n axes = allAxes(mv)\n if len(newaxis1[:])>len(axes[1][:]): return mv\n new_vals = numpy.ma.masked_all( ( len(axes[0]), len(newaxis1[:]) ) )\n for i in range(len( axes[0] )):\n new_vals[i,:] = numpy.interp( newaxis1[:], axes[1][:], mv[i,:], left=missing, right=missing )\n # numpy.interp loses the mask, and I want to propagate it! But we can separately interpolate\n # the mask - which numpy.interp treats False as 0, True as 1:\n new_vals.mask[i,:] = ( numpy.interp( newaxis1[:], axes[1][:], mv.mask[i,:], left=missing,\n right=missing ) )>0\n return new_vals",
"def __sub__(self, other: float) -> 'Translation':\n self._vector.setWithArray((self._vector.x - other, self._vector.y - other, self._vector.z - other))\n return self",
"def vector_subtract(v1, v2):\n return v1[0] - v2[0], v1[1] - v2[1]",
"def subtract_vectors(vector_1, vector_2):\n new_coordinates = []\n index = 0\n while index < vector_1.dimension:\n new_value = vector_1.coordinates[index] - vector_2.coordinates[index]\n new_coordinates.append(new_value)\n index += 1\n new_vector = Vector(new_coordinates)\n return new_vector",
"def __sub__(self, other):\n tmp = VectorHeat1D(self.size)\n tmp.set_values(self.get_values() - other.get_values())\n return tmp",
"def __rtruediv__(self, other):\n value = -1 / (self.val * self.val)\n total = {self.var: other * value}\n return AutoDiffReverse(other / self.val, None, total)",
"def __sub__(self, other):\n return Vec2d(self.v[0] - other[0], self.v[1] - other[1])",
"def latvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lat_axis1 = latAxis(mv1)\n lat_axis2 = latAxis(mv2)\n if len(lat_axis1)<=len(lat_axis2):\n lat_axis = lat_axis1\n mv = mv1\n else:\n lat_axis = lat_axis2\n mv = mv2\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units} )\n return latmv",
"def diff_v_x1(x1, x2, t=0.):\n return (omega) ** 2 * x1",
"def measurement(params1, params2):\n s0 = state(params1)\n s1 = state(params2)\n val = (np.absolute(np.conj(s0)@s1))**2\n return val",
"def __truediv__(self, other):\n try:\n value = -1 / (other.val * other.val)\n total = {self.var: 1 / other.val, other.var: value * self.val}\n return AutoDiffReverse(self.val / other.val, None, total)\n except AttributeError:\n total = {self.var: 1 / other}\n return AutoDiffReverse(self.val / other, None, total)",
"def get_cross2d(v1, v2):\n return v1[0]*v2[1] - v1[1]*v2[0]",
"def diff(self, x0, x1):\n nq, nv, nx = self.model.nq, self.model.nv, self.nx\n assert (x0.shape == (nx, ) and x1.shape == (nx, ))\n q0 = x0[:nq]\n q1 = x1[:nq]\n v0 = x0[-nv:]\n v1 = x1[-nv:]\n dq = pinocchio.difference(self.model, a2m(q0), a2m(q1))\n return np.concatenate([dq.flat, v1 - v0])",
"def __rmul__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other, coerce=False)\n\n if mv:\n newValue = self.layout.gmt_func(other.value, self.value)\n else:\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return other*obj\n newValue = other*self.value\n\n return self._newMV(newValue)",
"def levvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lev_axis1 = levAxis(mv1)\n lev_axis2 = levAxis(mv2)\n if len(lev_axis1)<=len(lev_axis2):\n lev_axis = lev_axis1\n mv = mv1\n else:\n lev_axis = lev_axis2\n mv = mv2\n levmv = cdms2.createVariable( lev_axis[:], axes=[lev_axis], id='levels',\n attributes={'units':lev_axis.units} )\n return levmv",
"def diff_v_x2(x1, x2, t=0.):\n return (omega) ** 2 * x2",
"def __truediv__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other, coerce=False)\n\n if mv:\n return self * other.inv()\n else:\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return obj/other\n newValue = self.value / other\n return self._newMV(newValue)"
] | [
"0.6467121",
"0.6038152",
"0.60112774",
"0.598649",
"0.5940264",
"0.59214985",
"0.5907114",
"0.5860278",
"0.5774416",
"0.57667285",
"0.5750668",
"0.56914055",
"0.5675003",
"0.5616137",
"0.56054354",
"0.5589946",
"0.5553921",
"0.55365384",
"0.55223125",
"0.5514815",
"0.5504505",
"0.5477639",
"0.54443294",
"0.54415566",
"0.53941643",
"0.53935504",
"0.53863615",
"0.53774726",
"0.53668",
"0.5295524"
] | 0.66377026 | 0 |
Returns time averages of the cdms2 variable mv. The average is computed only over times which lie in the specified season(s). The returned variable has the same number of dimensions as mv, but the time axis has been reduced to the number of seasons requested. The seasons are specified as an object of type cdutil.times.Seasons, which defaults to the whole year. | def timeave_seasonal( mv, seasons=seasonsyr ):
return seasons.climatology(mv) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reduce_time_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n timeax = timeAxis(mv)\n if timeax is None:\n print \"WARNING- no time axis in\",mv.id\n return None\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n mvseas = seasons.climatology(mv)\n if mvseas is None:\n print \"WARNING- cannot compute climatology for\",mv.id,seasons.seasons\n print \"...probably there is no data for times in the requested season.\"\n return None\n avmv = mvseas\n avmv.id = vid\n if hasattr(mv,'units'): avmv.units = mv.units\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def reduce2lat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n for ax in mv.getAxisList():\n if ax.getBounds() is None:\n ax._bounds_ = ax.genGenericBounds()\n timeax = timeAxis(mv)\n if timeax.units=='months':\n # Special check necessary for LEGATES obs data, because\n # climatology() won't accept this incomplete specification\n timeax.units = 'months since 0001-01-01'\n mvseas = seasons.climatology(mv)\n if mvseas is None:\n # Among other cases, this can happen if mv has all missing values.\n return None\n \n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n avmv.id = vid\n\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def reduce2latlon_seasonal( mv, seasons=seasonsyr, vid=None ):\n # This differs from reduce2lat_seasonal only in the line \"axis_names =\"....\n # I need to think about how to structure the code so there's less cut-and-paste!\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n mvseas = seasons.climatology(mv)\n \n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n for axis in mvseas.getAxisList():\n if axis.getBounds() is None:\n axis._bounds_ = axis.genGenericBounds()\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n if avmv is None: return avmv\n avmv.id = vid\n if hasattr(mv,'units'): avmv.units = mv.units\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def reduce2levlat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n\n if timeax.units=='months':\n # Special check necessary for LEGATES obs data, because\n # climatology() won't accept this incomplete specification\n timeax.units = 'months since 0001-01-01'\n mvseas = seasons.climatology(mv)\n\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n avmv.id = vid\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n\n return avmv",
"def reduce_time( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id=='time' ]\n axes_string = '('+')('.join(axis_names)+')'\n if len(axes_string)>2:\n for ax in axes:\n # The averager insists on bounds. Sometimes they don't exist, especially for obs.\n if ax.id!='lat' and ax.id!='lon' and not hasattr( ax, 'bounds' ):\n ax.setBounds( ax.genGenericBounds() )\n avmv = averager( mv, axis=axes_string )\n else:\n avmv = mv\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def seasonal_avg(var_nc,the_season,lat_slice=None,lon_slice=None): \n the_season=np.array(the_season,dtype=np.int32)\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n else:\n if lat_slice.stop is None:\n num_lats=var_nc.shape[2]\n else:\n num_lats=lat_slice.stop - lat_slice.start\n if lon_slice.stop is None:\n num_lons=var_nc.shape[3]\n else:\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n num_years=var_nc.shape[0]//12\n\n for the_year in np.arange(0,num_years):\n the_slice=var_nc[the_season,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n the_season=the_season+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg",
"def getSeasonStats(self):\n df_season_agg = self.toSeasonAggFormat()\n\n # Calculate Possessions for each game\n df_season_agg['possessions'] = 0.5 * (df_season_agg['FGA'] + 0.475 * df_season_agg['FTA'] - df_season_agg['OR'] + df_season_agg['TO']) \\\n + 0.5 * (df_season_agg['OppFGA'] + 0.475 * df_season_agg['OppFTA'] - df_season_agg['OppOR'] + df_season_agg['OppTO'])\n\n # Aggregate to Season Summary Level\n season_stats = df_season_agg.groupby(['TeamID', 'Season']).sum()\n\n season_stats = season_stats.rename(columns={'Win':'wins'})\n\n # Season Advanced Stats\n season_stats['o_eff'] = season_stats['Score'] / season_stats['possessions'] * 100\n season_stats['d_eff'] = season_stats['OppScore'] / season_stats['possessions'] * 100\n season_stats['net_eff'] = season_stats['o_eff'] - season_stats['d_eff']\n\n season_stats.drop('DayNum', axis=1, inplace=True)\n season_stats.drop('OppTeamID', axis=1, inplace=True)\n season_stats.drop('rand', axis=1, inplace=True)\n\n return season_stats",
"def compute_mse(self, results, T, nSegments):\n times_per_day = 56\n mse=0\n for i in range(nSegments):\n mse += T * np.mean(results[i][0].mse_path_[np.where(results[i][0].alphas_ == results[i][0].alpha_)[0][0]])\n mse += ((times_per_day-1)-T) * np.mean(results[i][1].mse_path_[np.where(results[i][1].alphas_ == results[i][1].alpha_)[0][0]])\n mse = mse/((times_per_day-1)*nSegments)\n return mse",
"def get_dryspells_perseason(dryspells, seasons=((12, 1, 2), (3, 4, 5),\n (6, 7, 8), (9, 10, 11))):\n dryspells_seasons = []\n for season in seasons:\n eveSeas = []\n for eveLand in dryspells:\n eves = [e for e in eveLand if e.start_date().month in season]\n eveSeas.append(eves)\n dryspells_seasons.append(eveSeas)\n\n return dryspells_seasons",
"def seasonal_mean(args_file):\n product, start_date, end_date, variable_name, shape_file = Utility.read_yml_params(args_file)\n stat = Statistic.Mean\n time = TimePeriod.Seasonal\n\n ds = get_data_set(product, shape_file)\n\n result = Utility.Apply_stat(ds, start_date, end_date, variable_name, stat, time)\n return result",
"def timeseriesCVscore(self, params):\n errors = []\n\n # values = series.values\n values = self.train_ts\n self.alpha, self.beta, self.gamma = params\n\n # set the number of folds for cross-validation\n tscv = TimeSeriesSplit(n_splits=3)\n\n # iterating over folds, train model on each, forecast and calculate error\n for train, test in tscv.split(values):\n\n self.train = values[train]\n self.test = values[test]\n self.triple_exponential_smoothing()\n predictions = self.result[-len(self.test) :]\n actual = values[test]\n error = mape(list(actual), predictions)\n errors.append(error)\n\n # print \"error: \"\n # print errors\n return np.mean(np.array(errors))",
"def seasonal_means(t, y, edges=None, hard=False):\n ts, ys = seasonal_series(t, y, edges=edges, hard=hard)\n t_means = [t.jyear.mean() for t in ts]\n t_means = astropy.time.Time(t_means, format='jyear', scale=t.scale)\n y_means = np.array([y.mean() for y in ys])\n y_std = np.array([y.std() for y in ys])\n y_N = np.array([y.size for y in ys])\n return t_means, y_means, y_std, y_N",
"def timeave_old( mv ):\n # I haven't thought yet about how missing values would work with this...\n # If time intervals be unequal, this will have to be changed...\n sh = mv.shape # e.g. [312,90,144] for t,lat,lon\n n = sh[0]\n # BTW, this is the size of everything else:\n # n2 = reduce( operator.mul, sh[1:] ) # e.g. 90*144=12960\n mvta = numpy.sum( mv.__array__(), axis=0 )\n mvta /= n\n return mvta",
"def calcSeason(ra, time):\n # Reference RA and equinox to anchor ra/season reference - RA = 0 is overhead at autumnal equinox\n # autumn equinox 2014 happened on september 23 --> equinox MJD\n Equinox = 2456923.5 - 2400000.5\n # convert ra into 'days'\n dayRA = ra / 360 * 365.25\n firstSeasonBegan = Equinox + dayRA - 0.5 * 365.25\n seasons = (time - firstSeasonBegan) / 365.25\n # Set first season to 0\n seasons = seasons - np.floor(np.min(seasons))\n return seasons",
"def get_gas_by_month(self, year, month, deseasonalize=False):\n df = self.conc_trend if deseasonalize else self.conc_seasonal\n ts = pd.Timestamp(year, month, 1)\n info_dict = {'latency': df.latency[ts]}\n return df.dmf_mean[ts], info_dict",
"def interpolateseasons(self):\n\n remainder = self.season - self.startseason\n f1 = 1.0 - remainder\n self.data = (self.startdata * f1) + (self.stopdata * remainder)",
"def distributeSeason(self):\n i = 1\n for day in self.daylist:\n if i >= monthbeg[5] and i < monthbeg[9]: #june through SEpt as per SCE\n day.season = 'summer' #https://www.sce.com/residential/rates/Time-Of-Use-Residential-Rate-Plans\n i = i + 1\n else:\n day.season = 'winter'\n i = i+1",
"def reduce2scalar( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def test_aggregate_from_month_to_seasons(\n self, months, seasons, monthly_data, monthly_data_as_seasons\n ):\n adaptor = IntervalAdaptor(\"test-month-season\")\n from_spec = Spec(\n name=\"test-var\", dtype=\"float\", dims=[\"months\"], coords={\"months\": months}\n )\n adaptor.add_input(from_spec)\n to_spec = Spec(\n name=\"test-var\",\n dtype=\"float\",\n dims=[\"seasons\"],\n coords={\"seasons\": seasons},\n )\n adaptor.add_output(to_spec)\n actual_coefficients = adaptor.generate_coefficients(from_spec, to_spec)\n\n data_array = DataArray(from_spec, monthly_data)\n\n data_handle = Mock()\n data_handle.get_data = Mock(return_value=data_array)\n data_handle.read_coefficients = Mock(return_value=actual_coefficients)\n\n adaptor.simulate(data_handle)\n actual = data_handle.set_results.call_args[0][1]\n expected = monthly_data_as_seasons\n\n assert np.allclose(actual, expected, rtol=1e-05, atol=1e-08)",
"def season_rounds(cls, season):\r\n\t\t\r\n\t\tfolder_name = cls.season_folder(season)\r\n\t\tround_list = os.listdir(f'Data/{folder_name}')\r\n\r\n\t\tall_rounds = []\r\n\r\n\t\tfor round_file in round_list:\r\n\t\t\twith open(f'Data/{folder_name}/{round_file}', 'r', encoding='utf-8') as f:\r\n\t\t\t\tround_info = f.read().splitlines()\r\n\r\n\t\t\tround_number = round_file[:-4]\r\n\t\t\tfull_round_name = f\"{season} R{round_number}\"\r\n\r\n\t\t\tround_date = int(round_info[0])\r\n\t\t\tlookup_ind = DATES.month_diff(round_date, DATES.MIN_DATE)\r\n\r\n\t\t\t# If the round isn't actually counted for TWOW Glicko\r\n\t\t\tif full_round_name not in cls.ROUNDS[lookup_ind].keys():\r\n\t\t\t\tcontinue\r\n\r\n\t\t\tcontestant_count = len(round_info) - 1\r\n\r\n\t\t\tstrength = cls.ROUNDS[lookup_ind][full_round_name][0]\r\n\r\n\t\t\tall_rounds.append([\r\n\t\t\t\tround_number,\r\n\t\t\t\tround_date,\r\n\t\t\t\tcontestant_count,\r\n\t\t\t\tstrength\r\n\t\t\t])\r\n\t\t\r\n\t\treturn all_rounds",
"def rmsse(self, seasonality: int = 1) -> float:\n q = np.abs(self._error()) / self.mae(self.true[seasonality:], self._naive_prognose(seasonality))\n return float(np.sqrt(np.mean(np.square(q))))",
"def seasonal_calc(t, y, func, edges=None):\n ts, ys = seasonal_series(t, y, edges=edges)\n t_means = [t.jyear.mean() for t in ts]\n t_means = astropy.time.Time(t_means, format='jyear', scale=t.scale)\n f_y = np.array([func(y) for y in ys])\n return t_means, f_y",
"def mov_mean_std(ts, m):\n\n if m <= 1:\n raise ValueError(\"Query length must be longer than one\")\n\n ts = ts.astype(\"float\")\n\n # Add zero to the beginning of the cumsum of ts\n s = np.insert(np.cumsum(ts), 0, 0)\n\n # Add zero to the beginning of the cumsum of ts ** 2\n s_sq = np.insert(np.cumsum(ts ** 2), 0, 0)\n seg_sum = s[m:] - s[:-m]\n seg_sum_sq = s_sq[m:] - s_sq[:-m]\n return seg_sum / m, np.sqrt(seg_sum_sq / m - (seg_sum / m) ** 2)",
"def calcSeasonModified( monthNum ):\r\n\r\n if monthNum == 12 or monthNum == 1 or monthNum == 2:\r\n return 0\r\n\r\n elif monthNum == 6 or monthNum == 7 or monthNum == 7:\r\n return 1\r\n\r\n else:\r\n return 3",
"def m_average_fun(self, dx=df.dx):\n\n mx = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([1, 0, 0])) * dx)\n my = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 1, 0])) * dx)\n mz = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 0, 1])) * dx)\n volume = df.assemble(self.material._Ms_dg * dx)\n\n return np.array([mx, my, mz]) / volume",
"def mase(self, seasonality: int = 1):\n return self.mae() / self.mae(self.true[seasonality:], self._naive_prognose(seasonality))",
"def enstrophy_average(\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3): # vorticity-3 component\n #---------------------------------------------------------------------#\n # Defining the domain variables #\n #---------------------------------------------------------------------#\n dim = omega1.shape\n time = dim[-1]\n avg = np.zeros(time)\n #---------------------------------------------------------------------#\n # Looping over the time variable #\n #---------------------------------------------------------------------#\n print_count = 51\n for i in range(0, time):\n term1 = np.square(omega1[:,:,:,i])\n term2 = np.square(omega2[:,:,:,i])\n term3 = np.square(omega3[:,:,:,i])\n enst = 0.5*(term1 + term2 + term3)\n avg[i] = np.mean(enst)\n #-----------------------------------------------------------------#\n # Printing statement #\n #-----------------------------------------------------------------#\n if print_count > 20:\n print('Enstrophy average ---> t_step = %i' %(i))\n print_count = 0\n print_count += 1\n\n return avg",
"def getNumSeasons(self):\n searchURL = \"http://api.tvmaze.com/shows/\" + str(self.__showID) \\\n + \"/seasons\"\n\n response = requests.get(searchURL)\n data = response.json()\n\n return data[-1][\"number\"]",
"def parse_episodes_by_season (self, response_data):\n episodes = {}\n raw_episodes = response_data['value']['videos']\n for episode_id in raw_episodes:\n if self._is_size_key(key=episode_id) == False:\n if (raw_episodes[episode_id]['summary']['type'] == 'episode'):\n episodes.update(self.parse_episode(episode=raw_episodes[episode_id], genres=response_data['value']['genres']))\n return episodes",
"def test_time_only_conversion_disagg(self, months, seasons):\n adaptor = IntervalAdaptor(\"test-season-month\")\n from_spec = Spec(\n name=\"test-var\",\n dtype=\"float\",\n dims=[\"seasons\"],\n coords={\"seasons\": seasons},\n )\n adaptor.add_input(from_spec)\n to_spec = Spec(\n name=\"test-var\", dtype=\"float\", dims=[\"months\"], coords={\"months\": months}\n )\n adaptor.add_output(to_spec)\n actual_coefficients = adaptor.generate_coefficients(from_spec, to_spec)\n\n data = np.array([3, 3, 3, 3])\n data_array = DataArray(from_spec, data)\n data_handle = Mock()\n data_handle.get_data = Mock(return_value=data_array)\n data_handle.read_coefficients = Mock(return_value=actual_coefficients)\n\n adaptor.simulate(data_handle)\n actual = data_handle.set_results.call_args[0][1]\n expected = np.array(\n [\n 1.033333,\n 0.933333,\n 1.01087,\n 0.978261,\n 1.01087,\n 0.978261,\n 1.01087,\n 1.01087,\n 0.989011,\n 1.021978,\n 0.989011,\n 1.033333,\n ]\n )\n np.testing.assert_allclose(actual, expected, rtol=1e-3)"
] | [
"0.7113886",
"0.68064904",
"0.6650361",
"0.61950076",
"0.5758541",
"0.5734643",
"0.55560625",
"0.54895353",
"0.5439703",
"0.522154",
"0.51681584",
"0.5160492",
"0.5129565",
"0.5069776",
"0.50522226",
"0.50236505",
"0.4989968",
"0.49448606",
"0.49113876",
"0.48840415",
"0.4866676",
"0.4850736",
"0.48409486",
"0.4814644",
"0.48000026",
"0.47813103",
"0.4770627",
"0.47444415",
"0.47058216",
"0.47034478"
] | 0.6935159 | 1 |
Returns a time average of the cdms2 variable mv. mv is a cdms2 variable, assumed to be time-dependent and indexed as is usual for CF-compliant variables, i.e. mv(time,...). What's returned is a numpy array, not a cdms2 variable. (I may change this in the future). | def timeave_old( mv ):
# I haven't thought yet about how missing values would work with this...
# If time intervals be unequal, this will have to be changed...
sh = mv.shape # e.g. [312,90,144] for t,lat,lon
n = sh[0]
# BTW, this is the size of everything else:
# n2 = reduce( operator.mul, sh[1:] ) # e.g. 90*144=12960
mvta = numpy.sum( mv.__array__(), axis=0 )
mvta /= n
return mvta | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reduce_time( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id=='time' ]\n axes_string = '('+')('.join(axis_names)+')'\n if len(axes_string)>2:\n for ax in axes:\n # The averager insists on bounds. Sometimes they don't exist, especially for obs.\n if ax.id!='lat' and ax.id!='lon' and not hasattr( ax, 'bounds' ):\n ax.setBounds( ax.genGenericBounds() )\n avmv = averager( mv, axis=axes_string )\n else:\n avmv = mv\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def reduce2scalar( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def m_average_fun(self, dx=df.dx):\n\n mx = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([1, 0, 0])) * dx)\n my = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 1, 0])) * dx)\n mz = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 0, 1])) * dx)\n volume = df.assemble(self.material._Ms_dg * dx)\n\n return np.array([mx, my, mz]) / volume",
"def monthly_avg_2darray(x):\n return x.reshape(-1,12).transpose().reshape(-1,int(x.shape[1]/12)).mean(1).reshape(12,-1).transpose()",
"def time_average(new_cube):\n\n time_average_cube = new_cube.collapsed('time', iris.analysis.MEAN)\n\n return time_average_cube",
"def timeave_seasonal( mv, seasons=seasonsyr ):\n return seasons.climatology(mv)",
"def mov_mean_std(ts, m):\n\n if m <= 1:\n raise ValueError(\"Query length must be longer than one\")\n\n ts = ts.astype(\"float\")\n\n # Add zero to the beginning of the cumsum of ts\n s = np.insert(np.cumsum(ts), 0, 0)\n\n # Add zero to the beginning of the cumsum of ts ** 2\n s_sq = np.insert(np.cumsum(ts ** 2), 0, 0)\n seg_sum = s[m:] - s[:-m]\n seg_sum_sq = s_sq[m:] - s_sq[:-m]\n return seg_sum / m, np.sqrt(seg_sum_sq / m - (seg_sum / m) ** 2)",
"def compute(dm,do):\n mae = MV.average(MV.absolute(MV.subtract(dm,do)))\n return float(mae)",
"def TemporalAverage(video_handle):\n temp_avg = np.zeros((int(video_handle.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(video_handle.get(\n cv2.CAP_PROP_FRAME_WIDTH))))\n\n while video_handle.isOpened():\n ret, frame = video_handle.read()\n if not ret:\n break\n # Converts input RGB frames to Grayscale and adds the pixel values of successive frames\n temp_avg += GrayScale(frame)\n # Find the average of each pixel in the video\n temp_avg = temp_avg / video_handle.get(cv2.CAP_PROP_FRAME_COUNT)\n\n return temp_avg",
"def getMeanRMS (self,arr):\n # in base class we return redshift and zero varinace\n # repeat that here because mean RMS is meaningless for Template SED PDFs\n N=len(arr)\n return arr[\"z\"],np.zeros(N)",
"def reduce2lat_old( mv, vid=None ):\n # >>> For now, I'm assuming that the only axes are time,lat,lon\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n # If I base another reduction function on this one, it's important to note that an average\n # in the lat direction will unavoidably need weights, because of the geometry.\n\n if vid==None:\n vid = 'reduced_'+mv.id\n time_axis, lat_axis, lon_axis = tllAxes( mv )\n\n mvta = timeave_old( mv )\n\n zm = numpy.zeros( mvta.shape[0] )\n for i in range(len(lat_axis)):\n for j in range(len(lon_axis)):\n zm[i] += mvta[i,j]\n zm[i] /= len(lon_axis)\n zmv = cdms2.createVariable( zm, axes=[lat_axis], id=vid )\n return zmv",
"def calcVmoy(self, V, idx, n):\n i = max(idx-n, 0)\n f = min(idx+n+1, V.shape[0])\n av = np.mean(V[i:f])\n return av",
"def cal_beam_AvgRMS(infile):\n \n data = np.loadtxt(infile)\n rms = data[:,3]\n avg_rms = round(np.mean(rms), 3)\n \n return avg_rms",
"def reduce_time_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n timeax = timeAxis(mv)\n if timeax is None:\n print \"WARNING- no time axis in\",mv.id\n return None\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n mvseas = seasons.climatology(mv)\n if mvseas is None:\n print \"WARNING- cannot compute climatology for\",mv.id,seasons.seasons\n print \"...probably there is no data for times in the requested season.\"\n return None\n avmv = mvseas\n avmv.id = vid\n if hasattr(mv,'units'): avmv.units = mv.units\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def find_mean_time_to_absorbtion(self):\n T = self.discrete_transition_matrix[:-3, :-3]\n S = np.linalg.inv(np.identity(len(T)) - T)\n steps2absorb = [sum([S[i,j] for j in range(len(S))]) for i in range(len(S))]\n time2absorb = [s*self.time_step for s in steps2absorb]\n self.mean_steps_to_absorbtion = {str(self.State_Space[i]): steps2absorb[i] for i in range(len(steps2absorb))}\n self.mean_time_to_absorbtion = {str(self.State_Space[i]): float(time2absorb[i]) for i in range(len(time2absorb))}",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def averageTime(self):\n \n pass",
"def forced_trend(varname, cvdp_loc):\n\n if not cvdp_loc.endswith('/'):\n cvdp_loc = cvdp_loc + '/'\n\n # Can use CVDP output\n fnames = sorted(glob('%sCESM1-CAM5-BGC-LE_*.cvdp_data.*.nc' % cvdp_loc))\n\n cvdp_name = 'tas_global_avg_mon'\n\n nfiles = len(fnames)\n ds = Dataset(fnames[0], 'r')\n time = ds['time'][:]\n time_units = ds['time'].units\n gm_em_units = ds[cvdp_name].units\n\n n = len(time)\n glob_mean = np.empty((nfiles, n))\n for counter, file in enumerate(fnames):\n ds = Dataset(file, 'r')\n glob_mean[counter, :] = ds[cvdp_name][:]\n\n # Take average across ensemble members\n gm_em = np.mean(glob_mean, axis=0)\n\n return gm_em, gm_em_units, time, time_units",
"def annual_avg_2darray(x):\n return x.reshape(-1,12).mean(1).reshape(x.shape[0],int(x.shape[1]/12))",
"def SimpleMovingAverage(self, timeperiod = 14): \r\n return ta.SMA(self.data.close,timeperiod)",
"def enstrophy_average(\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3): # vorticity-3 component\n #---------------------------------------------------------------------#\n # Defining the domain variables #\n #---------------------------------------------------------------------#\n dim = omega1.shape\n time = dim[-1]\n avg = np.zeros(time)\n #---------------------------------------------------------------------#\n # Looping over the time variable #\n #---------------------------------------------------------------------#\n print_count = 51\n for i in range(0, time):\n term1 = np.square(omega1[:,:,:,i])\n term2 = np.square(omega2[:,:,:,i])\n term3 = np.square(omega3[:,:,:,i])\n enst = 0.5*(term1 + term2 + term3)\n avg[i] = np.mean(enst)\n #-----------------------------------------------------------------#\n # Printing statement #\n #-----------------------------------------------------------------#\n if print_count > 20:\n print('Enstrophy average ---> t_step = %i' %(i))\n print_count = 0\n print_count += 1\n\n return avg",
"def avgtime(self):\n return (self._total_time['value'] / 1000) / self._total_time['count'] if self._total_time['count'] else 0",
"def extract_avg_charge_time_5(batch,index):\n avg_time = []\n for ind in index:\n cell_no = list(batch.keys())[ind]\n avg_time_ = np.average(batch[cell_no]['summary']['chargetime'][1:6]) #Cycle 2 to cycle 6\n # avg_time.append(avg_time_)\n avg_time.append(log(abs(avg_time_),10))\n avg_time = np.reshape(avg_time,(-1,1))\n return avg_time\n pass",
"def metric_average(val, name, hvd):\n tensor = val.clone().detach()\n avg_tensor = hvd.allreduce(tensor, name=name)\n return avg_tensor.item()",
"def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))",
"def mv_to_typet(mv):\n tab1 = [\n 0.0000000E+00,\n 2.5949192E+01,\n -2.1316967E-01,\n 7.9018692E-01,\n 4.2527777E-01,\n 1.3304473E-01,\n 2.0241446E-02,\n 1.2668171E-03,\n ]\n\n tab2 = [\n 0.000000E+00,\n 2.592800E+01,\n -7.602961E-01,\n 4.637791E-02,\n -2.165394E-03,\n 6.048144E-05,\n -7.293422E-07,\n 0.000000E+00,\n ]\n\n if -5.603 <= mv <= 0:\n c = tab1\n elif 0 < mv <= 20.872:\n c = tab2\n else:\n raise ValueError(\"Voltage specified is out of range for Type T thermocouple\")\n\n t = 0.0\n for p in range(0, len(c)):\n t += c[p] * math.pow(mv, p)\n return t",
"def part_b(filename):\n\n data = np.genfromtxt(get_filepath(filename), names=['abs_time', 'key'], delimiter=\",\")\n\n rel_time = [curr - last for last, curr in zip(np.concatenate(([0], data['abs_time'])), data['abs_time'])]\n rel_time_squared = [x * x for x in rel_time]\n\n return np.mean(rel_time_squared)",
"def opc_calcs(df_param_indexed):\n \n df_param_indexed = df_param_indexed.copy()\n \n ''' commented 20180210 after Calmetrix update\n # Remove for cc1 data exported with cc2\n mix_start = datetime.strptime(\n df_param_indexed.loc['Mix Time', 1], \"%d-%b-%Y %H:%M:%S\")\n log_start = datetime.strptime(\n df_param_indexed.loc['Start Time', 1], \"%d-%b-%Y %H:%M:%S\")\n time_difference = (log_start - mix_start).total_seconds()\n '''\n # select values from DataFrame and calculate mass of binder in sample\n # may be worth checking if any of these values are 0 at some point in the future\n \n m_water = float(df_param_indexed.loc['Water Mass, g', 1])\n m_cem = float(df_param_indexed.loc['Cement Mass, g', 1])\n m_sample = float(df_param_indexed.loc['Sample Mass, g', 1])\n m_sample_cem = m_sample / (m_cem + m_water) * m_cem\n \n return m_sample_cem",
"def reduce2lat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv"
] | [
"0.58772254",
"0.569831",
"0.54306823",
"0.5339006",
"0.52570695",
"0.5178657",
"0.51458687",
"0.5114982",
"0.5073207",
"0.50591934",
"0.50121516",
"0.5010453",
"0.50002575",
"0.49653572",
"0.49469918",
"0.492515",
"0.492515",
"0.48782063",
"0.48579815",
"0.4835696",
"0.4835382",
"0.48226145",
"0.47899503",
"0.47412407",
"0.47096846",
"0.4691531",
"0.4691113",
"0.46892732",
"0.4682321",
"0.46681687"
] | 0.5918535 | 0 |
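For orientation, the positive document in the row above (`timeave_old`) is just an unweighted mean over the leading time axis. A minimal NumPy-only sketch of the same computation, using a made-up (time, lat, lon) array in place of a real cdms2 variable:

```python
import numpy as np

# Made-up stand-in for mv.__array__(): 12 time steps on a 4 x 5 lat/lon grid.
data = np.random.rand(12, 4, 5)

# What timeave_old computes: sum over axis 0 (time), divided by the number of steps.
time_mean = data.sum(axis=0) / data.shape[0]

# Equivalent to an unweighted mean; the time dimension is gone from the result.
assert np.allclose(time_mean, data.mean(axis=0))
print(time_mean.shape)  # (4, 5)
```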
returns a TransientVariable containing the minimum and maximum values of all the variables provided as arguments | def minmin_maxmax( *args ):
rmin = min( [ mv.min() for mv in args ] )
rmax = max( [ mv.max() for mv in args ] )
rmv = cdms2.createVariable( [rmin,rmax] )
return rmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_minimum():\n return [\n convert_variables([0.78547, 0.78547, 0.78547]),\n ]",
"def __minimum_remaining_values(self, unassigned_vars):\n min_var = None\n for var in unassigned_vars:\n if min_var is None:\n min_var = var\n elif len(var.domain) < len(min_var.domain):\n min_var = var\n return min_var",
"def variable_range(examples, var):\n if var[1] == 'd':\n range = set()\n for datum in examples:\n range.add(datum[var[0]])\n return range\n else:\n range_min, range_max = 0, 0\n for datum in examples:\n data_val = float(datum[var[0]])\n range_min, range_max = min(range_min, data_val), max(range_max, data_val)\n return (range_min, range_max)",
"def get_max_and_min(self):\n max_x = float('-inf')\n min_x = float('inf')\n max_y = float('-inf')\n min_y = float('inf')\n max_z = float('-inf')\n min_z = float('inf')\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n counter = 0\n for src, node in self._graph.get_all_v().items():\n if node.location is not None:\n x = node.location.x\n y = node.location.y\n z = node.location.z\n counter += 1\n max_x = x if x > max_x else max_x\n min_x = x if x < min_x else min_x\n max_y = y if y > max_y else max_y\n min_y = y if y < min_y else min_y\n max_z = z if z > max_z else max_z\n min_z = z if z < min_z else min_z\n if counter > 4:\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n return ans",
"def xminmax ( self ) :\n return self.xvar.minmax()",
"def get_min_max_x(self, min_x = 1e9, max_x = -1e9, exclude = []): \n \n if self.verbose > 1:\n print(\"MultiLinearSpectra.get_min_max_x()\") \n \n for m in range(len(self.mess)):\n if m not in exclude and self.mess[m][\"class\"] not in exclude:\n min_x, max_x = self.mess[m][\"object\"].get_min_max_x(min_x, max_x)\n \n return min_x, max_x",
"def getMinMax(self,arr):\n # not implemented for Template SED yet\n return arr[\"z\"], arr[\"z\"]",
"def _get_min_max_value(min, max, value=None, step=None):\n # Either min and max need to be given, or value needs to be given\n if value is None:\n if min is None or max is None:\n raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))\n diff = max - min\n value = min + (diff / 2)\n # Ensure that value has the same type as diff\n if not isinstance(value, type(diff)):\n value = min + (diff // 2)\n else: # value is not None\n if not isinstance(value, Real):\n raise TypeError('expected a real number, got: %r' % value)\n # Infer min/max from value\n if value == 0:\n # This gives (0, 1) of the correct type\n vrange = (value, value + 1)\n elif value > 0:\n vrange = (-value, 3*value)\n else:\n vrange = (3*value, -value)\n if min is None:\n min = vrange[0]\n if max is None:\n max = vrange[1]\n if step is not None:\n # ensure value is on a step\n tick = int((value - min) / step)\n value = min + tick * step\n if not min <= value <= max:\n raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))\n return min, max, value",
"def min_values(self, lower, upper): \n if not self.lower_bounds is None:\n return self.lower_bounds\n\n minus = np.clip(self.coeffs,-math.inf,0)\n plus = np.clip(self.coeffs,0,math.inf)\n self.lower_bounds = plus.dot(lower) + minus.dot(upper) + self.const\n \n return self.lower_bounds",
"def get_min_max(ints):\n if len(ints) <= 0:\n return ()\n min_value = ints[0]\n max_value = ints[0]\n for i in range(len(ints)):\n temp = ints[i]\n if temp <= min_value:\n min_value = temp\n if temp >= max_value:\n max_value = temp\n output = (min_value, max_value)\n# print(\"output: \", output)\n return output\n pass",
"def _init_special_vars(self, T_start=None, T_end=None):\n self.min_energy = np.min(self.event_list_T[1][T_start:T_end])\n self.max_energy = np.max(self.event_list_T[1][T_start:T_end])\n self.min_time = np.min(self.event_list_T[0][T_start:T_end])\n self.max_time = np.max(self.event_list_T[0][T_start:T_end])",
"def min_max(xs):\n return min(xs), max(xs)",
"def get_min_max(ints):\r\n if len(ints) == 0:\r\n return None\r\n max = ints[0]\r\n min = ints[0]\r\n\r\n for int in ints:\r\n if int < min:\r\n min = int\r\n if int > max:\r\n max = int\r\n \r\n return min, max",
"def min_max(self, data, era):\n return 0, np.max(data)",
"def get_minmax_stats(dataframe, variable):\n\n print(\"Maximum value of \", variable, \"is: \", dataframe[variable].max())\n print(\"Minimum value of \", variable, \"is: \", dataframe[variable].min())",
"def get_minmax(self, stmt, slist):\n minel = maxel = None\n for s in slist:\n if s.keyword == \"min-elements\":\n minel = s.arg\n elif s.keyword == \"max-elements\":\n maxel = s.arg\n if minel is None:\n minst = stmt.search_one(\"min_elements\")\n if minst:\n minel = minst.arg\n else:\n minel = \"0\"\n if maxel is None:\n maxst = stmt.search_one(\"max_elements\")\n if maxst:\n maxel = maxst.arg\n return (minel, maxel)",
"def get_min_max(ints):\n if not ints:\n return None, None\n if len(ints) ==None:\n return None\n min_val = float(\"inf\")\n max_val = -float(\"inf\")\n # for each int in ints if update max_val and min_val accordingly\n for integer in ints:\n if integer > max_val:\n max_val = integer\n\n if integer < min_val:\n min_val = integer\n \n return (min_val, max_val)",
"def define_variables(m):\r\n\r\n # Non-negative candidate capacity\r\n m.mu_1 = Var(m.G_C, m.Y, within=NonNegativeReals, initialize=0)\r\n\r\n # Solar build limits\r\n m.mu_2 = Var(m.Z, m.Y, within=NonNegativeReals, initialize=0)\r\n\r\n # Wind build limits\r\n m.mu_3 = Var(m.Z, m.Y, within=NonNegativeReals, initialize=0)\r\n\r\n # Storage build limits\r\n m.mu_4 = Var(m.Z, m.Y, within=NonNegativeReals, initialize=0)\r\n\r\n # Min power output (all generators excluding storage units)\r\n m.sigma_1 = Var(m.G.difference(m.G_STORAGE), m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - existing thermal\r\n m.sigma_2 = Var(m.G_E_THERM, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - candidate thermal\r\n m.sigma_3 = Var(m.G_C_THERM, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - existing wind\r\n m.sigma_4 = Var(m.G_E_WIND, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - candidate wind\r\n m.sigma_5 = Var(m.G_C_WIND, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - existing solar\r\n m.sigma_6 = Var(m.G_E_SOLAR, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - candidate solar\r\n m.sigma_7 = Var(m.G_C_SOLAR, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - hydro\r\n m.sigma_8 = Var(m.G_E_HYDRO, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Min charging power - storage units\r\n m.sigma_9 = Var(m.G_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Min discharging power - storage_units\r\n m.sigma_10 = Var(m.G_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max charging power - existing storage\r\n m.sigma_11 = Var(m.G_E_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max charging power - candidate storage\r\n m.sigma_12 = Var(m.G_C_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max discharging power - existing storage\r\n m.sigma_13 = Var(m.G_E_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max discharging power - candidate storage\r\n m.sigma_14 = Var(m.G_C_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Min energy - storage units\r\n m.sigma_15 = Var(m.G_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max energy - existing storage units\r\n m.sigma_16 = Var(m.G_E_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max energy - candidate storage\r\n m.sigma_17 = Var(m.G_C_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Min energy - interval end\r\n m.sigma_18 = Var(m.G_STORAGE, m.Y, m.S, within=NonNegativeReals, initialize=0)\r\n\r\n # Max energy - interval end\r\n m.sigma_19 = Var(m.G_STORAGE, m.Y, m.S, within=NonNegativeReals, initialize=0)\r\n\r\n # Ramp-rate up (thermal and hydro generators)\r\n m.sigma_20 = Var(m.G_THERM.union(m.G_E_HYDRO), m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Ramp-rate down (thermal and hydro generators)\r\n m.sigma_23 = Var(m.G_THERM.union(m.G_E_HYDRO), m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Non-negative lost load power\r\n m.sigma_26 = Var(m.Z, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Min powerflow\r\n m.sigma_27 = Var(m.L, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max powerflow\r\n m.sigma_28 = Var(m.L, 
m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Storage energy transition\r\n m.zeta_1 = Var(m.G_STORAGE, m.Y, m.S, m.T, initialize=0)\r\n\r\n # Power balance (locational marginal price)\r\n m.lamb = Var(m.Z, m.Y, m.S, m.T, initialize=0)\r\n\r\n return m",
"def zminmax ( self ) :\n return self.zvar.minmax()",
"def vmnmx ( self , var , vmin , vmax ) :\n if var.xminmax() :\n vmn , vmx = var.xminmax ()\n if is_good_number ( vmin ) : vmin = max ( vmin , vmn )\n else : vmin = vmn\n if is_good_number ( vmax ) : vmax = min ( vmax , vmx )\n else : vmax = vmx\n\n assert is_good_number ( vmin ), 'Invalid type of ``min'' %s/%s' % ( vmin , type ( vmin ) )\n assert is_good_number ( vmax ), 'Invalid type of ``max'' %s/%s' % ( vmin , type ( vmin ) )\n assert vmin < vmax, 'Invalid min/max range: %s/%s' % ( vmin , vmax )\n \n return vmin , vmax",
"def _full_value_range(self):\n min_value, max_value = self._raw_data.data_range\n return max_value - min_value",
"def minMaxFonc(liste):\n\n return min(liste), max(liste)",
"def __init__(__self__, *,\n max: pulumi.Input[int],\n min: pulumi.Input[int]):\n pulumi.set(__self__, \"max\", max)\n pulumi.set(__self__, \"min\", min)",
"def __init__(__self__, *,\n max: pulumi.Input[int],\n min: pulumi.Input[int]):\n pulumi.set(__self__, \"max\", max)\n pulumi.set(__self__, \"min\", min)",
"def get_variables(self):\n return [self.g_t, self.m_t]",
"def min_max(items):\n return min(items), max(items)",
"def get_range(self, field, deep=False, axis=None):\n variables = list(self.vars(deep, with_name=field))\n\n if not variables:\n raise KeyError(\"No variable named '%s' was found!\" % field)\n\n start = [np.nanmin(self[var], axis).item(0) for var in variables]\n end = [np.nanmax(self[var], axis).item(0) for var in variables]\n return min(start), max(end)",
"def __init__(self, min: float, max: float):\n super().__init__()\n\n # store input parameters\n self.min = min\n self.max = max",
"def get_state_observed_min(self):\n minValues = numpy.zeros(self.get_num_variables())\n i = 0\n for v in self.variables:\n minValues[i] = v.get_min_value()\n i += 1\n return minValues",
"def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)"
] | [
"0.6460465",
"0.61516154",
"0.603381",
"0.5994661",
"0.5891523",
"0.5836747",
"0.58244497",
"0.57395023",
"0.5681477",
"0.5667921",
"0.56678796",
"0.5662136",
"0.56610906",
"0.5639008",
"0.562747",
"0.5615644",
"0.5598546",
"0.55895096",
"0.5584579",
"0.5576427",
"0.5575043",
"0.55732083",
"0.5564093",
"0.5564093",
"0.5563893",
"0.5562986",
"0.55533534",
"0.55489796",
"0.5524373",
"0.5512893"
] | 0.7315337 | 0 |
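The `minmin_maxmax` document in the row above reduces any number of variables to their overall minimum and maximum. A hedged sketch of the same idea with plain NumPy arrays (the cdms2.createVariable wrapper is omitted; the inputs here are invented):

```python
import numpy as np

def minmin_maxmax_sketch(*args):
    """Return (overall minimum, overall maximum) across all input arrays."""
    rmin = min(a.min() for a in args)
    rmax = max(a.max() for a in args)
    return rmin, rmax

a = np.array([3.0, -1.0, 2.5])
b = np.array([[0.5, 7.0], [4.0, 6.0]])
print(minmin_maxmax_sketch(a, b))  # (-1.0, 7.0)
```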
If mv depends on an axis with just one value, create a copy of mv without that axis, and without the corresponding data dimension. Normally this happens when time has been averaged out, but there is still a one-valued time axis left (thus one would normally use id='time'). You can specify the axis id if there might be more than one singleton. | def delete_singleton_axis( mv, vid=None ):
axes = allAxes(mv)
saxis = None
si = None
for i in range(len(axes)):
if len(axes[i])==1 and (vid==None or axes[i].id==vid):
saxis = axes[i]
si = i
del axes[si]
break
if saxis==None: return mv
data = ma.copy( mv.data )
if numpy.version.version >= '1.7.0':
data = ma.squeeze( data, axis=si )
else:
data = ma.squeeze( data ) # let's hope that there's only one singleton!
mvnew = cdms2.createVariable ( data, axes=axes, id=mv.id )
if hasattr(mv,'units'): mvnew.units = mv.units
return mvnew | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reduce_time( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id=='time' ]\n axes_string = '('+')('.join(axis_names)+')'\n if len(axes_string)>2:\n for ax in axes:\n # The averager insists on bounds. Sometimes they don't exist, especially for obs.\n if ax.id!='lat' and ax.id!='lon' and not hasattr( ax, 'bounds' ):\n ax.setBounds( ax.genGenericBounds() )\n avmv = averager( mv, axis=axes_string )\n else:\n avmv = mv\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def convert_axis( mv, axisold, axisindnew ):\n (axisnew, indexina3) = axisindnew\n axes = allAxes(mv)\n kold = None\n for k in range(len(axes)):\n if axes[k]==axisold: kold=k\n if kold==None:\n print \"ERROR. convert_axis cannot find axis\",axisold,\" in variable\",mv\n if len(axisold)==len(axisnew):\n mv.setAxis( kold, axisnew )\n return\n # Here's what we would do in 1-D:\n # newdata = ma.ones(len(axisnew))*mv.missing_value # Note that a FileVariable's missing_value is a tuple.\n # for i in range(len(axisold)):\n # newdata[ indexina3[i] ] = ma[i]\n # newmv = cdms2.createVariable( newdata, id=mv.id )\n # >1-D is the same idea, but more dimensions are coming along for the ride,\n # making it more complicated...\n shape0 = mv.shape\n shape0[kold] = len(axisnew)\n newdata = ma.ones(shape0)*mv.missing_value # Note that a FileVariable's missing_value is a tuple.\n # We want to copy ma to newdata - except that we need indirect indexing for the kold-th axis.\n # There seems to be nothing in numpy for treating one axis differently from the rest\n # (except for ellipsis, but it makes sense to use only one ellipsis and we would need two here).\n # The following will do the job. It would be very slow for an array with many big dimensions,\n # but the arrays here have already been reduced for graphics; the index sets will be small or\n # empty...\n ranges = map( range, shape0[0:kold] )\n for i in range(len(axisold)):\n for idx in apply(itertools.product,ranges):\n idx = idx + [indexina3(i)] + [Ellipsis]\n idxo = idx + [i] + [Ellipsis]\n newdata[ tuple(idx) ] = mv[idxo]\n newmv = cdms2.createVariable( newdata, id=mv.id )",
"def reduce_time_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n timeax = timeAxis(mv)\n if timeax is None:\n print \"WARNING- no time axis in\",mv.id\n return None\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n mvseas = seasons.climatology(mv)\n if mvseas is None:\n print \"WARNING- cannot compute climatology for\",mv.id,seasons.seasons\n print \"...probably there is no data for times in the requested season.\"\n return None\n avmv = mvseas\n avmv.id = vid\n if hasattr(mv,'units'): avmv.units = mv.units\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def reset_time_dim(ds):\n return ds.assign_coords(Time=pd.to_datetime(ds.XTIME.values))",
"def timeave_old( mv ):\n # I haven't thought yet about how missing values would work with this...\n # If time intervals be unequal, this will have to be changed...\n sh = mv.shape # e.g. [312,90,144] for t,lat,lon\n n = sh[0]\n # BTW, this is the size of everything else:\n # n2 = reduce( operator.mul, sh[1:] ) # e.g. 90*144=12960\n mvta = numpy.sum( mv.__array__(), axis=0 )\n mvta /= n\n return mvta",
"def squeeze(self, axis=None):\n # print 'input axis:', axis\n sh = self.data.shape\n if axis is None:\n axis = [a for i, a in enumerate(self.axes_names) if sh[i] == 1]\n else:\n assert self.has_axes(axis)\n ssh = np.array([sh[self.get_axis_id(a)] for a in axis])\n if (ssh != 1).all():\n raise Exception('Subset axes to squeeze (%s) '\n 'are not all one-length: %s'\n % (str(axis), str(ssh)))\n\n axis_ids = tuple(self.get_axis_id(a) for a in axis)\n # print 'axes to squeeze', axis\n # print 'ids :', axis_ids\n\n # select axes to keep:\n axes_names = [a for a in self.axes_names if a not in axis]\n\n axes_domains = dict((a, self.axes_domains[a]) for a in axes_names)\n\n if parse_version(np.__version__) >= parse_version('1.7'):\n data = self.data.squeeze(axis=axis_ids)\n else:\n sm = [':'] * len(sh)\n for i in axis_ids:\n sm[i] = '0'\n # print 'sm:', sm\n data = eval('self.data[%s]' % ','.join(sm))\n\n return xndarray(data, axes_names, axes_domains,\n self.value_label, self.meta_data)",
"def test_reset_temporal_axis(PM_ds_control_3d_full):\r\n smooth = 10\r\n tsmooth_kws = {\"time\": smooth}\r\n first_ori = PM_ds_control_3d_full.time[0].values\r\n first_actual = _reset_temporal_axis(\r\n PM_ds_control_3d_full, tsmooth_kws=tsmooth_kws, dim=\"time\"\r\n ).time.values[0]\r\n first_expected = f\"{first_ori}-{first_ori+smooth*1-1}\"\r\n assert first_actual == first_expected",
"def reduce2scalar( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def get_mds_axis(obj,index,strict=True):\n ax=obj.dim_of(index)\n if type(ax)!=mds.treenode.TreeNode:\n try:\n ax=ax.getAxis()\n except:\n if strict:\n raise Exception(\"Axis %s is not a treenode\"%(ax))\n return ax",
"def moveaxis(self, tensor, source, destination):\n\n axes = list(range(self.ndim(tensor)))\n if source < 0:\n source = axes[source]\n if destination < 0:\n destination = axes[destination]\n try:\n axes.pop(source)\n except IndexError:\n raise ValueError(\n \"Source should verify 0 <= source < tensor.ndim\" \"Got %d\" % source\n )\n try:\n axes.insert(destination, source)\n except IndexError:\n raise ValueError(\n \"Destination should verify 0 <= destination < tensor.ndim\"\n \"Got %d\" % destination\n )\n return self.transpose(tensor, axes)",
"def observation(self, obs):\n\n# import pdb;pdb.set_trace()\n return np.moveaxis(obs, 2, 0)",
"def add_timedim(data, date=\"1970-01-01\"):\n if isinstance(data, xr.DataArray):\n if \"time\" in data.dims:\n raise ValueError(\n \"You trying to add time dimension to the DataArray that already have it. \\\nThe reason migh be that you trying to use 2d variable (e.g. `a_ice`) \\\nin a function that accepts only 3d variables (e.g. `hovm_data`)\"\n )\n timestamp = [np.array(np.datetime64(date, \"ns\"))]\n data = data.expand_dims({\"time\": timestamp}, axis=0)\n return data\n else:\n data = np.expand_dims(data, axis=0)\n return data",
"def test_alternate_spatial_axes(conv1d_placeholder, output_size, width_axis):\n width_axis.name = \"time\"\n assert len(conv1d_placeholder.axes.find_by_name(\"time\")) == 1\n\n conv_layer = Convolution((3, output_size), lambda x: 1)\n with pytest.raises(IncompatibleAxesError):\n conv_layer(conv1d_placeholder)\n # As a dictionary\n output = conv_layer(conv1d_placeholder, spatial_axes={\"W\": \"time\"})\n assert output.axes == conv1d_placeholder.axes\n # As a tuple\n output = conv_layer(conv1d_placeholder, spatial_axes=(\"D\", \"H\", \"time\"))\n assert output.axes == conv1d_placeholder.axes",
"def reduce2latlon_seasonal( mv, seasons=seasonsyr, vid=None ):\n # This differs from reduce2lat_seasonal only in the line \"axis_names =\"....\n # I need to think about how to structure the code so there's less cut-and-paste!\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n mvseas = seasons.climatology(mv)\n \n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n for axis in mvseas.getAxisList():\n if axis.getBounds() is None:\n axis._bounds_ = axis.genGenericBounds()\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n if avmv is None: return avmv\n avmv.id = vid\n if hasattr(mv,'units'): avmv.units = mv.units\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def collapse_time(cube, ntimes, timestep):\n\n if timestep == None:\n print('Averaging over the %s time points' %(str(ntimes)))\n new_cube = cube.collapsed('time', iris.analysis.MEAN)\n else:\n assert new_cube.coords()[0] == 'time'\n new_cube = cube[timestep, :, :]\n\n return new_cube",
"def makeKeepDims(x, y, axis):\r\n x = as_tensor_variable(x)\r\n y = as_tensor_variable(y)\r\n\r\n if axis is None:\r\n axis = range(x.type.ndim)\r\n elif isinstance(axis, (int, numpy.integer)):\r\n axis = [axis]\r\n elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:\r\n axis = [int(axis)]\r\n else:\r\n axis = [int(a) for a in axis]\r\n newaxis = []\r\n for a in axis:\r\n if not isinstance(a, int):\r\n raise ValueError(\"keepdims option can be used only with constant axis\")\r\n if a < 0:\r\n a += x.type.ndim\r\n newaxis.append(a)\r\n i = 0\r\n new_dims = []\r\n for j, _ in enumerate(x.type.broadcastable):\r\n if j in newaxis:\r\n new_dims.append('x')\r\n else:\r\n new_dims.append(i)\r\n i += 1\r\n return DimShuffle(y.type.broadcastable, new_dims)(y)",
"def _create_squeeze(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axis)),\n ])\n return node",
"def onAxisLogScaleChanged(self, axis_name, on):\n if axis_name in self.axes:\n # take the old axis and replace it with the new one\n old_axis = self.axes[axis_name]\n if on:\n new_axis = QtChart.QLogValueAxis()\n else:\n new_axis = QtChart.QValueAxis()\n\n # copy the values from old_axis into new_axis\n new_axis.setRange(old_axis.min(), old_axis.max())\n new_axis.setVisible(old_axis.isVisible())\n new_axis.setGridLineVisible(old_axis.isGridLineVisible())\n new_axis.setTitleText(old_axis.titleText())\n self.axes[axis_name] = new_axis\n\n # swap the old axis for the new one in chart and all series\n # attached to old_axis\n self.chart().addAxis(self.axes[axis_name],\n self.axis_alignment[axis_name])\n for unused_name, series in self.series.items():\n if old_axis in series.attachedAxes():\n series.detachAxis(old_axis)\n series.attachAxis(new_axis)\n self.chart().removeAxis(old_axis)",
"def addExtraAxis(slab,newaxis=None,axis=0,verbose=False):\n\n import cdms2 as cdms\n import MV2 as MV\n\n if newaxis is None:\n newaxis=cdms.createAxis([1,])\n newaxis.units=''\n\n # add new axis to axis list of input <slab>\n axislist=slab.getAxisList()\n axislist.insert(axis,newaxis)\n\n #----------------Reshape----------------\n shape=list(slab.shape)\n shape.insert(axis,len(newaxis))\n slab2=MV.reshape(slab,shape)\n\n #------------Create variable------------\n att_dict=attribute_obj2dict(slab)\n slab2=cdms.createVariable(slab2,axes=axislist,attributes=att_dict,\\\n typecode='f')\n slab2.id=slab.id\n\n if verbose:\n print('\\n# <addExtraAxis>: Originial variable shape:',slab.shape)\n print('# <addExtraAxis>: New variable shape:',slab2.shape)\n\n return slab2",
"def _data_with_axis(self, axis):\n shpl = list(self.data.shape)\n \n if len(shpl) == 2:\n shpl[1] += 1\n shp = tuple(shpl)\n data = numpy.zeros(shp,dtype=self.data.dtype)\n data[:,1:] = self.data\n data[:,0] = axis.data \n elif len(shpl) == 1:\n shpl.append(2)\n shp = tuple(shpl)\n data = numpy.zeros(shp,dtype=self.data.dtype)\n data[:,1] = self.data\n data[:,0] = axis.data\n else:\n raise Exception(\"Other shapes than (N,) and (N,M) not implemented\")\n return data",
"def common_time_axis(dismr, verbose=True):\n # generate the time axis\n Nt = len(dismr)\n time = [dt.datetime(850, 1, 15)]\n for i in range(1, len(dismr)):\n y = time[i - 1].year\n m = time[i - 1].month\n if m == 12:\n y += 1\n m = 0\n time.append(dt.datetime(y, m + 1, 15))\n time = np.array(time)\n\n return time",
"def analyze_on_axis(phase_space, id_begin, id_end, ds_slice, zplot):\n\n ps = phase_space[:, (id_begin-1):id_end, :]\n # print(np.shape(ps))\n # ps = ps[numpy.logical_not(numpy.isnan(ps))]\n\n x = ps[0, :, :]\n px = ps[1, :, :]\n y = ps[2, :, :]\n py = ps[3, :, :]\n\n id_on_axis = np.zeros((4, int(id_end-id_begin+1)))\n\n for n in range(int(id_end-id_begin+1)):\n x_this = x[n, :]\n px_this = px[n, :]\n y_this = y[n, :]\n py_this = py[n, :]\n\n # Remove all NAN elements in the phase space array\n x_this = x_this[np.logical_not(np.isnan(x_this))]\n px_this = px_this[np.logical_not(np.isnan(px_this))]\n y_this = y_this[np.logical_not(np.isnan(y_this))]\n py_this = py_this[np.logical_not(np.isnan(py_this))]\n\n ## Plot X\n plt.subplot(2, 2, 1)\n plt.plot(zplot[0:len(x_this)]*1e+6, x_this*1e+6)\n plt.ylabel('Position in X/ $\\mu$m', fontsize=10)\n\n ## Plot Y\n plt.subplot(2, 2, 2)\n plt.plot(zplot[0:len(y_this)]*1e+6, y_this*1e+6)\n plt.ylabel('Position in Y/ $\\mu$m', fontsize=10)\n\n ## Plot px\n plt.subplot(2, 2, 3)\n plt.plot(zplot[0:len(px_this)]*1e+6, px_this)\n plt.ylabel('Angle in X', fontsize=10)\n\n ## Plot py\n plt.subplot(2, 2, 4)\n plt.plot(zplot[0:len(py_this)]*1e+6, py_this)\n plt.ylabel('Angle in Y', fontsize=10)\n\n\n # plt.xlabel('Longitudianl Direction of the Bunch $s$/ $\\mu$m')\n # plt.title('First Undulator Section')\n # plt.title('Second Undulator Section')\n # plt.title('Third Undulator Section')\n\n id_on_axis[0, n] = np.argmin(np.abs(x_this))\n id_on_axis[1, n] = np.argmin(np.abs(px_this))\n id_on_axis[2, n] = np.argmin(np.abs(y_this))\n id_on_axis[3, n] = np.argmin(np.abs(py_this))\n\n fig = plt.gcf()\n fig.set_size_inches(13.5, 9)\n ax = plt.gca()\n ax.yaxis.get_major_formatter().set_powerlimits((0,1))\n fig.savefig('phase_space_U3_new.png', dpi=100)\n plt.show()\n\n\n s_on_axis = np.average(id_on_axis[2:4,:])*ds_slice\n\n return id_on_axis, s_on_axis",
"def _replace_dim(da, olddim, newdim, drop=True):\n\n da_new = da.rename({olddim: newdim.name})\n # note that alignment along a dimension is skipped when you are overriding\n # the relevant coordinate values\n da_new .coords[newdim.name] = newdim\n da_new = da_new.reset_coords(drop=drop)\n return da_new",
"def uncollapse_time_from_batch(hparams, x):\n return tf.reshape(x, [hparams.batch_size, -1] + x.shape.as_list()[1:])",
"def aminusb_1ax( mv1, mv2 ):\n mv1, mv2 = reconcile_units( mv1, mv2 )\n if hasattr(mv1,'units') and hasattr(mv2,'units') and mv1.units!=mv2.units:\n print \"WARNING: aminusb_1ax1 is subtracting variables with different units!\",mv1,mv1\n if mv1 is None or mv2 is None: return None\n missing = mv1.get_fill_value()\n axis1 = allAxes(mv1)[0]\n axis2 = allAxes(mv2)[0]\n if len(axis1)<=len(axis2):\n a = mv1\n b = numpy.interp( axis1[:], axis2[:], mv2[:], left=missing, right=missing )\n else:\n a = numpy.interp( axis2[:], axis1[:], mv1[:], left=missing, right=missing )\n b = mv2\n aminusb = a - b\n aminusb.id = mv1.id\n return aminusb",
"def remove_temporal_mean(self):\n if not hasattr(self, 'detrended_data'):\n self.detrend_data()\n self.mean_removed_data = self.detrended_data - \\\n np.mean(self.detrended_data, axis=-1, keepdims=True)",
"def latvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lat_axis = latAxis(mv)\n #latmv = mv.clone() # good if mv has only a lat axis\n #latmv[:] = lat_axis[:]\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units},\n copy=True )\n return latmv",
"def uncollapse_freq_into_time(hparams, x):\n if x.shape.ndims == 3:\n return tf.reshape(x, [x.shape[0], hparams.ntimebins, constants.nfreqbins, -1])\n return tf.reshape(x, [x.shape[0], hparams.ntimebins, constants.nfreqbins])",
"def reduce2lat_old( mv, vid=None ):\n # >>> For now, I'm assuming that the only axes are time,lat,lon\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n # If I base another reduction function on this one, it's important to note that an average\n # in the lat direction will unavoidably need weights, because of the geometry.\n\n if vid==None:\n vid = 'reduced_'+mv.id\n time_axis, lat_axis, lon_axis = tllAxes( mv )\n\n mvta = timeave_old( mv )\n\n zm = numpy.zeros( mvta.shape[0] )\n for i in range(len(lat_axis)):\n for j in range(len(lon_axis)):\n zm[i] += mvta[i,j]\n zm[i] /= len(lon_axis)\n zmv = cdms2.createVariable( zm, axes=[lat_axis], id=vid )\n return zmv",
"def test_axis_preservation(conv1d_placeholder, output_size):\n conv_layer = Convolution((3, output_size), lambda x: 1)\n output = conv_layer(conv1d_placeholder)\n assert output.axes == conv1d_placeholder.axes, (\"Output axes are not the same as input axes: \"\n \"{} != {}\").format(output.axes,\n conv1d_placeholder.axes)"
] | [
"0.6247364",
"0.59301597",
"0.55981517",
"0.55702573",
"0.53490317",
"0.5049275",
"0.49913675",
"0.49404666",
"0.48569542",
"0.4843272",
"0.4835321",
"0.47630703",
"0.47483668",
"0.47441968",
"0.47385386",
"0.46919236",
"0.46870866",
"0.4654426",
"0.46510515",
"0.46408376",
"0.46343154",
"0.4608973",
"0.4606408",
"0.45731387",
"0.45673314",
"0.45604137",
"0.45567515",
"0.4555424",
"0.45495102",
"0.45459738"
] | 0.71518254 | 0 |
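The `delete_singleton_axis` document in the row above removes one length-1 axis (typically a leftover time axis) from a variable. The core operation is an axis-specific squeeze; a minimal sketch with plain NumPy, using invented shapes (the real function also carries over the cdms2 axes, id, and units):

```python
import numpy as np

# A (time=1, lat=3, lon=4) array, e.g. after time has been averaged out.
data = np.arange(12.0).reshape(1, 3, 4)

# Drop only the singleton time axis; lat and lon are untouched.
squeezed = np.squeeze(data, axis=0)
print(squeezed.shape)  # (3, 4)

# Squeezing a non-singleton axis raises ValueError, which is why the function
# first checks that the chosen axis has length 1 before removing it.
```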
Not much tested; I decided against doing overlapping line plots this way. The input arguments are two variables (cdms2 MVs, normally TransientVariables), with whatever compatibility is needed for this function to work. New axes are computed which can be used for both variables. These axes are returned as a list of tuples, each containing one new axis and index information. | def common_axes( mv1, mv2 ):
axes1 = [a[0] for a in mv1.getDomain()]
axes2 = [a[0] for a in mv2.getDomain()]
if len(axes1)!=len(axes2):
print "ERROR. common_axes requires same number of axes in",mv1," and",mv2
return None
axes3 = []
for i in range(len(axes1)):
axes3.append(common_axis( axes1[i], axes2[i] ))
return axes3 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup_axes(self):\n fig = plt.figure(1)\n axs = fig.add_subplot(1, 1, 1)\n fig.clf()\n axs = plt.subplots(1, 2)\n ax1 : plt.axis = axs[0]\n ax2 : plt.axis = axs[1]\n fig.canvas.draw()\n \n line1_t, = ax1.plot([], label='train')\n line1_v, = ax1.plot([], label='val')\n\n ax1.set_title('Loss vs Iterations')\n ax1.set_xlabel('Iterations')\n ax1.set_ylabel('Loss')\n ax1.grid(True)\n ax1.autoscale()\n # ax1.legend()\n\n line2_t, = ax2.plot([], label='train')\n line2_v, = ax2.plot([], label='val')\n\n ax2.set_title('Accuracy vs Iterations')\n ax2.set_xlabel('Time')\n ax2.set_ylabel('Percent Accuracy')\n ax2.grid(True)\n ax2.autoscale()\n # ax2.legend()\n\n lines = [line1_t, line1_v, line2_t, line2_v]\n\n return fig, ax1, ax2, lines",
"def plot_results_2d(p_1, p_2, d_1 = 'X', d_2 = 'Y'):\n plt.figure(figsize = (10, 10))\n ax = plt.axes() \n\n color=iter(cm.rainbow(np.linspace(0,1,p_1.shape[0]))) # (1)\n labels = ['Particle ' + str(pl+1) for pl in np.arange(0, p_1.shape[0], step = 1)]\n\n for p in np.arange(0, p_1.shape[0], step = 1): \n c = next(color) # (c)\n for t in np.arange(0, p_1.shape[1], step = 1): \n plt.plot(p_1[p, t], p_2[p, t], 'x', c = c, label = labels[p])\n legend_without_duplicate_labels(ax)\n ax.grid(b = 'True', which = 'major')\n ax.set_xlabel(d_1) \n ax.set_ylabel(d_2)\n ax.set_title('2D particle trajectories')",
"def plot_examples(cms):\r\n data = amp_axis\r\n\r\n fig, axs = plt.subplots(1, 2, figsize=(30, 8)) #create two plots\r\n for [ax, cmap] in zip(axs, cms):\r\n psm = ax.pcolormesh(time_axis, tof_axis, data, cmap=cmap, rasterized=True, vmin = 250) #specify axis and minimum amplitude value to show on the graph\r\n fig.colorbar(psm, ax=ax, label = 'Amplitude') #define the legend of the amplitude data\r\n \r\n ax.set_ylabel('Time of Flight [\\u03bcs]') #set label for y axis\r\n ax.set_xlabel('Time [min]') #set label for x axis\r\n \r\n ax.hlines(8.744, 0, stop_time, colors = 'white') #create two white lines for the safe operating range for ToF\r\n ax.hlines(9.555, 0, stop_time, colors = 'white') \r\n \r\n plt.show()",
"def plotResultsComparison(monthlyData1, monthlyData2, indices, arg):\n \n energyType = arg[0] \n \n dummyRange = np.asarray(range(len(indices['E_tot1'])))\n \n fig = plt.figure(figsize=(16, 8))\n \n# plt.suptitle('Heating Demand (COP=' + str(usedEfficiencies['H_COP']) + ')')\n if energyType == 'PV':\n multiplier = -1\n else:\n multiplier = 1\n \n ax1 = plt.subplot(2,1,1)\n \n plt.plot(multiplier*monthlyData1[energyType][indices['E_tot1'], dummyRange], label = 'Results1', color='b')\n plt.plot(multiplier*monthlyData2[energyType][indices['E_tot2'], dummyRange], label = 'Results2', color='g')\n \n plt.ylabel('Energy [kWh]')\n plt.legend()\n \n majorLocator = MultipleLocator(24)\n majorFormatter = FormatStrFormatter('%d')\n minorLocator = MultipleLocator(24)\n minorFormatter = FormatStrFormatter('%d')\n\n ax1.xaxis.set_major_locator(majorLocator)\n ax1.xaxis.set_major_formatter(majorFormatter)\n ax1.xaxis.set_minor_locator(minorLocator)\n# ax1.xaxis.set_minor_formatter(minorFormatter)\n plt.grid(True, which='both')\n \n ax2 = plt.subplot(2,1,2, sharex=ax1)\n \n plt.plot(multiplier*monthlyData1[energyType][indices['E_tot1'], dummyRange]-multiplier*monthlyData2[energyType][indices['E_tot2'], dummyRange], label = '1-2', color='b')\n\n plt.ylabel('Energy Difference [kWh]')\n plt.legend()\n\n ax2.xaxis.set_major_locator(majorLocator)\n ax2.xaxis.set_major_formatter(majorFormatter)\n ax2.xaxis.set_minor_locator(minorLocator)\n# ax2.xaxis.set_minor_formatter(minorFormatter)\n plt.grid(True, which='both')\n \n return fig",
"def _get_lines(self) -> tuple[VGroup, VGroup]:\n x_axis = self.get_x_axis()\n y_axis = self.get_y_axis()\n\n x_lines1, x_lines2 = self._get_lines_parallel_to_axis(\n x_axis,\n y_axis,\n self.y_axis.x_range[2],\n self.faded_line_ratio,\n )\n\n y_lines1, y_lines2 = self._get_lines_parallel_to_axis(\n y_axis,\n x_axis,\n self.x_axis.x_range[2],\n self.faded_line_ratio,\n )\n\n # TODO this was added so that we can run tests on NumberPlane\n # In the future these attributes will be tacked onto self.background_lines\n self.x_lines = x_lines1\n self.y_lines = y_lines1\n lines1 = VGroup(*x_lines1, *y_lines1)\n lines2 = VGroup(*x_lines2, *y_lines2)\n\n return lines1, lines2",
"def _make_twin_axes(self, *args, **kwargs):\n # Typically, SubplotBase._make_twin_axes is called instead of this.\n # There is also an override in axes_grid1/axes_divider.py.\n if 'sharex' in kwargs and 'sharey' in kwargs:\n raise ValueError('Twinned Axes may share only one axis.')\n ax2 = self.figure.add_axes(self.get_position(True), *args, **kwargs)\n self.set_adjustable('datalim')\n ax2.set_adjustable('datalim')\n self._twinned_axes.join(self, ax2)\n return ax2",
"def aminusb_2ax( mv1, mv2 ):\n return mv2\n mv1, mv2 = reconcile_units( mv1, mv2 )\n missing = mv1.get_fill_value()\n axes1 = allAxes(mv1)\n axes2 = allAxes(mv2)\n if axes1 is None or axes2 is None: return None\n if len(axes1)!=2: print \"ERROR @1, wrong number of axes for aminusb_2ax\",axes1\n if len(axes2)!=2: print \"ERROR @2, wrong number of axes for aminusb_2ax\",axes2\n if len(axes1[0])==len(axes2[0]):\n # Only axis2 differs, there's a better way...\n return aminusb_ax2( mv1, mv2 )\n if len(axes1[0])<=len(axes2[0]):\n if len(axes1[1])<=len(axes2[1]):\n mv1new = mv1\n # Interpolate mv2 from axis2 to axis1 in both directions. Use the CDAT regridder.\n grid1 = mv1.getGrid()\n mv2new = mv2.regrid(grid1)\n else:\n # Interpolate mv1 from axis1[1] to axis2[1]\n # Interpolate mv2 from axis2[0] to axis1[0]\n print \"ERROR @3, aminusb_2ax IS NOT FINISHED\"\n return None\n else:\n if len(axes1[1])<=len(axes2[1]):\n # Interpolate mv1 from axis1[0] to axis2[0]\n # Interpolate mv2 from axis2[1] to axis1[1]\n print \"ERROR @4, aminusb_2ax IS NOT FINISHED\"\n return None\n else:\n mv2new = mv2\n # Interpolate mv2 from axis2 to axis1 in both directions. Use the CDAT regridder.\n grid2 = mv2.getGrid()\n mv1new = mv1.regrid(grid2)\n aminusb = mv1new - mv2new\n aminusb.id = mv1.id\n return aminusb",
"def analyze_on_axis(phase_space, id_begin, id_end, ds_slice, zplot):\n\n ps = phase_space[:, (id_begin-1):id_end, :]\n # print(np.shape(ps))\n # ps = ps[numpy.logical_not(numpy.isnan(ps))]\n\n x = ps[0, :, :]\n px = ps[1, :, :]\n y = ps[2, :, :]\n py = ps[3, :, :]\n\n id_on_axis = np.zeros((4, int(id_end-id_begin+1)))\n\n for n in range(int(id_end-id_begin+1)):\n x_this = x[n, :]\n px_this = px[n, :]\n y_this = y[n, :]\n py_this = py[n, :]\n\n # Remove all NAN elements in the phase space array\n x_this = x_this[np.logical_not(np.isnan(x_this))]\n px_this = px_this[np.logical_not(np.isnan(px_this))]\n y_this = y_this[np.logical_not(np.isnan(y_this))]\n py_this = py_this[np.logical_not(np.isnan(py_this))]\n\n ## Plot X\n plt.subplot(2, 2, 1)\n plt.plot(zplot[0:len(x_this)]*1e+6, x_this*1e+6)\n plt.ylabel('Position in X/ $\\mu$m', fontsize=10)\n\n ## Plot Y\n plt.subplot(2, 2, 2)\n plt.plot(zplot[0:len(y_this)]*1e+6, y_this*1e+6)\n plt.ylabel('Position in Y/ $\\mu$m', fontsize=10)\n\n ## Plot px\n plt.subplot(2, 2, 3)\n plt.plot(zplot[0:len(px_this)]*1e+6, px_this)\n plt.ylabel('Angle in X', fontsize=10)\n\n ## Plot py\n plt.subplot(2, 2, 4)\n plt.plot(zplot[0:len(py_this)]*1e+6, py_this)\n plt.ylabel('Angle in Y', fontsize=10)\n\n\n # plt.xlabel('Longitudianl Direction of the Bunch $s$/ $\\mu$m')\n # plt.title('First Undulator Section')\n # plt.title('Second Undulator Section')\n # plt.title('Third Undulator Section')\n\n id_on_axis[0, n] = np.argmin(np.abs(x_this))\n id_on_axis[1, n] = np.argmin(np.abs(px_this))\n id_on_axis[2, n] = np.argmin(np.abs(y_this))\n id_on_axis[3, n] = np.argmin(np.abs(py_this))\n\n fig = plt.gcf()\n fig.set_size_inches(13.5, 9)\n ax = plt.gca()\n ax.yaxis.get_major_formatter().set_powerlimits((0,1))\n fig.savefig('phase_space_U3_new.png', dpi=100)\n plt.show()\n\n\n s_on_axis = np.average(id_on_axis[2:4,:])*ds_slice\n\n return id_on_axis, s_on_axis",
"def axes_subplots():\n # gerenate data\n x = np.arange(0, 6 * np.pi+0.2, 0.2)\n y_1 = np.cos(x)\n y_2 = np.sin(2*x)\n y_3 = y_1 + y_2\n\n # display multiple\n fig, axs = plt.subplots(3, 1, sharex=True)\n fig.suptitle('Subplots w/ shared axes')\n axs[0].plot(x, y_1)\n axs[1].plot(x, y_2)\n axs[2].plot(x, y_3)\n axs[0].set_ylabel('$y$')\n axs[1].set_ylabel('$y$')\n axs[2].set_ylabel('$y$')\n\n plt.show()\n\n return None",
"def TwoOrOneValuePlot(no_of_sets, Xax, Ydat1, Ydat2, Label1, Label2,\n xmin, xmax, ymin_1, ymax_1, ymin_2, ymax_2,\n XLab, YLab_1, YLab_2, SupTitle, Title, FileName,\n currentDate, currentTime, Software_version):\n\n rc('font', size=6, weight='bold')\n if no_of_sets == 1:\n fig = plt.figure(figsize=(9, 5))\n ax1 = fig.add_subplot(111)\n elif no_of_sets == 2:\n fig = plt.figure(figsize=(9, 9))\n ax1 = fig.add_subplot(211)\n else:\n print(' ERROR !!!')\n if no_of_sets == 2:\n ax1.plot(Xax, Ydat2, color=u'#ff7f0e', linestyle='-', alpha=0.4, linewidth='1.00')\n ax1.plot(Xax, Ydat1, color=u'#1f77b4', linestyle='-', alpha=1.0, linewidth='1.00', label=Label1)\n ax1.legend(loc='upper right', fontsize=6)\n ax1.grid(visible=True, which='both', color='silver', linestyle='-')\n ax1.axis([xmin, xmax, ymin_1, ymax_1])\n ax1.set_ylabel(YLab_1, fontsize=6, fontweight='bold')\n ax1.set_title(Title, fontsize=6)\n if no_of_sets == 2:\n ax1.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n ax2 = fig.add_subplot(212)\n if no_of_sets == 2:\n ax2.plot(Xax, Ydat1, color=u'#1f77b4', linestyle='-', alpha=0.4, linewidth='1.00')\n ax2.plot(Xax, Ydat2, color=u'#ff7f0e', linestyle='-', alpha=1.0, linewidth='1.00', label=Label2)\n ax2.legend(loc='upper right', fontsize=6)\n ax2.grid(visible=True, which='both', color='silver', linestyle='-')\n ax2.axis([xmin, xmax, ymin_2, ymax_2])\n ax2.set_xlabel(XLab, fontsize=6, fontweight='bold')\n ax2.set_ylabel(YLab_2, fontsize=6, fontweight='bold')\n fig.subplots_adjust(hspace=0.05, top=0.94)\n elif no_of_sets == 1:\n ax1.set_xlabel(XLab, fontsize=6, fontweight='bold')\n fig.subplots_adjust(top=0.92)\n else:\n print(' ERROR !!!')\n fig.suptitle(SupTitle, fontsize = 8, fontweight='bold')\n if no_of_sets == 2:\n fig.text(0.73, 0.06, 'Processed ' + currentDate + ' at ' + currentTime,\n fontsize=4, transform=plt.gcf().transFigure)\n fig.text(0.09, 0.06, 'Software version: ' + Software_version + ', [email protected], IRA NASU',\n fontsize=4, transform=plt.gcf().transFigure)\n elif no_of_sets == 1:\n fig.text(0.73, 0.03, 'Processed ' + currentDate + ' at '+currentTime,\n fontsize=4, transform=plt.gcf().transFigure)\n fig.text(0.09, 0.03, 'Software version: ' + Software_version + ', [email protected], IRA NASU',\n fontsize=4, transform=plt.gcf().transFigure)\n else:\n print(' ERROR !!!')\n pylab.savefig(FileName, bbox_inches='tight', dpi=160)\n plt.close('all')\n return",
"def plot_2d_topomap_intra(ax):\n\n # plot first Head \n N = 300 # number of points for interpolation\n xy_center = [-0.178,0] # center of the plot\n radius = 0.1 # radius\n\n # draw a circle\n circle = matplotlib.patches.Circle(xy = xy_center, radius = radius, edgecolor = \"k\", facecolor = \"w\")\n ax.add_patch(circle)\n \n # make the axis invisible \n for loc, spine in ax.spines.items():\n spine.set_linewidth(0)\n \n # remove the ticks\n ax.set_xticks([])\n ax.set_yticks([])\n \n # add some body parts. Hide unwanted parts by setting the zorder low\n # add two ears\n circle = matplotlib.patches.Ellipse(xy = [-0.083,-0.012], width = 0.025, height = 0.05, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n circle = matplotlib.patches.Ellipse(xy = [-0.273,-0.012], width = 0.025, height = 0.05, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n ## add a nose\n xy = [[-0.151,0.091],[-0.205,0.091], [-0.178,0.11]]\n polygon = matplotlib.patches.Polygon(xy = xy, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(polygon) \n \n\n # Plot second Head \n x2y2_center = [0.178,0] # center of the plot\n radius2 = 0.1 # radius\n \n # draw a circle\n circle = matplotlib.patches.Circle(xy = x2y2_center, radius = radius2, edgecolor = \"k\", facecolor = \"w\")\n ax.add_patch(circle)\n \n # make the axis invisible \n for loc, spine in ax.spines.items():\n spine.set_linewidth(0)\n \n # remove the ticks\n ax.set_xticks([])\n ax.set_yticks([])\n \n ## add some body parts. Hide unwanted parts by setting the zorder low\n # add two ears\n circle = matplotlib.patches.Ellipse(xy = [0.083,-0.012], width = 0.025, height = 0.05, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n circle = matplotlib.patches.Ellipse(xy = [0.273,-0.012], width = 0.025, height = 0.05, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n ## add a nose\n x2y2 = [[0.151,0.091],[0.205,0.091], [0.178,0.11]]\n polygon = matplotlib.patches.Polygon(xy = x2y2, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(polygon)",
"def aminusb_ax2( mv1, mv2 ):\n if hasattr(mv1,'units') and hasattr(mv2,'units') and mv1.units!=mv2.units:\n print \"WARING: aminusb_ax2 is subtracting variables with different units!\",mv1,mv1\n axes1 = allAxes(mv1)\n axes2 = allAxes(mv2)\n # TO DO: convert, interpolate, etc. as needed to accomodate differing first axes.\n # But for now, we'll just check a bit ...\n ax1=axes1[0]\n ax2=axes2[0]\n if ax1.shape!=ax2.shape:\n print \"ERROR aminusb_ax2 requires same axes, but shape differs:\",ax1.shape,ax2.shape\n print \"ax1,ax2\"\n return None\n if hasattr(ax1,'units') and hasattr(ax2,'units') and ax1.units!=ax2.units:\n if ax1.units=='mb':\n ax1.units = 'mbar' # udunits uses mb for something else\n if ax2.units=='mb':\n ax2.units = 'mbar' # udunits uses mb for something else\n tmp = udunits(1.0,ax2.units)\n s,i = tmp.how(ax1.units) # will raise an exception if conversion not possible\n # crude substitute for a real units library:\n #if not (ax1.units=='mb' and ax2.units=='millibars') and\\\n # not (ax1.units=='millibars' and ax2.units=='mb'):\n # print \"ERROR aminusb_ax2 requires same axes, but units differ:\",ax1.units,ax2,units\n # print \"ax1,ax2\"\n # return None\n ab_axes = [ax1]\n if len(axes1[1])<=len(axes2[1]):\n a = mv1\n b = interp2( axes1[1], mv2 )\n ab_axes.append(axes1[1])\n else:\n a = interp2( axes2[1], mv1 )\n b = mv2\n ab_axes.append(axes2[1])\n aminusb = a - b\n aminusb.id = mv1.id\n aminusb.initDomain( ab_axes )\n return aminusb",
"def multiPlot(self,indexSelect=None,varSelect=None,wrapNumber=5,\n compLines=None, save = None, xlim = None,\n forceYAxisZero = True, colourOverride = None,\n style = None, legend = None, varAsAxis = False,\n xAxisLabel = None, yAxisLabel = None, figsize = (12,10),\n legendLoc = 'lower right'):\n if isinstance(compLines,list):\n compVars = [list(i.columns) for i in compLines]\n dfB = [i.copy() for i in compLines]\n for i in range(len(compLines)):\n if \"Time\" not in compVars[i]:\n dfB[i][\"Time\"]=dfB[i].index\n else:\n compVars[i].remove(\"Time\")\n dfB[i] = pd.melt(dfB[i], id_vars=[\"Time\"],\n value_vars=compVars[i])\n elif compLines is not None:\n compVars=list(compLines.columns)\n dfB = compLines.copy()\n if \"Time\" not in compVars:\n dfB[\"Time\"]=dfB.index\n else:\n compVars.remove(\"Time\")\n dfB = pd.melt(dfB,id_vars=[\"Time\"],\n value_vars=compVars)\n if varSelect is None:\n varSelect=list(self.longData['variable'].unique())\n if indexSelect is None:\n indexSelect=list(self.longData['index'].unique())\n if not isinstance(indexSelect,list):\n indexSelect = [indexSelect]\n if len(varSelect)<wrapNumber:\n #cols = math.floor(math.sqrt(len(varSelect)))\n cols = math.ceil(math.sqrt(len(varSelect)))\n else:\n cols = wrapNumber\n rows = math.ceil(len(varSelect)/cols)\n if style is None:\n myStyle = \"darkgrid\"\n else:\n myStyle = style\n with sns.axes_style(style):\n fig, axs = plt.subplots(rows, cols, sharex=True,\n figsize=figsize)\n if (rows>1):\n axs = trim_axs(axs, len(varSelect))\n elif (cols==1):\n axs = [axs]\n if colourOverride is not None:\n myColorMap = plt.get_cmap(name=\"cool\")\n else:\n myColorMap = plt.get_cmap(name=\"hsv\",\n lut=len(indexSelect)+1)\n for ax, theVar, j in zip(axs, varSelect, range(len(varSelect))):\n if varAsAxis:\n if isinstance(yAxisLabel,list):\n ax.set_ylabel(theVar+\" \"+yAxisLabel[j])\n elif yAxisLabel is not None:\n ax.set_ylabel(theVar+\" \"+yAxisLabel)\n else:\n ax.set_ylabel(theVar)\n else:\n ax.set_title(theVar)\n if isinstance(yAxisLabel,list):\n ax.set_ylabel(yAxisLabel[j])\n elif yAxisLabel is not None:\n ax.set_ylabel(yAxisLabel)\n if xAxisLabel is not None:\n ax.set_xlabel(xAxisLabel)\n df = self.longData\n df = df[df['variable']==theVar]\n if indexSelect is not None:\n for theIndex, i in zip(indexSelect,\n range(len(indexSelect))):\n df2 = df[df['index']==theIndex]\n if colourOverride is not None:\n ax.plot(df2[\"Time\"], df2[\"value\"],\n linestyle='solid',\n color=myColorMap(colourOverride[i]))\n else:\n ax.plot(df2[\"Time\"], df2[\"value\"],\n linestyle='solid',\n color=myColorMap(i))\n if isinstance(compLines,list):\n for i, theIndex in enumerate(indexSelect):\n dfB2 = dfB[theIndex][\n dfB[theIndex]['variable']==theVar]\n if colourOverride is not None:\n ax.plot(dfB2[\"Time\"], dfB2[\"value\"],\"o\",\n color=myColorMap(colourOverride[i]))\n else:\n ax.plot(dfB2[\"Time\"], dfB2[\"value\"],\"o\",\n color=myColorMap(i))\n elif compLines is not None:\n dfB2 = dfB[dfB['variable']==theVar]\n ax.plot(dfB2[\"Time\"], dfB2[\"value\"],\"ko\")\n if xlim is not None:\n ax.set_xlim(xlim)\n if forceYAxisZero:\n ax.set_ylim([0, None])\n if legend is not None:\n if colourOverride is not None:\n custom_lines = [Line2D([0], [0], color=myColorMap(\n colourOverride[i]), lw=4)\n for i in range(len(indexSelect))]\n else:\n custom_lines = [Line2D([0], [0], color=myColorMap(i),\n lw=4)\n for i in range(len(indexSelect))]\n if ((not isinstance(compLines,list)) and\n (compLines is not None)):\n custom_lines.append(Line2D([0], [0], \n color=\"k\", 
lw=4))\n fig.legend(custom_lines, legend,\n loc = legendLoc)\n fig.tight_layout()\n if save is not None:\n fig.savefig(save)",
"def make_plots(title, \n chrg_x, csd, \n ele_x, pots, \n csd_x, est_csd, est_pot, \n true_pot=None):\n fig = plt.figure(figsize=(7,10))\n #CSDs\n ax1 = plt.subplot(211)\n if np.array(csd_x).any() != False:\n im1b = ax1.plot(csd_x, est_csd[:,0], 'r', label='kCSD', linewidth=3)\n im1a = ax1.plot(chrg_x, csd, 'g', label = 'CSD', linestyle='--', linewidth=3)\n ax1.plot(ele_x, np.zeros_like(ele_x), 'ko',markersize=2.)\n plt.legend() \n ax1.set_xlim([0.,1.])\n #ax1.set_ylim(ax1.get_ylim()[::-1]) #Zero on the top --ASK?!\n max_csd = np.maximum(max(np.abs(csd)), max(np.abs(est_csd[:,0])))\n max_csd += max_csd*0.2\n ax1.set_ylim([-max_csd, max_csd])\n ax1.set_xlabel('Depth mm')\n ax1.set_ylabel('CSD mA/mm')\n #Potentails\n ax2 = plt.subplot(212)\n ax2.plot( ele_x, np.zeros_like(ele_x),'ko',markersize=2.)\n im2b = ax2.plot(csd_x, est_pot, 'b', label = 'kPOT', linewidth=3)\n im2a = ax2.plot(chrg_x,true_pot, color = 'orange', \n linestyle='--', label='TruePOT', linewidth=3)\n ax2.set_xlim([0.,1.])\n #ax2.set_ylim(ax2.get_ylim()[::-1]) #Zero on the top --ASK?!\n ax2.plot(ele_x, pots, 'kx', markersize=8.)\n max_pots = np.maximum(max(np.abs(true_pot)), max(np.abs(est_pot)))\n max_pots += max_pots*0.2\n ax2.set_xlabel('Depth mm')\n ax2.set_ylabel('Potential mV')\n ax2.set_ylim([-max_pots, max_pots])\n fig.suptitle(title)\n plt.legend()\n plt.show()\n return",
"def _get_lines(self) -> tuple[VGroup, VGroup]:\n center = self.get_origin()\n ratio_faded_lines = self.faded_line_ratio\n offset = self.azimuth_offset\n\n if ratio_faded_lines == 0: # don't show faded lines\n ratio_faded_lines = 1 # i.e. set ratio to 1\n rstep = (1 / ratio_faded_lines) * self.x_axis.x_range[2]\n astep = (1 / ratio_faded_lines) * (TAU * (1 / self.azimuth_step))\n rlines1 = VGroup()\n rlines2 = VGroup()\n alines1 = VGroup()\n alines2 = VGroup()\n\n rinput = np.arange(0, self.x_axis.x_range[1] + rstep, rstep)\n ainput = np.arange(0, TAU, astep)\n\n unit_vector = self.x_axis.get_unit_vector()[0]\n\n for k, x in enumerate(rinput):\n new_line = Circle(radius=x * unit_vector)\n if k % ratio_faded_lines == 0:\n alines1.add(new_line)\n else:\n alines2.add(new_line)\n\n line = Line(center, self.get_x_axis().get_end())\n\n for k, x in enumerate(ainput):\n new_line = line.copy()\n new_line.rotate(x + offset, about_point=center)\n if k % ratio_faded_lines == 0:\n rlines1.add(new_line)\n else:\n rlines2.add(new_line)\n\n lines1 = VGroup(*rlines1, *alines1)\n lines2 = VGroup(*rlines2, *alines2)\n return lines1, lines2",
"def line_axes (self):\n return self._line_axes",
"def _timeseries_scatter_plot_lines(axes):\n axes.axvline(\n x=0,\n ymin=-1000,\n ymax=1000,\n color=\"grey\",\n linestyle=\"dotted\",\n alpha=0.6,\n )\n axes.axhline(\n y=0,\n xmin=-1000,\n xmax=1000,\n color=\"grey\",\n linestyle=\"dotted\",\n alpha=0.6,\n )",
"def plot_2d_topomap_inter(ax):\n\n # plot first Head \n N = 300 # number of points for interpolation\n xy_center = [-0.178,0] # center of the plot\n radius = 0.1 # radius\n\n # draw a circle\n circle = matplotlib.patches.Circle(xy = xy_center, radius = radius, edgecolor = \"k\", facecolor = \"w\")\n ax.add_patch(circle)\n \n # make the axis invisible \n for loc, spine in ax.spines.items():\n spine.set_linewidth(0)\n \n # remove the ticks\n ax.set_xticks([])\n ax.set_yticks([])\n \n # add some body parts. Hide unwanted parts by setting the zorder low\n # add two ears\n circle = matplotlib.patches.Ellipse(xy = [-0.19,0.095], width = 0.05, height = 0.025, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n circle = matplotlib.patches.Ellipse(xy = [-0.19,-0.095], width = 0.05, height = 0.025, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n ## add a nose\n xy = [[-0.087,-0.027],[-0.087,0.027], [-0.068,0]]\n polygon = matplotlib.patches.Polygon(xy = xy, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(polygon) \n \n\n # Plot second Head \n x2y2_center = [0.178,0] # center of the plot\n radius2 = 0.1 # radius\n \n # draw a circle\n circle = matplotlib.patches.Circle(xy = x2y2_center, radius = radius2, edgecolor = \"k\", facecolor = \"w\")\n ax.add_patch(circle)\n \n # make the axis invisible \n for loc, spine in ax.spines.items():\n spine.set_linewidth(0)\n \n # remove the ticks\n ax.set_xticks([])\n ax.set_yticks([])\n \n ## add some body parts. Hide unwanted parts by setting the zorder low\n # add two ears\n circle = matplotlib.patches.Ellipse(xy = [0.19,0.095], width = 0.05, height = 0.025, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n circle = matplotlib.patches.Ellipse(xy = [0.19,-0.095], width = 0.05, height = 0.025, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n ## add a nose\n x2y2 = [[0.087,-0.027],[0.087,0.027], [0.068,0]]\n polygon = matplotlib.patches.Polygon(xy = x2y2, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(polygon)",
"def princ_axes(self):\r\n # get coordinates of mesh\r\n coords = BoundaryMesh(self.mesh,\"exterior\",True).coordinates()\r\n\r\n # get distances\r\n dist = np.sqrt(np.einsum('ij->i', np.square(coords)))\r\n\r\n # get maximal value\r\n maxind = np.argmax(dist)\r\n maxdist = dist[maxind]\r\n\r\n # get minimal value\r\n minind = np.argmin(dist)\r\n mindist = dist[minind]\r\n\r\n # find coordinates of maximal and minimal points\r\n maxax = coords[maxind, :]\r\n minax = coords[minind, :]\r\n\r\n # get the cross product of these vectors,\r\n # which is the ideal mid-size axis\r\n idealax = np.cross(maxax,minax)\r\n\r\n # get the dot product of this ideal axis with the coordinates,\r\n # take the absolute value, and find the index of the maximum\r\n secind = np.argmax(np.abs(np.einsum('j,ij->i',idealax,coords)))\r\n\r\n # get the second-axis distance\r\n secdist = dist[secind]\r\n\r\n return([maxdist, secdist, mindist], [\"a\", \"b\", \"c\"])",
"def _color_twin_axes(ax1, color1, ax2, color2):\n #spines\n ax1.spines['left'].set_color(color1)\n ax1.spines['right'].set_color(color2)\n ax2.spines['left'].set_color(color1)\n ax2.spines['right'].set_color(color2)\n #text\n ax1.yaxis.label.set_color(color1)\n ax2.yaxis.label.set_color(color2)\n #ticks\n ax1.tick_params(axis = 'y', colors = color1)\n ax2.tick_params(axis = 'y', colors = color2)",
"def make_six_plots(mc, moc1, moc2, moc1name, moc2name, rxn,\n cmin=None, cmax=None, emin=None, emax=None, **kwargs):\n\t# Get reaction rate colorbars\n\t_cmin, _cmax = get_min_and_max((mc, moc1, moc2), positive=True)\n\tif cmin is None:\n\t\tcmin = max(1 - max(1 - _cmin, _cmax - 1), 0)\n\tif cmax is None:\n\t\tcmax = 1 + max(_cmax - 1, 1 - _cmin)\n\t\n\tfig = plt.figure()\n\t# Plot the OpenMC reference solution in the upper left subplot\n\taxa = fig.add_subplot(231)\n\ta = plt.imshow(mc.squeeze(), interpolation=\"none\", cmap=\"jet\")\n\tplt.title(\"OpenMC {} Distribution\".format(rxn))\n\tplt.clim(cmin, cmax)\n\tplt.colorbar(a)\n\t# Plot OpenMOC\"s fission rates in the upper center subplot\n\taxb = fig.add_subplot(232)\n\tb = plt.imshow(moc1.squeeze(), interpolation=\"none\", cmap=\"jet\")\n\tplt.title(\"OpenMOC {} Distribution\\n{}\".format(rxn, moc1name))\n\tplt.clim(cmin, cmax)\n\tplt.colorbar(b)\n\t# Plot different OpenMOC fission rates in the upper right subplot\n\taxc = fig.add_subplot(233)\n\tc = plt.imshow(moc2.squeeze(), interpolation=\"none\", cmap=\"jet\")\n\tplt.title(\"OpenMOC {} Distribution\\n{}\".format(rxn, moc2name))\n\tplt.clim(cmin, cmax)\n\tplt.colorbar(c)\n\t\n\t# Get errors and their colorbars\n\terror11 = np.divide(moc1 - mc, mc / 100)\n\terror25 = np.divide(moc2 - mc, mc / 100)\n\terrorvs = np.divide(moc1 - moc2, moc2 / 100)\n\t\n\t_emin, _emax = get_min_and_max((error11, error25, errorvs))\n\tif emin is None:\n\t\temin = min(_emin, -_emax)\n\tif emax is None:\n\t\temax = max(_emax, -_emin)\n\t\t\n\t# Plot (MOC #1 vs. MOC #2) error in the lower left\n\taxd = fig.add_subplot(234)\n\td = plt.imshow(errorvs.squeeze(), interpolation=\"none\", cmap=\"rainbow\")\n\tplt.title(\"% Relative error of {}\\nvs {}\".format(moc1name, moc2name))\n\tplt.clim(emin, emax)\n\tplt.colorbar(d)\n\t# Plot (MOC #1 vs. Monte Carlo) error in the lower center\n\taxe = fig.add_subplot(235)\n\te = plt.imshow(error11.squeeze(), interpolation=\"none\", cmap=\"rainbow\")\n\tplt.title(\"% Relative error of {}\\nvs openmc\".format(moc1name))\n\tplt.clim(emin, emax)\n\tplt.colorbar(e)\n\t# Plot (MOC #2 vs. Monte Carlo) error in the lower right\n\taxf = fig.add_subplot(236)\n\tf = plt.imshow(error25.squeeze(), interpolation=\"none\", cmap=\"rainbow\")\n\tplt.title(\"% Relative error of {}\\nvs openmc\".format(moc2name))\n\tplt.clim(emin, emax)\n\tplt.colorbar(f)\n\t\n\t# implement mode and the rest later\n\tsix_axes = (axa, axb, axc, axd, axe, axf)\n\treturn fig, six_axes",
"def cartesian_coordinates(self, *axes):",
"def ft_ax(ax=None,\n y=1.03,\n yy=1.1,\n title=None,\n subtitle=None,\n source=None,\n add_box=False,\n left_axis=False):\n\n if ax is None:\n ax = plt.gca()\n\n ax.set_axisbelow(True)\n \n if title is not None:\n title = plt.title(title, y=y, loc='left')\n if subtitle is not None:\n plt.annotate(subtitle, xy=title.get_position(),\n xycoords='axes fraction', xytext=(0,-11), \n textcoords='offset points', size='large') \n \n if source is not None:\n src = plt.annotate(source, xy=(0,0), \n xycoords='axes fraction', xytext=(0,-35), \n textcoords='offset points', ha='left', va='top', size='small')\n \n # axes and grid-lines\n plt.grid(axis='y', linewidth=.5)\n sns.despine(left=True)\n if not left_axis:\n ax.yaxis.tick_right()\n ax.yaxis.set_label_position('right')\n ax.yaxis.set_label_coords(1,yy)\n ax.yaxis.get_label().set_rotation(0)\n ax.tick_params('y', length=0)\n \n plt.tight_layout()\n \n if add_box:\n ax2 = plt.axes(ax.get_position().bounds, facecolor=(1,1,1,0))\n ax2.xaxis.set_visible(False)\n ax2.yaxis.set_visible(False)\n x,y = np.array([[.01, 0.15], [y+.12, y+.12]])\n line = matplotlib.lines.Line2D(x, y, lw=6., color='k')\n ax2.add_line(line)\n line.set_clip_on(False)\n \n if add_box and source is not None:\n return (line, src)\n elif not add_box and source is not None:\n return (src,)\n elif add_box and source is None:\n return (line,)\n else:\n return []",
"def _timeseries_scatter_plot_axlim(axes, var_combination, slope,\n mins_maxs):\n min_lim, max_lim, min_glob, max_glob = mins_maxs\n for box in range(3):\n axes[box].set_ylim(min_lim, max_lim)\n if var_combination == \"pr:tas\":\n min_l = min(min_glob) - (max(max_glob) - min(min_glob)) * 0.1\n max_l = max(max_glob) + (max(max_glob) - min(min_glob)) * 0.1\n axes[box].set_xlim(min_l, max_l)\n else:\n axes[box].set_xlim(min_lim, max_lim)\n\n if (slope[\"cmip5\"] + slope[\"cmip6\"]) >= 0:\n axes[box].plot(\n [-1000, 1000],\n [-1000, 1000],\n color=\"gray\",\n alpha=0.6,\n )\n else:\n axes[box].plot(\n [-1000, 1000],\n [1000, -1000],\n color=\"gray\",\n alpha=0.6,\n )",
"def setup_axes2(fig, rect,tmin, tmax,zmin,zmax):\n\n tr =PolarAxes.PolarTransform()\n pi = np.pi\n\n angle_ticks = [(tmin, '%.2f' % tmin), (0,r'$0$'), (tmax, '%.2f' % tmax)]\n\n grid_locator1 = FixedLocator([v for v, s in angle_ticks])\n tick_formatter1 = DictFormatter(dict(angle_ticks))\n\n grid_locator2 = MaxNLocator(4)\n\n grid_helper = floating_axes.GridHelperCurveLinear(\n tr, extremes=(tmax, tmin, zmax, zmin),\n grid_locator1=grid_locator1,\n grid_locator2=grid_locator2,\n tick_formatter1=tick_formatter1,\n tick_formatter2=None)\n\n ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)\n fig.add_subplot(ax1)\n\n # create a parasite axes whose transData in RA, cz\n aux_ax = ax1.get_aux_axes(tr)\n\n aux_ax.patch = ax1.patch # for aux_ax to have a clip path as in ax\n ax1.patch.zorder = 0.95 # but this has a side effect that the patch is\n # drawn twice, and possibly over some other\n # artists. So, we decrease the zorder a bit to\n # prevent this.\n\n return ax1, aux_ax",
"def common_axis( axis1, axis2 ):\n if hasattr( axis1, 'units' ):\n units1 = axis1.units.lower().replace(' ','_')\n if axis1.isTime():\n axis1.toRelativeTime( units1 ) #probably will change input argument\n else:\n units1 = None\n if hasattr( axis2, 'units' ):\n units2 = axis2.units.lower().replace(' ','_')\n else:\n units2 = None\n if units1!=None and units2!=None and units1 != units2:\n if axis1.isTime() and axis2.isTime():\n axis2.toRelativeTime( units1, axis1.getCalendar() ) #probably will change input argument\n else:\n print \"ERROR. common_axis does not yet support differing units\",axis1.units,\" and \",axis2.units\n return None\n if axis1.isTime() or axis2.isTime():\n if not axis2.isTime() or not axis1.isTime():\n print \"ERROR. In common_axis, one axis is time, not the other\"\n return None\n if not axis1.calendar==axis2.calendar:\n print \"ERROR. common_axis does not yet support differing calendars.\"\n if len(axis1)==1 and len(axis2)==1:\n # There's just one time value, probably from averaging over time. The time value is meaningless\n # but it would be messy to have two.\n return (axis1,[0],[0])\n\n # to do: similar checks using isLatitude and isLongitude and isLevel <<<<<<\n # Also, transfer long_name, standard_name, axis attributes if in agreement;\n # units and calendar attributes should always be transferred if present.\n # Also to do: use bounds if available\n a12 = numpy.concatenate( [ axis1.getData(), axis2.getData() ] )\n a3, a12indexina3 = numpy.unique( a12, return_inverse=True )\n #... a3 has only unique indices and is sorted (unfortunately, uniqueness is based on exact identity,\n # not to some numerical tolerance). For an i index into a12 (thus 0<=i<len(axis1)+len(axis2),\n # j is an index into a3 such that, if a12indexina3[i]==j, then a1[i]==a3[j].\n a1indexina3 = a12indexina3[0:len(axis1)]\n a2indexina3 = a12indexina3[len(axis1):len(axis1)+len(axis2)]\n\n if hasattr(axis1,'id') and hasattr(axis2,'id') and axis1.id==axis2.id :\n vid = axis1.id\n else:\n vid = None\n axis3 = cdms2.createAxis( a3, bounds=None, id=vid )\n axis3.units = units1\n return (axis3,a1indexina3,a2indexina3)",
"def make_plot(solution, t, plot_Ts, plot_T1, plot_T2, xaxis, cc, delta_cc, albedo,delta_albedo\\\n , em1, delta_em1, em2, delta_em2):\n\n plt.close('all')\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n \n if xaxis == 'cloud cover':\n inc_cc = []\n for i in range(len(solution[0,:])):\n inc_cc.append(cc + (i*delta_cc)/calcs_per_timestep)\n\n if plot_Ts == 'On': ax1.plot(inc_cc,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_cc,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_cc,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n\n elif xaxis == 'time':\n \n #for i in range(len(solution[0,:])):\n #t.append(i*(timestep/calcs_per_timestep))\n \n if plot_Ts == 'On': ax1.plot(t,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(t,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(t,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'albedo':\n inc_alb = []\n for i in range(len(solution[0,:])):\n inc_alb.append(albedo+(i*delta_albedo)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_alb,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_alb,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_alb,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'epsilon1':\n inc_em = []\n for i in range(len(solution[0,:])):\n inc_em.append(em1+(i*delta_em1)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_em,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_em,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_em,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'epsilon2':\n inc_em = []\n for i in range(len(solution[0,:])):\n inc_em.append(em2+(i*delta_em2)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_em,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_em,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_em,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n else: raise ValueError('No x axis selected')\n \n fig.suptitle('Global Average Temperature')\n ax1.set_title(f'Final Surface Temperature = {round(solution[0,-1],2)} K')\n ax1.legend()\n\n if xaxis == 'cloud cover': ax1.set_xlabel('Cloud Cover (%)')\n elif xaxis == 'time': ax1.set_xlabel('Time (years)')\n elif xaxis == 'albedo': ax1.set_xlabel('Albedo')\n elif xaxis == 'epsilon1': ax1.set_xlabel(u'\\u03B5\\u2081')\n elif xaxis == 'epsilon2': ax1.set_xlabel(u'\\u03B5\\u2082')\n plt.ylabel('Temerature (K)')\n return fig",
"def update_cross_sections(self):\n self.get_xcs_panel().update_plot()\n self.get_ycs_panel().update_plot()",
"def plot_overlay2axes(self, axes) -> None:\n # calculate height (based on leaf analysis ratio)\n upper_point = (\n self.leaf_center_px - self.leaf_width_px / 2 * self._analysis_ratio\n )\n lower_point = (\n self.leaf_center_px + self.leaf_width_px / 2 * self._analysis_ratio\n )\n height = abs(upper_point - lower_point) * 0.8\n\n for idx, line in enumerate(self.marker_lines):\n width = abs(self.error[idx]) * self._image.dpmm\n y = line.center.y\n x = self.position[idx] - (self.error[idx] * self._image.dpmm) / 2\n\n if self._orientation == Orientation.UP_DOWN:\n r = Rectangle(width, height, center=(x, y))\n # if any of the values are over tolerance, show another larger rectangle to draw the eye\n if not self.passed[idx] or not self.passed_action[idx]:\n re = Rectangle(\n self._image_window.shape[1] * 0.2, height * 1.2, center=(x, y)\n )\n re.plot2axes(\n axes,\n edgecolor=\"none\",\n fill=True,\n alpha=0.5,\n facecolor=self.bg_color[idx],\n )\n else:\n r = Rectangle(height, width, center=(x, y))\n if not self.passed[idx] or not self.passed_action[idx]:\n re = Rectangle(\n self._image_window.shape[1] * 0.2, height * 1.2, center=(x, y)\n )\n re.plot2axes(\n axes,\n edgecolor=\"none\",\n fill=True,\n alpha=0.5,\n facecolor=self.bg_color[idx],\n )\n r.plot2axes(\n axes, edgecolor=\"none\", fill=True, alpha=1, facecolor=self.bg_color[idx]\n )",
"def populate_axes_with_euclidean(pid_series, series_was_shifted_to, axes):\n euclidean_distance(pid_series, series_was_shifted_to).plot(kind='bar', legend=False, ax=axes)"
] | [
"0.57410544",
"0.57360107",
"0.5571669",
"0.55329823",
"0.5515343",
"0.54836935",
"0.5431624",
"0.5425385",
"0.5408111",
"0.5307821",
"0.5279651",
"0.5267647",
"0.52437675",
"0.52202445",
"0.51879686",
"0.5187689",
"0.51863897",
"0.5173515",
"0.51703244",
"0.5135122",
"0.5129557",
"0.5126211",
"0.5114361",
"0.51135707",
"0.5112207",
"0.51084524",
"0.5105865",
"0.5095333",
"0.50933015",
"0.508934"
] | 0.63710725 | 0 |
Not much tested. I decided against doing overlapping line plots this way. The input arguments are two axes (AbstractAxis class), as compatible as necessary for the following to be sensible. This function has 3 return values. It returns a TransientAxis which includes all the points of the input axes. It may be one of the inputs. It also returns index information from which one can determine whether a point of the new axis came from axis1 or axis2 or both. | def common_axis( axis1, axis2 ):
if hasattr( axis1, 'units' ):
units1 = axis1.units.lower().replace(' ','_')
if axis1.isTime():
axis1.toRelativeTime( units1 ) #probably will change input argument
else:
units1 = None
if hasattr( axis2, 'units' ):
units2 = axis2.units.lower().replace(' ','_')
else:
units2 = None
if units1!=None and units2!=None and units1 != units2:
if axis1.isTime() and axis2.isTime():
axis2.toRelativeTime( units1, axis1.getCalendar() ) #probably will change input argument
else:
print "ERROR. common_axis does not yet support differing units",axis1.units," and ",axis2.units
return None
if axis1.isTime() or axis2.isTime():
if not axis2.isTime() or not axis1.isTime():
print "ERROR. In common_axis, one axis is time, not the other"
return None
if not axis1.calendar==axis2.calendar:
print "ERROR. common_axis does not yet support differing calendars."
if len(axis1)==1 and len(axis2)==1:
# There's just one time value, probably from averaging over time. The time value is meaningless
# but it would be messy to have two.
return (axis1,[0],[0])
# to do: similar checks using isLatitude and isLongitude and isLevel <<<<<<
# Also, transfer long_name, standard_name, axis attributes if in agreement;
# units and calendar attributes should always be transferred if present.
# Also to do: use bounds if available
a12 = numpy.concatenate( [ axis1.getData(), axis2.getData() ] )
a3, a12indexina3 = numpy.unique( a12, return_inverse=True )
#... a3 has only unique indices and is sorted (unfortunately, uniqueness is based on exact identity,
    # not to within some numerical tolerance). For an i index into a12 (thus 0<=i<len(axis1)+len(axis2)),
    # j is an index into a3 such that, if a12indexina3[i]==j, then a12[i]==a3[j].
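    # In other words, a1indexina3[i] is the position of axis1[i] within a3, and a2indexina3[j] is the position of axis2[j] within a3.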
a1indexina3 = a12indexina3[0:len(axis1)]
a2indexina3 = a12indexina3[len(axis1):len(axis1)+len(axis2)]
if hasattr(axis1,'id') and hasattr(axis2,'id') and axis1.id==axis2.id :
vid = axis1.id
else:
vid = None
axis3 = cdms2.createAxis( a3, bounds=None, id=vid )
axis3.units = units1
return (axis3,a1indexina3,a2indexina3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def common_axes( mv1, mv2 ):\n axes1 = [a[0] for a in mv1.getDomain()]\n axes2 = [a[0] for a in mv2.getDomain()]\n if len(axes1)!=len(axes2):\n print \"ERROR. common_axes requires same number of axes in\",mv1,\" and\",mv2\n return None\n axes3 = []\n for i in range(len(axes1)):\n axes3.append(common_axis( axes1[i], axes2[i] ))\n return axes3",
"def _make_twin_axes(self, *args, **kwargs):\n # Typically, SubplotBase._make_twin_axes is called instead of this.\n # There is also an override in axes_grid1/axes_divider.py.\n if 'sharex' in kwargs and 'sharey' in kwargs:\n raise ValueError('Twinned Axes may share only one axis.')\n ax2 = self.figure.add_axes(self.get_position(True), *args, **kwargs)\n self.set_adjustable('datalim')\n ax2.set_adjustable('datalim')\n self._twinned_axes.join(self, ax2)\n return ax2",
"def intersection(self, axis2):",
"def _find_axes(cls, input_data, explicit_x=None):\n\n if isinstance(input_data, pd.Series):\n if explicit_x is not None:\n raise ArgumentError(\"You cannot pass an explicit x axis with a pandas Series\")\n\n return input_data.index, input_data.values\n elif isinstance(input_data, pd.DataFrame):\n if explicit_x is not None:\n raise ArgumentError(\"You cannot pass an explicit x axis with a pandas DataFrame\")\n\n return input_data.index, input_data.values[:, 0]\n elif isinstance(input_data, np.ndarray):\n if len(input_data.shape) == 2 and input_data.shape[0] == 2:\n if explicit_x is not None:\n raise ArgumentError(\"You cannot pass an explicit x axis with a 2D array of input data\")\n\n return input_data[:, 0], input_data[:, 1]\n elif len(input_data.shape) == 1:\n if explicit_x is not None:\n if len(explicit_x) != len(input_data):\n raise ArgumentError(\"Your explicit x data has a different length that your y data\", x_length=len(explicit_x), y_length=len(input_data))\n\n return explicit_x, input_data\n else:\n return np.linspace(0, len(input_data) - 1, len(input_data)), input_data\n elif explicit_x is not None:\n return np.array(explicit_x), np.array(explicit_x)\n\n return np.linspace(0, len(input_data) - 1, len(input_data)), np.array(input_data)",
"def aminusb_2ax( mv1, mv2 ):\n return mv2\n mv1, mv2 = reconcile_units( mv1, mv2 )\n missing = mv1.get_fill_value()\n axes1 = allAxes(mv1)\n axes2 = allAxes(mv2)\n if axes1 is None or axes2 is None: return None\n if len(axes1)!=2: print \"ERROR @1, wrong number of axes for aminusb_2ax\",axes1\n if len(axes2)!=2: print \"ERROR @2, wrong number of axes for aminusb_2ax\",axes2\n if len(axes1[0])==len(axes2[0]):\n # Only axis2 differs, there's a better way...\n return aminusb_ax2( mv1, mv2 )\n if len(axes1[0])<=len(axes2[0]):\n if len(axes1[1])<=len(axes2[1]):\n mv1new = mv1\n # Interpolate mv2 from axis2 to axis1 in both directions. Use the CDAT regridder.\n grid1 = mv1.getGrid()\n mv2new = mv2.regrid(grid1)\n else:\n # Interpolate mv1 from axis1[1] to axis2[1]\n # Interpolate mv2 from axis2[0] to axis1[0]\n print \"ERROR @3, aminusb_2ax IS NOT FINISHED\"\n return None\n else:\n if len(axes1[1])<=len(axes2[1]):\n # Interpolate mv1 from axis1[0] to axis2[0]\n # Interpolate mv2 from axis2[1] to axis1[1]\n print \"ERROR @4, aminusb_2ax IS NOT FINISHED\"\n return None\n else:\n mv2new = mv2\n # Interpolate mv2 from axis2 to axis1 in both directions. Use the CDAT regridder.\n grid2 = mv2.getGrid()\n mv1new = mv1.regrid(grid2)\n aminusb = mv1new - mv2new\n aminusb.id = mv1.id\n return aminusb",
"def axes2indices(\n self,\n axes:'Union['\n 'Tuple[Union[ConvertableAxisClass, ellipsis], ...], '\n 'Mapping[NamedIndex, ConvertableAxisClass],'\n ']',\n )->'Any':\n\n if isinstance(axes, dict):\n indices = []\n for dim, axes_ in self.items():\n axis = axes.get(dim)\n if axis is None and dim in axes:\n warnings.warn(f'it does not make sense using None(at dim {dim!r}) '\n 'in a named index, it whould be translated into '\n 'slice(None)(i.e. :)')\n index = slice(None) if axis is None else axis2index(axes_, axis)\n indices.append(index)\n return tuple(indices)\n\n axes = axes if isinstance(axes, tuple) else (axes, )\n idx_elps = naxis = len(axes)\n for idx_axis, axis in enumerate(axes):\n if isinstance(axis, type(Ellipsis)):\n assert idx_elps == naxis, 'more than one ellipsis is not allowed'\n\n idx_elps = idx_axis\n\n indices = []\n idx_axis = idx_dim = 0\n while idx_axis < idx_elps:\n axis = axes[idx_axis]\n index = None if axis is None else axis2index(self._dim_axes[idx_dim], axis)\n indices.append(index)\n idx_axis += 1\n idx_dim += index is not None\n\n if idx_elps < naxis:\n indices.append(axes[idx_elps])\n remainder = idx_elps + 1 - naxis\n indices_ = []\n idx_axis = idx_dim = -1\n while idx_axis >= remainder:\n axis = axes[idx_axis]\n index = None if axis is None else axis2index(self._dim_axes[idx_dim], axis)\n indices_.append(index)\n idx_axis -= 1\n idx_dim -= index is not None\n indices_.reverse()\n indices.extend(indices_)\n\n return tuple(indices)",
"def axline(x=None, y=None, a=None, b=None,\n xlim=None, ylim=None, xinvert=False, yinvert=False, xlog=False, ylog=False, title=None,\n xlabel=None, ylabel=None, label=None, grid=None, ax=None, plot_kw={}, **kwargs):\n\n # Get the relevant axis\n if ax is not None:\n if isinstance(ax, (list, tuple, ndarray)):\n if len(shape(ax)) > 1: # If ax array is multi-dimensional, flatten it\n ax = array(ax).flatten()\n else:\n ax = [ax] # Axis must be a list-like object\n else:\n ax = [gca()]\n \n old_ax = axes_handler(ax[0]) # sets the current axis and returns old axis\n\n # Validate input parameters\n if not (any([is_numeric(var) for var in [x, y, a, b]])): # If nothing has been specified\n raise TypeError(\"axline() missing one of optional arguments: 'x', 'y', 'a' or 'b'\")\n\n for i, val in enumerate([x, y, a, b]):\n if (val is not None):\n try: # Test whether the parameter is iterable\n _ = (k for k in val)\n except TypeError: # If not, convert to a list\n if (i == 0): x = [x]\n elif (i == 1): y = [y]\n elif (i == 2): a = [a]\n elif (i == 3): b = [b]\n\n if (x is not None and y is not None): # Check whether both x and y were specified\n raise ValueError(\"'x' and 'y' cannot be both specified\")\n\n if (x is not None): # Check conditions if x specified\n if (any([a, b])): # Should not specify a or b, if x given.\n raise ValueError(\"'{0}' cannot be specified if x specified\".format('a' if a else 'b'))\n L = len(x)\n\n if (y is not None): # Check conditions if y specified\n if (any([a, b])): # Should not specify a or b, if y given.\n raise ValueError(\"'{0}' cannot be specified if y specified\".format('a' if a else 'b'))\n L = len(y)\n\n if (a is not None):\n if (b is None): # If no intercept specified\n b = [0] * len(a) # set b to 0 for all a\n else:\n if (len(b) == 1):\n b = [b[0]] * len(a)\n elif (len(b) != len(a)):\n if (len(a) == 1):\n a = [a[0]] * len(b)\n else:\n raise ValueError(f\"Length of 'a' ({len(a)}) and length of 'b' ({len(b)}) must be equal or otherwise 1\")\n L = len(a)\n elif (b is not None):\n if (a is None): # If no slope specified\n a = [1] * len(b) # set a to 1 for all b\n L = len(b)\n\n if not isinstance(label, list):\n label = [label] * L\n elif (len(label) != L):\n raise ValueError(\"Length of label list ({0}) must match the number of lines given ({1}).\".format(len(label), L))\n\n # Combine the `explicit` plot_kw dictionary with the `implicit` **kwargs dictionary\n plot_par = {**plot_kw, **kwargs}\n\n # Create 'L' number of plot kwarg dictionaries to parse into each plot call\n plot_par = dict_splicer(plot_par, L, [1] * L)\n\n lines = [[]] * len(ax) # Initialise list which contains each Line2D object\n for jj, axis in enumerate(ax): # Loop over all axes\n gridpar = grid_handler(grid, axis)\n\n ax_xlim = axis.get_xlim() if lims_handler(xlim, axis) is None else xlim\n ax_ylim = axis.get_ylim() if lims_handler(ylim, axis) is None else ylim\n\n if (x is not None):\n for ii, xx in enumerate(x):\n lines[jj].append(axis.axvline(x=xx, **plot_par[ii], label=label[ii]))\n if (y is not None):\n for ii, yy in enumerate(y):\n lines[jj].append(axis.axhline(y=yy, **plot_par[ii], label=label[ii]))\n if (a is not None):\n for ii, (aa, bb) in enumerate(zip(a, b)): # Loop over all lines\n lines[jj].append(axis.axline(xy1=(0, bb), slope=aa, label=label[ii], **plot_par[ii]))\n\n _plot_finalizer(xlog, ylog, ax_xlim, ax_ylim, title, xlabel, ylabel, xinvert, yinvert, gridpar, axis)\n\n if old_ax is not None: # Reset the previously set axis\n sca(old_ax)\n\n return squeeze(lines).tolist() # 
Reduce the dimensionality of the lines, if needed",
"def plot_line(ax, p1, p2, *args, **kwargs):\n ax.plot(*zip(p1, p2), *args, **kwargs)",
"def setup_axes(self):\n fig = plt.figure(1)\n axs = fig.add_subplot(1, 1, 1)\n fig.clf()\n axs = plt.subplots(1, 2)\n ax1 : plt.axis = axs[0]\n ax2 : plt.axis = axs[1]\n fig.canvas.draw()\n \n line1_t, = ax1.plot([], label='train')\n line1_v, = ax1.plot([], label='val')\n\n ax1.set_title('Loss vs Iterations')\n ax1.set_xlabel('Iterations')\n ax1.set_ylabel('Loss')\n ax1.grid(True)\n ax1.autoscale()\n # ax1.legend()\n\n line2_t, = ax2.plot([], label='train')\n line2_v, = ax2.plot([], label='val')\n\n ax2.set_title('Accuracy vs Iterations')\n ax2.set_xlabel('Time')\n ax2.set_ylabel('Percent Accuracy')\n ax2.grid(True)\n ax2.autoscale()\n # ax2.legend()\n\n lines = [line1_t, line1_v, line2_t, line2_v]\n\n return fig, ax1, ax2, lines",
"def cube_like_index_to_sequence_and_common_axis_indices(cube_like_index, common_axis,\n common_axis_lengths):\n cumul_lengths = np.cumsum(common_axis_lengths)\n sequence_index = np.arange(len(cumul_lengths))[cumul_lengths > cube_like_index][0]\n if sequence_index == 0:\n common_axis_index = cube_like_index\n else:\n common_axis_index = cube_like_index - cumul_lengths[sequence_index - 1]\n return sequence_index, common_axis_index",
"def _color_twin_axes(ax1, color1, ax2, color2):\n #spines\n ax1.spines['left'].set_color(color1)\n ax1.spines['right'].set_color(color2)\n ax2.spines['left'].set_color(color1)\n ax2.spines['right'].set_color(color2)\n #text\n ax1.yaxis.label.set_color(color1)\n ax2.yaxis.label.set_color(color2)\n #ticks\n ax1.tick_params(axis = 'y', colors = color1)\n ax2.tick_params(axis = 'y', colors = color2)",
"def aminusb_ax2( mv1, mv2 ):\n if hasattr(mv1,'units') and hasattr(mv2,'units') and mv1.units!=mv2.units:\n print \"WARING: aminusb_ax2 is subtracting variables with different units!\",mv1,mv1\n axes1 = allAxes(mv1)\n axes2 = allAxes(mv2)\n # TO DO: convert, interpolate, etc. as needed to accomodate differing first axes.\n # But for now, we'll just check a bit ...\n ax1=axes1[0]\n ax2=axes2[0]\n if ax1.shape!=ax2.shape:\n print \"ERROR aminusb_ax2 requires same axes, but shape differs:\",ax1.shape,ax2.shape\n print \"ax1,ax2\"\n return None\n if hasattr(ax1,'units') and hasattr(ax2,'units') and ax1.units!=ax2.units:\n if ax1.units=='mb':\n ax1.units = 'mbar' # udunits uses mb for something else\n if ax2.units=='mb':\n ax2.units = 'mbar' # udunits uses mb for something else\n tmp = udunits(1.0,ax2.units)\n s,i = tmp.how(ax1.units) # will raise an exception if conversion not possible\n # crude substitute for a real units library:\n #if not (ax1.units=='mb' and ax2.units=='millibars') and\\\n # not (ax1.units=='millibars' and ax2.units=='mb'):\n # print \"ERROR aminusb_ax2 requires same axes, but units differ:\",ax1.units,ax2,units\n # print \"ax1,ax2\"\n # return None\n ab_axes = [ax1]\n if len(axes1[1])<=len(axes2[1]):\n a = mv1\n b = interp2( axes1[1], mv2 )\n ab_axes.append(axes1[1])\n else:\n a = interp2( axes2[1], mv1 )\n b = mv2\n ab_axes.append(axes2[1])\n aminusb = a - b\n aminusb.id = mv1.id\n aminusb.initDomain( ab_axes )\n return aminusb",
"def axis2index(axes:NamedAxes, axis:ConvertableAxisClass)->IndexClass:\n\n if axis is None:\n return axis\n\n # NOTE: iterating until hashable\n if isinstance(axis, Collection) and not isinstance(axis, Hashable):\n iter_index = (axis2index(axes, a) for a in axis)\n if isinstance(axis, np.ndarray): # force list output for numpy array\n return list(iter_index)\n return type(axis)(iter_index)\n\n if isinstance(axis, slice):\n start = axis2index(axes, axis.start)\n stop = axis2index(axes, axis.stop)\n return type(axis)(start, stop, axis.step)\n\n if isinstance(axes, dict):\n return axes[axis]\n\n if is_namedtuple(axes):\n return getattr(axes, axis)\n\n # fallback to int axes with int axis\n assert is_integer(axes) and is_integer(axis), f'unnamed axis({axis!r}) should be integer'\n\n return axis",
"def _lines_intersection(self, other):\n\n the_slope, the_y_intercept = False, False\n\n # parallel?\n if self.slope == other.slope:\n return (\n self.y_intercept == other.y_intercept and\n self.x_value == other.x_value\n )\n\n if self.is_vertical():\n x = self.x_value\n the_slope = other.slope\n the_y_intercept = other.y_intercept\n elif other.is_vertical():\n x = other.x_value\n else:\n x = (other.y_intercept - self.y_intercept) / (self.slope - other.slope)\n\n if the_slope is None or the_slope is False:\n the_slope = self.slope\n the_y_intercept = self.y_intercept\n\n y = the_slope * x + the_y_intercept\n\n return Point(x, y)",
"def _format_twin_axes(*args):\n if(len(args) > 1):\n #get minimum y limit\n ylow, yhigh = 0., 0.\n for ax in args:\n yl = ax.get_ylim()\n if(yl[0] < ylow):\n ylow = yl[0]\n yhigh = yl[1]\n #scale all axes identically so that they overlap at y = 0\n if(yhigh != 0):\n frac = ylow/yhigh\n for ax in args:\n yl = ax.get_ylim()\n ax.set_ylim(frac*yl[1], yl[1])",
"def analyze_on_axis(phase_space, id_begin, id_end, ds_slice, zplot):\n\n ps = phase_space[:, (id_begin-1):id_end, :]\n # print(np.shape(ps))\n # ps = ps[numpy.logical_not(numpy.isnan(ps))]\n\n x = ps[0, :, :]\n px = ps[1, :, :]\n y = ps[2, :, :]\n py = ps[3, :, :]\n\n id_on_axis = np.zeros((4, int(id_end-id_begin+1)))\n\n for n in range(int(id_end-id_begin+1)):\n x_this = x[n, :]\n px_this = px[n, :]\n y_this = y[n, :]\n py_this = py[n, :]\n\n # Remove all NAN elements in the phase space array\n x_this = x_this[np.logical_not(np.isnan(x_this))]\n px_this = px_this[np.logical_not(np.isnan(px_this))]\n y_this = y_this[np.logical_not(np.isnan(y_this))]\n py_this = py_this[np.logical_not(np.isnan(py_this))]\n\n ## Plot X\n plt.subplot(2, 2, 1)\n plt.plot(zplot[0:len(x_this)]*1e+6, x_this*1e+6)\n plt.ylabel('Position in X/ $\\mu$m', fontsize=10)\n\n ## Plot Y\n plt.subplot(2, 2, 2)\n plt.plot(zplot[0:len(y_this)]*1e+6, y_this*1e+6)\n plt.ylabel('Position in Y/ $\\mu$m', fontsize=10)\n\n ## Plot px\n plt.subplot(2, 2, 3)\n plt.plot(zplot[0:len(px_this)]*1e+6, px_this)\n plt.ylabel('Angle in X', fontsize=10)\n\n ## Plot py\n plt.subplot(2, 2, 4)\n plt.plot(zplot[0:len(py_this)]*1e+6, py_this)\n plt.ylabel('Angle in Y', fontsize=10)\n\n\n # plt.xlabel('Longitudianl Direction of the Bunch $s$/ $\\mu$m')\n # plt.title('First Undulator Section')\n # plt.title('Second Undulator Section')\n # plt.title('Third Undulator Section')\n\n id_on_axis[0, n] = np.argmin(np.abs(x_this))\n id_on_axis[1, n] = np.argmin(np.abs(px_this))\n id_on_axis[2, n] = np.argmin(np.abs(y_this))\n id_on_axis[3, n] = np.argmin(np.abs(py_this))\n\n fig = plt.gcf()\n fig.set_size_inches(13.5, 9)\n ax = plt.gca()\n ax.yaxis.get_major_formatter().set_powerlimits((0,1))\n fig.savefig('phase_space_U3_new.png', dpi=100)\n plt.show()\n\n\n s_on_axis = np.average(id_on_axis[2:4,:])*ds_slice\n\n return id_on_axis, s_on_axis",
"def get_tick_iterators(self, axes):\n\n lat_levs, lat_n, lat_factor = self._grid_info[\"lat_info\"]\n yy0 = lat_levs / lat_factor\n\n lon_levs, lon_n, lon_factor = self._grid_info[\"lon_info\"]\n xx0 = lon_levs / lon_factor\n\n e0, e1 = self._extremes\n\n def trf_xy(x, y):\n trf = self.grid_helper.grid_finder.get_transform() + axes.transData\n return trf.transform(np.column_stack(np.broadcast_arrays(x, y))).T\n\n # find angles\n if self.nth_coord == 0:\n mask = (e0 <= yy0) & (yy0 <= e1)\n (xx1, yy1), (dxx1, dyy1), (dxx2, dyy2) = _value_and_jacobian(\n trf_xy, self.value, yy0[mask], (-np.inf, np.inf), (e0, e1))\n labels = self._grid_info[\"lat_labels\"]\n\n elif self.nth_coord == 1:\n mask = (e0 <= xx0) & (xx0 <= e1)\n (xx1, yy1), (dxx2, dyy2), (dxx1, dyy1) = _value_and_jacobian(\n trf_xy, xx0[mask], self.value, (-np.inf, np.inf), (e0, e1))\n labels = self._grid_info[\"lon_labels\"]\n\n labels = [l for l, m in zip(labels, mask) if m]\n\n angle_normal = np.arctan2(dyy1, dxx1)\n angle_tangent = np.arctan2(dyy2, dxx2)\n mm = (dyy1 == 0) & (dxx1 == 0) # points with degenerate normal\n angle_normal[mm] = angle_tangent[mm] + np.pi / 2\n\n tick_to_axes = self.get_tick_transform(axes) - axes.transAxes\n in_01 = functools.partial(\n mpl.transforms._interval_contains_close, (0, 1))\n\n def f1():\n for x, y, normal, tangent, lab \\\n in zip(xx1, yy1, angle_normal, angle_tangent, labels):\n c2 = tick_to_axes.transform((x, y))\n if in_01(c2[0]) and in_01(c2[1]):\n yield [x, y], *np.rad2deg([normal, tangent]), lab\n\n return f1(), iter([])",
"def ft_ax(ax=None,\n y=1.03,\n yy=1.1,\n title=None,\n subtitle=None,\n source=None,\n add_box=False,\n left_axis=False):\n\n if ax is None:\n ax = plt.gca()\n\n ax.set_axisbelow(True)\n \n if title is not None:\n title = plt.title(title, y=y, loc='left')\n if subtitle is not None:\n plt.annotate(subtitle, xy=title.get_position(),\n xycoords='axes fraction', xytext=(0,-11), \n textcoords='offset points', size='large') \n \n if source is not None:\n src = plt.annotate(source, xy=(0,0), \n xycoords='axes fraction', xytext=(0,-35), \n textcoords='offset points', ha='left', va='top', size='small')\n \n # axes and grid-lines\n plt.grid(axis='y', linewidth=.5)\n sns.despine(left=True)\n if not left_axis:\n ax.yaxis.tick_right()\n ax.yaxis.set_label_position('right')\n ax.yaxis.set_label_coords(1,yy)\n ax.yaxis.get_label().set_rotation(0)\n ax.tick_params('y', length=0)\n \n plt.tight_layout()\n \n if add_box:\n ax2 = plt.axes(ax.get_position().bounds, facecolor=(1,1,1,0))\n ax2.xaxis.set_visible(False)\n ax2.yaxis.set_visible(False)\n x,y = np.array([[.01, 0.15], [y+.12, y+.12]])\n line = matplotlib.lines.Line2D(x, y, lw=6., color='k')\n ax2.add_line(line)\n line.set_clip_on(False)\n \n if add_box and source is not None:\n return (line, src)\n elif not add_box and source is not None:\n return (src,)\n elif add_box and source is None:\n return (line,)\n else:\n return []",
"def maybe_get_ax(*args, **kwargs):\n\n if 'ax' in kwargs:\n ax = kwargs.pop('ax')\n elif len(args) == 0:\n fig = plt.gcf()\n ax = plt.gca()\n elif isinstance(args[0], mpl.axes.Axes):\n ax = args[0]\n args = args[1:]\n else:\n ax = plt.gca()\n return ax, args, dict(kwargs)",
"def intersection(line1, line2):\n p0, p1, p2, p3 = map(\n lambda tup : np.array(tup[:2]),\n [line1[0], line1[1], line2[0], line2[1]]\n )\n p1, p2, p3 = map(lambda x : x - p0, [p1, p2, p3])\n transform = np.zeros((2, 2))\n transform[:,0], transform[:,1] = p1, p2\n if np.linalg.det(transform) == 0: return\n inv = np.linalg.inv(transform)\n new_p3 = np.dot(inv, p3.reshape((2, 1)))\n #Where does line connecting (0, 1) to new_p3 hit x axis\n x_intercept = new_p3[0] / (1 - new_p3[1]) \n result = np.dot(transform, [[x_intercept], [0]])\n result = result.reshape((2,)) + p0\n return result",
"def setup_axes2(fig, rect,tmin, tmax,zmin,zmax):\n\n tr =PolarAxes.PolarTransform()\n pi = np.pi\n\n angle_ticks = [(tmin, '%.2f' % tmin), (0,r'$0$'), (tmax, '%.2f' % tmax)]\n\n grid_locator1 = FixedLocator([v for v, s in angle_ticks])\n tick_formatter1 = DictFormatter(dict(angle_ticks))\n\n grid_locator2 = MaxNLocator(4)\n\n grid_helper = floating_axes.GridHelperCurveLinear(\n tr, extremes=(tmax, tmin, zmax, zmin),\n grid_locator1=grid_locator1,\n grid_locator2=grid_locator2,\n tick_formatter1=tick_formatter1,\n tick_formatter2=None)\n\n ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)\n fig.add_subplot(ax1)\n\n # create a parasite axes whose transData in RA, cz\n aux_ax = ax1.get_aux_axes(tr)\n\n aux_ax.patch = ax1.patch # for aux_ax to have a clip path as in ax\n ax1.patch.zorder = 0.95 # but this has a side effect that the patch is\n # drawn twice, and possibly over some other\n # artists. So, we decrease the zorder a bit to\n # prevent this.\n\n return ax1, aux_ax",
"def setX(ax1: Union[object, List], ax2: Union[object, List]):\n if type(ax1) is list:\n print(\"PlotHelpers: cannot use list as source to set Y axis\")\n return\n ax2 = _ax_tolist(ax2)\n # if type(ax2) is not list:\n # ax2 = [ax2]\n refx = ax1.get_xlim()\n for ax in ax2:\n ax.set_xlim(refx)",
"def itrace(a, axes=(0, 1)):\n # Single index pair to trace out\n if isinstance(axes[0], Integral):\n return np.trace(a, axis1=axes[0], axis2=axes[1])\n elif len(axes[0]) == 1:\n return np.trace(a, axis1=axes[0][0], axis2=axes[1][0])\n\n # Multiple index pairs to trace out\n gone = set()\n for axis1, axis2 in zip(*axes):\n # Modify indices to adjust for traced out dimensions\n mod1 = sum(x < axis1 for x in gone)\n mod2 = sum(x < axis2 for x in gone)\n gone |= {axis1, axis2}\n a = np.trace(a, axis1=axis1 - mod1, axis2=axis2 - mod2)\n return a",
"def plot_sensors_2d_intra(epo1: mne.Epochs, epo2: mne.Epochs, lab: bool = False):\n\n # extract sensor info and transform loc to fit with headmodel\n loc1 = copy(np.array([ch['loc'][:3] for ch in epo1.info['chs']]))\n loc1 = transform_2d_intra(loc1, traX=-0.178, traY=0.012, traZ=0, rotZ=(-np.pi/2))\n lab1 = [ch for ch in epo1.ch_names]\n\n loc2 = copy(np.array([ch['loc'][:3] for ch in epo2.info['chs']]))\n loc2 = transform_2d_intra(loc2, traX=0.178, traY=0.012, traZ=0, rotZ=(-np.pi/2))\n lab2 = [ch for ch in epo2.ch_names]\n\n bads_epo1 = []\n bads_epo1 = epo1.info['bads']\n bads_epo2 = []\n bads_epo2 = epo2.info['bads']\n\n # plot sensors\n for ch in epo1.ch_names:\n if ch in bads_epo1:\n index_ch = epo1.ch_names.index(ch)\n x1, y1, z1 = loc1[index_ch, :]\n plt.plot(x1, y1, marker='x', color='dimgrey')\n if lab:\n plt.text(x1+0.012, y1+0.012, lab1[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n else:\n index_ch = epo1.ch_names.index(ch)\n x1, y1, z1 = loc1[index_ch, :]\n plt.plot(x1, y1, marker='o', color='dimgrey')\n if lab:\n plt.text(x1+0.012, y1+0.012, lab1[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n\n \n for ch in epo2.ch_names:\n if ch in bads_epo2:\n index_ch = epo2.ch_names.index(ch)\n x2, y2, z2 = loc2[index_ch, :]\n plt.plot(x2, y2, marker='x', color='dimgrey')\n if lab:\n plt.text(x2+0.012, y2+0.012, lab2[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n else:\n index_ch = epo2.ch_names.index(ch)\n x2, y2, z2 = loc2[index_ch, :]\n plt.plot(x2, y2, marker='o', color='dimgrey')\n if lab:\n plt.text(x2+0.012, y2+0.012, lab2[index_ch],\n horizontalalignment='center',\n verticalalignment='center')",
"def plot_2d_topomap_intra(ax):\n\n # plot first Head \n N = 300 # number of points for interpolation\n xy_center = [-0.178,0] # center of the plot\n radius = 0.1 # radius\n\n # draw a circle\n circle = matplotlib.patches.Circle(xy = xy_center, radius = radius, edgecolor = \"k\", facecolor = \"w\")\n ax.add_patch(circle)\n \n # make the axis invisible \n for loc, spine in ax.spines.items():\n spine.set_linewidth(0)\n \n # remove the ticks\n ax.set_xticks([])\n ax.set_yticks([])\n \n # add some body parts. Hide unwanted parts by setting the zorder low\n # add two ears\n circle = matplotlib.patches.Ellipse(xy = [-0.083,-0.012], width = 0.025, height = 0.05, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n circle = matplotlib.patches.Ellipse(xy = [-0.273,-0.012], width = 0.025, height = 0.05, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n ## add a nose\n xy = [[-0.151,0.091],[-0.205,0.091], [-0.178,0.11]]\n polygon = matplotlib.patches.Polygon(xy = xy, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(polygon) \n \n\n # Plot second Head \n x2y2_center = [0.178,0] # center of the plot\n radius2 = 0.1 # radius\n \n # draw a circle\n circle = matplotlib.patches.Circle(xy = x2y2_center, radius = radius2, edgecolor = \"k\", facecolor = \"w\")\n ax.add_patch(circle)\n \n # make the axis invisible \n for loc, spine in ax.spines.items():\n spine.set_linewidth(0)\n \n # remove the ticks\n ax.set_xticks([])\n ax.set_yticks([])\n \n ## add some body parts. Hide unwanted parts by setting the zorder low\n # add two ears\n circle = matplotlib.patches.Ellipse(xy = [0.083,-0.012], width = 0.025, height = 0.05, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n circle = matplotlib.patches.Ellipse(xy = [0.273,-0.012], width = 0.025, height = 0.05, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n ## add a nose\n x2y2 = [[0.151,0.091],[0.205,0.091], [0.178,0.11]]\n polygon = matplotlib.patches.Polygon(xy = x2y2, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(polygon)",
"def _get_lines_parallel_to_axis(\n self,\n axis_parallel_to: NumberLine,\n axis_perpendicular_to: NumberLine,\n freq: float,\n ratio_faded_lines: int,\n ) -> tuple[VGroup, VGroup]:\n\n line = Line(axis_parallel_to.get_start(), axis_parallel_to.get_end())\n if ratio_faded_lines == 0: # don't show faded lines\n ratio_faded_lines = 1 # i.e. set ratio to 1\n step = (1 / ratio_faded_lines) * freq\n lines1 = VGroup()\n lines2 = VGroup()\n unit_vector_axis_perp_to = axis_perpendicular_to.get_unit_vector()\n\n # need to unpack all three values\n x_min, x_max, _ = axis_perpendicular_to.x_range\n\n # account for different axis scalings (logarithmic), where\n # negative values do not exist and [-2 , 4] should output lines\n # similar to [0, 6]\n if axis_perpendicular_to.x_min > 0 and x_min < 0:\n x_min, x_max = (0, np.abs(x_min) + np.abs(x_max))\n\n # min/max used in case range does not include 0. i.e. if (2,6):\n # the range becomes (0,4), not (0,6).\n ranges = (\n [0],\n np.arange(step, min(x_max - x_min, x_max), step),\n np.arange(-step, max(x_min - x_max, x_min), -step),\n )\n\n for inputs in ranges:\n for k, x in enumerate(inputs):\n new_line = line.copy()\n new_line.shift(unit_vector_axis_perp_to * x)\n if (k + 1) % ratio_faded_lines == 0:\n lines1.add(new_line)\n else:\n lines2.add(new_line)\n return lines1, lines2",
"def axes_subplots():\n # gerenate data\n x = np.arange(0, 6 * np.pi+0.2, 0.2)\n y_1 = np.cos(x)\n y_2 = np.sin(2*x)\n y_3 = y_1 + y_2\n\n # display multiple\n fig, axs = plt.subplots(3, 1, sharex=True)\n fig.suptitle('Subplots w/ shared axes')\n axs[0].plot(x, y_1)\n axs[1].plot(x, y_2)\n axs[2].plot(x, y_3)\n axs[0].set_ylabel('$y$')\n axs[1].set_ylabel('$y$')\n axs[2].set_ylabel('$y$')\n\n plt.show()\n\n return None",
"def _getAxesExtent(\n self,\n x0: float,\n y0: float,\n x1: float,\n y1: float,\n enabledAxes: Optional[EnabledAxes] = None,\n ) -> AxesExtent:\n if enabledAxes is None:\n enabledAxes = self.enabledAxes\n\n y2_0, y2_1 = y0, y1\n left, top, width, height = self.plot.getPlotBoundsInPixels()\n\n if not all(enabledAxes) and not self.plot.isKeepDataAspectRatio():\n # Handle axes disabled for zoom if plot is not keeping aspec ratio\n if not enabledAxes.xaxis:\n x0, x1 = left, left + width\n if not enabledAxes.yaxis:\n y0, y1 = top, top + height\n if not enabledAxes.y2axis:\n y2_0, y2_1 = top, top + height\n\n if self.plot.isKeepDataAspectRatio() and height != 0 and width != 0:\n ratio = width / height\n xextent, yextent = math.fabs(x1 - x0), math.fabs(y1 - y0)\n if xextent != 0 and yextent != 0:\n if xextent / yextent > ratio:\n areaHeight = xextent / ratio\n center = 0.5 * (y0 + y1)\n y0 = center - numpy.sign(y1 - y0) * 0.5 * areaHeight\n y1 = center + numpy.sign(y1 - y0) * 0.5 * areaHeight\n else:\n areaWidth = yextent * ratio\n center = 0.5 * (x0 + x1)\n x0 = center - numpy.sign(x1 - x0) * 0.5 * areaWidth\n x1 = center + numpy.sign(x1 - x0) * 0.5 * areaWidth\n\n # Convert to data space\n x0, y0 = self.plot.pixelToData(x0, y0, check=False)\n x1, y1 = self.plot.pixelToData(x1, y1, check=False)\n y2_0 = self.plot.pixelToData(None, y2_0, axis=\"right\", check=False)[1]\n y2_1 = self.plot.pixelToData(None, y2_1, axis=\"right\", check=False)[1]\n\n return AxesExtent(\n min(x0, x1),\n max(x0, x1),\n min(y0, y1),\n max(y0, y1),\n min(y2_0, y2_1),\n max(y2_0, y2_1),\n )",
"def convert_axis( mv, axisold, axisindnew ):\n (axisnew, indexina3) = axisindnew\n axes = allAxes(mv)\n kold = None\n for k in range(len(axes)):\n if axes[k]==axisold: kold=k\n if kold==None:\n print \"ERROR. convert_axis cannot find axis\",axisold,\" in variable\",mv\n if len(axisold)==len(axisnew):\n mv.setAxis( kold, axisnew )\n return\n # Here's what we would do in 1-D:\n # newdata = ma.ones(len(axisnew))*mv.missing_value # Note that a FileVariable's missing_value is a tuple.\n # for i in range(len(axisold)):\n # newdata[ indexina3[i] ] = ma[i]\n # newmv = cdms2.createVariable( newdata, id=mv.id )\n # >1-D is the same idea, but more dimensions are coming along for the ride,\n # making it more complicated...\n shape0 = mv.shape\n shape0[kold] = len(axisnew)\n newdata = ma.ones(shape0)*mv.missing_value # Note that a FileVariable's missing_value is a tuple.\n # We want to copy ma to newdata - except that we need indirect indexing for the kold-th axis.\n # There seems to be nothing in numpy for treating one axis differently from the rest\n # (except for ellipsis, but it makes sense to use only one ellipsis and we would need two here).\n # The following will do the job. It would be very slow for an array with many big dimensions,\n # but the arrays here have already been reduced for graphics; the index sets will be small or\n # empty...\n ranges = map( range, shape0[0:kold] )\n for i in range(len(axisold)):\n for idx in apply(itertools.product,ranges):\n idx = idx + [indexina3(i)] + [Ellipsis]\n idxo = idx + [i] + [Ellipsis]\n newdata[ tuple(idx) ] = mv[idxo]\n newmv = cdms2.createVariable( newdata, id=mv.id )",
"def _lines_overlap_on_x_axis(self, line1, line2):\n x1, x2, = line1[0][0], line1[1][0]\n x3, x4, = line2[0][0], line2[1][0]\n e1_left, e1_right = min(x1, x2), max(x1, x2)\n e2_left, e2_right = min(x3, x4), max(x3, x4)\n return (e1_left >= e2_left and e1_left <= e2_right) or (e1_right >= e2_left and e1_right <= e2_right) or \\\n (e2_left >= e1_left and e2_left <= e1_right) or (e2_right >= e1_left and e2_right <= e1_right)"
] | [
"0.61138195",
"0.6088058",
"0.6041942",
"0.5724058",
"0.55722874",
"0.54428166",
"0.5421346",
"0.5317659",
"0.53087",
"0.52630603",
"0.5246884",
"0.521855",
"0.5203898",
"0.5200251",
"0.51671886",
"0.5156836",
"0.51552814",
"0.5126504",
"0.51036406",
"0.5095335",
"0.5079292",
"0.507371",
"0.5068818",
"0.5067907",
"0.5063909",
"0.5045423",
"0.50289595",
"0.5015576",
"0.5002713",
"0.5001157"
] | 0.69112474 | 0 |
Not much tested; I decided against doing overlapping line plots this way. Returns a TransientVariable made by replacing an axis axisold of a TransientVariable mv with a new axis. The new axis will have all points of the old axis, but may have more, thus requiring the new variable to have more missing data. The variable axisindnew is a 2-tuple, containing the new axis and index information describing which elements came from the old axis. In terms of common_axis(), it is (axis3,a1indexina3) or (axis3,a2indexina3). | def convert_axis( mv, axisold, axisindnew ):
(axisnew, indexina3) = axisindnew
axes = allAxes(mv)
kold = None
for k in range(len(axes)):
if axes[k]==axisold: kold=k
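    # kold records the position of axisold among the axes of mv.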
    if kold==None:
        print "ERROR. convert_axis cannot find axis",axisold," in variable",mv
        return None
    if len(axisold)==len(axisnew):
        mv.setAxis( kold, axisnew )
        return mv
# Here's what we would do in 1-D:
# newdata = ma.ones(len(axisnew))*mv.missing_value # Note that a FileVariable's missing_value is a tuple.
# for i in range(len(axisold)):
# newdata[ indexina3[i] ] = ma[i]
# newmv = cdms2.createVariable( newdata, id=mv.id )
# >1-D is the same idea, but more dimensions are coming along for the ride,
# making it more complicated...
    shape0 = list(mv.shape)  # mv.shape is a tuple; copy to a list so the kold-th entry can be resized
shape0[kold] = len(axisnew)
newdata = ma.ones(shape0)*mv.missing_value # Note that a FileVariable's missing_value is a tuple.
# We want to copy ma to newdata - except that we need indirect indexing for the kold-th axis.
# There seems to be nothing in numpy for treating one axis differently from the rest
# (except for ellipsis, but it makes sense to use only one ellipsis and we would need two here).
# The following will do the job. It would be very slow for an array with many big dimensions,
# but the arrays here have already been reduced for graphics; the index sets will be small or
# empty...
    ranges = map( range, shape0[0:kold] )
    for i in range(len(axisold)):
        for idx in itertools.product( *ranges ):
            # idx indexes the axes preceding kold; indexina3[i] is where the i-th point of the
            # old axis sits in the new axis.
            idxn = list(idx) + [ indexina3[i] ] + [Ellipsis]
            idxo = list(idx) + [ i ] + [Ellipsis]
            newdata[ tuple(idxn) ] = mv[ tuple(idxo) ]
    newmv = cdms2.createVariable( newdata, id=mv.id )
    return newmv
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def replace_axis(self, dim:NamedIndex,\n mapping_or_old:'Union[Mapping[NamedIndex, NamedIndex], NamedIndex]',\n new:'Optional[NamedIndex]'=None):\n\n axes = self[dim] # disable idx_dim access\n # axes = self.get(dim) or self._dim_axes[dim] # dim:'Union[int, NamedIndex]'\n is_tuple_axes = is_namedtuple(axes)\n assert isinstance(axes, dict) or is_tuple_axes, (\n f'unnamed dim({dim!r}) cannot be renamed')\n\n axes_keys = axes._fields if is_tuple_axes else axes.keys()\n axes_iter = iter(zip(axes._fields, axes)) if is_tuple_axes else axes.items()\n axes_ = OrderedDict()\n\n if new is None:\n assert isinstance(mapping_or_old, dict), (\n f\"'mapping_or_old'({type(mapping_or_old)}) is expected to be a dict \"\n \"when 'new' is None\")\n\n mapping = mapping_or_old\n for axis, index in axes_iter:\n axis = mapping.get(axis, axis)\n assert axis not in axes_, f'axis {axis!r} in mapping is conflicted'\n\n axes_[axis] = index\n else:\n assert new not in axes_keys, f'new axis({new!r}) is confilicted'\n\n old = mapping_or_old\n for axis, index in axes_iter:\n axes_[new if axis == old else axis] = index\n\n axes_ = namedtuple(dim, axes_.keys())(**axes_) if is_tuple_axes else type(axes)(axes_)\n ret = OrderedDict()\n for dim_, axes in self.items():\n ret[dim_] = axes_ if dim_ == dim else axes\n\n return type(self)(ret)",
"def common_axis( axis1, axis2 ):\n if hasattr( axis1, 'units' ):\n units1 = axis1.units.lower().replace(' ','_')\n if axis1.isTime():\n axis1.toRelativeTime( units1 ) #probably will change input argument\n else:\n units1 = None\n if hasattr( axis2, 'units' ):\n units2 = axis2.units.lower().replace(' ','_')\n else:\n units2 = None\n if units1!=None and units2!=None and units1 != units2:\n if axis1.isTime() and axis2.isTime():\n axis2.toRelativeTime( units1, axis1.getCalendar() ) #probably will change input argument\n else:\n print \"ERROR. common_axis does not yet support differing units\",axis1.units,\" and \",axis2.units\n return None\n if axis1.isTime() or axis2.isTime():\n if not axis2.isTime() or not axis1.isTime():\n print \"ERROR. In common_axis, one axis is time, not the other\"\n return None\n if not axis1.calendar==axis2.calendar:\n print \"ERROR. common_axis does not yet support differing calendars.\"\n if len(axis1)==1 and len(axis2)==1:\n # There's just one time value, probably from averaging over time. The time value is meaningless\n # but it would be messy to have two.\n return (axis1,[0],[0])\n\n # to do: similar checks using isLatitude and isLongitude and isLevel <<<<<<\n # Also, transfer long_name, standard_name, axis attributes if in agreement;\n # units and calendar attributes should always be transferred if present.\n # Also to do: use bounds if available\n a12 = numpy.concatenate( [ axis1.getData(), axis2.getData() ] )\n a3, a12indexina3 = numpy.unique( a12, return_inverse=True )\n #... a3 has only unique indices and is sorted (unfortunately, uniqueness is based on exact identity,\n # not to some numerical tolerance). For an i index into a12 (thus 0<=i<len(axis1)+len(axis2),\n # j is an index into a3 such that, if a12indexina3[i]==j, then a1[i]==a3[j].\n a1indexina3 = a12indexina3[0:len(axis1)]\n a2indexina3 = a12indexina3[len(axis1):len(axis1)+len(axis2)]\n\n if hasattr(axis1,'id') and hasattr(axis2,'id') and axis1.id==axis2.id :\n vid = axis1.id\n else:\n vid = None\n axis3 = cdms2.createAxis( a3, bounds=None, id=vid )\n axis3.units = units1\n return (axis3,a1indexina3,a2indexina3)",
"def delete_singleton_axis( mv, vid=None ):\n axes = allAxes(mv)\n saxis = None\n si = None\n for i in range(len(axes)):\n if len(axes[i])==1 and (vid==None or axes[i].id==vid):\n saxis = axes[i]\n si = i\n del axes[si]\n break\n if saxis==None: return mv\n data = ma.copy( mv.data )\n if numpy.version.version >= '1.7.0':\n data = ma.squeeze( data, axis=si )\n else:\n data = ma.squeeze( data ) # let's hope that there's only one singleton!\n mvnew = cdms2.createVariable ( data, axes=axes, id=mv.id )\n if hasattr(mv,'units'): mvnew.units = mv.units\n return mvnew",
"def replace_dim(self, old:NamedIndex, new:NamedIndex):\n\n assert new not in self, f'new dim({new!r}) is confilicted'\n\n ret = OrderedDict()\n for dim, axes in self.items():\n if dim == old:\n if is_namedtuple(axes):\n axes = namedtuple(new, axes._fields)(*axes)\n ret[new] = axes\n else:\n ret[old] = axes\n\n return type(self)(ret)",
"def test_newaxis(self):\r\n newaxis = numpy.newaxis\r\n\r\n n = self.shared(numpy.arange(24, dtype=self.dtype).reshape((2, 3, 4)))\r\n assert n.ndim == 3\r\n\r\n n4 = n[newaxis, :, :, :]\r\n assert n4.broadcastable == (True, False, False, False), n4\r\n\r\n n4 = n[:, newaxis, :, :]\r\n assert n4.broadcastable == (False, True, False, False), n4\r\n\r\n n4 = n[:, :, newaxis, :]\r\n assert n4.broadcastable == (False, False, True, False), n4\r\n\r\n n4 = n[:, :, :, newaxis]\r\n assert n4.broadcastable == (False, False, False, True), n4\r\n\r\n n3 = n.flatten()[newaxis, :, newaxis]\r\n assert n3.broadcastable == (True, False, True), n3\r\n\r\n s = cscalar()\r\n s1 = s[newaxis]\r\n assert s1.broadcastable == (True,), s1\r\n\r\n vs1, vn3, vn4 = theano.function([s], [s1, n3, n4])(-2.0)\r\n\r\n assert numpy.all(vs1 == [-2.0])\r\n assert numpy.all(vn3\r\n == numpy.arange(24)[newaxis, :, newaxis])\r\n assert numpy.all(vn4\r\n == numpy.arange(24).reshape((2, 3, 4))[:, :, :, newaxis])",
"def common_axes( mv1, mv2 ):\n axes1 = [a[0] for a in mv1.getDomain()]\n axes2 = [a[0] for a in mv2.getDomain()]\n if len(axes1)!=len(axes2):\n print \"ERROR. common_axes requires same number of axes in\",mv1,\" and\",mv2\n return None\n axes3 = []\n for i in range(len(axes1)):\n axes3.append(common_axis( axes1[i], axes2[i] ))\n return axes3",
"def addExtraAxis(slab,newaxis=None,axis=0,verbose=False):\n\n import cdms2 as cdms\n import MV2 as MV\n\n if newaxis is None:\n newaxis=cdms.createAxis([1,])\n newaxis.units=''\n\n # add new axis to axis list of input <slab>\n axislist=slab.getAxisList()\n axislist.insert(axis,newaxis)\n\n #----------------Reshape----------------\n shape=list(slab.shape)\n shape.insert(axis,len(newaxis))\n slab2=MV.reshape(slab,shape)\n\n #------------Create variable------------\n att_dict=attribute_obj2dict(slab)\n slab2=cdms.createVariable(slab2,axes=axislist,attributes=att_dict,\\\n typecode='f')\n slab2.id=slab.id\n\n if verbose:\n print('\\n# <addExtraAxis>: Originial variable shape:',slab.shape)\n print('# <addExtraAxis>: New variable shape:',slab2.shape)\n\n return slab2",
"def _make_twin_axes(self, *args, **kwargs):\n # Typically, SubplotBase._make_twin_axes is called instead of this.\n # There is also an override in axes_grid1/axes_divider.py.\n if 'sharex' in kwargs and 'sharey' in kwargs:\n raise ValueError('Twinned Axes may share only one axis.')\n ax2 = self.figure.add_axes(self.get_position(True), *args, **kwargs)\n self.set_adjustable('datalim')\n ax2.set_adjustable('datalim')\n self._twinned_axes.join(self, ax2)\n return ax2",
"def aminusb_ax2( mv1, mv2 ):\n if hasattr(mv1,'units') and hasattr(mv2,'units') and mv1.units!=mv2.units:\n print \"WARING: aminusb_ax2 is subtracting variables with different units!\",mv1,mv1\n axes1 = allAxes(mv1)\n axes2 = allAxes(mv2)\n # TO DO: convert, interpolate, etc. as needed to accomodate differing first axes.\n # But for now, we'll just check a bit ...\n ax1=axes1[0]\n ax2=axes2[0]\n if ax1.shape!=ax2.shape:\n print \"ERROR aminusb_ax2 requires same axes, but shape differs:\",ax1.shape,ax2.shape\n print \"ax1,ax2\"\n return None\n if hasattr(ax1,'units') and hasattr(ax2,'units') and ax1.units!=ax2.units:\n if ax1.units=='mb':\n ax1.units = 'mbar' # udunits uses mb for something else\n if ax2.units=='mb':\n ax2.units = 'mbar' # udunits uses mb for something else\n tmp = udunits(1.0,ax2.units)\n s,i = tmp.how(ax1.units) # will raise an exception if conversion not possible\n # crude substitute for a real units library:\n #if not (ax1.units=='mb' and ax2.units=='millibars') and\\\n # not (ax1.units=='millibars' and ax2.units=='mb'):\n # print \"ERROR aminusb_ax2 requires same axes, but units differ:\",ax1.units,ax2,units\n # print \"ax1,ax2\"\n # return None\n ab_axes = [ax1]\n if len(axes1[1])<=len(axes2[1]):\n a = mv1\n b = interp2( axes1[1], mv2 )\n ab_axes.append(axes1[1])\n else:\n a = interp2( axes2[1], mv1 )\n b = mv2\n ab_axes.append(axes2[1])\n aminusb = a - b\n aminusb.id = mv1.id\n aminusb.initDomain( ab_axes )\n return aminusb",
"def swapaxes(self, a1, a2):\n an = self.axes_names[:]\n ia1, ia2 = self.get_axis_id(a1), self.get_axis_id(a2)\n an[ia2], an[ia1] = an[ia1], an[ia2]\n return xndarray(np.swapaxes(self.data, ia1, ia2), an, self.axes_domains,\n self.value_label, self.meta_data)",
"def interp2( newaxis1, mv ):\n missing = mv.get_fill_value()\n axes = allAxes(mv)\n if len(newaxis1[:])>len(axes[1][:]): return mv\n new_vals = numpy.ma.masked_all( ( len(axes[0]), len(newaxis1[:]) ) )\n for i in range(len( axes[0] )):\n new_vals[i,:] = numpy.interp( newaxis1[:], axes[1][:], mv[i,:], left=missing, right=missing )\n # numpy.interp loses the mask, and I want to propagate it! But we can separately interpolate\n # the mask - which numpy.interp treats False as 0, True as 1:\n new_vals.mask[i,:] = ( numpy.interp( newaxis1[:], axes[1][:], mv.mask[i,:], left=missing,\n right=missing ) )>0\n return new_vals",
"def aminusb_2ax( mv1, mv2 ):\n return mv2\n mv1, mv2 = reconcile_units( mv1, mv2 )\n missing = mv1.get_fill_value()\n axes1 = allAxes(mv1)\n axes2 = allAxes(mv2)\n if axes1 is None or axes2 is None: return None\n if len(axes1)!=2: print \"ERROR @1, wrong number of axes for aminusb_2ax\",axes1\n if len(axes2)!=2: print \"ERROR @2, wrong number of axes for aminusb_2ax\",axes2\n if len(axes1[0])==len(axes2[0]):\n # Only axis2 differs, there's a better way...\n return aminusb_ax2( mv1, mv2 )\n if len(axes1[0])<=len(axes2[0]):\n if len(axes1[1])<=len(axes2[1]):\n mv1new = mv1\n # Interpolate mv2 from axis2 to axis1 in both directions. Use the CDAT regridder.\n grid1 = mv1.getGrid()\n mv2new = mv2.regrid(grid1)\n else:\n # Interpolate mv1 from axis1[1] to axis2[1]\n # Interpolate mv2 from axis2[0] to axis1[0]\n print \"ERROR @3, aminusb_2ax IS NOT FINISHED\"\n return None\n else:\n if len(axes1[1])<=len(axes2[1]):\n # Interpolate mv1 from axis1[0] to axis2[0]\n # Interpolate mv2 from axis2[1] to axis1[1]\n print \"ERROR @4, aminusb_2ax IS NOT FINISHED\"\n return None\n else:\n mv2new = mv2\n # Interpolate mv2 from axis2 to axis1 in both directions. Use the CDAT regridder.\n grid2 = mv2.getGrid()\n mv1new = mv1.regrid(grid2)\n aminusb = mv1new - mv2new\n aminusb.id = mv1.id\n return aminusb",
"def duplicate_axes(isl_obj, duplicate_inames, new_inames):\n if isinstance(isl_obj, list):\n return [\n duplicate_axes(i, duplicate_inames, new_inames)\n for i in isl_obj]\n\n if not duplicate_inames:\n return isl_obj\n\n def _align_and_intersect(d1, d2):\n d1, d2 = isl.align_two(d1, d2)\n return d1 & d2\n\n old_name_to_new_name = dict(zip(duplicate_inames, new_inames))\n\n dup_isl_obj = isl_obj\n\n for old_name, (dt, pos) in isl_obj.get_var_dict().items():\n dup_isl_obj = dup_isl_obj.set_dim_name(dt, pos,\n old_name_to_new_name.get(old_name,\n old_name))\n\n return _align_and_intersect(dup_isl_obj, isl_obj)",
"def reorderAxesEvent(self):\n axisB = self.sender().text()\n self.myParent.swapAxes(self.axisName, axisB)\n self.myParent.setVistrailsVariableAxes()",
"def test_var_and_raw_var_same_index(self):\n\n # Swap first row for second one\n var = Validator.getattr_anndata(self.validator.adata, \"var\")\n\n # First swap the index\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n\n # Then swap the actual rows\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n\n self.validator.validate_adata()\n print(\"FOO\", self.validator.errors)\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"],\n )",
"def interpolate_vertical(ml_file, inter_file, new_vertical_axis):\n with xr.load_dataset(inter_file) as interpolated:\n reference = [variable for variable in interpolated.variables if len(interpolated[variable].shape) == 4][0]\n with xr.open_dataset(ml_file) as ml:\n for variable in [variable for variable in ml.variables if variable not in interpolated.variables\n and len(ml[variable].dims) == 4\n and \"lev_2\" in ml[variable].dims]:\n try:\n x = np.array(ml[new_vertical_axis].data)\n y = np.array(ml[variable].data)\n interpolated_data = interpolate_1d(interpolated[\"lev\"].data, x, y, axis=1)\n attributes = ml[variable].attrs\n\n interpolated[variable] = interpolated[reference].copy(data=interpolated_data)\n interpolated[variable].attrs = ml[variable].attrs\n except Exception as e:\n print(variable, e)\n interpolated.to_netcdf(inter_file)",
"def adjust_axes(axes):\n # TODO: Uncomment & decide for each subplot!\n for ax in axes.itervalues():\n core.hide_axis(ax)\n\n for k in [\n \"placeholder\",\n \"placeholder1\",\n \"placeholder2\",\n \"spikes_stim\",\n \"spikes_stim1\",\n \"spikes_stim2\",\n \"spikes_post\",\n \"stimulation_schema\"\n ]:\n axes[k].set_frame_on(False)",
"def aminusb_1ax( mv1, mv2 ):\n mv1, mv2 = reconcile_units( mv1, mv2 )\n if hasattr(mv1,'units') and hasattr(mv2,'units') and mv1.units!=mv2.units:\n print \"WARNING: aminusb_1ax1 is subtracting variables with different units!\",mv1,mv1\n if mv1 is None or mv2 is None: return None\n missing = mv1.get_fill_value()\n axis1 = allAxes(mv1)[0]\n axis2 = allAxes(mv2)[0]\n if len(axis1)<=len(axis2):\n a = mv1\n b = numpy.interp( axis1[:], axis2[:], mv2[:], left=missing, right=missing )\n else:\n a = numpy.interp( axis2[:], axis1[:], mv1[:], left=missing, right=missing )\n b = mv2\n aminusb = a - b\n aminusb.id = mv1.id\n return aminusb",
"def plot_sensors_3d_intra(ax: str, epo1: mne.Epochs, epo2: mne.Epochs, lab: bool = False):\n\n # extract sensor infos and transform loc to fit with headmodel \n loc1 = copy(np.array([ch['loc'][:3] for ch in epo1.info['chs']]))\n loc1 = transform(loc1, traX=0, traY=0, traZ=0.04, rotY=0, rotZ=(-np.pi/2))\n lab1 = [ch for ch in epo1.ch_names]\n\n loc2 = copy(np.array([ch['loc'][:3] for ch in epo2.info['chs']]))\n loc2 = transform(loc2, traX=0, traY=0.5, traZ=0.04, rotY=0, rotZ=(-np.pi/2))\n lab2 = [ch for ch in epo2.ch_names]\n\n bads_epo1 =[]\n bads_epo1 = epo1.info['bads']\n bads_epo2 =[]\n bads_epo2 = epo2.info['bads']\n\n # plot sensors\n for ch in epo1.ch_names:\n if ch in bads_epo1:\n index_ch = epo1.ch_names.index(ch)\n x1, y1, z1 = loc1[index_ch, :]\n ax.scatter(x1, y1, z1, marker='x', color='dimgrey')\n if lab:\n ax.text(x1+0.012, y1+0.012 ,z1, lab1[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n else:\n index_ch = epo1.ch_names.index(ch)\n x1, y1, z1 = loc1[index_ch, :]\n ax.scatter(x1, y1, z1, marker='o', color='dimgrey')\n if lab:\n ax.text(x1+0.012, y1+0.012 ,z1, lab1[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n\n for ch in epo2.ch_names:\n if ch in bads_epo2:\n index_ch = epo2.ch_names.index(ch)\n x2, y2, z2 = loc2[index_ch, :]\n ax.scatter(x2, y2, z2, marker='x', color='dimgrey')\n if lab:\n ax.text(x2+0.012, y2+0.012 ,z2, lab2[index_ch],\n horizontalalignment='center',\n verticalalignment='center')\n else:\n index_ch = epo2.ch_names.index(ch)\n x2, y2, z2 = loc2[index_ch, :]\n ax.scatter(x2, y2, z2, marker='o', color='dimgrey')\n if lab:\n ax.text(x2+0.012, y2+0.012 ,z2, lab2[index_ch],\n horizontalalignment='center',\n verticalalignment='center')",
"def adjust_axes(axis):\r\n x_lim = axis.get_xlim()\r\n y_lim = axis.get_ylim()\r\n new_lim = (min(x_lim[0], y_lim[0]), max(x_lim[1], y_lim[1]))\r\n axis.set_xlim(new_lim)\r\n axis.set_ylim(new_lim)\r\n axis.set_aspect('equal')",
"def _metadata_changed(self, old, new):\n\n #self.cross_plot.value_range.low = self.minz\n #self.cross_plot.value_range.high = self.maxz\n #self.cross_plot2.value_range.low = self.minz\n #self.cross_plot2.value_range.high = self.maxz\n if self._imag_index.metadata.has_key(\"selections\"):\n x_ndx, y_ndx = self._imag_index.metadata[\"selections\"]\n if y_ndx and x_ndx:\n# xdata, ydata = self._image_index.get_data()\n# xdata, ydata = xdata.get_data(), ydata.get_data()\n self.pd_horiz.set_data(\"horiz\", self._image_value.data[y_ndx,:])\n self.pd_vert.set_data(\"vert\", self._image_value.data[:,x_ndx])",
"def analyze_on_axis(phase_space, id_begin, id_end, ds_slice, zplot):\n\n ps = phase_space[:, (id_begin-1):id_end, :]\n # print(np.shape(ps))\n # ps = ps[numpy.logical_not(numpy.isnan(ps))]\n\n x = ps[0, :, :]\n px = ps[1, :, :]\n y = ps[2, :, :]\n py = ps[3, :, :]\n\n id_on_axis = np.zeros((4, int(id_end-id_begin+1)))\n\n for n in range(int(id_end-id_begin+1)):\n x_this = x[n, :]\n px_this = px[n, :]\n y_this = y[n, :]\n py_this = py[n, :]\n\n # Remove all NAN elements in the phase space array\n x_this = x_this[np.logical_not(np.isnan(x_this))]\n px_this = px_this[np.logical_not(np.isnan(px_this))]\n y_this = y_this[np.logical_not(np.isnan(y_this))]\n py_this = py_this[np.logical_not(np.isnan(py_this))]\n\n ## Plot X\n plt.subplot(2, 2, 1)\n plt.plot(zplot[0:len(x_this)]*1e+6, x_this*1e+6)\n plt.ylabel('Position in X/ $\\mu$m', fontsize=10)\n\n ## Plot Y\n plt.subplot(2, 2, 2)\n plt.plot(zplot[0:len(y_this)]*1e+6, y_this*1e+6)\n plt.ylabel('Position in Y/ $\\mu$m', fontsize=10)\n\n ## Plot px\n plt.subplot(2, 2, 3)\n plt.plot(zplot[0:len(px_this)]*1e+6, px_this)\n plt.ylabel('Angle in X', fontsize=10)\n\n ## Plot py\n plt.subplot(2, 2, 4)\n plt.plot(zplot[0:len(py_this)]*1e+6, py_this)\n plt.ylabel('Angle in Y', fontsize=10)\n\n\n # plt.xlabel('Longitudianl Direction of the Bunch $s$/ $\\mu$m')\n # plt.title('First Undulator Section')\n # plt.title('Second Undulator Section')\n # plt.title('Third Undulator Section')\n\n id_on_axis[0, n] = np.argmin(np.abs(x_this))\n id_on_axis[1, n] = np.argmin(np.abs(px_this))\n id_on_axis[2, n] = np.argmin(np.abs(y_this))\n id_on_axis[3, n] = np.argmin(np.abs(py_this))\n\n fig = plt.gcf()\n fig.set_size_inches(13.5, 9)\n ax = plt.gca()\n ax.yaxis.get_major_formatter().set_powerlimits((0,1))\n fig.savefig('phase_space_U3_new.png', dpi=100)\n plt.show()\n\n\n s_on_axis = np.average(id_on_axis[2:4,:])*ds_slice\n\n return id_on_axis, s_on_axis",
"def setupVariableAxes(self):\n if self.var is None:\n return\n \n if (self.axisList is None):\n self.axisList = self.var.getAxisList()\n self.axisOrder = range(len(self.axisList))\n\n self.clear() \n self.setAxesNames()\n \n # Iterate through the variables axes & init each axis widget\n axisIndex = 0\n for axis, axisName in zip(self.axisList, self.axesNames):\n # Create the axis widget\n axisWidget = QAxis(axis, axisName, axisIndex, self)\n axisWidget.setAxisButtonText(axisName)\n self.axisWidgets.append(axisWidget)\n\n # Setup the layout for each axis\n row = self.gridLayout.rowCount()\n self.gridLayout.addWidget(axisWidget.getAxisButton(), row, 0)\n self.gridLayout.addWidget(axisWidget, row, 1) \n self.gridLayout.addWidget(axisWidget.getAxisOperationsButton(), row, 2)\n\n # Create separator line between each axis widget\n vline = QtGui.QFrame()\n vline.setFrameStyle(QtGui.QFrame.HLine | QtGui.QFrame.Sunken)\n self.gridLayout.addWidget(vline, row+1, 0, 1,\n self.gridLayout.columnCount())\n\n axisIndex += 1\n\n self.gridLayout.setRowStretch(self.gridLayout.rowCount(), 1)",
"def translate(self, diff: AnyVec) -> None:\n for p in self.planes:\n p += diff\n\n u_axis = Vec(self.uaxis.x, self.uaxis.y, self.uaxis.z)\n v_axis = Vec(self.vaxis.x, self.vaxis.y, self.vaxis.z)\n\n # Fix offset - see 2013 SDK utils/vbsp/map.cpp:2237\n self.uaxis.offset -= Vec.dot(u_axis, diff) / self.uaxis.scale\n self.vaxis.offset -= Vec.dot(v_axis, diff) / self.vaxis.scale",
"def _update_axes(self):\n data_shape = self.data.shape\n if len(self.axes) < self.data.ndim + 1:\n self._axes.append(Axis())\n for index in range(self.data.ndim):\n if len(self.axes[index].values) != data_shape[index]:\n self.axes[index].values = np.arange(data_shape[index],\n dtype=np.float64)",
"def _make_axes(self, hdr, quiet=False, novec=False, vonly=False, simple=False):\n\n # PULL THE IMAGE/CUBE SIZES FROM THE HEADER\n naxis = int(hdr['NAXIS'])\n naxis1 = int(hdr['NAXIS1'])\n naxis2 = int(hdr['NAXIS2'])\n if naxis > 2:\n naxis3 = hdr['NAXIS3']\n\n ## EXTRACT FITS ASTROMETRY STRUCTURE\n ww = astropy.wcs.WCS(hdr)\n\n #IF DATASET IS A CUBE THEN WE MAKE THE THIRD AXIS IN THE SIMPLEST WAY POSSIBLE (NO COMPLICATED ASTROMETRY WORRIES FOR FREQUENCY INFORMATION)\n if naxis > 3:\n #GRAB THE RELEVANT INFORMATION FROM THE ASTROMETRY HEADER\n cd = ww.wcs.cd\n crpix = ww.wcs.crpix\n cdelt = ww.wcs.crelt\n crval = ww.wcs.crval\n\n if naxis > 2:\n # MAKE THE VELOCITY AXIS (WILL BE M/S)\n v = np.arange(naxis3) * 1.0\n vdif = v - (hdr['CRPIX3']-1)\n vaxis = (vdif * hdr['CDELT3'] + hdr['CRVAL3'])\n\n # CUT OUT HERE IF WE ONLY WANT VELOCITY INFO\n if vonly:\n return vaxis\n\n #IF 'SIMPLE' IS CALLED THEN DO THE REALLY TRIVIAL THING:\n if simple:\n print('Using simple aproach to make axes.')\n print('BE SURE THIS IS WHAT YOU WANT! It probably is not.')\n raxis = np.arange(naxis1) * 1.0\n rdif = raxis - (hdr['CRPIX1'] - 1)\n raxis = (rdif * hdr['CDELT1'] + hdr['CRVAL1'])\n\n daxis = np.arange(naxis2) * 1.0\n ddif = daxis - (hdr['CRPIX1'] - 1)\n daxis = (ddif * hdr['CDELT1'] + hdr['CRVAL1'])\n\n rimg = raxis # (fltarr(naxis2) + 1.)\n dimg = (np.asarray(naxis1) + 1.) # daxis\n return rimg, dimg\n\n # OBNOXIOUS SFL/GLS THING\n glspos = ww.wcs.ctype[0].find('GLS')\n if glspos != -1:\n ctstr = ww.wcs.ctype[0]\n newtype = 'SFL'\n ctstr.replace('GLS', 'SFL')\n ww.wcs.ctype[0] = ctstr\n print('Replaced GLS with SFL; CTYPE1 now =' + ww.wcs.ctype[0])\n\n glspos = ww.wcs.ctype[1].find('GLS')\n if glspos != -1:\n ctstr = ww.wcs.ctype[1]\n newtype = 'SFL'\n ctstr.replace('GLS', 'SFL')\n ww.wcs.ctype[1] = ctstr\n print('Replaced GLS with SFL; CTYPE2 now = ' + ww.wcs.ctype[1])\n\n # CALL 'xy2ad' TO FIND THE RA AND DEC FOR EVERY POINT IN THE IMAGE\n if novec:\n rimg = np.zeros((naxis1, naxis2))\n dimg = np.zeros((naxis1, naxis2))\n for i in range(naxis1):\n j = np.asarray([0 for i in xrange(naxis2)])\n\n pixcrd = np.array([[zip(float(i), float(j))]], numpy.float_)\n ra, dec = ww.all_pix2world(pixcrd, 1)\n\n rimg[i, :] = ra\n dimg[i, :] = dec\n else:\n ximg = np.arange(naxis1) * 1.0\n yimg = np.arange(naxis1) * 1.0\n X, Y = np.meshgrid(ximg, yimg, indexing='xy')\n ss = X.shape\n xx, yy = X.flatten(), Y.flatten()\n\n pixcrd = np.array(zip(xx, yy), np.float_)\n img_new = ww.all_pix2world(pixcrd, 0)\n rimg_new, dimg_new = img_new[:,0], img_new[:,1]\n\n rimg = rimg_new.reshape(ss)\n dimg = dimg_new.reshape(ss)\n\n # GET AXES FROM THE IMAGES. USE THE CENTRAL COLUMN AND CENTRAL ROW\n raxis = np.squeeze(rimg[:, naxis2/2])\n daxis = np.squeeze(dimg[naxis1/2, :])\n\n return rimg, dimg",
"def setaxesnames(self):\n if not self._axesnames or self.prop['skipsai']:\n return\n debug('ControllerStartup.setaxesnames()')\n oldaxes = self.pidevice.qSAI_ALL()\n for i, newaxis in enumerate(self.axesnames):\n if newaxis != oldaxes[i] or self.prop['forcesai']:\n setstage = False\n if self.pidevice.HasqCST():\n if self.pidevice.qCST()[oldaxes[i]] == 'NOSTAGE':\n try:\n debug('try rename NOSTAGE to TEMP (0x3C)')\n self.pidevice.SPA(oldaxes[i], 0x3c, 'TEMP')\n setstage = True\n except GCSError:\n pass\n self.pidevice.SAI(oldaxes[i], newaxis)\n if setstage:\n self.pidevice.SPA(newaxis, 0x3c, 'NOSTAGE')\n debug('restore NOSTAGE (0x3C)')",
"def InterpolateSurfaceVectorsWithLine():\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Inserting Centre Line...')\r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertCentreLine(Centroids1,Vectors1,50)\r\n print('Centre Line Inserted \\n Interpolating Centroids...')\r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,0.1)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.1)\r\n print('Interpolation Finished \\n Plotting...')\r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,5,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,5,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"Normal Surface Vectors With Central axis Line\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/SurfaceLineVectorInterpolation.dat\",Vectors2,header = header,comments='')",
"def _update_vars(self, axis, traj_s, traj_o, rank_s, rank_o, t):\n if axis == 0:\n self.x_traj = traj_s\n self.x_ranking = rank_s\n self.x_scores = traj_s[-1]\n self.inverse_y_traj = traj_o\n self.inverse_y_ranking = rank_o\n self.inverse_y_scores = traj_o[-1]\n if axis == 1:\n self.y_traj = traj_s\n self.y_ranking = rank_s\n self.y_scores = traj_s[-1]\n self.inverse_x_traj = traj_o\n self.inverse_x_ranking = rank_o\n self.inverse_x_scores = traj_o[-1]",
"def _plot_interpolation(x, y, x_new, y_new, title=\"\"):\n f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)\n axes = (ax1, ax2, ax3)\n coord = [\"X\", \"Y\", \"Z\"]\n\n for idx, ax in enumerate(axes):\n ax.set_title(title + \" (\" + coord[idx] + \" coordinate)\", fontsize=12)\n ax.set_ylabel(\"m\")\n ax.plot(x, y[:, idx], \"bo\", label=\"Original data\")\n ax.plot(x_new, y_new[:, idx], \"ro\", label=\"Interpolated data\")\n\n ax3.set_xlabel(\"Time\")\n ax1.legend(fontsize=8, loc=1)\n f.subplots_adjust(hspace=0.3)\n plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)\n plt.show()"
] | [
"0.5527314",
"0.5408021",
"0.5342411",
"0.52576274",
"0.52193874",
"0.52078605",
"0.5200823",
"0.5182296",
"0.5143223",
"0.5039198",
"0.5026079",
"0.49834254",
"0.49665734",
"0.48400095",
"0.48353782",
"0.48336178",
"0.4820047",
"0.48099452",
"0.4761784",
"0.47465044",
"0.47106996",
"0.47098395",
"0.46774116",
"0.4657304",
"0.4640464",
"0.46337345",
"0.4628509",
"0.46209556",
"0.4620678",
"0.46194112"
] | 0.71285164 | 0 |
From a filename, extracts the first part of the filename as the possible name of a family of files; e.g. from 'ts_Amon_bcccsm11_amip_r1i1p1_197901200812.nc' extract and return 'ts_Amon_bcccsm11_amip_r1i1p1'. To distinguish between the end of a file family name and the beginning of the file-specific part of the filename, we look for an underscore followed by two numerical digits, e.g. '_19'. | def extract_filefamilyname( self, filename ):
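        # The greedy '.*' makes the match end at the last underscore followed by two digits; end()-3 below drops that '_NN' part.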
matchobject = re.search( r"^.*_\d\d", filename )
if matchobject is None:
return filename
else:
familyname = filename[0:(matchobject.end()-3)]
return familyname | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reFileName(str_):\n rv = 'None', str_\n m = re.match(r'((?:[a-zA-Z0-9-]){4,})_(.*)$', str_)\n if m:\n rv = m.group(1), m.group(2)\n else:\n m = re.match(r'(\\d+-\\d+)\\.-\\.(.*)$', str_)\n if m:\n rv = m.group(1), m.group(2)\n return rv",
"def parse_rarefaction_fname(name_string):\r\n\r\n root, ext = os.path.splitext(name_string)\r\n root_list = root.split(\"_\")\r\n iters = int(root_list.pop())\r\n seqs_per_sam = int(root_list.pop())\r\n base_name = \"_\".join(root_list)\r\n return base_name, seqs_per_sam, iters, ext",
"def parse_filename(cls, filename):\n words = filename.split('_')\n return words[0], int(words[1][1:]), int(words[2])",
"def _get_aso_id_from_file_name(self, filename: str) -> str:\n id_parts = filename.split('/')\n prefix = id_parts[1]\n suffix = id_parts[-1].split('.')[0].zfill(3)\n if len(suffix) == 5:\n return suffix\n else:\n return prefix + suffix",
"def extract_filename(str):\n regex = r\"([0-9_-]+).jpg\"\n matches = re.search(regex, str)\n if matches:\n return matches.group(1)",
"def split_name(filename):\n # *********** My filename are in the format ./CaAl2Si2O8_T3_nvt_a12.5.outcar.msd.dat\n # ******* so I can split their name with _ and take the compound and T from their name\n filename = filename.strip('./')\n temperature = filename.split('_')[1]\n acell = filename.split('.outcar')[0].split('_')[3].strip('a')\n return temperature, acell",
"def split_name(filename):\n # *********** My filename are in the format ./CaAl2Si2O8_T3_nvt_a12.5.\n # ******* so I can split their name with _ and take the compound and T from their name\n filename = filename.strip('./')\n temperature = str(int(float(filename.split('_')[1].strip('T'))*1000))\n acell = filename.split('_')[3].split('.outcar')[0].strip('a')\n return temperature, acell",
"def genBaseName(fileName):\n return fileName.split(\"_\")[0].split(\".\")[0]",
"def get_name_from_filename(filename):\n return filename[:-4]",
"def splitFilename(filename):\n\n if filename[-4:] == '.rpm':\n filename = filename[:-4]\n \n archIndex = filename.rfind('.')\n arch = filename[archIndex+1:]\n\n relIndex = filename[:archIndex].rfind('-')\n rel = filename[relIndex+1:archIndex]\n\n verIndex = filename[:relIndex].rfind('-')\n ver = filename[verIndex+1:relIndex]\n\n epochIndex = filename.find(':')\n if epochIndex == -1:\n epoch = ''\n else:\n epoch = filename[:epochIndex]\n \n name = filename[epochIndex + 1:verIndex]\n return name, ver, rel, epoch, arch",
"def get_name_from_file(filename):\n return filename.split(\".\")[0]",
"def prefix_from_filename(input_file):\n prefix = \"\"\n \n if str(input_file).find(\"medication_bner_\") != -1:\n prefix = \"drugbank\"\n elif str(input_file).find(\"_bc5cdr\") != -1:\n prefix = \"bc5cdr\"\n elif str(input_file).find(\"_bionlp13cg\") != -1:\n prefix = \"bionlp13cg\"\n \n return prefix",
"def split_file_name(file_path):\n file_name = os.path.splitext(file_path)[0]\n file_name = os.path.split(file_name)[1]\n\n return file_name",
"def get_fname(a_file):\r\n fname, fext = os.path.splitext(a_file)\r\n return os.path.basename(fname)",
"def get_fname(a_file):\r\n fname, fext = os.path.splitext(a_file)\r\n return os.path.basename(fname)",
"def get_fixed_filename(filename):\n new_name = \"\"\n for i, char in enumerate(filename):\n if i + 1 != len(filename):\n previous_character = filename[i - 1]\n next_character = filename[i + 1]\n if char.islower() and next_character.isupper():\n new_name += char + \"_\"\n elif previous_character == \".\":\n new_name += char\n elif char.islower() and not previous_character.isalpha():\n new_name += char.upper()\n else:\n new_name += char\n else:\n new_name += char\n new_name = new_name.replace(\" \", \"_\").replace(\".TXT\", \".txt\")\n return new_name",
"def decompose_newstyle_name(filename):\n path, parts, ext = _get_fields(filename)\n observatory = parts[0]\n serial = list_get(parts, 3, \"\")\n\n if ext == \".pmap\":\n assert len(parts) in [1,2], \"Invalid .pmap filename \" + repr(filename)\n instrument, filekind = \"\", \"\"\n serial = list_get(parts, 1, \"\")\n elif ext == \".imap\":\n assert len(parts) in [2,3], \"Invalid .imap filename \" + repr(filename)\n instrument = parts[1]\n filekind = \"\"\n serial = list_get(parts, 2, \"\")\n else:\n assert len(parts) in [3,4], \"Invalid filename \" + repr(filename)\n instrument = parts[1]\n filekind = parts[2]\n serial = list_get(parts, 3, \"\")\n\n # Don't include filename in these or it messes up crds.certify unique error tracking.\n\n assert instrument in INSTRUMENTS+[\"\"], \"Invalid instrument \" + repr(instrument)\n assert filekind in FILEKINDS+[\"\"], \"Invalid filekind \" + repr(filekind)\n assert re.match(r\"\\d*\", serial), \"Invalid id field \" + repr(id)\n # extension may vary for upload temporary files.\n\n return path, observatory, instrument, filekind, serial, ext",
"def guessFilePrefix(self, filename):\n count = 0\n lim = None\n for i in range(len(filename)):\n if filename[i] == \".\":\n break\n\n try:\n int(filename[i])\n\n if lim is None:\n lim = count\n\n except ValueError:\n lim = None\n\n count += 1\n\n if lim is None:\n array = os.path.splitext(filename)\n\n if array[1] == '.gz' or array[1] == '.bz2':\n array = os.path.splitext(array[0])\n\n filename = array[0]\n\n else:\n filename = filename[:lim]\n\n return filename",
"def extract_file_name(file_path):\n # ファイルパスからファイル名(拡張子含む)を取り出す\n file_name = file_path.split('/')[-1]\n # 拡張子を取り除く\n return file_name.split('.')[0]",
"def clean_filename(file):\r\n\r\n return file.split('.')[0]",
"def basefname(fname):\n return os.path.splitext(fname.split(\"\\\\\")[-1])[0]",
"def get_fixed_filename(filename):\n\n filename = filename.replace(\".TXT\", \".txt\")\n new_name = \"\"\n space_preceding = False\n bracket_preceding = False\n\n for letter in filename:\n if letter.isspace() or letter == \"_\":\n space_preceding = True\n new_name = new_name + \"_\"\n elif letter == \"(\":\n bracket_preceding = True\n elif letter.isupper():\n if new_name != \"\" and not space_preceding and not bracket_preceding:\n new_name = new_name + \"_\"\n if not (letter.isspace() or letter == \"_\"):\n if space_preceding:\n letter = letter.upper()\n space_preceding = False\n new_name = new_name + letter\n\n return new_name",
"def getFileName(filepath):\n return os.path.splitext(os.path.basename(filepath))[0]",
"def get_date_from_filename(file_path):\n file_name = basename(file_path)\n name, _ = splitext(file_name)\n _, date = name.split('_')\n\n return date",
"def extractFileName(fileType, modelName, modelVersion, modelState):\n fileName = '{}_{}_{}'.format(modelName, modelVersion, fileType) if modelState == 'national' else '{}_{}_{}_{}'.format(modelName, modelVersion, modelState, fileType)\n return fileName",
"def scrub_underscore_suffix(filename):\n scrubbed = re.sub(r\"_[^_]+\\.\", \".\", filename)\n return scrubbed",
"def get_extension_from_filename(filename):\n return filename[-4:]",
"def filename_ext(filename):\n base = os.path.basename(filename)\n return os.path.splitext(base)[1][1:]",
"def get_file_name(filepath: str) -> str:\n\n f = os.path.basename(filepath)\n filename, _ = os.path.splitext(f)\n\n return filename",
"def get_file_name(file):\n return os.path.splitext(os.path.basename(file))[0]"
] | [
"0.7018564",
"0.68886405",
"0.6759327",
"0.6749364",
"0.6716706",
"0.6699516",
"0.6672866",
"0.66605604",
"0.6640363",
"0.65921485",
"0.6585",
"0.65480256",
"0.6478625",
"0.64543766",
"0.64543766",
"0.6446561",
"0.6443001",
"0.64351195",
"0.6385604",
"0.6374533",
"0.6354233",
"0.6317038",
"0.6274813",
"0.6271444",
"0.6234476",
"0.62194896",
"0.6198998",
"0.6178173",
"0.6176048",
"0.6163876"
] | 0.7713043 | 0 |
Finds and opens the files containing data required for the variable, applies the reduction function to the data, and returns an MV. When completed, this will treat missing data as such. At present only CF-compliant files are supported. | def reduce( self, vid=None ):
if vid is None:
vid = self._vid
rows = self._filetable.find_files( self.variableid, time_range=self.timerange,
lat_range=self.latrange, lon_range=self.lonrange,
level_range=self.levelrange )
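        # Each row returned by find_files identifies (via fileid) a file holding data for this variable in the requested ranges.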
if rows==None or len(rows)<=0:
# this belongs in a log file:
print "ERROR no data found for reduced variable",self.variableid
print "in",self.timerange, self.latrange, self.lonrange, self.levelrange
print "filetable is",self._filetable
return None
# To make it even easier on the first cut, I won't worry about missing data and
# anything else inconvenient, and I'll assume CF compliance.
files = list(set([r.fileid for r in rows]))
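        # Several rows may point into the same file, so de-duplicate before deciding how to open the data.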
if len(files)>1:
# Piece together the data from multiple files. That's what cdscan is for...
# One problem is there may be more than one file family in the same
# directory! If we see more than one at this point, the user wasn't
# careful in his specifications. We'll just have to choose one.
famdict = { f:self.extract_filefamilyname(f) for f in files }
families = list(set([ famdict[f] for f in files ]))
families.sort(key=len) # a shorter name is more likely to be what we want
if len(families)==0:
print "ERROR. No data to reduce. files[0]=:",files[0]
return None
elif len(families)>1:
print "WARNING: ",len(families)," file families found, will use the first one:",families
fam = families[0]
# We'll run cdscan to combine the multiple files into one logical file.
# To save (a lot of) time, we'll re-use an xml file if a suitable one already exists.
# To do this safely, incorporate the file list (names,lengths,dates) into the xml file name.
famfiles = [f for f in files if famdict[f]==fam]
famfiles.sort() # improves consistency between runs
file_list = '-'.join(
[ f+'size'+str(os.path.getsize(f))+'mtime'+str(os.path.getmtime(f))\
for f in famfiles ] )
csum = hashlib.md5(file_list).hexdigest()
xml_name = fam+'_cs'+csum+'.xml'
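            # Any change in the file set (names, sizes, or modification times) changes the checksum, so a stale cached xml will not be silently reused.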
if os.path.isfile( xml_name ):
files = [ xml_name ]
if len(files)>1:
famfiles = [f for f in files if famdict[f]==fam]
# Normally when we get here, it's because data has been divided by time among
# several files. So when cdscan puts it all back together, it needs the time
# units. If the time variable is named 'time' and has a valid 'units'
# attribute, we're fine; otherwise we're in trouble. But for some AMWG obs
# data which I have, the time units may be found in the long_name attribute.
# The -e option will normally be the way to fix it up, but maybe the -r option
# could be made to work.
# I know of no exception to the rule that all files in the file family keep their
# units in the same place; so find where they are by checking the first file.
f = cdms2.open( famfiles[0] )
time_units = f['time'].units
if type(time_units) is str and len(time_units)>3:
# cdscan can get time units from the files; we're good.
f.close()
cdscan_line = 'cdscan -q '+'-x '+xml_name+' '+' '.join(famfiles)
else:
# cdscan needs to be told what the time units are. I'm betting that all files
# use the same units. I know of cases where they all have different units (e.g.,
# GISS) but in all those cases, the units attribute is used properly, so we don't
# get here.
# Another problem is that units stuck in the long_name sometimes are
# nonstandard. So fix them!
if hasattr(f['time'],'long_name'):
time_units = f['time'].long_name
else:
time_units = 'days' # probably wrong but we can't go on without something
# Usually when we get here it's a climatology file where time is meaningless.
f.close()
if type(time_units) is str and len(time_units)>1 and (
time_units.find('months')==0 or time_units.find('days')==0 or
time_units.find('hours')==0 ):
time_units = fix_time_units( time_units )
cdscan_line = 'cdscan -q '+'-x '+xml_name+' -e time.units="'+time_units+'" '+\
' '.join(famfiles)
else:
print "WARNING, cannot find time units; will try to continue",famfiles[0]
cdscan_line = 'cdscan -q '+'-x '+xml_name+' -e time.units="'+time_units+'" '+\
' '.join(famfiles)
print "cdscan_line=",cdscan_line
proc = subprocess.Popen([cdscan_line],shell=True)
proc_status = proc.wait()
if proc_status!=0: print "ERROR: cdscan terminated with",proc_status
f = cdms2.open( xml_name )
else:
# the easy case, just one file has all the data on this variable
f = cdms2.open(files[0])
fcf = get_datafile_filefmt(f)
reduced_data = self._reduction_function( f(self.variableid), vid=vid )
if reduced_data is not None:
reduced_data._vid = vid
f.close()
return reduced_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def open_files(self):\n if not self.unbalanced:\n if not self.validation:\n datas={}\n for var in self.variables:\n datas[var]=xr.open_dataset(\n f'/{self.dlfile_directory}/{self.climate}_{self.variable_translate(var).lower()}_{self.mask_str}_dldata_traintest.nc')\n return datas\n if self.validation:\n datas={}\n for var in self.variables:\n datas[var]=xr.open_dataset(\n f'/{self.dlfile_directory}/{self.climate}_{self.variable_translate(var).lower()}_{self.mask_str}_dldata_traintest_valid.nc')\n return datas\n if self.unbalanced:\n if not self.validation:\n datas={}\n for var in self.variables:\n datas[var]=xr.open_dataset(\n f'/{self.dlfile_directory}/{self.climate}_{self.variable_translate(var).lower()}_{self.mask_str}_dldata_traintest_unbalanced.nc')\n return datas\n if self.validation:\n datas={}\n for var in self.variables:\n datas[var]=xr.open_dataset(\n f'/{self.dlfile_directory}/{self.climate}_{self.variable_translate(var).lower()}_{self.mask_str}_dldata_traintest_unbalanced_valid.nc')\n return datas",
"def reduce_single_set_data():\n workflow = my_data.get()\n\n # Set reduction parameters\n focus_calib_file = '/SNS/VULCAN/shared/autoreduce/vulcan_foc_all_2bank_11p.cal'\n\n workflow.set_focus_calibration_file(focus_calib_file)\n\n # set up reduction parameters\n outputdir = os.getcwd()\n paramdict = {\n \"Extension\": \"_event.nxs\",\n \"PreserveEvents\": True,\n \"Binning\": -0.001,\n \"OutputDirectory\": outputdir,\n \"NormalizeByCurrent\": False,\n \"FilterBadPulses\": False,\n \"CompressTOFTolerance\": False,\n \"FrequencyLogNames\": \"skf1.speed\",\n \"WaveLengthLogNames\": \"skf12.lambda\",\n }\n\n workflow.set_reduction_parameters(paramdict)\n\n # reduce\n reduction_list = [(80231, True)]\n workflow.set_reduction_flag(file_flag_list=reduction_list, clear_flags=True)\n\n status, ret_obj = workflow.reduce_data_set(norm_by_vanadium=False)\n print('[Message] ', str(ret_obj))\n assert status",
"def get_model_data_from_files(self, oc):\r\n # Load model related files\r\n model_path = self.config['DATA_PATH'] + self.config['CUSTOMER_NAME'] + '/models/'\r\n\r\n features_file = model_path + self.task + '_' + str(oc) + '_features.txt'\r\n dummies_file = model_path + self.task + '_' + str(oc) + '_dummies.txt'\r\n model_file = model_path + self.task + '_' + str(oc) + '.joblib'\r\n\r\n if os.path.isfile(features_file) and os.path.isfile(dummies_file) and os.path.isfile(model_file):\r\n model = joblib.load(model_file)\r\n features = open(features_file, 'r', encoding=self.config['DATA_ENCODING']).read().rstrip('\\n').split(self.config['DATA_SEPARATOR'])\r\n dummies = open(dummies_file, 'r', encoding=self.config['DATA_ENCODING']).read().rstrip('\\n').split(self.config['DATA_SEPARATOR'])\r\n return (model, features, dummies)\r\n return (None, None, None)",
"def stageData(self,m):\n obs = Variable(filename = self.source,\n variable_name = self.variable,\n alternate_vars = self.alternate_vars)\n if obs.time is None: raise il.NotTemporalVariable()\n self.pruneRegions(obs)\n \n # Try to extract a commensurate quantity from the model\n mod = m.extractTimeSeries(self.variable,\n alt_vars = self.alternate_vars,\n expression = self.derived,\n initial_time = obs.time_bnds[ 0,0],\n final_time = obs.time_bnds[-1,1],\n lats = None if obs.spatial else obs.lat,\n lons = None if obs.spatial else obs.lon)\n obs,mod = il.MakeComparable(obs,mod,\n mask_ref = True,\n clip_ref = True,\n extents = self.extents,\n logstring = \"[%s][%s]\" % (self.longname,m.name))\n \n # Check the order of magnitude of the data and convert to help avoid roundoff errors\n def _reduceRoundoffErrors(var):\n if \"s-1\" in var.unit: return var.convert(var.unit.replace(\"s-1\",\"d-1\"))\n if \"kg\" in var.unit: return var.convert(var.unit.replace(\"kg\" ,\"g\" ))\n return var\n def _getOrder(var):\n return np.log10(np.abs(var.data).clip(1e-16)).mean()\n order = _getOrder(obs)\n count = 0\n while order < -2 and count < 2:\n obs = _reduceRoundoffErrors(obs)\n order = _getOrder(obs)\n count += 1\n \n # convert the model data to the same unit\n mod = mod.convert(obs.unit)\n\n return obs,mod",
"def apply(self, opened_file):",
"def load_file_data_from_db(self):\n\n file_objs = self.file_queryset.filter(sip=self.sip, removedtime__isnull=True)\n for file_obj in self._batch_query(file_objs):\n self.file_events = get_file_events(file_obj)\n if not self.file_events:\n return\n try:\n # merge the map_file_data dict with the map_av_data\n mapped_file_info = merge_file_data_dicts(\n map_file_data(file_obj, self.file_events), map_av_data(file_obj)\n )\n self.md_info[\"files\"].append(mapped_file_info)\n self.md_info[\"premis:size\"] = create_package_size(\n mapped_file_info[\"premis:size\"]\n )\n self.md_info[\"amount_of_files\"] += 1\n failed_virus_checks = get_failed_virus_checks(self.file_events)\n if failed_virus_checks:\n self.md_info[\"virus_scan_info\"][\"failed_virus_checks\"].append(\n failed_virus_checks\n )\n passed_virus_checks = get_passed_virus_checks(self.file_events)\n # add info virus_scan_tools if they passed and respect\n # different tools and versions if needed.\n if (\n passed_virus_checks\n and passed_virus_checks\n not in self.md_info[\"virus_scan_info\"][\"virus_scan_tools\"]\n ):\n self.md_info[\"virus_scan_info\"][\"virus_scan_tools\"].append(\n passed_virus_checks\n )\n except KeyError:\n logger.info(\n \"File is no longer present on the filesystem: %s\",\n file_obj.currentlocation,\n )\n continue",
"def get_data(self): \n self.improvement = []\n self.corrsq = []\n for filename in onlyfiles:\n mst = MST(filename, mypath=mypath)\n mst.estimate_correct_seqences()\n mst.estimate_improvement()\n self.mst.append(mst)\n\n self.corrsq.append(mst.corrsq)\n self.improvement.append(mst.improvement)\n\n\n\n print(f\"cor = {improvement}\")\n print(f\"improvement = {improvement}\")\n print(f\"mittelwert der improvement = {np.mean(improvement)}\")\n print(f\"Standardabweichung der lersteigung = {np.std(improvement)}\")",
"def get_data(self):\n \n with os.scandir(self.file_path) as collection_of_files:\n files_found = [file.name.split('.')[0] for file in collection_of_files \n if (file.name.split('.')[0].lower().strip() in self._data_requirements.required_file_names \n and file.name.endswith('.csv'))]\n\n self.check_missing_files(files_found)\n \n self._data = DictObjectView(self.read_in_files(files_found))",
"def get_additional_data_from_files(df, file_description): # file description one of [\"video\", \"eaf\", \"seg\", \"gentle\"]\n if file_description == \"gentle\":\n file_folder = FILE_BASE + \"/gentle/\"\n is_gentle_file = True\n else:\n file_folder = FILE_BASE + \"/original/\"\n is_gentle_file = False\n\n file_df = None\n\n if file_description not in list(FILE_DESCRIPTIONS_TO_EXT.keys()):\n print(\"Unknown file description! Don't know what to do with %s files...\" % file_description)\n return None\n\n else:\n print(\"Load and extract information from %s files...\" % file_description)\n #pbar = tqdm.tqdm(total = len(np.unique(df[\"source_file\"])),desc='Files', position=0,leave=True,file=sys.stdout)\n #file_log = tqdm.tqdm(total=0, position=1, bar_format='{desc}',leave=True,file=sys.stdout)\n print(\"Total files to laod and preprocess: \", len(np.unique(df[\"source_file\"])))\n \n for i,file in enumerate(np.unique(df[\"source_file\"])):\n if i%100 == 0:\n print(\"File: \",i)\n \n filepath = file_folder + get_file_path(file,is_gentle_file=is_gentle_file) + FILE_DESCRIPTIONS_TO_EXT[file_description]\n\n if file_description == \"video\":\n file_i_df = mp4_file_processing.get_word_video_snippet_size(df, filepath)\n elif file_description == \"eaf\":\n speech_annotation_eaf_data, gesture_eaf_data = eaf_file_processing.read_eaf(filepath)\n file_i_df = eaf_file_processing.map_gestures_to_annotation(speech_annotation_eaf_data, gesture_eaf_data, remove_pauses=False)\n file_i_df = eaf_file_processing.binary_encode_gestures(file_i_df, gesture_column=\"gesture\")\n\n elif file_description == \"seg\":\n file_i_df = seg_file_processing.get_seg_file_pos_info(filepath)\n\n elif file_description == \"gentle\":\n file_i_df = gentle_file_processing.get_gentle_file_transcripts(filepath)\n \n else:\n print(\"Unknown file format!!!\")\n return \n\n if file_df is None:\n file_df = file_i_df\n else:\n file_df = pd.concat([file_df, file_i_df], ignore_index=True)\n\n #file_log.set_description_str(f'Processed file: {file}')\n #pbar.update(1)\n #sleep(0.02)\n #file_log.close()\n #pbar.close()\n return file_df",
"def cmorization(in_dir, out_dir, cfg, cfg_user, start_date, end_date):\n # Run the cmorization\n for (short_name, var) in cfg['variables'].items():\n logger.info(\"CMORizing variable '%s'\", short_name)\n short_name = var['short_name']\n raw_filenames = Path(in_dir).rglob('*.nc')\n filenames = []\n for raw_filename in raw_filenames:\n if re.search(var['file'], str(raw_filename)) is not None:\n filenames.append(raw_filename)\n\n for filename in sorted(filenames):\n\n _extract_variable(short_name, var, cfg, filename, out_dir)",
"def readExperi(directory,varid,experi,level):\n print('\\n>>> Using readExperi function! \\n')\n \n ### Import modules\n import numpy as np\n from netCDF4 import Dataset\n \n ### Call files\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n \n if any([experi == 'FPOL',experi == 'FSUB']):\n directory = '/home/zlabe/green/simu/'\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n \n ### Read in Data\n if level == 'surface': # 3d variables\n data = Dataset(filename,'r')\n varq = data.variables['%s' % varid][:,:,:,0]\n data.close()\n \n dataq = Dataset(totaldirectory + 'T2M_1900-2000.nc')\n time = dataq.variables['time'][:]\n lev = 'surface'\n lat = dataq.variables['latitude'][:]\n lon = dataq.variables['longitude'][:]\n dataq.close()\n elif level == 'profile': # 4d variables\n data = Dataset(filename,'r')\n varq = data.variables['%s' % varid][:,:,:,0]\n data.close()\n \n dataq = Dataset(totaldirectory + 'TEMP_1900-2000.nc')\n time = dataq.variables['time'][:]\n lev = dataq.variables['level'][:]\n lat = dataq.variables['latitude'][:]\n lon = dataq.variables['longitude'][:]\n dataq.close()\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Read data for *%s* : %s!' % (experi[:4],varid))\n \n ### Reshape to split years and months\n months = 12\n if level == 'surface': # 3d variables\n var = np.reshape(varq,(int(varq.shape[0]/12),months,\n int(lat.shape[0])))\n elif level == 'profile': # 4d variables\n var = np.reshape(varq,(int(varq.shape[0]/12),months,int(lev.shape[0]),\n int(lat.shape[0])))\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Reshaped %s array!' % (varid))\n \n ### Convert units\n if varid in ('TEMP','T2M'):\n var = var - 273.15 # Kelvin to degrees Celsius \n print('Completed: Changed units (K to C)!')\n\n print('\\n*Completed: Finished readExperi function!')\n return lat,lon,time,lev,var",
"def __call__( self, file_contents, regression_var ):\n return super()._process( self.__load( file_contents ), regression_var )",
"def __call__( self, file_contents, regression_var ):\n return super()._process( self.__load( file_contents ), regression_var )",
"def _compute_(self, case):\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n fn = \"data/sim/{dn}/{rad}/exp.{cse}.bm({bm}).elv(<elv>).csv\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"),\n rad=self.rad, bm=self.bmnum, cse=case)\n cmd = \"export DIR_MODELS_REF_DAT=/home/shibaji/Collaboration_NCAR/code_rt_sd/pharlap/pharlap_4.1.3/dat;\\\n cd pharlap;\\\n matlab -nodisplay -nodesktop -nosplash -nojvm -r \\\"UT=[{ut}];rad='{rad}';dic='{dic}';bm={bm};\\\n fn='{fn}';cse='{cse}';rt_1D_sen;exit;\\\"\".format(ut=self.event.strftime(\"%Y %m %d %H %S\"), rad=self.rad,\n dic=dic, bm=self.bmnum, fn=fn, cse=case)\n os.system(cmd)\n return",
"def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict",
"def execute(cf):\n\n ##Ports and parameters\n train_set = cf.get_input(\"train_set\") #training set. Typically even_file\n test_set = cf.get_input(\"test_set\") #test set. Typically odd_file\n WM1 = cf.get_input(\"WM1\")\n WM2 = cf.get_input(\"WM2\")\n WM3 = cf.get_input(\"WM3\")\n WM4 = cf.get_input(\"WM4\")\n WM5 = cf.get_input(\"WM5\")\n WM6 = cf.get_input(\"WM6\")\n WM7 = cf.get_input(\"WM7\")\n WM8 = cf.get_input(\"WM8\")\n WM9 = cf.get_input(\"WM9\")\n WM10 = cf.get_input(\"WM10\")\n WM11 = cf.get_input(\"WM11\")\n WM12 = cf.get_input(\"WM12\")\n WM13 = cf.get_input(\"WM13\")\n WM14 = cf.get_input(\"WM14\")\n WM15 = cf.get_input(\"WM15\")\n WM16 = cf.get_input(\"WM16\")\n WM17 = cf.get_input(\"WM17\")\n WM18 = cf.get_input(\"WM18\")\n WM19 = cf.get_input(\"WM19\")\n WM20 = cf.get_input(\"WM20\")\n WMdir = cf.get_input(\"WMdir\")\n WMdir2 = cf.get_input(\"WMdir2\")\n basefreqs = cf.get_input(\"BaseFrequencies\")\n ufemodel_path = cf.get_input(\"UFEmodel\")\n\n bestWM = cf.get_output(\"BestWM\")\n log_file = cf.get_output(\"log_file\")\n interm = cf.get_output(\"intermediate\")\n\n genome = cf.get_parameter('genome', 'string')\n motevo_path = cf.get_parameter('motevo_path', 'string')\n aligned = cf.get_parameter(\"aligned\", \"boolean\")\n\n os.mkdir(interm)\n\n\n\n # Read stuff in\n WMs = [i for i in[WM1, WM2, WM3, WM4, WM5, WM6, WM7, WM8, WM9, WM10, WM11, WM12, WM13, WM14, WM15, WM16, WM17, WM18, WM19, WM20] if i]\n\n if WMdir:\n WMs += [os.path.join(WMdir, wm) for wm in os.listdir(WMdir)]\n\n if WMdir2:\n WMs += [os.path.join(WMdir2, wm) for wm in os.listdir(WMdir2)]\n\n f = open(basefreqs)\n ATfreq = float(f.readline().strip().split()[1])\n GCfreq = float(f.readline().strip().split()[1])\n f.close()\n\n\n # Compute stuff: optimal priors and then likelihood of test set\n optpriors = []\n logliks = []\n\n for i, WM in enumerate(WMs):\n\n wmlen = len(open(WM).readlines())-4\n\n # 1. Fit prior on training set with EM\n tag = 'fitP_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=1, bgorder=0, bgprior=0.99)\n r = runMotevo(motevo_path, train_set, params, WM, interm, tag)\n if r != 0:\n print 'motevo failed ', tag\n sys.exit(1)\n\n # prior file:\n # WM_name final_prior nr_of_sites density\n # /import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/PipeLineSource/TESTRUN/NRF1_Z2/OUTPUT/NRF1_FgBg-runmotevoPG2_1/Logo 0.016554 635.008 0.251863\n # background 0.983446 37724.8 0.748137\n # UFEwm 0 0 0\n\n optprior = float(open(priors).readlines()[1].split()[1])\n bgprior=(1-optprior)\n print bgprior\n\n # 2. 
Compute log-likelihood on test set with optimal prior from training set and without EM\n tag = 'compLL_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=0, bgorder=0, bgprior=bgprior)\n runMotevo(motevo_path, train_set, params, WM, interm, tag)\n\n a = loadtxt(loglikfile, usecols=[1])\n ll = sum(a)\n\n logliks.append(ll)\n optpriors.append(optprior)\n\n print logliks\n\n\n\n #replace name in WM file with bestWM\n lines = open(WMs[argmax(logliks)]).readlines()\n lines[1] = 'NA BestWM\\n'\n bwm = open(bestWM, 'w')\n bwm.write(''.join(lines))\n\n\n l = open(log_file, 'w')\n\n l.write('WM_name\\tWM_path\\tlog_likelihood\\topt_prior\\n')\n\n names = ['WM_%i\\t%s\\t%.4f\\t%s' %(i+1, WMs[i], logliks[i], optpriors[i]) for i in arange(len(WMs))]\n\n l.write('\\n'.join(names))\n l.close()\n\n\n return 0",
"def solve(ctx):\n my_solver(ctx.obj['filename'])",
"def get_obs(case, this_varname, this_filename, valid_years, mode_lag, cvdp_file, AMO_cutoff_freq, name_conversion):\n\n # The forced component of both temperature and precipitation are estimated through regressing the local\n # values onto the GM-EM temperature time series, which can be viewed as a proxy for radiative forcing.\n\n # We assume that the forced component of SLP is zero.\n cvdp_loc = '/'.join(cvdp_file.split('/')[:-1])\n gm_em, gm_em_units, time, time_units = forced_trend('tas', cvdp_loc)\n\n if this_varname == 'slp':\n gm_em *= 0\n gm_em += 1 # will replace constant\n\n # Get dataframe of modes\n df = create_mode_df(cvdp_file, AMO_cutoff_freq)\n\n # Add EM, GM time series to it\n df = df.assign(F=gm_em)\n\n # Shift modes in time\n df_shifted = shift_df(df, mode_lag, ['year', 'month', 'season', 'F'])\n\n # Subset to valid years\n subset = np.isin(df_shifted['year'].values, valid_years)\n df_shifted = df_shifted.loc[subset, :]\n\n # Reset the forced trend time series to a mean of zero\n # This allows for the forced trend to be straightforwardly added in later\n F = df_shifted['F'].values\n F -= np.mean(F)\n df_shifted = df_shifted.assign(F=F)\n\n # Load dataset\n if case == 'obs': # Observational data\n ds = xr.open_dataset(this_filename)\n elif 'LE' in case: # CESM data. Allows for multiple runs to be concatenated if desired.\n if this_varname == 'pr': # CESM splits up precipitation into convective and large scale, liquid+ice vs snow\n ds = xr.open_mfdataset(this_filename, combine='nested', concat_dim='time')\n this_filename2 = [f.replace('PRECC', 'PRECL') for f in this_filename]\n ds2 = xr.open_mfdataset(this_filename2, combine='nested', concat_dim='time')\n this_filename3 = [f.replace('PRECC', 'PRECSC') for f in this_filename]\n ds3 = xr.open_mfdataset(this_filename3, combine='nested', concat_dim='time')\n this_filename4 = [f.replace('PRECC', 'PRECSL') for f in this_filename]\n ds4 = xr.open_mfdataset(this_filename4, combine='nested', concat_dim='time')\n # CESM output saved with one day delay, so need to move back\n ds2 = ds2.assign_coords(time=ds2.time-timedelta(days=1))\n ds3 = ds3.assign_coords(time=ds3.time-timedelta(days=1))\n ds4 = ds4.assign_coords(time=ds4.time-timedelta(days=1))\n else:\n ds = xr.open_mfdataset(this_filename, combine='nested', concat_dim='time')\n\n # CESM output saved with one day delay, so need to move back\n ds = ds.assign_coords(time=ds.time-timedelta(days=1))\n\n # Load data\n try:\n lat = ds['latitude'].values\n lon = ds['longitude'].values\n except KeyError:\n lat = ds['lat'].values\n lon = ds['lon'].values\n try:\n X = ds[this_varname]\n X_units = ds[this_varname].units\n except KeyError:\n alt_name = name_conversion[this_varname]\n X = ds[alt_name]\n X_units = ds[alt_name].units\n\n # Pull out values, since we'll be permuting the data / changing units, etc\n # For CESM1-LE precipitation, need to add up convective and large scale\n if name_conversion[this_varname] == 'PRECC':\n X = X.values + ds2.PRECL.values + ds3.PRECSC.values + ds4.PRECSL.values\n else:\n X = X.values\n\n X_time = ds['time']\n if 'units' in ds['time'].attrs: # nonstandard, from BEST\n assert ds['time'].units == 'year A.D.'\n X_year = np.floor(X_time)\n X_month = (np.ceil((X_time - X_year)*12)).astype(int)\n else:\n X_year = ds['time.year']\n X_month = ds['time.month']\n\n # Change units if necessary\n if X_units == 'K':\n # convert to celsius\n X -= 273.15\n X_units = 'deg C'\n elif X_units == 'm/s':\n # convert to mm / day\n X *= 1000*24*60*60 # mm per day\n X_units = 
'mm/day'\n elif X_units == 'mm/month': # GPCC, mm total over month\n days_per_month = [calendar.monthrange(int(y), int(m))[1] for y, m in zip(X_year, X_month)]\n X /= np.array(days_per_month)[:, np.newaxis, np.newaxis]\n X_units = 'mm/day'\n\n # Check unit consistency\n if this_varname == 'slp':\n assert X_units == 'Pa'\n if this_varname == 'pr':\n assert X_units == 'mm/day'\n\n if 'climatology' in ds.variables:\n climo = ds['climatology'].values\n # Add climatology to X\n for counter, this_month in enumerate(X_month):\n X[counter, ...] += climo[this_month - 1, ...]\n\n # Permute all data to be time, lat, lon\n lat_idx = np.where(np.isin(X.shape, len(lat)))[0][0]\n lon_idx = np.where(np.isin(X.shape, len(lon)))[0][0]\n time_idx = np.where(np.isin(X.shape, len(X_time)))[0][0]\n\n X = np.transpose(X, (time_idx, lat_idx, lon_idx))\n ntime, nlat, nlon = np.shape(X)\n\n # Subset data\n subset = np.isin(X_year, valid_years)\n X = X[subset, :]\n X_year = X_year[subset]\n X_month = X_month[subset]\n\n # Also need to check if our data spans the full valid period\n subset = np.isin(df_shifted['year'].values, X_year)\n df_shifted = df_shifted.loc[subset, :]\n\n # Check that all dimensions look consistent\n assert len(df_shifted) == np.shape(X)[0]\n\n # Put into dataarray\n time = pd.date_range(start='%04d-%02d' % (X_year[0], X_month[0]),\n freq='M', periods=len(X_year))\n daX = xr.DataArray(data=X,\n dims=('time', 'lat', 'lon'),\n coords={'time': time,\n 'lat': lat,\n 'lon': lon},\n attrs={'units': X_units})\n\n return daX, df_shifted, df",
"def main(data, setup):\n # input check \n varnames = ('vm_raw', 'vm_raw_theo')\n for varname in varnames:\n if varname not in data.keys():\n raise LookupError('data must contain variable %s.' %s)\n\n # display info message\n chrono = setup['chrono']\n chrono.issue('target velocity: correct for sensor motion...')\n\n # retrieve varialbes\n vnys = data['nqv']\n v_sensor_r = data['v_sensor_r']\n\n # ========== main =================================== #\n for key_raw in ('vm_raw', 'vm_raw_theo'):\n key_c = key_raw.replace('raw', 'raw_c')\n\n # sum\n vm_raw = data[key_raw]\n v_sum = (vm_raw + np.expand_dims(v_sensor_r, 1))\n\n # mod\n data[key_c] = symmod(v_sum, vnys)\n # ==================================================== #\n\n return data",
"def get_files(self):\n\n # Grab master data - use existing header, remove unhappy columns\n\n self.df_mas_lab_data = pd.read_csv(\n self.master_csv, dtype=str, usecols=self.columns\n )\n\n # Delete rows, where column FACILITY_TYPE != Independent, Hospital,\n # Physician Office\n facility_type_keep_list = [\"Independent\", \"Hospital\", \"Physician Office\"]\n self.df_mas_lab_data = self.df_mas_lab_data[\n self.df_mas_lab_data[\"FACILITY_TYPE\"].isin(facility_type_keep_list)\n ]\n\n # Make everything a string and remove trailing and leading whitespaces\n self.df_mas_lab_data = self.df_mas_lab_data.astype(str)\n self.df_mas_lab_data = self.df_mas_lab_data.applymap(\n lambda x: x.strip() if isinstance(x, str) else x\n )\n\n print_banner(\"Computing all the Data\")\n print(f\"{len(self.df_mas_lab_data)} original master CLIA labs...\")\n\n # Grab other inputed files to make new data file to compare with\n self.df_new_lab_data = pd.concat(\n [\n pd.read_csv(file, names=self.columns, header=None, dtype=str, usecols=self.columns)\n for file in self.new_files\n ]\n )\n\n # Probably not needed for the new data but just in case:\n # Delete rows, where column FACILITY_TYPE != Independent, Hospital,\n # Physician Office\n self.df_new_lab_data = self.df_new_lab_data[\n self.df_new_lab_data[\"FACILITY_TYPE\"].isin(facility_type_keep_list)\n ]\n\n # Make everything a string and remove trailing and leading whitespaces\n self.df_new_lab_data = self.df_new_lab_data.astype(str)\n self.df_new_lab_data = self.df_new_lab_data.applymap(\n lambda x: x.strip() if isinstance(x, str) else x\n )\n\n print(f\"{len(self.df_new_lab_data)} inputted CLIA labs for comparison...\")",
"def _get_model_val(models_name, data_dir, val_source='test'):\n model_val = {}\n for model in models_name:\n mypath = data_dir + '/' + model\n only_files = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n select_files = [val_source in x for x in only_files]\n only_files = list(compress(only_files, select_files))\n if type(only_files) == list:\n for name_file in only_files:\n df_name = name_file.replace('.csv', '')\n model_val[model + '_' + df_name] = pd.read_csv(mypath + '/' + name_file)\n else:\n df_name = only_files.replace('.csv', '')\n model_val[model + '_' + df_name] = pd.read_csv(mypath + '/' + only_files)\n return model_val",
"def eval(\n self,\n processed_data_dir: Path,\n output_result_dir: Path,\n ) -> NoReturn:\n pass",
"def s_validation(path_setup=None):\n if path_setup is not None:\n # import validation setup\n fname = os.path.basename(path_setup)\n mname, ext = os.path.splitext(fname)\n val_module = imp.load_source(mname, path_setup)\n jobs, process = val_module.setup_process()\n results_path = '/data-write/RADAR/Validation_FFascetti/'\n for job in jobs:\n results = process.calc(job)\n netcdf_results_manager(results, results_path)",
"def ReadData( fileName ):\n \n # define column names\n colNames = ['Date','Precip','Max Temp', 'Min Temp','Wind Speed'] #NOTE: I changed the column names because .query() would not work when referencing column names with spaces\n global DataDF #added this line to make the dataframe visible in the variable explorer\n global ReplacedValuesDF #added this line to make the dataframe visible in the variable explorer\n # open and read the file\n DataDF = pd.read_csv(\"DataQualityChecking.txt\",header=None, names=colNames, \n delimiter=r\"\\s+\",parse_dates=[0])\n DataDF = DataDF.set_index('Date')\n \n # define and initialize the missing data dictionary\n ReplacedValuesDF = pd.DataFrame(0, index=[\"1. No Data\",\"2. Gross Error\",\"3. Swapped\",\"4. Range Fail\"], columns=colNames[1:]) #added additional indexed rows to make adding the values later easier\n \n return( DataDF, ReplacedValuesDF )",
"def exercise_9(path_to_X_data, path_to_w_data):\r\n\r\n print(\"=\"*30)\r\n print(\"Running exercise_9()\")\r\n\r\n #### YOUR CODE HERE ####\r\n # load the X and w data from file into arrays\r\n X = numpy.loadtxt('data/X.txt', delimiter=',')\r\n w = numpy.loadtxt('data/w.txt', delimiter=',')\r\n\r\n print(f'X:\\n{X}')\r\n print(f'w: {w}')\r\n\r\n #### YOUR CODE HERE ####\r\n # Extract the column 0 (x_n1) and column 1 (x_n2) vectors from X\r\n\r\n x_n1 = X[numpy.array([0,1,2,3,4]), 0]\r\n x_n2 = X[numpy.array([0,1,2,3,4]), 1]\r\n\r\n print(f'x_n1: {x_n1}')\r\n print(f'x_n2: {x_n2}')\r\n\r\n #### YOUR CODE HERE ####\r\n w_0 = w[0]\r\n w_1 = w[1]\r\n\r\n scalar_result_0 = w_0 * w_0 * sum(x_n1*x_n1) + 2 * w_0 * w_1 * sum(x_n2 * x_n1) + w_1 * w_1 * sum(x_n2*x_n2)\r\n # Use scalar arithmetic to compute the right-hand side of Exercise 3\r\n # (Exercise 1.3 from FCMA p.35)\r\n # Set the final value to\r\n scalar_result = scalar_result_0\r\n\r\n print(f'scalar_result: {scalar_result}')\r\n\r\n #### YOUR CODE HERE ####\r\n # Now you will compute the same result but using linear algebra operators.\r\n # (i.e., the left-hand of the equation in Exercise 1.3 from FCMA p.35)\r\n # You can compute the values in any linear order you want (but remember,\r\n # linear algebra is *NOT* commutative!), however here will require you to\r\n # first compute the inner term: X-transpose times X (XX), and then\r\n # below you complete the computation by multiplying on the left and right\r\n # by w (wXXw)\r\n X_transpose = numpy.transpose(X)\r\n XX = numpy.dot(X_transpose, X)\r\n\r\n print(f'XX:\\n{XX}')\r\n\r\n #### YOUR CODE HERE ####\r\n # Now you'll complete the computation by multiplying on the left and right\r\n # by w to determine the final value: wXXw\r\n wXX = numpy.dot(w, XX)\r\n wXXw = numpy.dot(wXX, w)\r\n\r\n print(f'wXXw: {wXXw}')\r\n\r\n print(\"DONE exercise_9()\")\r\n\r\n return X, w, x_n1, x_n2, scalar_result, XX, wXXw",
"def readFiles(opt, path, pathCopyData,minlat, maxlat, minlon, maxlon , variables, estaciones):\n date = '\\d\\d\\d\\d-\\d\\d-\\d\\d'\n dirr = pathCopyData\n patron2 = re.compile(date)\n print(dirr + 'tfile.txt')\n tempfile = df.read_csv(dirr + 'tfile.txt')\n tempbase = df.read_csv(dirr + 'tbase.txt')\n tfile = list(tempfile.values.flatten())\n tbase = list(tempbase.values.flatten())\n tfileCopy = list(tempfile.values.flatten())\n tbaseCopy = list(tempbase.values.flatten())\n l = len(tfile)\n for i in range(l):\n tfil = tfile[i]\n tbas = tbase[i]\n ls = tbas + '/' + tfil\n f = patron2.findall(tfil)\n cadena = clearString(tfil)\n print(cadena)\n try:\n #net = open_netcdf(ls, tfil, cadena, pathCopyData)\n net = Dataset(ls)\n for xs in range(len(estaciones)):\n minlat1 = minlat[xs]\n maxlat1 = maxlat[xs]\n minlon1 = minlon[xs]\n maxlon1 = maxlon[xs]\n estacion = estaciones[xs]\n #checkFile(net, tfil, f[0], opt, path, minlat1, maxlat1, minlon1, maxlon1, variables, estacion)\n var_cut = []\n for i in variables:\n var = net.variables[i][:,int(minlat1):int(maxlat1),int(minlon1):int(maxlon1)]\n #print(LON)\n #print(var)\n #return\n # celda.append(var)\n # result = ne(var, LON, LAT, LONsize, LATsize, minlat, maxlat, minlon, maxlon)\n var_cut.append(var)\n\n for ls in range(len(var_cut)):\n saveData(var_cut[ls], variables[ls], f[0], opt, path, estacion)\n tfileCopy.remove(tfil)\n tbaseCopy.remove(tbas)\n except (OSError, EOFError) as e:\n print(e)\n fdata = df.DataFrame(tfileCopy, columns=['nameFile'])\n fbas = df.DataFrame(tbaseCopy, columns=['nameBase'])\n fdata.to_csv(dirr + 'tfile.txt', encoding='utf-8', index=False)\n fbas.to_csv(dirr + 'tbase.txt', encoding='utf-8', index=False)\n if os.path.exists(pathCopyData + cadena):\n os.remove(pathCopyData + cadena)\n sys.exit()\n # readFiles(1);\n except tarfile.ReadError:\n print('error2')\n # fdata = df.DataFrame(tfile,columns=['nameFile']);\n # fbas = df.DataFrame(tbase,columns=['nameBase']);\n # fdata.to_csv(dirr+'tfile.txt',encoding='utf-8',index=False);\n # fbas.to_csv(dirr+'tbase.txt',encoding='utf-8',index=False);\n # readFiles(1);\n except (KeyError, FileNotFoundError):\n print('ERROR DE LECTURA')",
"def load_fvcom_files(filepath=None,casename=None,ncname=None,neifile=None):\n\n currdir=os.getcwd()\n os.chdir(filepath)\n\n data=_load_grdfile(casename)\n\n data.update(_load_depfile(casename))\n \n data.update(_load_spgfile(casename))\n\n data.update(_load_obcfile(casename))\n\n data.update(_load_llfiles(casename))\n\n if ncname!=None:\n data.update(_load_nc(ncname))\n\n if neifile!=None:\n data.update(loadnei(neifile))\n\n os.chdir(currdir)\n\n return data",
"def update_file_vectors(self, audio_file):\n outer_df = defer.Deferred()\n\n def get_file_vector(val):\n # Take all the new PluginOutput objects and generate and\n # apply a single vector to represent the file.\n df = self.mine.calculate_file_vector(audio_file)\n return df\n\n def save_file(vector):\n logger.debug(\"--> Applying vector to %r %r\", audio_file, vector)\n logger.debug(\"--> Saving %r\", audio_file)\n audio_file.vector = vector\n df_s = self.model.save(audio_file)\n return df_s\n\n logger.debug(\"NB: Updating File Vectors for %r\", audio_file)\n df = self.get_vectors_eventually(audio_file)\n df.addCallback(get_file_vector)\n df.addCallback(save_file)\n\n return df",
"def mover_get_data(lfns,\n path,\n sitename,\n queuename,\n stageinTries,\n inputpoolfcstring=\"xmlcatalog_file:PoolFileCatalog.xml\",\n ub=\"outdated\", # to be removed\n dsname=\"\",\n dsdict={},\n rucio_dataset_dictionary={},\n guids=[],\n analysisJob=False,\n usect=True,\n pinitdir=\"\",\n proxycheck=True,\n spsetup=\"\",\n tokens=[],\n userid=\"\",\n inputDir=\"\",\n jobId=None,\n jobDefId=\"\",\n access_dict=None,\n scope_dict=None,\n workDir=\"\",\n DN=None,\n dbh=None,\n jobPars=\"\",\n cmtconfig=\"\",\n filesizeIn=[],\n checksumIn=[],\n transferType=None,\n experiment=\"\",\n eventService=False,\n sourceSite=\"\"):\n\n tolog(\"Mover get data started\")\n\n statusPFCTurl = None\n pilotErrorDiag = \"\"\n\n # FAX counters (will be reported in jobMetrics; only relevant when FAX has been activated after a stage-in failure)\n N_filesWithoutFAX = 0\n N_filesWithFAX = 0\n bytesWithoutFAX = 0L\n bytesWithFAX = 0L\n\n # FAX control variable, if FAX is used as primary site mover in combination with direct I/O\n usedFAXandDirectIO = False\n\n # The FAX variables above will be stored in a dictionary, to be returned by this function\n FAX_dictionary = {}\n\n # Is the DBRelease file available locally?\n DBReleaseIsAvailable = handleDBRelease(dbh, lfns, jobPars, path)\n\n # Should stage-in be aborted? (if there are only locally available DBRelease files in the stage-in list)\n if abortStageIn(dbh, lfns, DBReleaseIsAvailable):\n return 0, pilotErrorDiag, statusPFCTurl, FAX_dictionary\n\n # Setup the dictionary necessary for all instrumentation\n report = getInitialTracingReport(userid, sitename, dsname, \"get_sm\", analysisJob, jobId, jobDefId, DN)\n\n if stageinTries != 0:\n get_RETRY = min(stageinTries, MAX_NUMBER_OF_RETRIES)\n else:\n get_RETRY = MAX_RETRY\n get_TIMEOUT = 5*3600/get_RETRY\n\n fail = 0\n guidfname = {}\n error = PilotErrors()\n\n region = readpar('region')\n\n # Space tokens currently not used for input files\n # # check if there is are any space tokens\n # _token = getProperSpaceTokenList(token, listSEs, len(lfns))\n\n # Select the correct mover\n copycmd, setup = getCopytool(mode=\"get\")\n\n # Get the sitemover object corresponding to the default copy command\n sitemover = getSiteMover(copycmd, setup)\n\n # Get the experiment object\n thisExperiment = getExperiment(experiment)\n\n # Get the name for the PFC file\n _path = path\n if eventService:\n # Update the path (create the PFC in one level above the payload workdir)\n path = os.path.abspath(os.path.join(path, '..'))\n pfc_name = getPFCName(path, inputpoolfcstring)\n # done with the event server modification (related to the PFC generation), reset the path again\n path = _path\n\n # Build the file info dictionary (use the filesize and checksum from the dispatcher if possible) and create the PFC\n # Format: fileInfoDic[file_nr] = (guid, gpfn, fsize, fchecksum, filetype, copytool)\n # replicas_dic[guid1] = [ replica1, .. 
] where replicaN is an object of class replica\n ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic = \\\n getFileInfo(region, ub, queuename, guids, dsname, dsdict, lfns, pinitdir, analysisJob, tokens, DN, sitemover, error, path, dbh, DBReleaseIsAvailable,\\\n scope_dict, pfc_name=pfc_name, filesizeIn=filesizeIn, checksumIn=checksumIn, thisExperiment=thisExperiment)\n if ec != 0:\n return ec, pilotErrorDiag, statusPFCTurl, FAX_dictionary\n\n # Until the Mover PFC file is no longer needed, call the TURL based PFC \"PoolFileCatalogTURL.xml\"\n pfc_name_turl = pfc_name.replace(\".xml\", \"TURL.xml\")\n\n # Create a SURL to space token dictionary\n tokens_dictionary = getSurlTokenDictionary(lfns, tokens)\n\n # Create a TURL based PFC if necessary/requested (i.e. if copy tool should not be used [useCT=False] and\n # if oldPrefix and newPrefix are not already set in copysetup [useSetPrefixes=False])\n ec, pilotErrorDiag, createdPFCTURL, usect = PFC4TURLs(analysisJob, transferType, fileInfoDic, pfc_name_turl, sitemover, sitename, usect, dsdict, eventService, tokens_dictionary, sitename, sourceSite, lfns)\n if ec != 0:\n return ec, pilotErrorDiag, statusPFCTurl, FAX_dictionary\n\n # Correct the total file size for the DBRelease file if necessary\n totalFileSize = correctTotalFileSize(totalFileSize, fileInfoDic, lfns, dbh, DBReleaseIsAvailable)\n\n # Only bother with the size checks if the copy tool is to be used (non-direct access mode)\n if usect:\n # Get a proper maxinputsize from schedconfig/default \n _maxinputsize = getMaxInputSize()\n\n # Check the total input file size\n ec, pilotErrorDiag = verifyInputFileSize(totalFileSize, _maxinputsize, error)\n if ec != 0:\n return ec, pilotErrorDiag, statusPFCTurl, FAX_dictionary\n\n # Do we have enough local space to stage in all data and run the job?\n ec, pilotErrorDiag = verifyAvailableSpace(sitemover, totalFileSize, path, error)\n if ec != 0:\n return ec, pilotErrorDiag, statusPFCTurl, FAX_dictionary\n\n # Get the replica dictionary from file (used when the primary replica can not be staged due to some temporary error)\n replica_dictionary = getReplicaDictionaryFile(path)\n\n # file counters\n N_files_on_tape = 0\n N_root_files = 0\n N_non_root_files = 0\n\n # If FAX is used as a primary site mover then set the default FAX mode to true, otherwise to false (normal mode)\n if copycmd == \"fax\":\n usedFAXMode = True\n else:\n usedFAXMode = False\n\n # Use isOneByOneFileTransfer() to determine if files should be transferred one by one or all at once\n if not sitemover.isOneByOneFileTransfer():\n\n # Note: this mode is used by the aria2c site mover only\n # Normal stage-in is below\n\n tolog(\"All files will be transferred at once\")\n\n # Extract the file info for the first file in the dictionary\n guid, gpfn, lfn, fsize, fchecksum, filetype, copytool = extractInputFileInfo(fileInfoDic[0], lfns)\n file_access = getFileAccess(access_dict, lfn)\n dsname = getDataset(lfn, dsdict)\n\n # Perform stage-in using the sitemover wrapper method\n s, pErrorText = sitemover_get_all_data(sitemover, error, gpfn, lfn, path, fsize=fsize, spsetup=spsetup, fchecksum=fchecksum,\\\n guid=guid, analysisJob=analysisJob, usect=usect, pinitdir=pinitdir, proxycheck=proxycheck,\\\n sitename=sitename, token=None, timeout=get_TIMEOUT, dsname=dsname, userid=userid, report=report,\\\n access=file_access, inputDir=inputDir, jobId=jobId, workDir=workDir, cmtconfig=cmtconfig, lfns=lfns,\\\n experiment=experiment, replicas_dic=replicas_dic, dsdict=dsdict, 
scope_dict=scope_dict)\n if s != 0:\n tolog('!!WARNING!!2999!! Failed during stage-in of multiple files: %s' % (error.getErrorStr(s)))\n tolog(\"Exit code: %s\" % (s))\n fail = s\n\n # Normal stage-in (one by one file transfers)\n if sitemover.isOneByOneFileTransfer() or fail != 0:\n \n tolog(\"Files will be transferred one by one\")\n\n # Reset any previous failure\n fail = 0\n\n # Loop over all files in the file info dictionary\n number_of_files = len(fileInfoDic.keys())\n tolog(\"Will process %d file(s)\" % (number_of_files))\n for nr in range(number_of_files):\n # Extract the file info from the dictionary\n guid, gpfn, lfn, fsize, fchecksum, filetype, copytool = extractInputFileInfo(fileInfoDic[nr], lfns)\n\n # Has the copycmd/copytool changed? (E.g. due to FAX) If so, update the sitemover object\n if copytool != copycmd:\n copycmd = copytool\n # Get the sitemover object corresponding to the new copy command\n sitemover = getSiteMover(copycmd, setup)\n tolog(\"Site mover object updated since copytool has changed\")\n\n # Update the dataset name\n dsname = getDataset(lfn, dsdict)\n proper_dsname = getDataset(lfn, rucio_dataset_dictionary)\n scope = getFileScope(scope_dict, lfn)\n\n # Update the tracing report with the proper container/dataset name\n report = updateReport(report, gpfn, proper_dsname, fsize, sitemover)\n report['scope'] = scope\n\n # The DBRelease file might already have been handled, go to next file\n if isDBReleaseFile(dbh, lfn) and DBReleaseIsAvailable:\n updateFileState(lfn, workDir, jobId, mode=\"transfer_mode\", state=\"no_transfer\", type=\"input\")\n guidfname[guid] = lfn # needed for verification below\n continue\n else:\n tolog(\"(Not a DBRelease file)\")\n\n tolog(\"Mover is preparing to copy file %d/%d (lfn: %s guid: %s dsname: %s)\" % (nr+1, number_of_files, lfn, guid, dsname))\n tolog('Copying %s to %s (file catalog checksum: \\\"%s\\\", fsize: %s) using %s (%s)' %\\\n (gpfn, path, fchecksum, fsize, sitemover.getID(), sitemover.getSetup()))\n\n # Get the number of replica retries\n get_RETRY_replicas = getNumberOfReplicaRetries(createdPFCTURL, replica_dictionary, guid)\n\n file_access = getFileAccess(access_dict, lfn)\n\n # Loop over get function to allow for multiple get attempts for a file\n will_use_direct_io = False\n get_attempt = 0\n\n #get_RETRY = 1 #2 #PN\n while get_attempt < get_RETRY:\n if get_attempt > 0:\n _rest = 5*60\n tolog(\"(Waiting %d seconds before next stage-in attempt)\" % (_rest))\n sleep(_rest)\n tolog(\"Get attempt %d/%d\" % (get_attempt + 1, get_RETRY))\n replica_number = 0\n replica_transferred = False\n s = 1\n\n # Loop over replicas\n while s != 0 and replica_number < get_RETRY_replicas:\n # Grab the gpfn from the replicas dictionary in case alternative replica stage-in is allowed\n gpfn = getAlternativeReplica(gpfn, guid, replica_number, createdPFCTURL, replica_dictionary)\n\n # Perform stage-in using the sitemover wrapper method\n s, pErrorText, N_files_on_tape, N_root_files, N_non_root_files, replica_transferred, will_use_direct_io = sitemover_get_data(sitemover, error,\\\n get_RETRY, get_RETRY_replicas, get_attempt,\\\n replica_number, N_files_on_tape, N_root_files,\\\n N_non_root_files, gpfn, lfn, path,\\\n fsize=fsize, spsetup=spsetup, fchecksum=fchecksum,\\\n guid=guid, analysisJob=analysisJob, usect=usect,\\\n pinitdir=pinitdir, proxycheck=proxycheck,\\\n sitename=sitename, token=None, timeout=get_TIMEOUT,\\\n dsname=dsname, userid=userid, report=report,\\\n access=file_access, inputDir=inputDir, jobId=jobId,\\\n 
workDir=workDir, cmtconfig=cmtconfig,\\\n experiment=experiment, scope_dict=scope_dict,\\\n sourceSite=sourceSite)\n # Get out of the multiple replica loop\n if replica_transferred:\n break\n\n # Increase the replica attempt counter in case the previous replica could not be transferred\n replica_number += 1\n\n # Get out of the multiple get attempt loop\n if replica_transferred:\n break\n\n # Increase the get attempt counter in case of failure to transfer the file\n get_attempt += 1\n\n # Increase the successful file transfer counter (used only when reporting FAX transfers)\n if s == 0:\n # note the special case if FAX is the primary site mover (normally FAX is the fallback)\n if sitemover.copyCommand == \"fax\":\n N_filesWithFAX += 1\n bytesWithFAX += long(fsize)\n else:\n # Normal case\n N_filesWithoutFAX += 1\n bytesWithoutFAX += long(fsize)\n\n if s != 0:\n # Normal stage-in failed, now try with FAX if possible\n if error.isPilotFAXErrorCode(s):\n if isFAXAllowed(filetype, gpfn) and transferType != \"fax\" and sitemover.copyCommand != \"fax\": # no point in trying to fallback to fax if the fax transfer above failed\n tolog(\"Normal stage-in failed, will attempt to use FAX\")\n usedFAXMode = True\n\n # Get the FAX site mover\n old_sitemover = sitemover\n sitemover = getSiteMover(\"fax\", \"\")\n\n # Perform stage-in using the sitemover wrapper method\n s, pErrorText, N_files_on_tape, N_root_files, N_non_root_files, replica_transferred, will_use_direct_io = sitemover_get_data(sitemover, error,\\\n get_RETRY, get_RETRY_replicas, get_attempt, replica_number,\\\n N_files_on_tape, N_root_files, N_non_root_files,\\\n gpfn, lfn, path,\\\n fsize=fsize, spsetup=spsetup, fchecksum=fchecksum,\\\n guid=guid, analysisJob=analysisJob, usect=usect,\\\n pinitdir=pinitdir, proxycheck=proxycheck,\\\n sitename=sitename, token=None, timeout=get_TIMEOUT,\\\n dsname=dsname, userid=userid, report=report,\\\n access=file_access, inputDir=inputDir, jobId=jobId,\\\n workDir=workDir, cmtconfig=cmtconfig, experiment=experiment)\n if replica_transferred:\n tolog(\"FAX site mover managed to transfer file from remote site (resetting error code to zero)\")\n pilotErrorDiag = \"\"\n s = 0\n\n # Increase the successful FAX transfer counter\n N_filesWithFAX += 1\n bytesWithFAX += long(fsize)\n else:\n tolog(\"FAX site mover also failed to transfer file from remote site, giving up\")\n\n # restore the old sitemover\n del sitemover\n sitemover = old_sitemover\n else:\n tolog(\"(Not an error code eligible for FAX fail-over)\")\n\n if s != 0:\n tolog('!!FAILED!!2999!! Failed to transfer %s: %s (%s)' % (os.path.basename(gpfn), s, error.getErrorStr(s)))\n tolog(\"Exit code: %s\" % (s))\n\n # report corrupt file to consistency server if needed\n if s == error.ERR_GETADMISMATCH or s == error.ERR_GETMD5MISMATCH or s == error.ERR_GETWRONGSIZE or s == error.ERR_NOSUCHFILE:\n reportFileCorruption(gpfn, sitemover)\n\n # exception for object stores\n if (gpfn.startswith(\"s3:\") or 'objectstore' in gpfn) and '.log.tgz' in gpfn:\n tolog(\"!!FAILED!!2999!! Failed to transfer a log file from S3 objectstore. 
Will skip it and continue the job.\")\n else:\n fail = s\n break\n\n # Build the dictionary used to create the PFC for the TRF\n # In the case of FAX, use the global paths if direct access is to be used for the particlar file\n if usedFAXMode and will_use_direct_io:\n # The site mover needed here is the FAX site mover since the global file path methods are defined there only\n old_sitemover = sitemover\n sitemover = getSiteMover(\"fax\", \"\")\n guidfname[guid] = sitemover.findGlobalFilePath(lfn, dsname, sitename, sourceSite)\n\n # Restore the old sitemover\n del sitemover\n sitemover = old_sitemover\n\n # If FAX is used as a primary site mover, in combination with direct access, set the usedFAXandDirectIO flag\n # this will later be used to update the run command (e.g. --lfcHost is not needed etc)\n if copycmd == \"fax\":\n usedFAXandDirectIO = True\n else:\n guidfname[guid] = lfn # local_file_name\n\n if fail == 0:\n # Make sure the PFC has the correct number of files\n fail, pilotErrorDiag = verifyPFCIntegrity(guidfname, lfns, dbh, DBReleaseIsAvailable, error)\n\n # Now that the Mover PFC file is no longer needed, back it up and rename the TURL based PFC if it exists\n # (the original PFC is no longer needed. Move it away, and then create the PFC for the trf/runAthena)\n # backupPFC4Mover(pfc_name)\n\n # Create a standard PFC with SURLs if needed (basically this is default)\n # note: if FAX was used as a primary site mover in combination with direct I/O, then the SURLs will actually be TURLs\n # but there is no need to use the special TURL creation method PFC4TURL used above (FAX will have returned the TURLs instead)\n createStandardPFC4TRF(createdPFCTURL, pfc_name_turl, pfc_name, guidfname)\n\n tolog(\"Number of identified root files : %d\" % (N_root_files))\n tolog(\"Number of transferred non-root files: %d\" % (N_non_root_files))\n\n if usedFAXMode:\n tolog(\"Number of files without FAX : %d (normal transfers)\" % (N_filesWithoutFAX))\n tolog(\"Number of files with FAX : %d (successful FAX transfers)\" % (N_filesWithFAX))\n tolog(\"Bytes without FAX : %d (normal transfers)\" % (bytesWithoutFAX))\n tolog(\"Bytes with FAX : %d (successful FAX transfers)\" % (bytesWithFAX))\n\n if N_files_on_tape > 0:\n tolog(\"!!WARNING!!2999!! Number of skipped files: %d (not staged)\" % (N_files_on_tape))\n if N_root_files == 0:\n # This should only happen for user jobs\n tolog(\"Mover get_data failed since no root files could be transferred\")\n fail = error.ERR_NOSTAGEDFILES\n else:\n tolog(\"Mover get_data finished (partial)\")\n else:\n if fail == 0:\n tolog(\"Get successful\")\n tolog(\"Mover get_data finished\")\n else:\n tolog(\"Mover get_data finished (failed)\")\n tolog(\"Will return exit code = %d, pilotErrorDiag = %s\" % (fail, pilotErrorDiag)) \n\n # Now populate the FAX dictionary before finishing\n FAX_dictionary = getFAXDictionary(N_filesWithoutFAX, N_filesWithFAX, bytesWithoutFAX, bytesWithFAX, usedFAXandDirectIO)\n\n return fail, pilotErrorDiag, statusPFCTurl, FAX_dictionary",
"def loadScalarField(self):\n\n\t\tif self.beta is None:\n\t\t\traise ForcingException(\"Must supply concentration response factor\")\n\n\t\tif self._mortality_fname is None or self._mortality_var is None:\n\t\t\traise ForcingException(\"Must supply mortality file\")\n\n\t\tif self._pop_fname is None or self._pop_var is None:\n\t\t\traise ForcingException(\"Must supply population file\")\n\n\t\t# This is optional\n\t\t#if self.vsl is None:\n\t\t#\traise ForcingException(\"Must specify statistical value of life (in millions)\")\n\n\t\t# Open the mortality file\n\t\ttry:\n\t\t\tmortality = DataFile(self._mortality_fname, mode='r', open=True)\n\t\texcept IOError as ex:\n\t\t\tForcing.error(\"Error! Cannot open mortality file %s. File exists? %r\"%(self._mortality_fname, os.path.isfile(self._mortality_fname)))\n\t\t\traise\n\n\t\t# Check dimensions\n\t\tif not (mortality.dimensions['COL'] == self.ni and mortality.dimensions['ROW'] == self.nj):\n\t\t\traise ValueError(\"Error, dimensions in mortality file %s do not match domain.\"%self._mortality_fname)\n\n\t\t# Read the field\n\t\ttry:\n\t\t\t# dims are TSTEP, LAY, ROW, COL.. so skip TSTEP and LAY\n\t\t\t# this should be made more general, or the file should be made better.\n\t\t\tmfld = mortality.variables[self._mortality_var][0][0]\n\t\texcept IOError as e:\n\t\t\traise e\n\t\texcept IndexError as e:\n\t\t\traise ForcingFileDimensionException(\"Mortality NetCDF file seems to have incompatible dimensions. Currently require shape (TSTEP, LAY, ROW, COL). This is marked to be improved, as the data does not vary with time or layer.\")\n\n\t\t# Close the file\n\t\tif self._pop_fname != self._pop_fname:\n\t\t\tmortality.close()\n\n\t\t\t# Open the population file\n\t\t\ttry:\n\t\t\t\tpop = DataFile(self._pop_fname, mode='r', open=True)\n\t\t\texcept IOError as ex:\n\t\t\t\tForcing.error(\"Error! Cannot open population file %s\"%(self._pop_fname))\n\t\t\t\traise\n\n\t\t\t# Check dimensions\n\t\t\tif not (pop.dimensions['COL'] == self.ni and pop.dimensions['ROW'] == self.nj):\n\t\t\t\traise ValueError(\"Error, dimensions in population file %s do not match domain.\"%self._pop_fname)\n\t\telse:\n\t\t\t# Same file?\n\t\t\tpop = mortality\n\n\t\t# Read the field\n\t\ttry:\n\t\t\t# dims are TSTEP, LAY, ROW, COL.. so skip TSTEP and LAY\n\t\t\tpfld = pop.variables[self._pop_var][0][0]\n\t\texcept IOError as e:\n\t\t\traise e\n\t\texcept IndexError as e:\n\t\t\traise ForcingFileDimensionException(\"Population NetCDF file seems to have incompatible dimensions. Currently require shape (TSTEP, LAY, ROW, COL). 
This is marked to be improved, as the data does not vary with time or layer.\")\n\n\n\t\tpop.close()\n\n\t\t# Debug, remember, when debugging this against plotted data or fortran\n\t\t# code: values like (70,70) started at index 1 whereas we started at\n\t\t# index 0, so (70,70)=(69,69)\n\t\t#print \"[j=%d,i=%d] = mfld * mfld_scale * pfld * self.beta / 365 = %e %e %e %e %e = %e\"%(self.debug_j, self.debug_i, mfld[self.debug_j,self.debug_i], (10.**-4), pfld[self.debug_j,self.debug_i], self.beta, 365.0, mfld[self.debug_j,self.debug_i]*(10.**-4)*pfld[self.debug_j,self.debug_i]*self.beta/365.0)\n\n\t\t# (mfld * pfld) is element wise multiplication, not matrix multiplication\n\t\t# Take leap years into account?\n\t\tForcing.debug(\"[TODO]: Leap years are not yet accounted for.\")\n\t\tself.timeInvariantScalarMultiplcativeFld = mfld * self.mort_scale / 365.0 * pfld * self.beta\n\t\tif self.vsl is not None:\n\t\t\tself.timeInvariantScalarMultiplcativeFld = self.timeInvariantScalarMultiplcativeFld * self.vsl"
] | [
"0.5547697",
"0.51804596",
"0.51175356",
"0.50851357",
"0.49448034",
"0.49235836",
"0.49108392",
"0.49066615",
"0.49041834",
"0.4902881",
"0.48873246",
"0.48858833",
"0.48858833",
"0.48857036",
"0.48537314",
"0.47822648",
"0.47778893",
"0.4767607",
"0.47633898",
"0.4755537",
"0.47510976",
"0.47500542",
"0.47469684",
"0.4725044",
"0.47235817",
"0.47211826",
"0.4720177",
"0.47131965",
"0.47125056",
"0.47107178"
] | 0.62348145 | 0 |
Prepare the file locker. Specify the file to lock and optionally the maximum timeout and the delay between each attempt to lock. | def __init__(self, file_name, timeout=10, delay=.05):
self.is_locked = False
#self.lockfile = os.path.join(os.getcwd(), "%s.lock" % file_name)
self.lockfile = file_name + '.lock'
self.file_name = file_name
self.timeout = timeout
self.delay = delay | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, file_name, timeout=10, delay=.05):\n self.is_locked = False\n self.lockfile = os.path.abspath(file_name)\n self.file_name = file_name\n self.timeout = timeout\n self.delay = delay\n self.fd = None",
"def __init__(self, protected_file_path, timeout=None, delay=1, lock_file_contents=None):\n self.is_locked = False\n self.lockfile = protected_file_path + \".lock\"\n self.timeout = timeout\n self.delay = delay\n self._lock_file_contents = lock_file_contents\n if self._lock_file_contents is None:\n self._lock_file_contents = \"Owning process args:\\n\"\n for arg in sys.argv:\n self._lock_file_contents += arg + \"\\n\"",
"def __init__(self, file_name, timeout=10, delay=0.05):\n self.file_name = os.path.abspath(file_name)\n self.lockfile = os.path.abspath(file_name) + \".lock\"\n self.timeout = float(timeout)\n self.delay = float(delay)\n self.is_locked = False\n\n if self.delay > self.timeout or self.delay <= 0 or self.timeout <= 0:\n raise ValueError(\"delay and timeout must be positive with delay \" \"<= timeout\")",
"def __init__(self, dir, timeout=None):\n self.dir = dir\n if timeout is not None and timeout < 2.0:\n raise ValueError('timeout must be at least 2 seconds')\n self.timeout = timeout\n if self.fileName:\n self.lockDir = os.path.join(dir, self.fileName)\n self._makeDir()\n else:\n self.lockDir = dir \n self._locked = False",
"def acquire(self):\n start_time = time.time()\n while True:\n try:\n self.fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)\n break\n except (OSError,) as e:\n if e.errno != errno.EEXIST:\n raise\n if (time.time() - start_time) >= self.timeout:\n raise FileLockException(f\"{self.lockfile}: Timeout occurred.\")\n time.sleep(self.delay)\n\n self.is_locked = True",
"def acquire(self):\r\n start_time = time.time()\r\n import getpass\r\n userName = getpass.getuser()\r\n import platform\r\n computerName = platform.uname()[1]\r\n while True:\r\n try:\r\n self.fd = os.open(self.lockfile, os.O_CREAT|os.O_EXCL|os.O_RDWR)\r\n os.write(self.fd, userName + '\\n')\r\n os.write(self.fd, computerName + '\\n')\r\n os.write(self.fd, time.ctime(time.time()))\r\n break;\r\n except OSError as e:\r\n if e.errno != errno.EEXIST and e.errno != errno.EACCES:\r\n raise \r\n if (time.time() - start_time) >= self.timeout:\r\n if e.errno == errno.EEXIST:\r\n raise FileLockException(\"Timeout occured.\")\r\n else:\r\n raise FileLockException(\"Access denied.\")\r\n time.sleep(self.delay)\r\n self.is_locked = True",
"def lock(self, timeout=0):\n if timeout:\n timeout_time = time.time() + timeout\n # Make sure my temp lockfile exists, and that its contents are\n # up-to-date (e.g. the temp file name, and the lock lifetime).\n self.__write()\n # TBD: This next call can fail with an EPERM. I have no idea why, but\n # I'm nervous about wrapping this in a try/except. It seems to be a\n # very rare occurence, only happens from cron, and (only?) on Solaris\n # 2.6.\n self.__touch()\n\n while True:\n # Create the hard link and test for exactly 2 links to the file\n try:\n os.link(self.__tmpfname, self.__lockfile)\n # If we got here, we know we know we got the lock, and never\n # had it before, so we're done. Just touch it again for the\n # fun of it.\n self.__touch()\n break\n except OSError, e:\n # The link failed for some reason, possibly because someone\n # else already has the lock (i.e. we got an EEXIST), or for\n # some other bizarre reason.\n if e.errno == errno.ENOENT:\n # TBD: in some Linux environments, it is possible to get\n # an ENOENT, which is truly strange, because this means\n # that self.__tmpfname doesn't exist at the time of the\n # os.link(), but self.__write() is supposed to guarantee\n # that this happens! I don't honestly know why this\n # happens, but for now we just say we didn't acquire the\n # lock, and try again next time.\n pass\n elif e.errno <> errno.EEXIST:\n # Something very bizarre happened. Clean up our state and\n # pass the error on up.\n os.unlink(self.__tmpfname)\n raise\n elif self.__linkcount() <> 2:\n # Somebody's messin' with us!\n pass\n elif self.__read() == self.__tmpfname:\n # It was us that already had the link.\n raise AlreadyLockedError\n # otherwise, someone else has the lock\n pass\n # We did not acquire the lock, because someone else already has\n # it. Have we timed out in our quest for the lock?\n if timeout and timeout_time < time.time():\n os.unlink(self.__tmpfname)\n raise TimeOutError\n # Okay, we haven't timed out, but we didn't get the lock. Let's\n # find if the lock lifetime has expired.\n if time.time() > self.__releasetime():\n # Yes, so break the lock.\n self.__break()\n # Okay, someone else has the lock, our claim hasn't timed out yet,\n # and the expected lock lifetime hasn't expired yet. So let's\n # wait a while for the owner of the lock to give it up.\n self.__sleep()",
"def Lock(self, timeout_secs=_DEFAULT_TIMEOUT_SECS):\n if self._file_desc is not None:\n raise OSError(\"%s has been locked.\" % self._file_path)\n parent_dir = os.path.dirname(self._file_path)\n if not os.path.exists(parent_dir):\n os.makedirs(parent_dir)\n successful = False\n self._file_desc = os.open(self._file_path, os.O_CREAT | os.O_RDWR,\n 0o666)\n try:\n successful = self._Flock(timeout_secs)\n finally:\n if not successful:\n os.close(self._file_desc)\n self._file_desc = None\n return successful",
"def acquire(lockfile, timeout=None):\n\tif timeout is None:\n\t\ttimeout = max_timeout # 100yrs should suffice\n\tretries = int(float(timeout)/wait_interval)\n\n\t_lock_acquire(lockfile, retries)\n\t\n\treturn lockfile",
"def lock(tmp_dir, timeout=120, min_wait=5, max_wait=10, verbosity=1):\r\n # Create base of lock directory if required.\r\n base_lock = os.path.dirname(tmp_dir)\r\n if not os.path.isdir(base_lock):\r\n try:\r\n os.makedirs(base_lock)\r\n except OSError:\r\n # Someone else was probably trying to create it at the same time.\r\n # We wait two seconds just to make sure the following assert does\r\n # not fail on some NFS systems.\r\n time.sleep(2)\r\n assert os.path.isdir(base_lock)\r\n\r\n # Variable initialization.\r\n lock_file = os.path.join(tmp_dir, 'lock')\r\n random.seed()\r\n my_pid = os.getpid()\r\n no_display = (verbosity == 0)\r\n\r\n nb_error = 0\r\n # The number of time we sleep when their is no errors.\r\n # Used to don't display it the first time to display it less frequently.\r\n # And so don't get as much email about this!\r\n nb_wait = 0\r\n # Acquire lock.\r\n while True:\r\n try:\r\n last_owner = 'no_owner'\r\n time_start = time.time()\r\n other_dead = False\r\n while os.path.isdir(tmp_dir):\r\n try:\r\n read_owner = open(lock_file).readlines()[0].strip()\r\n # the try is transtion code for old locks\r\n # it may be removed when poeple have upgraded\r\n try:\r\n other_host = read_owner.split('_')[2]\r\n except IndexError:\r\n other_host = () # make sure it isn't equal to any host\r\n if other_host == socket.gethostname():\r\n try:\r\n os.kill(int(read_owner.split('_')[0]), 0)\r\n except OSError:\r\n other_dead = True\r\n except AttributeError:\r\n pass #os.kill does not exist on windows\r\n except Exception:\r\n read_owner = 'failure'\r\n if other_dead:\r\n if not no_display:\r\n msg = \"process '%s'\" % read_owner.split('_')[0]\r\n _logger.warning(\"Overriding existing lock by dead %s \"\r\n \"(I am process '%s')\", msg, my_pid)\r\n get_lock.unlocker.unlock()\r\n continue\r\n if last_owner == read_owner:\r\n if (timeout is not None and\r\n time.time() - time_start >= timeout):\r\n # Timeout exceeded or locking process dead.\r\n if not no_display:\r\n if read_owner == 'failure':\r\n msg = 'unknown process'\r\n else:\r\n msg = \"process '%s'\" % read_owner.split('_')[0]\r\n _logger.warning(\"Overriding existing lock by %s \"\r\n \"(I am process '%s')\", msg, my_pid)\r\n get_lock.unlocker.unlock()\r\n continue\r\n else:\r\n last_owner = read_owner\r\n time_start = time.time()\r\n no_display = (verbosity == 0)\r\n if not no_display and nb_wait > 0:\r\n if read_owner == 'failure':\r\n msg = 'unknown process'\r\n else:\r\n msg = \"process '%s'\" % read_owner.split('_')[0]\r\n _logger.info(\"Waiting for existing lock by %s (I am \"\r\n \"process '%s')\", msg, my_pid)\r\n _logger.info(\"To manually release the lock, delete %s\",\r\n tmp_dir)\r\n if verbosity <= 1:\r\n no_display = True\r\n nb_wait += 1\r\n time.sleep(random.uniform(min_wait, max_wait))\r\n\r\n try:\r\n os.mkdir(tmp_dir)\r\n except OSError:\r\n # Error while creating the directory: someone else\r\n # must have tried at the exact same time.\r\n nb_error += 1\r\n if nb_error < 10:\r\n continue\r\n else:\r\n raise\r\n # Safety check: the directory should be here.\r\n assert os.path.isdir(tmp_dir)\r\n\r\n # Write own id into lock file.\r\n unique_id = refresh_lock(lock_file)\r\n\r\n # Verify we are really the lock owner (this should not be needed,\r\n # but better be safe than sorry).\r\n owner = open(lock_file).readlines()[0].strip()\r\n if owner != unique_id:\r\n # Too bad, try again.\r\n continue\r\n else:\r\n # We got the lock, hoorray!\r\n return\r\n\r\n except Exception, e:\r\n # If something wrong happened, we 
try again.\r\n _logger.warning(\"Something wrong happened: %s %s\", type(e), e)\r\n nb_error += 1\r\n if nb_error > 10:\r\n raise\r\n time.sleep(random.uniform(min_wait, max_wait))\r\n continue",
"def __init__(self, dir, timeout=None):\n ExclusiveLock.__init__(self, dir, timeout)\n writeLockDir = os.path.join(self.dir, WriteLock.fileName)\n self.writeLock = ExclusiveLock(writeLockDir, timeout)",
"def acquire(self, timeout=None):\r\n try:\r\n open(self.unique_name, \"wb\").close()\r\n except IOError:\r\n raise LockFailed\r\n\r\n end_time = time.time()\r\n if timeout is not None and timeout > 0:\r\n end_time += timeout\r\n\r\n while True:\r\n # Try and create a hard link to it.\r\n try:\r\n os.link(self.unique_name, self.lock_file)\r\n except OSError:\r\n # Link creation failed. Maybe we've double-locked?\r\n nlinks = os.stat(self.unique_name).st_nlink\r\n if nlinks == 2:\r\n # The original link plus the one I created == 2. We're\r\n # good to go.\r\n return\r\n else:\r\n # Otherwise the lock creation failed.\r\n if timeout is not None and time.time() > end_time:\r\n os.unlink(self.unique_name)\r\n if timeout > 0:\r\n raise LockTimeout\r\n else:\r\n raise AlreadyLocked\r\n time.sleep(timeout is not None and timeout/10 or 0.1)\r\n else:\r\n # Link creation succeeded. We're good to go.\r\n return",
"def acquire(self):\n start_time = time.time()\n while True:\n # 当前文件锁对象未有加锁,执行加锁\n if self.fd is None:\n try:\n # 独占式打开文件\n lock_dir = os.path.dirname(self.lockfile)\n if not os.path.isdir(lock_dir):\n os.makedirs(lock_dir, exist_ok=True)\n self.fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)\n break\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n # 超时\n if (time.time() - start_time) >= self.timeout:\n raise FileLockException(\"Timeout occured.\")\n # 本次加锁失败,需要等待\n time.sleep(self.delay)\n self.is_locked = True",
"def __init__(self, dir, timeout=None, readlocktimeout=None):\n ExclusiveLock.__init__(self, dir, timeout)\n if readlocktimeout is None:\n self.readlocktimeout = timeout\n else:\n self.readlocktimeout = readlocktimeout",
"def acquire(self, timeout=None):\n if self._locked:\n raise RuntimeError(\"lock already locked\")\n if self.writeLock.acquire(timeout):\n try:\n self.lockDir = tempfile.mkdtemp('', self.fileName, self.dir)\n self._locked = True\n # log('acquired read lock: %s\\n' % self.lockDir)\n return True\n finally:\n self.writeLock.release() \n return False",
"def testUnlockWait(t, env):\n c = env.c1\n c.init_connection()\n fh, stateid = c.create_confirm(t.code)\n res = c.lock_file(t.code, fh, stateid, 20, 100)\n check(res, msg=\"Locking file %s\" % t.code)\n sleeptime = c.getLeaseTime() * 2\n env.sleep(sleeptime)\n ops = c.use_obj(fh)\n ops += [c.locku_op(READ_LT, 1, res.lockid, 0, 0xffffffffffffffff)]\n _replay(c, ops, [NFS4_OK, NFS4ERR_EXPIRED])",
"def __init__(self, lockfile, lifetime=DEFAULT_LOCK_LIFETIME):\n self.__lockfile = lockfile\n self.__lifetime = lifetime\n # This works because we know we're single threaded\n self.__counter = LockFile.COUNTER\n LockFile.COUNTER += 1\n self.__tmpfname = '%s.%s.%d.%d' % (lockfile, \n socket.gethostname(),\n os.getpid(),\n self.__counter)",
"def _file_open_rlock(self, preset_type, timeout=1.0):\n\n if self._fd is None:\n path = self._path(preset_type)\n with open(path, 'r+') as fd:\n # Set up file lock timeout with a raising handler\n # We will need this handler due to PEP 475\n def interrupt(signum, frame):\n raise InterruptedError()\n\n old_handler = signal.signal(signal.SIGALRM, interrupt)\n try:\n signal.setitimer(signal.ITIMER_REAL, timeout)\n fcntl.flock(fd, fcntl.LOCK_EX)\n except InterruptedError:\n # Ignore interrupted and proceed to cleanup\n pass\n finally:\n # Clean up file lock timeout\n signal.setitimer(signal.ITIMER_REAL, 0)\n signal.signal(signal.SIGALRM, old_handler)\n # Error now if we still can't get the lock.\n # Getting lock twice is safe.\n fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)\n logger.debug('acquired lock for %s', path)\n self._fd = fd\n yield fd\n fcntl.flock(fd, fcntl.LOCK_UN)\n logger.debug('released lock for %s', path)\n self._fd = None\n else:\n logger.debug('using already open file descriptor')\n yield self._fd",
"def __init__(self, fname, lockduration=10, verbosity=0):\n self._verbosity = verbosity\n self._lockduration = lockduration\n fname = op.normpath(fname)\n self._basedir = op.dirname(fname)\n self._lockfilename = \"%s.lock\" % op.basename(fname)\n self._uniquename = \",\".join((\n self._lockfilename, socket.getfqdn(), str(os.getpid()),\n str(uuid.uuid4())[-12:],\n ))\n self._uniquefile_created = False\n self._p(\" File to lock: %s\" % fname)\n self._p(\"Lockfile name: %s\" % self._lockfilename)\n self._p(\" Unique name: %s\" % self._uniquename)",
"def testLock(t, env):\n c = env.c1\n c.init_connection()\n # Create a file and partially lock it\n fh, stateid = c.create_confirm(t.code)\n res = c.lock_file(t.code, fh, stateid, 20, 100)\n check(res, msg=\"Locking file %s\" % t.code)\n # Create and replay LOCK ops\n ops = c.use_obj(fh)\n lock_owner = exist_lock_owner4(res.lockid, 1)\n locker = locker4(FALSE, lock_owner=lock_owner)\n ops += [c.lock_op(WRITE_LT, FALSE, 0, 10, locker)]\n _replay(c, ops)",
"def FSLockExclusive(filepath, timeout=None):\n return _lock(filepath, True, timeout=timeout)",
"def _wait_for_lockfile(self, lockfile_path: Path) -> None:\n if not lockfile_path.exists():\n return\n\n # The first second is free.\n start = time.time()\n time.sleep(1)\n if not lockfile_path.exists():\n return\n\n # After the first second, we print one message, then we stay silent for 10 minutes, at\n # which time we print a message every minute.\n def time_elapsed() -> float:\n return time.time() - start\n self.logger.info(\"Starting to wait for %s\", lockfile_path)\n next_message_time = time.time() + 16 * 60\n while lockfile_path.exists():\n if next_message_time - time.time() < 0:\n self.logger.warning(\n \"Lockfile %s has been blocked for %.0f seconds\",\n lockfile_path,\n time_elapsed())\n next_message_time = time.time() + 60\n time.sleep(1)",
"def _Flock(self, timeout_secs):\n try:\n if timeout_secs > 0:\n wrapper = utils.TimeoutException(timeout_secs)\n wrapper(fcntl.flock)(self._file_desc, fcntl.LOCK_EX)\n else:\n fcntl.flock(self._file_desc, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except errors.FunctionTimeoutError as e:\n logger.debug(\"Cannot lock %s within %s seconds\",\n self._file_path, timeout_secs)\n return False\n except (OSError, IOError) as e:\n # flock raises IOError in python2; OSError in python3.\n if e.errno in (errno.EACCES, errno.EAGAIN):\n logger.debug(\"Cannot lock %s\", self._file_path)\n return False\n raise\n return True",
"def test_waits_on_existing_lockfile(self):\n self.lock.__enter__()\n self.assertTrue(os.path.exists(self.lock.lockfile_path))\n\n def exit_first_lock():\n time.sleep(0.1)\n self.lock.__exit__(None, None, None)\n thread = threading.Thread(target=exit_first_lock)\n thread.start()\n\n new_lock = disk.DiskDatasetLock(self.dataset, timeout_sec=1)\n new_lock.__enter__()\n\n thread.join()",
"def acquire(path, onwait=None):\r\n\r\n touch(path)\r\n lock_fd = lock_file(path, blocking=False)\r\n if not lock_fd:\r\n blocking = True\r\n with open(path, 'r') as fd:\r\n pid = int(fd.read().strip())\r\n if onwait:\r\n blocking = onwait(pid)\r\n if not blocking:\r\n return None\r\n lock_fd = lock_file(path, blocking=blocking)\r\n\r\n lock_fd.truncate(0)\r\n lock_fd.write('%d\\n' % os.getpid())\r\n lock_fd.flush()\r\n return Lock(lock_fd)",
"def _lock_process(pipe, filepath, exclusive, timeout=None):\n try:\n # Reset signal handlers\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n signal.signal(signal.SIGHUP, signal.SIG_DFL)\n signal.signal(signal.SIGTERM, signal.SIG_DFL)\n\n # Open the file\n mode = os.O_RDONLY | os.O_CREAT if exclusive else os.O_RDONLY\n try:\n fd = os.open(filepath, mode)\n except FileNotFoundError:\n pipe.send('NOTFOUND')\n return\n\n # Lock it\n op = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH\n if timeout is None:\n fcntl.flock(fd, op)\n elif timeout == 0:\n try:\n fcntl.flock(fd, op | fcntl.LOCK_NB)\n except BlockingIOError:\n pipe.send('TIMEOUT')\n return\n else:\n with timeout_syscall(timeout):\n try:\n fcntl.flock(fd, op)\n except InterruptedError:\n pipe.send('TIMEOUT')\n return\n pipe.send('LOCKED')\n except Exception:\n pipe.send('ERROR')\n raise\n\n # Wait for unlock message then exit\n assert pipe.recv() == 'UNLOCK'\n\n # Exiting releases the lock",
"def acquire(self, timeout=None):\n timer = self.timerClass(timeout)\n timer.start()\n while timer.haveTime():\n try:\n os.mkdir(self.lockDir)\n self._locked = True\n # log('acquired exclusive lock: %s\\n' % (self.lockDir, ))\n return True\n except OSError, err:\n if err.errno != errno.EEXIST:\n raise\n if self.expire():\n continue # Try immediately to acquire\n timer.sleep()\n return False",
"def AcquireFileLock(target_file, flags):\n assert flags in (\n LOCK_EX, LOCK_SH, LOCK_NB, LOCK_EX | LOCK_NB, LOCK_SH | LOCK_NB)\n if os.name == 'nt':\n _LockImplWin(target_file, flags)\n elif os.name == 'posix':\n _LockImplPosix(target_file, flags)\n else:\n raise NotImplementedError('%s is not supported' % os.name)",
"def make_pidlockfile(path, acquire_timeout):\n if not isinstance(path, basestring):\n error = ValueError(\"Not a filesystem path: %(path)r\" % vars())\n raise error\n if not os.path.isabs(path):\n error = ValueError(\"Not an absolute path: %(path)r\" % vars())\n raise error\n lockfile = pidlockfile.TimeoutPIDLockFile(path, acquire_timeout)\n\n return lockfile",
"def test_locked_file_03(self):\n \n f = open(\"tests/locked.db3\", \"a+\")\n fcntl.lockf(f.fileno(), fcntl.LOCK_EX) \n \n x = subprocess.Popen([\"sqlbak\", \"tests\", \"--ms-towait=4000\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n time.sleep(3)\n fcntl.lockf(f.fileno(), fcntl.LOCK_UN)\n f.close()\n\n result = x.communicate()[0]\n\n self.assertTrue(\"cannot obtain lock\" not in result)"
] | [
"0.7269624",
"0.6982146",
"0.69293517",
"0.67427164",
"0.6594269",
"0.6411413",
"0.6381823",
"0.63453645",
"0.63121194",
"0.63120097",
"0.62144",
"0.6043685",
"0.59835607",
"0.5917388",
"0.5861188",
"0.577998",
"0.5779113",
"0.57543266",
"0.5674176",
"0.56467295",
"0.5624611",
"0.5607124",
"0.55963784",
"0.55711716",
"0.5542878",
"0.5485642",
"0.54830825",
"0.54784495",
"0.5458545",
"0.5419394"
] | 0.72758126 | 0 |
Gets the view model for the cards in the deck | def get_cards(self):
return [card.view_model() for card in self._deck.loc] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_card_model(self, model: str) -> Any:\n return self.collection.models.byName(model)",
"def cards(self):\r\n return Cards(self)",
"def get_card_list(self):\n return self.cards",
"def cards(self):\n return self._cards",
"def GetView(self):\r\n return self.model.GetView()",
"def get_card(self):\n return self.card",
"def get(self, request ):\n return render(request, \"main_display_cards.html\")",
"def get(self):\n user = get_authenticated_user()\n return get_card(user)",
"def get_customer_viewmodel(self, customer_id):\n credit_card_list = []\n customers = self._customer_repo.get_customer_list()\n credit_cards = self._customer_repo.get_credit_card_list()\n for customer in customers:\n if customer.get_customer_id() == customer_id:\n customer_first_name = customer.get_first_name()\n customer_last_name = customer.get_last_name()\n country = customer.get_country()\n for credit_card in credit_cards:\n if credit_card.get_customer_id() == customer_id:\n credit_card_list.append(credit_card.get_card_number())\n customer_to_view = CustomerViewModel(customer_id, customer_first_name,\n customer_last_name, country, credit_card_list)\n return customer_to_view",
"def getAllCards(self):\n return self._cards",
"def __repr__(self):\n return f\"Deck({self.cards})\"",
"def get_deck(self):\n deck = Deck(self.get_cards())\n return deck.as_string()",
"def get_cards(self):\n card = self._starting_card\n return card",
"def card(self):\r\n return Card(self)",
"def card(self):\r\n return Card(self)",
"def get_card (self, card):\n\t\treturn self._card",
"def get(self, request):\n cards = self.get_queryset().all()\n user = None\n auth = request.user.is_authenticated\n if auth:\n user = request.user\n return render(request, 'index/index.html', {\n 'cards': cards,\n 'user': user\n })",
"def get_deck():\n deck = []\n for suit in Suit:\n for rank in Rank:\n deck.append(Card(suit, rank))\n return deck",
"def get_cards(self):\n return deepcopy(self._cards)",
"def _cards_getter(self):\n pass",
"def deck(self) -> Iterable[CardIdentifier]:\n # for some reason cards are treated quite different by NS api currently\n # so we cant simply make a shards call. for now we make a direct call\n # to the requester shards_xml method, since it does not insert the\n # `nation=name` parameter\n # this request returns a <CARDS><DECK><CARD/>...</DECK><CARDS> structure,\n # so we immedietly retrieve the DECK node (which contains multiple CARD nodes)\n # with [0]\n deck = as_xml(\n self.requester.shard_request(\n shards=[\"cards\", \"deck\"], nationname=self.nationname\n ).text\n )[0]\n return [CardIdentifier.from_xml(node) for node in deck]",
"def card(self):\n return self.cdb.name_to_card[self.card_name]",
"def selectDeck():\n\n\t\tfrom common.main.browsedecks import BrowseDecks\n\t\tbrowseDeck = BrowseDecks(False)\n\t\tbrowseDeck.browseDecks()\n\t\treturn browseDeck.mDeck",
"def get_decks(self, include_cards=True):\n deck_previews = self.data_source.get_decks(self.user_id,\n not include_cards)\n\n return deck_previews",
"def get_card(self, suit, face):\n for card in self.deck:\n if card.suit == suit and card.value == face:\n return card",
"def __str__(self):\n return f\"This deck contains the following cards: {self.cards}\"",
"def show(self):\r\n for card in self.cards_list:\r\n print(card)",
"def print_deck(self):\n\n ls = []\n for card in self.deck:\n ls.append(card.get_card())\n print(ls)",
"def deck_info(self) -> DeckInfo:\n return DeckInfo.from_xml(self.cards_xml(\"info\")[\"info\"])",
"def get_model_and_view(self):\n uri = self.request.path\n\n #handle datastore page\n page = Page.gql(\"where uri=:1\", uri).get()\n if page is not None and (page.is_public or users.is_current_user_admin()):\n hydrate(page)\n return ModelAndView(view='standard.html',\n model={\n 'page': page,\n 'syntax_list': get_syntax_list([page])\n })\n else:\n #handle static page\n filename = uri[1:] + '.html' if len(uri) > 1 else 'index.html'\n static_page_path = os.path.join(os.path.dirname(__file__), '..', 'content', 'pages', filename)\n if os.path.isfile(static_page_path):\n return ModelAndView(view = static_page_path, model = {})\n\n return self.get_list()"
] | [
"0.62337184",
"0.6114645",
"0.5980971",
"0.5815437",
"0.5713382",
"0.57027954",
"0.5669594",
"0.5646416",
"0.56217915",
"0.5597641",
"0.55945307",
"0.5587441",
"0.5510169",
"0.5467855",
"0.5467855",
"0.54545987",
"0.542285",
"0.540551",
"0.5393853",
"0.5392188",
"0.52771145",
"0.52626395",
"0.5227895",
"0.52221215",
"0.5216388",
"0.5209738",
"0.5179758",
"0.5139741",
"0.5135426",
"0.51279193"
] | 0.7292069 | 0 |
validate rpy2 can load correctly | def test_rpy2_integration():
## Try to import rpy (test R_HOME path) ##
import rpy2.robjects as robjects
import rpy2
from rpy2.robjects.packages import importr
req_filepath = path.join(ROOT, R_REQUIREMENTS_FILE)
with open(req_filepath, 'r') as req_fh:
raw_req = req_fh.read().splitlines()
## Test that requirements are in .libPaths() ##
for requirement in raw_req:
package, version = requirement.split(R_DELIMITER)
package = package.replace(' ', '')
if package in ROBJECT_OVERRIDES:
importr(
package,
robject_translations=ROBJECT_OVERRIDES[package]
)
else:
importr(package) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_valid_python():\n from decisionengine.framework.util import reaper # noqa: F401\n\n pass",
"def test_rlmm_imported():\n assert \"rlmm\" in sys.modules",
"def rpy2_import_test(self):\n try:\n import rpy2\n\n rpy2_present = True\n except:\n rpy2_present = False\n\n assert self.results._import_rpy2() is rpy2_present\n\n if rpy2_present:\n # R color part\n assert len(self.results._generate_r_colors(\"None\", 10)) == 10\n\n rainbow = self.results._generate_r_colors(\"rainbow\", 10)\n rainbow_r = self.results._generate_r_colors(\"rainbow_r\", 10)\n rainbow_r.reverse()\n assert rainbow == rainbow_r\n\n plot_result_class = pickle.load(\n open(os.path.join(\"tests\", \"data\", \"test_BSA_pyqms_results.pkl\"), \"rb\")\n )\n plot_name = os.path.join(\"tests\", \"data\", \"BSA_DDSPDLPK\")\n for key in plot_result_class._parse_and_filter(molecules=[\"DDSPDLPK\"]):\n # plot 3D\n plot_result_class.plot_MIC_3D(\n key, file_name=plot_name, rt_window=None, i_transform=None\n )\n assert os.path.exists(plot_name + \"_MIC_3D.pdf\") is True\n\n # test fail\n plot_result_class.plot_MIC_3D(\n key, file_name=plot_name, rt_window=[-2, -1], i_transform=None\n )\n # plot 2D\n graphics, grdevices = plot_result_class.init_r_plot(\n plot_name + \"_MIC_2D.pdf\"\n )\n max_score_tuple = plot_result_class.max_score(molecules=[\"DDSPDLPK\"])\n\n plot_result_class.plot_MICs_2D(\n [key],\n graphics=graphics,\n rt_window=[28, 31],\n ablines={\n key: [\n {\n \"v\": max_score_tuple[3].rt,\n \"col\": \"gray\",\n \"lty\": \"dashed\",\n \"lwd\": 0.4,\n }\n ]\n },\n additional_legends={\n key: [{\"text\": \"maxI RT\", \"x\": max_score_tuple[3].rt, \"y\": 47}]\n },\n )\n assert os.path.exists(plot_name + \"_MIC_2D.pdf\") is True\n\n # plot mz and i error function\n plot_result_class._determine_measured_error(\n score_threshold=0.5,\n filename=os.path.join(\n \"tests\", \"data\", \"mz_and_intensity_error_density_plot.pdf\"\n ),\n plot=True,\n )",
"def require():",
"def test_LPyModelDriver_nolpy(): # pragma: no lpy\n assert_raises(RuntimeError, LPyModelDriver.LPyModelDriver,\n 'test', scripts['lpy'])",
"def load(self):\n return True",
"def test_model_can_import():\n assert hasattr(model, \"SEIR_model_publish_w_risk\")\n assert hasattr(model, \"compute_R0\")",
"def test_override_builtin_extension_without_explicit_flag(self):\n with self.assertRaises(ValueError):\n PyLoader.register()",
"def LoadHint(self) -> LoadHint:",
"def LoadHint(self) -> LoadHint:",
"def load_module(cls, *args, **kwargs): # real signature unknown\n pass",
"def load_module(cls, *args, **kwargs): # real signature unknown\n pass",
"def load_module(cls, *args, **kwargs): # real signature unknown\n pass",
"def load(self):\n\n raise NotImplementedError",
"def test_py2(self):\n if sys.version_info >= self.MIN_SUPPORTED_VERSION:\n return\n try:\n import miflora # noqa: F401 # pylint: disable=unused-import,import-outside-toplevel\n\n self.fail(\"Should have thrown an exception\")\n except ValueError as val_err:\n self.assertIn(\"version\", str(val_err))",
"def load_xdr(self, name):\n modulename = os.path.splitext(name)[0] + '_xdr'\n if modulename in sys.modules:\n return sys.modules[modulename]\n bindir = os.path.join(root, \"bin\")\n filename = os.path.join(root, \"tests\", \"xdr\", name)\n tmpdir = tempfile.mkdtemp(prefix=\"xdr-test-python.\")\n outdir = os.path.join(tmpdir, modulename)\n self.tmpdirs.append(tmpdir)\n subprocess.check_call([bindir+\"/xdr\", \"-t\", \"python\", \"-o\", outdir, filename])\n return imp.load_source(modulename, outdir + '/__init__.py')",
"def testPynocleImportsPynocle(self):\r\n self.buildTempDirs()\r\n expected = os.path.join(THISDIR, '__init__')\r\n self.assertEqual(expected, modulefinder.get_module_filename('pynocle', __file__))",
"def __init__(self):\n ScriptedLoadableModuleLogic.__init__(self)",
"def have_pyrex():\n pyrex_impls = 'Cython.Distutils.build_ext', 'Pyrex.Distutils.build_ext'\n for pyrex_impl in pyrex_impls:\n try:\n # from (pyrex_impl) import build_ext\n __import__(pyrex_impl, fromlist=['build_ext']).build_ext\n return True\n except Exception:\n pass\n return False",
"def test_load_non_existant_protocol():\n Protocol.load(path(__file__).parent /\n path('protocols') /\n path('no protocol'))",
"def initialize():\n _check_python_version()",
"def testFindsBuiltins(self):\r\n self.assertEqual('sys', modulefinder.get_module_filename('sys'))\r\n self.assertEqual('time', modulefinder.get_module_filename('time'))",
"def consider_env(self): \n for spec in self._envlist(\"PYLIB\"):\n self.import_module(spec)",
"def check(self, runtime):",
"def test_config_have_biopython():\n assert core.HAVE_BIOPYTHON\n args = Namespace(extended_validation='all')\n config = core.Config.from_args(args)\n assert config.extended_validation == 'all'",
"def test_find_module_py33():\n assert find_module_py33('_io') == (None, '_io', False)",
"def _load(self):\n raise NotImplementedError()",
"def test_LC18_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import LC18 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.LC18\", test]",
"def __init__(self):\n try:\n from reasonable import PyReasoner\n except ImportError:\n raise ImportError(\n \"'reasonable' package not found. Install\\\nsupport for the reasonable Reasoner with 'pip install brickschema[reasonable].\\\nCurrently only works on Linux and MacOS\"\n )\n self.r = PyReasoner()",
"def test_init_success(self):\n found = False\n try:\n pyint = Interpreter()\n except InitializationException: \n found = True\n self.assertFalse(found)"
] | [
"0.6022934",
"0.59124196",
"0.5730419",
"0.5573902",
"0.551886",
"0.5433446",
"0.5393205",
"0.53599614",
"0.53094214",
"0.53094214",
"0.5308447",
"0.5308447",
"0.5308447",
"0.5288998",
"0.52846456",
"0.52457887",
"0.52206194",
"0.52099794",
"0.5209703",
"0.51939344",
"0.51855856",
"0.51819634",
"0.51716286",
"0.5171305",
"0.51691914",
"0.5161172",
"0.51566076",
"0.51500237",
"0.5119811",
"0.50922817"
] | 0.7157122 | 0 |
Returns a generator of packets. This is the sync version of packets_from_tshark. It wait for the completion of each coroutine and reimplements reading packets in a sync way, yielding each packet as it arrives. | def _packets_from_tshark_sync(self, tshark_process, packet_count=None, timeout:float=3.0,
max_data_length:int=10000):
# NOTE: This has code duplication with the async version, think about how to solve this
psml_structure, data = self.eventloop.run_until_complete(self._get_psml_struct(tshark_process.stdout))
packets_captured = 0
data = b""
try:
while self.is_open.value:
try:
packet, data = self.eventloop.run_until_complete(
self._get_packet_from_stream(tshark_process.stdout,
data,
psml_structure=psml_structure,
got_first_packet=packets_captured > 0,
timeout=timeout))
except EOFError:
echo("Caught EOF", file=Interceptor.stdout)
self._log.debug("EOF reached (sync)")
break
if(packet is False): continue
if packet:
packets_captured += 1
yield packet
if packet_count and packets_captured >= packet_count:
break
if len(data) > max_data_length:
data = b''
finally:
if tshark_process in self._running_processes:
self.eventloop.run_until_complete(self._cleanup_subprocess(tshark_process)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_pkt_seq(self):\n pkt = self.read_pkt_line()\n while pkt:\n yield pkt\n pkt = self.read_pkt_line()",
"def pkt_gen(self):\n for i in range(self.num_pkts):\n # create the test packets\n pkt = Ether()/IP()/TCP()/'hello there pretty world!!!'\n rank = random.sample(range(0, 100), 1)[0]\n pkt_id = i\n tuser = Tuser(len(pkt), 0b00000001, 0b00000100, rank, pkt_id)\n print ('@ {:.2f} - Send: {} || {}'.format(self.env.now, pkt.summary(), tuser))\n # write the pkt and metadata into storage\n self.pkt_in_pipe.put((pkt, tuser))\n\n # wait for 10 cycles\n #for j in range(PREAMBLE + len(pkt) + IFG):\n yield self.wait_line_clks(self.PREAMBLE + len(pkt) + self.IFG)",
"def __iter__(self) -> Iterator[packets.Packet]:\n for packet in self._packets:\n yield packet\n for pointer in self._packet_pointers:\n yield pointer.get()",
"def read_packets(serial_input):\n while 1:\n header = scan_to_headerword(serial_input)\n yield header.read_packet(serial_input)",
"async def packets(self):\n\n async def registrator_task(sock):\n while True:\n try:\n await self._send(sock, \"reglistener\")\n _LOGGER.info(\n \"Registered self as listener for device at %s\",\n self._address,\n )\n except OSError: # e.g. Network is unreachable\n # just retry\n _LOGGER.warning(\"Could not send registration packet\")\n pass\n await asyncio.sleep(REGISTRATION_INTERVAL.seconds)\n\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((\"\", COMMAND_PORT))\n sock.setblocking(0)\n loop = asyncio.get_event_loop()\n loop.create_task(registrator_task(sock))\n while True:\n try:\n response, address = await sock_recvfrom(sock, 1024)\n _LOGGER.debug(\"Got packet from %s\", address)\n if address == self._address:\n yield response.decode(\"ascii\")\n else:\n _LOGGER.warning(\n \"Got unknown response from %s: %s\",\n address,\n response,\n )\n except OSError as e:\n _LOGGER.warning(\"Could not receive from socket: %s\", e)",
"def packets_for_stream(fobj, offset):\n pcap = dpkt.pcap.Reader(fobj)\n pcapiter = iter(pcap)\n ts, raw = pcapiter.next()\n\n fobj.seek(offset)\n for p in next_connection_packets(pcapiter, linktype=pcap.datalink()):\n yield p",
"def remove_buffered_packets(self):\n seq = self.next_seq\n while True:\n p = self.buffer.pop(seq, None)\n if p is None:\n break\n else:\n seq += len(p.data)\n yield p",
"def next_batch(self):\n\n while self.cap.isOpened():\n flag, frame = self.cap.read()\n yield frame",
"async def get_iter(self) -> AsyncIterator[Data]:\n async with self.read_mutex:\n if self.get_in_progress:\n # This should be guarded against with the read_mutex,\n # exception is only here as a failsafe\n raise ServerError(\n \"Called get_iter on Websocket frame assembler \"\n \"while asynchronous get is already in progress.\"\n )\n self.get_in_progress = True\n\n chunks = self.chunks\n self.chunks = []\n self.chunks_queue = asyncio.Queue()\n\n # Sending None in chunk_queue supersedes setting message_complete\n # when switching to \"streaming\". If message is already complete\n # when the switch happens, put() didn't send None, so we have to.\n if self.message_complete.is_set():\n await self.chunks_queue.put(None)\n\n # Locking with get_in_progress ensures only one task can get here\n for c in chunks:\n yield c\n while True:\n chunk = await self.chunks_queue.get()\n if chunk is None:\n break\n yield chunk\n\n # Unpause the transport, if its paused\n if self.paused:\n self.protocol.resume_frames()\n self.paused = False\n if not self.get_in_progress: # no cov\n # This should be guarded against with the read_mutex,\n # exception is here as a failsafe\n raise ServerError(\n \"State of Websocket frame assembler was modified while an \"\n \"asynchronous get was in progress.\"\n )\n self.get_in_progress = False\n if not self.message_complete.is_set(): # no cov\n # This should be guarded against with the read_mutex,\n # exception is here as a failsafe\n raise ServerError(\n \"Websocket frame assembler chunks queue ended before \"\n \"message was complete.\"\n )\n self.message_complete.clear()\n if self.message_fetched.is_set(): # no cov\n # This should be guarded against with the read_mutex,\n # and get_in_progress check, this exception is\n # here as a failsafe\n raise ServerError(\n \"Websocket get_iter() found a message when state was \"\n \"already fetched.\"\n )\n\n self.message_fetched.set()\n # this should already be empty, but set it here for safety\n self.chunks = []\n self.chunks_queue = None",
"def run(self):\r\n waiting_packet = None\r\n while True:\r\n if waiting_packet is not None:\r\n packet = waiting_packet\r\n waiting_packet = None\r\n else:\r\n packet = yield self.buffer.get()\r\n self.channel.add_sender(self)\r\n yield self.env.timeout(packet.size/self.service_rate)\r\n self.channel.remove_sender(self)\r\n packet.output_timestamp= env.now\r\n if self.destination is None:\r\n self.packet_list.append(packet)\r\n if (not self.collision):\r\n if self.destination is not None:\r\n self.destination.put(packet)\r\n self.channel.packet_list.append(packet)\r\n else:\r\n if self.debug:\r\n print(\"Packet %d is discarded. Reason: Collision\" \r\n % (packet.id))\r\n self.packets_drop += 1\r\n waiting_packet = packet\r\n self.collision = False\r\n yield self.env.timeout(self.random_delay())",
"def sender_iter(self):\n while 1:\n yield self.send_next()",
"def async_fetch(self):\n args = (async_get_pipe, self.zargs, self.connections)\n mapped = yield ait.async_map(*args)\n return_value(multiplex(mapped))",
"def process_pkts(self):\n while not self.sim_done:\n # wait for metadata and pkt to arrive\n (meta, pkt) = yield self.pkt_in_pipe.get()\n\n # This is where the scheduling algorithm goes\n if self.sched_alg == \"Invert_pkts\":\n yield self.env.process(self.invert_pkts(meta, pkt))\n elif self.sched_alg == \"STFQ\":\n yield self.env.process(self.STFQ(meta, pkt))\n elif self.sched_alg == \"HSTFQ\":\n yield self.env.process(self.HSTFQ(meta, pkt))\n elif self.sched_alg == \"MinRate\":\n yield self.env.process(self.MinRate(meta, pkt))\n elif self.sched_alg == \"RR\":\n yield self.env.process(self.RR(meta, pkt))\n elif self.sched_alg == \"WRR\":\n yield self.env.process(self.WRR(meta, pkt))\n elif self.sched_alg == \"Strict\":\n yield self.env.process(self.Strict(meta, pkt))\n\n # record pkts and ranks\n self.pkts.append(pkt)\n self.ranks.append(meta.ranks[0])\n\n # wait until the scheduling_tree is ready to receive\n yield self.ready_out_pipe.get()\n # write metadata and pkt out\n self.pkt_out_pipe.put((meta, pkt))\n\n wrpcap(PCAP_FILE, self.pkts)\n with open(RANK_FILE, 'w') as f:\n json.dump(self.ranks, f)",
"def run(self):\n yield self.env.timeout(self.initial_delay)\n while self.env.now < self.finish:\n # wait for next transmission\n yield self.env.timeout(self.burst_dist())\n self.bursts += 1\n for i in range(self.burst_size):\n self.packets_sent += 1\n p = Packet(self.env.now, self.sdist(), self.packets_sent, src=self.id, flow_id=self.flow_id, priority=self.priority)\n self.out.put(p)",
"def iterate(cls, disc, track_number):\n\n assert track_number >= 0 and track_number < len(disc.tracks)\n\n track = disc.tracks[track_number]\n\n packet_frame_size = (\n disc.audio_format.rate / cls.PACKETS_PER_SECOND)\n\n # Mock up a packet that ends at the start of index 1, so the\n # first packet generated starts at that position\n p = cls(disc, track, track_number, track.pregap_offset, 0)\n\n while True:\n # Calculate offsets of next packet\n abs_pos = p.abs_pos + p.length\n\n if abs_pos < track.pregap_offset:\n length = min(track.pregap_offset - abs_pos, packet_frame_size)\n else:\n length = min(track.length - abs_pos, packet_frame_size)\n\n assert length >= 0\n\n if length == 0:\n # Reached end of track, switch to next. Simplify this\n # code by generating a dummy packet for the next\n # iteration to work on (but don't yield it!)\n\n track_number += 1\n\n try:\n track = disc.tracks[track_number]\n except IndexError:\n # That was the last track, no more packets\n return\n\n p = cls(disc, track, track_number, 0, 0)\n\n else:\n # Generate next packet\n flags = 0\n if (track.pause_after\n and abs_pos + length == track.length\n and track_number + 1 < len(disc.tracks)):\n flags |= p.PAUSE_AFTER\n\n p = cls(disc, track, track_number, abs_pos, length, flags)\n yield p",
"def audio_stream() -> typing.Iterable[bytes]:\n frames = frame_queue.get()\n while frames:\n yield frames\n frames = frame_queue.get()",
"def run(self):\n yield self.env.timeout(self.initial_delay)\n while self.env.now < self.finish:\n # wait for next transmission\n yield self.env.timeout(self.adist)\n self.packets_sent += 1\n p = Packet(self.env.now, self.sdist, self.packets_sent, src=self.id, flow_id=self.flow_id)\n self.out.put(p)",
"def v6_gen(self):\n sbuff = sb.ShuffleBuffer(self.v6_struct.size, self.shuffle_size)\n while len(self.readers):\n for r in self.readers:\n try:\n s = r.recv_bytes()\n s = sbuff.insert_or_replace(s)\n if s is None:\n continue # shuffle buffer not yet full\n yield s\n except EOFError:\n print(\"Reader EOF\")\n self.readers.remove(r)\n # drain the shuffle buffer.\n while True:\n s = sbuff.extract()\n if s is None:\n return\n yield s",
"def genLoopPackets(self):\n\n for p in self.get_observations():\n ts = int(time.time() + 0.5)\n packet = pywws2weewx(p, ts,\n self._last_rain_loop, self._last_rain_ts_loop,\n self.max_rain_rate)\n self._last_rain_loop = packet['rainTotal']\n self._last_rain_ts_loop = ts\n if packet['status'] != self._last_status:\n log.info('station status %s (%s)' % \n (decode_status(packet['status']), packet['status']))\n self._last_status = packet['status']\n yield packet",
"def pipes(self):\n for name in self._pipes:\n yield name, self._data[name]",
"def get_generator(self):\n while self._is_running():\n yield self._queue.get()",
"def get_generator(self):\n while self._is_running():\n yield self._queue.get()",
"def generate():\n with open(remote_path, \"rb\") as f:\n for chunk in iter(lambda: f.read(buffer_size), b''):\n yield chunk",
"def atari_frames_generator(env_name, ip):\n\n print(\"> Waiting for a stream of frames from:\", ip)\n\n # Set up a connection\n receiver = AtariFramesReceiver(env_name, ip)\n\n # Collect\n try:\n while True:\n yield receiver.receive(wait=True)\n\n except ConnectionAbortedError:\n raise StopIteration",
"def receive(self):\n while True:\n if self.pending_request:\n request = self.unpack(self.pending_request)\n self.pending_request = None\n else: \n request = self.unpack(self.mh.receive_message())\n if request:\n yield request\n else: break",
"async def async_comprehension() -> List[float]:\n return [i async for i in async_generator()]",
"def next_connection_packets(piter, linktype=1):\n first_ft = None\n\n for ts, raw in piter:\n ft = flowtuple_from_raw(raw, linktype)\n if not first_ft: first_ft = ft\n\n sip, dip, sport, dport, proto = ft\n if not (first_ft == ft or first_ft == (dip, sip, dport, sport, proto)):\n break\n\n yield {\n \"src\": sip, \"dst\": dip, \"sport\": sport, \"dport\": dport, \"proto\": proto,\n \"raw\": payload_from_raw(raw, linktype).encode(\"base64\"), \"direction\": first_ft == ft,\n }",
"def collect(self):\n while self.proc is not None:\n self.read()\n if not len(self.datalines):\n return\n while len(self.datalines):\n # pop the first node of list\n yield self.datalines.pop(0)",
"def __iter__(self):\n for b in self.dl: \n yield to_device(b, self.device) # yield pauses the execution, not store values in memory, forgets about them once iterated\n # no need to remove batch of data from device, done automatically",
"def iter_unpack(raw):\n return chunks(raw)"
] | [
"0.6388171",
"0.59669185",
"0.58295953",
"0.58010983",
"0.57933766",
"0.579229",
"0.57740164",
"0.56658477",
"0.5569597",
"0.55228883",
"0.5494399",
"0.5487101",
"0.5454645",
"0.5428898",
"0.54204917",
"0.53887296",
"0.5376197",
"0.53683877",
"0.52956384",
"0.52746814",
"0.5250131",
"0.5250131",
"0.52418303",
"0.5241606",
"0.5216681",
"0.5160942",
"0.51513577",
"0.5122623",
"0.5116472",
"0.5101056"
] | 0.6835916 | 0 |
Tests the matrix_vector_product code. | def test_matrix_product(self, use_cache):
key = jrandom.PRNGKey(0)
dim = 50
max_power = 25
matrix = jrandom.normal(key, (dim, dim)) / 10
vector = jnp.ones((dim,), dtype=jnp.float32)
if use_cache:
mpstate = model_utils.CachedMatrixPowerState.precompute(matrix, max_power)
else:
mpstate = model_utils.LazyMatrixPowerState(matrix)
for t in range(max_power):
result = mpstate.matrix_power_multiply(vector, t)
expected = np.linalg.matrix_power(matrix, t) @ vector
np.testing.assert_array_almost_equal(result, expected, decimal=1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_mul():\n assert_equal(Vector(3, 1) * 2, Vector(6, 2))\n assert_equal(2 * Vector(3, 1), Vector(6, 2))",
"def matrix_vector_prod(m,u):\n each_product = []\n for v in m:\n each_product.append(dot_prod(v, u))\n return each_product",
"def test_suite():\r\n test(add_vectors([1, 1], [1, 1]) == [2, 2])\r\n test(add_vectors([1, 2], [1, 4]) == [2, 6])\r\n test(add_vectors([1, 2, 1], [1, 4, 3]) == [2, 6, 4])\r\n test(scalar_mult(5, [1, 2]) == [5, 10])\r\n test(scalar_mult(3, [1, 0, -1]) == [3, 0, -3])\r\n test(scalar_mult(7, [3, 0, 5, 11, 2]) == [21, 0, 35, 77, 14])\r\n test(dot_product([1, 1], [1, 1]) == 2)\r\n test(dot_product([1, 2], [1, 4]) == 9)\r\n test(dot_product([1, 2, 1], [1, 4, 3]) == 12)\r\n test(cross_product([2,3,4], [5,6,7]) == [-3, 6, -3])",
"def test_mueller_product(self, ):\n mdims = ('mueller_v', 'mueller_h')\n mm_1 = xr.DataArray(np.random.rand(4, 4, ), dims=mdims, )\n mm_2 = xr.DataArray(np.identity(4, ), dims=mdims, )\n sv_1 = xr.DataArray(np.random.rand(4, ), dims=('stokes', ), )\n\n assert_almost_equal(mm_1.values, mueller_product(mm_1, mm_2).values, )\n assert_almost_equal(mm_1.values, mueller_product(mm_2, mm_1).values, )\n assert_almost_equal(sv_1.values, mueller_product(mm_2, sv_1).data, )",
"def test_matmul_vv(self):\n self.check_dot_vv(matmul_usecase, \"'@'\")",
"def test_vector_dot_product(self):\n\n # Example 1.2\n vector_p = np.array([0.5, 0.0, 0.5])\n vector_q = np.array([0.5, 0.5, 0.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n magnitude_ref_nm = 5.0/16.0\n\n vector_d = vector_p - vector_q\n magnitude_nm = vector.dot_product(crystal, vector_d, vector_d)\n\n self.assertAlmostEqual(magnitude_ref_nm, magnitude_nm, 5)\n\n # Example 1.3\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n magnitude_ref_nm = 5.0/4.0\n\n magnitude_nm = vector.dot_product(crystal, vector_p, vector_q)\n self.assertAlmostEqual(magnitude_ref_nm, magnitude_nm, 5)\n\n magnitude_nm = vector.dot_product(crystal, vector_q, vector_p)\n self.assertAlmostEqual(magnitude_ref_nm, magnitude_nm, 5)\n\n #self.fail(\"Test if the testcase is working.\")",
"def test_multiply_vec(self):\n a = Vector(1, 2)\n b = Vector(3, 4)\n c = a * b\n assert c.x == 3\n assert c.y == 8",
"def test_product(self):\n self.assertEqual(functions.product(2, 2), 4)\n self.assertEqual(functions.product(2, -2), -4)",
"def matrix_mult(M, vector1, vector2):\n out = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return out",
"def matrix_mult(M, vector1, vector2):\n out = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return out",
"def test_is_product_entangled_state_3_sys():\n ent_vec = max_entangled(4)\n np.testing.assert_equal(is_product_vector(ent_vec, dim=[2, 2, 2, 2]), False)",
"def test_is_product_entangled_state_2_sys():\n ent_vec = max_entangled(4)\n np.testing.assert_equal(is_product_vector(ent_vec, dim=[4, 4]), False)",
"def test_matmul_vm(self):\n self.check_dot_vm(matmul_usecase, None, \"'@'\")",
"def test_is_product_entangled_state():\n ent_vec = max_entangled(3)\n np.testing.assert_equal(is_product_vector(ent_vec), False)",
"def multiply(matrix, vector):\n result = []\n for row in matrix:\n assert len(row) == len(vector)\n result.append(sum([a*b for (a, b) in zip(row, vector)]))\n return Vector3D.from_list(result)",
"def matrix_mult(m1, m2):\n pass",
"def test_inner_product(self):\n circuit = InnerProduct(n_qubits=3)\n expected = QuantumCircuit(*circuit.qregs)\n expected.cz(0, 3)\n expected.cz(1, 4)\n expected.cz(2, 5)\n self.assertEqual(circuit, expected)",
"def test_multiply_scalar(self):\n a = Vector(1, 2)\n c = a * 3\n assert c.x == 3\n assert c.y == 6",
"def python_nonsquare_matrix_mult(matrix):\n\n transposed_matrix = np.zeros([matrix.shape[1],matrix.shape[0]])\n start = time.time()\n # for i in range(matrix.shape[0]):\n # for j in range(matrix.shape[1]):\n # transposed_matrix[j,i] = matrix[i,j]\n\n transposed_matrix = np.transpose(matrix)\n product = matrix.dot(transposed_matrix)\n\n # transposed_matrix = np.transpose(matrix)\n end = time.time()-start\n\n # print(\"Python Golden Transpose: %s\" % product)\n # print('python transpose time: %.2E' % end)\n return [product, end]",
"def matr_prod(_A, _B):\r\n # Matrix multiplication\r\n B0 = _B[0]\r\n lenB = len(_B)\r\n lenA = len(_A)\r\n if(len(_A[0]) != lenB): # Check matrix dimensions \r\n Exception('Matrices have wrong dimensions')\r\n if(isinstance(B0, list) or isinstance(B0, array) or isinstance(B0, tuple)): #_B is matrix\r\n lenB0 = len(B0)\r\n C = [[0 for row in range(lenB0)] for col in range(lenA)]\r\n for i in range(lenA):\r\n for j in range(lenB0):\r\n for k in range(lenB):\r\n C[i][j] += _A[i][k]*_B[k][j]\r\n else: #_B is vector\r\n C = [0 for row in range(lenB)]\r\n for i in range(lenA):\r\n for k in range(lenB):\r\n C[i] += _A[i][k]*_B[k]\r\n return C",
"def test_matmul_mm(self):\n self.check_dot_mm(matmul_usecase, None, \"'@'\")",
"def mat_mul(mat1, mat2):\n\n if len(mat1[0]) == len(mat2):\n\n mat2 = matrix_transpose(mat2)\n response = []\n\n for row in range(len(mat1)):\n response.append(\n [\n sum(dot_product(mat1[row], mat2[column]))\n for column in range(len(mat2))\n ]\n )\n\n return response\n\n else:\n return None",
"def multiplyByVector(matrix:[[int]], vector: [int]):\n # assuming vector and result are transposed\n _validate(matrix, vector)\n if len(matrix[0]) != len(vector):\n raise InvalidArgumentError(f\"cannot multiply vector which length is {len(vector)} by matrix that has a {len(matrix[0])} columns\")\n result = [0 for _ in range(len(matrix))] # initialize empty array\n for matrix_row_idx, _ in enumerate(matrix):\n for matrix_column_idx, v_value in enumerate(vector):\n result[matrix_row_idx] ^= (v_value * matrix[matrix_row_idx][matrix_column_idx])\n return result",
"def test_largest_product_2_arrays():\n assert largest_product([[1, 2], [3, 4]]) == 12",
"def multiplyByMatrix(vector: [int], matrix:[[int]]):\n _validate(matrix, vector)\n if len(vector) != len (matrix):\n raise InvalidArgumentError(f\"cannot multiply vector which length is {len(vector)} by matrix that has a {len(matrix)} rows\")\n result = [0 for _ in range(len(matrix[0]))] # initialize empty array\n for matrix_column_idx, _ in enumerate(matrix[0]):\n for matrix_row_idx, v_value in enumerate(vector):\n result[matrix_column_idx] ^= (v_value * matrix[matrix_row_idx][matrix_column_idx])\n return result",
"def test_is_product_separable_state():\n e_0, e_1 = basis(2, 0), basis(2, 1)\n sep_vec = (\n 1 / 2 * (np.kron(e_0, e_0) - np.kron(e_0, e_1) - np.kron(e_1, e_0) + np.kron(e_1, e_1))\n )\n np.testing.assert_equal(is_product_vector(sep_vec), True)",
"def outer_product(A, B): \n print(A)\n print(B)\n A_rows = len(A)\n A_columns = len(A[0])\n\n B_rows = len(B)\n B_columns = len(B[0])\n \n if A_columns == 1 and B_rows == 1:\n \n outer_product = []\n\n # multi-line list comprehension for outer product\n [outer_product.append([A[i][0] * B[0][j] for j in range(B_columns)]) \n for i in range(A_rows)]\n\n return outer_product\n\n else:\n print(\"dimensions of vector do not match.\")",
"def test_product_mult_only(self):\r\n self.assertEquals(preview.latex_preview('2*3'), r'2\\cdot 3')",
"def test_multiply(vec3_fixture):\n scalar = vec3_fixture * 10\n assert scalar.x1 == vec3_fixture.x1 * 10\n assert scalar.x2 == vec3_fixture.x2 * 10\n assert scalar.x3 == vec3_fixture.x3 * 10\n\n vector = vec3_fixture * Vec3([2, 3, 4])\n assert vector.x1 == vec3_fixture.x1 * 2\n assert vector.x2 == vec3_fixture.x2 * 3\n assert vector.x3 == vec3_fixture.x3 * 4",
"def dot_product(A, B):\n A_rows = len(A)\n A_columns = len(A[0])\n\n B_rows = len(B)\n B_columns = len(B[0])\n\n if (A_columns == B_rows) and (A_rows == 1 and B_columns == 1):\n\n dot_product = []\n \n dot_product.append(sum([A[0][i]*B[i][0] for i in range(A_columns)]))\n\n return float(dot_product)\n \n else:\n print(\"dimensions of vector do not match.\")"
] | [
"0.7020426",
"0.68284523",
"0.6791578",
"0.67468673",
"0.67026436",
"0.66827",
"0.66523254",
"0.66410244",
"0.65571856",
"0.65571856",
"0.650921",
"0.6467925",
"0.6419744",
"0.6303483",
"0.62950623",
"0.62891656",
"0.6261458",
"0.62439704",
"0.62265456",
"0.6186289",
"0.61308116",
"0.61286354",
"0.60814786",
"0.6071064",
"0.6069444",
"0.60280424",
"0.60036075",
"0.5992218",
"0.59781235",
"0.59761256"
] | 0.75648934 | 0 |
Tests the matrix_power_cached code. | def test_matrix_power(self, use_cache):
key = jrandom.PRNGKey(0)
dim = 50
max_power = 25
matrix = jrandom.normal(key, (dim, dim)) / 10
if use_cache:
mpstate = model_utils.CachedMatrixPowerState.precompute(matrix, max_power)
else:
mpstate = model_utils.LazyMatrixPowerState(matrix)
for t in range(max_power):
result = mpstate.matrix_power(t, precision=jax.lax.Precision.HIGHEST)
expected = np.linalg.matrix_power(matrix, t)
np.testing.assert_array_almost_equal(result, expected, decimal=1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_matrix_product(self, use_cache):\n\n key = jrandom.PRNGKey(0)\n dim = 50\n max_power = 25\n\n matrix = jrandom.normal(key, (dim, dim)) / 10\n vector = jnp.ones((dim,), dtype=jnp.float32)\n\n if use_cache:\n mpstate = model_utils.CachedMatrixPowerState.precompute(matrix, max_power)\n else:\n mpstate = model_utils.LazyMatrixPowerState(matrix)\n\n for t in range(max_power):\n result = mpstate.matrix_power_multiply(vector, t)\n expected = np.linalg.matrix_power(matrix, t) @ vector\n\n np.testing.assert_array_almost_equal(result, expected, decimal=1)",
"def __pow__(self, power):\n if type(power) is not int:\n return NotImplemented\n if not self.isSquare():\n raise ValueError(\"Power invalid for non-square matrices\")\n if power > 0:\n p = power\n returnvalue = Matrix(self)\n elif power < 0:\n p = -power\n returnvalue = self.inverse()\n elif power == 0:\n return NotImplemented\n for i in range(p - 1):\n returnvalue *= returnvalue\n return returnvalue",
"def test_change_power_spectrum():\n #The 2010 paper had the knots at:\n #k = 0.475 0.75 1.19, 1.89\n #(knotpos, knotval)\n tests = [(np.array([0.475, 0.75, 1.19, 1.89]), np.array([0.475, 0.75, 1.19, 1.89])),\n (np.array([0.475, 0.75, 1.19, 1.89]), np.array([1.2, 1., 1., 1.])),\n (np.array([0.475, 0.75, 1.19, 1.89]), np.array([1.2, 0.5, 1.2, 0.5])),\n (np.array([0.05, 0.1, 10]), np.array([1.3, 0.3, 1.1]))]\n matpow = np.loadtxt(\"testdata/ics_matterpow_99.dat\")\n #Copy array so that we don't get changed in-place\n [check_change_power_spectrum(kp, kv, matpow) for (kp, kv) in tests]",
"def power(self, power: int, matrix_power: bool = False) -> QuantumCircuit:\n raise NotImplementedError",
"def test():\n # test getCl\n ISWoutFile = 'ISWout_scalCls.fits'\n ISWinFile = 'ISWin_scalCls.fits'\n ell,temps = getCl(ISWoutFile)\n\n \"\"\"\n # test showCl\n showCl(ell,temps)\n\n # test makeLegendreTable\n # this works fine for small lmax values, but ell=86 and higher have problems\n # possibly due to exceeding the maximum size of a float64 dtype\n makeLegendreTable(9,'testTable.npy')\n table = symLoad('testTable.npy')\n print table\n\n # test powerArray\n powers = powerArray(2,9)\n print powers\n \"\"\"\n\n # test makeCmatrix\n # measured time: 4.25 hrs for 6110 point mask\n startTime = time.time()\n\n # old files no longer used\n #saveMatrixFile = 'covar6110_R010_lowl.npy'\n #saveMatrixFile = 'covar6110_R010.npy'\n #maskFile = '/shared/Data/PSG/hundred_point/ISWmask2_din1_R160.fits'\n #saveMatrixFile = 'covar9875_R160b.npy'\n\n # huge mask\n #maskFile = 'ISWmask9875_RING.fits' #19917 pixels\n #saveMatrixFile = 'covar19917_ISWout_bws_hp12_RING.npy'\n #covMat = makeCmatrix(maskFile, ISWoutFile, highpass=12, beamSmooth=True, pixWin=True, nested=False)\n # took 24.83 hours\n\n # use ISWin to model expected signal\n #maskFile = 'ISWmask6110_RING.fits'\n #saveMatrixFile = 'covar6110_ISWin_bws_hp12_RING.npy'\n #covMat = makeCmatrix(maskFile, ISWinFile, highpass=12, nested=True)\n maskFile = 'ISWmask9875_RING.fits' #9875 pixels\n saveMatrixFile = 'covar9875_ISWin_bws_hp12_RING.npy'\n covMat = makeCmatrix(maskFile, ISWinFile, highpass=12, beamSmooth=True, pixWin=True, nested=False)\n\n # no beam nor window smoothing, high lmax\n #saveMatrixFile = 'covar6110_ISWout_nBW_hp12_RING.npy'\n #covMat = makeCmatrix(maskFile, ISWoutFile, highpass=12, beamSmooth=False, pixWin=False, lmax=2200, nested=False)\n\n print 'time elapsed: ',int((time.time()-startTime)/60),' minutes'\n symSave(covMat,saveMatrixFile)\n \"\"\"\n\n # test subMatrix\n subMask = '/shared/Data/PSG/small_masks/ISWmask_din1_R010_trunc0500.fits'\n subCmat = subMatrix(subMask,maskFile,saveMatrixFile)\n print 'time elapsed: ',int((time.time()-startTime)/60),' minutes'\n \"\"\"",
"def test_custom_cache(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )[0]\n\n custom_cache = {}\n params = jax.numpy.array([0.1, 0.2])\n jax.grad(cost)(params, cache=custom_cache)\n\n cache = spy.call_args[0][1]\n assert cache is custom_cache",
"def testGetPower(self):\n # Bypass setter\n self.node._power = [12.8, 1.2, 1.4]\n\n self.assertEqual(\n (12.8, 1.2, 1.4),\n self.node.power,\n )",
"def test_binary_matrix(terms, num_qubits, result):\n binary_matrix = _binary_matrix(terms, num_qubits)\n assert (binary_matrix == result).all()",
"def test_memoization(self):\n non_memoized_func = lambda: random.randint(0, 1000000)\n yes_memoized_func = util.memoize(non_memoized_func)\n self.assertNotEqual(non_memoized_func(), non_memoized_func())\n self.assertEqual(yes_memoized_func(), yes_memoized_func())",
"def main():\n print 'Running the power method...'\n dim = input('Give the dimension : ')\n nbit = input('How many iterations ? ')\n j = complex(0, 1)\n rnd = np.random.normal(0, 1, (dim, dim)) \\\n + np.random.normal(0, 1, (dim, dim))*j\n nbs = np.random.normal(0, 1, (dim, 1)) \\\n + np.random.normal(0, 1, (dim, 1))*j\n rndmat = np.matrix(rnd)\n rndvec = np.matrix(nbs)\n eigmax = power_method(rndmat, rndvec, nbit)\n check(rndmat, eigmax)",
"def test_ref_power_mfcc():\n run_ref_power(MFCC)",
"def test_to_power2_already_a_power(self):\n # Define some data with four features\n data = array([[1, 2, 3, 4],\n [4, 5, 6, 7]])\n\n # Get a CData object\n cdata = CData(data)\n\n # Pad it until the dimension is a power of two\n cdata.pad_to_power2()\n\n self.assertEqual(cdata.num_features, 4)\n self.assertTrue(array_equal(cdata.data, data))",
"def __pow__(self, exponent: int):\n\t\tif exponent < 0:\n\t\t\traise ValueError(\"Negative powers not supported\")\n\t\telif exponent == 0:\n\t\t\treturn SquareMatrix(self._rows, 1)\n\t\telse:\n\t\t\tres = self\n\t\t\tfor i in range(1, exponent):\n\t\t\t\tres *= self\n\t\t\treturn res",
"def testPowerSetAndGet(self):\n\n power = (1.3782, 278.32, 0.738378233782)\n powerD = tuple([Decimal(str(i)) for i in power])\n\n self.cc.power = power\n\n self.assertEqual(\n powerD,\n self.cc.power\n )",
"def test_custom_cache_multiple(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n a = jax.numpy.array(0.1)\n b = jax.numpy.array(0.2)\n\n def cost(a, b, cache):\n with qml.queuing.AnnotatedQueue() as q1:\n qml.RY(a, wires=0)\n qml.RX(b, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape1 = qml.tape.QuantumScript.from_queue(q1)\n\n with qml.queuing.AnnotatedQueue() as q2:\n qml.RY(a, wires=0)\n qml.RX(b, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape2 = qml.tape.QuantumScript.from_queue(q2)\n\n res = execute(\n [tape1, tape2],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )\n return res[0]\n\n custom_cache = {}\n jax.grad(cost)(a, b, cache=custom_cache)\n\n cache = spy.call_args[0][1]\n assert cache is custom_cache",
"def test_rtf_power(self, n_iter, diagonal_loading):\n n_fft_bin = 10\n channel = 4\n reference_channel = 0\n psd_s = np.random.random((n_fft_bin, channel, channel)) + np.random.random((n_fft_bin, channel, channel)) * 1j\n psd_n = np.random.random((n_fft_bin, channel, channel)) + np.random.random((n_fft_bin, channel, channel)) * 1j\n rtf = beamform_utils.rtf_power_numpy(psd_s, psd_n, reference_channel, n_iter, diagonal_loading)\n rtf_audio = F.rtf_power(\n torch.tensor(psd_s, dtype=self.complex_dtype, device=self.device),\n torch.tensor(psd_n, dtype=self.complex_dtype, device=self.device),\n reference_channel,\n n_iter,\n diagonal_loading=diagonal_loading,\n )\n self.assertEqual(torch.tensor(rtf, dtype=self.complex_dtype, device=self.device), rtf_audio)",
"def test_matrix(self, tol):\n\n res_static = qml.QFT.compute_matrix(2)\n res_dynamic = qml.QFT(wires=[0, 1]).matrix()\n res_reordered = qml.QFT(wires=[0, 1]).matrix([1, 0])\n\n expected = np.array(\n [\n [0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j],\n [0.5 + 0.0j, 0.0 + 0.5j, -0.5 + 0.0j, -0.0 - 0.5j],\n [0.5 + 0.0j, -0.5 + 0.0j, 0.5 - 0.0j, -0.5 + 0.0j],\n [0.5 + 0.0j, -0.0 - 0.5j, -0.5 + 0.0j, 0.0 + 0.5j],\n ]\n )\n\n assert np.allclose(res_static, expected, atol=tol, rtol=0)\n assert np.allclose(res_dynamic, expected, atol=tol, rtol=0)\n\n expected_permuted = [\n [0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j],\n [0.5 + 0.0j, 0.5 - 0.0j, -0.5 + 0.0j, -0.5 + 0.0j],\n [0.5 + 0.0j, -0.5 + 0.0j, 0.0 + 0.5j, -0.0 - 0.5j],\n [0.5 + 0.0j, -0.5 + 0.0j, -0.0 - 0.5j, 0.0 + 0.5j],\n ]\n assert np.allclose(res_reordered, expected_permuted, atol=tol, rtol=0)",
"def runpower_one(matrix, n):\n\t#get initial vector\n\tv = np.zeros(n)\n\tw = np.zeros(n)\n\tfor j in range(n):\n\t\tv[j] = np.random.uniform(0,1)\n\t#print 'matrix', matrix\n\t#print 'v', v\n\tT = 10000 #number of iterations\n\ttol = 1e-06\n\toldnormw = 0\n\tfor t in range(T):\n\t\tw = matrix.dot(v)\n\t\t#print 't', t, 'w',w\n\t\tnormw = (np.inner(w,w))**.5\n\t\tv = w/normw\n\t\t#print 't',t,'v',v\n\t\t#print 't',t,'normw',normw, 'old', oldnormw\n\t\tif np.abs(normw - oldnormw)/normw < tol:\n\t\t\t#print ' breaking'\n\t\t\tbreak\n\t\toldnormw = normw\n\treturn normw, v",
"def test_our_multiply(self):\n\n self.assertEqual(self.our_module.multiply(3, 4), 12)",
"def test_dbm_1(self):\n self.assertTrue(np.allclose(dbm(self.v1), self.r1, atol=1e-8, rtol=1e-5))",
"def test02(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n cr = bcolz.eval(\"a * b\", rootdir=self.rootdir)\n nr = a * b\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")",
"def test_multiplying(self):\n multiplier = Multiplier()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n self.assertEqual(i*j, multiplier.calc(j, i))",
"def test_mulmod(self):\n from manticore.platforms import evm\n from manticore.core.smtlib import ConstraintSet, Z3Solver, Operators\n\n constraints = ConstraintSet()\n\n address = 0x41414141414141414141\n data = b\"\"\n caller = 0x42424242424242424242\n value = 0\n bytecode = \"\"\n vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=23000)\n\n self.assertEqual(vm.MULMOD(12323, 2343, 20), 9)\n self.assertEqual(vm.MULMOD(12323, 2343, 0), 0)\n\n A, B, C = (\n 110427941548649020598956093796432407239217743554726184882600387580788736,\n 1048576,\n 4194319,\n )\n self.assertEqual(vm.MULMOD(A, B, C), 2423129)\n a, b, c = (\n constraints.new_bitvec(256),\n constraints.new_bitvec(256),\n constraints.new_bitvec(256),\n )\n constraints.add(a == A)\n constraints.add(b == B)\n constraints.add(c == C)\n result = vm.MULMOD(a, b, c)\n # 0x8000000000000000000000000000000000000000000000000000000082000011\n self.assertEqual(Z3Solver.instance().get_all_values(constraints, result), [2423129])",
"def test_powell(self):\n fun = get_problem('powell', dimension=2, lower=-4, upper=5)\n self.assertAlmostEqual(fun(np.zeros(2)), 0.0)",
"def test_cache_maxsize(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n def cost(a, cachesize):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=param_shift,\n cachesize=cachesize,\n )[0]\n\n params = jax.numpy.array([0.1, 0.2])\n jax.jit(jax.grad(cost), static_argnums=1)(params, cachesize=2)\n cache = spy.call_args[0][1]\n\n assert cache.maxsize == 2\n assert cache.currsize == 2\n assert len(cache) == 2",
"def matmul():\n\n if RESULT_IN_NVRAM:\n matrix_c = ResultMatrixInDaos()\n else:\n matrix_c = ResultMatrixInMemory()\n\n # This could be trivially optimized by reordering indexes\n # and caching either a_block or b_block (assuming C in-memory).\n # *However* it would result in unfair comparisons with the \n # previous implementation used elsewhere.\n # Using the naive algorithm makes sense for a raw comparison.\n for i in range(MATRIXSIZE):\n for j in range(MATRIXSIZE):\n partial_result_block = np.zeros((BLOCKSIZE, BLOCKSIZE))\n\n for k in range(MATRIXSIZE):\n a_block = np.fromstring(\n DAOS_KV[\"A%02d%02d\" % (i, k)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n b_block = np.fromstring(\n DAOS_KV[\"B%02d%02d\" % (k, j)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n partial_result_block += a_block @ b_block\n \n matrix_c[i,j] = partial_result_block\n\n return matrix_c",
"def test_api_ucs_power(self):\n # first power off all servers\n self.set_all_server_power_state(\"off\")\n # verify power state is down\n self.check_all_server_power_state(\"down\")\n # now power on the servers\n self.set_all_server_power_state(\"on\")\n # verify power state is up\n self.check_all_server_power_state(\"up\")",
"def testMatrix(m):\n print \"Testing the spread matrix:\"\n for i in m.matrix:\n if float('%.3g' % sum(i)) != 1.000 and sum(i) != 0:\n print \"The spread is not as expected\", sum(i)\n return\n print \"Matrix is acceptable\"",
"def matrix_power(M, n):\n if n < 0:\n M = pinv(M)\n n = abs(n)\n\n # Shortcuts when 0 < n <= 3\n if n == 0:\n return at.eye(M.shape[-2])\n\n elif n == 1:\n return M\n\n elif n == 2:\n return tm.dot(M, M)\n\n elif n == 3:\n return tm.dot(tm.dot(M, M), M)\n\n result = z = None\n\n while n > 0:\n z = M if z is None else tm.dot(z, z)\n n, bit = divmod(n, 2)\n if bit:\n result = z if result is None else tm.dot(result, z)\n\n return result",
"def check_change_power_spectrum(test_knotpos, test_knotval, matpow):\n #Get the modified power spectrum\n kval = matpow[:,0]\n newpk = lyasimulation.change_power_spectrum_knots(test_knotpos, test_knotval, matpow)\n #Check the kvalues are still the same for comparison to the transfer function\n assert np.all([k in newpk[:,0] for k in kval])\n #Build interpolators for the new power spectrum\n #Only interpolate a subset of Pk for speed\n newpkint = build_restrict_interp(newpk, test_knotpos[0]/3., test_knotpos[-1]*3)\n #Build interpolator for old power spectrum\n pkint = build_restrict_interp(matpow, test_knotpos[0]/3., test_knotpos[-1]*3)\n #Build interpolator for knots\n ext_knotpos = np.concatenate([[kval[0],],test_knotpos, [kval[-1],]])\n ext_knotval = np.concatenate([[test_knotval[0],],test_knotval, [test_knotval[-1],]])\n knotint = interp.interp1d(ext_knotpos, ext_knotval, kind='linear')\n #Check that the interpolator works\n assert np.all(np.abs(knotint(test_knotpos) / test_knotval-1) < 1e-5)\n lg_knotpos = np.log(test_knotpos)\n #Check modification worked at the knot values\n assert np.all(np.abs(np.exp(newpkint(lg_knotpos)) / (np.exp(pkint(lg_knotpos)) * test_knotval) - 1) < 1e-3)\n #Pick some random k values distributed uniformly in log space\n krand = (lg_knotpos[-1]-lg_knotpos[0]+0.2)*np.random.random(250)+lg_knotpos[0]-0.1\n #Check that the modification was accurate at random positions\n #print(np.max(np.abs(np.exp(newpkint(krand)) / (np.exp(pkint(krand)) * knotint(np.exp(krand))) - 1)))\n assert np.all(np.abs(np.exp(newpkint(krand)) / (np.exp(pkint(krand)) * knotint(np.exp(krand))) - 1) < 0.01)"
] | [
"0.73225653",
"0.58746",
"0.58050704",
"0.56987643",
"0.5631421",
"0.5607155",
"0.55773807",
"0.55611145",
"0.54241306",
"0.5411276",
"0.5373944",
"0.5357646",
"0.5337961",
"0.5331032",
"0.5329204",
"0.5325335",
"0.5301618",
"0.5290857",
"0.52838194",
"0.5280639",
"0.5274496",
"0.52618116",
"0.5251156",
"0.5245079",
"0.5240098",
"0.5220719",
"0.521877",
"0.52175653",
"0.52138186",
"0.52123475"
] | 0.824397 | 0 |
This function returns the stations with the N highest relative water levels. | def stations_highest_rel_level(stations, N):
relative_water_level = []
    # Collect (station name, relative water level) pairs for stations with a valid level
    for station in stations:
        level = station.relative_water_level()
        if type(level) is float:
            relative_water_level.append((station.name, level))
    # Sort by relative water level, highest first
    relative_water_level.sort(key=lambda tup: tup[1], reverse=True)
return relative_water_level[:N] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run():\n # build the station list and update the current levels\n station_list = build_station_list()\n update_water_levels(station_list, use_cache=True)\n\n num_stations = 10\n highest_level_stations = stations_highest_rel_level(station_list, num_stations)\n\n print(\"{} stations with the highest relative water levels, in descending order:\".format(num_stations))\n for station in highest_level_stations:\n print(station.name, station.relative_water_level())",
"def get_n_best(self):\n pass",
"def best_genomes(self, n):\n def key(g):\n return g.fitness\n\n return sorted(self.most_fit_genomes, key=key, reverse=True)[:n]",
"def test_analysis_of_vector_data_top_N(self):\n\n for vectorname in ['test_buildings.shp',\n 'tsunami_building_exposure.shp']:\n\n filename = '%s/%s' % (TESTDATA, vectorname)\n layer = read_layer(filename)\n attributes = layer.get_data()\n\n # Check exceptions\n try:\n L = layer.get_topN(attribute='FLOOR_AREA', N=0)\n except VerificationError:\n pass\n else:\n msg = 'Exception should have been raised for N == 0'\n raise Exception(msg)\n\n # Check results\n for N in [5, 10, 11, 17]:\n if vectorname == 'test_buildings.shp':\n L = layer.get_topN(attribute='FLOOR_AREA', N=N)\n assert len(L) == N\n\n msg = ('Got projection %s, expected %s' %\n (L.projection, layer.projection))\n assert L.projection == layer.projection, msg\n #print [a['FLOOR_AREA'] for a in L.attributes]\n elif vectorname == 'tsunami_building_exposure.shp':\n L = layer.get_topN(attribute='STR_VALUE', N=N)\n assert len(L) == N\n assert L.get_projection() == layer.get_projection()\n val = [a['STR_VALUE'] for a in L.data]\n\n ref = [a['STR_VALUE'] for a in attributes]\n ref.sort()\n\n assert numpy.allclose(val, ref[-N:],\n atol=1.0e-12, rtol=1.0e-12)\n else:\n raise Exception",
"def N_states_for_learner(self):\n idx_max = []\n limits = 50, 2*_math.pi, 50, 50, 50, 50, 50, 50, 50\n for idx, limit in enumerate(limits):\n test = [0 for i in xrange(len(limits))]\n check = _arange(-limit,limit,limit/1000.)\n maxi = 0\n for v in check:\n test[idx]=v\n ret = self._state_index(*test)\n maxi = max((maxi, ret[idx]))\n idx_max.append(maxi)\n\n return tuple([idx+1 for idx in idx_max])",
"def run():\n\n # Build list of stations\n stations = build_station_list()\n list_of_rivers_numbers=rivers_by_station_number(stations, 9)\n print(\"Rivers with greatest number of stations: {}\".format(list_of_rivers_numbers))",
"def highestCurrent(requestContext, seriesList, n):\n return sorted( seriesList, key=safeLast )[-n:]",
"def top_of_climb_index(self):\n return self.altitudes.argmax()",
"def _get_max_sampled_bandit(self)->Bandit:\n estimates = []\n for bandit in self.bandits:\n Qth = np.random.normal(loc =self.mu[bandit.id], scale = self.var[bandit.id])\n f_hat = self.mu[bandit.id]#computing moving_average here \n estimates.append(max(Qth, f_hat))\n return self.bandits[np.argmax(estimates)]",
"def maximumBelow(requestContext, seriesList, n):\n\n result = []\n for series in seriesList:\n if max(series) <= n:\n result.append(series)\n return result",
"def top_n(self, n):\n top = {}\n for code, feat_set in self.iteritems():\n tuples = sorted(feat_set.items(), reverse=True, key=itemgetter(1))\n best = {feat for feat, _ in tuples[:n]}\n top[code] = best\n return top",
"def get_sectors_with_max_and_min_stocks():\n stock_by_sector = Counter(counter['sector'] for counter in data if counter['sector'] != 'n/a')\n return stock_by_sector.most_common()[0][0], stock_by_sector.most_common()[-1][0]",
"def get_mostFrequent(self, n=5):\r\n pass",
"def get_mostFrequent(self, n=5):\r\n pass",
"def maximumAbove(requestContext, seriesList, n):\n results = []\n for series in seriesList:\n if max(series) > n:\n results.append(series)\n return results",
"def max_energy(walkers, n, kinetic_only=False):\n # do local max\n if kinetic_only:\n energies_loc = np.array([eval_energy_KE(at) for at in walkers])\n else:\n energies_loc = np.array([ at.info['ns_energy'] for at in walkers])\n volumes_loc = np.array([ at.get_volume() for at in walkers])\n if comm is not None:\n energies = np.zeros( (comm.size*len(energies_loc)) )\n volumes = np.zeros( (comm.size*len(volumes_loc)) )\n # comm.barrier() #BARRIER\n comm.Allgather( [ energies_loc, MPI.DOUBLE ], [ energies, MPI.DOUBLE ] )\n energies = energies.flatten()\n comm.Allgather( [ volumes_loc, MPI.DOUBLE ], [ volumes, MPI.DOUBLE ] )\n volumes = volumes.flatten()\n else:\n energies = energies_loc\n volumes = volumes_loc\n\n # n is n_cull\n Emax_ind = energies.argsort()[-1:-n-1:-1]\n Emax = energies[Emax_ind]\n Vmax = volumes[Emax_ind]\n # WARNING: assumes that each node has equal number of walkers\n rank_of_max = np.floor(Emax_ind/len(walkers)).astype(int)\n ind_of_max = np.mod(Emax_ind,len(walkers))\n\n return (Emax, Vmax, rank_of_max, ind_of_max)",
"def get_top_station_set(city):\n s = {}\n for file in os.listdir(exp_data_path + os.sep + 'station' + os.sep + city):\n with open(exp_data_path + os.sep + 'station' + os.sep + city + os.sep + file) as f:\n reader = csv.reader(f)\n for row in reader:\n if row[0] not in s:\n s[row[0]] = 1\n else:\n s[row[0]] = s[row[0]] + 1\n\n sort_s = dict(sorted(s.items(), key=lambda x : x[1], reverse=True))\n first = True\n res = []\n for k, v in sort_s.items():\n if first:\n top = v\n first = False\n if top - v <= 30:\n res.append(k)\n print('before', len(sort_s))\n print('after', len(res))\n\n # restore new map [old_index, new_index]\n list_remap = {}\n new_index = 0\n for index in range(0, data_length[city]):\n if str(index) in res:\n list_remap[index] = new_index\n new_index = new_index + 1\n\n # print(list_remap)\n check_path(exp_data_path + os.sep + 'station_list')\n file_name = exp_data_path + os.sep + 'station_list' + os.sep + 'list_remap_{}'.format(city) + '.npy'\n if os.path.exists(file_name):\n os.remove(file_name)\n np.save(file_name, list_remap)",
"def highestAverage(requestContext, seriesList, n):\n\n return sorted( seriesList, key=lambda s: safeDiv(safeSum(s),safeLen(s)) )[-n:]",
"def top_n_combined(self, n):\n top = set()\n for feat_set in self.itervalues():\n tuples = sorted(feat_set.items(), reverse=True, key=itemgetter(1))\n best = {feat for feat, _ in tuples[:n]}\n top |= best\n return top",
"def strongest(nanobots):\n return max(nanobots, key=attrgetter('strength'))",
"def get_three_largest_stations_graph(filename):\n with open(filename) as f_in:\n reader = csv.DictReader(f_in)\n station = {} # This is a {station-id: station-name} dictionary. It is more efficient by using id.\n start_station_number = {} # This is a {station-id: number of connections} dictionary.\n start_station_route = {} # This is a {start-id: {end_id: number of connections}} dictionary.\n\n largest_station_id = 0\n largest_station_times = 0\n second_largest_station_id = 0\n second_largest_station_times = 0\n third_largest_station_id = 0\n third_largest_station_times = 0\n for row in reader:\n start_id = row['start station id']\n end_id = row['end station id']\n if station.get(start_id) is None:\n station[start_id] = row['start station name']\n if station.get(end_id) is None:\n station[end_id] = row['start station name']\n if start_station_route.get(start_id) is None:\n start_station_route[start_id] = {}\n start_station_route[start_id][end_id] = 1\n start_station_number[start_id] = 1\n else:\n start_station_number[start_id] += 1\n if start_station_route[start_id].get(end_id) is None:\n start_station_route[start_id][end_id] = 1\n else:\n start_station_route[start_id][end_id] += 1\n\n times = start_station_number[start_id]\n if times > third_largest_station_times:\n if times >= second_largest_station_times:\n if times >= largest_station_times:\n # If this one is the largest one, only adding the largest by one\n if start_id != largest_station_id:\n third_largest_station_id = second_largest_station_id\n third_largest_station_times = second_largest_station_times\n second_largest_station_id = largest_station_id\n second_largest_station_times = largest_station_times\n largest_station_id = start_id\n largest_station_times += 1\n else:\n # If this one is the second largest one, only adding the second largest by one\n if start_id != second_largest_station_id:\n third_largest_station_id = second_largest_station_id\n third_largest_station_times = second_largest_station_times\n second_largest_station_id = start_id\n second_largest_station_times = times\n else:\n third_largest_station_id = start_id\n third_largest_station_times = times\n\n # print the largest three stations information\n largest_station = station[largest_station_id]\n second_largest_station = station[second_largest_station_id]\n third_largest_station = station[third_largest_station_id]\n print(\"The largest three stations in NYC are {}, {}, and {}.\"\n .format(largest_station, second_largest_station, third_largest_station))\n print(\"{} has {} connections with {} stations.\".\n format(largest_station, largest_station_times, len(start_station_route[largest_station_id])))\n print(\"{} has {} connections with {} stations.\".\n format(second_largest_station, second_largest_station_times,\n len(start_station_route[second_largest_station_id])))\n print(\"{} has {} connections with {} stations.\".\n format(third_largest_station, third_largest_station_times,\n len(start_station_route[third_largest_station_id])))\n\n # sort the station_route by numbers of connections and get the first ten start-end connections\n largest_station_graph = get_station_graph(largest_station_id,\n sort_end_station_list(start_station_route[largest_station_id]))\n second_largest_station_graph = get_station_graph(second_largest_station_id, sort_end_station_list(\n start_station_route[second_largest_station_id]))\n third_largest_station_graph = get_station_graph(third_largest_station_id, sort_end_station_list(\n start_station_route[third_largest_station_id]))\n\n # 
convert the station-id back to station-name\n largest_station_graph = get_station_name(largest_station_graph, station)\n second_largest_station_graph = get_station_name(second_largest_station_graph, station)\n third_largest_station_graph = get_station_name(third_largest_station_graph, station)\n\n return largest_station_graph, second_largest_station_graph, third_largest_station_graph",
"def get_top_n_leaders(self, kind, stat, year, n):\n if stat in ['WAR', 'rank', 'G']:\n sort_key = 'fg.{0}.{1}.{0}_{2}'.format(kind, year, stat)\n else:\n sort_key = 'fg.{}.{}.{}'.format(kind, year, stat)\n\n lb = self._db.Players.find({}).sort(sort_key, -1).limit(n)\n\n return [x['fg'][kind][str(year)] for x in lb]",
"def find_table_length(self):\n\n max_x_stations = 0\n for line_num, stations in self.__bus_controller.stations_dict.items():\n max_key = max(stations.keys())\n max_x_stations = max(max_key, max_x_stations)\n max_x_bus = 0\n\n for buses in self.__bus_controller.bus_dict.values():\n if len(buses) != 0:\n buses.sort(key=lambda bus: bus.station_num)\n max_x_bus = max(buses[-1].station_num, max_x_bus)\n max_x = max(max_x_bus, max_x_stations)\n return max_x",
"def get_sectors_with_max_and_min_stocks():\n mydict_sector = dict()\n\n for item in data:\n if item['sector'] not in 'n/a':\n if item['sector'] in mydict_sector.keys():\n mydict_sector[item['sector']] += 1\n else:\n mydict_sector[item['sector']] = 1\n\n foutput = sorted(mydict_sector.items(), key = lambda x:x[1], reverse=True)\n return (foutput[0][0], foutput[-1][0])",
"def highestMax(requestContext, seriesList, n):\n result_list = sorted( seriesList, key=lambda s: max(s) )[-n:]\n\n return sorted(result_list, key=lambda s: max(s), reverse=True)",
"def highest_pop(self, n=100):\n self.highest_x(n, self.n_per_item(), 'popularity')",
"def mostVisited(self, n: int, rounds):\n start, end = rounds[0], rounds[-1]\n if start <= end:\n return list(range(start, end+1))\n else:\n return list(range(1, end+1)) + list(range(start, n+1))",
"def get_max_depth_val():\n data = SUNRGBDTrainDataset(True)\n return max([data[0][i][-1].flatten().item() for i in range(len(data))])",
"def closest(self, w, n=10):\r\n scores = self.m.dot(self.represent(w))\r\n return heapq.nlargest(n, zip(scores, self.iw))",
"def closest(self, w, n=10):\r\n scores = self.m.dot(self.represent(w))\r\n return heapq.nlargest(n, zip(scores, self.iw))"
] | [
"0.6555966",
"0.5809836",
"0.5742762",
"0.5694438",
"0.5691747",
"0.56783056",
"0.56687224",
"0.564456",
"0.56208897",
"0.56125605",
"0.5601425",
"0.5596799",
"0.55452675",
"0.55452675",
"0.5542017",
"0.55126023",
"0.54798305",
"0.54398257",
"0.5438739",
"0.54237264",
"0.54053086",
"0.5373043",
"0.5361835",
"0.53572094",
"0.5346242",
"0.5341355",
"0.5331",
"0.5324631",
"0.53150856",
"0.53150856"
] | 0.83433473 | 0 |
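A minimal usage sketch for the `stations_highest_rel_level` entry above. The `MonitoringStation` stub, its attributes and the sample readings are assumptions made here for illustration only; the real project presumably defines its own station class with a `name` attribute and a `relative_water_level()` method returning a float.

```python
class MonitoringStation:
    """Hypothetical stand-in for the station objects the function expects."""

    def __init__(self, name, latest_level, typical_low, typical_high):
        self.name = name
        self.latest_level = latest_level
        self.typical_range = (typical_low, typical_high)

    def relative_water_level(self):
        low, high = self.typical_range
        # 0.0 at the typical low, 1.0 at the typical high of the station's range.
        return float((self.latest_level - low) / (high - low))


stations = [
    MonitoringStation("Cam", 1.8, 0.2, 1.0),    # relative level ~2.0
    MonitoringStation("Ouse", 0.6, 0.1, 0.9),   # relative level ~0.625
    MonitoringStation("Trent", 2.4, 0.5, 1.5),  # relative level ~1.9
]

print(stations_highest_rel_level(stations, 2))
# -> [('Cam', 2.0), ('Trent', 1.9)]  (values approximate)
```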
returns the number of vertices of a graph | def num_vertices(self):
return len(self.__graph_dict.keys()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_num_vertices(self):\n\n return self._graph_state.get_num_vertices()",
"def num_vertices(self):\n return self._top_exp.number_of_vertices()",
"def n_vertices(self):\n try: \n return self._n_vertices\n except AttributeError:\n self._n_vertices = 0\n for v in self.vertex_generator(): self._n_vertices += 1\n return self._n_vertices",
"def num_vertices(self):\n return len(self.vertices)",
"def num_vertices(self):\n return len(self.vertices)",
"def vertexCount(self):\n return self._nVertices",
"def get_vertices_count(self) -> int:\n # TODO: verify the following claim:\n raise NotImplementedError",
"def return_num_vertices(self):\n return self.__size",
"def num_vertices(self):\n return len(self)",
"def vertex_count(self) -> int:\n return len(self._vertex_map)",
"def num_vertices(self):\n return self.n * (1 + int(self.variant.is_bipartite()))",
"def obtener_cantidad_vertices(self):\n return len(self.vertices.keys())",
"def getNumVertices(self):\n return len(self.V)",
"def num_vertices(self, p):\n ret_val = self._num_vertices(p)\n return ret_val",
"def vertex_count(self):\n return len(self._outgoing)",
"def compute_num_nodes(graph):\n return len(graph.keys()) # return the number of nodes in the graph",
"def n_vertices(self):\n return len(self.minimized_generators())",
"def vertex_multidegree(breakpoint_graph, vertex):\n return len(list(breakpoint_graph.get_edges_by_vertex(vertex)))",
"def num_edges_rows(graph):\n return len(graph.graph.edges), len(graph.graph.nodes)",
"def compute_num_edges(graph):\n # return the number of edges\n return sum([len(graph[source_node].keys()) for source_node in graph.keys()]) / 2",
"def get_vertex_count(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_GetVertexCount(objectid)\n remote.runCommand(cmd1)\n result_val = mmapi.any_result()\n cmd1.GetSceneCommandResult_GetVertexCount(key1, result_val)\n return result_val.i",
"def get_num_vertices(triangles):\n return numpy.amax(numpy.reshape(triangles, -1)) + 1",
"def getNumVertexes(self):\n return _osgAnimation.RigTransformHardware_getNumVertexes(self)",
"def graph_count(self) -> int:\n return int(self.graph_tuple_stats.graph_count)",
"def __len__(self):\n return len(self._vertices)",
"def v_size(self) -> int:\n return self.nodes_on_graph",
"def num_edges(self):\n return sum(len(v.adj) for v in self.vertices.values())",
"def num_of_edge(self):\n try:\n return self.edges\n except:\n print(\"ERROR: No graph exists\")",
"def get_vertices_num(self):\n return self.coords.shape[0]",
"def num_nodes(g):\n return len(g.keys())"
] | [
"0.8294428",
"0.82162774",
"0.8099264",
"0.8084873",
"0.8084873",
"0.80744034",
"0.80422086",
"0.7976934",
"0.79424685",
"0.79383194",
"0.7920504",
"0.7877179",
"0.7767539",
"0.7748465",
"0.7637932",
"0.75549054",
"0.74742794",
"0.7297782",
"0.72895753",
"0.7250115",
"0.72318494",
"0.7219264",
"0.7168542",
"0.71635294",
"0.71110535",
"0.7093446",
"0.707254",
"0.7001449",
"0.70007527",
"0.6947438"
] | 0.8577807 | 0 |
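Since the `num_vertices` entry above is a single method, here is a small hypothetical host class showing the `__graph_dict` attribute it relies on; the `Graph` name and constructor are illustrative assumptions, not taken from the original code base.

```python
class Graph:
    def __init__(self, graph_dict=None):
        # Adjacency mapping: vertex -> set of neighbouring vertices.
        self.__graph_dict = graph_dict if graph_dict is not None else {}

    def num_vertices(self):
        """Return the number of vertices of the graph."""
        return len(self.__graph_dict.keys())


g = Graph({"a": {"b"}, "b": {"a", "c"}, "c": {"b"}})
print(g.num_vertices())  # -> 3
```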
Assumes that edge is of type set, tuple or list; there can be multiple edges between two vertices! | def add_edge(self, edge):
edge = set(edge)
(vertex1, vertex2) = tuple(edge)
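        # Four cases follow: both endpoints already known, only the first known, only the second known, or neither known yet.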
if vertex1 in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():
if vertex2 in self.__graph_dict[vertex1] and vertex1 in self.__graph_dict[vertex2]:
return
self.__graph_dict[vertex1].add(vertex2)
self.__graph_dict[vertex2].add(vertex1)
elif vertex1 not in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():
self.__graph_dict[vertex1] = {vertex2}
self.__graph_dict[vertex2].add(vertex1)
elif vertex1 in self.__graph_dict.keys() and vertex2 not in self.__graph_dict.keys():
self.__graph_dict[vertex2] = {vertex1}
self.__graph_dict[vertex1].add(vertex2)
else:
self.__graph_dict[vertex1] = {vertex2}
self.__graph_dict[vertex2] = {vertex1} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.graph_dict:\n self.graph_dict[vertex1].append(vertex2)\n else:\n self.graph_dict[vertex1] = [vertex2]\n return edge",
"def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.graph_dict:\n self.graph_dict[vertex1].append(vertex2)\n else:\n self.graph_dict[vertex1] = [vertex2]",
"def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.__graph_dict:\n self.__graph_dict[vertex1].append(vertex2)\n else:\n self.__graph_dict[vertex1] = [vertex2]",
"def edge_vertices(edge):\n return [edge.vertex1, edge.vertex2]",
"def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 not in self.__graph_dict:\n self.__graph_dict[vertex1] = []\n dbg_str = \"Vertex being initialized ..\" + str(vertex1)\n # logging.debug(dbg_str)\n if vertex2 not in self.__graph_dict:\n self.__graph_dict[vertex2] = []\n dbg_str = \"Vertex being initialized ..\" + str(vertex2)\n # logging.debug(dbg_str)\n if vertex2 not in self.__graph_dict[vertex1]:\n self.__graph_dict[vertex1].append(vertex2)\n dbg_str = \"Appending .. \" + str(vertex2), \"to ->\" +str(vertex1)\n # logging.debug(dbg_str)\n\n if vertex1 not in self.__graph_dict[vertex2]:\n self.__graph_dict[vertex2].append(vertex1)\n dbg_str = \"Appending .. \" + str(vertex1), \"to ->\" +str(vertex2)\n # logging.debug(dbg_str)",
"def test_graph_lists_adds_and_lists_edges(graph_no_edges):\n graph_no_edges.add_edge(82, 34, 4)\n graph_no_edges.add_edge(99, 'AA', 6)\n assert (82, 34, 4) in graph_no_edges.edges()\n assert (99, 'AA', 6) in graph_no_edges.edges()",
"def _edge(u, v):\n return (u, v) if u < v else (v, u)",
"def test_edges(self):\n\n edge_list = self.g.edges()\n self.assertEqual(42, len(edge_list))\n\n # p1 p3 and p3 p1 are valid edges\n t1 = ('p1', 'p3')\n self.assertTrue(t1 in edge_list)\n\n t2 = ('p3', 'p1')\n self.assertTrue(t2 in edge_list)\n\n made_up = ('z1', 'q123')\n self.assertFalse(made_up in edge_list)\n\n return None",
"def add_edge(self, v1, v2):\n pass # TODO",
"def edges( self ):\n raise NotImplementedError(\"edges\");",
"def edge_sequence(self, edge):\n u, v = edge\n return self.node_sequence(u), self.node_sequence(v)",
"def path2edge(iterable,graph):\r\n return (graph.es[graph.get_eid(pair[0],pair[1])] for pair in pairwise(iterable))",
"def add_edge_between(self, a: tuple, b: tuple):\n if a not in self.graph:\n self.graph[a] = set()\n if b not in self.graph:\n self.graph[b] = set()\n self.graph[a].add(b)\n self.graph[b].add(a)",
"def test_incoming_edge_traversals(self):\r\n e1 = TestEdge.create(self.v1, self.v2, numbers=12)\r\n e2 = TestEdge.create(self.v1, self.v3, numbers=13)\r\n e3 = OtherTestEdge.create(self.v2, self.v3, numbers=14)\r\n\r\n results = self.v2.inE()\r\n assert len(results) == 1\r\n assert e1 in results\r\n\r\n results = self.v2.inE(types=[OtherTestEdge])\r\n assert len(results) == 0",
"def add_edge(self, v1, v2): # O(1) time complexity\n if v1 in self.vertices and v2 in self.vertices: # check to see if v1 & v2 exists already\n self.vertices[v1].add(v2) # # add connection from v1 to v2 \n else: # else \n print(\"That vertex does not exist\")\n\n # additional options (class)\n \"\"\"\n if (v1 or v2) not in self.vertices:\n return \"vertex does exist\"\n self.vertices[v1].add(v2)\n ###\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices{v1}.add(v2)\n else:\n print(\"One of these vertices does not exist)\n \"\"\"",
"def edge_apply(op, edge):\n vs = frozenset(op[v] for v in EDGES[edge])\n return EDGES_BY_VERTSET[vs]",
"def add_edge(self, v1, v2):\n pass # TODO\n # both vertices have to exist to make connection(e.g. directed edge)\n\n if v1 in self.vertices and v2 in self.vertices:\n # print(f' type(vertices) is {type(self.vertices)}')\n self.vertices[v1].add(v2) # using set .add() method to append\n else:\n # print(f'ERROR: vertex {v1} or {v2} does not exist') \n raise ValueError(\"Vertex not yet created\")\n # print(f'ERROR: vertex {v1} or {v2} does not exist')\n\n #### not quite\n # try:\n # if v1 in self.vertices or v2 in self.vertices:\n # self.vertices[v1].add(v2)\n # except:\n # raise ValueError(\" BAD VERTEX !!\")\n\n\n if v1 not in self.vertices or v2 not in self.vertices:\n raise ValueError(\" BAD VERTEX !!\")\n else:\n self.vertices[v1].add(v2)",
"def getEdge(self, v1, v2):\n for e in self.edges:\n if (e.pvt, e.nvt) in [(v1, v2), (v2, v1)]:\n return e\n raise ValueError('No edge found')",
"def add_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n print(\"ERROR ADDING EDGE: Vrtes not found\")",
"def add_edge(i, j):\n if (i, j) in edges or (j, i) in edges:\n # Si ya esta agregado en la lista no agrega nada\n return\n edges.add( (i, j) )\n edge_points.append(points[ [i, j] ])",
"def de2ue(edges):\n return set(de for de in edges if tuple(reversed(de)) in edges)",
"def edges(self, edges):\n if edges:\n edges = ensure_list(edges)\n for (nd_out, nd_in) in edges:\n if nd_out not in self.nodes or nd_in not in self.nodes:\n raise Exception(\n f\"edge {(nd_out, nd_in)} can't be added to the graph\"\n )\n self._edges = edges",
"def addEdge(this, a, b):\n if not a in this.m:\n this.m[a]=set()\n this.m[a].add(b)",
"def add_edge(self, vertices: Iterable[\"Vertex\"]) -> None:\n vertices = list(vertices)\n if len(vertices) == 2:\n self.edges.append(self.add_vertices(vertices)) # type: ignore\n else:\n raise DXFValueError(\n \"Invalid vertices count, expected two vertices.\"\n )",
"def edgeAdjacency( gen ):\n if gen == 0:\n return []\n elif gen == 1:\n return [(0,5), (1,8), (2,11)]\n else:\n raise ValueError, \"Hasn't been programmed yet!\"",
"def dstruc_from_edge_set(self, edge_set):\n\t\tself.edge_dict = {}\n\t\tself.vertex_dict = {}\n\t\tedge_list = edge_set[2:-2].split('},{')\n\t\tfor edge in edge_list:\n\t\t\tvertex_list = edge.split(',')\n\t\t\tvertex_list = map(int, vertex_list)\n\t\t\tvertex_list = (vertex_list[0], vertex_list[1])\n\t\t\tself.edge_dict[vertex_list] = 1\n\t\t\tvertex1 = vertex_list[0]\n\t\t\tvertex2 = vertex_list[1]\n\t\t\tself.vertex_pool.add(vertex1)\n\t\t\tself.vertex_pool.add(vertex2)\n\t\t\tif vertex1 not in self.vertex_dict:\n\t\t\t\tself.vertex_dict[vertex1] = 1\n\t\t\tif vertex2 not in self.vertex_dict:\n\t\t\t\tself.vertex_dict[vertex2] = 1",
"def add_edge(self, e):\n a, b = e\n self[a][b] = e\n self[b][a] = e",
"def add_edge(self, v1, v2):\n # Check if they exist\n # if v1 in self.vertices and v2 in self.vertices:\n if v1 in self.vertices:\n # Add the edge\n self.vertices[v1].add(v2)\n else:\n print(f\"ERROR ADDING EDGE between {v1} and {v2} : Vertex not found\")",
"def IsEdge(self, p_int, p_int_1):\n ...",
"def test_get_subedges_from_edge(self):\n subvertices = list()\n subedges = list()\n subvertices.append(PartitionedVertex(None, \"\"))\n subvertices.append(PartitionedVertex(None, \"\"))\n subedges.append(MultiCastPartitionedEdge(subvertices[0],\n subvertices[1]))\n subedges.append(MultiCastPartitionedEdge(subvertices[1],\n subvertices[1]))\n sube = MultiCastPartitionedEdge(subvertices[1], subvertices[0])\n subedges.append(sube)\n graph = GraphMapper()\n edge = TestPartitionableEdge(TestVertex(10, \"pre\"),\n TestVertex(5, \"post\"))\n graph.add_partitioned_edge(sube, edge)\n graph.add_partitioned_edge(subedges[0], edge)\n subedges_from_edge = \\\n graph.get_partitioned_edges_from_partitionable_edge(edge)\n self.assertIn(sube, subedges_from_edge)\n self.assertIn(subedges[0], subedges_from_edge)\n self.assertNotIn(subedges[1], subedges_from_edge)"
] | [
"0.6924097",
"0.67404646",
"0.6645291",
"0.6581878",
"0.65670335",
"0.65597415",
"0.6473252",
"0.6442472",
"0.64420813",
"0.6412036",
"0.6311367",
"0.6298211",
"0.62873614",
"0.6284625",
"0.6282889",
"0.62800133",
"0.6269465",
"0.6244941",
"0.6243469",
"0.62401235",
"0.62276226",
"0.62234277",
"0.62224346",
"0.6210772",
"0.62076193",
"0.6207549",
"0.6205883",
"0.6197106",
"0.6184954",
"0.61306614"
] | 0.69176805 | 1 |
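A short usage sketch for the `add_edge` entry above. The `Graph` wrapper below is hypothetical, and its `add_edge` body uses `dict.setdefault` as a compact equivalent of the four explicit membership branches in the original method.

```python
class Graph:
    def __init__(self):
        # vertex -> set of adjacent vertices (undirected, no duplicate entries).
        self.__graph_dict = {}

    def add_edge(self, edge):
        edge = set(edge)
        (vertex1, vertex2) = tuple(edge)
        # setdefault creates the neighbour set the first time a vertex is seen.
        self.__graph_dict.setdefault(vertex1, set()).add(vertex2)
        self.__graph_dict.setdefault(vertex2, set()).add(vertex1)


g = Graph()
g.add_edge({"a", "b"})
g.add_edge(("b", "c"))
g.add_edge(["a", "b"])  # adding the same edge twice leaves the graph unchanged
# Resulting adjacency: {'a': {'b'}, 'b': {'a', 'c'}, 'c': {'b'}}
```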
Slim down the options list when it has grown large (more than 100 entries), keeping only objects whose count_func value matches that of the global base object; the slimmed list is adopted only if more than n entries remain. | def slim_down_options(options, count_func, n=25, v=''):
if len(options) > 100:
options_slim = []
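        # 'base' and 'verbose' are not parameters: they are assumed to be module-level globals defined elsewhere in the original script.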
c = count_func(base)
for obj in options:
if c == count_func(obj):
options_slim.append(obj)
if len(options_slim) > n:
options = options_slim
if verbose:
print(v, len(options))
return options | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def keep_n(self, n=100):\n before = self.item_count()\n\n item_count = self.item_count()\n if item_count > n: self.filter(self.sample(n))\n\n after = self.item_count()\n with msg(f'Keeping (at most) {n} items: {after} of {before}', done=False, enabled=self.output):pass",
"def limit_number_prompts(state: SessionState):\n if state.prompts is not None and len(state.prompts) > 1:\n state.prompts = [state.prompts[0]]",
"def get_number_of_extra_items_in_page_with_initially_selected(self):\n return 10",
"def setrecursionlimit(n): # real signature unknown; restored from __doc__\n pass",
"def make_chunk_size_odd(self):\n if self.select_chunk_size % 2 == 0:\n self.select_chunk_size = self.select_chunk_size + 1",
"def NoMore():\n\n if assem.MoreParameters():\n errors.DoWarning('extrign', False)",
"def NoMore():\n\n if assem.MoreParameters():\n errors.DoWarning('extrign', False)",
"def OPTIONS_LOOP():\n pass",
"def limit(requestContext, seriesList, n):\n return seriesList[0:n]",
"def ask_options(self, msg, n_options, delta=0):\n opt_max = n_options\n new_option = n_options + 1\n ignore_option = n_options + 2\n res = map(int, click.prompt(msg, type=str).split(','))\n\n if res == [new_option]:\n return 'add-new'\n elif res == [ignore_option]:\n return 'ignore'\n elif all(1 <= n <= opt_max for n in res):\n return [n + delta for n in res]",
"def __options(self):\n\t\ta = 1 if self.random else 0\n\t\tb = 2 if self.topoftheday else 0\n\t\tc = 4 if self.offline else 0\n\t\treturn a+b+c",
"def skip(n):\n\n if n >= 0:\n @filters\n def _dagpype_internal_fn_act_p(target):\n remaining = n\n try:\n while True:\n e = (yield)\n if remaining == 0:\n target.send(e)\n continue\n t = e.shape[0]\n if t > remaining:\n target.send(e[remaining :])\n remaining = 0\n else:\n remaining -= t\n except GeneratorExit:\n target.close()\n\n return _dagpype_internal_fn_act_p\n\n @filters\n def _dagpype_internal_fn_act_n(target):\n m = -n\n pending = collections.deque([])\n try:\n while True:\n pending.append((yield))\n while len(pending) > 0:\n first = pending.popleft()\n if sum((e.shape[0] for e in pending)) >= m: \n target.send(first)\n else:\n pending.appendleft(first)\n break\n except GeneratorExit:\n if sum((e.shape[0] for e in pending)) < m:\n target.close()\n return\n while m > 0:\n e = pending.pop()\n if e.shape[0] < m:\n m -= e.shape[0]\n else:\n e = e[: e.shape[0] - m]\n if e.shape[0] > 0:\n pending.append(e)\n break\n while len(pending) > 0:\n e = pending.pop()\n target.send(e)\n target.close()\n\n return _dagpype_internal_fn_act_n",
"def validate(n = 5):",
"def display_menu_options(length):\r\n print('\\n***********************************************\\nVeuillez choisir une option entre 1 et', str(length))",
"def _set_number_of_levels(self, number_of_levels):\n if not number_of_levels & 1:\n number_of_levels -= 1\n logging.warning('Set number of levels to an odd number %r',\n number_of_levels)\n\n self._number_of_levels = number_of_levels\n self._compute_quantization_factor()",
"def test_at_most_no_count_no_default_no_args_optional():\n class TestCmdLine(CmdLine):\n yaml_def = '''\n supported_options:\n - category:\n options:\n - name : test_opt\n long : test-opt\n opt : param\n multi_type: at-most\n required : false\n '''\n test_opt = None\n args = \"util-name\"\n parse_result = TestCmdLine.parse(args)\n assert parse_result.value == ParseResultEnum.SUCCESS.value\n assert TestCmdLine.test_opt == []",
"def slide(self, n):\n \n n = self.ladders.get(n, n)\n n = self.snakes.get(n, n)\n \n if n < 1 or n > self.size:\n n = None\n \n return n",
"def skip(self, n=None):\n while n > 0:\n try:\n self.next()\n except StopIteration:\n break\n n -= 1",
"def setNSlices(self,n):\n assert(n> 0)\n self._c_param.lee_richards_n_slices = n",
"def complete_opt_pagesize(self, *_):\n return [\"auto\"]",
"def test_neg_list_size_with_extra_parameter(self):\n key = ('test', 'demo', 1)\n policy = {'timeout': 1000}\n with pytest.raises(TypeError) as typeError:\n self.as_connection.list_size(key, \"contact_no\", {}, policy, \"\")\n\n assert \"list_size() takes at most 4 arguments (5 given)\" in str(\n typeError.value)",
"def __len__(self):\n return len(self.options)",
"def test_tooManyModeParameters(self):\n self._sendModeChange(\"+s\", \"wrong\")\n self._checkModeChange([])\n errors = self.flushLoggedErrors(irc.IRCBadModes)\n self.assertEqual(len(errors), 1)\n self.assertSubstring(\"Too many parameters\", errors[0].getErrorMessage())",
"def n_configs(self, val):\n if val >= 1 and isinstance(val, int):\n if val != self._faux._n_configs:\n self._faux._n_configs = val\n self._faux._update()\n else:\n warn(\"`val` not valid, no update performed\")",
"def drop(n):\n def _drop_xducer(step):\n outer = {\"count\": 0}\n def _drop_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n if outer[\"count\"] < n:\n outer[\"count\"] += 1\n return r\n else:\n return step(r, x)\n return _drop_step\n return _drop_xducer",
"def limit(self, variants):\n return variants[:self.lim]",
"def limit_reached(self):\n if len(self.selected) >= self.limit:\n return True\n return False",
"def onSkipSegLimit(self):\r\n profprint()\r\n #research\r\n logic = self.logic\r\n logic.placeAxialLimitMarker(assign=False)",
"def trim_recommendation_pool(self, n):\n # {k:v for k, v in list(d.items())[:2]}\n self.recommendation_pool = {\n k: v for k, v in list(self.recommendation_pool.items())[:n]}",
"def limit(iterable, n):\n for count, element in enumerate(iterable):\n if count >= n: break\n else: yield element"
] | [
"0.5554807",
"0.54940754",
"0.544022",
"0.53377664",
"0.5276856",
"0.5170868",
"0.5170868",
"0.51587814",
"0.51399946",
"0.51267874",
"0.50891244",
"0.50637853",
"0.5049573",
"0.50469786",
"0.5009878",
"0.49905896",
"0.4988287",
"0.49703386",
"0.49435568",
"0.4943084",
"0.49366468",
"0.49339858",
"0.49319685",
"0.49303332",
"0.49301946",
"0.49145043",
"0.4910446",
"0.49036336",
"0.4897815",
"0.48921692"
] | 0.7012843 | 0 |
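A hedged usage sketch for the `slim_down_options` entry above. The module-level `base` and `verbose` names, and the choice of `len` as the counting function, are invented here purely for illustration.

```python
import random
import string

# Globals the function relies on (assumed; the original defines them elsewhere).
verbose = True
base = "abcdef"      # reference object that candidates are compared against
count_func = len     # any callable returning a comparable "count" per object

# A large candidate pool of random words with varying lengths.
options = [
    "".join(random.choices(string.ascii_lowercase, k=random.randint(4, 8)))
    for _ in range(500)
]

slimmed = slim_down_options(options, count_func, n=25, v="candidates left:")
# Only candidates with len(obj) == len(base) survive, and only if more than 25 of them exist.
print(len(slimmed), "candidates kept")
```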
Print the words in the vocabulary sorted according to their embedding-distance to the given word. Different metrics can be used, e.g. 'cosine' or 'euclidean'. | def print_sorted_words(word, metric='cosine'):
# Get the token (i.e. integer ID) for the given word.
token = tokenizer.word_index[word]
# Get the embedding for the given word. Note that the
# embedding-weight-matrix is indexed by the word-tokens
# which are integer IDs.
embedding = weights_embedding[token]
# Calculate the distance between the embeddings for
# this word and all other words in the vocabulary.
distances = cdist(weights_embedding, [embedding],
metric=metric).T[0]
# Get an index sorted according to the embedding-distances.
# These are the tokens (integer IDs) for words in the vocabulary.
sorted_index = np.argsort(distances)
# Sort the embedding-distances.
sorted_distances = distances[sorted_index]
# Sort all the words in the vocabulary according to their
# embedding-distance. This is a bit excessive because we
# will only print the top and bottom words.
sorted_words = [inverse_map[token] for token in sorted_index
if token != 0]
# Helper-function for printing words and embedding-distances.
def _print_words(words, distances):
for word, distance in zip(words, distances):
print("{0:.3f} - {1}".format(distance, word))
# Number of words to print from the top and bottom of the list.
k = 10
print("Distance from '{0}':".format(word))
# Print the words with smallest embedding-distance.
_print_words(sorted_words[0:k], sorted_distances[0:k])
print("...")
# Print the words with highest embedding-distance.
_print_words(sorted_words[-k:], sorted_distances[-k:]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def word_analogy(self):\n data = open(\"data/word_analogy_subset.en.ar.txt\").read().split('\\n')\n data = [x for x in data if len(x.split()) == 4]\n cnt = 0\n keys = list(self.embeddings_index.keys())\n vectors = np.array(list(self.embeddings_index.values()))\n norms = np.linalg.norm(vectors, axis=1)\n for i in data:\n i = self.preprocessor(i).split()\n try:\n v = self.embeddings_index[i[0]] - self.embeddings_index[i[1]] + self.embeddings_index[i[2]]\n except:\n continue\n unit = v / np.linalg.norm(v)\n dists = np.dot(vectors, unit) / norms\n best = np.argpartition(-dists, 10)[:10 + 1]\n best = best.take(np.argsort((-dists).take(best)))\n result = [(keys[sim], float(dists[sim]))\n for sim in best]\n sbv = result[:10]\n for j in sbv:\n if j[0] == i[3]:\n cnt += 1\n return cnt/ len(data)",
"def hamming_distance(words: Iterator[str], vocabulary: Dict[str, int]):\n\n for word in words:\n distances = []\n suggestions = []\n vocab_list = list(vocabulary)\n for (i,vocab) in enumerate(vocab_list):\n if len(vocab) == len(word):\n distances.append(hamming(word, vocab))\n else:\n distances.append(120)\n \n idx = np.array(distances).argsort()[:5]\n \n for i in range(5):\n for j in range(i+1,5):\n if distances[idx[i]] == distances[idx[j]]:\n if vocabulary.get(vocab_list[idx[i]]) < vocabulary.get(vocab_list[idx[j]]):\n temp = idx[i] \n idx[i] = idx[j]\n idx[j] = temp \n\n for i in idx:\n suggestions.append(vocab_list[i])\n\n output(\"{misspelled}\\t{corrections}\".format(\n misspelled=word,\n corrections=\"\\t\".join(suggestions)\n )) # may cause IO bottleneck",
"def print_word_freq(file):\n# Opening file to be read\n with open(file, \"r\") as f:\n file_contents = f.read()\n\n\n# # Taking away punctuation and lowercase all words\n word_list = file_contents.lower().replace(',',' ').replace('.',' ').replace('!',' ').split()\n # print(word_list)\n\n nice_list = []\n for word in word_list:\n if word not in STOP_WORDS:\n nice_list.append(word)\n # print(nice_list)\n\n d = {}\n for word in nice_list:\n if word not in d.keys():\n d[word] = 1\n else:\n d[word] += 1 \n # print(sorted(d, key=d.get, reverse=True)\n # sorted(d, key=d.get, reverse=true)\n # print(d)\n\n # for word in sorted(d):\n # print((word, d[word]), end = \" \")\n\n d_filtered = sorted(d, key=d.get, reverse=True)\n for x in d_filtered:\n print(x, d[x])",
"def nearby(self, words, num=20):\n ids = np.array([vocabulary.getVocabID(x) for x in words])\n vals, idx = self.sess.run(\n [self._nearby_val, self._nearby_idx], {self._nearby_word: ids})\n for i in range(len(words)):\n print(\"\\n%s\\n=====================================\" % (words[i]))\n for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):\n print(neighbor)\n print(\"%-20s %6.4f\" % (vocabulary.VocabID_to_vocab(neighbor), distance))",
"def closest_words(self, word, n):\n\n vector = self.get_vector(word)\n\n if vector is None:\n return None\n\n distances = [\n (w, torch.dist(vector, self.get_vector(w)).item())\n for w in self.glove.itos\n ]\n\n return [w for w, v in sorted(distances, key=lambda w: w[1])[:n]]",
"def display_words(word_list,specifier):\n \n if specifier.lower() == 'score':\n print(\"{:>6s} - {:s}\".format(\"Score\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n \n \n elif specifier.lower() == 'length':\n print(\"{:>6s} - {:s}\".format(\"Length\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))",
"def nearest_words(embedding, voc_size, word, wint, intw, n_words=10):\n similar_words = {}\n word_embed = embedding(torch.LongTensor([wint[word]]))\n for i in range(voc_size):\n emb = embedding(torch.LongTensor([i]))\n cos_sim = F.cosine_similarity(emb, word_embed)\n if len(similar_words) < n_words:\n similar_words[float(cos_sim)] = intw[i]\n else:\n if cos_sim > min(similar_words):\n min_key = min(similar_words)\n del similar_words[min_key]\n similar_words[float(cos_sim)] = intw[i]\n else:\n pass\n # Ordering dict based on the value of the cosine similarity\n return sorted(similar_words.items())[::-1]",
"def class_conditional_word_dist(self, Mprint=20):\n self.class_word_dist = np.array(np.vstack([self.data[self.labels == ci, :].sum(0)/self.data[self.labels == ci, :].sum() for ci in np.unique(self.labels)])) # num of classes x num of words\n self.labels_word = self.class_word_dist.argmax(0)\n for i in range(self.class_word_dist.shape[0]):\n print('top {} frequent words in class {}'.format(Mprint, i))\n idx = np.argsort(self.class_word_dist[i, :])[::-1][:Mprint]\n for j in range(Mprint):\n print(' {:3d}: {:10s} {:.4f}'.format(j, self.vocab[idx[j]], self.class_word_dist[i, idx[j]]))",
"def get_candidates(self, word):\n candidates = dict()\n for word_list_item in self.vocab_list:\n edit_distance = damerau_levenshtein_distance(word, word_list_item)\n if edit_distance <= 1:\n candidates[word_list_item] = edit_distance\n return sorted(candidates, key=candidates.get, reverse=False)",
"def print_word_freq(file):\n # with open(file, 'r') as text the r as the second arguement means that my intentions are to read the file\n with open(file, 'r') as text:\n # this reads the entire file and puts this into text string\n text_string = text.read()\n # returns the string respresentation of text string without removing special characters so you can see what you need to remove\n # print(repr(text_string))\n # this removes the specified characters from the text string\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"—\", \" \")\n text_string = text_string.replace(\"-\", \" \")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"’\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n text_string = text_string.replace(\"\\\"\", \"\")\n # takes the text string and makes all the characters lower case\n text_string = text_string.lower()\n # takes the text string and splits all the words into a list this splits from space to space\n words_list = text_string.split()\n # a dictionary is a key and a value\n no_stop_words = {}\n # for loop that will cycle through the words list\n for word in words_list:\n # checking to see if the word is stop words\n if word not in STOP_WORDS:\n # if the word is already in the dictionary no stop words increment the value by 1\n if word in no_stop_words:\n no_stop_words[word] += 1\n # if the word is not in the dictionary no stop words add this to the dictionary and give it a value of 1\n else:\n no_stop_words[word] = 1\n \n sorted_dict = {}\n sorted_keys = sorted(no_stop_words, key=no_stop_words.get, reverse=True)\n \n for w in sorted_keys:\n sorted_dict[w] = no_stop_words[w]\n \n for key in sorted_dict:\n print(f\"{key:>15} | {sorted_dict[key]:2} {'*' * sorted_dict[key]}\")\n \n # good practice to ensure that we are properly closing the file in use at the end of the function\n text.close()",
"def word_entropy(self, doc, lemmatized=False):\n # filter out words\n words = [token for token in doc if not token.is_punct and \"'\" not in token.text and not token.is_space]\n # create bag of words\n if lemmatized:\n list_words = [w.lemma_ for w in words]\n else:\n list_words = [w.text for w in words]\n num_words = len(list_words)\n word_freq = Counter(list_words)\n return -sum(\n [\n (word_freq[word] / num_words) * log2(word_freq[word] / num_words)\n for word in word_freq\n ]\n )",
"def nn_words(table, wordvecs, query, k=10):\n\tkeys = table.keys()\n\tqf = table[query]\n\tscores = numpy.dot(qf, wordvecs.T).flatten()\n\tsorted_args = numpy.argsort(scores)[::-1]\n\twords = [keys[a] for a in sorted_args[:k]]\n\tprint ('QUERY: ' + query)\n\tprint ('NEAREST: ')\n\tfor i, w in enumerate(words):\n\t\tprint (w)",
"def count_words(word_list, print_words=False):\n freq_dist = Counter(word_list)\n global global_word_freq_list\n\n if print_words:\n for (word, freq) in freq_dist.items():\n print('{:25}{:10}'.format(word, freq))\n\n global_word_freq_list = freq_dist.copy()\n return freq_dist",
"def print_word_freq(file):\n opened_file = open(file)\n text = opened_file.read()\n #print(text)\n #replace hyphens\n no_hyphen = text.replace(\"-\",\" \")\n #remove punctuation\n no_punctuation = \"\"\n for char in no_hyphen:\n if char not in punctuation:\n no_punctuation = no_punctuation + char\n #make everything lowercase\n lower_case_text = no_punctuation.lower()\n #print(lower_case_text)\n #split into words\n all_words = lower_case_text.split()\n #print(all_words)\n #remove stop words\n no_stop_words = []\n for each_word in all_words:\n if each_word not in STOP_WORDS:\n no_stop_words.append(each_word)\n #print(no_stop_words)\n #find the longest word to use for indention purposes\n word_length = 0\n for word in no_stop_words:\n if len(word) > word_length:\n #print (word, len(word))\n word_length = len(word)\n #print (word_length)\n #count remaining word usage\n word_counts = {}\n for word in no_stop_words:\n if word in word_counts:\n word_counts[word] +=1\n else: word_counts[word] = 1\n #print (word_counts)\n #sort words by frequency\n ordered_by_freq = (sorted(word_counts.items(), key=lambda seq: seq[1], reverse=True))\n #print (ordered_by_freq)\n #print words, freq, graph, indent, and add a space past the pipe for values less than 10\n for key, value in ordered_by_freq:\n indent = (word_length + 1 - len(key))\n space = \" \"\n star = \"*\"\n if value >= 10:\n print (indent * space, key, \" | \", value, value * star)\n else:\n print (indent * space, key, \" | \", value, value * star)\n \n\n \n \n\n \n\n \n\n\n\n #remove the stop words\n #count the frequency of the remaing words (see ex 6 for sort function)\n #output as a cord list, count and graph of *** (ex 7 for justify)",
"def print_words_structure(self):\n print(\"Dictionary words:\")\n for _, word in kc_util.gen_db(self.dictionary_db.cursor()):\n print(word) \n print(\"\")",
"def print_longest_words(word_dict):\n words = all_reducible(word_dict)\n\n # use DSU to sort by word length\n t = []\n for word in words:\n t.append((len(word), word))\n t.sort(reverse=True)\n \n #Print the longest 5 words\n for _, word in t[0:5]:\n print_trail(word)\n print('\\n')",
"def print_word_freq(file):\n with open(file) as text:\n text = text.read().lower()\n text = text.replace(\"\\n\", \" \")\n text = text.replace(\"’\", \"\")\n # text = \" \".join(text.split())\n # print(text)\n for character in string.punctuation:\n text = text.replace(character, \"\")\n word_list = text.split()\n clean_list = []\n for word in word_list:\n if word not in STOP_WORDS:\n clean_list.append(word)\n \n\n # for stop_word in STOP_WORDS:\n # if stop_word in word_list:\n # word_list.remove(stop_word)\n\n\n new_dict = {}\n for word in clean_list:\n new_dict[word] = clean_list.count(word)\n sorted_dict = sorted(new_dict.items())\n print(sorted_dict)\n\n # print(f\"{key} | {value} {'*' * value}\")\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n\n # for word in word_list:\n # if word in string.punctuation:\n # #do something\n # if word in STOP_WORDS:\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n # print(text)",
"def print_topics(self, num_topics=10, num_words=10):\n\n topic_modeler = LatentDirichletAllocation(n_topics=num_topics, learning_method='online')\n\n topic_modeler.fit(self.comments_vectorized)\n\n word_list = self.vectorizer.get_feature_names()\n\n for topic_number, topic in enumerate(topic_modeler.components_):\n top_ten = np.argsort(-topic)[:num_words]\n\n words_ranked = \", \".join([word_list[i] for i in top_ten])\n\n print(\"Topic {}: {}\".format(topic_number, words_ranked))",
"def sort_output_wc(word_counts):\n\n # use sorted to sort output by value (item[1] denotes second item)\n sorted_items = sorted(word_counts.items(), key=lambda item: item[1])\n\n for key, value in sorted_items:\n print \"{} {}\".format(value, key)",
"def main():\n vocab = str.split(file(sys.argv[1]).read())\n testlambda = numpy.loadtxt(sys.argv[2])\n testlambda = topN(testlambda, int(sys.argv[3]))\n words_per_topic = 20\n\n for k in range(0, len(testlambda)):\n lambdak = list(testlambda[k, :])\n lambdak = lambdak / sum(lambdak)\n temp = zip(lambdak, range(0, len(lambdak)))\n temp = sorted(temp, key=lambda x: x[0], reverse=True)\n\n print 'topic %d:' % (k)\n # feel free to change the \"53\" here to whatever fits your screen nicely.\n for i in range(0, words_per_topic):\n print '%s:%.4f' % (vocab[temp[i][1]], temp[i][0])\n print",
"def print_word_freq(file):\n \n text_file = open(file, 'r')\n\n contents = text_file.read()\n\n words = contents.split()\n\n def clean_text(text):\n text = text.lower()\n all_letters = \"abcdefghijklmnopqrstuvwxyz\"\n text_to_keep = \"\"\n for char in text:\n if char in all_letters:\n text_to_keep += char\n return text_to_keep\n\n clean_words = []\n\n for word in words:\n clean_words.append(clean_text(word))\n\n go_words = [word for word in clean_words if word not in STOP_WORDS]\n\n word_count = {}\n\n for go_word in go_words:\n word_count.update({go_word: go_words.count(go_word)})\n\n sorted_word_count = sorted(word_count.items(), key=lambda x: x[1], reverse=True)\n\n longest_word_len = len(get_longest_word(words))\n\n for word, value in sorted_word_count[:10]:\n print(word.rjust(longest_word_len), \"|\", str(value).ljust(3), \"*\" * value)",
"def distance_words(result, query):\n distance_dict = {}\n # for each word in result find distance between that word and query word\n # i.e. number of insert/update/delete operations required to change query word to result word\n # smallest distance means closest word to query, therefore it will be\n # shown at the top\n for word in result:\n distances = list(range(len(query) + 1))\n for index_word, char_word in enumerate(word):\n another_distances = [index_word + 1]\n for index_query, char_query in enumerate(query):\n if char_query == char_word:\n another_distances.append(distances[index_query])\n else:\n another_distances.append(1 + min((distances[index_query],\n distances[\n index_query + 1],\n another_distances[-1])))\n distances = another_distances\n distance_dict.update({word: distances[-1]})\n # sort dict by value in ascending order ( smaller to larger distance )\n return sorted(distance_dict.items(), key=lambda x: x[1])",
"def words_in_sorted_order(self):\n print 'Words in sorted order:'\n self.words_in_sorted_order_utils(self.root)",
"def instance_dist(novel, word):\n output = []\n count = 0\n start = False\n text = novel.get_tokenized_text()\n\n for e in text:\n if not start:\n if e == word:\n start = True\n else:\n count += 1\n if e == word:\n output.append(count)\n count = 0\n return output",
"def print_word_freq(file):\n with open(file) as one_today:\n text = one_today.readlines()\n d = dict()\n for line in text:\n line = line.strip()\n line = line.lower()\n line = line.translate(line.maketrans(\"\", \"\", string.punctuation))\n words = line.split(\" \")\n for word in words:\n if word in d:\n d[word] = d[word] + 1\n elif word in STOP_WORDS:\n pass\n else:\n d[word] = 1\n for word in sorted(d, key=d.get, reverse=True):\n print(word, \"|\", d[word])",
"def grounding_words(self, w):\n story_visual_words = [x for x in self.visual_words if x in self.vocab]\n visual_word_ids = [self.vocab_ids[x] for x in story_visual_words]\n visual_similarities = self.sigma_A[self.vocab_ids[w]][visual_word_ids]\n return sorted(zip(story_visual_words, visual_similarities), key = lambda x : -x[1])",
"def print_neighbours(self, word=''):\n\n if word in self.index.keys():\n word_ind = self.index[word]\n for i in self.graph[word_ind]:\n print(self.words[i])\n print()\n else:\n print('Error - Not a valid word')",
"def nearest_neighbors(self, word, dictionary):\n vectors = self.word_embeds.weight.data.cpu().numpy()\n index = dictionary.token2id[word]\n query = vectors[index]\n\n ranks = vectors.dot(query).squeeze()\n denom = query.T.dot(query).squeeze()\n denom = denom * np.sum(vectors ** 2, 1)\n denom = np.sqrt(denom)\n ranks = ranks / denom\n mostSimilar = []\n [mostSimilar.append(idx) for idx in ranks.argsort()[::-1]]\n nearest_neighbors = mostSimilar[:10]\n nearest_neighbors = [dictionary[comp] for comp in nearest_neighbors]\n\n return nearest_neighbors",
"def display(wordsDictionary):\n noOfWords = 0\n print(\"-\" * 42)\n print(\"| %20s | %15s |\" % (\"WORDS\".center(20), \"FREQUENCY\".center(15)))\n print(\"-\" * 42)\n for word in list(sorted(wordsDictionary.keys())):\n noOfWords += 1\n print(\"| %-20s | %15s |\" % (word, str(wordsDictionary.get(word)).center(15)))\n # Halt every 20 words (configurable)\n if (noOfWords != 0 and noOfWords % 20 == 0):\n print(\"\\n\" * 2)\n input(\"PRESS ENTER TO CONTINUE ... \")\n print(\"\\n\" * 5)\n print(\"-\" * 42)\n print(\"| %20s | %15s |\" % (\"WORDS\".center(20), \"FREQUENCY\".center(15)))\n print(\"-\" * 42)\n print(\"-\" * 42)\n print(\"\\n\" * 2)",
"def most_similar_words(self, word, n_words=5, include_similarity=False):\n\n if word in self.vocab:\n token_id = self.diction[word]\n tiled_embedding = np.tile(self.embeddings[token_id], (self.n_words, 1))\n embedding_similarities = self._dist_metric(tiled_embedding, self.embeddings)\n most_similar_token_ids = (-embedding_similarities).argsort()\n\n return list(map(lambda token_id: self.reverse_diction[token_id], most_similar_token_ids))\n else:\n print('not in vocab')"
] | [
"0.6305388",
"0.6230253",
"0.6139421",
"0.6103458",
"0.60941505",
"0.6086169",
"0.60522497",
"0.6010836",
"0.6005883",
"0.5975922",
"0.5967013",
"0.59614223",
"0.59542745",
"0.5919518",
"0.5901676",
"0.5882043",
"0.58806413",
"0.5867562",
"0.5837269",
"0.5824132",
"0.58083713",
"0.5799564",
"0.5789212",
"0.5782038",
"0.575631",
"0.5745112",
"0.57377946",
"0.5706931",
"0.5696941",
"0.5690159"
] | 0.8753714 | 0 |
Calculate the FDR curve for arrays of target scores and decoy scores. | def calc_fdr_arr(target_arr, decoy_arr, ascending=False):
n, m = len(target_arr), len(decoy_arr)
if n != m:
raise TypeError('target should be same length as decoy {} {}'.format(n, m))
ordering = 1 if ascending else -1 # reversed sorting if score is not ascending
combined = np.concatenate((target_arr, decoy_arr))
combined.sort()
# count how often each value occurs
target_bag, decoy_bag = _count(target_arr), _count(decoy_arr)
unique_sorted = np.unique(combined)[::ordering]
target_hits, decoy_hits = zip(*_iter_hits(target_bag, decoy_bag, unique_sorted))
target_hits, decoy_hits = np.cumsum(target_hits), np.cumsum(decoy_hits)
fdr_curve = decoy_hits / target_hits
fdr_curve[target_hits == 0] = 0
return fdr_curve, target_hits, combined[::ordering] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _cost_function_derivative(self, y_pred, y, X, m):\n\n derivatives= np.zeros((X.shape[0],1))\n for j in range(X.shape[0]):\n auxsum = 0\n for i in range(m):\n auxsum+=(y_pred[0][i] -y[0][i])*X[j][i]\n derivatives[j][0] = self.theta[j][0] - self.alpha * 1/m * auxsum\n\n #empty_derivatives = np.zeros((X.shape[0],1))\n return derivatives",
"def calculate_f1(fx, y):\n pred_idxs = fx.max(1, keepdim=True)[1]\n pred_names = [idx2target[i.item()] for i in pred_idxs]\n original_names = [idx2target[i.item()] for i in y]\n true_positive, false_positive, false_negative = 0, 0, 0\n for p, o in zip(pred_names, original_names):\n predicted_subtokens = p.split('|')\n original_subtokens = o.split('|')\n for subtok in predicted_subtokens:\n if subtok in original_subtokens:\n true_positive += 1\n else:\n false_positive += 1\n for subtok in original_subtokens:\n if not subtok in predicted_subtokens:\n false_negative += 1\n try:\n precision = true_positive / (true_positive + false_positive)\n recall = true_positive / (true_positive + false_negative)\n f1 = 2 * precision * recall / (precision + recall)\n except ZeroDivisionError:\n precision, recall, f1 = 0, 0, 0\n return precision, recall, f1",
"def __call__( self, y_score, y_true = NotSet ):\n if y_true is NotSet:\n self.sps = npCurrent.fp_array( y_score[0] )\n self.pds = npCurrent.fp_array( y_score[1] )\n self.pfs = npCurrent.fp_array( y_score[2] )\n self.thresholds = npCurrent.fp_array( y_score[3] )\n else:\n # We have to determine what is signal and noise from the datasets using\n # the targets:\n try:\n from sklearn.metrics import roc_curve\n except ImportError:\n # FIXME Can use previous function that we used here as an alternative\n raise ImportError(\"sklearn is not available, please install it.\")\n self.pfs, self.pds, self.thresholds = roc_curve(y_true, y_score, pos_label=1, drop_intermediate=True)\n pds = self.pds\n bps = 1. - self.pfs\n self.sps = np.sqrt( ( pds + bps )*.5 * np.sqrt( pds * bps ) )",
"def gradient_descent(x0,df,rate=0.1,max_iters=1000,min_step=1e-6,max_step=1e5,\n projection=None,trajectory=False,step_history=False,f=None,\n cost_history=False,feedback=False,plot_history=False):\n if feedback is True:\n print(\"gd.gradient_descent():\")\n if f is not None:\n assert callable(f)\n fx0 = f(x0)\n if feedback is True:\n print(f\" initial cost = {fx0:.2e}\")\n if projection is not None:\n assert callable(projection)\n project = True\n else:\n project = False\n if trajectory is True:\n xx = [x0.copy()]\n if step_history is True:\n steps = []\n if cost_history is True:\n assert callable(f)\n fx = [fx0]\n\n x = x0.copy()\n for i in range(max_iters):\n dx = -rate*df(x)\n if project is True:\n x0 = x.copy()\n x = projection(x0+dx)\n dx = x-x0\n else:\n x += dx\n if trajectory is True:\n xx.append(x.copy())\n if cost_history is True:\n fx += [f(x)]\n step_size = np.linalg.norm(dx)\n if step_history is True:\n steps += [step_size]\n if step_size < min_step or step_size > max_step:\n break\n\n results = dict()\n results['output'] = x\n if trajectory is True:\n results['trajectory'] = xx\n if cost_history is True:\n results['cost_history'] = fx\n if step_history is True:\n results['step_history'] = steps\n if plot_history is True:\n assert step_history is True or cost_history is True\n plt.figure()\n if step_history is True:\n plt.semilogy(steps,label='step size')\n if cost_history is True:\n plt.semilogy(fx,label='cost')\n plt.xlabel('iteration number')\n plt.title('Gradient Descent')\n plt.legend()\n results['figure'] = plt\n plt.show(block=False)\n \n if feedback is True:\n if f is not None:\n print(f\" final cost = {f(x):.2e}\")\n \n return results",
"def test_fdr_correction(self):\r\n pvals = array([.1, .7, .5, .3, .9])\r\n exp = array([.5, .7 * 5 / 4., .5 * 5 / 3., .3 * 5 / 2., .9])\r\n obs = fdr_correction(pvals)\r\n self.assertFloatEqual(obs, exp)",
"def evaluate(y_true, y_preds, labels=[0, 1, 2, 3, 4]):\n p_scores = []\n r_scroes = []\n for label in labels:\n p = (((y_true == label) * (y_preds == label)).sum()+0.001) / ((y_preds == label).sum() + 0.001)\n p_scores.append(p)\n r = (((y_true == label) * (y_preds == label)).sum()+0.001) / ((y_true == label).sum() + 0.001)\n r_scroes.append(r)\n p_scores = np.array(p_scores)\n r_scroes = np.array(r_scroes)\n f1 = 2 * (p_scores * r_scroes + 0.001) / (p_scores + r_scroes + 0.001)\n\n confmat = []\n for label in labels:\n conf = []\n for label2 in labels:\n conf.append(((y_preds == label) * (y_true == label2)).sum())\n confmat.append(conf)\n confmat = np.array(confmat)\n\n return p_scores, r_scroes, f1, confmat",
"def calf_f1(annotated_Y, predicted_Y):\n\n POSITIVE = ADR_MENTION_CLASS_LABEL\n NEGATIVE = NON_ADR_MENTION_CLASS_LABEL\n\n tp = 0\n fp = 0\n fn = 0\n tn = 0\n\n total_actual_positives = 0\n total_actual_negatives = 0\n\n for index, actual in enumerate(annotated_Y):\n predicted = predicted_Y[index]\n\n if actual == POSITIVE:\n total_actual_positives += 1\n\n if predicted == POSITIVE:\n tp += 1\n elif predicted == NEGATIVE:\n fn += 1\n\n elif actual == NEGATIVE:\n total_actual_negatives += 1\n\n if predicted == POSITIVE:\n fp += 1\n elif predicted == NEGATIVE:\n tn += 1\n\n if (tp+fp) == 0:\n precision = 0\n else:\n precision = tp/(tp+fp)\n\n if (tp+fn) == 0:\n recall = 0\n else:\n recall = tp/(tp+fn)\n\n if (precision+recall) == 0:\n f1 = 0\n else:\n f1 = 2*precision*recall/(precision+recall)\n\n # print(\"Total labels: {}, total actual positives: {}, total_actual_negatives: {}\".format(len(predicted_Y), total_actual_positives, total_actual_negatives))\n # print(\"tp: {}, tn: {}, fp: {}, fn: {}\".format(tp, tn, fp, fn))\n # print(\" Accuracy: {}\".format((tp+tn)/(len(test_Y))))\n print(\" Precision: {}\".format(precision))\n print(\" Recall: {}\".format(recall))\n print(\" F1: {}\".format(f1))",
"def compute(self, idx, input_scores, input_names):\n title = self._legends[idx] if self._legends is not None else None\n headers = [\"\" or title, \"Dev. %s\" % input_names[0]]\n if self._eval and input_scores[1] is not None:\n headers.append(\"eval % s\" % input_names[1])\n if self._criterion == \"rr\":\n rr = bob.measure.recognition_rate(input_scores[0], self._thres[idx])\n dev_rr = \"%.1f%%\" % (100 * rr)\n raws = [[\"RR\", dev_rr]]\n if self._eval and input_scores[1] is not None:\n rr = bob.measure.recognition_rate(\n input_scores[1], self._thres[idx]\n )\n eval_rr = \"%.1f%%\" % (100 * rr)\n raws[0].append(eval_rr)\n click.echo(\n tabulate(raws, headers, self._tablefmt), file=self.log_file\n )\n elif self._criterion == \"mindcf\":\n if \"cost\" in self._ctx.meta:\n cost = self._ctx.meta.get(\"cost\", 0.99)\n threshold = (\n bob.measure.min_weighted_error_rate_threshold(\n input_scores[0][0], input_scores[0][1], cost\n )\n if self._thres is None\n else self._thres[idx]\n )\n if self._thres is None:\n click.echo(\n \"[minDCF - Cost:%f] Threshold on Development set `%s`: %e\"\n % (cost, input_names[0], threshold),\n file=self.log_file,\n )\n else:\n click.echo(\n \"[minDCF] User defined Threshold: %e\" % threshold,\n file=self.log_file,\n )\n # apply threshold to development set\n far, frr = bob.measure.farfrr(\n input_scores[0][0], input_scores[0][1], threshold\n )\n dev_far_str = \"%.1f%%\" % (100 * far)\n dev_frr_str = \"%.1f%%\" % (100 * frr)\n dev_mindcf_str = \"%.1f%%\" % (\n (cost * far + (1 - cost) * frr) * 100.0\n )\n raws = [\n [\"FAR\", dev_far_str],\n [\"FRR\", dev_frr_str],\n [\"minDCF\", dev_mindcf_str],\n ]\n if self._eval and input_scores[1] is not None:\n # apply threshold to development set\n far, frr = bob.measure.farfrr(\n input_scores[1][0], input_scores[1][1], threshold\n )\n eval_far_str = \"%.1f%%\" % (100 * far)\n eval_frr_str = \"%.1f%%\" % (100 * frr)\n eval_mindcf_str = \"%.1f%%\" % (\n (cost * far + (1 - cost) * frr) * 100.0\n )\n raws[0].append(eval_far_str)\n raws[1].append(eval_frr_str)\n raws[2].append(eval_mindcf_str)\n click.echo(\n tabulate(raws, headers, self._tablefmt), file=self.log_file\n )\n elif self._criterion == \"cllr\":\n cllr = bob.measure.calibration.cllr(\n input_scores[0][0], input_scores[0][1]\n )\n min_cllr = bob.measure.calibration.min_cllr(\n input_scores[0][0], input_scores[0][1]\n )\n dev_cllr_str = \"%.1f%%\" % cllr\n dev_min_cllr_str = \"%.1f%%\" % min_cllr\n raws = [[\"Cllr\", dev_cllr_str], [\"minCllr\", dev_min_cllr_str]]\n if self._eval and input_scores[1] is not None:\n cllr = bob.measure.calibration.cllr(\n input_scores[1][0], input_scores[1][1]\n )\n min_cllr = bob.measure.calibration.min_cllr(\n input_scores[1][0], input_scores[1][1]\n )\n eval_cllr_str = \"%.1f%%\" % cllr\n eval_min_cllr_str = \"%.1f%%\" % min_cllr\n raws[0].append(eval_cllr_str)\n raws[1].append(eval_min_cllr_str)\n click.echo(\n tabulate(raws, headers, self._tablefmt), file=self.log_file\n )\n else:\n title = self._legends[idx] if self._legends is not None else None\n all_metrics = self._get_all_metrics(idx, input_scores, input_names)\n headers = [\" \" or title, \"Development\"]\n rows = [\n [self.names[0], all_metrics[0][0]],\n [self.names[1], all_metrics[0][1]],\n [self.names[2], all_metrics[0][2]],\n [self.names[3], all_metrics[0][3]],\n [self.names[4], all_metrics[0][4]],\n [self.names[5], all_metrics[0][5]],\n ]\n\n if self._eval:\n # computes statistics for the eval set based on the threshold a\n # priori\n headers.append(\"Evaluation\")\n 
rows[0].append(all_metrics[1][0])\n rows[1].append(all_metrics[1][1])\n rows[2].append(all_metrics[1][2])\n rows[3].append(all_metrics[1][3])\n rows[4].append(all_metrics[1][4])\n rows[5].append(all_metrics[1][5])\n\n click.echo(\n tabulate(rows, headers, self._tablefmt), file=self.log_file\n )",
"def cost_derivative(self, y_pred, Y, X):\n # Derivative of loss wrt b0\n D_b0 = -2 * sum((Y - y_pred) * y_pred * (1 - y_pred))\n # Derivative of loss wrt b1\n D_b1 = -2 * sum(X * (Y - y_pred) * y_pred * (1 - y_pred))\n return D_b0, D_b1",
"def loss_gradient(self, targets, scores):\n m = targets * scores\n numer = 4. * (2. * numpy.arctan(m) - 1.)\n denom = 1. + m**2\n return numer/denom",
"def test_gradient_step(var_f, len_f, var_y, N):\n\n x, y = build_data(N)\n\n gp_model = initialise_gp_model(var_f, len_f, var_y, x, y)\n markovgp_model = initialise_markovgp_model(var_f, len_f, var_y, x, y)\n\n gv = objax.GradValues(gp_model.energy, gp_model.vars())\n gv_markov = objax.GradValues(markovgp_model.energy, markovgp_model.vars())\n\n lr_adam = 0.1\n lr_newton = 1.\n opt = objax.optimizer.Adam(gp_model.vars())\n opt_markov = objax.optimizer.Adam(markovgp_model.vars())\n\n gp_model.update_posterior()\n gp_grads, gp_value = gv()\n gp_loss_ = gp_value[0]\n opt(lr_adam, gp_grads)\n gp_hypers = np.array([gp_model.kernel.lengthscale, gp_model.kernel.variance, gp_model.likelihood.variance])\n print(gp_hypers)\n print(gp_grads)\n\n markovgp_model.update_posterior()\n markovgp_grads, markovgp_value = gv_markov()\n markovgp_loss_ = markovgp_value[0]\n opt_markov(lr_adam, markovgp_grads)\n markovgp_hypers = np.array([markovgp_model.kernel.lengthscale, markovgp_model.kernel.variance,\n markovgp_model.likelihood.variance])\n print(markovgp_hypers)\n print(markovgp_grads)\n\n np.testing.assert_allclose(gp_grads[0], markovgp_grads[0], rtol=1e-4)\n np.testing.assert_allclose(gp_grads[1], markovgp_grads[1], rtol=1e-4)\n np.testing.assert_allclose(gp_grads[2], markovgp_grads[2], rtol=1e-4)",
"def cost_derivative(self,output_results,y):\r\n\t\treturn (output_results-y)",
"def optimize_rf(data, targets):\r\n def rfc_crossval(n_estimators, min_samples_split, max_features,max_depth,loss_n):\r\n\r\n return rfc_cv(\r\n n_estimators=int(n_estimators),\r\n min_samples_split=int(min_samples_split),\r\n max_features=int(round(max_features)),\r\n max_depth=int(max_depth),\r\n loss_n=loss_n,\r\n data=data,\r\n targets=targets,\r\n )\r\n\r\n optimizer = BayesianOptimization(\r\n f=rfc_crossval,\r\n pbounds={\r\n \"n_estimators\": (10, 5000),\r\n \"min_samples_split\": (2, 25),\r\n \"max_features\": (1, 8),\r\n \"max_depth\":(2,10),\r\n 'loss_n': (0, 1)\r\n },\r\n random_state=1234,\r\n verbose=2\r\n )\r\n logger=JSONLogger(path=\"./randomlogs.json\")\r\n optimizer.subscribe(Events.OPTMIZATION_STEP,logger)\r\n optimizer.maximize(init_points=50,n_iter=300)\r\n with open('./randomlogs.json','a',encoding='utf-8')as f:\r\n f.write(str(optimizer.max))",
"def estimate_F(corrs):\n N, _ = corrs.shape\n corrs_temp = np.zeros([N,4])\n corrs_temp[:,1] = corrs[:,0]\n corrs_temp[:,0] = corrs[:,1]\n corrs_temp[:,2] = corrs[:,3]\n corrs_temp[:,3] = corrs[:,2]\n corrs = corrs_temp\n means = []\n stds = []\n for i in range(4):\n mean = np.mean(corrs[:,i])\n means.append(mean)\n std = np.std(corrs[:,i])\n stds.append(std)\n corrs[:,i] -= mean\n corrs[:,i] /= std\n T1 = np.array([[1/stds[0], 0, -means[0]/stds[0]],[0,1/stds[1], -means[1]/stds[1]], [0,0,1]])\n T2 = np.array([[1/stds[2], 0, -means[2]/stds[2]],[0,1/stds[3], -means[3]/stds[3]], [0,0,1]])\n Y = []\n for j in range(N):\n Y.append(np.outer(np.hstack([corrs[j,2:],1]),np.hstack([corrs[j,:2],1])).flatten())\n Y = np.array(Y)\n\n u, s, v = np.linalg.svd(Y, full_matrices = 0)\n if s[-1] != 0:\n F = v[-1]\n else:\n F = v[-2]\n F = F.reshape([3,3])\n u, s, v = np.linalg.svd(F, full_matrices = 0)\n if len(s)==3:\n s[-1] = 0\n F = u @ np.diag(s) @ v\n F = T2.T @ F @ T1\n F = F/np.linalg.norm(F, ord = 'fro')\n return F",
"def _derY(self, x, y):\n if _isscalar(x):\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n else:\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.x_list[x_pos] - self.x_list[x_pos - 1]\n )\n dfdy = (\n (\n (1 - alpha) * self.f_values[x_pos - 1, y_pos]\n + alpha * self.f_values[x_pos, y_pos]\n )\n - (\n (1 - alpha) * self.f_values[x_pos - 1, y_pos - 1]\n + alpha * self.f_values[x_pos, y_pos - 1]\n )\n ) / (self.y_list[y_pos] - self.y_list[y_pos - 1])\n return dfdy",
"def detEvals(self, targets):\n res = []\n res2 = []\n for f in targets:\n tmp = np.array([np.nan] * len(self.bestfinalfunvals))\n tmp2 = None\n for i, line in enumerate(self.evals):\n if line[0] <= f:\n tmp = line[1:]\n tmp2 = self.algs[i]\n break\n res.append(tmp)\n res2.append(tmp2)\n return res, res2",
"def fscore(links_true, links_pred=None):\n\n prec = precision(links_true, links_pred)\n rec = recall(links_true, links_pred)\n\n return float(2 * prec * rec / (prec + rec))",
"def test_df(x):\n dfx = np.array([x[1]*np.cos(x[0])+np.cos(x[1]),\n np.sin(x[0])-x[0]*np.sin(x[1])])\n return dfx",
"def f1(y_true, y_pred):\n p = precision(y_true, y_pred)\n r = recall(y_true, y_pred)\n score = 2 * p * r / (p + r)\n return score",
"def evaluate_tDCF(self, cm_scores_file: str, asv_scores_file: str,\n evaluation_res_file: str):\n\n # Spoofing related EER\n bona_cm, spoof_cm, eer_cm = self.evaluate_eer(cm_scores_file,\n return_cm=True)\n\n asv_df = pd.read_csv(asv_scores_file)\n tar_asv = asv_df[asv_df['target'] == 'target']\n non_tar_asv = asv_df[asv_df['target'] == 'nontarget']\n spoof_asv = asv_df[asv_df['target'] == 'spoof']\n\n eer_asv, asv_threshold = em.compute_eer(tar_asv, non_tar_asv)\n [Pfa_asv, Pmiss_asv,\n Pmiss_spoof_asv] = em.obtain_asv_error_rates(tar_asv, non_tar_asv,\n spoof_asv, asv_threshold)\n # Default values from ASVspoof2019\n Pspoof = 0.05\n cost_model = {\n 'Pspoof': Pspoof, # Prior probability of a spoofing attack\n 'Ptar': (1 - Pspoof) * 0.99, # Prior probability of target speaker\n 'Pnon':\n (1 - Pspoof) * 0.01, # Prior probability of nontarget speaker\n 'Cmiss_asv':\n 1, # Cost of ASV system falsely rejecting target speaker\n 'Cfa_asv':\n 10, # Cost of ASV system falsely accepting nontarget speaker\n 'Cmiss_cm':\n 1, # Cost of CM system falsely rejecting target speaker\n 'Cfa_cm': 10, # Cost of CM system falsely accepting spoof\n }\n tDCF_curve, CM_thresholds = em.compute_tDCF(bona_cm, spoof_cm, Pfa_asv,\n Pmiss_asv, Pmiss_spoof_asv,\n cost_model, True)\n min_tDCF_index = np.argmin(tDCF_curve)\n min_tDCF = tDCF_curve[min_tDCF_index]\n\n result_string = f\"\"\"\n ASV System\n EER = {eer_asv*100:<8.5f} (Equal error rate (target vs. nontarget)\n Pfa = {Pfa_asv*100:<8.5f} (False acceptance rate)\n Pmiss = {Pmiss_asv*100:<8.5f} (False rejection rate) \n 1-Pmiss, spoof = {(1-Pmiss_asv)*100:<8.5f} (Spoof false acceptance rate)\n \n CM System\n EER = {eer_cm*100:<8.5f} (Equal error rate for counter measure)\n\n Tandem\n min-tDCF = {min_tDCF:<8.f}\n \"\"\"\n\n print(result_string)\n if evaluation_res_file:\n with open(evaluation_res_file, 'w') as wp:\n print(result_string, file=wp)",
"def scores(self, y, y_pred):\n\n aucroc = 0.\n precision = 0.\n recall = 0.\n f1 = 0.\n aucroc_labs = np.zeros(self.datas[self.train_idx].n_labels)\n precision_labs = np.zeros(self.datas[self.train_idx].n_labels)\n recall_labs = np.zeros(self.datas[self.train_idx].n_labels)\n f1_labs = np.zeros(self.datas[self.train_idx].n_labels)\n label_ratios = np.mean(y, axis=0)\n\n if len(y) > 1:\n y_t = np.transpose(y)\n col_keep = np.ones(len(y_t), dtype=bool)\n for i, col_y in enumerate(y_t):\n if 0 not in col_y or 1 not in col_y:\n col_keep[i] = False\n\n if sum(col_keep) > 0:\n if not col_keep.all():\n y = np.transpose(y_t[col_keep])\n y_pred = np.transpose(np.transpose(y_pred)[col_keep])\n\n f1 = f1_score(y, self._round(y_pred), average=self.metrics_avg)\n s = f1_score(y, self._round(y_pred), average=None)\n f1_labs[col_keep] = s if sum(col_keep) > 1 else s[1]\n aucroc = roc_auc_score(y, y_pred, average=self.metrics_avg)\n aucroc_labs[col_keep] = roc_auc_score(y, y_pred, average=None)\n precision = precision_score(y, self._round(y_pred), average=self.metrics_avg)\n recall = recall_score(y, self._round(y_pred), average=self.metrics_avg)\n if sum(col_keep) > 1:\n precision_labs[col_keep] = precision_score(y, self._round(y_pred), average=None)\n recall_labs[col_keep] = recall_score(y, self._round(y_pred), average=None)\n else:\n precision_labs[col_keep] = precision_score(y, self._round(y_pred))\n recall_labs[col_keep] = recall_score(y, self._round(y_pred))\n elif self.verbose:\n print('*Cannot compute other metrics because no label in Truth has alternatives, only precision*')\n precision = precision_score(y, self._round(y_pred), average=self.metrics_avg)\n precision_labs = precision_score(y, self._round(y_pred), average=None)\n\n elif len(y) == 1:\n if self.verbose:\n print('*Cannot compute other metrics with %d samples, only precision*' % len(y))\n precision = precision_score(y, self._round(y_pred), average=self.metrics_avg)\n precision_labs = precision_score(y, self._round(y_pred), average=None)\n\n result = {\n 'aucroc': aucroc,\n 'precision': precision,\n 'recall': recall,\n 'f1': f1,\n 'aucroc_labs': aucroc_labs,\n 'precision_labs': precision_labs,\n 'recall_labs': recall_labs,\n 'f1_labs': f1_labs,\n 'label_ratios': label_ratios\n }\n\n return result",
"def calcECDF(self,arr):\n res=np.zeros_like(arr)\n for index, value in np.ndenumerate(arr):\n res[index]=self.calc(value)\n return res",
"def calcECDF(self,arr):\n res=np.zeros_like(arr)\n for index, value in np.ndenumerate(arr):\n res[index]=self.calc(value)\n return res",
"def derivatives(self, x=[], function='sigmoid', alpha=0.01, y_pred = [], y = []):\n if function == \"sigmoid\":\n dadz = self.activation(x,\"sigmoid\")*(1-self.activation(x,\"sigmoid\"))\n return dadz\n\n if function == \"swish\":\n dadz = self.activation(x,\"sigmoid\") + x * self.activation(x,\"sigmoid\") * (1-self.activation(x,\"sigmoid\"))\n return dadz\n \n if function == \"linear\":\n dadz = np.ones(np.shape(x))\n return dadz\n\n if function == \"relu\":\n dadz = np.greater(x, 0).astype(int)\n return dadz\n\n if function == \"leakyrelu\":\n dadz = 1 * (x > 0) + alpha * (x<0)\n return dadz\n \n if function == \"mse\":\n assert(np.shape(y_pred)) == np.shape(y)\n if y.ndim > 1:\n m = np.shape(y)[0] #number of samples\n n = np.shape(y)[1] #number of output elements\n dCdy_pred = np.sum((y_pred - y), axis=0)*(1/(m*n))*2\n\n else:\n m = 1\n n = len(y) \n dCdy_pred = (y_pred - y)*(1/(m*n))*2\n return dCdy_pred",
"def fmeasure(y_true, y_pred):\n return fbeta_score(y_true, y_pred, beta=1)",
"def fmeasure(y_true, y_pred):\n return fbeta_score(y_true, y_pred, beta=1)",
"def fmeasure(y_true, y_pred):\n return fbeta_score(y_true, y_pred, beta=1)",
"def fmeasure(y_true, y_pred):\n return fbeta_score(y_true, y_pred, beta=1)",
"def calculate_prec_and_rec_gradient(y_true, output, thresholds, average):\n# f1 = calculate_f1(y_true, output, thresholds, average)\n prec, rec = calculate_prec_and_rec(y_true, output, thresholds, average)\n\n classes_num = len(thresholds)\n\n delta = 0.01\n grads = []\n # print(\"calculate_at_gradient, f1:\", f1)\n\n for k, threshold in enumerate(thresholds):\n new_thresholds = thresholds.copy()\n cnt = 0\n while cnt < 10:\n cnt += 1\n new_thresholds[k] += delta\n# f1_new = calculate_f1(y_true, output, new_thresholds, average)\n prec_new, rec_new = calculate_prec_and_rec(y_true, output, new_thresholds, average)\n# if f1_new != f1:\n# break\n if prec_new != prec or rec_new != rec:\n break\n\n grad = 0.5 * (prec_new - prec + rec_new - rec) / (delta * cnt)\n# grad = (0.2*(prec_new - prec) + 0.8*(rec_new - rec)) / (delta * cnt)\n grads.append(grad)\n\n return grads",
"def _test_score_ddp_fn(rank, world_size, preds, targets, exact_match, f1):\n _squad_score_ddp(rank, world_size, preds[rank], targets[rank], exact_match[rank], f1[rank])"
] | [
"0.5936621",
"0.5873862",
"0.5801516",
"0.5756439",
"0.57008326",
"0.5582457",
"0.55692595",
"0.55287987",
"0.55272543",
"0.55269563",
"0.55214703",
"0.5503548",
"0.5492775",
"0.5465149",
"0.54620695",
"0.5462017",
"0.54504746",
"0.5437137",
"0.5423348",
"0.5410652",
"0.54100186",
"0.54056907",
"0.54056907",
"0.53753805",
"0.53640157",
"0.53640157",
"0.53640157",
"0.53640157",
"0.53622264",
"0.5357732"
] | 0.7097738 | 0 |
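For orientation, the FDR-curve computation in the calc_fdr_arr document of this row can be illustrated with a small, self-contained numpy sketch. The score values below are made up, and the counting is re-implemented directly instead of using the module's unshown _count/_iter_hits helpers, so treat this as an illustration of the idea rather than the dataset function itself.

    import numpy as np

    # Hypothetical, all-distinct scores; a higher score means a better match (ascending=False case).
    target = np.array([9.1, 8.4, 7.9, 6.2, 5.0, 3.3])
    decoy = np.array([6.5, 4.1, 3.9, 2.8, 2.0, 1.1])

    thresholds = np.sort(np.concatenate((target, decoy)))[::-1]  # descending score cut-offs
    target_hits = np.array([(target >= t).sum() for t in thresholds])
    decoy_hits = np.array([(decoy >= t).sum() for t in thresholds])

    # FDR at each cut-off: accepted decoys divided by accepted targets (0 where nothing is accepted).
    fdr_curve = np.where(target_hits > 0, decoy_hits / np.maximum(target_hits, 1), 0.0)

    for t, hits, fdr in zip(thresholds, target_hits, fdr_curve):
        print("score >= %.2f: %d target hits, FDR = %.3f" % (t, hits, fdr))

At the loosest cut-off (score >= 1.1) this prints an FDR of 1.0 (6 decoys over 6 targets), and at the strictest cut-off an FDR of 0.0, which mirrors the shape of the curve returned by calc_fdr_arr.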
Find the index of the point before the rightmost crossing point between an FDR curve and an FDR target value. Formally speaking, given an array fdr_curve and a number fdr_target, find the smallest index i such that fdr_curve[j] >= fdr_target for all j > i. | def find_crossing(fdr_curve, fdr_target):
#if not is_fdr_curve(fdr_curve):
# raise ValueError("Not a valid FDR curve") #ADP - need to review is_fdr_curve criteria +noise means can start above 0
if not 0 < fdr_target < 1:
return -1
less_zero_indices = np.where(fdr_curve <= fdr_target)[0]
if len(less_zero_indices) == 0:
return len(fdr_curve)-1
i = less_zero_indices[-1]
return i | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_closest(A, target):\n idx = A.searchsorted(target)\n idx = np.clip(idx, 1, len(A)-1)\n left = A[idx-1]\n right = A[idx]\n idx -= target - left < right - target\n return idx",
"def __find_r_corr_in_points(arr):\n n = len(arr)\n th = arr[n // 2] * exp(-1.0)\n for i in range(n // 2, n, 1):\n if arr[i] < th:\n return i - n // 2",
"def nearestIndex(array, value):\n idx = np.searchsorted(array, value, side='left')\n if idx > 0 and (idx == len(array) or math.fabs(value - array[idx-1]) < math.fabs(value - array[idx])):\n return idx-1\n return idx",
"def _get_breaking_point(x, y):\n # select threshold where curve break\n slope = (y[-1] - y[0]) / len(y)\n y_grad = np.gradient(y)\n m = list(y_grad >= slope)\n j = m.index(False)\n m = m[j:]\n x = x[j:]\n y = y[j:]\n if True in m:\n i = m.index(True)\n else:\n i = -1\n breaking_point = float(x[i])\n\n return breaking_point, x, y",
"def find_idx(array, value):\n\n idx = np.searchsorted(array, value, side=\"left\")\n if idx > 0 and (\n idx == len(array)\n or math.fabs(value - array[idx - 1]) < math.fabs(value - array[idx])\n ):\n return idx - 1\n else:\n return idx",
"def find_nearest(numbers, target):\n numbers = np.asarray(numbers)\n idx = (np.abs(numbers - target)).argmin()\n return numbers[idx]",
"def calc_fdr_arr(target_arr, decoy_arr, ascending=False):\n n, m = len(target_arr), len(decoy_arr)\n if n != m:\n raise TypeError('target should be same length as decoy {} {}'.format(n, m))\n ordering = 1 if ascending else -1 # reversed sorting if score is not ascending\n combined = np.concatenate((target_arr, decoy_arr))\n combined.sort()\n # count how often each value occurs\n target_bag, decoy_bag = _count(target_arr), _count(decoy_arr)\n unique_sorted = np.unique(combined)[::ordering]\n target_hits, decoy_hits = zip(*_iter_hits(target_bag, decoy_bag, unique_sorted))\n target_hits, decoy_hits = np.cumsum(target_hits), np.cumsum(decoy_hits)\n fdr_curve = decoy_hits / target_hits\n fdr_curve[target_hits == 0] = 0\n return fdr_curve, target_hits, combined[::ordering]",
"def get_closest_waypoint_idx(self):\n\n # TODO:\n # The churchlot waypoints are roughly circular but have self-\n # intersecting endpoints, so I'm not sure how this code will \n # yield good results. Might need some additional filtering\n # logic to force a choice consistent with the vehicle pose yaw\n # in order to avoid jumping onto the wrong path.\n\n # Vehicle position short reference\n pos = self.pose.pose.position\n\n # Find the closest waypoint index\n # If closest index is zero bump to 1 since we don't want slice for \n # prev_coord to look at the final map waypoint.\n closest_idx = max(self.waypoint_tree.query([pos.x, pos.y], 1)[1], 1)\n\n # Get closest point\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx-1]\n\n # Convert coordinates into 2D numpy vectors\n closest_vec = np.array(closest_coord)\n prev_vec = np.array(prev_coord)\n pos_vec = np.array([pos.x, pos.y])\n\n # Find vec(close-prev) dot vec(pos-close) \n val = np.dot(closest_vec - prev_vec, pos_vec - closest_vec)\n\n # If pos is ahead of closest...\n if val > 0: \n\n # Advance index so that closest is ahead of pos\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n # Return closest index\n return closest_idx",
"def calcCrossoverIDX(fls, crossover_freq):\n crossover_idx = np.argmax(fls >= crossover_freq)\n return crossover_idx",
"def goal(target, prediction):\n return closest_point_on_segment(prediction, target)",
"def furthest_right_point(list_of_points):\n return max(list_of_points, key = lambda pt: pt.getX())",
"def findNearestIndex(arr,value):\n arr = np.array(arr)\n index=(np.abs(arr-value)).argmin()\n return index",
"def _find(self, candidates, target, lb, rb):\n # we'v made sure there's no duplicate in candidates\n li, ri = lb, rb\n while li < ri:\n mi = (li + ri) // 2\n if candidates[mi] < target:\n li = mi + 1\n elif candidates[mi] > target:\n ri = mi - 1\n else:\n return mi\n\n if li == ri:\n if candidates[li] <= target:\n return li\n else:\n return li - 1\n\n if ri < lb:\n return ri\n\n if li == rb:\n return rb - 1\n\n # now it's like c[ri] < target < c[li]\n # actually these 3 cases are all ri...\n return ri",
"def closest(array, value):\n idx = (np.abs(array-value)).argmin()\n return idx",
"def find_closest(array, value):\n array = np.asarray(array)\n index = (np.abs(array - value)).argmin()\n return index, array[index]",
"def find_closest_frame(point, trajs, cv_evals):\n\n closest_frame = None\n closest_distance = 1e10\n for i, t in enumerate(trajs):\n dists = np.linalg.norm(point - cv_evals[i], axis=1)\n # print(dists.shape, len(t))\n mindist_index = dists.argmin()\n mindist = dists[mindist_index]\n if mindist < closest_distance:\n # logger.debug(\"Found frame in %s at time %s\", simulation.id, t)\n closest_frame = t[mindist_index]\n closest_distance = mindist\n return closest_frame",
"def _first_index_with_bigger_neighbour(P):\n i = len(P) - 1\n while i > 0 and P[i-1] >= P[i]:\n i -= 1\n return i",
"def get_closest_waypoint_idx(self):\n\n # Position\n x = self.car_pose.pose.position.x\n y = self.car_pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n\n # Coordinates\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # Hyper Plane\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)\n\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n return closest_idx",
"def findFirstElementGreaterThan(self, array, index):\n l, r = 0, len(array) - 1\n ans = -1;\n while (l <= r):\n mid = l + (r - l) // 2;\n # Move to right side if target is greater\n if (array[mid] <= index):\n l = mid + 1;\n # Move left side.\n else:\n ans = mid;\n r = mid - 1;\n return ans;",
"def get_closest_waypoint(self, x, y):\n closest_idx = self.waypoint_tree.query([x, y])[1] # ckd tree (1st closest, idx)\n\n # Check if closest waypoint is ahead or behind vehicle\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # Equation for hyperplane through closest_coors\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)\n # Car is ahead of the closest waypoint\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n return closest_idx",
"def find_nearest_idx(array: np.array, value: float) -> int:\n return (np.abs(array - value)).argmin()",
"def append_point_by_derivative(contour_points, target_index, target_contour):\n target_contour_points = target_contour.points\n distance = 0xFFFFFF\n points_to_append, rate = None, 0\n x_value, y_value = contour_points[target_index].position\n\n try:\n # Calculates gradient by derivative.\n gradient = -1 / calculate_derivative(contour_points, target_index)\n # Line's equation.\n linear_function = lambda x: gradient*x + y_value - (x_value*gradient)\n # Extends 500 up and down from standard point.\n line = bezier.Curve(np.asfortranarray([\n [x_value+500, x_value-500],\n [linear_function(x_value+500), linear_function(x_value-500)]\n ]), degree=1)\n except ZeroDivisionError:\n line = bezier.Curve(np.asfortranarray([\n [x_value, x_value],\n [float(y_value+500), float(y_value-500)]\n ]), degree=1)\n\n # Finds what curve in target contour is meeted with line.\n for i, _ in enumerate(target_contour_points):\n if i == target_index and target_contour_points == contour_points:\n continue\n if target_contour_points[i].type != 'offcurve' \\\n and target_contour_points[i-1].type == 'offcurve':\n nodes = np.asfortranarray([\n [float(target_contour_points[i+j].x) for j in range(-3, 1)],\n [float(target_contour_points[i+j].y) for j in range(-3, 1)]\n ])\n curve = bezier.Curve(nodes, degree=3)\n\n # If line meet curve.\n if _is_curve_meet(line, curve):\n meeting_object = curve.evaluate(curve.intersect(line)[0, :][0])\n meeting_point = tuple(meeting_object.flatten())\n new_distance = _calculate_distance( \\\n contour_points[target_index].position, meeting_point)\n # Finds nearest curve.\n if new_distance < distance:\n distance = new_distance\n points_to_append = [target_contour_points[i+j] \\\n for j in range(-3, 1)]\n rate = curve.locate(meeting_object)\n\n # Appends point at target curve.\n if points_to_append and rate:\n appendtools.append_point_rate(target_contour, points_to_append, rate)",
"def find_nearest(array,value):\n idx = (np.abs(array-value)).argmin()\n return idx",
"def find_closest_flight_in_range(self, x, y, max_range=10):\n closest_flight = None\n closest_distance = max_range\n point = pygame.math.Vector2(x, y)\n for flight in self.incoming_flights:\n distance = point.distance_to(flight.get_pos())\n if distance < closest_distance:\n closest_distance = distance\n closest_flight = flight\n return closest_flight",
"def find_nearest(array, value):\n idx = (np.abs(array-value)).argmin()\n return idx",
"def lin_interp_from_first_max(\n array: np.ndarray,\n threshold: float,\n direction: InterpolateDirection\n) -> Union[float, int, None]:\n\n index_peak = array.argmax()\n target_value = array[index_peak] * threshold\n\n if threshold == 1:\n return index_peak\n\n elif direction == InterpolateDirection.RIGHT:\n if index_peak == array.size:\n return index_peak\n candidates = np.where(array[index_peak:] <= target_value)[0]\n if candidates.size <= 0:\n return None\n index_found = candidates[0] + index_peak\n value_found = array[index_found]\n if value_found == target_value:\n return index_found\n index_prev = index_found - 1\n value_prev = array[index_prev]\n index_gap = (value_prev - target_value) / (\n value_prev - value_found)\n return index_prev + index_gap\n\n elif direction == InterpolateDirection.LEFT:\n if index_peak == 0:\n return index_peak\n candidates = np.where(array[:index_peak] <= target_value)[0]\n if candidates.size <= 0:\n return None\n index_found = candidates[-1]\n value_found = array[index_found]\n if value_found == target_value:\n return index_found\n index_prev = index_found + 1\n value_prev = array[index_prev]\n index_gap = (value_prev - target_value) / (\n value_prev - value_found)\n return index_prev - index_gap\n\n else:\n raise ValueError(\"invalid `direction`\")",
"def _find_nearest(array, value):\n idx = (np.abs(array - value)).argmin()\n return array[idx], idx",
"def findCrossing(y,data):\n y=1; data = wValsArray;\n #a for loop itterates over an array's rows. transpose it to get the cols.\n armed = True #\n xsPoints = []\n for w,rho in data:\n if rho > y and armed == True:\n xsPoints.append(w)\n armed = False\n if rho < y:\n armed = True\n return xsPoints",
"def search_linear(xs, target):\n for (i, v) in enumerate(xs):\n if v == target: # Is referred to as a probe.\n return i\n return -1",
"def geo_idx(dd, dd_array):\n geo_idx = (np.abs(dd_array - dd)).argmin()\n return geo_idx"
] | [
"0.6155449",
"0.60407877",
"0.5844136",
"0.5744244",
"0.5732119",
"0.5721886",
"0.57151353",
"0.56536",
"0.56136966",
"0.55823135",
"0.5580776",
"0.557572",
"0.557235",
"0.55606055",
"0.5549734",
"0.55226475",
"0.5512953",
"0.5509117",
"0.55001134",
"0.5492124",
"0.5484047",
"0.5464958",
"0.54616016",
"0.542552",
"0.54110056",
"0.54051626",
"0.54015523",
"0.5396166",
"0.5394989",
"0.5367432"
] | 0.8555478 | 0 |
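A short usage sketch for the find_crossing document in this row, assuming the definition above is in scope; the FDR values are made up for illustration.

    import numpy as np

    # Illustrative FDR curve along the sorted score axis (made-up values).
    fdr_curve = np.array([0.0, 0.0, 0.05, 0.08, 0.12, 0.18, 0.25])
    fdr_target = 0.10

    i = find_crossing(fdr_curve, fdr_target)
    # i == 3 here: index 3 (0.08) is the last point at or below the 10% target,
    # and every later point exceeds it.
    print("last index under the FDR target:", i)

Because the function returns -1 for targets outside (0, 1) and len(fdr_curve) - 1 when no point of the curve falls at or below the target, callers typically only need to special-case the -1 result.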
Calculate a file hash using MD5 | def calc_file_md5(file_path):
hash_md5 = str()
method = hashlib.md5()
if not os.path.exists(file_path):
logger.error("File(%s) don not exist, can not calculation file hash" % file_path)
return hash_md5
with open(file_path, 'rb') as f:
for chunk in read_chunks(f, 1024 * 1024):
method.update(chunk)
return method.hexdigest() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def CalcMD5(filepath):\n with open(filepath,'rb') as f:\n md5obj = hashlib.md5()\n md5obj.update(f.read())\n return md5obj.hexdigest()",
"def calc_file_hash(filepath):\n with open(filepath, 'rb') as f:\n return md5(f.read()).hexdigest()",
"def md5_hash(file_path):\n with open(file_path, 'rb') as fp:\n return md5(fp.read()).hexdigest()",
"def md5sum_file(filepath):\n hasher = hashlib.md5()\n with open(filepath, 'rb') as infile:\n for chunk in util.chunk_reader(infile):\n hasher.update(chunk)\n return hasher.hexdigest()",
"def md5Hash(pathAndFilename, blockSize=8192):\n hashcode = hashlib.md5()\n with open(pathAndFilename, \"rb\" ) as f:\n block = f.read(blockSize)\n while len(block)>0:\n hashcode.update(block)\n block = f.read(blockSize)\n return hashcode.hexdigest()",
"def md5sum(file_name):\n f = open(file_name, mode='rb')\n h = hashlib.md5()\n h.update(f.read())\n return h.hexdigest()",
"def fsum(fpath):\n import hashlib\n import codecs\n with codecs.open(fpath, \"r\", \"utf-8\") as filep:\n buff = filep.read()\n cksum = hashlib.md5(buff.encode(\"utf-8\"))\n return cksum.hexdigest()",
"def md5(filename: str) -> str:\n # using md5 for speed\n _hash = hashlib.md5()\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n for block in iter(lambda: file.read(1024), b\"\"):\n _hash.update(block)\n return _hash.hexdigest()",
"def calculate_md5sum_of_a_file(context, file_name, file_path):\n command = \"md5sum \" + file_path + \"/\" + file_name + \" | awk {'print $1'}\"\n return context.cme_session.send_ssh_command(command=command)",
"def checksumFile(filename):\n return md5File(filename)",
"def MD5(self) -> _n_0_t_3[_n_0_t_9]:",
"def get_file_hash (fullpath) : \n\n # This bit was sourced from Stack Overflow via Google, specifically:\n # http://stackoverflow.com/questions/1131220/get-md5-hash-of-a-files-without-open-it-in-python\n\n md5 = hashlib.md5()\n with open(fullpath,'rb') as f: \n for chunk in iter(lambda: f.read(512*md5.block_size), ''): \n md5.update(chunk)\n # Hexdigest is the safe varchar(32) style output\n return md5.hexdigest()",
"def hash_file(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()",
"def hash_file_md5(file_path, binary=False, buffer_size=65536):\n return hash_file(file_path, hash_type=hashlib.md5, binary=binary, buffer_size=buffer_size)",
"def md5(fname):\n hash = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash.update(chunk)\n return hash.hexdigest()",
"def md5(file_path):\r\n\r\n hasher = hashlib.md5()\r\n with Profiler():\r\n with open(file_path, 'rb') as f:\r\n while True:\r\n buf = f.read(BLOCKSIZE)\r\n if not buf:\r\n break\r\n while len(buf) > 0:\r\n hasher.update(buf)\r\n buf = f.read(BLOCKSIZE)\r\n md5_hash = (hasher.hexdigest()).upper()\r\n return md5_hash",
"def generate_sum(file_path):\n #file = open(file_path, 'rb')\n #header = file.read()\n header = open(file_path, 'rb').read()\n suma_md5 = md5(header).hexdigest()\n return suma_md5",
"def count_md5hash_file(file_path):\n hash_md5 = hashlib.md5()\n with open(file_path, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()",
"def md5sum(fname):\n\tdef read_chunks(fh):\n\t\tfh.seek(0)\n\t\tchunk = fh.read(8096)\n\t\twhile chunk:\n\t\t\tyield chunk\n\t\t\tchunk = fh.read(8096)\n\t\telse: #最后要将游标放回文件开头\n\t\t\tfh.seek(0)\n\n\tm = hashlib.md5()\n\tif isinstance(fname, str) and os.path.exists(fname):\n\t\tfh = open(fname, \"rb\")\n\t\tfor chunk in read_chunks(fh):\n\t\t\tm.update(chunk)\n\t#上传的文件缓存或已打开的文件流\n\telif fname.__class__.__name__ in [\"StringIO\", \"StringO\"] or isinstance(fname, file):\n\t\tfor chunk in read_chunks(fname):\n\t\t\tm.update(chunk)\n\telse:\n\t\treturn \"\"\n\treturn m.hexdigest()",
"def md5sum(filename):\n with open(filename, mode='rb') as f:\n d = hashlib.md5()\n for buf in iter(functools.partial(f.read, 1024*100), b''):\n d.update(buf)\n return d.hexdigest()",
"def local_md5(filepath, blocksize=65536):\n hasher = hashlib.md5()\n with open(filepath, 'rb') as source:\n buf = source.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = source.read(blocksize)\n return hasher.hexdigest()",
"def md5sum(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()",
"def GetFileMd5(file_path):\n return binascii.hexlify(GetFileHashes(file_path, do_md5=True)['md5'])",
"def md5_hash_file(filepath: str) -> str:\n block_size = 64 * 1024\n md5 = hashlib.md5()\n with open(filepath, \"rb\") as file_handler:\n while True:\n data = file_handler.read(block_size)\n if not data:\n break\n md5.update(data)\n return md5.hexdigest()",
"def calc_md5(infile, block_size=256*128):\n logger = logging.getLogger(__name__)\n logger.info(f'Calculating md5 of {infile}')\n md5 = hashlib.md5()\n with open(infile, 'rb') as f:\n for chunk in iter(lambda: f.read(block_size), b''):\n md5.update(chunk)\n return md5.hexdigest()",
"def md5sum_file(filename: str, hr: bool = True) -> str:\n block_size = 256 * 128\n\n md5 = hashlib.md5()\n with open(filename, \"rb\") as f:\n for chunk in iter(lambda: f.read(block_size), b\"\"):\n md5.update(chunk)\n if hr:\n return md5.hexdigest()\n return md5.digest()",
"def file_md5(file_path):\n log.debug(\"Calculating md5 for %s\", file_path)\n start_ms = time.time() * 10**3\n fp = open(file_path, 'rb')\n try:\n # returns tuple (md5_hex, md5_base64, size)\n md5, _, _ = boto.utils.compute_md5(fp)\n finally:\n fp.close()\n duration_ms = (time.time() * 10**3) - start_ms\n log.debug(\"Calculated hash %s for %s in %s ms\", md5, file_path, \n duration_ms)\n return md5",
"def __hash_md5__(self, text):\n key = hashlib.md5()\n key.update(text.encode('utf-8'))\n return key.digest()",
"def svn_fs_file_md5_checksum(*args):\r\n return _fs.svn_fs_file_md5_checksum(*args)",
"def compute_checksum(filename):\n cmd = 'md5sum ' + filename\n return pipe(cmd)"
] | [
"0.82365745",
"0.80100733",
"0.79871917",
"0.7958175",
"0.79547316",
"0.7920195",
"0.78607863",
"0.7847036",
"0.7811081",
"0.7784871",
"0.7781297",
"0.7760272",
"0.7756879",
"0.7747943",
"0.7744211",
"0.7732946",
"0.77212536",
"0.77095705",
"0.7701398",
"0.7690354",
"0.7683295",
"0.7679766",
"0.7667348",
"0.76457787",
"0.7641873",
"0.7607377",
"0.7606256",
"0.75843155",
"0.7567851",
"0.7563343"
] | 0.8185329 | 1 |
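As a self-contained counterpart to the calc_file_md5 document in this row (which depends on the module's own read_chunks helper and logger), the following sketch shows the same chunked-MD5 pattern using only the standard library; the example path is hypothetical.

    import hashlib

    def md5_of_file(path, chunk_size=1024 * 1024):
        # Stream the file in 1 MiB chunks so large files are never loaded fully into memory.
        digest = hashlib.md5()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(chunk_size), b''):
                digest.update(chunk)
        return digest.hexdigest()

    # print(md5_of_file('example.bin'))  # hypothetical file path

Reading with iter(lambda: f.read(chunk_size), b'') is the standard-library equivalent of the read_chunks generator used in the document.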
Fetch the CPU and memory info of a process | def get_cpu_memory_info(process_name):
info_dict = dict()
try:
process_list = get_process_info(process_name)
for process in process_list:
cmdline = process.cmdline()
name = os.path.basename(cmdline[2]) if len(cmdline) > 3 else process_name + "_" + str(process.pid)
name = process_name + "_" + str(process.pid) if not name else name
cpu_info = process.cpu_percent(3)
memory_info = process.memory_full_info()
info_dict.update({name: {"cpu": cpu_info, "memory": memory_info}})
except Exception as e:
logger.error("Fetch the process %s of cpu and memory info err: %s" % (process_name, e), html=True)
return info_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cpuinfo(self):\n \n command = 'cat /proc/cpuinfo'\n\tpipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = pipe.communicate()\n\tinfo = stdout.strip()\n cpu_type = None\n\tn_proc = 0\n\tfor line in info.split('\\n'):\n if 'model name' in line:\n\t n_proc += 1\n if cpu_type is None:\n\t\t cpu_type = ' '.join(line.split(':')[-1].strip().split())\n\t\n\treturn (cpu_type, n_proc)",
"def cpuinfo():\n cpu_info = OrderedDict()\n procinfo = OrderedDict()\n\n nprocs = 0\n\n with open('/proc/cpuinfo') as cpuinfo_file:\n for line in cpuinfo_file:\n if not line.strip():\n # end of one processor\n cpu_info[\"proc{!s}\".format(nprocs)] = procinfo\n nprocs += 1\n # Reset\n procinfo = OrderedDict()\n else:\n if len(line.split(':')) == 2:\n splitted_line = line.split(':')[1].strip()\n procinfo[line.split(':')[0].strip()] = splitted_line\n else:\n procinfo[line.split(':')[0].strip()] = ''\n\n return cpu_info",
"def cpu_info():\n cpuinfo = OrderedDict()\n procinfo = OrderedDict()\n nprocs = 0\n with open('/proc/cpuinfo') as f:\n for line in f:\n if not line.strip():\n # end of one processor\n cpuinfo['proc%s' % nprocs] = procinfo\n nprocs = nprocs + 1\n # Reset\n procinfo = OrderedDict()\n else:\n if len(line.split(':')) == 2:\n procinfo[line.split(':')[0].strip()] = line.split(':')[1].strip()\n else:\n procinfo[line.split(':')[0].strip()] = ''\n\n return cpuinfo",
"def getProcessInfo():\n \n blacklist = [\"_Total\",\"Idle\"] #processes we don't care about\n \n #execute wmic command and capture output\n temp = subprocess.check_output([\"wmic\", \"path\", \"Win32_PerfRawData_PerfProc_Process\", \"get\", \n \"Name,PercentProcessorTime\"]) \n \n #iterate over processes and split into lists\n firstline = True\n result = [] #list of lists to contain the final result\n \n for line in temp.splitlines():\n if(firstline):\n firstline = False\n continue\n elif not line: #skip empty lines\n continue\n \n proclist = line.split() #split on whitespace to return a 2 element list\n \n if (proclist[0] not in blacklist ):\n result.append([proclist[0], int(proclist[1])/(10**7)]) #convert times to ints, percent processor time is in 100 nanosecond intervals\n \n \n #sort list on processor time, highest first\n result.sort(key=lambda x: x[1])\n result.reverse()\n \n # narrow process list down\n times = [x[1] for x in result]\n\n nonzero = [x for x in times if x]\n \n ind = min(int(math.ceil(len(times)/5)),len(nonzero)) #reduce processes to top 20% (atleast 1) or to all with nonzero cpu time\n cutoff = max(times[ind],1)\n \n return [x for x in result if x[1] >= cutoff]",
"def _get_core_membind_info():\n args = [\"lscpu\", \"--parse=CPU,Core,Socket,Node\"]\n process_lscpu = subprocess.check_output(args, universal_newlines=True).split(\"\\n\")\n\n # Get information about core, node, socket and cpu. On a machine with no NUMA nodes, the last column is empty\n # so regex also check for empty string on the last column\n bind_info = []\n for line in process_lscpu:\n pattern = r\"^([\\d]+,[\\d]+,[\\d]+,([\\d]+|$))\"\n regex_out = re.search(pattern, line)\n if regex_out:\n bind_info.append(regex_out.group(1).strip().split(\",\"))\n\n return bind_info",
"def get_cpu_info():\n try:\n cpu_info = subprocess.check_output('lscpu')\n return cpu_info\n except OSError:\n return None",
"def getProcInfo(self, line):\n try:\n pid, rss, cpu, cmdAndArgs = line.split(None, 3)\n except ValueError:\n # Defunct processes look like this (no RSS data)\n # '28835916 00:00:00 <defunct>'\n pid, cpu, cmdAndArgs = line.split(None, 2)\n rss = \"0\"\n # Exiting and Idle processes look like this\n # (no RSS data, TIME data == '-')\n # '11337738 - <exiting>'\n # '11862166 - <idle>'\n # _extractProcessMetrics(self, line) method will try\n # to parseCpuTime('-') with exception\n if cpu == \"-\":\n cpu = \"00:00:00\"\n\n return pid, rss, cpu, cmdAndArgs",
"async def sysinfo(self, ctx: Context):\n\t\tstart = time.perf_counter()\n\t\tend = time.perf_counter()\n\t\tduration = (end - start) * 1000\n\t\tcpuavg = psutil.cpu_percent(interval=None)\n\t\tmem = psutil.virtual_memory()[2]\n\t\tdurround = round(duration, 3)\n\t\tosun = os.uname()\n\t\tawait self.send(f\"System Info | CPU: {cpuavg}% | RAM: {mem}% | Latency: {durround * 1000}ms | OS: {sys.platform}\", whisper=[ctx.author.id])",
"def cpu_info():\n \n with open(Path.proc_cpuinfo()) as f:\n cpuinfo = {'processor_count': 0}\n for line in f:\n if ':' in line:\n fields = line.replace('\\t', '').strip().split(': ')\n # count processores and filter out core specific items\n if fields[0] == 'processor':\n cpuinfo['processor_count'] += 1\n elif fields[0] != 'core id':\n try:\n cpuinfo[fields[0]] = fields[1]\n except IndexError:\n pass\n return cpuinfo",
"def GetCpuStats(self, pid):\n class ProcTaskInfo(ctypes.Structure):\n \"\"\"Struct for proc_pidinfo() call.\"\"\"\n _fields_ = [(\"pti_virtual_size\", ctypes.c_uint64),\n (\"pti_resident_size\", ctypes.c_uint64),\n (\"pti_total_user\", ctypes.c_uint64),\n (\"pti_total_system\", ctypes.c_uint64),\n (\"pti_threads_user\", ctypes.c_uint64),\n (\"pti_threads_system\", ctypes.c_uint64),\n (\"pti_policy\", ctypes.c_int32),\n (\"pti_faults\", ctypes.c_int32),\n (\"pti_pageins\", ctypes.c_int32),\n (\"pti_cow_faults\", ctypes.c_int32),\n (\"pti_messages_sent\", ctypes.c_int32),\n (\"pti_messages_received\", ctypes.c_int32),\n (\"pti_syscalls_mach\", ctypes.c_int32),\n (\"pti_syscalls_unix\", ctypes.c_int32),\n (\"pti_csw\", ctypes.c_int32),\n (\"pti_threadnum\", ctypes.c_int32),\n (\"pti_numrunning\", ctypes.c_int32),\n (\"pti_priority\", ctypes.c_int32)]\n PROC_PIDTASKINFO = 4\n def __init__(self):\n self.size = ctypes.sizeof(self)\n super(ProcTaskInfo, self).__init__() # pylint: disable=bad-super-call\n\n proc_info = ProcTaskInfo()\n if not self.libproc:\n self.libproc = ctypes.CDLL(ctypes.util.find_library('libproc'))\n self.libproc.proc_pidinfo(pid, proc_info.PROC_PIDTASKINFO, 0,\n ctypes.byref(proc_info), proc_info.size)\n\n # Convert nanoseconds to seconds.\n cpu_time = (proc_info.pti_total_user / 1000000000.0 +\n proc_info.pti_total_system / 1000000000.0)\n results = {'CpuProcessTime': cpu_time,\n 'ContextSwitches': proc_info.pti_csw}\n\n # top only reports idle wakeup count starting from OS X 10.9.\n if self.GetOSVersionName() >= os_version_module.MAVERICKS:\n results.update({'IdleWakeupCount': self._GetIdleWakeupCount(pid)})\n return results",
"def parse_cpu_info(self):\n pipe = subprocess.Popen([self.core_exe, '-c'], 0, None, None,subprocess.PIPE)\n lines = pipe.stdout.readlines()\n x = 0\n json_str = ''\n while x < len(lines):\n json_str += lines[x].decode('utf-8').strip()\n x += 1\n decoder = json.decoder.JSONDecoder()\n self.cpu_info = decoder.decode(json_str)\n return self.cpu_info",
"def get_memory() -> dict:\n import os\n\n import psutil\n\n proc = psutil.Process(os.getpid())\n return proc.memory_info()",
"def get_cpu_info(vars = {}, log = sys.stderr):\n\n try:\n cpuinfo_file= file(PROC_CPUINFO_PATH,\"r\")\n except IOError, e:\n return\n\n cpu_info = {}\n count = 0\n\n for line in cpuinfo_file:\n\n try:\n (fieldname,value)= string.split(line,\":\")\n except ValueError, e:\n # this will happen for lines that don't have two values\n # (like the first line on 2.4 kernels)\n continue\n\n fieldname= string.strip(fieldname)\n value= string.strip(value)\n\n if fieldname == 'processor' or fieldname == 'cpu cores' or fieldname == 'model name' :\n count += 1\n cpu_to_dict(cpu_info, fieldname, value, count)\n\n\n cpuinfo_file.close()\n return cpu_info",
"def get_cpu_usage():\n process_details = RU_OBJ.get_curr_processes()\n return json.dumps(sorted(process_details, key=lambda k: k['name']))",
"def mem_info():\n meminfo = OrderedDict()\n with open('/proc/meminfo') as f:\n for line in f:\n meminfo[line.split(':')[0]] = line.split(':')[1].strip()\n return meminfo",
"def cpu_info(node):\n\n cpu = CpuUtils.get_cpu_info_per_node(node)\n\n item = \"Model name\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"CPU(s)\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"Thread(s) per core\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"Core(s) per socket\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"Socket(s)\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"NUMA node(s)\"\n numa_nodes = 0\n if item in cpu:\n numa_nodes = int(cpu[item])\n for i in range(0, numa_nodes):\n item = \"NUMA node{} CPU(s)\".format(i)\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"CPU max MHz\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n item = \"CPU min MHz\"\n if item in cpu:\n print(\"{:>20}: {}\".format(item, cpu[item]))\n\n if node[\"cpu\"][\"smt_enabled\"]:\n smt = \"Enabled\"\n else:\n smt = \"Disabled\"\n print(\"{:>20}: {}\".format(\"SMT\", smt))\n\n # VPP Threads\n print(\"\\nVPP Threads: (Name: Cpu Number)\")\n vpp_processes = cpu[\"vpp_processes\"]\n for i in vpp_processes.items():\n print(\" {:10}: {:4}\".format(i[0], i[1]))",
"def memory():\n\n mem_info = {}\n\n if platform.linux_distribution()[0]:\n with open('/proc/meminfo') as file:\n c = 0\n for line in file:\n lst = line.split()\n if str(lst[0]) == 'MemTotal:':\n mem_info['total'] = int(lst[1])\n elif str(lst[0]) in ('MemFree:', 'Buffers:', 'Cached:'):\n c += int(lst[1])\n mem_info['free'] = c\n mem_info['used'] = (mem_info['total']) - c\n elif platform.mac_ver()[0]:\n ps = subprocess.Popen(['ps', '-caxm', '-orss,comm'], stdout=subprocess.PIPE).communicate()[0]\n vm = subprocess.Popen(['vm_stat'], stdout=subprocess.PIPE).communicate()[0]\n\n # Iterate processes\n process_lines = ps.split('\\n')\n sep = re.compile('[\\s]+')\n rss_total = 0 # kB\n for row in range(1, len(process_lines)):\n row_text = process_lines[row].strip()\n row_elements = sep.split(row_text)\n try:\n rss = float(row_elements[0]) * 1024\n except:\n rss = 0 # ignore...\n rss_total += rss\n\n # Process vm_stat\n vm_lines = vm.split('\\n')\n sep = re.compile(':[\\s]+')\n vm_stats = {}\n for row in range(1, len(vm_lines) - 2):\n row_text = vm_lines[row].strip()\n row_elements = sep.split(row_text)\n vm_stats[(row_elements[0])] = int(row_elements[1].strip('\\.')) * 4096\n\n mem_info['total'] = rss_total\n mem_info['used'] = vm_stats[\"Pages active\"]\n mem_info['free'] = vm_stats[\"Pages free\"]\n else:\n raise('Unsupported Operating System.\\n')\n exit(1)\n\n return mem_info",
"def get_cpu_core():\n processor_info = subprocess.getoutput('dmidecode -t processor')\n cpu_core_value = re.findall(r'(?i)Core Count:\\s+(.*?)\\n', processor_info, re.S)[0]\n log.info('cpu_core value:{}'.format(cpu_core_value))\n if cpu_core_value:\n cpu_core = cpu_core_value\n else:\n cpu_core = ''\n return cpu_core",
"def cpu_online_map():\r\n cpuinfo = get_cpuinfo()\r\n cpus = []\r\n for cpu in cpuinfo:\r\n cpus.append(cpu['processor']) # grab cpu number\r\n return cpus",
"def query_cpu(**_) -> t.Mapping[str, t.Any]:\n if not CPU:\n return {}\n cpu = cpuinfo.get_cpu_info()\n clock_current, clock_min, clock_max = query_cpu_clock()\n logical_cores, physical_cores = query_cpu_cores()\n cache = _get_cache_sizes(cpu)\n return {\n 'brand': cpu.get('brand', None),\n 'logical_cores': logical_cores,\n 'physical_cores': physical_cores,\n 'clock': clock_current,\n 'clock_min': clock_min,\n 'clock_max': clock_max,\n 'cache': cache}",
"def evaluate_data():\n try:\n # General system related info\n ram = psutil.virtual_memory()\n total_ram = round((ram.total / 1024 / 1024),2)\n free_ram = round((ram.available / 1024 / 1024),2)\n used_ram = round((ram.used / 1024 / 1024),2)\n cpu_total = psutil.cpu_count(logical=True)\n cpu_loadavg = round([x / cpu_total * 100 for x in psutil.getloadavg()][0],2)\n acs_8080 = sp.getoutput(\"netstat -an|grep -c 8080\")\n acs_8181 = sp.getoutput(\"netstat -an|grep -c 8181\")\n acs_8443 = sp.getoutput(\"netstat -an|grep -c 8443\")\n mysql = sp.getoutput(\"netstat -an|grep -c 3306\")\n oracle = sp.getoutput(\"netstat -an|grep -c 1521\")\n logging.info('General system info obtained')\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n # Process specific details\n try:\n iis_pid = SystemInformation.get_pid(\"w3wp.exe\")\n iis_ram = SystemInformation.get_ram_usage(iis_pid)\n iis_cpu = SystemInformation.get_cpu_usage(iis_pid)\n java_pid = SystemInformation.get_pid(\"java.exe\")\n java_ram = SystemInformation.get_ram_usage(java_pid)\n java_cpu = SystemInformation.get_cpu_usage(java_pid)\n mysqld_pid = SystemInformation.get_pid(\"mysqld.exe\")\n mysqld_ram = SystemInformation.get_ram_usage(mysqld_pid) \n mysqld_cpu = SystemInformation.get_cpu_usage(mysqld_pid)\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n\n try:\n dictionary = {}\n now = datetime.datetime.now()\n timestampt = now.strftime(\"%Y-%m-%d-%H:%M:%S\")\n fieldnames = ['timestampt','total_ram','free_ram','used_ram','cpu_total','cpu_loadavg','acs_8080','acs_8181','acs_8443','mysql','oracle','iis_ram','iis_cpu','java_ram','java_cpu','mysqld_ram','mysqld_cpu']\n for var in fieldnames:\n dictionary[var] = eval(var)\n \n logging.info('Data for report generated')\n return dictionary\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)",
"def _proc_info(self):\n ret = cext.proc_info(self.pid)\n assert len(ret) == len(pinfo_map)\n return ret",
"def resource_collect(pid=None):\n try:\n import psutil\n except ImportError:\n return {}\n\n p = psutil.Process(pid or os.getpid())\n return {'cpu_percent': psutil.cpu_percent(),\n 'status': p.status(),\n 'memory_percent': p.memory_percent(),\n 'memory_info_ex': p.memory_info_ex(),\n 'disk_io_counters': metrics.disk_io_counters(),\n 'net_io_counters': metrics.net_io_counters()}",
"def get_mem_info():\n import psutil\n vm = psutil.virtual_memory()\n return {\n \"memtotal\": vm.total,\n \"memavailable\": vm.available,\n }",
"def _cpu_and_men_usage(processes):\n cpu_usage = 0\n mem_usage_mb = 0\n\n for process in processes:\n cpu_usage += process.cpu_percent()\n mem_usage_mb += process.memory_info().rss >> 20 # from bytes to Mb\n\n return cpu_usage, mem_usage_mb",
"def get_processes_info():\n processes_list = []\n for proc in get_processes():\n try:\n # Fetch process details as dict\n pinfo = proc.as_dict(attrs=[\"pid\", \"name\", \"username\"])\n pinfo[\"rss\"] = proc.memory_info().rss / (1024 * 1024)\n pinfo[\"ports\"] = []\n try:\n connections = proc.connections()\n except psutil.Error:\n continue\n if connections:\n for conn in connections:\n pinfo[\"ports\"].append({\"port\": conn.laddr.port, \"status\": conn.status})\n # Append dict to list\n processes_list.append(pinfo)\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n processes_list = sorted(processes_list, key=lambda procObj: procObj[\"rss\"], reverse=True)\n return processes_list[:25]",
"def eval_cpuset():\n\tnum_cpu = run('grep -c ^processor /proc/cpuinfo',quiet=True,warn_only=True)\n\tprint(red('Number of cpus : \\t'+num_cpu))",
"def get_cpu(self):\n pass",
"def procinfo() -> None:\n if pwndbg.gdblib.qemu.is_qemu():\n print(\n message.error(\n \"QEMU target detected: showing result for the qemu process\"\n \" - so it will be a bit inaccurate (excessive for the parts\"\n \" used directly by the qemu process)\"\n )\n )\n exe = pwndbg.auxv.get()[\"AT_EXECFN\"]\n print(\"%-10s %r\" % (\"exe\", exe))\n\n proc = Process()\n\n # qemu-usermode fail!\n if not proc.status:\n return\n\n print(\"%-10s %s\" % (\"cmdline\", proc.cmdline))\n\n print(\"%-10s %s\" % (\"cwd\", proc.cwd))\n\n files = dict(proc.open_files)\n\n for c in proc.connections:\n files[c.fd] = str(c)\n\n print(\"%-10s %s\" % (\"pid\", proc.pid))\n print(\"%-10s %s\" % (\"tid\", proc.tid))\n\n if proc.selinux != \"unconfined\":\n print(\"%-10s %s\" % (\"selinux\", proc.selinux))\n\n print(\"%-10s %s\" % (\"ppid\", proc.ppid))\n\n if not pwndbg.gdblib.android.is_android():\n print(\"%-10s %s\" % (\"uid\", proc.uid))\n print(\"%-10s %s\" % (\"gid\", proc.gid))\n print(\"%-10s %s\" % (\"groups\", proc.groups))\n else:\n print(\"%-10s %s\" % (\"uid\", list(map(pwndbg.lib.android.aid_name, proc.uid))))\n print(\"%-10s %s\" % (\"gid\", list(map(pwndbg.lib.android.aid_name, proc.gid))))\n print(\"%-10s %s\" % (\"groups\", list(map(pwndbg.lib.android.aid_name, proc.groups))))\n\n for fd, path in files.items():\n if not set(path) < set(string.printable):\n path = repr(path)\n\n print(\"%-10s %s\" % (\"fd[%i]\" % fd, path))\n\n return",
"def get_gpu_memory_map():\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ], encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n print(\"Current usage: %i of 11178\" % gpu_memory_map[1])"
] | [
"0.7663754",
"0.74077755",
"0.7378916",
"0.72161144",
"0.71284556",
"0.7078209",
"0.7037945",
"0.70256805",
"0.70198095",
"0.6998216",
"0.69025713",
"0.68909866",
"0.68835574",
"0.6817426",
"0.67715806",
"0.67403156",
"0.673724",
"0.6727778",
"0.6664821",
"0.66454977",
"0.66382605",
"0.6633089",
"0.6623557",
"0.6621863",
"0.6620447",
"0.6611179",
"0.66073525",
"0.6599363",
"0.65558076",
"0.65003467"
] | 0.75478333 | 1 |
Check whether `obj` inherits from Boost.Python.enum. | def is_boost_enum(obj: Any) -> bool:
for cls in type(obj).__bases__:
if "Boost.Python.enum" in str(cls):
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_enum(schema_obj):\n\n return (isinstance(schema_obj, schema.Enum) or\n (isinstance(schema_obj, schema.Field) and schema_obj.enum_type))",
"def is_enum(self):\n return False",
"def is_enum(self):\n return self.is_complex and not self.is_class",
"def inherits_from(obj, a_class):\n if a_class == type(obj):\n return False\n return isinstance(obj, a_class)",
"def inherits_from(obj, a_class):\n if type(obj) is not a_class and issubclass(type(obj), a_class):\n return True\n else:\n return False",
"def inherits_from(obj, a_class):\n if type(obj) is not a_class:\n return(issubclass(type(obj), a_class))\n else:\n return False",
"def inherits_from(obj, a_class):\n return issubclass(type(obj), a_class) and type(obj) != a_class",
"def inherits_from(obj, a_class):\n if isinstance(type(obj), a_class) and type(obj) != a_class:\n return True\n return False",
"def inherits_from(obj, a_class):\n return(issubclass(type(obj), a_class) and type(obj) != a_class)",
"def inherits_from(obj, a_class):\n if issubclass(type(obj), a_class):\n if type(obj) is not a_class:\n return True\n return False",
"def inherits_from(obj, a_class):\n\n if issubclass(type(obj), a_class) and type(obj) != a_class:\n return True\n return False",
"def inherits_from(obj, a_class):\n if issubclass(type(obj), a_class) and not type(obj) == a_class:\n return True\n else:\n return False",
"def inherits_from(obj, a_class):\n if type(obj) == a_class:\n return False\n return issubclass(type(obj), a_class)",
"def inherits_from(obj, a_class):\n return ((issubclass(type(obj), a_class)) and type(obj) != a_class)",
"def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) != a_class",
"def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) != a_class",
"def inherits_from(obj, a_class):\n\n if isinstance(obj, a_class) and type(obj) is not a_class:\n return True\n\n return False",
"def test_enum_detection():\n\n grammar = \"\"\"\n IsEnum: \"keyword1\" | \"keyword2\" | \"keyword3\";\n IsNotEnum: val=\"keyword1\" | val=\"keyword2\" | val=\"keyword3\";\n StillNotEnum: val=\"keyword1\" | \"keyword2\" | \"keyword3\";\n\n // identified as EDatatype with object type\n NotEnumAgain: SubEnum | SubEnum2;\n\n // this is an enumeration\n SubEnum: \"keyword1\" | \"keyword2\";\n SubEnum2: \"keyword3\" | \"keyword4\";\n \"\"\"\n\n mm = metamodel_from_str(grammar)\n\n IsEnum = mm['IsEnum']\n assert isinstance(IsEnum, ecore.EEnum)\n assert IsEnum.name == 'IsEnum'\n assert all((x in IsEnum for x in (\"keyword1\", \"keyword2\", \"keyword3\")))\n\n IsNotEnum = mm['IsNotEnum']\n assert IsNotEnum.name == 'IsNotEnum'\n assert isinstance(IsNotEnum, ecore.EClass)\n\n StillNotEnum = mm['StillNotEnum']\n assert StillNotEnum.name == 'StillNotEnum'\n assert isinstance(StillNotEnum, ecore.EClass)\n\n NotEnumAgain = mm['NotEnumAgain']\n assert isinstance(NotEnumAgain, ecore.EDataType)\n assert NotEnumAgain.name == 'NotEnumAgain'\n\n SubEnum = mm['SubEnum']\n assert isinstance(SubEnum, ecore.EEnum)\n assert SubEnum.name == 'SubEnum'\n assert all((x in IsEnum for x in (\"keyword1\", \"keyword2\")))",
"def isinstance_blender_object(self, b_obj):\n # lame and slow, but functional\n return b_obj in Blender.Object.Get()",
"def inherits_from(obj, a_class):\n return (isinstance(obj, a_class) and type(obj) != a_class)",
"def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) is not a_class",
"def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) is not a_class",
"def inherits_from(obj, a_class):\n\n return isinstance(obj, a_class) and type(obj) is not a_class",
"def applies(cls, obj):\n return type(obj) in cls.types",
"def verify_type(self, obj):\n return isinstance(obj, self.type_)",
"def is_boost_class(obj: Any) -> bool:\n return \"Boost.Python.class\" in str(type(obj))",
"def _is_this_color(cls, obj: Any) -> bool:\n\n return type(obj) is cls",
"def is_type(obj):\n return type(obj) is type or type(obj) is types.ClassType",
"def check_enumeration_style(ctx, stmt):\n elemtype = stmt.search_one(\"type\")\n if elemtype is None or elemtype.arg != \"enumeration\":\n return\n\n for enum in elemtype.search(\"enum\"):\n if re.match(r\"[a-z]\", enum.arg):\n err_add(ctx.errors, stmt.pos, \"OC_ENUM_CASE\",\n (enum.arg, enum.arg.upper().replace(\"-\", \"_\")))\n elif not re.match(r\"^[A-Z0-9][A-Z0-9\\_\\.]{0,}$\", enum.arg):\n err_add(ctx.errors, stmt.pos, \"OC_ENUM_UNDERSCORES\",\n (enum.arg, enum.arg.upper().replace(\"-\", \"_\")))",
"def kind_of(obj):\n # why don't I use isinstance - it saves us big time\n\n # dict, list, and tuple are differianted from str, unicode, int, bool, and float\n # because they have special treatment and simple `==` or `is` is not enough to\n # prove them valid.\n obj_type = type(obj)\n if obj_type is dict:\n return TYPE_DICTIONARY\n elif obj_type is list:\n return TYPE_LIST\n elif obj_type is tuple:\n return TYPE_TUPLE\n elif obj in ATOMIC_TYPES:\n return TYPE_TYPE\n elif obj is object:\n return TYPE_OBJECT\n elif getattr(obj, \"__class__\", False) and issubclass(obj.__class__, BaseValidator):\n return TYPE_VALIDATOR\n elif callable(obj):\n return TYPE_FUNCTION\n # this f##king SRE_Pattern, why can't I f##king kill it\n elif getattr(obj, \"match\", False) and getattr(obj, \"search\", False):\n return TYPE_REGEX\n else:\n return TYPE_UNKNOWN"
] | [
"0.7379043",
"0.6682498",
"0.6575283",
"0.63988435",
"0.6314002",
"0.63092816",
"0.6300783",
"0.62994534",
"0.62766397",
"0.6261479",
"0.6242206",
"0.6231385",
"0.6226024",
"0.62231505",
"0.6219832",
"0.6219832",
"0.6207228",
"0.61851394",
"0.61775",
"0.6171979",
"0.6163057",
"0.6163057",
"0.61380327",
"0.6103957",
"0.6084181",
"0.60545313",
"0.5948525",
"0.58743703",
"0.5856578",
"0.58438617"
] | 0.8621327 | 0 |
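The positive document in the row above identifies Boost.Python enums purely from the string form of the object's base classes. A minimal sketch of that behaviour, assuming no Boost.Python bindings are available: the `_BoostEnumMeta` / `_FakeEnum` names are hypothetical stand-ins whose repr mimics a Boost.Python.enum base, and the check is restated with `any()` for brevity.

```python
from typing import Any


def is_boost_enum(obj: Any) -> bool:
    # Same idea as the positive document: inspect the string form of each base class.
    return any("Boost.Python.enum" in str(cls) for cls in type(obj).__bases__)


class _BoostEnumMeta(type):
    # Hypothetical metaclass: makes str(cls) look like a Boost.Python.enum base.
    def __repr__(cls) -> str:
        return "<class 'Boost.Python.enum'>"


class _FakeBoostEnumBase(metaclass=_BoostEnumMeta):
    pass


class _FakeEnum(_FakeBoostEnumBase):
    pass


assert is_boost_enum(_FakeEnum()) is True   # base's repr contains the marker string
assert is_boost_enum(42) is False           # int's only base is object
```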
Check whether `obj` is an IceCube-specific class. | def is_icecube_class(obj: Any) -> bool:
classname = str(type(obj))
return "icecube." in classname | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def obj_is_in_class(obj: unrealsdk.UObject, in_class: str) -> bool:\n return bool(obj.Class == unrealsdk.FindClass(in_class))",
"def isclass(object):\r\n return isinstance(object, (type, types.ClassType))",
"def is_child_class(obj, classinfo):\n try:\n return issubclass(obj, classinfo)\n except TypeError:\n return None",
"def _is_this_color(cls, obj: Any) -> bool:\n\n return type(obj) is cls",
"def isclass(object):\n if not inspect.isclass(object):\n return False\n if isbuiltin(object):\n return False\n return type not in inspect.getmro(object)",
"def is_kind_of_class(obj, a_class):\n return(isinstance(obj, a_class))",
"def is_dataclass_instance(obj: Any) -> bool:\n return dataclasses.is_dataclass(obj) and not isinstance(obj, type)",
"def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)",
"def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)",
"def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)",
"def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)",
"def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)",
"def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)",
"def issubclass(obj, cls):\r\n if isinstance(obj, Assert):\r\n obj = obj.obj\r\n return assert_(issubclass(obj, cls),\r\n 'not issubclass(%s, %s)' % (_repr(obj), _repr(cls)))",
"def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class) is True:\n return True\n else:\n return False",
"def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False",
"def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False",
"def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n return False",
"def is_kind_of_class(obj, a_class):\n return (isinstance(obj, a_class))",
"def is_ctypes_instance(obj):\n return issubclass(type(obj), ctypes.Structure) or issubclass(type(obj), ctypes.Union)",
"def is_kind_of_class(obj, a_class):\n\n return isinstance(obj, a_class)",
"def is_kind_of_class(obj, a_class):\n\n return (isinstance(obj, a_class))",
"def is_kind_of_class(obj, a_class):\n\n if isinstance(obj, a_class):\n return True\n else:\n return False",
"def isclassinstance(object):\n if not hasattr(object, \"__class__\"):\n return False\n if isbuiltin(object.__class__):\n return False\n return True",
"def verify_type(self, obj):\n return isinstance(obj, self.type_)",
"def isinstance_blender_object(self, b_obj):\n # lame and slow, but functional\n return b_obj in Blender.Object.Get()",
"def is_layer(obj):\n # TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).\n return hasattr(obj, \"_is_layer\") and not isinstance(obj, type)",
"def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n return False",
"def is_type(obj):\n return type(obj) is type or type(obj) is types.ClassType",
"def inherits_from(obj, a_class):\n if type(obj) is not a_class:\n return(issubclass(type(obj), a_class))\n else:\n return False"
] | [
"0.6861989",
"0.6678009",
"0.6629571",
"0.6614257",
"0.6606721",
"0.65752107",
"0.65565765",
"0.6548872",
"0.6548872",
"0.6548872",
"0.6548872",
"0.6548872",
"0.6548872",
"0.6514387",
"0.65030473",
"0.6499084",
"0.6499084",
"0.6495338",
"0.6488729",
"0.6484178",
"0.6481067",
"0.64572537",
"0.64511603",
"0.6429491",
"0.6422612",
"0.6414265",
"0.64065415",
"0.6308692",
"0.6295539",
"0.6291945"
] | 0.8361728 | 0 |
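The classname test in the row above needs only the string form of the object's type. A small sketch, assuming the icecube bindings are not importable here, so only negative cases are exercised; the commented positive example is illustrative.

```python
from typing import Any


def is_icecube_class(obj: Any) -> bool:
    # Same module-prefix test as the positive document.
    return "icecube." in str(type(obj))


# For a genuine IceCube object the type string would look roughly like
# "<class 'icecube.dataclasses.I3Particle'>", and the check would return True.
assert is_icecube_class({"frame": 1}) is False   # "<class 'dict'>"
assert is_icecube_class(3.14) is False           # "<class 'float'>"
```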
Check whether `obj` is a method. | def is_method(obj: Any) -> bool:
return inspect.ismethod(obj) or "Boost.Python.function" in str(type(obj)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ismethod(object):\r\n return isinstance(object, types.MethodType)",
"def is_method_of(method, object):\n if not callable(method) or not hasattr(method, \"__name__\"):\n return False\n if inspect.ismethod(method):\n return method.__self__ is object\n for cls in inspect.getmro(object.__class__):\n if cls.__dict__.get(method.__name__, None) is method:\n return True\n return False",
"def has_func(cls, obj, *args):\n methods = dir(obj)\n matched = [x for x in args if x in methods]\n return len(matched) == len(args)",
"def hasmethod(obj, methodname):\n \n if not hasattr(obj, methodname):\n return False\n method = getattr(obj, methodname)\n return callable(method)",
"def is_api_method(obj, name):\n try:\n method = getattr(obj, name)\n except AttributeError:\n return False\n return (ismethod(method) and hasattr(method, \"__api_call\"))",
"def is_instance_method(func):\n return inspect.ismethod(func) and not inspect.isclass(func.__self__)",
"def _has_method(arg, method):\n return hasattr(arg, method) and callable(getattr(arg, method))",
"def is_function(obj):\n if type(obj) is types.FunctionType:\n return True\n if not is_object(obj):\n return False\n if not hasattr(obj, '__class__'):\n return False\n module = obj.__class__.__module__\n name = obj.__class__.__name__\n return (module == '__builtin__' and\n name in ('function',\n 'builtin_function_or_method',\n 'instancemethod',\n 'method-wrapper'))",
"def is_callable(obj):\n return callable(obj)",
"def callable(obj):\n return bool(_PyCallable_Check(_py_object(obj)))",
"def callable(obj): # pylint: disable=redefined-builtin\n return bool(PyCallable_Check(py_object(obj)))",
"def ismethoddescriptor(object):\r\n return (hasattr(object, \"__get__\")\r\n and not hasattr(object, \"__set__\") # else it's a data descriptor\r\n and not ismethod(object) # mutual exclusion\r\n and not isfunction(object)\r\n and not isclass(object))",
"def is_method(self, file, i):\n\n # Check if line is a function definition as method is also a function\n # Note: Don't run is_func() if line found inside class\n return self.is_func(file, i)",
"def isinstancemethod(cls, obj):\n return _isinstancemethod(cls, obj)",
"def isroutine(object):\r\n return (isbuiltin(object)\r\n or isfunction(object)\r\n or ismethod(object)\r\n or ismethoddescriptor(object))",
"def is_function(obj):\n return isinstance(obj, (types.FunctionType, types.MethodType,\n types.LambdaType))",
"def isclassmethod(object):\n if isinstance(object, classmethod):\n return True\n\n # Let's not give up quite yet.\n original = _get_dict_function(object)\n return isinstance(original, classmethod)",
"def has_callable(obj, member):\n return hasattr(obj, member) and callable(getattr(obj, member))",
"def is_class_method(func):\n return inspect.ismethod(func) and inspect.isclass(func.__self__)",
"def inspect_method(obj):\n\n print_with_indent(\"+Method %s\" % obj.__name__)\n print_docstr(obj)\n try:\n args, varargs, kwargs, defaults = get_arguments(obj)\n except TypeError:\n print()\n return\n\n if args:\n if args[0] == 'self':\n print_with_indent('\\t%s is an instance method' % obj.__name__)\n args.pop()\n\n print_with_indent('\\t-Method Arguments: ', args)\n\n if defaults:\n default_args = args[len(args) - len(defaults)]\n print_with_indent('\\t-Default Values:',\n zip(default_args, defaults))\n\n if varargs:\n print_with_indent('\\t-Positional Arguments:', varargs)\n if kwargs:\n print_with_indent('\\t-Keyword Arguments:', kwargs)\n\n print()",
"def is_static_method(func, cls):\n return False if cls is None else isinstance(cls.__dict__[func.__name__], staticmethod)",
"def isstaticmethod(object):\n # TODO: This can only identify those static methods that\n # are directly taken from object's dict. Like\n # Class.__dict__[staticmethodname]\n if isinstance(object, staticmethod):\n return True\n\n if not inspect.isfunction(object):\n return False\n\n # Module level functions are disqualified here.\n if \".\" not in getattr(object, \"__qualname__\", \"\"):\n return False\n\n # It is either method (accessed as Class.method) or staticfunction\n # TODO: Is this really the only way?\n args = object.__code__.co_varnames\n if len(args) == 0:\n return True\n\n return args[0] != 'self'",
"def is_function_type(self, objtype):\n # return self.__cfuncptrt == type(objtype)\n return issubclass(objtype, self.__cfuncptrt)\n # return isinstance(objtype, self.__cfuncptrt)",
"def is_fixture_method(callable_):\n # ensure we don't pick up turtles/mocks as fixtures\n if not inspect.isroutine(callable_):\n return False\n\n # _fixture_id indicates this method was tagged by us as a fixture\n return callable_hasattr(callable_, '_fixture_type')",
"def is_classmethod(instancemethod):\n\n # attribute = (isPython3() and ['__self__'] or ['im_self'])[0]\n # if hasattr(instancemethod, attribute):\n # return getattr(instancemethod, attribute) is not None\n # return False\n\n return isinstance(instancemethod, MethodTypes)",
"def is_callable(self, name, method):\r\n return name in self._registry and self._registry[name].method == method",
"def _is_command(obj, cli):\n if not inspect.isfunction(obj) or obj.__name__.startswith(\"_\"):\n return False\n return hasattr(obj, \"__module__\") and obj.__module__ == cli.__name__",
"def isfunction(object):\r\n return isinstance(object, types.FunctionType)",
"def is_top_level_function(obj: Any) -> bool:\r\n return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__",
"def readable(obj: Any) -> bool:\n\n return callable(getattr(obj, \"read\", None))"
] | [
"0.8177807",
"0.77033913",
"0.7681355",
"0.76295954",
"0.7574309",
"0.71349955",
"0.6971231",
"0.6931637",
"0.68637705",
"0.6751655",
"0.6716016",
"0.6662323",
"0.665518",
"0.6646023",
"0.6576209",
"0.6575024",
"0.6565945",
"0.6479175",
"0.6429495",
"0.6226134",
"0.6117707",
"0.6087662",
"0.6033915",
"0.6028288",
"0.60060656",
"0.59741896",
"0.58909076",
"0.58600414",
"0.5858147",
"0.5842862"
] | 0.8378316 | 0 |
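The positive document in the row above combines `inspect.ismethod` with a string test for Boost.Python function wrappers. A sketch of the plain-Python side of that behaviour; the `Example` class is made up for illustration, and without Boost.Python bindings the second branch cannot be triggered here.

```python
import inspect
from typing import Any


def is_method(obj: Any) -> bool:
    # Bound Python methods, or Boost.Python function wrappers identified by type string.
    return inspect.ismethod(obj) or "Boost.Python.function" in str(type(obj))


class Example:
    def bound(self) -> None:
        """A regular instance method."""


assert is_method(Example().bound) is True   # bound method -> inspect.ismethod is True
assert is_method(Example.bound) is False    # accessed on the class, it is a plain function
assert is_method(len) is False              # builtin, and not a Boost wrapper either
```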
Return list of valid member variables, ignoring mangled (__) variables, types, methods, and Boost enums. | def get_member_variables(
obj: Any, return_discarded: bool = False
) -> Union[List[str], Tuple[List[str], Dict[str, List[str]]]]:
valid_member_variables = []
discarded_member_variables: Dict[str, List[str]] = {
"mangled": [],
"is_type": [],
"invalid_attr": [],
"is_method": [],
"is_boost_enum": [],
"is_boost_class": [],
}
for attr in dir(obj):
if attr.startswith("__"):
discarded_member_variables["mangled"].append(attr)
continue
try:
value = getattr(obj, attr)
except RuntimeError:
discarded_member_variables["invalid_attr"].append(attr)
continue
if is_type(value):
discarded_member_variables["is_type"].append(attr)
elif is_method(value):
discarded_member_variables["is_method"].append(attr)
elif is_boost_enum(value):
discarded_member_variables["is_boost_enum"].append(attr)
elif is_boost_class(value):
discarded_member_variables["is_boost_class"].append(attr)
else:
valid_member_variables.append(attr)
if return_discarded:
return valid_member_variables, discarded_member_variables
return valid_member_variables | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vars(cls):\n for key in dir(cls):\n if key.startswith('var_'):\n yield key[4:]",
"def unusedVars(self):\n fullcode = self.code_cfg\n variables = set([x[1:] for x in codeconfig_getvars(fullcode)])\n exceptions = set(['complexity', 'code_cfg'])\n clsvars = set(vars(self).keys())\n nones = set(filter(lambda x: self.__dict__[x] is None, clsvars))\n nones = nones.union(set(filter(lambda x: str(self.__dict__[x]) == \"\", clsvars)))\n unused = clsvars - variables - exceptions - nones\n return unused",
"def get_all_variables(instance):\n return [v for v in dir(instance) if not callable(getattr(instance, v))]",
"def get_public_variables(t):\n return [i[0] for i in\n inspect.getmembers(t, lambda i:not inspect.isroutine(i))\n if not i[0].startswith(\"__\")]",
"def _setup_special_names(self):\n special_names = []\n dynamic_params = tuple(set(self._fget_params_list + self._fset_params_list))\n # Check whether class variables of DynamicProperty type are present\n for attr_name, attr in getmembers(self.__class__):\n if isinstance(attr, DynamicProperty):\n special_names += [attr_name + \"_\" + key for key in dynamic_params]\n # Check if special variables are defined at class level\n for attr, value in getmembers(self.__class__):\n if attr in special_names:\n # Copy class special variable at instance level, prefixing reserved_prefix\n setattr(self, self.__reserved_prefix + attr, value)\n return special_names",
"def missing_variables(self):\n return [k for k in self.all_variables if k not in self._properties]",
"def _unicode_members(self):\n return [(m.name or m._as_rhs()) for m in self.members]",
"def variables(self):\n return {u for u in self if u.type == 'var'}",
"def json_properties(self):\n attributes = []\n all = vars(self)\n for var in all:\n if var[:1] != '_':\n attributes.append(var)\n if isinstance(self, db.Model):\n properties = self.properties().keys()\n for property in properties:\n if property[:1] != '_':\n attributes.append(property)\n return attributes",
"def _var_check(self):\n missing = set()\n for v in self.variables:\n if getattr(self, v) is None:\n missing.add(v)\n self.missing = missing",
"def __fields(self):\n return [self.__class__.__dict__[f] for f in self.__class__._fields]",
"def attrs(self):\n return list(name for name in self.__dict__\n if not name.startswith(\"_\"))",
"def vars(self):\n return self._return_if('_vars')",
"def filter_members(self, members, want_all):\r\n\r\n def member_is_special(member):\r\n # TODO implement special matlab methods: disp, subsref, etc.\r\n return False\r\n\r\n def member_is_private(member):\r\n attrs = self.get_attr(member, \"attrs\", None)\r\n if attrs:\r\n access = attrs.get(\"Access\", None)\r\n get_access = attrs.get(\"GetAccess\", None)\r\n if access:\r\n if access == \"private\":\r\n return True\r\n elif get_access:\r\n if get_access == \"private\":\r\n return True\r\n return False\r\n else:\r\n return False\r\n\r\n def member_is_protected(member):\r\n attrs = self.get_attr(member, \"attrs\", None)\r\n if attrs:\r\n access = attrs.get(\"Access\", None)\r\n get_access = attrs.get(\"GetAccess\", None)\r\n if access:\r\n if access == \"protected\":\r\n return True\r\n elif get_access:\r\n if get_access == \"protected\":\r\n return True\r\n return False\r\n else:\r\n return False\r\n\r\n def member_is_hidden(member):\r\n attrs = self.get_attr(member, \"attrs\", None)\r\n if attrs:\r\n hidden = attrs.get(\"Hidden\", None)\r\n # It is either None or True\r\n if hidden:\r\n return True\r\n return False\r\n else:\r\n return False\r\n\r\n def member_is_friend(member):\r\n attrs = self.get_attr(member, \"attrs\", None)\r\n if attrs:\r\n access = attrs.get(\"Access\", None)\r\n if access:\r\n # Only friend meta classes define access lists\r\n if isinstance(access, list):\r\n return True\r\n elif access:\r\n # This is a friend meta class\r\n return access[0] == \"?\"\r\n return False\r\n else:\r\n return False\r\n\r\n def member_is_friend_of(member, friends):\r\n attrs = self.get_attr(member, \"attrs\", None)\r\n if attrs:\r\n access = attrs.get(\"Access\", None)\r\n if not isinstance(access, list):\r\n access = [access]\r\n for has_access in access:\r\n if has_access in friends:\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n ret = []\r\n\r\n # search for members in source code too\r\n namespace = \".\".join(self.objpath) # will be empty for modules\r\n\r\n if self.analyzer:\r\n attr_docs = self.analyzer.find_attr_docs()\r\n else:\r\n attr_docs = {}\r\n\r\n # process members and determine which to skip\r\n for membername, member in members:\r\n # if isattr is True, the member is documented as an attribute\r\n isattr = False\r\n\r\n doc = self.get_attr(member, \"__doc__\", None)\r\n # if the member __doc__ is the same as self's __doc__, it's just\r\n # inherited and therefore not the member's doc\r\n cls = self.get_attr(member, \"__class__\", None)\r\n if cls:\r\n cls_doc = self.get_attr(cls, \"__doc__\", None)\r\n if cls_doc == doc:\r\n doc = None\r\n has_doc = bool(doc)\r\n\r\n keep = False\r\n if want_all and member_is_special(member):\r\n # special methods\r\n if self.options.special_members is ALL:\r\n keep = has_doc or self.options.undoc_members\r\n elif (\r\n self.options.special_members\r\n and self.options.special_members is not ALL\r\n and membername in self.options.special_members\r\n ):\r\n keep = has_doc or self.options.undoc_members\r\n elif want_all and member_is_private(member):\r\n # ignore private members\r\n if self.options.private_members is ALL:\r\n keep = has_doc or self.options.undoc_members\r\n elif (\r\n self.options.private_members\r\n and self.options.private_members is not ALL\r\n and membername in self.options.private_members\r\n ):\r\n keep = has_doc or self.options.undoc_members\r\n elif want_all and member_is_protected(member):\r\n # ignore protected members\r\n if self.options.protected_members is ALL:\r\n keep = 
has_doc or self.options.undoc_members\r\n elif (\r\n self.options.protected_members\r\n and self.options.protected_members is not ALL\r\n and membername in self.options.protected_members\r\n ):\r\n keep = has_doc or self.options.undoc_members\r\n elif want_all and member_is_hidden(member):\r\n # ignore hidden members\r\n if self.options.hidden_members is ALL:\r\n keep = has_doc or self.options.undoc_members\r\n elif (\r\n self.options.hidden_members\r\n and self.options.hidden_members is not ALL\r\n and membername in self.options.hidden_members\r\n ):\r\n keep = has_doc or self.options.undoc_members\r\n elif want_all and member_is_friend(member):\r\n # ignore friend members\r\n if self.options.friend_members is ALL:\r\n keep = has_doc or self.options.undoc_members\r\n elif (\r\n self.options.friend_members\r\n and self.options.friend_members is not ALL\r\n and member_is_friend_of(member, self.options.friend_members)\r\n ):\r\n keep = has_doc or self.options.undoc_members\r\n elif (namespace, membername) in attr_docs:\r\n # keep documented attributes\r\n keep = True\r\n isattr = True\r\n else:\r\n # ignore undocumented members if :undoc-members: is not given\r\n keep = has_doc or self.options.undoc_members\r\n\r\n # give the user a chance to decide whether this member\r\n # should be skipped\r\n if self.env.app:\r\n # let extensions preprocess docstrings\r\n skip_user = self.env.app.emit_firstresult(\r\n \"autodoc-skip-member\",\r\n self.objtype,\r\n membername,\r\n member,\r\n not keep,\r\n self.options,\r\n )\r\n if skip_user is not None:\r\n keep = not skip_user\r\n\r\n if keep:\r\n ret.append((membername, member, isattr))\r\n\r\n return ret",
"def _get_members(obj):\n public = []\n for name in dir(obj):\n try:\n value = getattr(obj, name)\n except AttributeError:\n continue\n if getattr(value, '__module__', None) == obj.__name__:\n if not name.startswith('_'):\n public.append(name)\n return public",
"def get_object_members(self, want_all):\r\n analyzed_member_names = set()\r\n if self.analyzer:\r\n attr_docs = self.analyzer.find_attr_docs()\r\n namespace = \".\".join(self.objpath)\r\n for item in attr_docs.items():\r\n if item[0][0] == namespace:\r\n analyzed_member_names.add(item[0][1])\r\n if not want_all:\r\n if not self.options.members:\r\n return False, []\r\n # specific members given\r\n members = []\r\n for mname in self.options.members:\r\n try:\r\n members.append((mname, self.get_attr(self.object, mname)))\r\n except AttributeError:\r\n if mname not in analyzed_member_names:\r\n logger.warning(\r\n \"[sphinxcontrib-matlabdomain] missing attribute %s in object %s\",\r\n mname,\r\n self.fullname,\r\n )\r\n elif self.options.inherited_members:\r\n # safe_getmembers() uses dir() which pulls in members from all\r\n # base classes\r\n members = inspect.get_members(self.object, attr_getter=self.get_attr)\r\n else:\r\n # __dict__ contains only the members directly defined in\r\n # the class (but get them via getattr anyway, to e.g. get\r\n # unbound method objects instead of function objects);\r\n # using keys() because apparently there are objects for which\r\n # __dict__ changes while getting attributes\r\n try:\r\n obj_dict = self.get_attr(self.object, \"__dict__\")\r\n except AttributeError:\r\n members = []\r\n else:\r\n members = [\r\n (mname, self.get_attr(self.object, mname, None))\r\n for mname in list(obj_dict.keys())\r\n ]\r\n membernames = set(m[0] for m in members)\r\n # add instance attributes from the analyzer\r\n for aname in analyzed_member_names:\r\n if aname not in membernames and (want_all or aname in self.options.members):\r\n members.append((aname, INSTANCEATTR))\r\n return False, sorted(members)",
"def all_values(cls) -> List[str]:\n return list(member.value for member in cls.__members__.values())",
"def allowed_class_vars(self):\n\n\n self.allowed_vars = [\n 'hfMode',\n 'lqCN',\n 'lqCF',\n 'lqPN',\n 'lqPF',\n 'lqCNmode',\n 'lqCFmode',\n 'lqPNmode',\n 'lqPFmode',\n 'S',\n 'SMode',\n 'fracCN',\n 'fracCF',\n 'fracPN',\n 'fracPF',\n 'fracUI',\n 'fracUO',\n 'fracLI',\n 'fracLO',\n 'Pinj',\n 'coreRadFrac',\n 'qBG',\n 'fG',\n 'qFilePath',\n 'qFileTag',\n ]\n return",
"def get_members():",
"def all_names(cls) -> List[str]:\n return list(member_name for member_name in cls.__members__.keys())",
"def all_unrecognized_fields(self):\n return list(self.__unrecognized_fields.keys())",
"def __listAttr(self):\n attr = dir(self) # already sorted\n filter = []\n for name in attr:\n if name[:2] == '__': pass\n elif name[:10] == '_HelpDoc__': pass # used to mask private attr\n elif name in self.__exclude: pass\n else: filter.append(name)\n return filter",
"def scanvars(reader, frame, locals):\n import tokenize\n import keyword\n vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__\n for ttype, token, start, end, line in tokenize.generate_tokens(reader):\n if ttype == tokenize.NEWLINE:\n break\n if ttype == tokenize.NAME and token not in keyword.kwlist:\n if lasttoken == '.':\n if parent is not __UNDEF__:\n value = getattr(parent, token, __UNDEF__)\n vars.append((prefix + token, prefix, value))\n else:\n where, value = lookup(token, frame, locals)\n vars.append((token, where, value))\n elif token == '.':\n prefix += lasttoken + '.'\n parent = value\n else:\n parent, prefix = None, ''\n lasttoken = token\n return vars",
"def variables_declared (self) :\r\n\t\tresult = {}\r\n\r\n\t\tfor var in self.variables :\r\n\t\t\tresult[var.name.upper()] = var\r\n\t\t\r\n\t\treturn result",
"def get_empty_fields(self):\n return [f for f in self.__dict__ if not self.__dict__[f]]",
"def _set_var_ignore(self):\n self._var_ignore = [k for k in self.__dict__.keys() if k[0] != '_']",
"def get_all_variables(self):\n return []",
"def _member_field_names_for_protect(self, protect):\n result = []\n fields = self.MEMBER_DEFAULT_FIELDS.copy()\n fields.update((k,v) for k,v in self.SUPPLEMENTARY_FIELDS.iteritems() if (v['OBJECT'] == 'MEMBER'))\n for (name, spec) in fields.iteritems():\n if spec['PROTECT'] == protect:\n result.append(name)\n return result",
"def varNames(self):\n return self.__varNames",
"def get_public_members(self, obj):\n def isprivate(name):\n if name[0] == '_':\n return True\n else:\n return False\n\n public_members = {}\n for k, v in inspect.getmembers(obj):\n if not isprivate(k):\n public_members[k] = v\n return public_members"
] | [
"0.65596557",
"0.65094596",
"0.64906734",
"0.6424824",
"0.6148797",
"0.61362445",
"0.59760153",
"0.58430034",
"0.5842859",
"0.57547677",
"0.5730297",
"0.5725713",
"0.56944275",
"0.56628054",
"0.56521",
"0.5649057",
"0.5615413",
"0.56144106",
"0.5612419",
"0.5602482",
"0.5580459",
"0.55596024",
"0.5551836",
"0.55481464",
"0.55205894",
"0.55184627",
"0.5506418",
"0.55023664",
"0.55001056",
"0.54976714"
] | 0.6909306 | 0 |
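The filtering in the row above is driven by a set of predicates (type, method, and Boost checks) plus a mangled-name test. Below is a simplified, dependency-free sketch of the same idea for ordinary Python objects: it drops the Logger and the Boost-specific buckets, collapses the predicates into isinstance/inspect checks, and the `Particle` class is purely illustrative.

```python
import inspect
from typing import Any, List


def simple_member_variables(obj: Any) -> List[str]:
    """Keep attributes that are not dunders, not types, and not callables."""
    keep: List[str] = []
    for attr in dir(obj):
        if attr.startswith("__"):
            continue  # mangled / dunder names
        try:
            value = getattr(obj, attr)
        except RuntimeError:
            continue  # mirrors the "invalid_attr" bucket in the positive document
        if isinstance(value, type) or inspect.isroutine(value):
            continue  # types and methods are discarded
        keep.append(attr)
    return keep


class Particle:
    species = "mu-"

    def __init__(self) -> None:
        self.energy = 10.0
        self.zenith = 1.2

    def describe(self) -> str:
        return f"{self.species} E={self.energy} GeV"


assert sorted(simple_member_variables(Particle())) == ["energy", "species", "zenith"]
```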
Cast `obj`, and any members/elements, to pure-python classes. The function takes any object `obj` and tries to cast it to a pure python class. This is mainly relevant for IceCube-specific classes (I3) that cannot be cast trivially. For IceCube-specific classes, we check whether the object has any member variables and, if it does, we recursively try to cast these to pure python. Similarly, if an IceCube-specific class has a signature similar to a python list or dict (e.g., it has a length and supports indexation), we cast it to the corresponding pure python equivalent and recursively try to cast its elements. For regular-python, non-IceCube-specific classes, we cast list-like objects to list and dict-like objects to dict, and otherwise return the object itself if it is deemed "pythonic" in this way. | def cast_object_to_pure_python(obj: Any) -> Any:
logger = Logger()
logger.debug(f"Value: {obj}")
logger.debug(f"Type: {str(type(obj))}")
if not is_icecube_class(obj):
logger.debug("Found non-I3 class. Exiting.")
if isinstance(obj, (list, tuple, set)):
return [cast_object_to_pure_python(element) for element in obj]
elif isinstance(obj, dict):
return {
str(key): cast_object_to_pure_python(value)
for key, value in obj.items()
}
else:
return obj
(
member_variables,
discarded_member_variables,
) = get_member_variables(obj, return_discarded=True)
logger.debug(f"Found the following member variables: {member_variables}")
logger.debug(
"Discarded the following member variables: "
f"{discarded_member_variables}"
)
# Has valid member variables -- stick to these, then.
results = {}
if len(member_variables) > 0:
for attr in member_variables:
value = getattr(obj, attr)
logger.debug(
f"Calling `extract` on valid member attribute: {attr}"
)
result = cast_object_to_pure_python(value)
results[attr] = result
# Dict-like
if hasattr(obj, "items"):
# Call function again
results_dict = cast_object_to_pure_python(dict(obj))
assert "_dict" not in results
results["_dict"] = results_dict
# List-like
elif hasattr(obj, "__len__") and hasattr(obj, "__getitem__"):
# Call function again
results_list = cast_object_to_pure_python(list(obj))
assert "_list" not in results
results["_list"] = results_list
# If `obj` has no actual member variables, but is otherwise python
# dict- or list-like, there is no need to wrap the data in a single-
# key dict.
if list(results.keys()) == ["_dict"]:
results = results.pop("_dict")
elif list(results.keys()) == ["_list"]:
results = results.pop("_list")
if len(results) == 0:
logger.warning(
f"Cannot extract any information to pure python from {obj}"
)
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def class_casting(obj: object, cls: type):\n orig_cls = obj.__class__\n obj.__class__ = cls\n yield\n obj.__class__ = orig_cls",
"def ns_from_py(pyobj):\n\n if isinstance(pyobj, enum.Enum):\n pyobj = pyobj.value\n\n # Many Objective-C method calls here use the convert_result=False kwarg to\n # disable automatic conversion of return values, because otherwise most of\n # the Objective-C objects would be converted back to Python objects.\n if pyobj is None or isinstance(pyobj, ObjCInstance):\n return pyobj\n elif isinstance(pyobj, str):\n return ObjCInstance(\n NSString.stringWithUTF8String_(pyobj.encode(\"utf-8\"), convert_result=False)\n )\n elif isinstance(pyobj, bytes):\n return ObjCInstance(NSData.dataWithBytes(pyobj, length=len(pyobj)))\n elif isinstance(pyobj, decimal.Decimal):\n return ObjCInstance(\n NSDecimalNumber.decimalNumberWithString_(\n pyobj.to_eng_string(), convert_result=False\n )\n )\n elif isinstance(pyobj, dict):\n dikt = NSMutableDictionary.dictionaryWithCapacity(len(pyobj))\n for k, v in pyobj.items():\n dikt.setObject(v, forKey=k)\n return dikt\n elif isinstance(pyobj, list):\n array = NSMutableArray.arrayWithCapacity(len(pyobj))\n for v in pyobj:\n array.addObject(v)\n return array\n elif isinstance(pyobj, bool):\n return ObjCInstance(NSNumber.numberWithBool_(pyobj, convert_result=False))\n elif isinstance(pyobj, int):\n return ObjCInstance(NSNumber.numberWithLong_(pyobj, convert_result=False))\n elif isinstance(pyobj, float):\n return ObjCInstance(NSNumber.numberWithDouble_(pyobj, convert_result=False))\n else:\n raise TypeError(\n f\"Don't know how to convert a {type(pyobj).__module__}.{type(pyobj).__qualname__} to a Foundation object\"\n )",
"def cast(object, class_, instanceof=object, *args, **kwargs):\n\n\tobject = copy(object)\n\tif isinstance(object, instanceof):\n\t\tobject.__class__ = class_\n\t\tobject.__init__(*args, **kwargs)\n\telse:\n\t\traise TypeError(\"Object is not an instance of {}\".format(instanceof.__name__))\n\treturn object",
"def obj_as_class(obj, new_cls, *args, **kwargs):\n obj_typ = type(obj)\n if obj_typ is bool:\n # HURF DURF MY NAME IS PYTHON AND I CAN'T SUBCLASS bool.\n obj_typ = int\n\n class _Class(obj_typ, new_cls):\n __doc__ = new_cls.__doc__\n\n def __init__(self, obj, *args, **kwargs):\n obj_typ.__init__(self, obj)\n new_cls.__init__(self, *args, **kwargs)\n def __new__(cls, obj, *args, **kwargs):\n return obj_typ.__new__(cls, obj)\n\n\n return _Class(obj, *args, **kwargs)",
"def fl_get_object_objclass(ptr_flobject):\n _fl_get_object_objclass = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_object_objclass\", \\\n cty.c_int, [cty.POINTER(xfdata.FL_OBJECT)], \\\n \"\"\"int fl_get_object_objclass(FL_OBJECT * obj) \"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.keep_elem_refs(ptr_flobject)\n retval = _fl_get_object_objclass(ptr_flobject)\n return retval",
"def get_orig_class(obj, default_to__class__=False):\r\n try:\r\n # See https://github.com/Stewori/pytypes/pull/53:\r\n # Returns `obj.__orig_class__` protecting from infinite recursion in `__getattr[ibute]__`\r\n # wrapped in a `checker_tp`.\r\n # (See `checker_tp` in `typechecker._typeinspect_func for context)\r\n # Necessary if:\r\n # - we're wrapping a method (`obj` is `self`/`cls`) and either\r\n # - the object's class defines __getattribute__\r\n # or\r\n # - the object doesn't have an `__orig_class__` attribute\r\n # and the object's class defines __getattr__.\r\n # In such a situation, `parent_class = obj.__orig_class__`\r\n # would call `__getattr[ibute]__`. But that method is wrapped in a `checker_tp` too,\r\n # so then we'd go into the wrapped `__getattr[ibute]__` and do\r\n # `parent_class = obj.__orig_class__`, which would call `__getattr[ibute]__`\r\n # again, and so on. So to bypass `__getattr[ibute]__` we do this:\r\n return object.__getattribute__(obj, '__orig_class__')\r\n except AttributeError:\r\n if sys.version_info.major >= 3:\r\n cls = object.__getattribute__(obj, '__class__')\r\n else:\r\n # Python 2 may return instance objects from object.__getattribute__.\r\n cls = obj.__class__\r\n if is_Generic(cls):\r\n # Workaround for https://github.com/python/typing/issues/658\r\n stck = stack()\r\n # Searching from index 2 is sufficient: At 0 is get_orig_class, at 1 is the caller.\r\n # We assume the caller is not typing._GenericAlias.__call__ which we are after.\r\n for line in stck[2:]:\r\n try:\r\n res = line[0].f_locals['self']\r\n if res.__origin__ is cls:\r\n return res\r\n except (KeyError, AttributeError):\r\n pass\r\n if default_to__class__:\r\n return cls # Fallback\r\n raise",
"def get_orig_class(obj, default_to__class__=False):\n try:\n # See https://github.com/Stewori/pytypes/pull/53:\n # Returns `obj.__orig_class__` protecting from infinite recursion in `__getattr[ibute]__`\n # wrapped in a `checker_tp`.\n # (See `checker_tp` in `typechecker._typeinspect_func for context)\n # Necessary if:\n # - we're wrapping a method (`obj` is `self`/`cls`) and either\n # - the object's class defines __getattribute__\n # or\n # - the object doesn't have an `__orig_class__` attribute\n # and the object's class defines __getattr__.\n # In such a situation, `parent_class = obj.__orig_class__`\n # would call `__getattr[ibute]__`. But that method is wrapped in a `checker_tp` too,\n # so then we'd go into the wrapped `__getattr[ibute]__` and do\n # `parent_class = obj.__orig_class__`, which would call `__getattr[ibute]__`\n # again, and so on. So to bypass `__getattr[ibute]__` we do this:\n return object.__getattribute__(obj, '__orig_class__')\n except AttributeError:\n if sys.version_info.major >= 3:\n cls = object.__getattribute__(obj, '__class__')\n else:\n # Python 2 may return instance objects from object.__getattribute__.\n cls = obj.__class__\n if _typing_3_7 and is_Generic(cls):\n # Workaround for https://github.com/python/typing/issues/658\n # Searching from index 2 is sufficient: At 0 is get_orig_class, at 1 is the caller.\n # We assume the caller is not typing._GenericAlias.__call__ which we are after.\n frame = currentframe().f_back.f_back\n try:\n while frame:\n try:\n res = frame.f_locals['self']\n if res.__origin__ is cls:\n return res\n except (KeyError, AttributeError):\n frame = frame.f_back\n finally:\n del frame\n\n if default_to__class__:\n return cls # Fallback\n raise",
"def _deep_type(obj, checked, checked_len, depth = None, max_sample = None, get_type = None):\n if depth is None:\n depth = pytypes.default_typecheck_depth\n if max_sample is None:\n max_sample = pytypes.deep_type_samplesize\n if -1 != max_sample < 2:\n max_sample = 2\n if get_type is not None:\n res = get_type(obj)\n else:\n try:\n res = get_orig_class(obj, True)\n except AttributeError:\n res = type(obj)\n if depth == 0 or util._is_in(obj, checked[:checked_len]):\n return res\n elif not util._is_in(obj, checked[checked_len:]):\n checked.append(obj)\n # We must operate with a consistent checked list for one certain depth level\n # to avoid issues with a list, tuple, dict, etc containing the same element\n # multiple times. This could otherwise be misconcepted as a recursion.\n # Using a fake len checked_len2 ensures this. Each depth level operates with\n # a common fake length of checked list:\n checked_len2 = len(checked)\n if res == tuple:\n res = Tuple[tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) for t in obj)]\n elif res == list:\n if len(obj) == 0:\n return Empty[List]\n if max_sample == -1 or max_sample >= len(obj)-1 or len(obj) <= 2:\n tpl = tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) for t in obj)\n else:\n # In case of lists I somehow feel it's better to ensure that\n # first and last element are part of the sample\n sample = [0, len(obj)-1]\n try:\n rsmp = random.sample(xrange(1, len(obj)-1), max_sample-2)\n except NameError:\n rsmp = random.sample(range(1, len(obj)-1), max_sample-2)\n sample.extend(rsmp)\n tpl = tuple(_deep_type(obj[t], checked, checked_len2, depth-1, None, get_type) for t in sample)\n res = List[Union[tpl]]\n elif res == dict:\n if len(obj) == 0:\n return Empty[Dict]\n if max_sample == -1 or max_sample >= len(obj)-1 or len(obj) <= 2:\n try:\n # We prefer a view (avoid copy)\n tpl1 = tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) \\\n for t in obj.viewkeys())\n tpl2 = tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) \\\n for t in obj.viewvalues())\n except AttributeError:\n # Python 3 gives views like this:\n tpl1 = tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) for t in obj.keys())\n tpl2 = tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) for t in obj.values())\n else:\n try:\n kitr = iter(obj.viewkeys())\n vitr = iter(obj.viewvalues())\n except AttributeError:\n kitr = iter(obj.keys())\n vitr = iter(obj.values())\n ksmpl = []\n vsmpl = []\n block = (len(obj) // max_sample)-1\n # I know this method has some bias towards beginning of iteration\n # sequence, but it's still more random than just taking the\n # initial sample and better than O(n) random.sample.\n while len(ksmpl) < max_sample:\n if block > 0:\n j = random.randint(0, block)\n k = random.randint(0, block)\n while j > 0:\n next(vitr) # discard\n j -= 1\n while k > 0:\n next(kitr) # discard\n k -= 1\n ksmpl.append(next(kitr))\n vsmpl.append(next(vitr))\n tpl1 = tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) for t in ksmpl)\n tpl2 = tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) for t in vsmpl)\n res = Dict[Union[tpl1], Union[tpl2]]\n elif res == set or res == frozenset:\n if res == set:\n typ = Set\n else:\n typ = FrozenSet\n if len(obj) == 0:\n return Empty[typ]\n if max_sample == -1 or max_sample >= len(obj)-1 or len(obj) <= 2:\n tpl = tuple(_deep_type(t, checked, depth-1, None, None, get_type) for t in obj)\n else:\n itr = 
iter(obj)\n smpl = []\n block = (len(obj) // max_sample)-1\n # I know this method has some bias towards beginning of iteration\n # sequence, but it's still more random than just taking the\n # initial sample and better than O(n) random.sample.\n while len(smpl) < max_sample:\n if block > 0:\n j = random.randint(0, block)\n while j > 0:\n next(itr) # discard\n j -= 1\n smpl.append(next(itr))\n tpl = tuple(_deep_type(t, checked, depth-1, None, None, get_type) for t in smpl)\n res = typ[Union[tpl]]\n elif res == types.GeneratorType:\n res = get_generator_type(obj)\n elif sys.version_info.major == 2 and isinstance(obj, types.InstanceType):\n # For old-style instances return the actual class:\n return obj.__class__\n elif _has_base(res, Container) and len(obj) == 0:\n return Empty[res]\n elif hasattr(res, '__origin__') and _has_base(res.__origin__, Container) and len(obj) == 0:\n return Empty[res.__origin__]\n return res",
"def _cast(obj):\n if isinstance(obj, Future):\n return obj\n else:\n return NonFuture(obj)",
"def instance_to_type(o):\n t = type(o)\n if o is None:\n return type(None)\n elif t == pvalue.Row:\n return row_type.RowTypeConstraint.from_fields([\n (name, instance_to_type(value)) for name, value in o.as_dict().items()\n ])\n elif t not in typehints.DISALLOWED_PRIMITIVE_TYPES:\n # pylint: disable=bad-option-value\n if t == BoundMethod:\n return types.MethodType\n return t\n elif t == tuple:\n return typehints.Tuple[[instance_to_type(item) for item in o]]\n elif t == list:\n if len(o) > 0:\n return typehints.List[typehints.Union[[\n instance_to_type(item) for item in o\n ]]]\n else:\n return typehints.List[typehints.Any]\n elif t == set:\n if len(o) > 0:\n return typehints.Set[typehints.Union[[\n instance_to_type(item) for item in o\n ]]]\n else:\n return typehints.Set[typehints.Any]\n elif t == frozenset:\n if len(o) > 0:\n return typehints.FrozenSet[typehints.Union[[\n instance_to_type(item) for item in o\n ]]]\n else:\n return typehints.FrozenSet[typehints.Any]\n elif t == dict:\n if len(o) > 0:\n return typehints.Dict[\n typehints.Union[[instance_to_type(k) for k, v in o.items()]],\n typehints.Union[[instance_to_type(v) for k, v in o.items()]],\n ]\n else:\n return typehints.Dict[typehints.Any, typehints.Any]\n else:\n raise TypeInferenceError('Unknown forbidden type: %s' % t)",
"def from_obj(cls, obj: any) -> Objdict:\n # CASE: list. Convert each item in the list.\n if isinstance(obj, list):\n value = [cls.from_obj(item) for item in obj]\n\n # CASE: dictionary. Convert each item in the dictionary.\n elif isinstance(obj, dict):\n d = {k: cls.from_obj(v) for k, v in obj.items()}\n value = cls(**d)\n\n # CASE: basic number or string. Use the item \"as is\"\n elif (\n isinstance(obj, str)\n or isinstance(obj, Number)\n or isinstance(obj, date)\n or obj is None\n ):\n value = obj\n\n # CASE: object with an internal dictionary. Treat like a dictionary.\n elif hasattr(obj, \"__dict__\"):\n value = cls.from_obj(obj.__dict__)\n\n # OTHERWISE: we need to figure it out.\n else:\n raise DocumentException(f\"Objdict.from_dict: can't convert value {obj}\")\n\n return value",
"def instantiate(obj):\n return obj() if isinstance(obj, type) else obj",
"def build(self, obj):\n if isinstance(obj, self.art_type):\n return obj\n elif isinstance(obj, (tuple, list, dict, set)):\n if obj.__class__ is tuple:\n return self.build_tuple(obj)\n elif obj.__class__ is dict:\n return self.build_dict(obj)\n elif obj.__class__ is list:\n return self.build_list(obj)\n else:\n return self.build_set(obj)\n elif isinstance(obj, SageObject):\n return self.build_from_magic_method(obj)\n else:\n return self.build_from_string(obj)",
"def _isinstance(obj, cls, bound_Generic=None, bound_typevars=None,\n bound_typevars_readonly=False, follow_fwd_refs=True, _recursion_check=None):\n if bound_typevars is None:\n bound_typevars = {}\n # Special treatment if cls is Iterable[...]\n if is_Generic(cls) and cls.__origin__ is typing.Iterable:\n if not is_iterable(obj):\n return False\n itp = get_iterable_itemtype(obj)\n if itp is None:\n return True\n else:\n return _issubclass(itp, cls.__args__[0], bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check)\n if is_Generic(cls) and cls.__origin__ is typing.Iterator:\n if not is_iterator(obj):\n return False\n itp = get_iterable_itemtype(obj)\n if itp is None:\n return True\n else:\n return _issubclass(itp, cls.__args__[0], bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check)\n if is_Callable(cls):\n return _isinstance_Callable(obj, cls, bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check)\n return _issubclass(deep_type(obj), cls, bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check)",
"def to_list(obj, list_cls=list):\n if obj is None:\n return list_cls()\n if isinstance(obj, list_cls):\n return obj\n if isinstance(obj, (unicode, str)):\n return list_cls((obj,))\n if isinstance(obj, (list, tuple, set, frozenset)) or hasattr(obj, '__iter__'):\n return list_cls(obj)\n return list_cls((obj,))",
"def typed(\n _cls_or_callable: Union[Callable, Type[object]] = None, *, delay: bool = False\n):\n\n def _typed(obj: Union[Type, Callable]):\n _annotations_ = {\"return\": obj}\n typed.__annotations__.update(_annotations_)\n if inspect.isclass(obj):\n typed_class.__annotations__.update(_annotations_)\n return typed_class(obj, delay=delay)\n elif isinstance(obj, Callable):\n typed_callable.__annotations__.update(_annotations_)\n return typed_callable(obj, delay=delay)\n else:\n raise TypeError(\n f\"{__name__} requires a callable or class. Provided: {type(obj)}: {obj}\"\n )\n\n return _typed(_cls_or_callable) if _cls_or_callable is not None else _typed",
"def _type_realize(space, py_obj):\n # missing:\n # unsupported:\n # tp_mro, tp_subclasses\n py_type = rffi.cast(PyTypeObjectPtr, py_obj)\n\n if not py_type.c_tp_base:\n # borrowed reference, but w_object is unlikely to disappear\n base = as_pyobj(space, space.w_object)\n py_type.c_tp_base = rffi.cast(PyTypeObjectPtr, base)\n\n finish_type_1(space, py_type)\n\n if py_type.c_ob_type:\n w_metatype = from_ref(space, rffi.cast(PyObject, py_type.c_ob_type))\n else:\n # Somehow the tp_base type is created with no ob_type, notably\n # PyString_Type and PyBaseString_Type\n # While this is a hack, cpython does it as well.\n w_metatype = space.w_type\n\n w_obj = space.allocate_instance(W_PyCTypeObject, w_metatype)\n track_reference(space, py_obj, w_obj)\n # __init__ wraps all slotdefs functions from py_type via add_operators\n w_obj.__init__(space, py_type)\n w_obj.ready()\n\n finish_type_2(space, py_type, w_obj)\n base = py_type.c_tp_base\n if base:\n # XXX refactor - parts of this are done in finish_type_2 -> inherit_slots\n if not py_type.c_tp_as_number:\n py_type.c_tp_as_number = base.c_tp_as_number\n py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_CHECKTYPES\n py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS\n if not py_type.c_tp_as_sequence:\n py_type.c_tp_as_sequence = base.c_tp_as_sequence\n py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS\n if not py_type.c_tp_as_mapping:\n py_type.c_tp_as_mapping = base.c_tp_as_mapping\n #if not py_type.c_tp_as_buffer: py_type.c_tp_as_buffer = base.c_tp_as_buffer\n\n return w_obj",
"def construct(obj):\n if isinstance(obj, OrderedDict):\n new_obj = OrderedDict()\n for key, value in obj.items():\n new_obj[key] = construct(value)\n elif not isinstance(obj, OrderedDict) and isinstance(obj, dict):\n new_obj = dict()\n for key, value in obj.items():\n new_obj[key] = construct(value)\n elif isinstance(obj, list):\n new_obj = list()\n for value in obj:\n new_obj.append(construct(value))\n elif isinstance(obj, tuple):\n base = list()\n for value in obj:\n base.append(construct(value))\n new_obj = tuple(base)\n elif isinstance(obj, str):\n new_obj = str(obj)\n elif isinstance(obj, (int, float, complex, type(None))) or inspect.isclass(obj):\n new_obj = obj\n else:\n raise TypeError(\"Object of unsupported type was passed to construct function: %s\" % type(obj))\n return new_obj",
"def py_from_ns(nsobj):\n\n if isinstance(nsobj, (objc_id, Class)):\n nsobj = ObjCInstance(nsobj)\n if not isinstance(nsobj, ObjCInstance):\n return nsobj\n\n if nsobj.isKindOfClass(NSDecimalNumber):\n return decimal.Decimal(str(nsobj.descriptionWithLocale(None)))\n elif nsobj.isKindOfClass(NSNumber):\n # Choose the property to access based on the type encoding. The actual\n # conversion is done by ctypes. Signed and unsigned integers are in\n # separate cases to prevent overflow with unsigned long longs.\n objc_type = nsobj.objCType\n if objc_type == b\"B\":\n return nsobj.boolValue\n elif objc_type in b\"csilq\":\n return nsobj.longLongValue\n elif objc_type in b\"CSILQ\":\n return nsobj.unsignedLongLongValue\n elif objc_type in b\"fd\":\n return nsobj.doubleValue\n else:\n raise TypeError(\n f\"NSNumber containing unsupported type {objc_type!r} \"\n \"cannot be converted to a Python object\"\n )\n elif nsobj.isKindOfClass(NSString):\n return str(nsobj)\n elif nsobj.isKindOfClass(NSData):\n # Despite the name, string_at converts the data at the address to a\n # bytes object, not str.\n return string_at(\n send_message(nsobj, \"bytes\", restype=POINTER(c_uint8), argtypes=[]),\n nsobj.length,\n )\n elif nsobj.isKindOfClass(NSDictionary):\n return {py_from_ns(k): py_from_ns(v) for k, v in nsobj.items()}\n elif nsobj.isKindOfClass(NSArray):\n return [py_from_ns(o) for o in nsobj]\n else:\n return nsobj",
"def _py2java(gateway, obj):\n if isinstance(obj, RDD):\n obj = _to_java_object_rdd(obj)\n elif isinstance(obj, DataFrame):\n obj = obj._jdf\n elif isinstance(obj, SparkContext):\n obj = obj._jsc\n elif isinstance(obj, SQLContext):\n obj = obj._jsqlContext\n elif isinstance(obj, (list, tuple)):\n obj = ListConverter().convert([_py2java(gateway, x) for x in obj],\n gateway._gateway_client)\n elif isinstance(obj, dict):\n result = {}\n for (key, value) in obj.items():\n result[key] = _py2java(gateway, value)\n obj = MapConverter().convert(result, gateway._gateway_client)\n elif isinstance(obj, JavaValue):\n obj = obj.value\n elif isinstance(obj, JavaObject):\n pass\n elif isinstance(obj, (int, long, float, bool, bytes, unicode)):\n pass\n else:\n data = bytearray(PickleSerializer().dumps(obj))\n obj = gateway.jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.loads(data)\n return obj",
"def to_py_value(v):\n if isinstance(v, (list, tuple, JavaArray)):\n return list(map(to_py_value, v))\n if isinstance(v, (JavaObject,)):\n j_cls_name = v.getClass().getCanonicalName()\n for rule in to_py_rules:\n if j_cls_name == rule[0]:\n return rule[1](v)\n raise ValueError(\"Unexpected JavaObject value of type: \" + j_cls_name)\n return v",
"def get_obj_class(self, obj_type: str) -> Type[TgnObject]:\n pass",
"def _make_type_proxy(obj, dct):\n class TypeProxyMeta(type(obj)):\n def __instancecheck__(cls, x):\n return isinstance(x, obj)\n\n def __subclasscheck__(cls, x):\n return issubclass(x, obj)\n\n # Allow calling the class as usual, which is necessary to\n # use factory classmethod that return new instances\n # (alternative constructors).\n __call__ = obj.__call__\n\n class TypeProxyBase(metaclass=TypeProxyMeta):\n pass\n\n try:\n class TypeProxy(obj, TypeProxyBase):\n pass\n # If we cannot inherit from the class (like bool), pick the first base\n # class that is suitable. That is a tad ugly but better than nothing\n except TypeError:\n # Make sure we get all the methods as on the original type we\n # wanted to subclass\n dct = {**dict(inspect.getmembers(obj)), **dct}\n for obj_ in inspect.getmro(obj):\n try:\n class TypeProxy(obj_, TypeProxyBase):\n pass\n except TypeError:\n continue\n else:\n break\n\n for attr, val in dct.items():\n with contextlib.suppress(TypeError, AttributeError):\n setattr(TypeProxy, attr, val)\n\n TypeProxy.__name__ = obj.__name__\n TypeProxy.__qualname__ = obj.__qualname__\n return TypeProxy",
"def as_pyobj(space, w_obj, w_userdata=None, immortal=False):\n assert not is_pyobj(w_obj)\n if w_obj is not None:\n py_obj = w_obj._cpyext_as_pyobj(space)\n if not py_obj:\n py_obj = create_ref(space, w_obj, w_userdata, immortal=immortal)\n #\n # Try to crash here, instead of randomly, if we don't keep w_obj alive\n ll_assert(py_obj.c_ob_refcnt >= rawrefcount.REFCNT_FROM_PYPY,\n \"Bug in cpyext: The W_Root object was garbage-collected \"\n \"while being converted to PyObject.\")\n return py_obj\n else:\n return lltype.nullptr(PyObject.TO)",
"def make_json_compatible(obj_):\n if isinstance(obj_, (numbers.Number, str, bool)) or obj_ is None:\n # these are handled as is\n return obj_\n elif isinstance(obj_, collections.Mapping):\n return {\n make_json_compatible(k): make_json_compatible(v)\n for k, v in obj_.items()\n }\n elif isinstance(obj_, (collections.Iterable, collections.Set)):\n return [make_json_compatible(v) for v in obj_]\n elif isinstance(obj_, (datetime.datetime, datetime.date)):\n return obj_.isoformat()\n\n raise NotImplementedError(\"Dont know how to handle objects of type {}\".format(type(obj_)))",
"def honor_type(obj, generator):\n # Some objects may not be able to instantiate from a generator directly\n if is_namedtuple(obj):\n return type(obj)(*list(generator))\n else:\n return type(obj)(generator)",
"def get_objects_rednode(obj):\n from redbaron import RedBaron\n # walk til the first 'locals'\n # Example __qualname__: 'TestClassNodeConv.test_get_datamodel.<locals>.T'\n parent = inspect.getmodule(obj)\n for name in obj.__class__.__qualname__.split('.'):\n if name == '<locals>':\n break\n parent = getattr(parent, name)\n\n try:\n # try to find the source code with traditional means by using inspect, this may faile as it requires class to be defined in a file (not true fro REPL or Notebook)\n # if fails use IPYTHON history\n try:\n parent_code = inspect.getsourcelines(parent)[0]\n\n # monkeypatch the inspect module to use 'parent code' as input for searching the class code (else it searches full file)\n with patch('inspect.linecache.getlines', MagicMock(return_value=parent_code)):\n source = textwrap.dedent(inspect.getsource(obj.__class__))\n\n red_list = RedBaron(source)\n return red_list[0]\n\n except TypeError:\n # try finding the class from local IPYTHON input history\n from IPython import get_ipython\n ipython = get_ipython()\n ipython.run_cell_magic(\"capture\", \"out_var\", \"%history\")\n out_var = str(ipython.ev('out_var'))\n\n # filter up to the last occurance of class def\n import re\n lines = str(out_var).splitlines()\n pat = re.compile(r'^(\\s*)class\\s*' + obj.__class__.__name__ + r'\\b')\n\n last_match = -1\n for i in range(len(lines)):\n match = pat.match(lines[i])\n if match:\n last_match = i\n\n if last_match == -1:\n raise Exception('Class was not found at all...')\n out_var = '\\n'.join(lines[last_match:])\n\n with tempfile.NamedTemporaryFile(mode='w+') as temp:\n temp.write(out_var)\n temp.flush()\n with patch('inspect.getfile', MagicMock(return_value=temp.name)):\n source = textwrap.dedent(inspect.getsource(obj.__class__))\n red_list = RedBaron(source)\n logger.warning(f'Found \"{obj.__class__.__name__}\" source from IPython history!')\n return red_list[0]\n except:\n # This is due to the Inspect needing to open a file...\n # could be a bit relaxed with https://github.com/uqfoundation/dill/issues?utf8=%E2%9C%93&q=getsource, but this only works in regular REPL, not Ipython nor Notebook...\n raise Exception(f'Could not fetch \"{obj.__class__}\" source code (also tried loading from IPython history).')",
"def _serialize(obj):\n if obj is None:\n return None\n # obj is a namedtuple \"class\"\n elif _is_namedtuple(obj):\n return list(obj._fields)\n # obj is a list or a tuple\n return list(obj)",
"def isinstancemethod(cls, obj):\n return _isinstancemethod(cls, obj)",
"def object_to_bytes(obj):\n if isinstance(obj, str):\n return bytearray(obj, \"UTF-8\")\n elif isinstance(obj, bool):\n return bytearray()\n elif isinstance(obj, int):\n return pack(\"<L\", obj)\n elif obj == None:\n return bytearray()\n elif isinstance(obj, bytearray):\n return obj\n else:\n #print type(obj), obj\n return obj.get_raw()"
] | [
"0.6468261",
"0.6169947",
"0.60216856",
"0.5780408",
"0.5687424",
"0.5681913",
"0.56698275",
"0.5660798",
"0.5552916",
"0.5541389",
"0.5449535",
"0.5432965",
"0.5367629",
"0.5346508",
"0.53237635",
"0.530361",
"0.52975327",
"0.52757865",
"0.5227139",
"0.5226663",
"0.51880336",
"0.5172647",
"0.5129751",
"0.51291406",
"0.51105404",
"0.50468534",
"0.5046581",
"0.5029452",
"0.5017624",
"0.501595"
] | 0.7611115 | 0 |
Ensure values are <= max brightness and != stop byte | def _check_values(self, rgb_array):
for i, value in enumerate(rgb_array):
if value > self.brightness_limit:
rgb_array[i] = self.brightness_limit
if value == self.STOP_BYTE:
rgb_array[i] -= 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_channel_value(value: int) -> None:\n if 0 <= value <= 255:\n pass\n else:\n raise ValueError(\"Color channel has to be in range [0; 255]\")",
"def is_0to255(value):\n return 0 <= value <= 255",
"def verify_brightness_value(brightness):\n\n check_value_is_number_type(brightness)\n\n if brightness < 0 or brightness > 1:\n raise ValueError(\"Brightness value must be within range [0, 1]. Value causing error: \" + str(brightness))",
"def checkRGBRange(value):\n\ttry:\n\t\tv = int(value)\n\t\tif 0 <= v <= 255:\n\t\t\treturn v\n\texcept:\n\t\treturn 255",
"def test_change_brightness_back_to_10():",
"def validate(c_name, val):\n n = 80\n threshold = 4\n while (threshold >= 0):\n if ((len(channels[c_name]) > n) and (val <= threshold)):\n return True\n else:\n n -= 20\n threshold -= 1\n\n return False",
"def brightness(value):\n value = int(value)\n if value < 1 or value > 254:\n raise ValueError('Minimum brightness is 1, to the maximum 254')\n return value",
"def test_change_brightness_of_the_device_false():",
"def black_level(arr, max_num, level=0.1):\r\n arr = arr.astype(np.int16)\r\n src = arr\r\n arr = list(np.hstack(arr))\r\n per = arr.count(0)/len(arr)\r\n if max_num > 10:\r\n level = 0.3\r\n if per < level or max_num > 15:\r\n return True\r\n else:\r\n return False",
"def test_value_max(self):\n self.assertEqual(DPTValue1Ucount().to_knx(255), (0xFF,))\n self.assertEqual(DPTValue1Ucount().from_knx((0xFF,)), 255)",
"def set_brightness(distance):\n if math.floor(distance / 100) - 1 >= 0 and math.floor(distance / 100) - 1 <= 9:\n return 9 - (math.floor(distance / 100) - 1)\n elif math.floor(distance / 100) - 1 >= 0:\n return 1\n else:\n return 9",
"def high_bri(self):\r\n for light in self.lights:\r\n bri = self.b.get_light(light,'bri')\r\n bri = bri + 50 \r\n if bri > 255:\r\n bri = 255 \r\n self.b.set_light(light,'bri',bri)",
"def normalize_val(val, min_v, max_v):\n return (((val - min_v) / (max_v - min_v)) * 255).astype(np.uint8)",
"def bright(self,l):\n if 1 <= l <= 4:\n self.send(\"\\x1f\\x58%c\" % l)\n else:\n raise ValueError('brightness values have to be between 1 and 4')",
"def checkValue(c, m, y, k):\n MINVAL=0\n MAXVAL=255\n valueOk=True\n for val in c, m, y, k:\n if val >=MINVAL and val <=255:\n pass\n else:\n valueOk=False\n \n return valueOk",
"def check_if_white_back_black_edge(pred):\n values = np.unique(pred)\n # print(values)\n\n # check if binary\n if len(values) > 2:\n print(\"Your prediction result has not been binarized, please prompt them to choose the appropriate threshold for binarization.\")\n raise ValueError\n\n white_pos = np.where(pred == 255)\n # print(len(white_pos[0]))\n white_count = len(white_pos[0])\n black_pos = np.where(pred == 0)\n # print(len(black_pos[0]))\n black_count = len(black_pos[0])\n # print(black_count / white_count)\n rate = black_count / white_count\n if rate < 5:\n print(\"The results must be submitted with white background and black edge. Please submit after correction.\")\n raise ValueError",
"def find_dark_object_value(arr):\n preval = None\n step = arr.max() #/ 255.0\n for val in np.unique(arr)[:100]:\n if val == 0:\n continue\n if preval is not None and (val - preval) < step:\n break\n else:\n preval = val\n return preval",
"def __hsv_threshold(input, hue, sat, val):\n out = cv2.cvtColor(input, cv2.COLOR_BGR2HSV)\n return cv2.inRange(out, (hue[0], sat[0], val[0]), (hue[1], sat[1], val[1]))",
"def __hsv_threshold(input, hue, sat, val):\n out = cv2.cvtColor(input, cv2.COLOR_BGR2HSV)\n return cv2.inRange(out, (hue[0], sat[0], val[0]), (hue[1], sat[1], val[1]))",
"def __hsl_threshold(input, hue, sat, lum):\r\n out = cv2.cvtColor(input, cv2.COLOR_BGR2HLS)\r\n return cv2.inRange(out, (hue[0], lum[0], sat[0]), (hue[1], lum[1], sat[1]))",
"def _verify_rgba_value(self, val):\n e = \"All RGBA color values must be integers between 0 and 255 (got {0})\"\n try:\n float(val)\n except (ValueError, TypeError):\n raise TypeError(e.format(val))\n if val < 0 or val > 255:\n raise ValueError(e.format(val))",
"def min_brightness(self):\n return .0",
"def test_xmax_set(self):\n\t\tdetails = self.watcher.analyze(layers=[17], xmax=-1)\n\t\tactual_alpha = details.alpha.to_numpy()[0]\n\t\texpected_alpha = 3.0\n\t\tself.assertAlmostEqual(actual_alpha,expected_alpha, delta=0.1 )",
"def constrain_rgb(rgb: ndarray) -> bool:\n w = - min(0, *rgb) # Amount of white needed\n if w > 0:\n rgb += w # Add just enough white to make r, g, b all positive\n return True # Colour modified to fit RGB gamut\n return False # Colour within RGB gamut",
"def __hsl_threshold(input, hue, sat, lum):\n out = cv2.cvtColor(input, cv2.COLOR_BGR2HLS)\n return cv2.inRange(out, (hue[0], lum[0], sat[0]), (hue[1], lum[1], sat[1]))",
"def minmax(value):\n return min(0xff, max(0, value))",
"def cut_scifi_event(data_dict, event) :\n digits = event.digits()\n saturation_counter = 0\n\n for digit in digits :\n if digit.get_adc() == 255 :\n saturation_counter += 1\n\n if saturation_counter > 1000 :\n return True\n\n return False",
"def the_changed_brightness_should_be_reflected_in_the_state_10():\n assert web_app.get_state()\n assert web_app.check_value_in_state(\"brightness\",\"10\")",
"def __hsv_threshold(input, hue, sat, val):\n out = cv2.cvtColor(input, cv2.COLOR_BGR2HSV)\n return cv2.inRange(out, (hue[0], sat[0], val[0]), (hue[1], sat[1], val[1]))",
"def normalize_depth(val, min_v, max_v):\n return (((max_v - val) / (max_v - min_v)) * 255).astype(np.uint8)"
] | [
"0.6506269",
"0.648789",
"0.632641",
"0.6291771",
"0.6192554",
"0.6170371",
"0.6110706",
"0.6030551",
"0.5996378",
"0.59669286",
"0.5962",
"0.59381783",
"0.58866835",
"0.5861975",
"0.5832914",
"0.5733476",
"0.5728271",
"0.5698746",
"0.5698746",
"0.5675874",
"0.56753343",
"0.565005",
"0.5642973",
"0.56393486",
"0.5629954",
"0.5624794",
"0.5616839",
"0.56064385",
"0.56029576",
"0.5586179"
] | 0.77700883 | 0 |
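The positive document in the record above clamps each channel of an RGB buffer to a brightness limit and keeps values off the protocol's stop byte. Below is a minimal, self-contained sketch of that idea; `BRIGHTNESS_LIMIT` and `STOP_BYTE` are illustrative constants, not values taken from the record.

```python
# Illustrative sketch of the clamp-and-avoid-stop-byte check from the record above.
# The constants are assumptions for the example, not values from the dataset.
BRIGHTNESS_LIMIT = 200
STOP_BYTE = 0x7E

def check_values(rgb_array):
    """Cap each channel at BRIGHTNESS_LIMIT and nudge it off STOP_BYTE, in place."""
    for i, value in enumerate(rgb_array):
        if value > BRIGHTNESS_LIMIT:
            rgb_array[i] = BRIGHTNESS_LIMIT
        if rgb_array[i] == STOP_BYTE:
            rgb_array[i] -= 1
    return rgb_array

print(check_values([10, 255, 0x7E]))  # -> [10, 200, 125] with these constants
```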
Install emacs with some features in a python 2.7 environment | def install_p2k():
if 'pkgs' not in env:
env.pkgs = []
pkgs = [
'python2',
'git',
'mercurial',
'emacs',
# For flymake
'xmlstarlet',
#'csslint-git',
]
require.arch.packages(pkgs)
python_cmd = 'python2.7'
virtualenv = '.virtualenvs/emacs_p2k'
require.python.pip(python_cmd=python_cmd)
require.python.package(
'virtualenv',
python_cmd=python_cmd,
use_sudo=True,
)
require.python.package(
'virtualenvwrapper',
python_cmd=python_cmd,
use_sudo=True,
)
require.python.virtualenv(
virtualenv,
python_cmd=python_cmd,
venv_python='python2.7',
)
with python.virtualenv(virtualenv):
here = os.path.dirname(__file__)
requirements = '%(here)s/requirements.txt' % locals()
put(requirements, '/tmp/requirements.txt')
require.python.requirements(
'/tmp/requirements.txt',
)
# Synchronize user
dotfiles.sync('fabrecipes/emacs/emacs_p2k/user/', '$HOME/')
dotfiles.sync('fabrecipes/emacs/emacs_p2k/sys/', '/', use_sudo='true') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup_zxpy_repl() -> None:\n print(\"zxpy shell\")\n print(\"Python\", sys.version)\n print()\n\n install()",
"def develop():\n# Install package in development mode\n sh('python setup.py develop')",
"def open_in_emacs_command(event):\n c = event.get('c')\n if c:\n open_in_emacs_helper(c, c.p)",
"def set_dev(session):\n set_environment_variables(PYBAMM_ENV, session=session)\n envbindir = session.bin\n session.install(\"-e\", \".[all]\")\n session.install(\"cmake\")\n if sys.platform == \"linux\" or sys.platform == \"darwin\":\n session.run(\n \"echo\",\n \"export\",\n f\"LD_LIBRARY_PATH={PYBAMM_ENV['LD_LIBRARY_PATH']}\",\n \">>\",\n f\"{envbindir}/activate\",\n external=True, # silence warning about echo being an external command\n )",
"def test_emacs(image):\n ctx = Context()\n container_name = test_utils.get_container_name(\"emacs\", image)\n test_utils.start_container(container_name, image, ctx)\n\n # Make sure the following emacs sanity tests exit with code 0\n test_utils.run_cmd_on_container(container_name, ctx, \"which emacs\")\n test_utils.run_cmd_on_container(container_name, ctx, \"emacs -version\")",
"def install():\n build()\n sh(\"%s setup.py develop\" % PYTHON)",
"def install_step(self):\n\n# if LooseVersion(self.version) < LooseVersion('2012-10-05'):\n\tif (False):\n self.inchworm()\n self.chrysalis()\n self.kmer()\n self.butterfly()\n\n bwapluginver = self.cfg['bwapluginver']\n if bwapluginver:\n self.trinityplugin('bwa-%s-patched_multi_map' % bwapluginver)\n\n if self.cfg['RSEMmod']:\n self.trinityplugin('RSEM-mod', cc=os.getenv('CXX'))\n\n else:\n self.jellyfish()\n\n inchworm_flags = self.inchworm(run=False)\n chrysalis_flags = self.chrysalis(run=False)\n\n cc = os.getenv('CC')\n cxx = os.getenv('CXX')\n\n lib_flags = \"\"\n for lib in ['ncurses', 'zlib']:\n libroot = get_software_root(lib)\n if libroot:\n lib_flags += \" -L%s/lib\" % libroot\n\n fn = \"Makefile\"\n for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):\n\n line = re.sub(r'^(INCHWORM_CONFIGURE_FLAGS\\s*=\\s*).*$', r'\\1%s' % inchworm_flags, line)\n line = re.sub(r'^(CHRYSALIS_MAKE_FLAGS\\s*=\\s*).*$', r'\\1%s' % chrysalis_flags, line)\n line = re.sub(r'(/rsem && \\$\\(MAKE\\))\\s*$',\n r'\\1 CC=%s CXX=\"%s %s\" CFLAGS_EXTRA=\"%s\"\\n' % (cc, cxx, lib_flags, lib_flags), line)\n line = re.sub(r'(/fastool && \\$\\(MAKE\\))\\s*$',\n r'\\1 CC=\"%s -std=c99\" CFLAGS=\"%s ${CFLAGS}\"\\n' % (cc, lib_flags), line)\n\n sys.stdout.write(line)\n\n trinity_compiler = None\n comp_fam = self.toolchain.comp_family()\n if comp_fam in [toolchain.INTELCOMP]:\n trinity_compiler = \"intel\"\n elif comp_fam in [toolchain.GCC]:\n trinity_compiler = \"gcc\"\n else:\n self.log.error(\"Don't know how to set TRINITY_COMPILER for %s compiler\" % comp_fam)\n\n cmd = \"make TRINITY_COMPILER=%s\" % trinity_compiler\n run_cmd(cmd)\n\n # butterfly is not included in standard build\n self.butterfly()\n\n # remove sample data if desired\n if not self.cfg['withsampledata']:\n try:\n shutil.rmtree(os.path.join(self.cfg['start_dir'], 'sample_data'))\n except OSError, err:\n self.log.error(\"Failed to remove sample data: %s\" % err)",
"def build_essential(self):\n self.install_package(\"build-essential\")",
"def dev():\n\n # Python build headers.\n packages = [\n 'python3-setuptools',\n 'python3-dev',\n 'python3-tk',\n 'python-setuptools',\n 'python-dev',\n 'python-tk',\n ]\n\n sudo('apt-get -y install {}'.format(' '.join(packages)))",
"def develop():\n dev_packages = [\n 'pytest', 'pytest-xdist', 'pytest-pep8', 'tox', 'httpie'\n ]\n if not path.exists(\"env\"):\n fab.local(\"virtualenv -p /usr/bin/python3 env\")\n fab.local(\"env/bin/pip install --upgrade pip setuptools\")\n fab.local(\"env/bin/python setup.py develop\")\n fab.local(\"env/bin/pip install {}\".format(\" \".join(dev_packages)))",
"def installDevelopmentPackageDependencies():\n sudo('DEBIAN_FRONTEND=noninteractive '\n 'apt-get install -y gcc python-all-dev')",
"def setup_develop():\n workon = '.'\n if VENVWRAPPER:\n workon=os.getenv(\"WORKON_HOME\")\n cmd = '{workon}/{env}/bin/python setup.py develop'.format(\n envs=ENVS, env=VENV, workon=workon)\n print(cmd)\n subprocess.call(cmd.split())",
"def load_emacs_open_in_editor_bindings():\n registry = Registry()\n\n registry.add_binding(Keys.ControlX, Keys.ControlE,\n filter=EmacsMode() & ~HasSelection())(\n get_by_name('edit-and-execute-command'))\n\n return registry",
"def install() -> None:\n # Get locals from parent frame\n frames = inspect.getouterframes(inspect.currentframe())\n if len(frames) > 1:\n parent_frame = frames[1]\n parent_locals = parent_frame.frame.f_locals\n locals().update(parent_locals)\n\n # For tab completion and arrow key support\n readline.parse_and_bind(\"tab: complete\")\n\n command = ''\n continued_command = False\n while True:\n try:\n if continued_command:\n command += '\\n'\n else:\n command = ''\n\n prompt = '... ' if continued_command else '>>> '\n new_input = input(prompt)\n\n if new_input != '':\n command += new_input\n else:\n continued_command = False\n\n except KeyboardInterrupt:\n print()\n continue\n\n except EOFError:\n print()\n sys.exit(0)\n\n if continued_command:\n continue\n\n try:\n ast_obj = ast.parse(command, '<input>', 'single')\n except SyntaxError:\n try:\n code_obj = code.compile_command(command)\n if code_obj is None:\n continued_command = True\n continue\n\n except BaseException:\n traceback.print_exc()\n continue\n\n assert isinstance(ast_obj, ast.Interactive)\n patch_shell_commands(ast_obj)\n\n try:\n code_obj = compile(ast_obj, '<input>', 'single')\n assert code_obj is not None\n exec(code_obj)\n\n except SystemExit as e:\n sys.exit(e.code)\n\n except BaseException:\n traceback.print_exc()",
"def install_python():\n _require_environment()\n # TODO: find a better criteria for when to use apt-get update\n if not files.exists('/usr/bin/python'):\n apt_get_update()\n # TODO: Install Python 2.7.3 from source, regardless of Linux distribution\n sudo('apt-get -y -qq install python python2.6 python2.6-dev pkg-config gcc')\n sudo('apt-get -y -qq install python-setuptools')\n sudo('easy_install virtualenv')\n sudo('easy_install pip')\n sudo('pip install virtualenvwrapper')\n with settings(warn_only=True):\n sudo(_interpolate('mkdir %(workon)s'))\n sudo(_interpolate('chmod g+w %(workon)s'))\n sudo(_interpolate('chown %%(user)s:%%(user)s %(workon)s') % env)",
"def install():\n sudo('apt-get install python')",
"def setup_machine():\n # Initial setup and package install.\n sudo(\"aptitude update\")\n sudo(\"aptitude -y install git-core python-dev python-setuptools \"\n \"postgresql-dev postgresql-client build-essential \"\n \"libpq-dev subversion mercurial apache2 \"\n \"libapache2-mod-wsgi\")",
"def load_emacs_open_in_editor_bindings() -> KeyBindings:\n key_bindings = KeyBindings()\n\n key_bindings.add(\"c-x\", \"c-e\", filter=emacs_mode & ~has_selection)(\n get_by_name(\"edit-and-execute-command\")\n )\n\n return key_bindings",
"def install_guest_additions():\n vbox = Vbox(env.vm_name)\n print('Starting up to install guest additions...')\n with vbox as session:\n session.wait_for_ssh()\n session.install_guest_additions()",
"def before_packages(manager):\n if manager not in b.packages:\n return\n if 'apt' == manager:\n s.add('export APT_LISTBUGS_FRONTEND=\"none\"')\n s.add('export APT_LISTCHANGES_FRONTEND=\"none\"')\n s.add('export DEBIAN_FRONTEND=\"noninteractive\"')\n s.add('apt-get -q update')\n elif 'yum' == manager:\n s.add('yum makecache')",
"def install():\n remote_egg_path = os.path.join(remote_egg_dir, get_egg_name())\n sudo('easy_install -U %s' % remote_egg_path)\n sudo('rm %s' % remote_egg_path)",
"def ubuntu_add():\n gmsh_installed = shutil.which('gmsh')\n if not gmsh_installed:\n print('Installing gmsh')\n command_line = \"sudo apt-get install gmsh\"\n subprocess.check_call(command_line, shell=True)\n else:\n print('gmsh present')\n ccx_installed = shutil.which('ccx')\n if not ccx_installed:\n print('Installing calculix (ccx)')\n command_line = \"sudo apt-get install calculix-ccx\"\n subprocess.check_call(command_line, shell=True)\n else:\n print('calculix (ccx) present')",
"def setup_cappa():\n with cd('/vagrant'):\n sudo('python setup.py install')",
"def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()",
"def cli(ctx, shell, install):\n name = ctx.find_root().info_name\n envvar = '_%s_COMPLETE' % name.upper().replace('-', '_')\n if shell == 'auto':\n try:\n import psutil\n shell = psutil.Process(os.getpid()).parent().name()\n except ImportError:\n raise click.ClickException(\"psutil must be installed to detect the shell automatically.\")\n if shell == 'fish':\n code = click_fish.get_completion_script(name, envvar)\n path = os.environ['HOME'] + '/.config/fish/completions/%s.fish' % name\n mode = 'w'\n elif shell == 'bash':\n code = click._bashcomplete.get_completion_script(name, envvar)\n path = os.environ['HOME'] + '/.bash_completion'\n mode = 'a'\n elif shell == 'zsh':\n compinit = 'autoload -U compinit && compinit'\n bashcompinit = 'autoload -U bashcompinit && bashcompinit'\n complete = click._bashcomplete.get_completion_script(name, envvar)\n code = '\\n'.join([compinit, bashcompinit, complete])\n path = os.environ['HOME'] + '/.zshrc'\n mode = 'a'\n else:\n raise click.ClickException('%s is not supported.' % shell)\n if install:\n d = os.path.dirname(path)\n if not os.path.exists(d):\n os.makedirs(d)\n f = open(path, mode)\n f.write(code)\n f.write(\"\\n\")\n f.close()\n click.echo('%s completion installed in %s' % (shell, path))\n else:\n click.echo(code)",
"def install(i):\n\n cm_kernel.print_for_con('***********************************************')\n cm_kernel.print_for_con('Installing code ...')\n\n # Check vars\n if 'target_os_uoa' not in i: return {'cm_return':1, 'cm_error':'\"target_os_uoa\" is not defined in \"code install\"'}\n\n # Create entry\n ii={'cm_run_module_uoa':ini['cm_module_uid'],\n 'cm_action':'update'}\n if 'install_data_uid' in i and i['install_data_uid']!='': \n ii['cm_data_uid']=i['install_data_uid']\n if 'install_data_alias' in i and i['install_data_alias']!='': \n ii['cm_data_uoa']=i['install_data_alias']\n if 'install_data_display_as_alias' in i: \n ii['cm_display_as_alias']=i['install_data_display_as_alias']\n if 'install_module_uoa' in i and i['install_module_uoa']!='':\n ii['cm_run_module_uoa']=i['install_module_uoa']\n if 'cm_array' in i and len(i['cm_array'])>0: ii['cm_array']=i['cm_array']\n if 'install_repo_uoa' in i and i['install_repo_uoa']!='': \n ii['cm_repo_uoa']=i['install_repo_uoa']\n r=cm_kernel.access(ii)\n if r['cm_return']>0: return r\n\n target_path=r['cm_path']\n target_uid=r['cm_uid']\n target_alias=r['cm_alias']\n\n # Prepare script\n rx=get_env({'cm_data_uoa':target_uid,\n 'os_uoa':i['target_os_uoa']})\n if rx['cm_return']>0: return rx\n\n script=rx['cm_string']\n\n ii={'script_name':script,\n 'skip_extension':'yes',\n 'target_os_uoa':i['target_os_uoa'],\n 'cm_path':target_path}\n if 'code_deps' in i and i.get('skip_code_deps','')!='yes':\n ii['code_deps']=i['code_deps']\n\n # Add remark about how code was built\n if 'add_rem_to_script' in i:\n run_commands_before=[]\n run_commands_before.append('')\n for x in i['add_rem_to_script']:\n run_commands_before.append(x)\n ii['run_commands_before']=run_commands_before\n\n rx=prepare_script(ii)\n if rx['cm_return']>0: return rx\n\n r['script_name']=rx['cm_path']\n r['script_filename']=script\n\n return r",
"def install_deps():\n dist = check_distribution()\n if dist == Distribution.TEXLIVE:\n texlive_install_deps()\n elif dist == Distribution.MIKTEX:\n miktex_install_deps()\n\n install_pygments()",
"def setup_completion(shell, show_code):\n click.echo('Setup completion for shell {!r}'.format(shell))\n\n if show_code:\n code = click_completion.get_code(shell=shell)\n click.echo('Installing code: \\n{}'.format(code))\n\n shell_, path = click_completion.install(shell=shell)\n click.secho('Installed completion in path {!r}'.format(path))",
"def bootstrap():\n validate_configurator_version()\n\n # put new mkinitcpio.conf in place\n run(\"mv /etc/mkinitcpio.conf.pacnew /etc/mkinitcpio.conf\")\n sed(\"/etc/mkinitcpio.conf\",\n 'MODULES=\"\"',\n 'MODULES=\"xen-blkfront xen-fbfront xen-kbdfront xen-netfront xen-pcifront xenbus_probe_frontend xenfs\"') # nopep8\n sed(\"/etc/mkinitcpio.conf\",\n 'HOOKS=\"base udev autodetect modconf block filesystems keyboard fsck',\n 'HOOKS=\"base udev block filesystems shutdown autodetect\"')\n\n # upgrade pacakges\n run(\"pacman --noconfirm -Syu\")\n\n # put new pacman.conf in place\n run(\"mv /etc/pacman.conf.pacnew /etc/pacman.conf\")\n\n # install essential packages\n run(\"pacman --noconfirm -S base-devel\")\n run(\"pacman --noconfirm -S curl git rsync\")\n\n # create a user, named 'aur', to safely install AUR packages under fakeroot\n # uid and gid values auto increment from 1000\n # to prevent conficts set the 'aur' user's gid and uid to 902\n run(\"groupadd -g 902 aur && useradd -m -u 902 -g 902 -G wheel aur\")\n\n # allow users in the wheel group to sudo without a password\n uncomment(\"/etc/sudoers\", \"wheel.*NOPASSWD\")\n\n # install yaourt and upgrade non-pacman rackspace installed packages\n sudo(\"rm -rf /home/aur/.builds && mkdir /home/aur/.builds/\", user=\"aur\")\n with cd(\"/home/aur/.builds/\"):\n sudo(\"bash <(curl aur.sh) -si --noconfirm package-query yaourt\", user=\"aur\")\n sudo(\"yaourt --noconfirm -S xe-guest-utilities\", user=\"aur\")\n\n # allow fabric to sftp with contrib.files.put\n # http://stackoverflow.com/questions/10221839/cant-use-fabric-put-is-there-any-server-configuration-needed # nopep8\n # change before reboot because then the sshd config will be reloaded\n # sed(\"/etc/ssh/sshd_config\", \"Subsystem sftp /usr/lib/openssh/sftp-server\",\n # \"Subsystem sftp internal-sftp\")\n\n # systemd\n sed(\"/boot/grub/menu.lst\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0 init=/usr/lib/systemd/systemd\")\n reboot()\n if not contains(\"/proc/1/comm\", \"systemd\"):\n abort(\"systemd is not installed properly\")\n server = [s for s in env.bootmachine_servers if s.public_ip == env.host][0]\n run(\"hostnamectl set-hostname {0}\".format(server.name))\n run(\"mv /etc/locale.gen.pacnew /etc/locale.gen.conf\")\n uncomment(\"/etc/locale.gen\", \"en_US.UTF-8 UTF-8\")\n uncomment(\"/etc/locale.gen\", \"en_US ISO-8859-1\")\n run(\"locale-gen\")\n run(\"localectl set-locale LANG='en_US.utf8'\")\n run(\"timedatectl set-timezone US/Central\")",
"def AptInstall(vm):\n vm.Install('build_tools')\n vm.InstallPackages(APT_PACKAGES)"
] | [
"0.594541",
"0.58539075",
"0.5628848",
"0.5576539",
"0.5572069",
"0.5543689",
"0.5543153",
"0.54723585",
"0.54654664",
"0.54646444",
"0.54574805",
"0.54236794",
"0.5378128",
"0.5352251",
"0.53416926",
"0.53223884",
"0.5298473",
"0.5297451",
"0.5256946",
"0.5242978",
"0.5230459",
"0.5199704",
"0.5165573",
"0.5142145",
"0.5126281",
"0.51229954",
"0.51213294",
"0.5103575",
"0.5092366",
"0.50744677"
] | 0.67693573 | 0 |
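The `install_p2k` document above relies on Fabric/fabtools helpers (`require.arch.packages`, `require.python.virtualenv`, `python.virtualenv`, ...) to provision a remote Arch host. As a rough, local-only approximation of the same steps using only the standard library, something like the sketch below could work; the package-manager call, package list, and paths are assumptions, not the record's actual configuration.

```python
import subprocess
from pathlib import Path

# Assumed stand-ins for the fabtools calls in the record above.
PACKAGES = ["python2", "git", "mercurial", "emacs", "xmlstarlet"]
VENV = Path.home() / ".virtualenvs" / "emacs_p2k"

def run(cmd):
    """Echo and run a command, raising on failure."""
    print("+", " ".join(cmd))
    subprocess.check_call(cmd)

def provision():
    run(["sudo", "pacman", "-S", "--needed", "--noconfirm", *PACKAGES])  # Arch packages
    run(["python2.7", "-m", "pip", "install", "--user", "virtualenv"])   # assumes pip exists for 2.7
    run(["virtualenv", "-p", "python2.7", str(VENV)])
    run([str(VENV / "bin" / "pip"), "install", "-r", "requirements.txt"])

if __name__ == "__main__":
    provision()
```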
Create creates a set session | async def create(
self, *, header: Optional[headers.RequestHeader] = None
) -> CreateResponse:
request = CreateRequest()
if header is not None:
request.header = header
return await self._unary_unary(
"/atomix.set.SetService/Create", request, CreateResponse,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create(data):\n \n return Setlist(\n list_id = data['id'],\n name = data['name'],\n items = data['num_sets'])",
"def new_set(*, ctx: context.ContextLevel, **kwargs) -> irast.Set:\n ir_set = irast.Set(**kwargs)\n ctx.all_sets.append(ir_set)\n return ir_set",
"def test_create_session(self):\n study_id = self.storage.create_study(sample_study_spec())\n\n session = sample_session(study_id=study_id)\n self.storage.create_session(session)\n\n self.assertEqual(self.storage.get_session(study_id, session.id), session)",
"def create():",
"def create():",
"def test_set_session():",
"def create_default_set():\n stock1 = Stock('HD', '25', '247.29')\n stock2 = Stock('TWTR', '230', '31.89')\n stock3 = Stock('DIS', '65', '118.77')\n database.session.add(stock1)\n database.session.add(stock2)\n database.session.add(stock3)\n database.session.commit()",
"def create_session(self, **params):\n raise NotImplementedError('Should be implemented by a sub-class.')",
"def setup(session: 'Session') -> None:\n\n create_many(session, LEVEL_NAMES, commit=False)",
"def create(self, class_name, attrs, session):",
"def create_session():\n session = Session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()",
"def make_new_session():\n session = Session.objects.create(uuid=str(uuid4()), container_id=None)\n return session.id",
"def get_or_create(session, name, chat, user):\n name = name.lower()\n sticker_set = session.query(StickerSet).get(name)\n if not sticker_set:\n # Create a task for adding a sticker.\n # This task will be processed by a job, since adding a sticker can take quite a while\n sticker_set = StickerSet(name, None)\n sticker_set.international = user.international\n task = Task(Task.SCAN_SET, sticker_set=sticker_set, chat=chat, user=user)\n session.add(sticker_set)\n session.add(task)\n # Error handling: Retry in case somebody sent to stickers at the same time\n try:\n session.commit()\n except IntegrityError as e:\n session.rollback()\n sticker_set = session.query(StickerSet).get(name)\n if sticker_set is None:\n raise e\n\n return sticker_set",
"def test_create_session(self):\n finder = FinderInsidePro(self.test_key)\n session_id = finder.create_session(2811)\n assert isinstance(session_id, str)\n assert session_id == finder.session_id\n assert len(session_id)",
"async def create(self, session, *, dc=None):\n response = await self._api.put(\n \"/v1/session/create\",\n data=session,\n params={\"dc\": dc})\n return response.body",
"def create_session(self):\n # TODO refactor bids_import pipeline to use same functions as dcm2bids below. To be done in different PR though\n if self.verbose:\n print(\"Creating visit \" + self.visit_label\n + \" for CandID \" + self.cand_id)\n\n column_names = ('CandID', 'Visit_label', 'CenterID', 'Current_stage')\n values = (self.cand_id, self.visit_label, str(self.center_id), 'Not Started')\n\n if self.project_id:\n column_names = column_names + ('ProjectID',)\n values = values + (str(self.project_id),)\n\n if self.cohort_id:\n column_names = column_names + ('CohortID',)\n values = values + (str(self.cohort_id),)\n\n self.db.insert(\n table_name='session',\n column_names=column_names,\n values=values\n )\n\n loris_session_info = self.get_session_info_from_loris()\n\n return loris_session_info",
"def create_session(self, transport):\n session_id = self.session_id_allocator.allocate()\n session = self.SESSION_CLS(self, transport, session_id)\n self.sessions[session.id] = session\n return session",
"def create_session(\n path: str,\n type: str,\n name: Optional[str] = None,\n kernel_name: Optional[str] = None,\n kernel_id: Optional[str] = None,\n) -> str:\n ...",
"def create_session(self):\n self._session = self.create_scoped_session()\n self.session = self._session()",
"def create(self):",
"def create_new_test_session(name):\n now=datetime.datetime.now()\n is_over=0\n sql=\"INSERT INTO sessions(update_time,is_over,name,total_coverage) VALUES(?,?,?,?)\"\n execute_query(sql,(now,is_over,name,\"0\"))\n active_session=get_active_test_session()\n active_files=get_active_files()\n for f in active_files:\n sql=\"INSERT INTO sessions_files VALUES(?,?)\"\n execute_query(sql,(active_session[0],f[0]))",
"def test_ctor_no_cookie(self):\n request = self._make_request()\n session = self._makeOne(request)\n session_dict = session.managed_dict\n self.assertDictEqual(session_dict, {})\n self.assertIs(session.new, True)",
"def create_session(self,session_id,host_id,host_name,spotify_token):\n self.sessions[session_id] = {\n \"HOST\" : {\n \"ID\" : host_id,\n \"NAME\" : host_name,\n \"spotify_token\" : spotify_token,\n \"spotify_player\": None,\n },\n \"queue\" : [],\n \"queue_lock\" : False,\n \"current_track\" : \"\",\n \"previous_track\" : \"\",\n \"USERS\" : {}\n }",
"async def create_session(session: SessionModel, mongo: MongoDB = mongodb) -> SessionOutModel:\n if not await mongo.session_coll.find_one({\"id\": session.id}):\n await mongo.session_coll.insert_one(session.dict())\n else:\n await mongo.session_coll.update_one({\"id\": session.id}, {'$set': {'status': session.status}})\n return SessionOutModel(**session.dict())",
"def create_new_session(sessions, segmeta):\n # Find an available session id\n new_sid = 0\n while new_sid in [s[0].meta.sessionid for s in sessions.values()]:\n new_sid += 1\n # Create meta and fill in information of the file\n meta = MetaInfo(segmeta.filename, segmeta.segmentid, new_sid)\n sp = snc_parameters(meta.segsize, 0.01, 16, 64, 1280, BAND_SNC, 1, 1, 0, -1)\n meta.set_snc_params(sp)\n # Fork a child process and build pipe between parent and child\n session = Session(meta)\n (fdp, fdc) = mp.Pipe()\n session.fdp = fdp\n session.fdc = fdc\n logging.info(\"New session created, ID: %d \" % (new_sid,))\n print(session.meta)\n # Fork a process to serve the clients of the session\n child = mp.Process(target=session.main)\n child.start()\n session.fdc.close() # Close parent's fdc\n sessions[(segmeta.filename, segmeta.segmentid)] = (session, child)\n return session",
"def testSessionCreate(self):\n success = False\n attr = None\n\n try:\n attr = self.session.create_visit_attr()\n\n success = True\n except Exception:\n pass\n\n self.failUnless(success)\n self.failIf(attr is None)",
"def _insert_new_session():\n request = self._make_request()\n session_existing = self._set_up_session_in_Redis_and_makeOne( # noqa: F841\n request, session_id, session_dict={\"visited\": True}, **session_args\n )\n return request",
"def set(self, session):\n raise InvalidSessionException('Need to be implemented')",
"def create():\n pass",
"def _create_session(self, xnat_login, subject_id, visit_id):\n uri = ('/data/archive/projects/{}/subjects/{}/experiments/{}'\n .format(self.inputs.project_id, subject_id, visit_id))\n query = {'xsiType': 'xnat:mrSessionData', 'label': visit_id,\n 'req_format': 'qa'}\n response = xnat_login.put(uri, query=query)\n if response.status_code not in (200, 201):\n raise NiAnalysisError(\n \"Could not create session '{}' in subject '{}' in project '{}'\"\n \" response code {}\"\n .format(visit_id, subject_id, self.inputs.project_id,\n response))\n return xnat_login.classes.MrSessionData(uri=uri,\n xnat_session=xnat_login)"
] | [
"0.6151142",
"0.6048097",
"0.60076725",
"0.5988856",
"0.5988856",
"0.5877816",
"0.5837743",
"0.5801947",
"0.57588005",
"0.572035",
"0.57137334",
"0.5670384",
"0.56422436",
"0.5623634",
"0.5616179",
"0.5612067",
"0.5610329",
"0.5587627",
"0.5562947",
"0.5547399",
"0.550339",
"0.5480935",
"0.54631096",
"0.54613054",
"0.5454255",
"0.5454175",
"0.54532063",
"0.5433916",
"0.54100585",
"0.5397613"
] | 0.62500304 | 0 |
Close closes a set | async def close(
self, *, header: Optional[headers.RequestHeader] = None, delete: bool = False
) -> CloseResponse:
request = CloseRequest()
if header is not None:
request.header = header
request.delete = delete
return await self._unary_unary(
"/atomix.set.SetService/Close", request, CloseResponse,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def close_changeset(self):\n pass",
"def _close_result_set(self):\n if self._result_set:\n self._result_set.close(self.session)\n self._result_set = None",
"def close_file(self, data_set):\n if hasattr(data_set, '_h5_base_group'):\n data_set._h5_base_group.close()\n # Removes reference to closed file\n del data_set._h5_base_group\n else:\n logging.warning(\n 'Cannot close file, data_set has no open hdf5 file')",
"def _close( self ):\n for sji in self._sji_data:\n sji.close()",
"def close():",
"def delete_set(set_name):\n\n flg = logging.getLogger(\"lettuce.xgenSetup.delete_set\")\n\n flg.info(\"Set to delete: {}\".format(set_name))\n\n if mc.objExists(set_name):\n mc.select(set_name)\n old_objects = mc.ls(selection=True)\n flg.debug(\"Old Objects:\")\n for o in old_objects:\n flg.debug(o)\n ref_objects = mc.ls(selection=True, referencedNodes=True)\n\n ref_del_queue = []\n if len(ref_objects) > 0:\n flg.debug(\"Old Reference Nodes:\")\n for o in ref_objects:\n flg.debug(o)\n for o in ref_objects:\n flg.debug(\"Queuing {} for reference removal\".format(o))\n top = mc.referenceQuery(o, referenceNode=True)\n ref_del_queue.append(top)\n if len(ref_del_queue):\n for o in ref_del_queue:\n flg.debug(\"Removing reference: {}\".format(o))\n ref_file = mc.referenceQuery(o, filename=True)\n mc.file(ref_file, removeReference=True)\n for o in old_objects:\n try:\n flg.debug(\"Deleting {}\".format(o))\n mc.delete(o)\n except ValueError as e:\n flg.debug(\"Unable to delete {0}. Error: {1}\".format(o, e))\n flg.debug(\"Deleting set: {}\".format(set_name))\n mc.delete(set_name)",
"def close(self):\n for k in self._shelf:\n self._shelf[k]._shelf.close()",
"def close(self):",
"def close(self):",
"def close(self):",
"def close(self):",
"def close(self):",
"def close(self):",
"def close(self):",
"def close(self):",
"def close(self):",
"def close(self):",
"def close(self):\n total = sum(len(lineset) for lineset in self.linesets)\n duplicated = 0\n stats = self.stats\n for num, couples in self._compute_sims():\n msg = []\n lineset = idx = None\n for lineset, idx in couples:\n msg.append(\"==%s:%s\" % (lineset.name, idx))\n msg.sort()\n\n if lineset:\n for line in lineset._real_lines[idx : idx + num]:\n msg.append(line.rstrip())\n\n self.add_message(\"R0801\", args=(len(couples), \"\\n\".join(msg)))\n duplicated += num * (len(couples) - 1)\n stats[\"nb_duplicated_lines\"] = duplicated\n stats[\"percent_duplicated_lines\"] = total and duplicated * 100.0 / total",
"def Close(self):",
"def close(self):\n self.closed = True\n for cursor in self.cursors:\n try:\n cursor.close()\n except exceptions.Error:\n pass # already closed",
"def closeAll(self):\n\t\tself.ignore = 1\n\t\tself.selectAll(self.tree.GetRootItem())\n\t\tself.ignore = 0\n\t\tself.onCloseDataset(\"\")",
"def close(self) -> None:",
"def close(self) -> None:",
"def close(self) -> None:",
"def close(self) -> None:",
"def close(self) -> None:",
"def close(self) -> None:",
"def close(self) -> None:",
"def close(self) -> None:",
"def delete_set(self, item): # TODO test\n tree = item.parent\n item_label = item.parent_node\n tree.remove_node(item)\n tree.remove_node(item_label)\n self.exercise.sets.remove(item.set)\n print(\"delete set\")"
] | [
"0.6684206",
"0.6438695",
"0.6179789",
"0.5959591",
"0.5912371",
"0.5891763",
"0.5832663",
"0.58226657",
"0.58226657",
"0.58226657",
"0.58226657",
"0.58226657",
"0.58226657",
"0.58226657",
"0.58226657",
"0.58226657",
"0.58226657",
"0.58129406",
"0.58081174",
"0.5796722",
"0.5792065",
"0.57896537",
"0.57896537",
"0.57896537",
"0.57896537",
"0.57896537",
"0.57896537",
"0.57896537",
"0.57896537",
"0.57797074"
] | 0.64467335 | 1 |
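The two set-service records above (`Create` and `Close`) follow the same betterproto-style pattern: build a request message, copy in the optional header (plus a `delete` flag for close), and await a unary-unary call on a fixed RPC path. The sketch below reproduces just that shape with stand-in message classes and a fake channel, so it runs without any real Atomix or gRPC dependency.

```python
import asyncio
from dataclasses import dataclass
from typing import Optional

# Stand-in message types and channel; the real generated classes are not shown here.
@dataclass
class RequestHeader:
    session_id: int = 0

@dataclass
class CreateRequest:
    header: Optional[RequestHeader] = None

@dataclass
class CloseRequest:
    header: Optional[RequestHeader] = None
    delete: bool = False

class FakeChannel:
    async def unary_unary(self, path, request):
        print(f"-> {path}: {request}")
        return {"path": path}  # placeholder response

class SetServiceStub:
    def __init__(self, channel):
        self._channel = channel

    async def create(self, *, header=None):
        request = CreateRequest()
        if header is not None:
            request.header = header
        return await self._channel.unary_unary("/atomix.set.SetService/Create", request)

    async def close(self, *, header=None, delete=False):
        request = CloseRequest(delete=delete)
        if header is not None:
            request.header = header
        return await self._channel.unary_unary("/atomix.set.SetService/Close", request)

async def lifecycle():
    stub = SetServiceStub(FakeChannel())
    await stub.create(header=RequestHeader(session_id=1))
    await stub.close(header=RequestHeader(session_id=1), delete=True)

asyncio.run(lifecycle())
```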
Loads 10 seconds of 8000Hz music ('dataset/wind_lq.wav'), applies the algorithm on windows of size alg.N, and outputs the predicted result to a .wav file ('wind_lq_predicted.wav'). | def test_real_song(alg):
alg.input_func = None
alg.input_func_args = 'dataset/wind_lq.wav',True
alg.predict_long_wav_data(fs=8000, outname='wind_lq_predicted.wav') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main(_):\n\tlabel_wav()",
"def load_train_dataset(data_dir, word_list, silence_percentage, noise_percentage):\n validation_percentage, testing_percentage = 0.1, 0.1\n temp_list = []\n\n #wav_lists = os.path.join(data_dir, *, '*.wav')\n for word_l in word_list:\n #wav_word_list = os.path.join(data_dir, word_l)\n wav_list = os.path.join(data_dir, word_l, '*.wav')\n for file in gfile.Glob(wav_list):\n _, word = os.path.split(os.path.dirname(file))\n word = word.lower()\n\n if which_set(file, validation_percentage, testing_percentage) == 'training':\n rate, signal = load_wav(file);\n signal_and_noise = add_noise(signal, rate, 1, os.path.join(data_dir,'_background_noise_'), noise_percentage)\n \n feature = psf.mfcc(signal_and_noise, rate, nfilt = 40,numcep = 12, appendEnergy = False)\n #if feature.shape[0] != 99:\n # print(str(len(signal)) + \" \" + str(rate))\n temp_list.append({'feature': feature, 'label': word_l})\n\n # hotspot\n #silence = len(X_train) * silence_percentage\n silence = int(math.ceil(len(temp_list) * silence_percentage / 100))\n for _ in range(silence):\n temp_list.append({'feature': 0, 'label': \"_silence_\"})\n\n random.shuffle(temp_list)\n\n X_train = np.zeros((len(temp_list), 99, 12))\n Y_train = np.zeros( len(temp_list) )\n\n for i in range(len(X_train)):\n X_train[i] = temp_list[i]['feature']\n Y_train[i] = word2index(temp_list[i]['label'])\n\n return X_train, Y_train",
"def load_wav(wav_path, downsample, n_steps):\n data = scipy.io.wavfile.read(wav_path)[1]\n data = scipy.signal.decimate(data, downsample) \n out = np.zeros((1, n_steps))\n out[0, n_steps - np.shape(data)[0]:] = data\n return out",
"def timbral_warmth(fname, dev_output=False, phase_correction=False, clip_output=False, max_FFT_frame_size=8192,\n max_WR = 12000, fs=0):\n '''\n Read input\n '''\n audio_samples, fs = timbral_util.file_read(fname, fs, phase_correction=phase_correction)\n\n # get the weighted high frequency content\n mean_wr, _, _, weighted_hf = warm_region_cal(audio_samples, fs)\n\n # calculate the onsets\n envelope = timbral_util.sample_and_hold_envelope_calculation(audio_samples, fs, decay_time=0.1)\n envelope_time = np.arange(len(envelope)) / float(fs)\n\n # calculate the onsets\n nperseg = 4096\n original_onsets = timbral_util.calculate_onsets(audio_samples, envelope, fs, nperseg=nperseg)\n # If onsets don't exist, set it to time zero\n if not original_onsets:\n original_onsets = [0]\n # set to start of file in the case where there is only one onset\n if len(original_onsets) == 1:\n original_onsets = [0]\n '''\n Initialise lists for storing features\n '''\n # set defaults for holding\n all_rms = []\n all_ratio = []\n all_SC = []\n all_WR_Ratio = []\n all_decay_score = []\n\n\n # calculate metrics for each onset\n for idx, onset in enumerate(original_onsets):\n if onset == original_onsets[-1]:\n # this is the last onset\n segment = audio_samples[onset:]\n else:\n segment = audio_samples[onset:original_onsets[idx+1]]\n\n segment_rms = np.sqrt(np.mean(segment * segment))\n all_rms.append(segment_rms)\n\n # get FFT of signal\n segment_length = len(segment)\n if segment_length < max_FFT_frame_size:\n freq, time, spec = spectrogram(segment, fs, nperseg=segment_length, nfft=max_FFT_frame_size)\n else:\n freq, time, spec = spectrogram(segment, fs, nperseg=max_FFT_frame_size, nfft=max_FFT_frame_size)\n\n # flatten the audio to 1 dimension. Catches some strange errors that cause crashes\n if spec.shape[1] > 1:\n spec = np.sum(spec, axis=1)\n spec = spec.flatten()\n\n # normalise for this onset\n spec = np.array(list(spec)).flatten()\n this_shape = spec.shape\n spec /= max(abs(spec))\n\n '''\n Estimate of fundamental frequency\n '''\n # peak picking algorithm\n peak_idx, peak_value, peak_x = timbral_util.detect_peaks(spec, freq=freq, fs=fs)\n # find lowest peak\n fundamental = np.min(peak_x)\n fundamental_idx = np.min(peak_idx)\n\n '''\n Warmth region calculation\n '''\n # estimate the Warmth region\n WR_upper_f_limit = fundamental * 3.5\n if WR_upper_f_limit > max_WR:\n WR_upper_f_limit = 12000\n tpower = np.sum(spec)\n WR_upper_f_limit_idx = int(np.where(freq > WR_upper_f_limit)[0][0])\n\n if fundamental < 260:\n # find frequency bin closest to 260Hz\n top_level_idx = int(np.where(freq > 260)[0][0])\n # sum energy up to this bin\n low_energy = np.sum(spec[fundamental_idx:top_level_idx])\n # sum all energy\n tpower = np.sum(spec)\n # take ratio\n ratio = low_energy / float(tpower)\n else:\n # make exception where fundamental is greater than\n ratio = 0\n\n all_ratio.append(ratio)\n\n '''\n Spectral centroid of the segment\n '''\n # spectral centroid\n top = np.sum(freq * spec)\n bottom = float(np.sum(spec))\n SC = np.sum(freq * spec) / float(np.sum(spec))\n all_SC.append(SC)\n\n '''\n HF decay\n - linear regression of the values above the warmth region\n '''\n above_WR_spec = np.log10(spec[WR_upper_f_limit_idx:])\n above_WR_freq = np.log10(freq[WR_upper_f_limit_idx:])\n np.ones_like(above_WR_freq)\n metrics = np.array([above_WR_freq, np.ones_like(above_WR_freq)])\n\n # create a linear regression model\n model = linear_model.LinearRegression(fit_intercept=False)\n model.fit(metrics.transpose(), above_WR_spec)\n 
decay_score = model.score(metrics.transpose(), above_WR_spec)\n all_decay_score.append(decay_score)\n\n\n '''\n get mean values\n '''\n mean_SC = np.log10(np.mean(all_SC))\n mean_decay_score = np.mean(all_decay_score)\n weighted_mean_ratio = np.average(all_ratio, weights=all_rms)\n\n if dev_output:\n return mean_SC, weighted_hf, mean_wr, mean_decay_score, weighted_mean_ratio\n else:\n\n '''\n Apply regression model\n '''\n all_metrics = np.ones(6)\n all_metrics[0] = mean_SC\n all_metrics[1] = weighted_hf\n all_metrics[2] = mean_wr\n all_metrics[3] = mean_decay_score\n all_metrics[4] = weighted_mean_ratio\n\n coefficients = np.array([-4.464258317026696,\n -0.08819320850778556,\n 0.29156539973575546,\n 17.274733561081554,\n 8.403340066029507,\n 45.21212125085579])\n\n warmth = np.sum(all_metrics * coefficients)\n\n # clip output between 0 and 100\n if clip_output:\n warmth = timbral_util.output_clip(warmth)\n\n return warmth",
"def label_wav(wav, labels, graph, input_name, output_name, how_many_labels):\n if not wav or not tf.gfile.Exists(wav):\n tf.logging.fatal('Audio file does not exist %s', wav)\n\n if not labels or not tf.gfile.Exists(labels):\n tf.logging.fatal('Labels file does not exist %s', labels)\n\n if not graph or not tf.gfile.Exists(graph):\n tf.logging.fatal('Graph file does not exist %s', graph)\n\n labels_list = load_labels(labels)\n\n # load graph, which is stored in the default session\n load_graph(graph)\n\n with open(wav, 'rb') as wav_file:\n wav_data = wav_file.read()\n\n return run_graph(wav_data, labels_list, input_name, output_name, how_many_labels)",
"def label_wav():\n\t#if not wav or not tf.gfile.Exists(wav):\n\t\t#tf.logging.fatal('Audio file does not exist %s', wav)\n\n\t# load graph, which is stored in the default session\n\tgraph='./models/new1.pb'\n\tinput_name='wav_data:0'\n\toutput_name='labels_softmax:0'\n\tload_graph(graph)\n\twave=\"./utils/words/record.wav\"\n\twith open(wave, 'rb') as wav_file:\n\t\t\twav_data = wav_file.read()\n\tword=run_graph(wav_data, input_name, output_name)\n\tnames=['unknown','silence','laddu','modak','pedha']\n\treturn names[word]",
"def process_rnn():\n\n features = None\n labels = None\n file_paths = 'data/emodb'\n lag = 37\n frame_len = 1024\n frame_step = 1024\n n_filter = 40\n min_freq = 130\n max_freq = 6800\n n_fft = 1024\n fbank = None\n features = []\n labels = []\n for file_name in os.listdir(file_paths):\n\n if not file_name.endswith('.wav'):\n continue\n file_path = os.path.join(file_paths, file_name)\n freq, data = wavfile.read(file_path)\n # assuming 16 bit\n # Create features\n\n # try raw data first\n\n # transform in n_samplesx1 :\n # create labels\n sample_label = [1 for i in range(data.shape[0])]\n\n sample_label = np.asarray(sample_label)\n if file_name[5] == 'W':\n sample_label *= 0\n elif file_name[5] == 'L':\n sample_label *= 1\n elif file_name[5] == 'E':\n sample_label *= 2\n elif file_name[5] == 'A':\n sample_label *= 3\n elif file_name[5] == 'F':\n sample_label *= 4\n elif file_name[5] == 'T':\n sample_label *= 5\n elif file_name[5] == 'N':\n sample_label *= 6\n else:\n raise ValueError('Unknown label.')\n\n labels.append(sample_label)\n features.append(data)\n\n return features,labels",
"def runWavelengthDependency():\n RunData([getFiles(mintime=(15, 39, 58), maxtime=(15, 47, 58), folder='data/30Jul/')[0],], out='I600nmwave',\n wavelength='l600')\n RunData([getFiles(mintime=(17, 48, 35), maxtime=(17, 56, 03), folder='data/30Jul/')[0],], out='I700nmwave',\n wavelength='l700')\n RunData([getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/')[0],], out='I800nmwave',\n wavelength='l800')\n RunData([getFiles(mintime=(14, 17, 57), maxtime=(14, 25, 49), folder='data/01Aug/')[4],], out='I890nmwave',\n wavelength='l890')",
"def load_audio(self):\n df = pd.read_csv(\"{dir}/iteration_{iter}.csv\".format(dir=self.directory, iter=self.iteration),\n usecols=[1, 2, 3])\n\n doa_from_file = df.iloc[0][1]\n wav_name = df.iloc[0][0]\n filename = \"{dir}/{wav_name}\".format(dir=self.directory, wav_name=wav_name)\n\n y, sr = librosa.load(filename, mono=False)\n\n y_8k = librosa.resample(y, sr, 8000)\n result_x = librosa.util.fix_length(y_8k, 8000)\n\n return result_x, doa_from_file",
"def analyseData800nm():\n #800 nm\n RunData(getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/'), out='I800nm') #0.31, 0.3\n forwardModelJointFit(getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/'),\n out='J800nm', wavelength='800nm') #0.31, 0.3",
"def algo(self):\n audio = np.array([self.audio.popleft() for _ in range(self.count)])\n # Run Classifier\n wav_data = np.abs(np.fft.rfft(audio.flatten()))\n if len(wav_data) > 0:\n pred = self.clf.predict(np.expand_dims(wav_data, 0))\n if self.verbose > 1:\n print('The prediction is : ' + str(pred))\n self.finished.emit(int(pred[-1]))\n else:\n self.finished.emit(0)",
"def Retrain_Model(self):\n\n self.stream.close()\n\n random.shuffle(self.false_files)\n\n Path_To = REPO_PATH + \"%sData%sWakeWord%sAudio%sNot_Wake_Word%s\" % \\\n (delim, delim, delim, delim, delim)\n\n for files in self.false_files[:self.false_count - 1]:\n os.rename(Path_To + files, \"%s%sTrain_Data%s%s\" %\n (Path_To, delim, delim, files))\n\n os.rename(Path_To + self.false_files[self.false_count - 1],\n \"%s%sTest_Data%s%s\" %\n (Path_To, delim, delim,\n self.false_files[self.false_count - 1]))\n\n self.false_counts = 0\n self.false_files = []\n self.ext_feat.Obtain_WW_Audio_Data()\n\n if not(self.retrain):\n self.ww_model = Model()\n self.ww_model.build_model()\n\n if not(self.randomize):\n self.ww_model.preprocess()\n\n else:\n self.ww_model.randomized_preprocess()\n\n self.ww_model.train_model()\n\n self.stream = self.p.open(format=self.format,\n channels=self.channels,\n rate=self.rate, input=True,\n frames_per_buffer=self.chunk)",
"def generate_wavplot(song_name):\n\n filepath = features[features.inferred_name.str.title() == song_name].feature_file.values[0]\n rate, wave = wavfile.read(filepath)\n mono = np.mean(wave, axis=1)\n mono.shape\n plt.figure(figsize=(20,6))\n plt.axis('off')\n plt.plot(mono[::mono.shape[0]//6000], color='white')\n plt.tight_layout;\n friendly_song_name = '_'.join(song_name.split()).lower()\n output_filepath = './static/wavplots/' + friendly_song_name + '.png'\n plt.savefig(output_filepath, bbox_inches='tight', pad_inches=0, transparent=True)\n return output_filepath",
"def prepareData(args):\n print(\"Starting preprocessing\")\n\n # params\n max_midi = args['max_midi']\n min_midi = args['min_midi']\n note_range = args['note_range']\n window_size = args['window_size']\n sr = args['sr']\n hop_length = args['hop_length']\n wav_dir = args['wav_dir']\n\n datapath = os.path.join(args['proj_root'], 'Features')\n bin_multiple = int(args['bin_multiple'])\n\n framecnt = 0\n maxFramesPerFile = args['maxFramesPerFile']\n maxFrames = args['maxFrames']\n\n fileappend = str(maxFramesPerFile) + 'pf_max' + str(maxFrames) + '.dat'\n\n filenameIN = os.path.join(datapath, 'input_' + fileappend)\n filenameOUT = os.path.join(datapath, 'output_' + fileappend)\n\n if os.path.isfile(filenameIN) and os.path.isfile(filenameOUT):\n n_bins = note_range * bin_multiple\n print('loading precomputed data from ' + filenameIN)\n mmi = np.memmap(filenameIN, mode='r', dtype=\"float64\")\n inputs = np.reshape(mmi, (-1, window_size, n_bins))\n\n mmo = np.memmap(filenameOUT, mode='r', dtype=\"float64\")\n outputs = np.reshape(mmo, (-1, note_range))\n\n return inputs, outputs, datapath\n\n inputs, outputs = [], []\n addCnt, errCnt = 0, 0\n\n # hack to deal with high PPQ from MAPS\n # https://github.com/craffel/pretty-midi/issues/112\n pretty_midi.pretty_midi.MAX_TICK = 1e10\n\n for s in os.listdir(wav_dir):\n subdir = os.path.join(wav_dir, s)\n if not os.path.isdir(subdir):\n continue\n # recursively search in subdir\n print(subdir)\n for dp, dn, filenames in os.walk(subdir):\n # in each level of the directory, look at filenames ending with .mid\n for f in filenames:\n # if there exists a .wav file and .midi file with the same name\n\n if f.endswith('.wav'):\n audio_filename = f\n fprefix = audio_filename.split('.wav')[0]\n mid_fn = fprefix + '.mid'\n txt_fn = fprefix + '.txt'\n print(\"Handling files {}\".format(fprefix))\n if mid_fn in filenames:\n # extract_features\n audio_filename = os.path.join(dp, audio_filename)\n inputnp = extract_features(audio_filename, args)\n times = librosa.frames_to_time(np.arange(inputnp.shape[0]), sr=sr, hop_length=hop_length)\n # mid2outputnp\n mid_fn = os.path.join(dp, mid_fn)\n pm_mid = pretty_midi.PrettyMIDI(mid_fn)\n\n outputnp = mid2outputnp(pm_mid, times, args)\n\n # check that num onsets is equal\n if inputnp.shape[0] == outputnp.shape[0]:\n # Some filtering highly pragmatic filtering on the data!!\n # take only frames that are \"sufficiently loud\", ...\n good2take = np.array(inputnp.max(axis=(1, 2)) > 0.05)\n # ... and always omit the last frame as this has been padded ...\n good2take[-1] = False # omit last\n # ... and only take frames with at least one true label (i.e. 
some tone is played)\n good2take = good2take & (outputnp.max(axis=1) > 0)\n outputnp = outputnp[good2take, ]\n inputnp = inputnp[good2take, ]\n\n addCnt += 1\n if inputnp.shape[0] > maxFramesPerFile > 0:\n inputnp = inputnp[:maxFramesPerFile]\n outputnp = outputnp[:maxFramesPerFile]\n framecnt += inputnp.shape[0]\n print(\"framecnt is {}\".format(framecnt))\n inputs.append(inputnp)\n outputs.append(outputnp)\n else:\n print(\"error for fprefix {}\".format(fprefix))\n errCnt += 1\n print(inputnp.shape)\n print(outputnp.shape)\n\n if framecnt > maxFrames > 0:\n print(\"have enought frames, leaving {}\".format(subdir))\n break\n if framecnt > maxFrames > 0:\n print(\"have enought frames, leaving {}\".format(wav_dir))\n break\n\n if framecnt > maxFrames > 0:\n print(\"have enought frames, leaving {}\".format(wav_dir))\n break\n\n print(\"{} examples in dataset\".format(addCnt))\n print(\"{} examples couldnt be processed\".format(errCnt))\n\n # concatenate dynamic list to numpy list of example\n if addCnt:\n inputs = np.concatenate(inputs)\n outputs = np.concatenate(outputs)\n\n print(\"inputs.shape\")\n print(inputs.shape)\n print(\"outputs.shape\")\n print(outputs.shape)\n mmi = np.memmap(filename=filenameIN, mode='w+', shape=inputs.shape, dtype=\"float64\")\n mmi[:] = inputs[:]\n mmo = np.memmap(filename=filenameOUT, mode='w+', shape=outputs.shape, dtype=\"float64\")\n mmo[:] = outputs[:]\n del mmi\n del mmo\n\n return inputs, outputs, datapath",
"def test_sound(alg, repNum, soundType):\n\tfs = 1000\n\n\tif soundType=='c_maj':\n\t\talg.input_func = gen_sound_dataset\n\t\talg.input_func_args=([(1,'C',0),(1,'E',0),(1,'G',0)], fs)\n\telif soundType=='c_min':\n\t\talg.input_func = gen_sound_dataset\n\t\talg.input_func_args=([(1,'C',0),(1,'D#',0),(1,'G',0)], fs)\n\telif soundType=='c_fifth':\n\t\talg.input_func = gen_sound_dataset\n\t\talg.input_func_args=([(1,'C',0),(1,'G',0)], fs)\n\telif soundType=='random':\n\t\talg.input_func = gen_random_sound_dataset\n\t\talg.input_func_args=fs\n\telse:\n\t\tprint '[ERROR] test_sound() : \"%s\" not a recognized @soundType' % soundType\n\t\texit(0)\n\t\n\tperformance = alg.predict_perf(repNum=repNum)\n\tprint performance\n\n\tsave_freq_domain_wav(alg.x, fs, '%s_sound.wav'%soundType)\n\tsave_freq_domain_wav(alg.x_pred, fs, '%s_sound_predicted.wav'%soundType)\n\n\talg.plot_spectrogram(fs)\n\n\treturn performance",
"def load_data(self):\r\n if not os.path.exists(self.origin_dir):\r\n raise ValueError(f\"Folder {self.origin_dir} not exists!\")\r\n\r\n # loop folders\r\n listglobs = glob.glob(os.path.join(self.origin_dir)+r\"[0-9]*\")\r\n count = 0\r\n temp = []\r\n for x in listglobs:\r\n\r\n # step1, get speaker id md5\r\n user_id = x.rsplit(\"\\\\\")[-1]\r\n speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n print(\"1=>\", x)\r\n\r\n for k in [\"你好小顺\", \"小顺小顺\"]:\r\n paths = os.path.join(x, k)\r\n print(\"2=>\", paths)\r\n # step2, parse speaker info\r\n with open(os.path.join(paths, \"spearker_info.txt\"), 'r', encoding=\"utf-8\") as f:\r\n line = f.readline()\r\n arrs = line.strip().split(\"\\\\t\")\r\n if len(arrs) != 3:\r\n raise ValueError(\"Required three field in speaker_info<id>\\t<gender>\\t<age>\")\r\n self.wav_desc[\"gender\"] = arrs[1].strip(\"<\").rstrip(\">\")\r\n self.wav_desc[\"age\"] = arrs[-1].strip(\"<\").rstrip(\">\")\r\n\r\n # step3, parse wav detailed information\r\n # key: wav_id, value: info_list, [keyword, noise_type, distance, speed,user_id, equipment]\r\n wav_infos_dict = {}\r\n with open(os.path.join(paths, \"wav_desc.txt\"), \"r\", encoding=\"utf-8\") as f:\r\n for line in f.readlines():\r\n arrs = line.strip().split(\"\\\\t\")\r\n wav_infos_dict[arrs[0].strip(\"<\").rstrip(\">\")] = [x.strip(\"<\").rstrip(\">\") for\r\n x in arrs[1:]]\r\n\r\n print(f\"Parse wav info finished find {len(wav_infos_dict)} infos.\")\r\n\r\n # Step4, audio with background noise and without nose, which was back_wav and wav_data folder\r\n for wav_folder in [\"back_wav\", \"wav_data\"]:\r\n audio_lists = glob.glob(os.path.join(paths + f\"\\\\{wav_folder}\", \"*.wav\"))\r\n for xa in audio_lists:\r\n # copy data to\r\n wav_id, user_id = get_wav_name(xa)\r\n # print(wav_id, user_id)\r\n # create md5 id\r\n utt_id = hashlib.md5(xa.encode(\"utf-8\")).hexdigest()\r\n # speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n # print(utt_id, speaker_id)\r\n # collect all info for an audio\r\n self.wav_desc[\"utt_id\"] = utt_id\r\n infos = wav_infos_dict[wav_id]\r\n if len(infos) != 6:\r\n print(\"==>\", infos)\r\n self.wav_desc[\"keyword_id\"] = self.keywords_dict[infos[0]]\r\n self.wav_desc[\"noise_type\"] = infos[1]\r\n self.wav_desc[\"distance\"] = infos[2]\r\n self.wav_desc[\"record_speed\"] = infos[3]\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n self.wav_desc[\"record_equipment\"] = infos[5]\r\n\r\n # record wav information\r\n t_infos = copy.deepcopy(self.wav_desc)\r\n self.all_wavs.append(t_infos)\r\n count += 1\r\n temp.append(utt_id)\r\n\r\n # copy data to resource folder\r\n dest = shutil.copy2(xa, os.path.join(self.dest_dir, f\"audios/{utt_id}.wav\"))\r\n set_index = which_set(dest, 20, 30)\r\n self.data_index[set_index].append(t_infos)\r\n\r\n # write wav information into json file\r\n with open(os.path.join(self.dest_dir, \"resources/wav_desc.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.all_wavs, f, ensure_ascii=False, indent=True)\r\n print(f\"total wavs:{count}, total ids:{len(temp)}\")\r\n for set_index in self.data_index.keys():\r\n with open(os.path.join(self.dest_dir, f\"resources/p_{set_index}.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.data_index[set_index], f, ensure_ascii=False, indent=True)\r\n print(f\"Collect {set_index} data total {len(self.data_index[set_index])} samples.\")",
"def output( class_label_path ):\n #print \"{0:04d}\".format(1)\n # \n seq_list = []\n x =2.0\n p = 0\n i = 0\n for i in range( 0, 10 ):\n seq_list.append( 2*(i-1)+2 )\n for ii in range( 0, i ):\n p = p + x/(i) \n #seq_list.append( 3*(i-1)+1 )\n print seq_list\n #seq_list\n\n \n f = open( class_label_path , 'r' ) #read\n same_label_list = pickle.load( f ) #np\n f.close()\n #same_label_list = outmod2.loadFile2List( \"./data/L\" + str(input_label) + \"-List.txt\" ) # 改行区切りのリストファイルをList型へ変換\n \n # Listとして設定\n player_pack = []\n for i in range(MAX_PLAY_NUM): # \n player_pack.append( outmod.AudioPlayer() ) # 新たなAudioPlayerをListに追加\n out_wav_num = random.choice( same_label_list )\n #out_wav_num = same_label_list[i-1]\n player_pack[i].setAudioFile( \"../clustering/hayakuti_data/\" + \"{0:03d}\".format(int(out_wav_num)) + \"/sound.wav\" )\n player_pack[i].setAudioWaitTime( random.uniform( seq_list[i] , seq_list[i] ) )\n player_pack[i].setAudioLoopTimes( random.randint( 0, 0 ) )\n\n # 基本再生\n # outmod2.playLoop( player1 )\n\n # List再生\n for player_i in player_pack:\n outmod.playLoop( player_i )",
"def get_features(filename, training=True):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n wav, _ = librosa.load(filename, \n sr=SAMPLE_RATE, \n mono=True,\n dtype=np.float64)\n energy = np.abs(wav)\n silence_threshold = np.percentile(energy, 95)\n offsets = np.where(energy > silence_threshold)[0]\n if training:\n audio_voice_only = wav[offsets[0]:offsets[-1]]\n else:\n #avoid cutting off too abruptly\n audio_voice_only = wav[offsets[0]:offsets[-1] + 4800]\n if training:\n if len(audio_voice_only) >= 160 * NUM_FRAMES:\n start_ = np.random.randint(len(audio_voice_only) - 160 * NUM_FRAMES + 1)\n end_ = start_ + 160 * NUM_FRAMES - 1\n audio_voice_only = audio_voice_only[start_:end_]\n else:\n return [0], [0]\n wav = librosa.util.normalize(audio_voice_only)\n #deep speaker uses preemphasis here, I do not, because I want the model to correctly transform lower\n #frequencies, too. I apply preemphasis to spectrum before putting data into model embedder instead.\n wav = lfilter([1., -PREEMPH], [1.], wav)[1:]\n #f0 extraction (most time consuming operation in this function)\n f0, timeaxis = pyworld.harvest(wav, SAMPLE_RATE, frame_period=FRAME_PERIOD, f0_floor=71.0, f0_ceil=800.0)\n sp = pyworld.cheaptrick(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n ap = pyworld.d4c(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n mfe = sp2mfe(sp)\n lmfe = np.log(mfe)\n mean = np.mean(lmfe)\n std = np.std(lmfe)\n nmfe = (lmfe - mean) / std\n \n if training:\n return nmfe.T, f0\n else:\n out_len = len(f0) // 4 * 4\n# out_len = len(f0)\n return nmfe[:out_len].T, mean, std, sp[:out_len], f0[:out_len], ap[:out_len]",
"def morse_to_audio(words, playsound=None, name_file=\"output\\\\code_to_audio_output.wav\"):\n dot = wave.open(\"kropka.wav\", 'rb')\n dash = wave.open(\"kreska.wav\", 'rb')\n\n rate_dot = dot.getframerate()\n\n rate_dash = dash.getframerate()\n\n data_dot = dot.readframes(-1)\n data_dash = dash.readframes(-1)\n data_dot = np.fromstring(data_dot, 'Int16')\n data_dash = np.fromstring(data_dash, 'Int16')\n\n l2=len(data_dot)\n l1=len(data_dash)\n\n output=[]\n\n for element in words:\n # print(element)\n for i in range(0, len(element)):\n # print(element[i])\n if element[i] == '1':\n # playsound(\"kropka.wav\")\n output.extend(data_dot)\n\n if element[i] == '0':\n # playsound(\"kreska.wav\")\n output.extend(data_dash)\n if element[i] == ' ':\n output.extend(np.zeros(int(len(data_dash)))*3)\n if i != len(element) - 1:\n # time.sleep(dl_kropka)\n output.extend(np.zeros(int(len(data_dot))))\n else:\n continue\n # time.sleep(dl_kreska)\n output.extend(np.zeros(int(len(data_dash))))\n\n # print(output)\n\n wynik=np.asarray(output)\n\n wynik=np.array(wynik).astype('int16')\n\n wav.write(name_file, rate_dash, wynik)\n\n #plik sie nie odtwarza w windowsie ale w audacity jest już wyraźnym szumem XD\n\n dot.close()\n dash.close()",
"def load_audio(file_path):\n # load the audio file in its original sampling rate\n audio_data, sr = librosa.load(file_path, sr=sampling_rate)\n\n # get the common file name\n file_name = file_path.split(\"/\")[-1]\n file_name = file_name.split(\".wav\")[0]\n\n # calculate number of samples in the time duration needed\n num_samples = int(sr*time_duration)\n\n # get the cut-off audio signals and save them\n for i in np.arange(num_audio_files):\n audio_cut_data = cut_sample(audio_data, num_samples)\n file_path=dir_to_save + file_name + \"_\" + str(i+1) + \".wav\"\n save_sample(audio_cut_data, file_path, sr)\n print(f\"generating signal {str(i)}, its length {len(audio_cut_data)} by cutting the original signal\")",
"def wav_to_features(sample_rate, clip_duration_ms, window_size_ms,\n window_stride_ms, feature_bin_count, quantize, preprocess,\n input_wav, output_c_file):\n\n # Start a new TensorFlow session.\n sess = tf.compat.v1.InteractiveSession()\n\n model_settings = models.prepare_model_settings(\n 0, sample_rate, clip_duration_ms, window_size_ms, window_stride_ms,\n feature_bin_count, preprocess)\n audio_processor = input_data.AudioProcessor(None, None, 0, 0, '', 0, 0,\n model_settings, None)\n\n results = audio_processor.get_features_for_wav(input_wav, model_settings,\n sess)\n features = results[0]\n\n variable_base = os.path.splitext(os.path.basename(input_wav).lower())[0]\n\n # Save a C source file containing the feature data as an array.\n with gfile.GFile(output_c_file, 'w') as f:\n f.write('/* File automatically created by\\n')\n f.write(' * tensorflow/examples/speech_commands/wav_to_features.py \\\\\\n')\n f.write(' * --sample_rate=%d \\\\\\n' % sample_rate)\n f.write(' * --clip_duration_ms=%d \\\\\\n' % clip_duration_ms)\n f.write(' * --window_size_ms=%d \\\\\\n' % window_size_ms)\n f.write(' * --window_stride_ms=%d \\\\\\n' % window_stride_ms)\n f.write(' * --feature_bin_count=%d \\\\\\n' % feature_bin_count)\n if quantize:\n f.write(' * --quantize=1 \\\\\\n')\n f.write(' * --preprocess=\"%s\" \\\\\\n' % preprocess)\n f.write(' * --input_wav=\"%s\" \\\\\\n' % input_wav)\n f.write(' * --output_c_file=\"%s\" \\\\\\n' % output_c_file)\n f.write(' */\\n\\n')\n f.write('const int g_%s_width = %d;\\n' %\n (variable_base, model_settings['fingerprint_width']))\n f.write('const int g_%s_height = %d;\\n' %\n (variable_base, model_settings['spectrogram_length']))\n if quantize:\n features_min, features_max = input_data.get_features_range(model_settings)\n f.write('const unsigned char g_%s_data[] = {' % variable_base)\n i = 0\n for value in features.flatten():\n quantized_value = int(\n round(\n (255 * (value - features_min)) / (features_max - features_min)))\n if quantized_value < 0:\n quantized_value = 0\n if quantized_value > 255:\n quantized_value = 255\n if i == 0:\n f.write('\\n ')\n f.write('%d, ' % (quantized_value))\n i = (i + 1) % 10\n else:\n f.write('const float g_%s_data[] = {\\n' % variable_base)\n i = 0\n for value in features.flatten():\n if i == 0:\n f.write('\\n ')\n f.write('%f, ' % value)\n i = (i + 1) % 10\n f.write('\\n};\\n')",
"def play_sound(self, wavel, waver=None, samplefreq=44100, postduration = 0.05, attns=[20., 20.],\n isi=1.0, reps=1, storedata=True): \n if storedata:\n runmode = RZ5D_Run\n else:\n runmode = RZ5D_Preview\n # create an output waveform that has the stimulus repeated reps times with the selected ISI\n samplefreq = self.out_sampleFreq\n stimulus_duration = isi*reps # len(wavel)*samplefreq + postduration\n pts_per_rep = int(float(isi)*samplefreq)\n if wavel.shape[0] < pts_per_rep:\n wavel = np.concatenate((wavel, np.zeros(pts_per_rep-wavel.shape[0])), axis=0)\n wavel = np.tile(wavel, reps)\n if waver is not None:\n if waver.shape[0] < pts_per_rep:\n waver = np.concatenate((waver, np.zeros(pts_per_rep-waver.shape[0])), axis=0)\n waver = np.tile(waver, reps)\n \n \n # different approaches to playing out the sound for different hardware configuration:\n \n if 'pyaudio' in self.hardware:\n self.audio = pyaudio.PyAudio()\n chunk = 1024\n FORMAT = pyaudio.paFloat32\n CHANNELS = 2\n RATE = samplefreq\n if self.debugFlag:\n print (\"pysounds.play_sound: samplefreq: %f\" % (RATE))\n self.stream = self.audio.open(format = FORMAT,\n channels = CHANNELS,\n rate = int(RATE),\n output = True,\n input = True,\n frames_per_buffer = chunk)\n # play stream\n #print self.stream\n wave = np.zeros(2*len(wavel))\n if len(wavel) != len(waver):\n print (\"pysounds.play_sound: waves not matched in length: %d vs. %d (L,R)\" % (len(wavel), len(waver)))\n return\n (waver, clipr) = self.clip(waver, 20.0)\n (wavel, clipl) = self.clip(wavel, 20.0)\n wave[0::2] = waver \n wave[1::2] = wavel # order chosen so matches etymotic earphones on my macbookpro.\n postdur = int(float(postduration*self.in_sampleFreq))\n #rwave = read_array(len(wavel)+postdur, CHANNELS)\n write_array(self.stream, wave)\n self.stream.stop_stream()\n self.stream.close()\n self.audio.terminate()\n #self.ch1 = rwave[0::2]\n #self.ch2 = rwave[1::2]\n return\n \n if 'PA5' in self.hardware:\n self.setAttens(atten_left=attns)\n \n if 'RZ5D' in self.hardware:\n swcount = -1\n self.present_stim(wavel, isi, reps, runmode) # this sets up the NI card as well.\n deadmantimer = isi*(reps+1)+0.5 # just in case it doesn't stop as it should\n start_time = time.time() # deadman start time\n# print('done? 
', self.RZ5D.GetTargetVal(self.RZ5D_ParTags['SweepDone']))\n while self.RZ5D.GetTargetVal(self.RZ5D_ParTags['SweepDone']) == 0: # wait for zSwDone to be set\n cs = self.RZ5D.GetTargetVal(self.RZ5D_ParTags['CurrentSweep'])\n if cs > swcount:\n # print(' Sweep = %d' % cs)\n swcount = swcount + 1\n time.sleep(0.1)\n elapsed_time = time.time() - start_time # elapsed time is in seconds\n if elapsed_time > deadmantimer:\n print('DeadmanExit')\n break\n self.RZ5D.SetSysMode(RZ5D_Standby) # was (RZ5D_Standby)\n self.task.stop()\n self.setAttens(atten_left=120)\n # self.present_stim(wavel, waver)\n \n if 'RP21' in self.hardware:\n # now take in some acquisition...\n a = self.RP21.ClearCOF()\n if a <= 0:\n print (\"pystim.playSound: Unable to clear RP2.1\")\n return\n a = self.RP21.LoadCOFsf(\"C:\\pyStartle\\startle2.rco\", self.samp_cof_flag)\n if a > 0 and self.debugFlag:\n print (\"pystim.playSound: Connected to TDT RP2.1 and startle2.rco is loaded\")\n else:\n print (\"pystim.playSound: Error loading startle2.rco?, error = %d\" % (a))\n return\n self.trueFreq = self.RP21.GetSFreq()\n Ndata = np.ceil(0.5*(stimulus_duration)*self.trueFreq)\n self.RP21.SetTagVal('REC_Size', Ndata) # old version using serbuf -- with\n # new version using SerialBuf, can't set data size - it is fixed.\n # however, old version could not read the data size tag value, so\n # could not determine when buffer was full/acquisition was done.\n \n if 'PA5' in self.hardware:\n self.setAttens(atten_left=attns[0], atten_right=attns[1]) # set equal, but not at minimum...\n\n self.task.start() # start the NI AO task\n \n a = self.RP21.Run() # start the RP2.1 processor...\n a = self.RP21.SoftTrg(1) # and trigger it. RP2.1 will in turn start the ni card\n \n while not self.task.isTaskDone(): # wait for AO to finish?\n self.RP21.Halt()\n if 'NIDAQ' in self.hardware:\n self.task.stop()\n return\n \n if 'PA5' in self.hardware:\n self.setAttens() # attenuators down (there is noise otherwise)\n # read the data...\n curindex1 = self.RP21.GetTagVal('Index1')\n curindex2 = self.RP21.GetTagVal('Index2')\n \n while(curindex1 < Ndata or curindex2 < Ndata): # wait for input data to be sampled\n self.RP21.Halt()\n return\n curindex1 = self.RP21.GetTagVal('Index1')\n curindex2 = self.RP21.GetTagVal('Index2')\n \n self.ch2 = self.RP21.ReadTagV('Data_out2', 0, Ndata)\n # ch2 = ch2 - mean(ch2[1:int(Ndata/20)]) # baseline: first 5% of trace\n self.ch1 = self.RP21.ReadTagV('Data_out1', 0, Ndata)\n self.RP21.Halt()",
"def train_hmm_n_times(file_id, nstates, trials=20, iter=1000, pickle=True,\n phase=2, cond=None, units=constants.XY, parallel=True):\n\n def pick_lowest_bic(models):\n hmm, d, bic = None, None, 9999999999\n for hmm_ in models:\n # hmm_ = HMM(hmm__, training_data=hmm__.obs, hmm_type=\"ghmm\")\n if hmm_.bic < bic:\n bic = hmm_.bic\n hmm = hmm_\n if hmm is None:\n raise Exception(\"There are no valid models, WTF?!?\")\n # return None\n # Hmm = HMM(hmm, training_data=d, hmm_type=\"hmmlearn\")\n # print_n_flush( \"New hmm and data (%s)\" % d)\n # Hmm.from_R(hmm)\n return hmm\n\n\n import GHmmWrapper\n\n reload(GHmmWrapper)\n from GHmmWrapper import get_range_of_multiple_traj\n # reload(ExperimentalData)\n from leaparticulator.data.functions import fromFile\n from leaparticulator.data.hmm import reconstruct_hmm\n from LeapTheremin import palmToAmpAndFreq, palmToAmpAndMel\n\n\n responses, test_results, responses_p, test_p, images = fromFile(id_to_log(file_id))\n multivariate = False\n reverse_cond = cond in (\"2r\", \"1r\")\n interval = 1\n pick_var = 0\n if reverse_cond:\n interval = -1\n pick_var = 1\n\n if cond in (\"2\", \"2r\"):\n if phase == 1:\n multivariate = True\n else:\n if phase == 2:\n multivariate = True\n\n formatData = None\n\n if multivariate:\n if units == constants.XY:\n formatData = lambda r, phase: [[frame.get_stabilized_position()[:2][::interval] for frame in rr] for rr in\n r[\"127.0.0.1\"][str(phase)].values()]\n elif units == constants.AMP_AND_FREQ:\n # -interval, because amp_and_freq returns y,x and not x,y. \n formatData = lambda r, phase: [\n [palmToAmpAndFreq(frame.get_stabilized_position())[::-interval] for frame in rr] for rr in\n r[\"127.0.0.1\"][str(phase)].values()]\n elif units == constants.AMP_AND_MEL:\n # -interval, because amp_and_freq returns y,x and not x,y. \n formatData = lambda r, phase: [\n [palmToAmpAndMel(frame.get_stabilized_position())[::-interval] for frame in rr] for rr in\n r[\"127.0.0.1\"][str(phase)].values()]\n else:\n if units == constants.XY:\n formatData = lambda r, phase: [[frame.get_stabilized_position()[pick_var] for frame in rr] for rr in\n r[\"127.0.0.1\"][str(phase)].values()]\n elif units == constants.AMP_AND_FREQ:\n # -interval, because amp_and_freq returns y,x and not x,y. \n formatData = lambda r, phase: [\n [palmToAmpAndFreq(frame.get_stabilized_position())[::-interval][pick_var] for frame in rr] for rr in\n r[\"127.0.0.1\"][str(phase)].values()]\n elif units == constants.AMP_AND_MEL:\n # -interval, because amp_and_freq returns y,x and not x,y. 
\n formatData = lambda r, phase: [\n [palmToAmpAndMel(frame.get_stabilized_position())[::-interval][pick_var] for frame in rr] for rr in\n r[\"127.0.0.1\"][str(phase)].values()]\n\n data = formatData(responses, phase) + formatData(responses_p, phase)\n print_n_flush(\"Sample data: %s\" % data[0][:3])\n # data = [[frame.get_stabilized_position()[:2] for frame in response] for response in data]\n # data.append()\n lview = client = None\n if parallel:\n from IPython.parallel import Client\n\n client = Client(profile=\"default\")\n from types import FunctionType\n from IPython.utils.pickleutil import can_map\n\n can_map.pop(FunctionType, None)\n import pickle\n from IPython.kernel.zmq import serialize\n\n serialize.pickle = pickle\n\n client[:].use_dill()\n reg = \"import copy_reg, ExperimentalData;copy_reg.constructor(ExperimentalData.reconstruct_hmm);copy_reg.pickle(ExperimentalData.HMM, ExperimentalData.reduce_hmm, ExperimentalData.reconstruct_hmm)\"\n # print type(data), type(data[0])\n\n client[:].execute(reg)\n # print data \n\n lview = client.load_balanced_view() # default load-balanced\n\n lview.block = True\n to_return = []\n range_x, range_y = get_range_of_multiple_traj(data)\n\n for n in nstates:\n print_n_flush(\"Doing %d state models...\" % n)\n args = [(data, n, range_x, range_y)] * trials\n\n if not parallel:\n hmms = map(fn, args) #[(data,nstates,range_x,range_y)] * trials)\n else:\n hmms = lview.map(fn, args) #[(data,nstates,range_x,range_y)] * trials)\n hmms = [reconstruct_hmm(matrix, data) for matrix, data in hmms]\n\n to_return.append(pick_lowest_bic(hmms))\n\n if pickle:\n pickle_results(to_return, nstates, trials, iter, id_to_log(file_id), phase, units=units)\n return to_return",
"def run(self):\n from audio import AudioRecorder\n\n loader = SingleInputLoader(128)\n recorder = AudioRecorder()\n\n with tf.Session() as sess:\n model = create_default_model('record', 128, loader)\n model.restore(sess, 'train/best-weights')\n \n while True:\n print('Listening...')\n audio, width = recorder.record()\n audio = np.array(audio)\n\n #calculate the power spectrum of the audio and of sampling rate 16000 \n input_ = preprocess.calculatePowerSpectrogram(audio, 16000)\n\n loader.set_input(input_)\n [decoded] = model.step(sess, loss=False, update=False, decode=True)\n\n decoded_ids_paths = [Test.extract_decoded_ids(path) for path in decoded]\n \n for decoded_path in decoded_ids_paths:\n decoded_ids = next(decoded_path)\n decoded_str = self.idsToSentence(decoded_ids)\n print('Predicted: {}'.format(decoded_str))",
"def load_wave_np(self):\r\n self.wavenpfileName, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Single File', 'M:/tnw/ist/do/projects/Neurophotonics/Brinkslab/Data',\"(*.npy)\") \r\n \r\n temp_loaded_container = np.load(self.wavenpfileName, allow_pickle=True)\r\n\r\n try:\r\n self.uiDaq_sample_rate = int(os.path.split(self.wavenpfileName)[1][20:-4])\r\n except:\r\n try:\r\n self.uiDaq_sample_rate = int(float(self.wavenpfileName[self.wavenpfileName.find('sr_')+3:-4])) #Locate sr_ in the file name to get sampling rate.\r\n except:\r\n self.uiDaq_sample_rate = 50000\r\n \r\n if self.uiDaq_sample_rate != int(self.SamplingRateTextbox.value()):\r\n print('ERROR: Sampling rates is different!')\r\n \r\n self.PlotDataItem_dict = {}\r\n self.waveform_data_dict = {}\r\n \r\n for i in range(len(temp_loaded_container)):\r\n \r\n channel_keyword = temp_loaded_container[i]['Sepcification']\r\n \r\n if channel_keyword != \"galvos_X_contour\" and channel_keyword != \"galvos_Y_contour\":\r\n self.waveform_data_dict[channel_keyword] = temp_loaded_container[i]['Waveform']\r\n self.generate_graphy(channel_keyword, self.waveform_data_dict[channel_keyword])",
"def readAudioData(self, shouldProcess):\n if shouldProcess:\n return gatherData(self.playlists) \n else:\n return pd.read_pickle(\"data/audioDF.pkl\")",
"def analyzeWAV(inputFile):\n data, fs, nbits = audiolab.wavread(inputFile)\n samplingRate = fs\n return [data, samplingRate]",
"def create_melspectrogram_dataset(label_folder='electronic_music/Trance_label/Train/', save_folder='song_mel_label_data',\n sr=44100, n_mels=128, n_fft=2048, hop_length=512, song_duration=180.0,\n create_data=False):\n if create_data:\n # get list of all labels\n os.makedirs(save_folder, exist_ok=True)\n labels = [path for path in os.listdir(label_folder) if os.path.isdir(label_folder + path)]\n\n # iterate through all lables, songs and find mel spectrogram\n for label in labels:\n print('{} \\n'.format(label))\n label_path = os.path.join(label_folder, label)\n label_songs = os.listdir(label_path)\n\n for song in label_songs:\n print(song)\n song_path = os.path.join(label_path, song)\n\n # Create mel spectrogram for song_duration in the middle of the song and convert it to the log scale\n audio = MP3(song_path)\n audio_lenght = int(audio.info.length)\n audio_middle = (audio_lenght - int(song_duration))/2\n y, sr = librosa.load(song_path, sr=sr, offset=audio_middle, duration=song_duration)\n S = librosa.feature.melspectrogram(y, sr=sr, n_mels=n_mels, n_fft=n_fft, hop_length=hop_length)\n log_S = librosa.logamplitude(S, ref_power=1.0)\n data = (label, log_S, song)\n\n # Save each song\n save_name = label + '_%%-%%_' + song\n with open(os.path.join(save_folder, save_name), 'wb') as fp:\n dill.dump(data, fp)",
"def Run_Extraction(self):\n\n # print the summary of the model\n print(self.ww_model.model.summary(), end=\"\\n\\n\", flush=True)\n # open an audio data stream\n self.stream = self.p.open(format=self.format, channels=self.channels,\n rate=self.rate, input=True,\n frames_per_buffer=self.chunk)\n\n act_count = 0\n\n while True:\n\n # reads chunk of audio\n data = self.stream.read(self.chunk)\n\n # appends chunk to frame list\n self.frames.append(data)\n\n # begins making predictions after the first\n # 2.5 seconds of audio is read\n if (len(self.frames) > 19):\n\n prediction = self.Prediction()\n\n # if the predictions is larger than the defined confidence\n if (prediction > self.confidence):\n\n # increment the activation counter\n act_count += 1\n\n # if the number of consecutive activations\n # exceeds the activation value\n if(act_count >= self.activations):\n\n # print out \"nimbus\"\n print(\" << nimbus >> \", end=\" \", flush=True)\n\n # reset activation count\n act_count = 0\n\n self.False_Activation()\n\n self.frames = self.frames[18:]\n\n if (self.false_counts >= self.false_count):\n self.Retrain_Model()\n\n # if prediction falls below the confidence level\n else:\n\n # reset the activation count\n act_count = 0\n\n if not(self.print_pred):\n # output nothing to the stream\n print(\"-\", end=\"\", flush=True)\n\n # window the data stream\n self.frames = self.frames[1:]",
"def test():\r\n le = preprocessing.LabelEncoder()\r\n le.fit([\"Door Knocking\",\"Shower Running\",\"Toilet Flushing\",\"Vacuum Cleaning\",\"Keyboard Typing\", # encode class labels as numeric id values\r\n \"Coughing\",\"Neutral\"])\r\n \r\n if torch.cuda.is_available():\r\n device = \"cuda:0\"\r\n use_cuda = True\r\n else:\r\n device = \"cpu\"\r\n use_cuda = False\r\n \r\n myModel, start_epoch, train_hist = loadCheckpoint(31, use_cuda)\r\n \r\n #myModel = myModel.double()\r\n myModel = myModel.to(device, dtype=torch.double)\r\n next(myModel.parameters()).device # Check that it is on Cuda\r\n \r\n file_names = []\r\n class_ids = []\r\n max_s = 1\r\n sr = 44100 \r\n for entry in os.scandir(\"test wavs/\"): # for each folder corresponding to a class in dataset\r\n class_id = entry.name # get class numeric id according to label encoder\r\n relative_path = \"test wavs/\"+entry.name # get path location of data sample for loading audio\r\n file_names.append(relative_path) # append to list\r\n class_ids.append(class_id)\r\n\r\n max_s = 1\r\n sr = 44100\r\n X_test = [] \r\n for i in range(len(file_names)):\r\n audio = LoadAudio.load(file_names[i]) # load audio file\r\n audio = LoadAudio.resample(audio, sr) # resample audio\r\n audio = LoadAudio.mono(audio) # make audio stereo\r\n audio = LoadAudio.resize(audio, max_s) # resize audio \r\n sgram = LoadAudio.spectrogram(audio, n_mels=128, n_fft=1024, hop_len=None) # create spectrogram \r\n sgram = LoadAudio.hpssSpectrograms(audio,sgram)\r\n sgram_tensor = torch.tensor(sgram)\r\n X_test.append(sgram_tensor)\r\n\r\n pred = np.array([])\r\n for i in range(len(X_test)):\r\n inputs = X_test[i]\r\n # Normalize the inputs\r\n inputs_m, inputs_s = inputs.mean(), inputs.std()\r\n inputs = (inputs - inputs_m) / inputs_s\r\n inputs = inputs.unsqueeze(0)\r\n inputs = inputs.double()\r\n \r\n # Get predictions\r\n outputs = myModel(inputs)\r\n\r\n # Get the predicted class with the highest score\r\n _, predicted = torch.max(outputs.data, 1)\r\n \r\n pred = np.append(pred, le.inverse_transform(predicted.detach().cpu().numpy()))\r\n \r\n\r\n df = pd.DataFrame(pred, columns=[\"Predicted\"]) # save predictions as a datafram column\r\n df['True'] = class_ids # save true class as a datafram column\r\n print(\"\\nPredicted:\", df)"
] | [
"0.5846201",
"0.57938576",
"0.57843184",
"0.57702315",
"0.56909996",
"0.5670173",
"0.5635622",
"0.5609626",
"0.558032",
"0.5578267",
"0.5574669",
"0.55611974",
"0.55452716",
"0.5497656",
"0.54688525",
"0.5414804",
"0.5396864",
"0.5391886",
"0.538346",
"0.53793347",
"0.5373648",
"0.5367429",
"0.5354466",
"0.5351543",
"0.5343669",
"0.5338197",
"0.5334389",
"0.5332506",
"0.53205055",
"0.5301677"
] | 0.74534905 | 0 |
Ensure class methods' signatures. | def test_class_method() -> None:
assert inspect.signature(lmp.tknzr._bpe.BPETknzr.add_CLI_args) == inspect.signature(BaseTknzr.add_CLI_args) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_implemented_functions(_class):\n mandatory_functions_to_implement = [('generate', 2), ('__init__', 6)]\n implemented_class_function_names = get_implemented_class_functions(_class)\n for function in mandatory_functions_to_implement:\n function_name = function[0]\n number_function_mandatory_params = function[1]\n # check if the method is implemented in the class\n if function_name not in implemented_class_function_names:\n logger.error(f\"Method {function_name} not implemented in class {_class.__name__}\")\n raise SystemExit(0)\n ref_function = getattr(_class, function_name)\n # check if the method is expecting the mandatory number of arguments\n if not len(inspect.getfullargspec(ref_function).args) == number_function_mandatory_params:\n logger.error(\n f\"Method {function_name} implemented in class {_class.__name__} \"\n f\"is not expecting {number_function_mandatory_params} passed arguments\")\n raise SystemExit(0)",
"def signature(cls):\n raise NotImplementedError(\"%s.signature()\" % cls)",
"def check_signature(cls, name, bases, attr):\n check_bases = []\n for base in bases:\n all_bases = base.__mro__\n for i in all_bases:\n if (\n i is not object\n and \"sign_check\" in i.__dict__\n and i not in check_bases\n ):\n check_bases.append(i)\n\n for methodName in attr:\n f = attr[methodName]\n if not isinstance(f, types.FunctionType):\n continue\n\n for baseClass in check_bases:\n try:\n fBase = getattr(baseClass, methodName)\n if isinstance(fBase, types.FunctionType):\n if not inspect.signature(f) == inspect.signature(fBase):\n debtcollector.deprecate(\n \"{}.{} Method signature are not identical with base class {}\".format(\n name, methodName, baseClass\n ),\n category=UserWarning,\n )\n break\n else:\n debtcollector.deprecate(\n \"{}.{} Method is not FunctionType in base class {}\".format(\n name, methodName, baseClass\n ),\n category=UserWarning,\n )\n break\n except AttributeError:\n # This method was not defined in this base class,\n # So just go to the next base class.\n continue",
"def test_class_methods(self):\n\n x = BaseTransformer()\n\n h.test_object_method(obj=x, expected_method=\"fit\", msg=\"fit\")\n\n h.test_object_method(obj=x, expected_method=\"transform\", msg=\"transform\")\n\n h.test_object_method(\n obj=x, expected_method=\"columns_set_or_check\", msg=\"columns_set_or_check\"\n )\n\n h.test_object_method(\n obj=x, expected_method=\"columns_check\", msg=\"columns_check\"\n )",
"def test_missing_args_class_method(cls, test, x, y, z=3): # noqa: D213, D407",
"def test_missing_args_class_method(cls, test, x, y, _, z=3): # noqa: D213, D407",
"def check_class_definition(cls):\n super().check_class_definition()\n\n if not cls.objects_arg:\n cls.definition_error('Must provide \"objects_arg\" attribute.')\n\n func_parameters = inspect.signature(arg.s()(cls.func).func).parameters\n if cls.objects_arg not in func_parameters:\n cls.definition_error(\n f'objects_arg \"{cls.objects_arg}\" not an argument to callable.'\n f' Possible parameters={func_parameters}'\n )",
"def validate_class_args(self, **kwargs):\n pass",
"def check_class_definition(cls):\n super().check_class_definition()\n\n if not cls.object_arg:\n cls.definition_error('Must provide \"object_arg\" attribute.')\n\n func_parameters = inspect.signature(arg.s()(cls.func).func).parameters\n if cls.object_arg not in func_parameters:\n cls.definition_error(\n f'object_arg \"{cls.object_arg}\" not an argument to callable.'\n f' Possible parameters={func_parameters}'\n )",
"def test_required_methods(self):",
"def test_required_methods(self):\n\n required_methods = ('__init__', 'load')\n\n for method in required_methods:\n self.assertIn(method, dir(DatasetLoader_Jakob2019))",
"def doesmatch(TheClass):\n import sys \n\n if sys.version_info.major < 3:\n return None\n S = TheClass.__base__\n for meth_name in dir(TheClass):\n if not hasattr(S, meth_name):\n continue\n meth = getattr(TheClass, meth_name)\n if(callable(meth)):\n try:\n match = (inspect.signature(meth) == inspect.signature(getattr(S,meth_name)))\n #assert(match)\n if not match:\n print(meth_name, ' : does not match parent signature', inspect.signature(meth) , inspect.signature(getattr(S,meth_name)))\n except ValueError:\n pass",
"def check_params(self):\n raise NotImplementedError",
"def _check_h(self, f, *args):\r\n\r\n msg = 'Handler \"[%s:%s] %s()\" must belong to a new style class '\r\n msg += 'and can\\'t be a static method'\r\n msg = msg % (f.func_code.co_filename, str(f.func_code.co_firstlineno),\r\n f.func_name)\r\n try:\r\n if not self._check(args[0]):\r\n raise TypeError(msg)\r\n except IndexError:\r\n raise TypeError(msg)",
"def _check(self, f, *args):\r\n\r\n msg = 'Event \"[%s:%s] %s()\" must belong to a new style class '\r\n msg += 'and can\\'t be a static method'\r\n msg = msg % (f.func_code.co_filename, str(f.func_code.co_firstlineno),\r\n f.func_name)\r\n try:\r\n if isinstance(args[0], (types.FunctionType, types.LambdaType,\r\n types.ClassType, types.InstanceType)):\r\n raise TypeError(msg)\r\n if not hasattr(args[0], '__dict__'):\r\n if not hasattr(args[0], '__slots__'):\r\n raise TypeError(msg)\r\n except IndexError:\r\n raise TypeError(msg)",
"def check_method_signature(abstract, concrete):\n abstract_spec = inspect.getargspec(abstract)\n concrete_spec = inspect.getargspec(concrete)\n #print \"args\",abstract_spec.args[1:], concrete_spec.args[1:]\n if (abstract_spec.args[1:] != concrete_spec.args[1:]\n or abstract_spec.defaults != concrete_spec.defaults\n or (abstract_spec.varargs is None) != (concrete_spec.varargs is None)\n or (abstract_spec.keywords is None) != (concrete_spec.keywords is None)\n ):\n raise NotImplementedError(\"%s.%s%s differs from %s.%s%s\"\n %(concrete.im_class.__name__,\n concrete.__name__,\n formatargs(concrete_spec),\n abstract.im_class.__name__,\n abstract.__name__,\n formatargs(abstract_spec),\n )\n )",
"def add_invariant_checks(cls: ClassT) -> None:\n # Candidates for the decoration as list of (name, dir() value)\n init_name_func = None # type: Optional[Tuple[str, Callable[..., None]]]\n names_funcs = [] # type: List[Tuple[str, Callable[..., None]]]\n names_properties = [] # type: List[Tuple[str, property]]\n\n # Filter out entries in the directory which are certainly not candidates for decoration.\n for name in dir(cls):\n value = getattr(cls, name)\n\n # __new__ is a special class method (though not marked properly with @classmethod!).\n # We need to ignore __repr__ to prevent endless loops when generating error messages.\n # __getattribute__, __setattr__ and __delattr__ are too invasive and alter the state of the instance.\n # Hence we don't consider them \"public\".\n if name in [\"__new__\", \"__repr__\", \"__getattribute__\", \"__setattr__\", \"__delattr__\"]:\n continue\n\n if name == \"__init__\":\n assert inspect.isfunction(value) or isinstance(value, _SLOT_WRAPPER_TYPE), \\\n \"Expected __init__ to be either a function or a slot wrapper, but got: {}\".format(\n type(value))\n\n init_name_func = (name, value)\n continue\n\n if not inspect.isfunction(value) and not isinstance(value, _SLOT_WRAPPER_TYPE) and \\\n not isinstance(value, property):\n continue\n\n # Ignore \"protected\"/\"private\" methods\n if name.startswith(\"_\") and not (name.startswith(\"__\") and name.endswith(\"__\")):\n continue\n\n if inspect.isfunction(value) or isinstance(value, _SLOT_WRAPPER_TYPE):\n # Ignore class methods\n if getattr(value, \"__self__\", None) is cls:\n continue\n\n # Ignore static methods\n # See https://stackoverflow.com/questions/14187973/python3-check-if-method-is-static\n bound_value = inspect.getattr_static(cls, name, None)\n if isinstance(bound_value, staticmethod):\n continue\n\n names_funcs.append((name, value))\n\n elif isinstance(value, property):\n names_properties.append((name, value))\n\n else:\n raise NotImplementedError(\"Unhandled directory entry of class {} for {}: {}\".format(cls, name, value))\n\n if init_name_func:\n name, func = init_name_func\n\n # We have to distinguish this special case which is used by named\n # tuples and possibly other optimized data structures.\n # In those cases, we have to wrap __new__ instead of __init__.\n if func == object.__init__ and hasattr(cls, \"__new__\"):\n new_func = getattr(cls, \"__new__\")\n setattr(cls, \"__new__\", _decorate_new_with_invariants(new_func))\n else:\n wrapper = _decorate_with_invariants(func=func, is_init=True)\n setattr(cls, name, wrapper)\n\n for name, func in names_funcs:\n wrapper = _decorate_with_invariants(func=func, is_init=False)\n setattr(cls, name, wrapper)\n\n for name, prop in names_properties:\n new_prop = property(\n fget=_decorate_with_invariants(func=prop.fget, is_init=False) if prop.fget else None,\n fset=_decorate_with_invariants(func=prop.fset, is_init=False) if prop.fset else None,\n fdel=_decorate_with_invariants(func=prop.fdel, is_init=False) if prop.fdel else None,\n doc=prop.__doc__)\n setattr(cls, name, new_prop)",
"def _verify_matching_signatures(implementation, dispatcher):\n implementation_spec = getargspec(implementation)\n dispatcher_spec = getargspec(dispatcher)\n\n if (implementation_spec.args != dispatcher_spec.args or\n implementation_spec.varargs != dispatcher_spec.varargs or\n implementation_spec.keywords != dispatcher_spec.keywords or\n (bool(implementation_spec.defaults) !=\n bool(dispatcher_spec.defaults)) or\n (implementation_spec.defaults is not None and\n len(implementation_spec.defaults) !=\n len(dispatcher_spec.defaults))):\n raise RuntimeError('implementation and dispatcher for %s have '\n 'different function signatures' % implementation)",
"def check_class_definition(cls):\n if not cls.callable:\n cls.definition_error('Must provide \"callable\" attribute.')\n\n if not re.match(r'\\w+', cls.name):\n cls.definition_error('Must provide alphanumeric \"name\" attribute.')\n\n if not re.match(r'\\w+', cls.app_label):\n cls.definition_error(\n 'Must provide alphanumeric \"app_label\" attribute.'\n )\n\n if len(cls.permission_codename) > 100:\n cls.definition_error(\n f'The permission_codename \"{cls.permission_codename}\"'\n ' exceeds 100 characters. Try making a shorter action name'\n ' or manually overridding the permission_codename attribute.'\n )",
"def check_signature(func, args_list):\n refsig = MethodSignature(func.__name__, args_list)\n actualsig = MethodSignature.from_callable(func)\n if refsig != actualsig:\n raise MethodSignatureMismatch(\n \"Expected {0}, not {1}\".format(refsig, actualsig)\n )\n return True",
"def check_parameters_match(func, *, cls=None, where):\n from numpydoc.validate import validate\n\n name = _func_name(func, cls)\n skip = not name.startswith(\"mne.\") or any(\n re.match(d, name) for d in docstring_ignores\n )\n if skip:\n return list()\n if cls is not None:\n for subclass, ignores in subclass_name_ignores:\n if issubclass(cls, subclass) and name.split(\".\")[-1] in ignores:\n return list()\n incorrect = [\n f\"{where} : {name} : {err[0]} : {err[1]}\"\n for err in validate(name)[\"errors\"]\n if err[0] not in error_ignores\n and (name.split(\".\")[-1], err[0]) not in error_ignores_specific\n ]\n # Add a check that all public functions and methods that have \"verbose\"\n # set the default verbose=None\n if cls is None:\n mod_or_class = importlib.import_module(\".\".join(name.split(\".\")[:-1]))\n else:\n mod_or_class = importlib.import_module(\".\".join(name.split(\".\")[:-2]))\n mod_or_class = getattr(mod_or_class, cls.__name__.split(\".\")[-1])\n callable_ = getattr(mod_or_class, name.split(\".\")[-1])\n try:\n sig = inspect.signature(callable_)\n except ValueError as exc:\n msg = str(exc)\n # E ValueError: no signature found for builtin type\n # <class 'mne.forward.forward.Forward'>\n if inspect.isclass(callable_) and \"no signature found for builtin type\" in msg:\n pass\n else:\n raise\n else:\n if \"verbose\" in sig.parameters:\n verbose_default = sig.parameters[\"verbose\"].default\n if verbose_default is not None:\n incorrect += [\n f\"{name} : verbose default is not None, \" f\"got: {verbose_default}\"\n ]\n return incorrect",
"def validate(cls, **kwargs: Any) -> None: # pragma no cover",
"def _check_e(self, class_, event):\r\n\r\n if not self._check(class_):\r\n msg = 'Event \"%s.%s()\" must belong to a new style class '\r\n msg += 'and can\\'t be a static method'\r\n raise TypeError(msg % (str(class_), str(event)))",
"def test_method_creation():\n my_method = SGMethod(\"Test\")\n \n assert my_method.name == \"Test\"\n assert len(my_method.params) == 0\n assert my_method.return_type == None",
"def signature_check(self, fn):\n if not isinstance(fn, type) or not issubclass(fn, forms.BaseForm):\n raise ValueError(\"validate_form only apply to Django Forms\")\n self.required_arguments_names = set()\n self.optional_arguments_names = {\"data\"}\n self.accepted_argument_names = {\"data\"}",
"def test_raises_when_method_incorrectly_declared(self):\n\n with pytest.raises(exceptions.APIImplementationError):\n\n class API(platform.PlatformAPI):\n def get_teams(self, a):\n pass",
"def assert_dataclass_signature_match(\n cls: \"class_type\", # type: ignore\n datacls: \"dataclass\", # type: ignore\n ignore_args: Optional[List[str]] = None,\n remap_args: Optional[Dict[str, str]] = None,\n):\n class_sig = inspect.signature(cls.__init__)\n\n class_params = dict(**class_sig.parameters)\n class_params.pop(\"self\")\n\n dataclass_sig = inspect.signature(datacls)\n\n dataclass_params = dict(**dataclass_sig.parameters)\n dataclass_params.pop(\"_target_\", None)\n\n class_params = set(class_params.keys()) # type: ignore\n dataclass_params = set(dataclass_params.keys()) # type: ignore\n\n if remap_args is not None:\n for original_arg, new_arg in remap_args.items():\n if original_arg in class_params:\n class_params.remove(original_arg) # type: ignore\n class_params.add(new_arg) # type: ignore\n logging.info(f\"Remapped {original_arg} -> {new_arg} in {cls.__name__}\")\n\n if original_arg in dataclass_params:\n dataclass_params.remove(original_arg) # type: ignore\n dataclass_params.add(new_arg) # type: ignore\n logging.info(f\"Remapped {original_arg} -> {new_arg} in {datacls.__name__}\")\n\n if ignore_args is not None:\n ignore_args = set(ignore_args) # type: ignore\n\n class_params = class_params - ignore_args # type: ignore\n dataclass_params = dataclass_params - ignore_args # type: ignore\n logging.info(f\"Removing ignored arguments - {ignore_args}\")\n\n intersection: Set[type] = set.intersection(class_params, dataclass_params) # type: ignore\n subset_cls = class_params - intersection # type: ignore\n subset_datacls = dataclass_params - intersection # type: ignore\n\n if (len(class_params) != len(dataclass_params)) or len(subset_cls) > 0 or len(subset_datacls) > 0:\n logging.error(f\"Class {cls.__name__} arguments do not match \" f\"Dataclass {datacls.__name__}!\")\n\n if len(subset_cls) > 0:\n logging.error(f\"Class {cls.__name__} has additional arguments :\\n\" f\"{subset_cls}\")\n\n if len(subset_datacls):\n logging.error(f\"Dataclass {datacls.__name__} has additional arguments :\\n{subset_datacls}\")\n\n return False, subset_cls, subset_datacls\n return True, None, None",
"def check_arguments(antns, lcls) -> None:\n for (arg, cls) in antns.items():\n if arg != 'return':\n if not isinstance(lcls[arg], cls):\n raise ValueError(\n (\"type({arg}) must be {cls}\\n\" +\n \"type({arg}) = {typ}\").format(\n arg=arg, cls=cls, typ=type(lcls[arg])))\n return None",
"def test_instance_method():\n assert hasattr(ResRNNBlock, '__init__')\n assert inspect.signature(ResRNNBlock.__init__) == Signature(\n parameters=[\n Parameter(\n name='self',\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n default=Parameter.empty,\n ),\n Parameter(\n name='d_hid',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='p_hid',\n kind=Parameter.KEYWORD_ONLY,\n annotation=float,\n default=Parameter.empty,\n ),\n Parameter(\n name='kwargs',\n kind=Parameter.VAR_KEYWORD,\n annotation=Optional[Dict],\n ),\n ],\n return_annotation=Signature.empty,\n )\n\n assert hasattr(ResRNNBlock, 'forward')\n assert inspect.signature(ResRNNBlock.forward) == Signature(\n parameters=[\n Parameter(\n name='self',\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n default=Parameter.empty,\n ),\n Parameter(\n name='batch_tk_reps',\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n annotation=torch.Tensor,\n default=Parameter.empty,\n ),\n ],\n return_annotation=torch.Tensor,\n )\n\n assert hasattr(ResRNNModel, '__init__')\n assert inspect.signature(ResRNNModel.__init__) == Signature(\n parameters=[\n Parameter(\n name='self',\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n default=Parameter.empty,\n ),\n Parameter(\n name='d_emb',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='d_hid',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_post_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_pre_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='p_emb',\n kind=Parameter.KEYWORD_ONLY,\n annotation=float,\n default=Parameter.empty,\n ),\n Parameter(\n name='p_hid',\n kind=Parameter.KEYWORD_ONLY,\n annotation=float,\n default=Parameter.empty,\n ),\n Parameter(\n name='tknzr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=BaseTknzr,\n default=Parameter.empty,\n ),\n Parameter(\n name='kwargs',\n kind=Parameter.VAR_KEYWORD,\n annotation=Optional[Dict],\n ),\n ],\n return_annotation=Signature.empty,\n )",
"def test_accepts_correctly_defined_method(self):\n expected = 42\n\n class API(platform.PlatformAPI):\n def __init__(self, base_url, token, org_name, user):\n pass\n\n def get_teams(self, team_names: Optional[List[str]] = None):\n return expected\n\n assert API(None, None, None, None).get_teams() == expected"
] | [
"0.6865951",
"0.6779388",
"0.6498488",
"0.6362184",
"0.62972075",
"0.62814236",
"0.6257724",
"0.62441605",
"0.6198573",
"0.61527693",
"0.6070059",
"0.60478127",
"0.6041627",
"0.6029048",
"0.5988869",
"0.59486306",
"0.5920295",
"0.5917137",
"0.59127486",
"0.587004",
"0.5849536",
"0.5759283",
"0.5748836",
"0.568802",
"0.5669536",
"0.56350267",
"0.5631413",
"0.56281835",
"0.5626255",
"0.5624029"
] | 0.6849367 | 1 |
Instantiate a StartFunction task. | def __init__(self, func=None, **kwargs):
self.func = func if func is not None else self.start_func_default
super(StartFunction, self).__init__(**kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _init_start(self):\n def start(core, args):\n task = ' '.join(args.task) if args.task else ''\n return core.start(task=task)\n\n usage = 'stl start [task]'\n desc = (\n 'make a log that you are starting to work'\n )\n\n subp = self.subparsers.add_parser(\n 'start', usage=usage, description=desc, help=desc)\n\n subp.add_argument(\n 'task', nargs=argparse.REMAINDER,\n help='the task that you are about to start working on')\n\n subp.set_defaults(func=start)",
"def __init__(self, func, task_loader=None, **kwargs):\n self.func = func\n self.task_loader = task_loader\n super(Function, self).__init__(**kwargs)",
"def create_task():",
"def start(func: Callable, scheduler: abc.Scheduler = None) -> ObservableBase:\n from ..operators.observable.start import start\n return start(func, scheduler)",
"def make_task(self):\n return Task()",
"def from_function(\n cls,\n task: ty.Callable,\n cleanup: ty.Callable=None,\n provides=tuple(),\n depends_on=tuple(),\n submit_to='thread',\n parallel=True,\n changing_inputs=False):\n if not len(provides) and len(depends_on):\n raise ValueError(\"Job must provide or depend on something\")\n self = type('Job' + plarx.random_str(10),\n (Job,),\n dict(provides=provides, depends_on=depends_on,\n submit_to=submit_to, parallel=parallel,\n changing_inputs=changing_inputs))\n self.task = MethodType(task, self)\n if self.cleanup is not None:\n self.cleanup = MethodType(cleanup, self)",
"def __init__(self,\n afunc: Callable[..., Awaitable[None]],\n interval: Union[int, float],\n args: tuple = (),\n kwargs: dict = {}):\n self.afunc = afunc\n self.args = args\n self.kwargs = kwargs\n self.interval = interval\n self.is_started = False\n self._task: Optional[asyncio.Future] = None",
"def schedule(self, hz: float, coroutine_function, priority, task_id, *args, **kwargs):\n assert coroutine_function is not None, \"coroutine function must not be none\"\n task = ScheduledTask(self, hz, coroutine_function, priority, task_id, args, kwargs)\n task.start()\n return task",
"def __init__(self, name, func=None):\n self.func = func\n self.name = name\n self.greenlet = greenlet.greenlet(self._entry, Sched.sched_greenlet)\n self.stopcallbacks = set()\n self.wait = None\n self.throwex = None\n\n Sched.active_tasks.append(self)\n Sched.tasks.add(self)",
"def __init__(self, new_task_name=''):\r\n self._handle = lib_importer.task_handle(0)\r\n\r\n cfunc = lib_importer.windll.DAQmxCreateTask\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [\r\n ctypes_byte_str,\r\n ctypes.POINTER(lib_importer.task_handle)]\r\n\r\n error_code = cfunc(\r\n new_task_name, ctypes.byref(self._handle))\r\n check_for_error(error_code)\r\n\r\n self._initialize(self._handle)",
"def run(self):\n task_func = getattr(self, self.task_data.get('task_type'))\n task_obj = task_func()\n return task_obj",
"def taskwrap(fn):\n coroutine = asyncio.coroutine(fn)\n\n @functools.wraps(fn)\n def create_task(*args, **kwargs):\n logger.debug('Create task %s', fn.__name__)\n loop = asyncio.get_event_loop()\n task = asyncio.async(coroutine(*args, **kwargs))\n task.add_done_callback(task_died)\n return task\n return create_task",
"def start_async(function_async: Callable) -> ObservableBase:\n from ..operators.observable.startasync import start_async\n return start_async(function_async)",
"def __init__(self, an_function: callable):\n print(f\"Instantiating a FalseCeleryApp for {an_function.__name__}.\")\n self.an_function = an_function",
"def start(self):\n if self._start is not None:\n raise ValueError, \"task %s already started\" % self._name\n self._start = 1\n self.run()",
"def task(self, *args, **options):\n\n def inner_create_task_cls(**options):\n\n def _create_task_cls(fun):\n options[\"app\"] = self\n options.setdefault(\"accept_magic_kwargs\", False)\n base = options.pop(\"base\", None) or self.Task\n\n @wraps(fun, assigned=(\"__module__\", \"__name__\"))\n def run(self, *args, **kwargs):\n return fun(*args, **kwargs)\n\n # Save the argspec for this task so we can recognize\n # which default task kwargs we're going to pass to it later.\n # (this happens in celery.utils.fun_takes_kwargs)\n run.argspec = getargspec(fun)\n\n cls_dict = dict(options, run=run,\n __module__=fun.__module__,\n __doc__=fun.__doc__)\n T = type(fun.__name__, (base, ), cls_dict)()\n return registry.tasks[T.name] # global instance.\n\n return _create_task_cls\n\n if len(args) == 1 and callable(args[0]):\n return inner_create_task_cls(**options)(*args)\n return inner_create_task_cls(**options)",
"def __init__(self,\n function: Callable):\n\n self._function = function",
"def task(\n self, name=None, extend=False, once=True, depends=None, desc=None,\n **vars\n ):\n def wrapper(fn):\n if name is not None:\n _name = name\n else:\n _name = fn.__name__\n\n entries = self._tasks.setdefault(_name, [])\n if len(entries) and not extend:\n raise Error(\"Task already defined: {0}\".format(_name))\n\n newtask = Task(self, fn, once, depends, desc, vars)\n entries.append(newtask)\n\n return fn\n return wrapper",
"def make_tasker(func):\n def anonFunc(*args, **kwdargs):\n class anonTask(Task):\n def execute(self):\n self.logger.debug(\"Executing fn %s\" % func)\n try:\n val = func(*args, **kwdargs)\n\n self.logger.debug(\"Done executing fn %s\" % func)\n return val\n\n except Exception as e:\n # Log error message and re-raise exception.\n self.logger.error(\"fn %s raised exception: %s\" % (\n func, str(e)))\n raise e\n\n return anonTask()\n return anonFunc",
"def task():\n pass",
"def task():\n pass",
"def task(self):\n return import_path_to_callable(self.func)",
"def exec(cls, *args, **kwargs):\n task = cls(*args, **kwargs)\n task.run()\n return task",
"def create_task(self, coro):\n task = self.loop.create_task(coro)\n return task",
"def add_task(name, func, help, is_default=False):\n cmd = click.Command(name=name, callback=func, help=help)\n cli.add_command(cmd)\n\n if is_default:\n # Store all functions here without name.\n DEFAULT_TASKS_KEY.append(func)\n\n return cli",
"def start_func_default(self, activation):\n activation.prepare()\n activation.done()\n return activation",
"def start_import_task(clientRequestToken=None, name=None, importUrl=None):\n pass",
"def __init__(self, callable_, time=1):\n Function.__init__(self) # callable_ could go here\n self.time = time\n self.callable = callable_",
"def task_init(self, param1):\n raise NotImplementedError",
"def task(self, callable, name=None):\n if name is None:\n name = callable.__name__\n if self.name:\n name = '%s.%s' % (self.name, name)\n if name in self.tasks:\n raise ValueError('task %r conflicts with existing task' % name)\n self.tasks[name] = callable\n return callable"
] | [
"0.64261806",
"0.6352809",
"0.6259454",
"0.61059666",
"0.6070468",
"0.6019398",
"0.5951755",
"0.593934",
"0.5893391",
"0.5785626",
"0.57718456",
"0.5657987",
"0.56442446",
"0.5641955",
"0.5632375",
"0.5630811",
"0.56288636",
"0.5609423",
"0.56023276",
"0.55829614",
"0.55829614",
"0.5553013",
"0.55490965",
"0.5546672",
"0.5506841",
"0.54955417",
"0.54848325",
"0.5468116",
"0.5445403",
"0.54324406"
] | 0.6456831 | 0 |
Instantiate a Function task. | def __init__(self, func, task_loader=None, **kwargs):
self.func = func
self.task_loader = task_loader
super(Function, self).__init__(**kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_task():",
"def from_function(\n cls,\n task: ty.Callable,\n cleanup: ty.Callable=None,\n provides=tuple(),\n depends_on=tuple(),\n submit_to='thread',\n parallel=True,\n changing_inputs=False):\n if not len(provides) and len(depends_on):\n raise ValueError(\"Job must provide or depend on something\")\n self = type('Job' + plarx.random_str(10),\n (Job,),\n dict(provides=provides, depends_on=depends_on,\n submit_to=submit_to, parallel=parallel,\n changing_inputs=changing_inputs))\n self.task = MethodType(task, self)\n if self.cleanup is not None:\n self.cleanup = MethodType(cleanup, self)",
"def make_task(self):\n return Task()",
"def make_tasker(func):\n def anonFunc(*args, **kwdargs):\n class anonTask(Task):\n def execute(self):\n self.logger.debug(\"Executing fn %s\" % func)\n try:\n val = func(*args, **kwdargs)\n\n self.logger.debug(\"Done executing fn %s\" % func)\n return val\n\n except Exception as e:\n # Log error message and re-raise exception.\n self.logger.error(\"fn %s raised exception: %s\" % (\n func, str(e)))\n raise e\n\n return anonTask()\n return anonFunc",
"def task(self, *args, **options):\n\n def inner_create_task_cls(**options):\n\n def _create_task_cls(fun):\n options[\"app\"] = self\n options.setdefault(\"accept_magic_kwargs\", False)\n base = options.pop(\"base\", None) or self.Task\n\n @wraps(fun, assigned=(\"__module__\", \"__name__\"))\n def run(self, *args, **kwargs):\n return fun(*args, **kwargs)\n\n # Save the argspec for this task so we can recognize\n # which default task kwargs we're going to pass to it later.\n # (this happens in celery.utils.fun_takes_kwargs)\n run.argspec = getargspec(fun)\n\n cls_dict = dict(options, run=run,\n __module__=fun.__module__,\n __doc__=fun.__doc__)\n T = type(fun.__name__, (base, ), cls_dict)()\n return registry.tasks[T.name] # global instance.\n\n return _create_task_cls\n\n if len(args) == 1 and callable(args[0]):\n return inner_create_task_cls(**options)(*args)\n return inner_create_task_cls(**options)",
"def create_task(self, name, value):\n pass",
"def factory(self, taskname, *args, **kwargs):\n import etc\n return str(apply(etc.tasks[taskname], args, kwargs))",
"def __init__(self, name, func=None):\n self.func = func\n self.name = name\n self.greenlet = greenlet.greenlet(self._entry, Sched.sched_greenlet)\n self.stopcallbacks = set()\n self.wait = None\n self.throwex = None\n\n Sched.active_tasks.append(self)\n Sched.tasks.add(self)",
"def run(self):\n task_func = getattr(self, self.task_data.get('task_type'))\n task_obj = task_func()\n return task_obj",
"def task(self, callable, name=None):\n if name is None:\n name = callable.__name__\n if self.name:\n name = '%s.%s' % (self.name, name)\n if name in self.tasks:\n raise ValueError('task %r conflicts with existing task' % name)\n self.tasks[name] = callable\n return callable",
"def __init__(self, new_task_name=''):\r\n self._handle = lib_importer.task_handle(0)\r\n\r\n cfunc = lib_importer.windll.DAQmxCreateTask\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [\r\n ctypes_byte_str,\r\n ctypes.POINTER(lib_importer.task_handle)]\r\n\r\n error_code = cfunc(\r\n new_task_name, ctypes.byref(self._handle))\r\n check_for_error(error_code)\r\n\r\n self._initialize(self._handle)",
"def task():\n pass",
"def task():\n pass",
"def _create_task(self, body, *, task_cls=Task):\n return task_cls(self, body)",
"def __init__(self,\n function: Callable):\n\n self._function = function",
"def task(\n self, name=None, extend=False, once=True, depends=None, desc=None,\n **vars\n ):\n def wrapper(fn):\n if name is not None:\n _name = name\n else:\n _name = fn.__name__\n\n entries = self._tasks.setdefault(_name, [])\n if len(entries) and not extend:\n raise Error(\"Task already defined: {0}\".format(_name))\n\n newtask = Task(self, fn, once, depends, desc, vars)\n entries.append(newtask)\n\n return fn\n return wrapper",
"def add_task(self, func, *args, **kargs):\r\n self.tasks.put((func, args, kargs))",
"def taskwrap(fn):\n coroutine = asyncio.coroutine(fn)\n\n @functools.wraps(fn)\n def create_task(*args, **kwargs):\n logger.debug('Create task %s', fn.__name__)\n loop = asyncio.get_event_loop()\n task = asyncio.async(coroutine(*args, **kwargs))\n task.add_done_callback(task_died)\n return task\n return create_task",
"def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))",
"def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))",
"def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))",
"def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))",
"def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))",
"def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))",
"def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))",
"def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))",
"def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))",
"def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))",
"def __init__(self, callable_, time=1):\n Function.__init__(self) # callable_ could go here\n self.time = time\n self.callable = callable_",
"def __init__(self, an_function: callable):\n print(f\"Instantiating a FalseCeleryApp for {an_function.__name__}.\")\n self.an_function = an_function"
] | [
"0.7264041",
"0.6980909",
"0.68889225",
"0.6819239",
"0.6739775",
"0.65774894",
"0.65699",
"0.6558215",
"0.6550279",
"0.6496677",
"0.6468632",
"0.64679205",
"0.64679205",
"0.6448362",
"0.644499",
"0.6404104",
"0.64028794",
"0.63484013",
"0.63439476",
"0.63439476",
"0.63439476",
"0.63439476",
"0.63439476",
"0.63439476",
"0.63439476",
"0.63439476",
"0.63439476",
"0.63439476",
"0.6318198",
"0.631688"
] | 0.722987 | 1 |
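A minimal, self-contained sketch of how the Function task row above might be exercised; the Task base class and the run() dispatch are assumptions for illustration (the row only shows __init__, and the real task_loader handling is not visible here):

class Task:
    def __init__(self, **kwargs):
        self.options = kwargs  # keep any extra task options

class Function(Task):
    # wraps a plain callable as a task (as in the document cell above)
    def __init__(self, func, task_loader=None, **kwargs):
        self.func = func
        self.task_loader = task_loader
        super(Function, self).__init__(**kwargs)

    def run(self, *args, **kwargs):  # assumed hook: delegate to the wrapped callable
        return self.func(*args, **kwargs)

task = Function(lambda x: x * 2, name="double")
print(task.run(21))  # 42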
Summary for every series | def base_summary(series: pd.Series) -> dict:
summary = {
"frequencies": series.value_counts().to_dict(),
"n_records": series.shape[0],
"memory_size": series.memory_usage(index=True, deep=True),
"dtype": series.dtype,
"types": series.map(lambda x: type(x).__name__).value_counts().to_dict(),
}
return summary | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _summary(self, name=None):\n if name is None:\n if len(self._tracker_dict.keys()) > 1:\n dataframes = []\n for (_name, tracker) in self._tracker_dict.items():\n summary_df = tracker.series.summary()\n summary_df = summary_df.rename_axis(self.PHASE)\n summary_df[self.SERIES] = _name\n dataframes.append(summary_df.reset_index())\n df = pd.concat(dataframes, ignore_index=True, sort=False)\n return df.set_index([self.SERIES, self.PHASE])\n name = self.MAIN\n return self._tracker(name).series.summary()",
"def summary(self):\n raise NotImplementedError",
"def summarize(self):\n return self.df.describe()",
"def get_forecast_summary(self):\n sum = {\"prior_var\":[], \"post_var\":[], \"percent_reduction\":[]}\n for forecast in self.prior_forecast.keys():\n pr = self.prior_forecast[forecast]\n pt = self.posterior_forecast[forecast]\n ur = 100.0 * (1.0 - (pt/pr))\n sum[\"prior_var\"].append(pr)\n sum[\"post_var\"].append(pt)\n sum[\"percent_reduction\"].append(ur)\n return pd.DataFrame(sum,index=self.prior_forecast.keys())",
"def summary(self):\n\n # only possible once we've fit a model with statsmodels\n check_is_fitted(self, \"statistics_\")\n sdf = pd.DataFrame(self.statistics_)\n sdf.rename(columns={\"lambda_\": \"lambda\"}, inplace=True)\n return sdf",
"def summary(self, i):\n return self.__summaries[i]",
"def get_summary_stats(self):\r\n n = len(self.results)\r\n\r\n if n == 0:\r\n mean = None\r\n stdev = None\r\n\r\n elif n == 1:\r\n mean = numpy.mean(self.results)\r\n stdev = None\r\n\r\n else:\r\n mean = numpy.mean(self.results)\r\n stdev = numpy.std(self.results)\r\n\r\n sum_stats = {'n': n, 'mean': mean, 'stdev': stdev}\r\n\r\n return sum_stats",
"def _printSummary(self):\n\t\t### COP OUT\n\t\tif self.params['background'] is True:\n\t\t\tself.stats['count'] += 1\n\t\t\treturn\n\n\t\t### THIS NEEDS TO BECOME MUCH MORE GENERAL, e.g. Peaks\n\t\ttdiff = time.time()-self.stats['startseries']\n\t\tif not self.params['continue'] or tdiff > 0.1:\n\t\t\tcount = self.stats['count']\n\t\t\t#if(count != self.stats['lastcount']):\n\t\t\tsys.stderr.write(\"\\n\\tSUMMARY: \"+self.functionname+\"\\n\")\n\t\t\tself._printLine()\n\t\t\tsys.stderr.write(\"\\tTIME: \\t\"+apDisplay.timeString(tdiff)+\"\\n\")\n\t\t\tself.stats['timesum'] = self.stats['timesum'] + tdiff\n\t\t\tself.stats['timesumsq'] = self.stats['timesumsq'] + (tdiff**2)\n\t\t\ttimesum = self.stats['timesum']\n\t\t\ttimesumsq = self.stats['timesumsq']\n\t\t\tif(count > 1):\n\t\t\t\ttimeavg = float(timesum)/float(count)\n\t\t\t\ttimestdev = math.sqrt(float(count*timesumsq - timesum**2) / float(count*(count-1)))\n\t\t\t\ttimeremain = (float(timeavg)+float(timestdev))*self.stats['seriesleft']\n\t\t\t\tsys.stderr.write(\"\\tAVG TIME: \\t\"+apDisplay.timeString(timeavg,timestdev)+\"\\n\")\n\t\t\t\t#print \"\\t(- TOTAL:\",apDisplay.timeString(timesum),\" -)\"\n\t\t\t\tif(self.stats['seriesleft'] > 0):\n\t\t\t\t\tsys.stderr.write(\"\\t(- REMAINING TIME: \"+apDisplay.timeString(timeremain)+\" for \"\n\t\t\t\t\t\t+str(self.stats['seriesleft'])+\" series -)\\n\")\n\t\t\t#print \"\\tMEM: \",(mem.active()-startmem)/1024,\"M (\",(mem.active()-startmem)/(1024*count),\"M)\"\n\t\t\tself.stats['count'] += 1\n\t\t\tself._printLine()",
"def summarize(self, data):\n\n return self.summary(data).flatten()",
"def station_stats(df):",
"def compute_summary(self, weather_data, ssh):\n\n for i, field in enumerate(self.weather_fields):\n weather_field = weather_data[...,i]\n\n self.summary[field] = (weather_field.mean(), weather_field.std())\n\n self.summary['ssh'] = (ssh.mean(), ssh.std())",
"def _summary(obj):\n return obj.summary",
"def doSummary(self):\n for name in self.stockList:\n tempVolume=0.\n for dateStr in self.listOfDates:\n rawTradeDataPath = FileNames.BinRTTradesDir + '/' + dateStr + '/' + name + '_trades.binRT'\n tradeReader = TAQTradesReader(rawTradeDataPath)\n tempVolume=tempVolume+np.nansum(tradeReader._s)/10000.0 # divide 10000 because otherwise the sum could exceed the range of int32\n self.dict[name]=tempVolume",
"def get_summary(self, df):\n results_df = pd.DataFrame({'Energy kWh': self.get_all_periods(df).sum()})\n results_df['Prices $/kWh'] = self.deliveryPrice + self.get_rates()\n results_df['Value $'] = results_df['Energy kWh'] * results_df['Prices $/kWh']\n return(results_df)",
"def list_of_scalars_summary(self, tag_value_pairs, step):\n for tag, value in tag_value_pairs:\n self.writer.add_scalar(tag, value, step)",
"def summary(self) -> Dict[str, Dict[str, float]]:\n vals: Dict[str, List[float]] = defaultdict(list)\n if not self.steps: # pragma: no cover\n return {}\n\n for timing_dict in self._timings:\n for step in self.steps:\n if step in timing_dict:\n vals[step].append(timing_dict[step])\n summary = {}\n for step in self.steps:\n if vals[step]:\n summary[step] = {\n \"cnt\": len(vals[step]),\n \"sum\": sum(vals[step]),\n \"min\": min(vals[step]),\n \"max\": max(vals[step]),\n \"avg\": sum(vals[step]) / len(vals[step]),\n }\n return summary",
"def summary_stats(self):\n capital_gains = self.df['values'].iloc[-1].sum() - self.tc.starting_cash\n total_return = capital_gains / self.tc.starting_cash\n days_invested = (self.df.index[-1] - self.df.index[0]).days\n annualized_returns = (total_return + 1) ** (365 / days_invested) - 1\n annualized_volatility = self.df['returns'].std() * (252 ** 0.5)\n sharpe = annualized_returns / annualized_volatility\n num_trades = self.trades.shape[0]\n stats = pd.Series(\n data=[capital_gains, total_return, annualized_returns, annualized_volatility, sharpe, num_trades],\n index=['Capital Gains', 'Total Return', 'Annualized Return', 'Annualized Volatility', 'Sharpe Ratio',\n 'Number of Trades']\n )\n return stats",
"def summary(df, city, month, day):\n time_stats(df, month, day)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df, city)\n display_data(df)",
"def summaries(\n cls, obj: pystac.Collection, add_if_missing: bool = False\n ) -> \"SummariesSarExtension\":\n cls.validate_has_extension(obj, add_if_missing)\n return SummariesSarExtension(obj)",
"def summarise(self):\n self.summary = az.summary(self.trace, var_names=[\"~chol\"], round_to=2)\n print(self.summary)\n return self.summary",
"def summary(self):\n\n self.model.summary(print_fn=lambda x: logging.info(x))",
"def summary(self, summary: str):\n return self.swag({\n 'summary': summary\n })",
"def summary(self):\n self.model.summary()",
"def summary(self, **kwargs):\n raise ValueError(\"This function is not available in lazy results evaluation as it would \"\n \"require all pairwise tests to be performed.\")",
"def build_summary(self):\n for k, v in self.metrics.items():\n tf.summary.scalar(k, v)\n \n self.summary_op = tf.summary.merge_all()",
"def printSummary(self):\n pass",
"def print_sum_series(n=7,zero_val=0,one_val=1):\n for i in range(n):\n print(sum_series(i,zero_val,one_val))",
"def sum_values(self):\n raise NotImplementedError",
"def get_graph_summary(self):\n\n pass",
"def handle_series_over(self, stats):\n #############################\n #\n #\n # YOUR CODE HERE\n #\n #\n #############################\n print(\"Series ended, these are the stats:\")\n print(str(stats))"
] | [
"0.7089863",
"0.65536255",
"0.653386",
"0.6407142",
"0.63080764",
"0.62465453",
"0.6217058",
"0.6183639",
"0.61406666",
"0.60740024",
"0.6058445",
"0.6054211",
"0.60406715",
"0.6035013",
"0.6030795",
"0.60064787",
"0.6001196",
"0.5956691",
"0.59167403",
"0.5907618",
"0.59047884",
"0.5876076",
"0.58749723",
"0.58673054",
"0.585996",
"0.5841434",
"0.58388513",
"0.58326524",
"0.5817307",
"0.581389"
] | 0.7123433 | 0 |
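A quick usage sketch for the base_summary document above on a tiny pandas Series (pandas assumed installed; base_summary as defined in the row above is in scope):

import pandas as pd

s = pd.Series(["a", "b", "b", None])
summary = base_summary(s)
print(summary["n_records"])    # 4
print(summary["frequencies"])  # {'b': 2, 'a': 1} -- value_counts() drops the None
print(summary["types"])        # {'str': 3, 'NoneType': 1}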
Creates a vector out of a string. Gets a string (e.g. Book), splits it into n-grams and returns a vector with all possible n-grams/features. | def create_vector(string):
vec = {}
words = string.split()
for word in words:
if len(word) <= NGRAM_SIZE:
add(vec, word)
else:
for i in range(len(word) - NGRAM_SIZE + 1):
add(vec, word[i : i + NGRAM_SIZE])
return vec | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_terms_from_string(s):\n u = s\n return u.split()",
"def ngramas(n, string):\n\n ngrams = []\n i = 0\n while i + n < len(string):\n ngrams.append(string[i:i + n])\n i += 1\n\n return ngrams",
"def from_string(string):\n return Sentence(string.split(\" \"))",
"def parse(string):\n doc = nlp(string)\n return [str(n) for n in doc.noun_chunks]",
"def tokenize(self, input_string: str) -> List[str]:",
"def str2vec(_str):\n vec = np.zeros(4 * 43)\n for i, ch in enumerate(_str):\n offset = i*43 + (ord(ch)-ord('0'))\n vec[offset] = 1\n return vec",
"def _ngrams(self, string_):\n def find_ngrams(input_list, n):\n return zip(*[input_list[i:] for i in range(n)])\n\n ngrams = []\n tokens = string_.split()\n\n for size in range(1, self._ngram_range + 1):\n tuples = find_ngrams(tokens, size)\n concatenated = [\"_\".join(tuple_) for tuple_ in tuples]\n ngrams.extend(concatenated)\n\n return \" \".join(ngrams)",
"def ngrams(name_string, n=3):\n\n string = re.sub(r'[,-./]|\\sBD', r'', name_string)\n n_grams = zip(*[string[i:] for i in range(n)])\n return [''.join(n_gram) for n_gram in n_grams]",
"def _parseVec(self, str):\r\n\t\tvec = []\r\n\t\tsplt = str.split()\r\n\t\tfor i in range(0,len(splt)):\r\n\t\t\tvec.append(self._parseNumber(splt[i]))\r\n\t\treturn vec",
"def training(string):\n print(\"Training...\")\n vec = create_vector(string)\n print(\"Selecting features...\")\n feature_list = select_features(vec)\n print(\"Done!\")\n return feature_list",
"def str_to_nmslib_vect(tokenizer, text):\n lst = unique(get_token_ids(tokenizer, text))\n lst.sort()\n return toks_to_str(lst)",
"def to_vector(text, model, idf, is_tokenized=False):\n if not is_tokenized: text= text.split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in text: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec",
"def convert_str_list_to_vector(self, string_list: Tuple[str]) -> numpy.ndarray:\n if len(string_list) != 4:\n logger.error(\"convert_str_list_to_vector got a too short or long string list: {}. We return a zero-vector!\",\n string_list)\n return numpy.zeros(shape=(self.word2vec_embedding_size +\n self.word2vec_embedding_size / 2 +\n self.word2vec_embedding_size / 3 +\n self.word2vec_embedding_size / 4,),\n dtype=\"float32\"\n )\n ret = numpy.zeros(shape=(0,), dtype=\"float32\")\n for i, token in enumerate(string_list):\n logger.trace(\"Process the {}. token \\\"{}\\\"\", (i + 1), string_list[i])\n ret = numpy.concatenate([ret,\n numpy.average(\n numpy.reshape(\n self.word2vec_dict.get(string_list[i],\n numpy.negative(\n numpy.ones(\n shape=(self.word2vec_embedding_size,),\n dtype=\"float32\")\n )),\n (int(self.word2vec_embedding_size / (i + 1)), (i + 1))\n ),\n axis=1)],\n axis=0)\n return ret",
"def get_genres_vector(genres_str: str):\n genres = genres_str.split(\";\")\n genres_vector = sum([one_hot.transform([[genre]]).toarray() for genre in genres])[0]\n return genres_vector",
"def extract(self, document):\n f_num = len(self.feature_list)\n feature_vector = np.zeros((f_num,))\n words = document.split()\n for i in xrange(len(words)):\n for n in self.ns:\n ngram = self.try_get_ngram(words, n, i)\n if ngram and ngram in self.ngrams:\n self.add_ngram(feature_vector, ngram)\n return feature_vector",
"def create_model_uniform(text: str) -> List[str]:\n return str.split(text)",
"def get_ngrams(s, ngram_range=1):\n # tokens = s.split()\n # return filter(lambda token: len(token)>1, tokens)\n # return bigrams(s.split()) # NLTK bigrams method\n words = s.split()\n return [' '.join(words[i:i+ngram_range]) for i in range(len(words)-1)]",
"def __init__(self, word_string, feature_table):\n self.word_string = word_string\n self.feature_table = feature_table\n self.segments = [Segment(char, self.feature_table) for char in self.word_string]",
"def build_input_vector(sample_text):\n return count_chars(sample_text.lower())",
"def create_feature_map(string, features):\n fmap = {}\n vec = create_vector(string)\n\n for ngram in features:\n if ngram in vec:\n fmap[ngram] = vec[ngram]\n\n return fmap",
"def embed(text: str) -> np.ndarray:\n n = nlp(text)\n return n.vector",
"def bigram_representation(data):\r\n vec = CountVectorizer(ngram_range=(1,2))\r\n vec = vec.fit(data)\r\n return vec",
"def text_to_vecs(self):\n # convert word strings into word vectors\n sent_vec = []\n for w in self.sentence:\n if w in self.word_vectors.getVocab():\n sent_vec.append( self.word_vectors.getWordVectors()[w] )\n else:\n sent_vec.append( self.word_vectors.getOOVWordVector() )\n \n assert(len(self.sentence) == len(sent_vec)) \n self.sent_vec = sent_vec",
"def word2ngrams(text, n=3):\n return [text[i:i+n] for i in range(len(text)-n+1)]",
"def to_vector(texto,model,idf):\n tokens = normalizer(texto).split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in tokens: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec",
"def tokenize(self, string):\n tokens = [tok if tok in self.token_to_ix else self.unk\n for tok in string.split()]\n return [self.bos] + tokens + [self.eos]",
"def parse(self, text):\n return self.dict.txt2vec(text)",
"def stringToAxiom(string):\n sentence = []\n splitted = string.split(\" \")\n for mod in splitted:\n sentence.append(stringToMod(mod.strip()))\n return(sentence)",
"def doc2vec(self, text: str) -> np.array:\n # tfidf_matrix = self.tfidf.transform([text])\n # vectors = []\n # for token in self.tokenize(text):\n # if token in self.word2vec and token in self.feature_names:\n # tfidf_score = tfidf_matrix[0, self.feature_names.index(token)]\n # vectors.append(self.word2vec[token] * tfidf_score)\n vectors = [self.word2vec[token] for token in self.tokenize(text) if token in self.word2vec]\n if not vectors:\n return np.zeros(300)\n return np.mean(vectors, axis=0)",
"def unigram_representation(data):\r\n vec = CountVectorizer()\r\n vec = vec.fit(data)\r\n return vec"
] | [
"0.6692081",
"0.620363",
"0.61938727",
"0.6170458",
"0.6045155",
"0.59495336",
"0.59380984",
"0.592855",
"0.587873",
"0.5878487",
"0.5867894",
"0.5849799",
"0.58483064",
"0.5833884",
"0.58311754",
"0.58128583",
"0.5803685",
"0.57831234",
"0.5774572",
"0.57539713",
"0.5753545",
"0.5696559",
"0.56721747",
"0.5664085",
"0.56590295",
"0.56485546",
"0.56328875",
"0.56313646",
"0.5617541",
"0.560025"
] | 0.7425697 | 0 |
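Illustrative run of create_vector from the row above, assuming NGRAM_SIZE = 3 and an add(vec, key) helper that increments a count; both are referenced in the document cell but not defined in this excerpt:

NGRAM_SIZE = 3

def add(vec, key):
    # assumed helper: bump the n-gram count
    vec[key] = vec.get(key, 0) + 1

print(create_vector("book case"))
# {'boo': 1, 'ook': 1, 'cas': 1, 'ase': 1}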
Calculates the minmax similarity of two vectors. Calculates minmax similarity of two vectors vec_x and vec_y. | def minmax(vec_x, vec_y):
minsum = 0
maxsum = 0
for ngram in vec_x:
if ngram in vec_y:
# ngram is in both vectors
minsum += min(vec_x[ngram], vec_y[ngram])
maxsum += max(vec_x[ngram], vec_y[ngram])
else:
# ngram only in vec_x
maxsum += vec_x[ngram]
for ngram in vec_y:
if ngram not in vec_x:
# ngram only in vec_y
maxsum += vec_y[ngram]
if maxsum == 0:
return 0
return float(minsum) / maxsum | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cosine_similarity(vector_x, vector_y):\n if(len(vector_x)!=len(vector_y)):\n raise Exception('Vectors must be the same dimensions')\n \n return 1-np.dot(vector_x,vector_y)/(np.linalg.norm(vector_x)*np.linalg.norm(vector_y))",
"def cosine_similarity(vec_x, vec_y):\n sim_prod = 0.0\n len_x = 0\n len_y = 0\n\n for ngram in vec_x:\n len_x += vec_x[ngram] ** 2\n\n for ngram in vec_y:\n len_y += vec_y[ngram] ** 2\n\n len_x = math.sqrt(len_x)\n len_y = math.sqrt(len_y)\n\n for ngram in vec_x:\n if ngram in vec_y:\n sim_prod += vec_x[ngram] * vec_y[ngram]\n\n return sim_prod / (len_x * len_y)",
"def pairwise_dot_product_similarity(x, y):\n return torch.mm(x, torch.transpose(y, 1, 0))",
"def calc_euclidean_similarity(vec_1, vec_2):\n sim = 0\n vec_1 = vec_1.reshape((vec_1.shape[1],))\n vec_2 = vec_2.reshape((vec_2.shape[1],))\n vec_1_nnz = np.nonzero(vec_1)[0]\n print vec_1_nnz\n # import ipdb; ipdb.set_trace()\n vec_2_nnz = np.nonzero(vec_2)[0]\n print vec_2_nnz\n intersect = set(vec_1_nnz) & set(vec_2_nnz)\n if len(intersect) > 0:\n error_squares = [pow(vec_1[arg] - vec_2[arg], 2) for arg in intersect]\n sim = 1.0 / (1 + np.sqrt(np.sum(error_squares)))\n return sim",
"def _vector_dist(self, vec1, vec2):\r\n return sqrt(sum([(float(v1) - float(v2)) ** 2 for v1, v2 in\r\n zip(vec1, vec2)]))",
"def test_sim(vec_x, vec_y, feature_list, func):\n feature_map_x = create_feature_map(vec_x, feature_list)\n feature_map_y = create_feature_map(vec_y, feature_list)\n\n if func == 0:\n return cosine_similarity(feature_map_x, feature_map_y)\n\n return minmax(feature_map_x, feature_map_y)",
"def cosine_similarity(cls, vec_a, vec_b):\n return np.dot(vec_a, vec_b) / \\\n (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))",
"def overlap_similarity(vect1, vect2, normalize=False, use_bigrams=False):\n overlap = len(set(vect1).intersection(set(vect2)))\n\n if use_bigrams:\n overlap += len(set(bigrams(vect1)).intersection(set(bigrams(vect2))))\n\n if not normalize:\n return overlap\n \n if overlap == 0:\n return 0\n \n return overlap / (math.log10(len(vect1)) + math.log10(len(vect2)))",
"def squaredDistance(vec1, vec2):\n return (distance.euclidean(vec1, vec2))**2",
"def similarity(centroid_a, centroid_b):\n \n vector_a = centroid_a.centroid_vector\n vector_b = centroid_b.centroid_vector\n \n length_a = centroid_a.length\n length_b = centroid_b.length\n \n dotproduct = 0.0\n\n for key, value in vector_a.iteritems():\n if key in vector_b: # if both vectors have the key\n dotproduct += (value * vector_b[key])\n\n return float(dotproduct / (length_a * length_b))",
"def pairwise_euclidean_similarity(x, y):\n s = 2 * torch.mm(x, torch.transpose(y, 1, 0))\n diag_x = torch.sum(x * x, dim=-1)\n diag_x = torch.unsqueeze(diag_x, 0)\n diag_y = torch.reshape(torch.sum(y * y, dim=-1), (1, -1))\n\n return s - diag_x - diag_y",
"def cosine_similarity(vec1, vec2) -> float:\n return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))",
"def compare_vectors(v1, v2):\n if len(v1) == len(v2):\n distance = 0\n for i in xrange(len(v1)):\n distance += (v1[i] - v2[i]) ** 2\n return distance\n else:\n print \"vector not match in dimensions\"",
"def EuclideanDistanceSq( self, a, b ):\n if not (type(a) == list or type(a) == Vector):\n a = [a]\n if not (type(b) == list or type(a) == Vector):\n b = [b]\n assert len(a) == len(b)\n sqDist = 0\n for x,y in zip(a,b):\n sqDist += (x-y)**2\n return sqDist",
"def minmaxdenorm2d(v, minv0, maxv0, minv1, maxv1):\n v[:, 0] *= maxv0 - minv0\n v[:, 0] += minv0\n v[:, 1] *= maxv1 - minv1\n v[:, 1] += minv1\n return v",
"def compute_similarity(site_a, site_b):\n return np.linalg.norm(site_a - site_b)",
"def mse (vec1, vec2):\n sum = 0.0 #Initializes sum to 0\n count = len(vec1) #Number of total elements in each vector\n for i in range(count):\n sum += (vec2[i]-vec1[i])**2 #Adds the square of the difference between the values at each position in the two vectors\n return sum/count",
"def similarity_function(x, y):\n\n def safe_get(field, row, default_value):\n # Safely get a value from the Row. If the value is None, get the\n # default value.\n return row[field] if row[field] is not None else default_value\n\n # Extract the values for the categorical and continuous features for both\n # the x and y samples. Use an empty string as the default value for missing\n # categorical fields and 0 for the continuous ones.\n x_categorical_features = [safe_get(k, x, \"\") for k in CATEGORICAL_FEATURES]\n x_continuous_features = [safe_get(k, x, 0) for k in CONTINUOUS_FEATURES]\n y_categorical_features = [safe_get(k, y, \"\") for k in CATEGORICAL_FEATURES]\n y_continuous_features = [safe_get(k, y, 0) for k in CONTINUOUS_FEATURES]\n\n # Here a larger distance indicates a poorer match between categorical variables.\n j_d = distance.hamming(x_categorical_features, y_categorical_features)\n j_c = distance.canberra(x_continuous_features, y_continuous_features)\n\n # Take the product of similarities to attain a univariate similarity score.\n # Add a minimal constant to prevent zero values from categorical features.\n # Note: since both the distance function return a Numpy type, we need to\n # call the |item| function to get the underlying Python type. If we don't\n # do that this job will fail when performing KDE due to SPARK-20803 on\n # Spark 2.2.0.\n return abs((j_c + 0.001) * j_d).item()",
"def test_vector_dist(self):\r\n v1 = [1, 4, 2]\r\n v2 = [-1, 12, 4]\r\n\r\n exp = 8.48528137424\r\n obs = self.best._vector_dist(v1, v2)\r\n assert_almost_equal(exp, obs)\r\n\r\n v1 = [1, 2, 100, 4, 2]\r\n v2 = [-1, 12, 4, 12, 99]\r\n\r\n exp = 137.087563258\r\n obs = self.best._vector_dist(v1, v2)\r\n assert_almost_equal(exp, obs)",
"def minimum_distance(object_1, object_2):\n\n # package import\n import numpy as np\n\n # main algorithm\n minimum_distance = 100000\n\n for coord_1 in object_1:\n for coord_2 in object_2:\n distance_btwn_coords = np.linalg.norm(coord_1 - coord_2)\n if distance_btwn_coords == 0:\n minimum_distance = distance_btwn_coords\n return float(minimum_distance)\n elif distance_btwn_coords < minimum_distance:\n minimum_distance = distance_btwn_coords\n\n return float(minimum_distance)",
"def min_scalar_prod(x, y):\n x = sorted(x) # make copies\n y = sorted(y) # to save arguments\n return sum(x[i] * y[-i - 1] for i in range(len(x)))",
"def damerau_levenshtein_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(damerau_levenshtein_distance(s1, s2)) / max_cost",
"def cosine_similarity(self, x, y):\n return np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))",
"def cosine_similarity(v1, v2):\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))",
"def calcCrossMag(v1,v2):\n # Calculate the magnitude of cross product of two vectors\n\n return(abs(np.linalg.norm(np.cross(v1,v2))))",
"def closest_dist(x, y, x_list, y_list):\n points = np.array([x, y]).T\n points_list = np.array([x_list, y_list]).T\n\n dpt0 = points_list[:, 0] - points[:, 0, np.newaxis]\n dpt1 = points_list[:, 1] - points[:, 1, np.newaxis]\n\n return np.argmin((dpt0*dpt0 + dpt1*dpt1), axis=1)",
"def GetDistance(vec1,vec2):\n diff = np.asarray(vec1) - np.asarray(vec2)\n squareDistance = np.dot(diff.T, diff)\n return math.sqrt(squareDistance)",
"def compute_similarity_transform(source_points, target_points):\n assert target_points.shape[0] == source_points.shape[0]\n assert target_points.shape[1] == 3 and source_points.shape[1] == 3\n source_points = source_points.T\n target_points = target_points.T\n mu1 = source_points.mean(axis=1, keepdims=True)\n mu2 = target_points.mean(axis=1, keepdims=True)\n X1 = source_points - mu1\n X2 = target_points - mu2\n var1 = np.sum(X1 ** 2)\n K = X1.dot(X2.T)\n U, _, Vh = np.linalg.svd(K)\n V = Vh.T\n Z = np.eye(U.shape[0])\n Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))\n R = V.dot(Z.dot(U.T))\n scale = np.trace(R.dot(K)) / var1\n t = mu2 - scale * R.dot(mu1)\n source_points_hat = scale * R.dot(source_points) + t\n source_points_hat = source_points_hat.T\n return source_points_hat",
"def similarity_vec(self, vec1: numpy.ndarray, vec2: numpy.ndarray, metric='cosine') -> float:\n if numpy.count_nonzero(vec1) == 0 or numpy.count_nonzero(vec2) == 0:\n if metric == 'cosine':\n return 0.\n else:\n return 0.\n\n vec1 = vec1.reshape((1, -1))\n vec2 = vec2.reshape((1, -1))\n if metric == 'cosine':\n return (1 - distance.cdist(vec1, vec2, metric=metric).reshape(-1))[0]\n else:\n return distance.cdist(vec1, vec2, metric=metric).reshape(-1)[0]",
"def compare_vectors(word_vector1, word_vector2):\n all_words = list(set(word_vector1).union(set(word_vector2)))\n frequency_dict1 = word_frequencies(word_vector1)\n frequency_dict2 = word_frequencies(word_vector2)\n\n frequency_vector1 = [frequency_dict1.get(word, 0) for word in all_words]\n frequency_vector2 = [frequency_dict2.get(word, 0) for word in all_words]\n\n return similarity(frequency_vector1, frequency_vector2)"
] | [
"0.62996674",
"0.6249005",
"0.6130107",
"0.6097062",
"0.6059761",
"0.6027226",
"0.59534824",
"0.5932027",
"0.59160405",
"0.58726966",
"0.58326",
"0.5826938",
"0.58192986",
"0.579559",
"0.5791049",
"0.57742333",
"0.5760231",
"0.57585263",
"0.57527345",
"0.575017",
"0.5737195",
"0.5716961",
"0.57102805",
"0.5688058",
"0.5655156",
"0.564972",
"0.5643421",
"0.564055",
"0.5622798",
"0.5605551"
] | 0.78629965 | 0 |
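Worked example for minmax from the row above: shared n-grams contribute the min/max of their counts, while n-grams present in only one vector inflate the denominator only.

x = {"boo": 2, "ook": 1}
y = {"boo": 1, "cas": 3}
# minsum = min(2, 1)                         = 1
# maxsum = max(2, 1) + 1 ('ook') + 3 ('cas') = 6
print(minmax(x, y))  # 0.1666...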
Returns a feature list of the vector from the string. Turns a given string into an n-gram vector and returns its feature list. | def training(string):
print("Training...")
vec = create_vector(string)
print("Selecting features...")
feature_list = select_features(vec)
print("Done!")
return feature_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_vector(string):\n vec = {}\n words = string.split()\n\n for word in words:\n if len(word) <= NGRAM_SIZE:\n add(vec, word)\n else:\n for i in range(len(word) - NGRAM_SIZE + 1):\n add(vec, word[i : i + NGRAM_SIZE])\n\n return vec",
"def create_feature_map(string, features):\n fmap = {}\n vec = create_vector(string)\n\n for ngram in features:\n if ngram in vec:\n fmap[ngram] = vec[ngram]\n\n return fmap",
"def convert_str_list_to_vector(self, string_list: Tuple[str]) -> numpy.ndarray:\n if len(string_list) != 4:\n logger.error(\"convert_str_list_to_vector got a too short or long string list: {}. We return a zero-vector!\",\n string_list)\n return numpy.zeros(shape=(self.word2vec_embedding_size +\n self.word2vec_embedding_size / 2 +\n self.word2vec_embedding_size / 3 +\n self.word2vec_embedding_size / 4,),\n dtype=\"float32\"\n )\n ret = numpy.zeros(shape=(0,), dtype=\"float32\")\n for i, token in enumerate(string_list):\n logger.trace(\"Process the {}. token \\\"{}\\\"\", (i + 1), string_list[i])\n ret = numpy.concatenate([ret,\n numpy.average(\n numpy.reshape(\n self.word2vec_dict.get(string_list[i],\n numpy.negative(\n numpy.ones(\n shape=(self.word2vec_embedding_size,),\n dtype=\"float32\")\n )),\n (int(self.word2vec_embedding_size / (i + 1)), (i + 1))\n ),\n axis=1)],\n axis=0)\n return ret",
"def word_to_vector_list(self, word, numeric=False, xsampa=False, normalize=True):\n if xsampa:\n word = self.xsampa.convert(word)\n segs = self.word_fts(word, normalize or xsampa)\n if numeric:\n tensor = [x.numeric() for x in segs]\n else:\n tensor = [x.strings() for x in segs]\n return tensor",
"def extract(self, document):\n f_num = len(self.feature_list)\n feature_vector = np.zeros((f_num,))\n words = document.split()\n for i in xrange(len(words)):\n for n in self.ns:\n ngram = self.try_get_ngram(words, n, i)\n if ngram and ngram in self.ngrams:\n self.add_ngram(feature_vector, ngram)\n return feature_vector",
"def _parseVec(self, str):\r\n\t\tvec = []\r\n\t\tsplt = str.split()\r\n\t\tfor i in range(0,len(splt)):\r\n\t\t\tvec.append(self._parseNumber(splt[i]))\r\n\t\treturn vec",
"def _create_feature_vec():\n\tnum_tags = NGRAM_TUPLE[0]\n\tfvec = []\n\tfor _, size in FEATURE_TUPLE:\n\t\tfvec.append(np.zeros((num_tags, size)))\n\n\t# Append tag ngram weights to end\n\tfvec.append(np.zeros((num_tags, num_tags)))\n\treturn fvec",
"def ngrams(name_string, n=3):\n\n string = re.sub(r'[,-./]|\\sBD', r'', name_string)\n n_grams = zip(*[string[i:] for i in range(n)])\n return [''.join(n_gram) for n_gram in n_grams]",
"def __call__(self, string, include_gd=True): # -> \"TokenList\":\r\n self.load(string)\r\n result = []\r\n while True:\r\n try:\r\n result.append(self.nextToken(include_gd))\r\n except:\r\n break\r\n return result",
"def get_ngram_features(train_data, test_data):\n print(\"getting ngram features\")\n ngram_vectorizer = CountVectorizer(ngram_range = (1, 2))\n ngram_vectorizer = ngram_vectorizer.fit(train_data)\n return ngram_vectorizer.transform(train_data), ngram_vectorizer.transform(test_data)",
"def get_seg_features(string):\n seg_feature = []\n\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature",
"def _get_seg_features(string):\n seg_feature = []\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature",
"def ngramas(n, string):\n\n ngrams = []\n i = 0\n while i + n < len(string):\n ngrams.append(string[i:i + n])\n i += 1\n\n return ngrams",
"def string_features_v1(str):\n N = float(len(str))\n if N==0: return None\n a = len(re.findall(r'/', str))/N\n b = len(re.findall(r'\\.', str))/N\n c = len(re.findall(r'-', str))/N\n d = len(re.findall(r'_', str))/N\n cap = len(re.findall(r'[A-Z]', str))/N\n num = len(re.findall(r'[0-9]', str))/N\n return [log(N), a, b, c, d, num, cap]",
"def __call__(self, string, include_gd=True): # -> \"TokenList\":\r\n self.string = string\r\n return [x for x in self.nextToken(include_gd)]",
"def parse(self, text):\n return self.dict.txt2vec(text)",
"def tokenize(self, input_string: str) -> List[str]:",
"def to_vector(text, model, idf, is_tokenized=False):\n if not is_tokenized: text= text.split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in text: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec",
"def get_genres_vector(genres_str: str):\n genres = genres_str.split(\";\")\n genres_vector = sum([one_hot.transform([[genre]]).toarray() for genre in genres])[0]\n return genres_vector",
"def parse(string):\n doc = nlp(string)\n return [str(n) for n in doc.noun_chunks]",
"def str_to_nmslib_vect(tokenizer, text):\n lst = unique(get_token_ids(tokenizer, text))\n lst.sort()\n return toks_to_str(lst)",
"def getVector(text):\n url = cfg.use_vectoriser\n res = requests.post(url, json={'text': text, 'access_key': cfg.vectoriser_access_key})\n res_dictionary = res.json()\n return res_dictionary['vectors']",
"def extract_vector(inst, neighbor_word_list, _4c_4d_feature, language):\n if language.__eq__(\"English\"):\n sentence = inst.getElementsByTagName('context')[0]\n else:\n sentence = inst.getElementsByTagName('context')[0].getElementsByTagName('target')[0]\n\n x = []\n neighbors = {}\n left_list, right_list = get_left_right_lists(sentence, language)\n\n for word in left_list[-k:]:\n count = neighbors.get(word, 0)\n neighbors[word] = count + 1\n for word in right_list[:k]:\n count = neighbors.get(word, 0)\n neighbors[word] = count + 1\n\n for i in xrange(neighbor_word_list.__len__()):\n n = neighbors.get(neighbor_word_list[i], 0)\n if vector_0_1 and n > 0:\n n = 1\n x.append(n)\n\n for i in xrange(_4c_4d_feature.__len__()):\n n = neighbors.get(_4c_4d_feature[i], 0)\n if vector_0_1 and n > 0:\n n = 1\n x.append(n)\n return x",
"def segment_to_vector(self, seg, normalize=True):\n return self.fts(seg, normalize).strings()",
"def get_ngrams(s, ngram_range=1):\n # tokens = s.split()\n # return filter(lambda token: len(token)>1, tokens)\n # return bigrams(s.split()) # NLTK bigrams method\n words = s.split()\n return [' '.join(words[i:i+ngram_range]) for i in range(len(words)-1)]",
"def readVector(text):\n items = text.split()\n if int(items[0])+1 != len(items):\n raise ValueError(\"Invalid number of items\")\n return [float(v) for v in items[1:]]",
"def _ngrams(self, string_):\n def find_ngrams(input_list, n):\n return zip(*[input_list[i:] for i in range(n)])\n\n ngrams = []\n tokens = string_.split()\n\n for size in range(1, self._ngram_range + 1):\n tuples = find_ngrams(tokens, size)\n concatenated = [\"_\".join(tuple_) for tuple_ in tuples]\n ngrams.extend(concatenated)\n\n return \" \".join(ngrams)",
"def generate_vector(text, tf=None):\n if not _trained:\n print(\"Make sure to train parameterizer first\")\n exit(1)\n if tf is None:\n tf = term_frequency.generate_vector(text)\n vector = []\n for i in range(len(tf)):\n vector.append(tf[i] * _idfs[i])\n return vector",
"def word2vec(self, sentence: str):\n tokens = nltk.word_tokenize(sentence)\n v = [self.word_dict.get(token, 0) for token in tokens]\n return v",
"def transform(self, strings):\n\n logger.debug(\"Converting {} strings into lists of \"\n \"sentences.\".format(len(strings)))\n\n tokenized_strings = []\n for text in strings:\n tokenized_strings.append(text_to_wordlist(text, remove_stopwords=True))\n\n # Pre-allocate a 2D numpy array, for speed\n feature_vecs = np.zeros((len(tokenized_strings), self.num_features),\n dtype=\"float32\")\n\n # Loop through the strings\n for counter, word_list in enumerate(tokenized_strings):\n\n # Call the function (defined above) that makes average feature vectors\n feature_vecs[counter] = self._make_feature_vec(word_list)\n\n # For DEBUG only\n if np.isnan(feature_vecs[counter][0]):\n import ipdb;ipdb.set_trace()\n\n\n return feature_vecs"
] | [
"0.66118634",
"0.64273095",
"0.6327371",
"0.6292743",
"0.62504566",
"0.6237537",
"0.61925775",
"0.61103535",
"0.60202676",
"0.6020088",
"0.5987974",
"0.5959671",
"0.59375215",
"0.59162277",
"0.59138423",
"0.59009033",
"0.58965003",
"0.5834525",
"0.58342415",
"0.5805564",
"0.57944626",
"0.5751324",
"0.5725428",
"0.570712",
"0.57040644",
"0.5691705",
"0.5666668",
"0.5610515",
"0.5574044",
"0.5568376"
] | 0.6722822 | 0 |
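Hedged sketch of how training from the row above fits the wider pipeline (create_feature_map and test_sim appear among the negatives of earlier rows); select_features is not shown in this excerpt, so a plausible stand-in that keeps the most frequent n-grams is assumed, and the create_vector sketch above must be in scope:

def select_features(vec, n_features=500):
    # assumed behaviour: keep the n_features most frequent n-grams
    return [ng for ng, _ in sorted(vec.items(), key=lambda kv: -kv[1])[:n_features]]

feature_list = training("the quick brown fox jumps over the lazy dog")
print(feature_list[:5])  # the most frequent trigrams of the reference text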
Returns a random part of a string. Returns a random part of a string s that has a given length. | def get_random_string(string, length):
words = string.split()
random_part = random.randint(0, len(words) - length)
    return " ".join(words[random_part : random_part + length]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def my_random_string(string_length=17):\n random = str(uuid.uuid4())\n random = random.upper() \n random = random.replace(\"-\",\"\")\n return random[0:string_length]",
"def random_string(length=8, chars=string.ascii_letters + string.digits):\n return ''.join([chars[random.randint(0, len(chars) - 1)] for i in range(length)])",
"def get_random_string(length: int) -> str:\n return \"\".join(random.choices(string.ascii_letters + string.digits, k=length))",
"def random_string(length, characters=string.ascii_letters + string.digits):\n return \"\".join(random.choice(characters) for i in range(length))",
"def random_string(length=8):\n return \"\".join([random.choice(string.letters + string.digits) for x in range(length)])",
"def getRandomString(length):\n\treturn \"\".join(\n\t\trandom.choice(_RANDOM_STRING_OK_CHARS) for c in range(length))",
"def random_string(length=5):\n possibles = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'\n return ''.join(random.choice(possibles) for i in range(0, length))",
"def random_string(stringlength=10):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringlength))",
"def random_string(stringLength=10):\n letters = string.ascii_lowercase\n return ''.join(choice(letters) for i in range(stringLength))",
"def randomString(stringLength):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))",
"def randomString(stringLength):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))",
"def randomstring(stringLength=10):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))",
"def randomString(stringLength):\n letters = string.ascii_letters\n return ''.join(random.choice(letters) for i in range(stringLength))",
"def randomString(stringLength=8):\r\n return ''.join(random.choice(letters) for i in range(stringLength))",
"def randomString(stringLength):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))",
"def randomString(stringLength):\n\n letters = string.ascii_letters\n return ''.join(random.choice(letters) for i in range(stringLength))",
"def random_string(length=10):\n\n\tletters = string.ascii_lowercase\n\n\treturn ''.join(random.choice(letters) for i in xrange(length))",
"def rndstr(length):\n return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length))",
"def get_random_string(length=5):\n return ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits)\n for _ in range(length))",
"def random_string(string_length=15):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(string_length))",
"def random_string(stringLength=10):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))",
"def randomString(stringLength=5):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))",
"def random_string(length):\n return ''.join(SystemRandom().choice(ascii_letters + digits)\n for i in range(length))",
"def randomString(stringLength=27):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))",
"def random_string(string_length=10):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(string_length))",
"def random_string(string_length=10):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(string_length))",
"def randomString(stringLength: int = 10) -> str:\r\n letters = string.ascii_lowercase\r\n return ''.join(choice(letters) for i in range(stringLength))",
"def randomString(stringLength=10):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))",
"def randomString(stringLength=10):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))",
"def randomString(stringLength=10):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))"
] | [
"0.72976446",
"0.7289028",
"0.72863644",
"0.7257486",
"0.7251793",
"0.7228094",
"0.7225105",
"0.72134733",
"0.72119886",
"0.72024626",
"0.72024626",
"0.72007173",
"0.71958107",
"0.7194601",
"0.7194079",
"0.7183809",
"0.7183112",
"0.7173273",
"0.7160337",
"0.7153696",
"0.7151409",
"0.71458256",
"0.71246296",
"0.7122301",
"0.7120646",
"0.7120646",
"0.71100456",
"0.710078",
"0.710078",
"0.710078"
] | 0.7963441 | 0 |
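Minimal usage sketch for get_random_string from the row above; note that length counts words (not characters), and the random module must be imported by the enclosing code:

import random

random.seed(0)  # only to make the pick reproducible in this sketch
text = "the quick brown fox jumps over the lazy dog"
print(get_random_string(text, 3))  # three consecutive words chosen at random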
Set the sky dip model configuration | def set_configuration(self, configuration):
if not isinstance(configuration, Configuration):
raise ValueError(f"Configuration must be {Configuration} "
f"instance. Received {configuration}.")
self.configuration = configuration
if self.configuration.is_configured('skydip.elrange'):
self.el_range = self.configuration.get_range(
'skydip.elrange', is_positive=True)
self.el_range.scale(units.Unit('degree'))
self.uniform_weights = self.configuration.get_bool('skydip.uniform')
self.fit_for = []
if self.configuration.is_configured('skydip.fit'):
names = self.configuration.get_string_list('skydip.fit')
names = [x.strip().lower() for x in names]
for name in names:
if name in ['tau', 'offset', 'kelvin', 'tsky']:
self.fit_for.append(name)
elif name == 'data2k':
self.fit_for.append('kelvin')
else:
self.fit_for.extend(['tau', 'offset', 'kelvin'])
self.fit_for = list(np.unique(self.fit_for)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_parameters(self, skydip):\n if self.configuration.is_configured('skydip.tsky'):\n self.initial_guess['tsky'] = self.configuration.get_float(\n 'skydip.tsky')\n elif skydip.tamb_weight > 0:\n temp = skydip.tamb\n if isinstance(temp, units.Quantity):\n temp = temp.to('Kelvin', equivalencies=units.temperature()\n ).value\n self.initial_guess['tsky'] = temp\n\n signal_range = skydip.get_signal_range()\n if not np.isfinite(self.initial_guess['offset']):\n offset = signal_range.midpoint\n if np.isnan(offset):\n offset = 0.0\n self.initial_guess['offset'] = offset\n\n tsky = self.initial_guess['tsky']\n\n if not np.isfinite(self.initial_guess['kelvin']):\n kelvin = signal_range.span / tsky\n if not np.isfinite(kelvin):\n kelvin = 1.0\n self.initial_guess['kelvin'] = kelvin\n if 'kelvin' not in self.fit_for:\n self.fit_for.append('kelvin')\n else:\n kelvin = self.initial_guess['kelvin']\n am_range = skydip.get_air_mass_range()\n x = signal_range.span / (am_range.span * tsky * kelvin)\n if isinstance(x, units.Quantity):\n x = x.value\n if x < 0:\n tau = 0.1\n elif x >= 1:\n tau = 1.0\n else:\n tau = -np.log(1 - x)\n self.initial_guess['tau'] = tau\n\n for key, value in self.initial_guess.items():\n if isinstance(value, units.Quantity):\n self.initial_guess[key] = value.value",
"def configure(self):\n if self.three_layer:\n config = self.config\n # remove the continental shelf\n config.set('soma', 'phi', '1e-16')\n config.set('soma', 'shelf_depth', '0.0')",
"def setupconfig():\n from Manager import Studio\n studio = Studio.Instance\n cfgeff = studio.configEffect_st\n cfgeff.bloomToggle.isOn = False\n cfgeff.vignetteToggle.isOn = False\n cfgeff.sunShaftsToggle.isOn = False\n cfgeff.fogToggle.isOn = False\n cfgeff.depthOfFieldToggle.isOn = False\n #cfgeff.ssaoToggle.isOn = True\n #cfgeff.selfShadowToggle.isOn = True\n \n # Turn off backgrounds\n studio.uiBGChanger.onOffToggle.isOn = False",
"def set_parameters(self, par):\n try:\n for l in self.cell.layers:\n r_curve = cmf.VanGenuchtenMualem(\n Ksat=10**par.pKsat, phi=par.porosity, alpha=par.alpha, n=par.n\n )\n r_curve.w0 = r_curve.fit_w0()\n l.soil = r_curve\n self.cell.saturated_depth = 0.5\n self.gw.potential = self.cell.z - 0.5\n except RuntimeError as e:\n sys.stderr.write(\"Set parameters failed with:\\n\" + str(par) + \"\\n\" + str(e))\n raise",
"def setup_d2d(self):\n\n self.config[\"d2d\"] = dict()\n\n self.config[\"d2d\"][LC.WHITE] = dict()\n self.config[\"d2d\"][LC.GROWTH] = dict()\n\n self.config[\"d2d\"][LC.WHITE][\"analog-gain\"] = 1.0\n self.config[\"d2d\"][LC.WHITE][\"digital-gain\"] = 1.0\n self.config[\"d2d\"][LC.GROWTH][\"analog-gain\"] = 1.0\n self.config[\"d2d\"][LC.GROWTH][\"digital-gain\"] = 1.0\n\n self.config[\"d2d\"][\"timestamp\"] = time.time()\n\n self.save_config_to_file()",
"def get_model_config(model_name, args):\n if model_name == 'WaveGlow':\n model_config = dict(\n n_mel_channels=args.n_mel_channels,\n n_flows=args.flows,\n n_group=args.groups,\n n_early_every=args.early_every,\n n_early_size=args.early_size,\n WN_config=dict(\n n_layers=args.wn_layers,\n kernel_size=args.wn_kernel_size,\n n_channels=args.wn_channels\n )\n )\n return model_config\n elif model_name == 'FastPitch':\n model_config = dict(\n # io\n n_mel_channels=args.n_mel_channels,\n # symbols\n n_symbols=len(get_symbols(args.symbol_set)),\n padding_idx=get_pad_idx(args.symbol_set),\n symbols_embedding_dim=args.symbols_embedding_dim,\n # input FFT\n in_fft_n_layers=args.in_fft_n_layers,\n in_fft_n_heads=args.in_fft_n_heads,\n in_fft_d_head=args.in_fft_d_head,\n in_fft_conv1d_kernel_size=args.in_fft_conv1d_kernel_size,\n in_fft_conv1d_filter_size=args.in_fft_conv1d_filter_size,\n in_fft_output_size=args.in_fft_output_size,\n p_in_fft_dropout=args.p_in_fft_dropout,\n p_in_fft_dropatt=args.p_in_fft_dropatt,\n p_in_fft_dropemb=args.p_in_fft_dropemb,\n # output FFT\n out_fft_n_layers=args.out_fft_n_layers,\n out_fft_n_heads=args.out_fft_n_heads,\n out_fft_d_head=args.out_fft_d_head,\n out_fft_conv1d_kernel_size=args.out_fft_conv1d_kernel_size,\n out_fft_conv1d_filter_size=args.out_fft_conv1d_filter_size,\n out_fft_output_size=args.out_fft_output_size,\n p_out_fft_dropout=args.p_out_fft_dropout,\n p_out_fft_dropatt=args.p_out_fft_dropatt,\n p_out_fft_dropemb=args.p_out_fft_dropemb,\n # duration predictor\n dur_predictor_kernel_size=args.dur_predictor_kernel_size,\n dur_predictor_filter_size=args.dur_predictor_filter_size,\n p_dur_predictor_dropout=args.p_dur_predictor_dropout,\n dur_predictor_n_layers=args.dur_predictor_n_layers,\n # pitch predictor\n pitch_predictor_kernel_size=args.pitch_predictor_kernel_size,\n pitch_predictor_filter_size=args.pitch_predictor_filter_size,\n p_pitch_predictor_dropout=args.p_pitch_predictor_dropout,\n pitch_predictor_n_layers=args.pitch_predictor_n_layers,\n # pitch conditioning\n pitch_embedding_kernel_size=args.pitch_embedding_kernel_size,\n # speakers parameters\n n_speakers=args.n_speakers,\n speaker_emb_weight=args.speaker_emb_weight,\n # energy predictor\n energy_predictor_kernel_size=args.energy_predictor_kernel_size,\n energy_predictor_filter_size=args.energy_predictor_filter_size,\n p_energy_predictor_dropout=args.p_energy_predictor_dropout,\n energy_predictor_n_layers=args.energy_predictor_n_layers,\n # energy conditioning\n energy_conditioning=args.energy_conditioning,\n energy_embedding_kernel_size=args.energy_embedding_kernel_size,\n )\n return model_config\n\n else:\n raise NotImplementedError(model_name)",
"def setup(self, path_to_conf_file):\n\n self.track = Track.SENSORS\n self.num_frames = 0\n\n with open(path_to_conf_file, 'r') as f:\n config = yaml.safe_load(f)\n\n for key, value in config.items():\n setattr(self, key, value)\n\n self.device = torch.device('cuda')\n\n self.image_model = CameraModel(config).to(self.device)\n self.image_model.load_state_dict(torch.load(self.main_model_dir))\n self.image_model.eval()\n\n self.vizs = []\n\n self.waypointer = None\n\n if self.log_wandb:\n wandb.init(project='carla_evaluate')\n \n self.steers = torch.tensor(np.linspace(-self.max_steers,self.max_steers,self.num_steers)).float().to(self.device)\n self.throts = torch.tensor(np.linspace(0,self.max_throts,self.num_throts)).float().to(self.device)\n\n self.prev_steer = 0\n self.lane_change_counter = 0\n self.stop_counter = 0",
"def set_parameters(self, mode, data):\n if mode == 'design' or self.local_design:\n self.new_design = True\n\n for key, dc in self.variables.items():\n if isinstance(dc, dc_cp):\n if ((mode == 'offdesign' and not self.local_design) or\n (mode == 'design' and self.local_offdesign)):\n self.get_attr(key).design = data[key]\n\n else:\n self.get_attr(key).design = np.nan",
"def set_noise_mode(self, mode):\n if mode == 0:\n self._diar_conf = (3, 1.5)\n else:\n self._diar_conf = (7, 1.4)",
"def viewerSettings():\n node = nuke.thisNode()\n node.knob('near').setValue(100)\n node.knob('far').setValue(500000)\n node.knob('grid_display').setValue(False)\n node.knob('gl_lighting').setValue(1)",
"def model_setup(self):\n self.DNN = DgganMLP(self.settings.hidden_size)\n self.D = DgganMLP(self.settings.hidden_size)\n self.G = Generator(self.settings.hidden_size)",
"def set_config(self): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['ambient_min'] = self.ambient_min.get()\n self.settings['soil_1'] = self.smc1.get()\n self.settings['soil_2'] = self.smc2.get()\n self.settings['soil_3'] = self.smc3.get()\n self.settings['soil_4'] = self.smc4.get()\n self.settings['overhead_level'] = self.overhead_level.get()\n\n # Save settings to config file in case of reboot / power-loss\n print \"UPDATING SETTINGS FILE\"\n with open(self.settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!",
"def kitti_squeezeDet_config():\n mc = base_model_config('KITTI')\n\n mc.IMAGE_WIDTH = 342\n mc.IMAGE_HEIGHT = 342\n mc.BATCH_SIZE = 20\n\n mc.WEIGHT_DECAY = 0.0001\n mc.LEARNING_RATE = 0.01\n mc.DECAY_STEPS = 10000\n mc.MAX_GRAD_NORM = 1.0\n mc.MOMENTUM = 0.9\n mc.LR_DECAY_FACTOR = 0.5\n\n mc.LOSS_COEF_BBOX = 5.0\n mc.LOSS_COEF_CONF_POS = 75.0\n mc.LOSS_COEF_CONF_NEG = 100.0\n mc.LOSS_COEF_CLASS = 1.0\n\n mc.PLOT_PROB_THRESH = 0.4\n mc.NMS_THRESH = 0.4\n mc.PROB_THRESH = 0.005\n mc.TOP_N_DETECTION = 64\n\n mc.DATA_AUGMENTATION = False\n mc.DRIFT_X = 150\n mc.DRIFT_Y = 100\n mc.EXCLUDE_HARD_EXAMPLES = False\n\n mc.ANCHOR_BOX = set_anchors(mc)\n mc.ANCHORS = len(mc.ANCHOR_BOX)\n mc.ANCHOR_PER_GRID = 81\n\n return mc",
"def _initialize_dipole_model(self, model):\n for key, hyst in model['Hysteresis'].items():\n direction = np.array([float(x) for x in hyst['dir'].split(\" \")])\n self.dipoleM.addHysteresis(direction, hyst['vol'], hyst['Hc'], hyst['Bs'], hyst['Br'])\n\n # initialize values for Hysteresis (need B-field @ initial position)\n spacecraft_state = self.state_observer.spacecraftState\n self.inertial2Sat = spacecraft_state.getAttitude().getRotation()\n self.satPos_i = spacecraft_state.getPVCoordinates().getPosition()\n\n gP = self.earth.transform(self.satPos_i, self.in_frame, self.in_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, self.in_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_field = np.array([B_b.x, B_b.y, B_b.z])\n\n self.dipoleM.initializeHysteresisModel(B_field)\n\n # add bar magnets to satellite\n for key, bar in model['BarMagnet'].items():\n direction = np.array([float(x) for x in bar['dir'].split(\" \")])\n self.dipoleM.addBarMagnet(direction, bar['m'])",
"def _initialize_dipole_model(self, model):\n for key, hyst in model['Hysteresis'].items():\n direction = np.array([float(x) for x in hyst['dir'].split(\" \")])\n self.dipoleM.addHysteresis(direction, hyst['vol'], hyst['Hc'], hyst['Bs'], hyst['Br'])\n\n # initialize values for Hysteresis (need B-field @ initial position)\n spacecraft_state = self.state_observer.spacecraftState\n self.inertial2Sat = spacecraft_state.getAttitude().getRotation()\n self.satPos_i = spacecraft_state.getPVCoordinates().getPosition()\n\n gP = self.earth.transform(self.satPos_i, self.in_frame, self.in_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, self.in_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_field = np.array([B_b.x, B_b.y, B_b.z])\n\n self.dipoleM.initializeHysteresisModel(B_field)\n\n # add bar magnets to satellite\n for key, bar in model['BarMagnet'].items():\n direction = np.array([float(x) for x in bar['dir'].split(\" \")])\n self.dipoleM.addBarMagnet(direction, bar['m'])",
"def fit(self, skydip):\n parameter_order = ['tau', 'offset', 'kelvin', 'tsky']\n self.parameters = {}\n self.errors = {}\n self.p_opt = None\n self.p_cov = None\n self.fitted_values = None\n self.data = None\n self.sigma = None\n self.elevation = None\n\n log.debug(\"Initial skydip values:\")\n log.debug(f\" Tsky = {self.initial_guess['tsky']}\")\n log.debug(f\" offset = {self.initial_guess['offset']}\")\n log.debug(f\" kelvin = {self.initial_guess['kelvin']}\")\n log.debug(f\" tau = {self.initial_guess['tau']}\")\n\n if self.el_range is not None:\n from_bin = max(0, skydip.get_bin(self.el_range.min))\n to_bin = min(skydip.data.size, skydip.get_bin(self.el_range.max))\n else:\n from_bin = 0\n to_bin = skydip.data.size\n\n self.init_parameters(skydip)\n\n data = skydip.data[from_bin:to_bin]\n weight = skydip.weight[from_bin:to_bin]\n valid = weight > 0\n data = data[valid]\n weight = weight[valid]\n\n if self.uniform_weights:\n sigma = None\n else:\n sigma = 1 / weight\n\n elevation = skydip.get_elevation(\n np.nonzero(valid)[0]).to('radian').value\n\n self.use_points = data.size\n\n p0 = []\n lower_bounds = np.zeros(4, dtype=float)\n upper_bounds = np.zeros(4, dtype=float)\n\n for i, parameter in enumerate(parameter_order):\n value = self.initial_guess[parameter]\n p0.append(value)\n if parameter in self.fit_for:\n lower_bounds[i] = self.bounds[parameter][0]\n upper_bounds[i] = self.bounds[parameter][1]\n else: # An attempt to fix parameters with curve_fit\n eps = abs(value - np.nextafter(value, 1))\n lower_bounds[i] = value - eps\n upper_bounds[i] = value + eps\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', OptimizeWarning)\n p_opt, p_cov = curve_fit(self.value_at, elevation, data,\n p0=p0, sigma=sigma,\n bounds=(lower_bounds, upper_bounds))\n self.p_opt = p_opt\n self.p_cov = p_cov\n self.data = data\n self.elevation = elevation\n self.sigma = sigma\n\n self.has_converged = np.isfinite(p_opt).all()\n if not self.has_converged: # pragma: no cover\n log.warning(\"Skydip fit did not converge!\")\n errors = np.sqrt(np.diag(p_cov))\n\n for i, parameter in enumerate(parameter_order):\n self.parameters[parameter] = p_opt[i]\n self.errors[parameter] = errors[i]\n\n self.fitted_values = self.fit_elevation(elevation)\n fit_weights = None if sigma is None else weight ** 2\n\n t_obs_rms = np.sqrt(np.average((data - self.fitted_values) ** 2,\n weights=fit_weights))\n self.rms = t_obs_rms / self.parameters['kelvin']",
"def setParameters(self, sx_sim=None):\n # TODO rething that ..\n #if sx_sim is not None:\n #if ds_model is not None:\n #if di_model is not None:\n self.sx_sim = sx_sim\n p = defaultParams(chord=self._chord, rho=self._rho, sx=self.sx_sim, ds=self.ds_model, di=self.di_model,\n M=self._M33, C=self._C33, K=self._K33)\n p['beta'] = self._beta\n if len(p['Iq'])==0:\n raise Exception('No states are present')\n\n # --- Dynamic inflow / induction\n p['a0'] = self._a0\n p['ap0'] = self._ap0\n p['di_tau1'] = self.di_tau1\n p['di_tau2'] = self.di_tau2\n\n # --- Aerodynamic parameters\n if self._y_AQ>0: \n print('[WARN] y_AQ positive is unconventional')\n p['y_AQ'] = self._y_AQ\n if self._y_AT is None:\n p['y_AT'] = self._y_AQ+self._chord/2 # default is approximatively half a chord behind\n else:\n p['y_AT'] = self._y_AT\n p['x_AQ'] = self._x_AQ\n p['x_AT'] = self._x_AT\n if self._ppol is None:\n raise Exception('Polar parameters need to be set')\n p.update(self._ppol)\n # # p.update({'linModel':False, 'drag':drag})\n\n self.p_sim = p",
"def set_camera(di):\n di.cam_mode = di.FIXED\n di.cam_target.cart = vec3(0.1,-0.2,0)\n di.cam_eye.spheric = spheric3(5,0.6,-1.0)",
"def pibooth_setup_camera(cfg):",
"def setWandEffectOptions(self, tolerance=20, maxPixels=200, fillMode=\"Volume\"):\r\n # research\r\n profprint()\r\n parameterNode = self.editUtil.getParameterNode()\r\n # set options\r\n parameterNode.SetParameter(\"WandEffect,tolerance\", str(tolerance))\r\n parameterNode.SetParameter(\"WandEffect,maxPixels\", str(maxPixels))\r\n parameterNode.SetParameter(\"WandEffect,fillMode\", fillMode)\r\n wandOpt = EditorLib.WandEffectOptions()\r\n wandOpt.setMRMLDefaults()\r\n wandOpt.__del__()",
"def noisePreset() :\n s.noisePreset()",
"def set_config(self, config):\n self.adversarial = config.adversarial\n self.eps = config.eps\n self.probability = config.probability\n self.use_dynamics = config.use_dynamics\n self.random = config.random\n self.observable_noise = config.observable_noise\n self.use_max_norm = config.use_max_norm",
"def config_and_train(self, sys_args):\n \n self.run_config_function(sys_args)\n self.set_model_name('vgg_16')\n self.set_trainable_and_exclude_scopes(constants.checkpoint_exclude_scopes,\n constants.trainable_scopes)\n self.set_optimizer('sgd')\n self.set_max_number_of_steps(6000)\n self.train_or_eval_net(sys_args)",
"def set_settings_devices(self):\n self.set_thermostat, self.set_humidifier, self.set_sprinklers, self.set_ventilation = self.settings[3:]",
"def DevMode(self):\n for var in DEV_CONFIG:\n self.Cover(var, DEV_CONFIG[var], layer = SETTINGS_LAYER)",
"def add_skymap(config):\n try:\n with h5py.File(cwd + '/config_files/skymap_radiance.h5', 'r') as sky:\n config['skymap'] = sky['skymap'][:]\n except:\n print('Run txt2hdf5_mudis to create skymap file')",
"def configure(self):\n ice_shelf_2d.configure(self.resolution, self.coord_type, self.config)",
"def config_task(self) -> None:\n if self.hparams[\"model\"] == \"resnet18\":\n self.model = models.resnet18(pretrained=True)\n in_features = self.model.fc.in_features\n self.model.fc = nn.Linear( # type: ignore[attr-defined]\n in_features, out_features=1\n )\n else:\n raise ValueError(f\"Model type '{self.hparams['model']}' is not valid.\")",
"def set_mode(self, mode):\n if mode == 'train':\n self.hidden = self._make_hidden(self.batch_size)\n elif mode == 'generate':\n self.hidden = self._make_hidden(1)",
"def set_mode(self, mode):\n if mode == 'train':\n self.hidden = self._make_hidden(self.batch_size)\n elif mode == 'generate':\n self.hidden = self._make_hidden(1)"
] | [
"0.6260431",
"0.59479874",
"0.59430236",
"0.57023",
"0.5684856",
"0.5664265",
"0.5639438",
"0.5633307",
"0.55982137",
"0.55243534",
"0.5451777",
"0.5441184",
"0.54367733",
"0.5416396",
"0.5416396",
"0.5393091",
"0.53578675",
"0.53553176",
"0.53494585",
"0.5281941",
"0.52732015",
"0.5247964",
"0.522988",
"0.52296615",
"0.5225038",
"0.52239424",
"0.52184176",
"0.52062553",
"0.5179538",
"0.5179538"
] | 0.63351405 | 0 |
Fit the skydip model. | def fit(self, skydip):
parameter_order = ['tau', 'offset', 'kelvin', 'tsky']
self.parameters = {}
self.errors = {}
self.p_opt = None
self.p_cov = None
self.fitted_values = None
self.data = None
self.sigma = None
self.elevation = None
log.debug("Initial skydip values:")
log.debug(f" Tsky = {self.initial_guess['tsky']}")
log.debug(f" offset = {self.initial_guess['offset']}")
log.debug(f" kelvin = {self.initial_guess['kelvin']}")
log.debug(f" tau = {self.initial_guess['tau']}")
if self.el_range is not None:
from_bin = max(0, skydip.get_bin(self.el_range.min))
to_bin = min(skydip.data.size, skydip.get_bin(self.el_range.max))
else:
from_bin = 0
to_bin = skydip.data.size
self.init_parameters(skydip)
data = skydip.data[from_bin:to_bin]
weight = skydip.weight[from_bin:to_bin]
valid = weight > 0
data = data[valid]
weight = weight[valid]
if self.uniform_weights:
sigma = None
else:
sigma = 1 / weight
elevation = skydip.get_elevation(
np.nonzero(valid)[0]).to('radian').value
self.use_points = data.size
p0 = []
lower_bounds = np.zeros(4, dtype=float)
upper_bounds = np.zeros(4, dtype=float)
for i, parameter in enumerate(parameter_order):
value = self.initial_guess[parameter]
p0.append(value)
if parameter in self.fit_for:
lower_bounds[i] = self.bounds[parameter][0]
upper_bounds[i] = self.bounds[parameter][1]
else: # An attempt to fix parameters with curve_fit
eps = abs(value - np.nextafter(value, 1))
lower_bounds[i] = value - eps
upper_bounds[i] = value + eps
with warnings.catch_warnings():
warnings.simplefilter('ignore', OptimizeWarning)
p_opt, p_cov = curve_fit(self.value_at, elevation, data,
p0=p0, sigma=sigma,
bounds=(lower_bounds, upper_bounds))
self.p_opt = p_opt
self.p_cov = p_cov
self.data = data
self.elevation = elevation
self.sigma = sigma
self.has_converged = np.isfinite(p_opt).all()
if not self.has_converged: # pragma: no cover
log.warning("Skydip fit did not converge!")
errors = np.sqrt(np.diag(p_cov))
for i, parameter in enumerate(parameter_order):
self.parameters[parameter] = p_opt[i]
self.errors[parameter] = errors[i]
self.fitted_values = self.fit_elevation(elevation)
fit_weights = None if sigma is None else weight ** 2
t_obs_rms = np.sqrt(np.average((data - self.fitted_values) ** 2,
weights=fit_weights))
self.rms = t_obs_rms / self.parameters['kelvin'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def skydip(scans):\n title = Path(scans[0]).name + \" \".join([Path(scan).name.split(\"_\")[4] for scan in scans[1:]])\n\n signal = []\n std = []\n elevation = []\n\n for scan in scans:\n kd = KissData(scan)\n kd.read_data(list_data=[\"A_masq\", \"I\", \"Q\", \"F_tone\", \"F_tl_Az\", \"F_tl_El\"])\n\n # TODO: Why do we need copy here, seems that numpy strides are making\n # funny things here !\n\n F_tone = 1e3 * kd.F_tone.copy().mean(1)[:, np.newaxis] + kd.continuum\n signal.append(F_tone.mean(1))\n std.append(F_tone.std(1))\n elevation.append(kd.F_tl_El.mean())\n\n signal = np.array(signal)\n std = np.array(std)\n elevation = np.array(elevation)\n detectors = kd.list_detector\n\n # rearrange signal to be coherent with the fit ?\n signal_new = 2 * signal[:, 0][:, np.newaxis] - signal\n\n air_mass = 1.0 / np.sin(np.radians(elevation))\n\n def T(\n airm, const, fact, tau_f\n ): # signal definition for skydip model: there is -1 before B to take into account the increasing resonance to lower optical load\n return const + 270.0 * fact * (1.0 - np.exp(-tau_f * airm))\n\n popts = []\n pcovs = []\n for _sig, _std in zip(signal_new.T, std.T):\n P0 = (4e8, 1e8, 1.0)\n popt, pcov = curve_fit(T, air_mass, _sig, sigma=_sig, p0=P0, maxfev=100000)\n\n popts.append(popt)\n pcovs.append(pcovs)\n\n popts = np.array(popts)\n\n ndet = popts.shape[0]\n fig_skydip_fit, axes = plt.subplots(\n np.int(np.sqrt(ndet)), np.int(ndet / np.sqrt(ndet)), sharex=True\n ) # , sharey=True)\n for _sig, _std, popt, detector, ax in zip(signal_new.T, std.T, popts, detectors, axes.flatten()):\n ax.errorbar(air_mass, _sig, _std)\n ax.plot(air_mass, T(air_mass, *popt))\n ax.set_title(detector, pad=-15)\n ax.label_outer()\n\n fig_skydip_fit.suptitle(title)\n fig_skydip_fit.tight_layout()\n fig_skydip_fit.subplots_adjust(wspace=0, hspace=0)\n\n Ao, Bo, tau = popts.T\n\n fig_skydip_stat, axes = plt.subplots(1, 3)\n for (item, value), ax in zip({r\"$A_0$\": Ao, r\"$B_0$\": Bo, \"tau\": tau}.items(), axes):\n mean_value = np.nanmedian(value)\n std_value = mad_std(value, ignore_nan=True)\n range_value = np.array([-3, 3]) * std_value + mean_value\n ax.hist(value, range=range_value)\n ax.set_xlabel(item)\n fig_skydip_stat.suptitle(title)\n\n return fig_skydip_fit, fig_skydip_stat",
"def fit():\n pass",
"def fit_sky(self):\n min_value = self.data.min()\n ring_model = models.Ring2D(\n min_value, self.x, self.y, self._box * 0.4, width=self._box * 0.4\n )\n ring_model.r_in.fixed = True\n ring_model.width.fixed = True\n ring_model.x_0.fixed = True\n ring_model.y_0.fixed = True\n fit_p = fitting.LevMarLSQFitter()\n return fit_p(ring_model, self._XGrid, self._YGrid, self.data).amplitude",
"def fit(self,X,y):\n\n d = X.shape[1]\n # 1. sketch the data\n self.B,a = self._sketch(X,method=self.fd_mode)\n #H = B.T@B + (self.alpha+a)*np.eye(d)\n #self.H = H\n self.H_inv = self._get_inv() #np.linalg.pinv(H)\n self.coef_ = self.H_inv@(X.T@y) #np.linalg.solve(H, X.T@y)\n self.is_fitted = True",
"def fit(self, X, y):\n self.model = self._initialize_model(X, y)\n self.model.optimize()",
"def fit_noise_model(self):\n\n for term in self._term_data.values(): #perform all pairwise fits\n term.fit()\n \n for pair,pauli in self.layer.single_pairs:\n self._term_data[pauli].fit_single()\n pair_dat = self._term_data[pair]\n pair_dat.fidelity = pair_dat.fidelity**2/self._term_data[pauli].fidelity\n\n \n logger.info(\"Fit noise model with following fidelities:\") \n logger.info([term.fidelity for term in self._term_data.values()])\n\n #get noise model from fits\n self.nnls_fit()",
"def fit(self):\n raise NotImplementedError",
"def fit(self, X, y):\n self.model_x = X\n self.model_y = y",
"def fit(self, X, Y):\n ...",
"def makeFit(self):\n if not self.fitModel.params:\n return\n cs = self.spectrum\n self.worker.make_model_curve(cs, allData=csi.allLoadedItems)\n\n dfparams = cs.fitParams\n lcfRes = dfparams['lcf_result']\n self.fitR.setText('R={0:.5g}'.format(lcfRes['R']))\n self.updateFitResults()\n self.fitReady.emit()",
"def fit(self):\n raise NotImplementedError # pragma: no cover",
"def fill_models(self, iegy, icth):\n\n models = self.psf_models\n irf_data = self.irf_data\n psf_data = self.psf_data\n\n egy_range = psf_data.egy_axis.edges[iegy:iegy+2]\n cth_range = psf_data.cth_axis.edges[icth:icth+2]\n ecenter = psf_data.egy_axis.center[iegy]\n emin = 10 ** psf_data.egy_axis.edges[iegy]\n emax = 10 ** psf_data.egy_axis.edges[iegy+1]\n\n bkg_hist = psf_data.bkg_hist[iegy, icth]\n sig_hist = psf_data.sig_hist[iegy, icth]\n on_hist = psf_data.tot_hist[iegy, icth]\n off_hist = psf_data.off_hist[iegy, icth]\n excess_sum = psf_data.excess._counts[iegy, icth]\n\n for i, ml in enumerate(self.model_labels):\n m = models[ml]\n\n print 'Fitting model ', ml\n hmodel_sig = m.histogram(emin, emax,cth_range[0],cth_range[1],\n on_hist.axis().edges).normalize()\n model_norm = excess_sum\n hmodel_sig *= model_norm\n\n irf_data[ml].excess.set(iegy, icth, sig_hist.sum()[0])\n irf_data[ml].ndf.set(iegy, icth, float(sig_hist.axis().nbins))\n\n hmd = hmodel_sig.scale_density(lambda x: x * x * np.pi)\n hmd += psf_data.bkg_density_hist[iegy, icth]\n\n irf_data[ml].tot_density_hist[iegy, icth] = hmd\n irf_data[ml].bkg_density_hist[iegy, icth] = \\\n copy.deepcopy(psf_data.bkg_density_hist[iegy, icth])\n irf_data[ml].sig_hist[iegy, icth] = hmodel_sig\n irf_data[ml].bkg_hist[iegy, icth] = copy.deepcopy(bkg_hist)\n irf_data[ml].tot_hist[iegy, icth] = hmodel_sig + bkg_hist\n\n for j, q in enumerate(psf_data.quantiles):\n ql = psf_data.quantile_labels[j]\n qm = m.quantile(emin, emax, cth_range[0],cth_range[1], q)\n self.irf_data[ml].qdata[j].set(iegy, icth, qm)\n print ml, ql, qm",
"def fit(self, X,y):\n pass",
"def fit(self, X, y):\n self.centers = self._select_centers(X)\n self.ampls = self._select_ampl(y)\n G = self._calculate_interpolation_matrix(X)\n self.weights = np.dot(np.linalg.pinv(G), y)",
"def fit(self, X, y):\n self.__X = X\n self.__y = y\n self.__trained = True",
"def fit(self, X, y):",
"def fit(self, X, y):",
"def fit(self, X, y):",
"def fit(self, x, y):\r\n\r\n self.train_x = x\r\n self.train_y = y\r\n self.__find_psi__()",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def run_fit(self, optimize_opts=None):\n fit_range = self.config[\"fit\"].get(\"fit_range\")\n model = self.config[\"fit\"][\"model\"]\n\n for obs in self.extraction.spectrum_observations:\n if fit_range is not None:\n obs.mask_fit = obs.counts.energy_mask(fit_range[0], fit_range[1])\n obs.model = model\n\n self.fit = Fit(self.extraction.spectrum_observations)\n self.fit_result = self.fit.run(optimize_opts=optimize_opts)\n\n model = self.config[\"fit\"][\"model\"]\n modelname = model.__class__.__name__\n\n model.parameters.covariance = self.fit_result.parameters.covariance\n\n filename = make_path(self.config[\"outdir\"]) / \"fit_result_{}.yaml\".format(\n modelname\n )\n\n self.write(filename=filename)\n\n obs_stacker = SpectrumDatasetOnOffStacker(self.extraction.spectrum_observations)\n obs_stacker.run()\n\n datasets_fp = obs_stacker.stacked_obs\n datasets_fp.model = model\n self.flux_point_estimator = FluxPointsEstimator(\n e_edges=self.config[\"fp_binning\"], datasets=datasets_fp\n )\n fp = self.flux_point_estimator.run()\n fp.table[\"is_ul\"] = fp.table[\"ts\"] < 4\n self.flux_points = fp"
] | [
"0.71958387",
"0.6706675",
"0.66250366",
"0.6449008",
"0.62683755",
"0.6221359",
"0.61551917",
"0.6120607",
"0.6083158",
"0.60547644",
"0.60453737",
"0.6025587",
"0.60035104",
"0.5981096",
"0.5972988",
"0.5968424",
"0.5968424",
"0.5968424",
"0.59320873",
"0.58962053",
"0.58962053",
"0.58962053",
"0.58962053",
"0.58962053",
"0.58962053",
"0.58962053",
"0.58962053",
"0.58962053",
"0.58962053",
"0.58871144"
] | 0.7225869 | 0 |
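
The fit in the row above pins parameters that are not listed in `fit_for` by collapsing their `curve_fit` bounds to a ~1 ulp window around the initial guess (the `np.nextafter` trick). A minimal, self-contained sketch of that technique on a toy exponential model — the model, names, and values here are illustrative stand-ins, not the actual skydip model:

import warnings

import numpy as np
from scipy.optimize import OptimizeWarning, curve_fit


def model(x, tau, offset):
    # Toy stand-in for the skydip signal: an offset plus an exponential term.
    return offset + np.exp(-tau * x)


rng = np.random.default_rng(0)
x = np.linspace(0.1, 2.0, 50)
y = model(x, 0.3, 1.5) + 0.01 * rng.standard_normal(x.size)

p0 = [0.2, 1.5]                       # initial guesses for (tau, offset)
eps = abs(p0[1] - np.nextafter(p0[1], 1))
lower = np.array([0.0, p0[1] - eps])  # offset effectively frozen at its guess
upper = np.array([10.0, p0[1] + eps])

with warnings.catch_warnings():
    warnings.simplefilter('ignore', OptimizeWarning)
    p_opt, p_cov = curve_fit(model, x, y, p0=p0, bounds=(lower, upper))

errors = np.sqrt(np.diag(p_cov))
print(p_opt, errors)  # tau is fitted; offset stays pinned near 1.5

Suppressing OptimizeWarning matters here because a pinned parameter can make the covariance estimate degenerate, which is exactly why the original fit wraps `curve_fit` in the same filter.
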
Returns a fit to elevation with the model. | def fit_elevation(self, elevation):
if self.p_opt is None:
result = elevation * np.nan
else:
result = self.value_at(elevation, *self.p_opt)
if isinstance(result, units.Quantity):
result = result.value
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fit_sky(self):\n min_value = self.data.min()\n ring_model = models.Ring2D(\n min_value, self.x, self.y, self._box * 0.4, width=self._box * 0.4\n )\n ring_model.r_in.fixed = True\n ring_model.width.fixed = True\n ring_model.x_0.fixed = True\n ring_model.y_0.fixed = True\n fit_p = fitting.LevMarLSQFitter()\n return fit_p(ring_model, self._XGrid, self._YGrid, self.data).amplitude",
"def view_elevation(self):\n if self.elevation_toggle:\n return rasterize(self.tri_mesh, aggregator=ds.mean('z'), precompute=True)\n else:\n return hv.Curve([])",
"def get_elevation(self):\n return self.elevation",
"def calibrateElevation(self,elevation):\n if len(self.values) == self.values.maxlen:\n self.elevcomp = self.value / ((1.0 - ((elevation + self.heightAboveGround) * 0.3048 / 44330.0)) ** 5.255)\n self.calibrated = True\n else:\n self.calibratedElevation = elevation",
"def elevation(x, y):\n file = os.path.abspath(\"..\") + \"\\Shape\\Shape.vrt\"\n layer = gdal.Open(file)\n gt = layer.GetGeoTransform()\n rasterx = int((x - gt[0]) / gt[1])\n rastery = int((y - gt[3]) / gt[5])\n print('elevation =', layer.GetRasterBand(1).ReadAsArray(rasterx, rastery, 1, 1)[0][0], 'm above sea level')",
"def query_elevation(self, xy_pos=None):\r\n query_pos = xy_pos or self.vehicleNP.get_pos()\r\n \"\"\"\r\n This method is accurate and may be useful for placing \r\n objects on the terrain surface.\r\n \"\"\"\r\n result = self.world.ray_test_closest(\r\n LPoint3(query_pos.x, query_pos.y, -10000),\r\n LPoint3(query_pos.x, query_pos.y, 10000))\r\n if result.has_hit():\r\n hit_pos = result.get_hit_pos()\r\n if not xy_pos:\r\n print(\"Bullet heightfield elevation at \"\r\n \"X {:.2f} | Y {:.2f} is {:.3f}\".format(\r\n hit_pos.x, hit_pos.y, hit_pos.z))\r\n else:\r\n hit_pos = None\r\n if not xy_pos:\r\n print(\"Could not query elevation at {}\".format(xy_pos))\r\n \r\n \"\"\"\r\n This method is less accurate than the one above.\r\n Under heavy ray-testing stress (ray tests are performed for all vehicle\r\n wheels, the above elevation query etc.) Bullet sometimes seems to be a\r\n little unreliable.\r\n \"\"\"\r\n texspace_pos = self.terrain.get_relative_point(render, query_pos)\r\n stm_pos = self.terrain_node.uv_to_world(\r\n LTexCoord(texspace_pos.x, texspace_pos.y))\r\n if not xy_pos:\r\n print(\"ShaderTerrainMesh elevation at \"\r\n \"X {:.2f} | Y {:.2f} is {:.3f}\".format(\r\n stm_pos.x, stm_pos.y, stm_pos.z))\r\n \r\n return hit_pos or stm_pos",
"def elevation(source):\n srcAzEl = subarrayControl.s.azel(source, 0.0);\n return srcAzEl[1];",
"def imu_get_elevation(self):\n return self.imu.get_elevation()",
"def elevation(self):\n return self.container['elevation']",
"def get_fit(self) -> np.poly1d:\n if self.log_fits is not None:\n return next(self.log_fits)\n x = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: [line.point1.y for line in m.marker_lines])\n .to_list()\n )\n y = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: [line.point1.x for line in m.marker_lines])\n .to_list()\n )\n if self.orientation == Orientation.UP_DOWN:\n fit = np.polyfit(x, y, 1)\n else:\n fit = np.polyfit(y, x, 1)\n return np.poly1d(fit)",
"def get_raster_elevation(dataset, resample=None, **kwargs):\n extent = get_raster_extent(dataset)\n src_ds = wradlib.io.dem.get_srtm(extent, **kwargs)\n\n driver = gdal.GetDriverByName(\"MEM\")\n dst_ds = driver.CreateCopy(\"ds\", dataset)\n\n if resample is None:\n src_gt = src_ds.GetGeoTransform()\n dst_gt = dst_ds.GetGeoTransform()\n src_scale = min(abs(src_gt[1]), abs(src_gt[5]))\n dst_scale = min(abs(dst_gt[1]), abs(dst_gt[5]))\n ratio = dst_scale / src_scale\n\n resample = gdal.GRA_Bilinear\n if ratio > 2:\n resample = gdal.GRA_Average\n if ratio < 0.5:\n resample = gdal.GRA_NearestNeighbour\n\n gdal.ReprojectImage(\n src_ds, dst_ds, src_ds.GetProjection(), dst_ds.GetProjection(), resample\n )\n elevation = read_gdal_values(dst_ds)\n\n return elevation",
"def fit_peak_az_and_el(self,data):\n\n az = data['level1/spectrometer/pixel_pointing/pixel_az'][0,:]\n el = data['level1/spectrometer/pixel_pointing/pixel_el'][0,:]\n tod_model = self.model.func(self.avg_map_fits['Values'][:], (az,el))\n imax = np.argmax(tod_model)\n az_max = az[imax]\n el_max = el[imax]\n self.az_el_peak = {'AZ_PEAK': np.array([az_max]),\n 'EL_PEAK': np.array([el_max])}",
"def solar_elevation(self, dateandtime=None):\n\n if self.astral is None:\n self.astral = Astral()\n\n if dateandtime is None:\n dateandtime = datetime.datetime.now(tz=self.tz)\n\n return self.astral.solar_elevation(dateandtime, self.latitude, self.longitude)",
"def get_surface_elevation(wind_lat, wind_lon):\n # Load the NetCDF file containing the geopotential of Europe.\n nc = Dataset(path_join(era5_data_dir, geopotential_file_name))\n \n # Read the variables from the netCDF file.\n geopot_lat = nc.variables['latitude'][:]\n geopot_lon = nc.variables['longitude'][:]\n \n \n # Check if wind and geopotential data use same grid.\n assert np.array_equal(geopot_lat, wind_lat) and np.array_equal(geopot_lon, wind_lon), \\\n \"Requested latitudes and/or longitudes do not correspond to those in the NetCDF file.\"\n\n geopot_z = nc.variables['z'][0, :, :]\n nc.close()\n\n surface_elevation = geopot_z/9.81\n print(\"Minimum and maximum elevation found are respectively {:.1f}m and {:.1f}m, removing those below zero.\"\n .format(np.amin(surface_elevation), np.amax(surface_elevation)))\n\n # Get rid of negative elevation values.\n for i, row in enumerate(surface_elevation):\n for j, val in enumerate(row):\n if val < 0.:\n surface_elevation[i, j] = 0.\n\n return surface_elevation",
"def elevation(latitude, longitude):\n elevation = maps.Elevation()\n request = {'locations': '%f,%f' % (latitude, longitude)}\n results, status = elevation.elevation(request)\n if results:\n # We are only interested in the actual elevation\n return results[0]['elevation']\n else:\n raise UnknownLocationError(_('The location could not be found by the elevation API.'))",
"def _fit_gas_trend(cls, x, y, fit_type=None):\n fit_type = cls._max_trend_poly_deg if fit_type is None else fit_type\n if fit_type == 'exp':\n logger.debug('Using exponential fit to extrapolate {}'.format(cls._gas_name))\n fit = np.polynomial.polynomial.Polynomial.fit(x, np.log(y), 1, w=np.sqrt(y))\n return lambda t: np.exp(fit(t))\n\n else:\n logger.debug('Using order {} polynomial to extrapolate {}'.format(fit_type, cls._gas_name))\n fit = np.polynomial.polynomial.Polynomial.fit(x, y, deg=fit_type)\n return fit",
"def inner_fit(self):\n pass",
"def inner_fit(self):\n pass",
"def elevation(self):\n\n\t\twidth = self.no_name_level[0]\n\t\theight = self.no_name_level[1]\n\t\ttile = self.no_name_level[2]\n\t\tx = self.no_name_level[3]\n\t\ty = self.no_name_level[4]\n\t\t\n\t\ttiles = []\n\t\tfor i in tile:\n\t\t\ti = i[:-1]\n\t\t\ttiles.append(i)\t\n\t\ttiles_arranged = [tiles[i:i + width] for i in range(0, len(tile), width)]\n\t\n\t\tplanet_co = []\n\t\t\n\t\tfor i in tiles_arranged:\n\t\t\t\n\t\t\tplanet = []\n\t\t\tfor n in i:\n\t\t\t\tn = n.split(',')\n\t\t\t\tif len(n) != 3:\n\t\t\t\t\ta = ['-']\n\t\t\t\t\tn += a\n\t\t\t\t\t\n\t\t\t\t\tplanet.append(n)\n\t\t\t\telse:\n\t\t\t\t\tplanet.append(n)\n\t\t\t\t\t\n\t\t\tplanet_co.append(planet)\n\t\t\t\n\t\n\t\tplanet_map = Planet(planet_co, width, height)\n\t\tcoordinates = Planet(planet_co, width, height)\n\t\tcoordinates = Planet.coordinates(coordinates)\n\t\tplanet_map = Planet.coordinates_dict(planet_map)#this is my map in dictionary format(coordinates : tile)\n\t\t\n\t\tfor y1 in coordinates:\n\t\t\tif coordinates.index(y1) == y:\n\t\t\t\ty_value = coordinates.index(y1)\n\t\t\t\tfor x1 in y1:\n\t\t\t\t\tif x1 == [x, y]:\n\t\t\t\t\t\tx_value = y1.index(x1)\n\t\trover_d = coordinates[y_value][x_value]\n\t\n\t\tx1 = x_value + 1\n\t\tx2 = x_value + 2\n\t\ty1 = y_value + 1\n\t\ty2 = y_value + 2\n\t\n\t\tif x1 == len(coordinates[1]):\n\t\t\tx1 == 0\n\t\tif y1 == len(coordinates):\n\t\t\ty1 == 0\n\t\n\t\tif x2 > len(coordinates[1]):\n\t\t\tx2 = 1\n\t\tif y2 > len(coordinates[1]):\n\t\t\ty2 == 1\n\t\n\t\tfront2 = coordinates[y2][x_value]\n\t\tfront1 = coordinates[y1][x_value]\n\t\tback1 = coordinates[y_value-1][x_value]\n\t\tback2 = coordinates[y_value-2][x_value]\n\t\tright1 = coordinates[y_value][x1]\n\t\tright2 = coordinates[y_value][x2]\n\t\tleft1 = coordinates[y_value][x_value-1]\n\t\tleft2 = coordinates[y_value][x_value-2]\n\t\n\t\n\t\tfront1_right1 = coordinates[y1][x1]\n\t\tfront1_right2 = coordinates[y1][x2]\n\t\tfront2_right1 = coordinates[y2][x1]\n\t\tfront2_right2 = coordinates[y2][x2]\n\t\tfront1_left1 = coordinates[y1][x_value-1]\n\t\tfront1_left2 = coordinates[y1][x_value-2]\n\t\tfront2_left1 = coordinates[y2][x_value-1]\n\t\tfront2_left2 = coordinates[y2][x_value-2]\n\t\n\t\tback1_right1 = coordinates[y_value-1][x1]\n\t\tback1_right2 = coordinates[y_value-1][x2]\n\t\tback2_right1 = coordinates[y_value-2][x1]\n\t\tback2_right2 = coordinates[y_value-2][x2]\n\t\tback1_left1 = coordinates[y_value-1][x_value-1]\n\t\tback1_left2 = coordinates[y_value-1][x_value-2]\n\t\tback2_left1 = coordinates[y_value-2][x_value-1]\n\t\tback2_left2 = coordinates[y_value-2][x_value-2]\n\t\t\n\t\tco_f2r2 = planet_map[str(front2_right2)]\n\t\tco_f2r1 = planet_map[str(front2_right1)]\n\t\tco_f2 = planet_map[str(front2)]\n\t\tco_f2l1 = planet_map[str(front2_left1)]\n\t\tco_f2l2 = planet_map[str(front2_left2)]\n\t\tco_f1r2 = planet_map[str(front1_right2)]\n\t\tco_f1r1 = planet_map[str(front1_right1)]\n\t\tco_f1 = planet_map[str(front1)]\n\t\tco_f1l1 = planet_map[str(front1_left1)]\n\t\tco_f1l2 = planet_map[str(front1_left2)]\n\t\tco_r2 = planet_map[str(right2)]\n\t\tco_r1 = planet_map[str(right1)]\n\t\tco_rover = planet_map[str([x, y])]\n\t\tco_l1 = planet_map[str(left1)]\n\t\tco_l2 = planet_map[str(left2)]\n\t\tco_b1r2 = planet_map[str(back1_right2)]\n\t\tco_b1r1 = planet_map[str(back1_right1)]\n\t\tco_b1 = planet_map[str(back1)]\n\t\tco_b1l1 = planet_map[str(back1_left1)]\n\t\tco_b1l2 = planet_map[str(back1_left2)]\n\t\tco_b2r2 = planet_map[str(back2_right2)]\n\t\tco_b2r1 = planet_map[str(back2_right1)]\n\t\tco_b2 = 
planet_map[str(back2)]\n\t\tco_b2l1 = planet_map[str(back2_left1)]\n\t\tco_b2l2 = planet_map[str(back2_left2)]\n\t\n\t\tfirst_lineco = [co_f2l2, co_f2l1, co_f2, co_f2r1, co_f2r2]\n\t\tsecond_lineco = [co_f1l2, co_f1l1, co_f1, co_f1r1, co_f1r2]\n\t\tthird_lineco = [co_l2, co_l1, co_rover, co_r1, co_r2]\n\t\tfourth_lineco = [co_b1l2, co_b1l1, co_b1, co_b1r1, co_b1r2]\n\t\tfifth_lineco = [co_b2l2, co_b2l1, co_b2, co_b2r1, co_b2r2]\n\n\t\tfirst_line = ['|']\n\t\tsec_line = ['|']\n\t\tthird_line = ['|']\n\t\tfourth_line = ['|']\n\t\tfifth_line = ['|']\n\t\tfor i in first_lineco:\n\t\t\tif i[2] == '-' and co_rover[2] == '-':\n\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\tfirst_line.append(' |')\n\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\tfirst_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tfirst_line.append('+|')\n\t\t\tif i[2] == '-' and co_rover[2] != '-':\n\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\tfirst_line.append(' |')\n\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\tfirst_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append(' |')\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) > int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append('+|')\n\t\t\tif i[2] != '-' and co_rover[2] == '-':\n\t\t\t\tif int(co_rover[1]) == int(i[2]):\n\t\t\t\t\tfirst_line.append('/|')\n\t\t\t\telif int(co_rover[1]) < int(i[2]):\n\t\t\t\t\tfirst_line.append(\"+|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append(\"\\|\")\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append('-|')\n\t\t\tif i[2] != '-' and co_rover[2] != '-':\n\t\t\t\tif int(i[2]) == int(co_rover[2]):\n\t\t\t\t\tfirst_line.append(' |')\n\t\t\t\telif int(i[2]) < int(co_rover[2]):\n\t\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\t\tfirst_line.append(\"'\\'|\")\n\t\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\t\tfirst_line.append('-|')\n\t\t\t\telif int(i[2]) > int(co_rover[2]):\n\t\t\t\t\tif int(i[2]) == int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append(\"/|\")\n\t\t\t\t\telif int(i[2]) > int(co_rover[1]):\n\t\t\t\t\t\tfirst_line.append(\"+|\")\n\n\n\n\t\tfor i in second_lineco:\n\t\t\tif i[2] == '-' and co_rover[2] == '-':\n\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\tsec_line.append(' |')\n\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\tsec_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tsec_line.append('+|')\n\t\t\tif i[2] == '-' and co_rover[2] != '-':\n\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\tsec_line.append(' |')\n\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\tsec_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append(' |')\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) > int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append('+|')\n\t\t\tif i[2] != '-' and co_rover[2] == '-':\n\t\t\t\tif int(co_rover[1]) == int(i[2]):\n\t\t\t\t\tsec_line.append('/|')\n\t\t\t\telif int(co_rover[1]) < int(i[2]):\n\t\t\t\t\tsec_line.append(\"+|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append(\"'\\'|\")\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append('-|')\n\t\t\tif i[2] != '-' and co_rover[2] != '-':\n\t\t\t\tif int(i[2]) == int(co_rover[2]):\n\t\t\t\t\tsec_line.append(' |')\n\t\t\t\telif int(i[2]) < int(co_rover[2]):\n\t\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\t\tsec_line.append(\"'\\'|\")\n\t\t\t\t\telif int(co_rover[2]) > 
int(i[1]):\n\t\t\t\t\t\tsec_line.append('-|')\n\t\t\t\telif int(i[2]) > int(co_rover[2]):\n\t\t\t\t\tif int(i[2]) == int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append(\"/|\")\n\t\t\t\t\telif int(i[2]) > int(co_rover[1]):\n\t\t\t\t\t\tsec_line.append(\"+|\")\n\t\n\t\tfor i in third_lineco:\n\t\t\tif i[2] == '-' and co_rover[2] == '-':\n\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\tthird_line.append(' |')\n\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\tthird_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tthird_line.append('+|')\n\t\t\tif i[2] == '-' and co_rover[2] != '-':\n\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\tthird_line.append(' |')\n\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\tthird_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append(' |')\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) > int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append('+|')\n\t\t\tif i[2] != '-' and co_rover[2] == '-':\n\t\t\t\tif int(co_rover[1]) == int(i[2]):\n\t\t\t\t\tthird_line.append('/|')\n\t\t\t\telif int(co_rover[1]) < int(i[2]):\n\t\t\t\t\tthird_line.append(\"+|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append(\"'\\'|\")\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append('-|')\n\t\t\tif i[2] != '-' and co_rover[2] != '-':\n\t\t\t\tif int(i[2]) == int(co_rover[2]):\n\t\t\t\t\tthird_line.append(' |')\n\t\t\t\telif int(i[2]) < int(co_rover[2]):\n\t\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\t\tthird_line.append(\"'\\'|\")\n\t\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\t\tthird_line.append('-|')\n\t\t\t\telif int(i[2]) > int(co_rover[2]):\n\t\t\t\t\tif int(i[2]) == int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append(\"/|\")\n\t\t\t\t\telif int(i[2]) > int(co_rover[1]):\n\t\t\t\t\t\tthird_line.append(\"+|\")\n\t\n\t\tfor i in fourth_lineco:\n\t\t\tif i[2] == '-' and co_rover[2] == '-':\n\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\tfourth_line.append(' |')\n\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\tfourth_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tfourth_line.append('+|')\n\t\t\tif i[2] == '-' and co_rover[2] != '-':\n\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\tfourth_line.append(' |')\n\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\tfourth_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append(' |')\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) > int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append('+|')\n\t\t\tif i[2] != '-' and co_rover[2] == '-':\n\t\t\t\tif int(co_rover[1]) == int(i[2]):\n\t\t\t\t\tfourth_line.append('/|')\n\t\t\t\telif int(co_rover[1]) < int(i[2]):\n\t\t\t\t\tfourth_line.append(\"+|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append(\"'\\'|\")\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append('-|')\n\t\t\tif i[2] != '-' and co_rover[2] != '-':\n\t\t\t\tif int(i[2]) == int(co_rover[2]):\n\t\t\t\t\tfourth_line.append(' |')\n\t\t\t\telif int(i[2]) < int(co_rover[2]):\n\t\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\t\tfourth_line.append(\"'\\'|\")\n\t\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\t\tfourth_line.append('-|')\n\t\t\t\telif int(i[2]) > int(co_rover[2]):\n\t\t\t\t\tif int(i[2]) == int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append(\"/|\")\n\t\t\t\t\telif int(i[2]) > 
int(co_rover[1]):\n\t\t\t\t\t\tfourth_line.append(\"+|\")\n\t\n\t\tfor i in fifth_lineco:\n\t\t\tif i[2] == '-' and co_rover[2] == '-':\n\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\tfifth_line.append(' |')\n\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\tfifth_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tfifth_line.append('+|')\n\t\t\tif i[2] == '-' and co_rover[2] != '-':\n\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\tfifth_line.append(' |')\n\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\tfifth_line.append(\"-|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append(' |')\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) > int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append('+|')\n\t\t\tif i[2] != '-' and co_rover[2] == '-':\n\t\t\t\tif int(co_rover[1]) == int(i[2]):\n\t\t\t\t\tfifth_line.append('/|')\n\t\t\t\telif int(co_rover[1]) < int(i[2]):\n\t\t\t\t\tfifth_line.append(\"+|\")\n\t\t\t\telse:\n\t\t\t\t\tif int(i[1]) == int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append(\"'\\'|\")\n\t\t\t\t\t\n\t\t\t\t\telif int(i[1]) < int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append('-|')\n\t\t\tif i[2] != '-' and co_rover[2] != '-':\n\t\t\t\tif int(i[2]) == int(co_rover[2]):\n\t\t\t\t\tfifth_line.append(' |')\n\t\t\t\telif int(i[2]) < int(co_rover[2]):\n\t\t\t\t\tif int(co_rover[2]) == int(i[1]):\n\t\t\t\t\t\tfifth_line.append(\"'\\'|\")\n\t\t\t\t\telif int(co_rover[2]) > int(i[1]):\n\t\t\t\t\t\tfifth_line.append('-|')\n\t\t\t\telif int(i[2]) > int(co_rover[2]):\n\t\t\t\t\tif int(i[2]) == int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append(\"/|\")\n\t\t\t\t\telif int(i[2]) > int(co_rover[1]):\n\t\t\t\t\t\tfifth_line.append(\"+|\")\n\t\tthird_line2 = []\n\t\n\t\tfor n, i in enumerate(third_line):\n\t\t\tif n == 3:\n\t\t\t\ta = \"H|\"\n\t\t\t\t \n\t\t\t\tthird_line2.append(a)\n\t\t\telse:\n\t\t\t\tthird_line2.append(i)\n\t\tnumber1_line = \"\\n{}\\n{}\\n{}\\n{}\\n{}\\n\".format(\"\".join(fifth_line), \"\".join(fourth_line), \"\".join(third_line2),\"\".join(sec_line) , \"\".join(first_line))\n\t\t\n\t\treturn number1_line\n\n\n\n\n\t\tpass",
"def fit():\n pass",
"def view_elevation(self):\n raise ChildProcessError('view elevation method not set')",
"def _elevation(self, node):\n return self.graph_provider.get_coords(node)['z']",
"def elevation(self):\n return self.altitude - self.heightAboveGround",
"def get_offset_model(species_diff_table):\n m_init = modeling.polynomial.Polynomial2D(2)\n fit = modeling.fitting.LevMarLSQFitter()\n xx, yy = species_diff_table['vu'], species_diff_table['Ju']\n zz = species_diff_table['Splat-Barton']\n model_fit = fit(m_init, xx, yy, zz)\n return model_fit",
"def get_elevation(Address):\n loc = get_location_gecode_address_str(Address)\n lat, lng = loc['Latitude']['Value'], loc['Longitude']['Value']\n loc['Elevation'] = {'Value': None}\n if lat is not None and lng is not None:\n elev = gmaps.elevation((lat, lng))\n loc['Elevation']['Value'] = elev[0]['elevation']\n return loc",
"def elevation(self, elevation):\n\n self.container['elevation'] = elevation",
"def fit_z_defocus(data, z, w0, c, d, A, B):\n params = [w0, c, d, A, B]\n return fit_function_LS(data, params, z, z_defocus)",
"def extent(self):\n rx0 = gxapi.float_ref()\n ry0 = gxapi.float_ref()\n rz0 = gxapi.float_ref()\n rx1 = gxapi.float_ref()\n ry1 = gxapi.float_ref()\n rz1 = gxapi.float_ref()\n self.gxvox.get_area(rx0, ry0, rz0, rx1, ry1, rz1)\n if self.is_depth:\n return gxgm.Point2(((rx0.value, ry0.value, -rz1.value), (rx1.value, ry1.value, -rz0.value)))\n return gxgm.Point2(((rx0.value, ry0.value, rz0.value), (rx1.value, ry1.value, rz1.value)),\n self.coordinate_system)",
"def fit(self):\n\n fitdata = np.polyfit(self.v**(-2./3.), self.e, 3, full=True)\n ssr = fitdata[1]\n sst = np.sum((self.e - np.average(self.e))**2.)\n residuals0 = ssr/sst\n deriv0 = np.poly1d(fitdata[0])\n deriv1 = np.polyder(deriv0, 1)\n deriv2 = np.polyder(deriv1, 1)\n deriv3 = np.polyder(deriv2, 1)\n\n self.v0 = None\n for x in np.roots(deriv1):\n if x > 0 and deriv2(x) > 0:\n self.v0 = x**(-3./2.)\n break\n\n if self.v0 is None:\n raise ValueError('No minimum!')\n\n derivV2 = 4./9. * x**5. * deriv2(x)\n derivV3 = (-20./9. * x**(13./2.) * deriv2(x) -\n 8./27. * x**(15./2.) * deriv3(x))\n bulk_modulus0 = derivV2 / x**(3./2.)\n bulk_deriv0 = -1 - x**(-3./2.) * derivV3 / derivV2\n\n self.e0 = deriv0(x)\n self.B0 = bulk_modulus0\n self.B1 = bulk_deriv0\n\n return self.v0, self.e0, self.B0, self.B1, residuals0",
"def getSlantRangeElevation(self, groundRange, el):\r\n \r\n lat = self.ctrLat * pi / 180.0\r\n theta = el * pi / 180.0\r\n \r\n #figure out earth's radius at radar's lat ... non-spherical earth model\r\n e2 = self.eccen # First eccentricity squared - WGS-84 value = 0.00669437999013\r\n a = self.Requator # Equatorial radius - WGS-84 value = 6378137.0\r\n Rearth = a/sqrt(1-e2*(sin(lat))**2) # radius of curvature\r\n \r\n # Inverse of eq. 2.28b in Doviak and Zrnic 1993\r\n # Inverse of eq. 2.28c in Doviak and Zrnic 1993\r\n\r\n Rprime = self.effectiveRadiusMultiplier * self.Requator\r\n\r\n s = array(groundRange, dtype='float64')\r\n\r\n h = Rprime * ( math.cos(theta) / math.cos( theta + s / Rprime) - 1)\r\n\r\n r = (Rprime + h) * math.sin(s / Rprime) / math.cos(theta);\r\n\r\n # Use law of cosines (Side-Angle-Side triangle theorem) with \r\n # R', R'+h as sides and s/R' as the angle to get slant range\r\n #r = sqrt(Rprime**2.0 + (Rprime+h)**2.0 - 2*(Rprime+h)*Rprime*cos(s/Rprime))\r\n # Will return NaN for r=0\r\n #el = arccos((Rprime+h) * sin(s/Rprime) / r) \r\n #el *= 180.0 / pi\r\n \r\n return r,h"
] | [
"0.6175527",
"0.5854759",
"0.57457453",
"0.5709625",
"0.5693753",
"0.5398284",
"0.53812927",
"0.53534794",
"0.53487504",
"0.5308275",
"0.52831",
"0.52750176",
"0.52045095",
"0.520028",
"0.5186781",
"0.51401824",
"0.51340437",
"0.51340437",
"0.5090348",
"0.5071652",
"0.50671566",
"0.50627226",
"0.50535834",
"0.5052634",
"0.5050554",
"0.50320613",
"0.5017769",
"0.49695975",
"0.4928513",
"0.48873752"
] | 0.7041972 | 0 |
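
The `fit_elevation` document above reduces to a small guard: return NaNs when no fit is available, and strip astropy units from the evaluated model. A stand-alone sketch of that pattern, assuming astropy and numpy are available; the model function passed in is a placeholder, not the real `value_at`:

import numpy as np
from astropy import units


def evaluate_model(x, p_opt, func):
    # Mirror the guard: NaNs when there is no fit, plain floats otherwise.
    if p_opt is None:
        result = x * np.nan
    else:
        result = func(x, *p_opt)
    if isinstance(result, units.Quantity):
        result = result.value
    return result


elevation = np.radians([20.0, 40.0, 60.0])
print(evaluate_model(elevation, None, None))                            # all NaN
print(evaluate_model(elevation, (2.0,), lambda x, a: a * x * units.K))  # unit stripped
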
Return a string representation of a given parameter. | def get_parameter_string(self, parameter):
if not self.has_converged or self.parameters is None:
return None
if parameter not in self.parameters:
return None
fmt = self.get_parameter_format(parameter)
unit = self.get_parameter_unit(parameter)
value = fmt % self.parameters[parameter]
error = self.errors[parameter]
if np.isfinite(error):
error = fmt % error
else:
error = None
s = f"{parameter} = {value}"
if error is not None:
s += f' +/- {error}'
if unit is not None:
s += f' {unit}'
return s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __str__(self):\n\n return \"<ExoParameter>: {0}\".format(self.__dict__)",
"def __repr_parameter__(self, name: str, value: Any) -> str:\n return f\"{name}={value!r}\"",
"def format_parameter(param, required):\n\n param_string = check_param(flatten_param(param))\n if not required:\n param_string += '=None'\n return param_string",
"def __str__(self):\n return self.parameters.__str__()",
"def param_str(self, pnames=None):\n l = self.get_params(pnames)\n s = \"\"\n for p in l:\n s += \"%s : %s\\n\" % (p.public_name, p.tostr(self))\n return s",
"def _encode_runtime_parameter(param: data_types.RuntimeParameter) -> str:\n if param.ptype is int:\n type_enum = pipeline_pb2.RuntimeParameter.INT\n elif param.ptype is float:\n type_enum = pipeline_pb2.RuntimeParameter.DOUBLE\n else:\n type_enum = pipeline_pb2.RuntimeParameter.STRING\n type_str = pipeline_pb2.RuntimeParameter.Type.Name(type_enum)\n return f'{param.name}={type_str}:{str(dsl.PipelineParam(name=param.name))}'",
"def __str__(self):\r\n res = [self.Name + ' parameters:']\r\n for t in self._tracked_properties:\r\n res.append(t + ':' + str(getattr(self, t)))\r\n for k, v in sorted(self.Params.items()):\r\n res.append(str(k) + ':' + str(v))\r\n return '\\n'.join(res)",
"def get_param_as_string(self):\n\t\treturn call_sdk_function('PrlResult_GetParamAsString', self.handle)",
"def __parameters_string(self):\n if self._parameters == list():\n return ''\n\n docstring = \"\"\"\n\nParameters:\n\"\"\"\n \n # Compute maximum length of any parameter name\n maxlen = 0\n for param in self._parameters:\n maxlen = max(maxlen, len(param[0]))\n\n # Build documentation for parameters\n for (on_param, param) in enumerate(self._parameters):\n if on_param > 0:\n docstring += '\\n'\n\n docstring += ' ' + param[0].ljust(maxlen + 2)\n doc = wrap(param[1], columns - maxlen - 4)\n padding = str('')\n for line in doc.split('\\n'):\n docstring += padding + line + '\\n'\n padding = str('').ljust(maxlen + 4)\n \n # Pull off the final '\\n'\n return docstring[0:len(docstring)-1]",
"def __repr__(self):\n s = self.name\n if self.param != \"None\":\n s += ' with parameter '+self.param\n s += '; '+self.applyTo\n if self.applyTo != \"global\":\n s += ': '+self.conditions\n return s",
"def __repr__(self):\n return \"<katpoint.Parameter %s = %s %s at 0x%x>\" % \\\n (self.name, self.value_str, self.units, id(self))",
"def __str__(self):\n # defaults to the class name\n if self.p is None:\n return self.__class__.__name__\n\n # class name and parameter values\n temp = [str(i) for i in self.p]\n return self.__class__.__name__+'('+', '.join(temp)+')'",
"def __str__(self):\n num_active = len([p for p in self if p])\n summary = \"%s has %d parameters with %d active (non-default)\" % \\\n (self.__class__.__name__, len(self), num_active)\n if num_active == 0:\n return summary\n return summary + ':\\n' + '\\n'.join(('%s = %s %s (%s)' % ps)\n for ps in self.param_strs())",
"def param_dict_to_str(data):\n if data is None or not data:\n return \"\"\n pairs = []\n for key, val in data.items():\n if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):\n pairs.append(str(key) + '=' + ','.join(map(str, val)))\n elif isinstance(val, string_type) or isinstance(val, numeric_types) or is_numeric(val):\n pairs.append(str(key) + '=' + str(val))\n elif val is not None:\n raise TypeError('Unknown type of parameter:%s, got:%s'\n % (key, type(val).__name__))\n return ' '.join(pairs)",
"def parameter_symbol(self) -> str:\n return self._parameter_symbol",
"def param2str(val):\n if isinstance(val, dict):\n try:\n return json.dumps(val)\n except TypeError:\n s = str(val)\n print(\"[WARNING] cannot convert value ('%s') to a string with json.dumps\" % s)\n\n return str(val)",
"def __make_description(self, param_name):\n value = self._status.get_value(param_name)\n if round(value) != value:\n # Parameter is a float. Limit to three decimals.\n value = \"%.3f\" % (value)\n\n return \"%s (%s)\" % (param_name, str(value))",
"def __str__(self):\n return self.params",
"def __str__(self):\n return self.fmt.format(*self.args, **self.kwargs)",
"def getString(self):\n string = self.itemType.find('format').text.strip()\n paramString = string[string.find('('):]\n string = string[:string.find('(')]\n for i in self.params.keys():\n paramString = paramString.replace(i,str(self.params[i]) if isFloat(str(self.params[i])) else '\"'+str(self.params[i]).replace('\"','\\\\\"')+'\"',1)\n return string+paramString",
"def __make_description(self, param_name):\n value = self._params.get_value(param_name)\n return \"%s (Currently %s)\" % (param_name, str(value))",
"def call_str(pvs):\n s = \"'{}', '{}'\".format(pvs.get('place'), pvs.get('stat_var'))\n if pvs.get('measurement_method'):\n s += \", measurement_method='{}'\".format(\n pvs.get('measurement_method'))\n if pvs.get('observation_period'):\n s += \", observation_period='{}'\".format(\n pvs.get('observation_period'))\n if pvs.get('unit'):\n s += \", unit='{}'\".format(pvs.get('unit'))\n if pvs.get('scaling_factor'):\n s += \", scaling_factor={}\".format(pvs.get('scaling_factor'))\n return s",
"def format_parameter_value(self, param_config, precision):\n # type: (Dict[str, Any], int) -> str\n return \"\"",
"def getParamString(paramName, arrayIndex, paramValue):\n\n printGauge = False\n spec1 = \"{:6}\"\n spec2 = \"{:5}\"\n spec3 = \"{:>15.6E}\"\n\n formatSpecParam = ('IFORMT', 'IFORMY')\n\n if paramName in formatSpecParam:\n fullStr = \" \" + spec1.format(paramName) + '\\n' + \" \" + paramValue\n\n else:\n fullStr = \" \" + \\\n spec1.format(paramName) + spec2.format(arrayIndex) + \\\n spec3.format(paramValue)\n\n # if printGauge == True:\n # print(\"12345612345123456789012345\")\n\n return fullStr + '\\r\\n'",
"def __repr__(self):\n name = self.__class__.__name__\n # values = \", \".join(\"{}={}\".format(k, repr(v)) for k, v in sorted(self.__dict__.items())\n # if k[0] != \"_\" and not k.endswith('manager'))\n values = \", \".join(\"{}={}\".format(k, v) for k, v in self.parameters.items())\n return \"{}({})\".format(name, values)",
"def __str__(self):\n return \"{}: {} params, wires {}\".format(self.name, len(self.params), self.wires)",
"def params_to_arg_string(**params):\n\targs = params_to_args(**params)\n\treturn ' '.join(args)",
"def _parameter_summary(self, parameters, parameters_to_show=4):\n params = parameters\n if len(parameters) > parameters_to_show:\n params = parameters[:2] + [\"...\"] + parameters[-2:]\n return \", \".join(params)",
"def to_string(self, name, value):\r\n \r\n return str(value)",
"def _format_parameter_output(self, parameters: dict) -> str:\n \n output = ''\n for key, value in parameters.items():\n output = output + '\\t\\t' + str(key) + ': ' + str(value) + '\\n'\n \n return output"
] | [
"0.7343561",
"0.71605086",
"0.710844",
"0.69979465",
"0.69655436",
"0.6828032",
"0.67813796",
"0.6732115",
"0.67217475",
"0.6646251",
"0.66266364",
"0.65682906",
"0.656694",
"0.6539286",
"0.639672",
"0.63439494",
"0.6307336",
"0.62920564",
"0.628318",
"0.6257743",
"0.62439185",
"0.6228198",
"0.620004",
"0.6198259",
"0.61840844",
"0.6146355",
"0.61390775",
"0.6124975",
"0.6114093",
"0.6112635"
] | 0.7855388 | 0 |
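
The string assembly in `get_parameter_string` above follows a small pattern: printf-style format for the value, an error term only when the error is finite, and an optional unit suffix. An illustrative stand-alone helper (the name and defaults are hypothetical):

import numpy as np


def format_parameter(name, value, error, fmt='%.3f', unit=None):
    # Value first, then '+/- error' only if the error is finite, then the unit.
    s = f"{name} = {fmt % value}"
    if np.isfinite(error):
        s += f" +/- {fmt % error}"
    if unit is not None:
        s += f" {unit}"
    return s


print(format_parameter('tau', 0.1234, 0.0056))                     # tau = 0.123 +/- 0.006
print(format_parameter('tsky', 271.42, np.nan, '%.1f', 'Kelvin'))  # tsky = 271.4 Kelvin
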
Return the string format for a given parameter. | def get_parameter_format(cls, parameter_name):
formats = {
'tau': '%.3f',
'tsky': '%.1f',
'kelvin': '%.3e'
}
return formats.get(parameter_name, '%.3e') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_parameter_string(self, parameter):\n if not self.has_converged or self.parameters is None:\n return None\n if parameter not in self.parameters:\n return None\n\n fmt = self.get_parameter_format(parameter)\n unit = self.get_parameter_unit(parameter)\n value = fmt % self.parameters[parameter]\n\n error = self.errors[parameter]\n if np.isfinite(error):\n error = fmt % error\n else:\n error = None\n\n s = f\"{parameter} = {value}\"\n if error is not None:\n s += f' +/- {error}'\n if unit is not None:\n s += f' {unit}'\n\n return s",
"def format_parameter(param, required):\n\n param_string = check_param(flatten_param(param))\n if not required:\n param_string += '=None'\n return param_string",
"def format(self) -> str:",
"def format(self):\n groups = [g + \".\" for g in self.groups]\n params = [\";\" + p.format() for p in self.params]\n groups_name_params = \"\".join(groups) + self.name + \"\".join(params)\n return groups_name_params + \":\" + self.format_value() + CRLF",
"def format(self):\n return self.getparam(\"FORMAT\")",
"def format(self):\n return self.getparam(\"FORMAT\")",
"def __str__(self):\n return self.fmt.format(*self.args, **self.kwargs)",
"def FormatParamType(self, param):\n return self.ToPpapiType(param.type_, optional=param.optional)",
"def format(self, *args, **kwargs) -> String:\n pass",
"def format(self) -> str:\n return pulumi.get(self, \"format\")",
"def __format__(self, fmt):\n if not isinstance(fmt, str):\n raise TypeError(\"must be str, not %s\" % type(fmt).__name__)\n if len(fmt) != 0:\n return self.strftime(fmt)\n return str(self)",
"def format(value, arg):\n try:\n if value is not None:\n # return (str(arg)) % value\n return (str(value)) % arg\n else:\n return \"\"\n except (ValueError, TypeError):\n return \"\"",
"def _param_marker(self):\n style = getattr(self, 'paramstyle', 'pyformat')\n\n if style == 'qmark':\n return '?'\n elif style == 'numeric':\n return ':1'\n elif style in ['format', 'pyformat']:\n return '%s'\n raise UnknownParamstyle, style",
"def __make_description(self, param_name):\n value = self._status.get_value(param_name)\n if round(value) != value:\n # Parameter is a float. Limit to three decimals.\n value = \"%.3f\" % (value)\n\n return \"%s (%s)\" % (param_name, str(value))",
"def getParamString(paramName, arrayIndex, paramValue):\n\n printGauge = False\n spec1 = \"{:6}\"\n spec2 = \"{:5}\"\n spec3 = \"{:>15.6E}\"\n\n formatSpecParam = ('IFORMT', 'IFORMY')\n\n if paramName in formatSpecParam:\n fullStr = \" \" + spec1.format(paramName) + '\\n' + \" \" + paramValue\n\n else:\n fullStr = \" \" + \\\n spec1.format(paramName) + spec2.format(arrayIndex) + \\\n spec3.format(paramValue)\n\n # if printGauge == True:\n # print(\"12345612345123456789012345\")\n\n return fullStr + '\\r\\n'",
"def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")",
"def getString(self):\n string = self.itemType.find('format').text.strip()\n paramString = string[string.find('('):]\n string = string[:string.find('(')]\n for i in self.params.keys():\n paramString = paramString.replace(i,str(self.params[i]) if isFloat(str(self.params[i])) else '\"'+str(self.params[i]).replace('\"','\\\\\"')+'\"',1)\n return string+paramString",
"def _params_formatter(field, description):\n heads = ['param']\n types = _or_types(field)\n if types:\n heads.append(types)\n heads.append(rst.escape(field['name']))\n tail = description\n return heads, tail",
"def format(self) -> str:\n return self._format",
"def format(self) -> str:\n return self._format",
"def render_param(self, format):\n\t\tdef renderer(ctx, data):\n\t\t\tparName = ctx.tag.children[0].strip()\n\t\t\tctx.tag.clear()\n\t\t\ttry:\n\t\t\t\tval = data.getParam(parName)\n\t\t\t\tif val is None:\n\t\t\t\t\treturn ctx.tag[\"N/A\"]\n\n\t\t\t\treturn ctx.tag[format%val]\n\t\t\texcept base.NotFoundError:\n\t\t\t\treturn ctx.tag[\"N/A\"]\n\t\treturn renderer",
"def _params_formatter(field):\n heads = ['param']\n types = _or_types(field)\n if types:\n heads.append(types)\n heads.append(field['name'])\n tail = field.get('description', '')\n return heads, tail",
"def format_parameter_value(self, param_config, precision):\n # type: (Dict[str, Any], int) -> str\n return \"\"",
"def format(self) -> Optional[pulumi.Input['FlowLogFormatParametersArgs']]:\n return pulumi.get(self, \"format\")",
"def format(cls, value: Optional[T]) -> str:\n return str(value)",
"def get_str(self, item: str, fmt: str = \"{}\") -> str:\n return fmt.format(self[item])",
"def formatted(self) -> str:\r\n ...",
"def _get_fmt_string(self):\n fmt = '>4s'\n for datatype in self.message_datatypes:\n if datatype in self.VALID_DATAYPES:\n if datatype == 'int':\n fmt += 'I'\n if datatype == 'float':\n fmt += 'f'\n if datatype == 'double':\n fmt += 'd'\n if datatype == 'char':\n fmt += 'c'\n if datatype == 'string':\n fmt += str(self.max_str_len)+'s'\n if datatype == 'bool':\n fmt += 'b'\n\n return fmt",
"def _format_parameter_output(self, parameters: dict) -> str:\n \n output = ''\n for key, value in parameters.items():\n output = output + '\\t\\t' + str(key) + ': ' + str(value) + '\\n'\n \n return output",
"def __gen_fmt_str__(self, fmt):\n return '=' + (self.num_pts_recv * (fmt + ' '))"
] | [
"0.7667087",
"0.72878444",
"0.71903205",
"0.68664265",
"0.68105346",
"0.68105346",
"0.68084127",
"0.6786722",
"0.6781522",
"0.67550427",
"0.6714451",
"0.6662671",
"0.6652037",
"0.6647457",
"0.6626901",
"0.6590343",
"0.6559082",
"0.6547835",
"0.6534224",
"0.6534224",
"0.64576155",
"0.64305395",
"0.64258534",
"0.6405606",
"0.6364796",
"0.6363439",
"0.63345504",
"0.6267047",
"0.6254601",
"0.6142242"
] | 0.74919534 | 1 |
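
The per-parameter formats returned by `get_parameter_format` above are plain printf-style strings with a dict.get fallback; applying them is a one-liner (the parameter values below are made up for illustration):

formats = {'tau': '%.3f', 'tsky': '%.1f', 'kelvin': '%.3e'}

for name, value in [('tau', 0.125), ('tsky', 271.42), ('offset', 1250.0)]:
    fmt = formats.get(name, '%.3e')   # unknown parameters fall back to '%.3e'
    print(f"{name} = {fmt % value}")  # tau = 0.125, tsky = 271.4, offset = 1.250e+03
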
Return the parameter unit for the given parameter. | def get_parameter_unit(self, parameter_name):
parameter_units = {
'tsky': units.Unit("Kelvin"),
'kelvin': self.data_unit
}
return parameter_units.get(parameter_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unit(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"unit\")",
"def get_unit(self):\n return self.unit",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")",
"def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")"
] | [
"0.73394704",
"0.7300321",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834",
"0.7261834"
] | 0.8258069 | 0 |
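Note (illustrative, not part of the dataset rows): the positive document in the row above resolves a parameter name to a unit through a plain dictionary lookup, returning None for unknown names. Below is a minimal standalone sketch of that pattern, assuming astropy.units as the unit framework and a hypothetical default data unit.

from astropy import units

def lookup_parameter_unit(parameter_name, data_unit=units.Unit("count")):
    # Map known parameter names to units; dict.get returns None for unknown names.
    parameter_units = {
        'tsky': units.Unit("Kelvin"),  # sky temperature
        'kelvin': data_unit,           # the data unit itself (hypothetical default)
    }
    return parameter_units.get(parameter_name)

print(lookup_parameter_unit('tsky'))     # K
print(lookup_parameter_unit('unknown'))  # None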
Return a string representation of the sky dip fit. Returns str | def __str__(self):
if not self.has_converged or self.parameters is None:
log.warning("The fit has not converged. Try again!")
return ''
result = []
for parameter in self.parameters.keys():
if parameter in self.fit_for:
parameter_string = self.get_parameter_string(parameter)
if parameter_string is not None:
result.append(parameter_string)
rms = self.get_parameter_format('kelvin') % self.rms
result.append(f"[{rms} K rms]")
return '\n'.join(result) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __str__(self):\n #Get an ordered list of the elements strings so it outputs always the same\n #string given a mass function.\n elements = []\n for element in self.focals:\n elements.append((element, str(element)))\n sortedList = sorted(elements, key=lambda x:x[1])\n \n result = \"\"\n first = True\n for t in sortedList:\n if first:\n result += t[1] + \":\" + \"{:.4f}\".format(self.focals[t[0]])\n first = False\n else:\n result += \", \" + t[1] + \":\" + \"{:.4f}\".format(self.focals[t[0]])\n return \"{\" + result + \"}\"",
"def __str__(self):\n\t\n\t\tresult = \"\"\n\t\tresult += \"Torsional Spring Specs: \\n\"\n\t\tresult += \"Shape Eq. Slope: {0}\\n\".format(str(self.shape_slope))\n\t\tresult += \"Z Thickness: {0}\\n\".format(str(self.z_thick))\n\t\tresult += \"In-Plane Thickness: {0}\\n\".format(str(self.thick))\n\t\tresult += \"Spiral Length: {0}\\n\".format(str(self.length))\n\n\t\treturn result",
"def __str__(self):\n temp = 'Fourier'\n if self.dagger:\n temp += '.H'\n return temp",
"def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(ExpandedEnsemble.key, self.eta0, self.c_upd, self.n_upd)\n if self.smooth:\n strme = \"{!s} {!s}\".format(strme, self.smooth)\n\n return strme",
"def _repr_(self):\n return \"Projective hypersurface defined by %s in %s\"%(\n self.defining_polynomial(), self.ambient_space())",
"def __str__(self) -> str:\n return (\n f'[{self.x:g} {self.y:g} {self.z:g} '\n f'{self.offset:g}] {self.scale:g}'\n )",
"def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(WangLandau.key, self.delta0, self.c_upd, self.n_upd)\n if self.smooth:\n strme = \"{} {}\".format(strme, self.smooth)\n\n return strme",
"def output(self):\n to_write = 'P '\n to_write += str(self.def_field['count'])+' '\n to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['pen'])+' '\n for xpos, ypos in self.def_field['XY_poly']:\n to_write += str(self.offset[0] + xpos) + ' ' \\\n + str(self.offset[1] + ypos) + ' '\n to_write += str(self.def_field['fill'])\n to_write += '\\n'\n return to_write",
"def __str__(self) -> str:\n if self.scalar_vector:\n return f\"({self.w:-.4f} {self.x:+.4f}i {self.y:+.4f}j {self.z:+.4f}k)\"\n return f\"({self.x:-.4f}i {self.y:+.4f}j {self.z:+.4f}k {self.w:+.4f})\"",
"def __str__(self):\n s = ''\n for i, (k, v) in enumerate(self.meters.items()):\n if i > 0:\n s += ' '\n s += k + ' ' + str(v)\n return s",
"def _repr_(self):\n return \"Affine hypersurface defined by %s in %s\"%(\n self.defining_polynomial(), self.ambient_space())",
"def __str__(self):\n result=\"curv %f d0 %f z0 %f ctheta %f phi %f barcode %d\"%(self.curv,self.d0,self.z0,self.ctheta,self.phi,self.barcode)\n return result",
"def __str__(self) -> str:\n return (\n f\"GlacierFlowModel '{self.model_name}' \"\n f\"{'' if self.steady_state else 'not '}in steady state with:\"\n f\"\\n - m: {self.m:20.5f} [m/m]\"\n f\"\\n - ela: {self.ela:20.2f} [m MSL]\"\n f\"\\n - resolution: {self.res:20.2f} [m]\"\n f\"\\n - extent: min max\"\n f\"\\n {self.extent[0]:10.1f} \"\n f\"{self.extent[1]:10.1f} [x]\"\n f\"\\n {self.extent[2]:10.1f} \"\n f\"{self.extent[3]:10.1f} [y]\"\n )",
"def prescription(self):\n prescription = \"\\n{0:>10}\\t{1:>10}\\t{2:>10}\\t{3:>10}\\n\".format(\"R\",\"Material\",\"d\",\"diameter\")\n for surface in self.lensSurfaces():\n prescription += \"{0:>10.2f}\\t{1:>10}\\t{2:>10.2f}\\t{3:>10.2f}\\n\".format(surface.R, str(surface.mat), surface.spacing, surface.diameter)\n return prescription",
"def __str__(self):\n out = \"phase polynomial = \\n\"\n out += str(self.poly)\n out += \"\\naffine function = \\n\"\n out += \" (\"\n for row in range(self.num_qubits):\n wrote = False\n for col in range(self.num_qubits):\n if self.linear[row][col] != 0:\n if wrote:\n out += \" + x_\" + str(col)\n else:\n out += \"x_\" + str(col)\n wrote = True\n if self.shift[row] != 0:\n out += \" + 1\"\n if row != self.num_qubits - 1:\n out += \",\"\n out += \")\\n\"\n return out",
"def __str__(self):\r\n s = ''\r\n for i, (k, v) in enumerate(self.meters.items()):\r\n if i > 0:\r\n s += ' '\r\n s += k + ' ' + str(v)\r\n return s",
"def __str__(self):\n return self.fmt('DMY', '.')",
"def __str__(self):\n return self.fmt('DMY', '.')",
"def __str__(self):\n return str((self.code, self.fitness,))",
"def __str__(self):\n s = \"\"\n for i in range(13,25):\n if (self.p1vec[i] > 0):\n s += \"|W{0:02}|\".format(self.p1vec[i])\n elif (self.p2vec[25 - i] > 0):\n s += \"|B{0:02}|\".format(self.p2vec[25 - i])\n else:\n s += \"| |\"\n s += '\\n'\n for i in range(12, 0,-1):\n if (self.p1vec[i] > 0):\n s += \"|W{0:02}|\".format(self.p1vec[i])\n elif (self.p2vec[25 - i] > 0):\n s += \"|B{0:02}|\".format(self.p2vec[25 - i])\n else:\n s += \"| |\"\n return s",
"def __str__(self):\n return self.designation + ' ' +self.winery + ' wine'",
"def __str__(self):\n output = 'Pathogens:\\n'\n for x in self.extant_p:\n output += ' n %s h %f d %f host %s extant\\n' % (x.name, x.height, x.dist, x.host.name)\n for x in self.not_extant_p:\n output += ' n %s h %f d %f host %s not extant\\n' % (x.name, x.height, x.dist, x.host.name)\n for x in self.not_yet_sampled_p:\n output += ' n %s h %f d %f host %s not yet sampled\\n' % (x.name, x.height, x.dist, x.host.name)\n\n output += 'Hosts:\\n'\n for x in self.extant_h:\n output += ' %s %f %f extant\\n' % (x.name, x.height, x.dist)\n for x in self.not_extant_h:\n output += ' %s %f %f not extant\\n' % (x.name, x.height, x.dist)\n for x in self.not_yet_sampled_h:\n output += ' %s %f %f not yet sampled\\n' % (x.name, x.height, x.dist)\n\n return output",
"def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(UmbrellaSampling.key, self.x0, self.kf, self.n_upd)\n\n return strme",
"def output(self):\n to_write = 'S '\n \"\"\"\n print self.def_field\n for key in self.def_field:\n print key,\"=\", self.def_field[key]\n \"\"\"\n to_write += str(self.offset[0] + self.def_field['x1'])+' '\n to_write += str(self.offset[1] + self.def_field['y1'])+' '\n to_write += str(self.offset[0] + self.def_field['x2'])+' '\n to_write += str(self.offset[1] + self.def_field['y2'])+' '\n to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['pen'])+' '\n to_write += self.def_field['fill']+'\\n'\n return to_write",
"def __str__(self):\n s = ''\n for i, (k, v) in enumerate(self.meters.iteritems()):\n if i > 0:\n s += ' '\n s += k + ' ' + str(v)\n return s",
"def snapshot(self):\n text = \"\"\n text += \"{}:\\n{}\\n\".format('chi', np.array2string(self.chi))\n return text",
"def __str__(self):\n s = ' KFData '\n s += 'vector: '+str(self.vec)+', \\t'\n s += 'matrix: '+str(self.cov)+', \\t'\n s += 'zrun: '+str(self.zrun)+',\\t'\n s += 'pars '+str(self.pars)\n return s",
"def __str__(self):\n # TODO also show relative abundance\n s = \"{} ion species\\n\".format(len(self.ions))\n for ion in self.ions:\n s += \" {:2s} (Z = {:3d}) {:.3e} particles\\n\".format(ion.getName(), ion.getCharge(), ion.getParticleNumber())\n \n return s",
"def skydip(scans):\n title = Path(scans[0]).name + \" \".join([Path(scan).name.split(\"_\")[4] for scan in scans[1:]])\n\n signal = []\n std = []\n elevation = []\n\n for scan in scans:\n kd = KissData(scan)\n kd.read_data(list_data=[\"A_masq\", \"I\", \"Q\", \"F_tone\", \"F_tl_Az\", \"F_tl_El\"])\n\n # TODO: Why do we need copy here, seems that numpy strides are making\n # funny things here !\n\n F_tone = 1e3 * kd.F_tone.copy().mean(1)[:, np.newaxis] + kd.continuum\n signal.append(F_tone.mean(1))\n std.append(F_tone.std(1))\n elevation.append(kd.F_tl_El.mean())\n\n signal = np.array(signal)\n std = np.array(std)\n elevation = np.array(elevation)\n detectors = kd.list_detector\n\n # rearrange signal to be coherent with the fit ?\n signal_new = 2 * signal[:, 0][:, np.newaxis] - signal\n\n air_mass = 1.0 / np.sin(np.radians(elevation))\n\n def T(\n airm, const, fact, tau_f\n ): # signal definition for skydip model: there is -1 before B to take into account the increasing resonance to lower optical load\n return const + 270.0 * fact * (1.0 - np.exp(-tau_f * airm))\n\n popts = []\n pcovs = []\n for _sig, _std in zip(signal_new.T, std.T):\n P0 = (4e8, 1e8, 1.0)\n popt, pcov = curve_fit(T, air_mass, _sig, sigma=_sig, p0=P0, maxfev=100000)\n\n popts.append(popt)\n pcovs.append(pcovs)\n\n popts = np.array(popts)\n\n ndet = popts.shape[0]\n fig_skydip_fit, axes = plt.subplots(\n np.int(np.sqrt(ndet)), np.int(ndet / np.sqrt(ndet)), sharex=True\n ) # , sharey=True)\n for _sig, _std, popt, detector, ax in zip(signal_new.T, std.T, popts, detectors, axes.flatten()):\n ax.errorbar(air_mass, _sig, _std)\n ax.plot(air_mass, T(air_mass, *popt))\n ax.set_title(detector, pad=-15)\n ax.label_outer()\n\n fig_skydip_fit.suptitle(title)\n fig_skydip_fit.tight_layout()\n fig_skydip_fit.subplots_adjust(wspace=0, hspace=0)\n\n Ao, Bo, tau = popts.T\n\n fig_skydip_stat, axes = plt.subplots(1, 3)\n for (item, value), ax in zip({r\"$A_0$\": Ao, r\"$B_0$\": Bo, \"tau\": tau}.items(), axes):\n mean_value = np.nanmedian(value)\n std_value = mad_std(value, ignore_nan=True)\n range_value = np.array([-3, 3]) * std_value + mean_value\n ax.hist(value, range=range_value)\n ax.set_xlabel(item)\n fig_skydip_stat.suptitle(title)\n\n return fig_skydip_fit, fig_skydip_stat",
"def to_string(self):\n if self.is_power_onoff():\n return 'Power On/Off'\n else:\n gain = str(hex(int(self['gain_speed'])))\n out = self['target'].ljust(20) + ' ' + self['filters'].ljust(11) + ' ' + self['x_bin'] + 'x' + self['y_bin'] + ' ' + gain[2:].upper()\n \n \n if self.number_windows() > 0:\n out += ' ' + self['x1_size'].rjust(4) + 'x' + self['y1_size'].ljust(4) + ' ' + self['x1_start'].ljust(3) + ' ' + self['y1_start'].ljust(4)\n if self.number_windows() > 1:\n out += ' ' + self['x2_size'].rjust(4) + 'x' + self['y2_size'].ljust(4) + ' ' + self['x2_start'].ljust(3) + ' ' + self['y2_start'].ljust(4)\n \n if 'Comment' in self:\n out += ' ' + self['Comment']\n return out"
] | [
"0.63235986",
"0.5925334",
"0.5918602",
"0.5917366",
"0.59062576",
"0.5836863",
"0.58009154",
"0.578009",
"0.5776318",
"0.57657",
"0.5755946",
"0.574386",
"0.5697488",
"0.569668",
"0.5695591",
"0.5693888",
"0.56415474",
"0.56415474",
"0.5632635",
"0.5627902",
"0.5618648",
"0.56166846",
"0.56146425",
"0.5612461",
"0.5599517",
"0.55953306",
"0.5589314",
"0.5563817",
"0.5563692",
"0.5553248"
] | 0.6171846 | 1 |
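Note (illustrative, not part of the dataset rows): the __str__ document above assembles one line per fitted parameter and then appends a bracketed RMS line. Below is a small self-contained sketch of the same string-assembly pattern, with hypothetical parameter names, format strings, and values.

def format_fit_summary(parameters, fit_for, formats, rms):
    # Keep only parameters that were actually fitted, one formatted line each.
    result = [formats[name] % value
              for name, value in parameters.items() if name in fit_for]
    # Append the residual RMS in the same style as the original ("[... K rms]").
    result.append(f"[{formats['kelvin'] % rms} K rms]")
    return '\n'.join(result)

print(format_fit_summary(
    parameters={'tau': 0.12, 'tsky': 275.0},
    fit_for=['tau', 'tsky'],
    formats={'tau': 'tau = %.3f', 'tsky': 'Tsky = %.1f K', 'kelvin': '%.3f'},
    rms=0.042,
))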
Calibrated linear classifier binary model. This model uses a piecewise linear calibration function on each of the real (as opposed to binary) inputs (parametrized) and then combines (sums up) the results. Optionally calibration can be made monotonic. It usually requires a preprocessing step on the data, to calculate the quantiles of each used feature. This can be done locally or in one worker only before training, in a separate invocation of your program (or directly). Typically this can be saved (`save_dir` parameter) to the same directory where the data is. Hyperparameters are given in the form of the object tfl_hparams.CalibrationHParams. It takes in per-feature calibration parameters. Internally values will be converted to tf.float32. | def calibrated_linear_classifier(feature_columns=None,
model_dir=None,
quantiles_dir=None,
keypoints_initializers_fn=None,
optimizer=None,
config=None,
hparams=None):
return _CalibratedLinear(
n_classes=2,
feature_columns=feature_columns,
model_dir=model_dir,
quantiles_dir=quantiles_dir,
keypoints_initializers_fn=keypoints_initializers_fn,
optimizer=optimizer,
config=config,
hparams=hparams) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train_calibrate_predict(clf, X_t, y_t, X_v, y_v, params, jobs):\n\n # Indicate the classifier and the training set size\n print(\"Training a {} with None...\".format(clf.__class__.__name__))\n\n # Train the classifier\n clf = train_classifier(clf, X_t, y_t, params, jobs)\n\n # # Calibrate classifier\n # print(\"Calibrating probabilities of classifier...\")\n # start = time()\n # clf = CalibratedClassifierCV(best_pipe.named_steps['clf'], cv='prefit', method='isotonic')\n # clf.fit(best_pipe.named_steps['dm_reduce'].transform(X_calibrate), y_calibrate)\n # end = time()\n # print(\"Calibrated {} in {:.1f} minutes\".format(clf.__class__.__name__, (end - start) / 60))\n\n # Print the results of prediction for both training and testing\n train_score = predict_labels(clf, X_t, y_t)\n test_score = predict_labels(clf, X_v, y_v)\n print(\"Score of {} for training set: {:.4f}.\".format(clf.__class__.__name__, train_score))\n print(\"Score of {} for test set: {:.4f}.\".format(clf.__class__.__name__, test_score))\n\n # Return classifier, and score for train and test set\n return clf, train_score, test_score",
"def calibrate_onc(data, path, model_name):\n \n #split test data (subsets 7-9) into new test (7-8)/train(9) sets\n calibration_train_set = data[((data.subset==7)|(data.subset==8))].copy()\n calibration_test_set = data[data.subset==9].copy()\n\n #define calibration model\n ir = IsotonicRegression(out_of_bounds=\"clip\")\n #fit the model to the probas from the training set\n ir.fit(calibration_train_set.score, calibration_train_set.y )\n \n #evaluate with the test set and save\n calibration_test_set.loc[:,'p_calibrated'] = ir.transform(calibration_test_set.score) \n \n #calibration_test_set.loc[:,'p_calibrated'] = p_calibrated\n \n #save\n with open(path + 'model_calibrated_' + model_name + '.pickle', 'wb') as picklefile: \n pickle.dump(ir,picklefile)\n \n with open(path + 'y_calibrated_' + model_name + '.pickle', 'wb') as picklefile: \n pickle.dump(calibration_test_set, picklefile)\n \n print_calibrated_results(calibration_test_set.y, calibration_test_set.score, calibration_test_set.p_calibrated)\n return calibration_test_set",
"def _calibrate(self, Otrain, Ftrain, Feval):\n raise NotImplementedError()",
"def train_calibration(config):\n run_dates = pd.date_range(start=config.start_dates[\"train\"],\n end=config.end_dates[\"train\"],\n freq='1D').strftime(config.run_date_format)\n \n target_calib_models = {}\n print()\n print('Loading Data')\n\n for size_index,size in enumerate(config.size_threshold):\n target_calib_models[size] = {}\n train_files, target_files = [], []\n for date in run_dates: \n train_data_files = glob(config.train_data_path+ \\\n \"20{2}/netcdf/*{0}*unsmoothed*_{1}_*{2}*{3}*{4}.nc\".format(\n config.forecast_model_names,size,date,\n config.start_hour,config.end_hour))\n if len(train_data_files) < 1:\n continue\n if config.sector:\n target_data_files = glob(config.target_data_path+'{0}*{1}*{2}*.nc'.format(\n date,size,config.sector)) \n else:\n target_data_files = glob(config.target_data_path+'{0}*{1}*.nc'.format(\n date,size))\n if len(target_data_files) < 1:\n continue\n train_files.append(train_data_files[0])\n target_files.append(target_data_files[0])\n \n date_indices = [index for index in range(len(train_files))]\n percent_train_indices = int(len(train_files)*0.70)\n t_data = [Dataset(x).variables[\"Data\"][:] for x in train_files] \n tar_data = [Dataset(x).variables[\"24_Hour_All_12z_12z\"][:] for x in target_files] \n print()\n print('Number of files:')\n print('Train (70%): {0}'.format(int(len(t_data)*0.70)))\n print('Validate (30%): {0}'.format(int(len(t_data)*0.30)))\n print()\n for ind,model_name in enumerate(config.calibration_model_names):\n bs = []\n random_models = []\n print('Random Cross-Validation, {0} >{1}mm'.format(model_name,size)) \n random_seed = random.sample(range(1, 100), 10)\n for s,seed in enumerate(random_seed):\n np.random.seed(seed)\n print('Index',s, 'Random Seed', seed)\n train_indices = np.random.choice(date_indices, percent_train_indices, replace=False)\n test_indices = [ind for ind in date_indices if ind not in train_indices]\n \n train_data = np.array(t_data)[train_indices].ravel()\n target_train_data = np.array(tar_data)[train_indices].ravel()\n \n val_data = np.array(t_data)[test_indices].ravel()\n target_val_data = np.array(tar_data)[test_indices].ravel()\n \n model = deepcopy(config.calibration_model_objs[ind])\n model.fit(train_data,target_train_data)\n random_models.append(model)\n \n predict = model.transform(val_data)\n \n #plt.figure(figsize=(9, 6))\n #plt.plot(sorted(val_data),model.transform(sorted(val_data)))\n #plt.xlabel('data')\n #plt.ylabel('calibrated')\n #plt.show()\n #plt.close()\n\n print(brier_score(predict, target_val_data))\n bs.append(brier_score(predict, target_val_data))\n \n best_bs = np.argmin(bs)\n target_calib_models[size][model_name] = np.array(random_models)[best_bs]\n print('Lowest Brier Score: {0}'.format(np.array(bs)[best_bs]))\n print()\n print()\n return target_calib_models",
"def _calibration(\n name: str = CALIBRATION_NAME,\n eval_config: Optional[config_pb2.EvalConfig] = None,\n model_name: str = '',\n output_name: str = '',\n sub_key: Optional[metric_types.SubKey] = None,\n aggregation_type: Optional[metric_types.AggregationType] = None,\n class_weights: Optional[Dict[int, float]] = None,\n example_weighted: bool = False) -> metric_types.MetricComputations:\n key = metric_types.MetricKey(\n name=name,\n model_name=model_name,\n output_name=output_name,\n sub_key=sub_key,\n example_weighted=example_weighted)\n\n # Make sure weighted_labels_predictions_examples are calculated.\n computations = _weighted_labels_predictions_examples(\n eval_config=eval_config,\n model_name=model_name,\n output_name=output_name,\n sub_key=sub_key,\n aggregation_type=aggregation_type,\n class_weights=class_weights,\n example_weighted=example_weighted)\n weighted_labels_predictions_key = computations[-1].keys[-1]\n\n def result(\n metrics: Dict[metric_types.MetricKey, Any]\n ) -> Dict[metric_types.MetricKey, Any]:\n \"\"\"Returns calibration.\"\"\"\n metric = metrics[weighted_labels_predictions_key]\n if np.isclose(metric.total_weighted_labels, 0.0):\n value = float('nan')\n else:\n value = metric.total_weighted_predictions / metric.total_weighted_labels\n\n return {key: value}\n\n derived_computation = metric_types.DerivedMetricComputation(\n keys=[key], result=result)\n computations.append(derived_computation)\n return computations",
"def apply_calib(fix_dir, tprobs, run, hour, exper=1, smooth=1, wd=''):\n\n # Apply smoothing\n calib_probs = gaussian_filter(tprobs, smooth, mode='constant')\n\n # Apply calibration\n run_str = str(run).zfill(2)\n in_dir = f'{fix_dir}/calib_files/{exper}/{run_str}_{hour}.pkl'\n\n with open(in_dir, 'rb') as f:\n corr_data = pickle.load(f, encoding='latin1')\n\n for index in np.ndindex(calib_probs.shape):\n this_forecast = calib_probs * 100\n if this_forecast[index] < 5:\n bin = 0\n elif this_forecast[index] >= 5 and this_forecast[index] < 15:\n bin = 10\n elif this_forecast[index] >= 15 and this_forecast[index] < 25:\n bin = 20\n elif this_forecast[index] >= 25 and this_forecast[index] < 35:\n bin = 30\n elif this_forecast[index] >= 35 and this_forecast[index] < 45:\n bin = 40\n elif this_forecast[index] >= 45 and this_forecast[index] < 55:\n bin = 50\n elif this_forecast[index] >= 55 and this_forecast[index] < 65:\n bin = 60\n elif this_forecast[index] >= 65 and this_forecast[index] < 75:\n bin = 70\n elif this_forecast[index] >= 75 and this_forecast[index] < 85:\n bin = 80\n elif this_forecast[index] >= 85:\n bin = 90\n\n calib_probs[index] = calib_probs[index] + (corr_data[index][bin] / 100.)\n calib_probs = calib_probs.astype(float)\n \n # Set anything less than 0 after calibration to 0\n calib_probs[calib_probs < 0] = 0\n \n return calib_probs",
"def calibrated_linear_regressor(feature_columns=None,\n model_dir=None,\n quantiles_dir=None,\n keypoints_initializers_fn=None,\n optimizer=None,\n config=None,\n hparams=None):\n return _CalibratedLinear(\n n_classes=0,\n feature_columns=feature_columns,\n model_dir=model_dir,\n quantiles_dir=quantiles_dir,\n keypoints_initializers_fn=keypoints_initializers_fn,\n optimizer=optimizer,\n config=config,\n hparams=hparams)",
"def calibrate():\n if os.path.exists('calibration_data.pkl'):\n with open('calibration_data.pkl', 'rb') as f:\n return pickle.load(f)\n\n objp = np.zeros((6 * 9, 3), np.float32)\n objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)\n\n # Arrays to store object points and image points from all the images.\n objpoints = [] # 3d points in real world space\n imgpoints = [] # 2d points in image plane.\n\n for fname in glob.glob('camera_cal/calibration*.jpg'):\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # print('{}: {}'.format(fname, gray.shape))\n\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)\n\n if ret:\n objpoints.append(objp)\n imgpoints.append(corners)\n else:\n print('Failed to detect corners for {}'.format(fname))\n\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (1280, 720), None, None)\n assert ret\n\n with open('calibration_data.pkl', 'wb') as f:\n pickle.dump((mtx, dist), f)\n\n return mtx, dist",
"def calibrate(self, Otrain, Ftrain, Feval):\n return Otrain[0] * np.ones(len(Feval))",
"def fit(self, X, y):\n self.unique_class = np.sort(np.unique(y))\n if self.unique_class.shape[0] > 2:\n for i in range(self.unique_class.shape[0] - 1):\n # for each k - 1 ordinal value we fit a binary classifier\n binary_y = (y > self.unique_class[i]).astype(np.uint8)\n clf = clone(self.clf)\n clf.fit(X, binary_y)\n if self.cal_data is not None:\n calib_clf = CalibratedClassifierCV(clf, cv='prefit', method=self.cal_method)\n binary_y_cal = (self.y_cal > self.unique_class[i]).astype(np.uint8)\n calib_clf.fit(self.x_cal, binary_y_cal)\n self.clfs[i] = calib_clf\n else:\n self.clfs[i] = clf",
"def _calibrate_without_loss(self, calib_dataset, calib_batch_size,\n calib_steps):\n # Create quantize calibration model\n if not self._optimized_model:\n logger.error(\n 'Should call `optimize_model()` before `_calibrate_without_loss`.')\n self._qcb_model, self._layer_metadata = self._quantizer.create_quantize_model(\n self._optimized_model,\n candidate_layers=self._candidate_layers,\n layer_metadata=self._layer_metadata,\n quantize_strategy=self._quantize_strategy,\n mode='QCB',\n target=self._target,\n dataset=calib_dataset,\n batch_size=calib_batch_size,\n steps=calib_steps,\n specific_layers=self._specific_layers)\n\n if calib_dataset is not None:\n logger.info(\"Start Quantize Calibration...\")\n collector = self._run_model_with_collector(self._qcb_model, calib_dataset,\n calib_batch_size, calib_steps)\n\n # Create quantize calibration evaluation model\n self._qcbev_model = model_utils.clone_model_with_weights(self._qcb_model)\n model_utils.set_layer_mode(self._qcbev_model, 'QCBEV')\n\n if type(self._quantize_strategy\n ) == vitis_pof2s_quantize_strategy.VitisPof2SQuantizeStrategy:\n # Freeze the quantize info into the model, now using most_common_quantize_info\n # last_quantize_info = collector.get_last_quantize_info()\n common_quantize_info = collector.get_most_common_quantize_info()\n self._freeze_quantize_info(common_quantize_info)\n elif type(self._quantize_strategy) in [\n vitis_fs_quantize_strategy.VitisFSQuantizeStrategy,\n vitis_fsx_quantize_strategy.VitisFSXQuantizeStrategy,\n vitis_gpu_quantize_strategy.VitisGPUQuantizeStrategy\n ]:\n # Freeze the quantize info into the model, now using most_common_quantize_info\n # last_quantize_info = collector.get_last_quantize_info()\n common_quantize_info = collector.get_entropy_percentile_amax(\n self._qcb_model)\n self._freeze_quantize_info(common_quantize_info)\n\n logger.info(\"Quantize Calibration Done.\")",
"def Calibrator(\n data_loader, cache=None, BaseClass=None, batch_size=None, quantile=None, regression_cutoff=None, algo=None\n):\n BaseClass = util.default(BaseClass, trt.IInt8EntropyCalibrator2)\n\n class CalibratorClass(BaseClass):\n \"\"\"\n Calibrator that supplies calibration data to TensorRT to calibrate the network for INT8 inference.\n \"\"\"\n\n def __init__(self):\n # Must explicitly initialize parent for any trampoline class! Will mysteriously segfault without this.\n BaseClass.__init__(self) # type: ignore\n\n self.data_loader = data_loader\n self._cache = cache\n self.device_buffers = OrderedDict()\n self.input_metadata = None\n self.reset()\n G_LOGGER.verbose(f\"Created calibrator [cache={self._cache}]\")\n\n self.batch_size = util.default(batch_size, 1)\n\n self.is_polygraphy_calibrator = True\n # The function that constructed this instance\n self.make_func = Calibrator\n\n def set_input_metadata(self, input_metadata):\n \"\"\"\n Sets the input metadata for the calibrator.\n\n This is passed along to the data loader and is also used for\n input data type and shape checks.\n\n NOTE: This generally does not need to be called manually if the calibrator is being used\n with Polygraphy's loaders, like ``CreateConfig`` or ``EngineFromNetwork``.\n\n Args:\n input_metadata (TensorMetadata):\n Mapping of input names to their data types and shapes.\n Passed along to the data loader if provided. This is required if\n using Polygraphy's included `DataLoader` to provide calibration data,\n or if data type and shape checking is desired.\n \"\"\"\n self.input_metadata = input_metadata\n if input_metadata is not None:\n with contextlib.suppress(AttributeError):\n self.data_loader.input_metadata = input_metadata\n\n def reset(self):\n \"\"\"\n Reset this calibrator for reuse.\n\n The calibrator will clear any dynamic ranges cached from previous calibration runs, and will\n attempt to rewind the data loader (note that generators cannot be rewound).\n\n Typically, this is only required if the same calibrator is used for multiple different networks.\n \"\"\"\n # Attempt to reset data loader\n self.data_loader_iter = iter(self.data_loader)\n self.num_batches = 0\n\n # Make sure calibrator will check the cache again when reset.\n self.cache_contents = None\n\n def get_batch_size(self):\n return self.batch_size\n\n def _get_batch_impl(self, names):\n try:\n buffers = next(self.data_loader_iter)\n except StopIteration:\n if not self.num_batches:\n G_LOGGER.critical(\n \"Calibrator data loader provided no data.\\nPossible reasons for this include:\\n(1) data loader \"\n \"has no data to provide\\n(2) data loader was a generator, and the calibrator is being \"\n \"used multiple times (generators cannot be rewound)\"\n )\n return None\n else:\n self.num_batches += 1\n\n util.check_sequence_contains(\n buffers.keys(),\n names,\n name=\"calibration input data provided by the data loader\",\n items_name=\"inputs\",\n )\n\n def check_buffer(name, buffer):\n if self.input_metadata is None:\n return\n\n expected_dtype, expected_shape = self.input_metadata[name]\n\n err_prefix = \"Received an unexpected input from the data loader during calibration. 
\"\n if buffer.dtype != expected_dtype:\n G_LOGGER.critical(\n err_prefix\n + f\"For input: '{name}', expected data type: {expected_dtype}, but received: {buffer.dtype}\"\n )\n\n if not util.is_valid_shape_override(buffer.shape, expected_shape):\n G_LOGGER.critical(\n err_prefix\n + f\"For input: '{name}', expected a shape compatible with: {expected_shape}, but received: {buffer.shape}\"\n )\n\n ptrs = []\n for name in names:\n buf = buffers[name]\n\n if isinstance(buf, cuda.DeviceView):\n check_buffer(name, buf)\n ptrs.append(buf.ptr)\n elif isinstance(buf, np.ndarray):\n check_buffer(name, buf)\n if name not in self.device_buffers:\n self.device_buffers[name] = cuda.DeviceArray(shape=buf.shape, dtype=buf.dtype)\n G_LOGGER.verbose(f\"Allocated: {self.device_buffers[name]}\")\n\n self.device_buffers[name].resize(buf.shape)\n buf = util.make_contiguous(buf)\n ptrs.append(self.device_buffers[name].copy_from(buf).ptr)\n elif isinstance(buf, int):\n ptrs.append(buf)\n else:\n G_LOGGER.critical(\n f\"Calibration data loader provided an unrecognized type: {type(buf).__name__} for input: {name}.\"\n \"\\nPlease provide either a NumPy array, Polygraphy DeviceView, or GPU pointer. \"\n )\n\n return ptrs\n\n def get_batch(self, names):\n ptrs = None\n try:\n ptrs = self._get_batch_impl(names)\n except PolygraphyException:\n pass\n if ptrs is None:\n self.free()\n return ptrs\n\n def read_calibration_cache(self):\n def load_from_cache():\n if self._cache is None or not util.get_file_size(self._cache):\n return None\n\n try:\n return util.load_file(self._cache, description=\"calibration cache\")\n except Exception as err:\n G_LOGGER.error(f\"Could not read from calibration cache: {self._cache}\\nNote: Error was: {err}\")\n return None\n\n if self.cache_contents is not None:\n return self.cache_contents\n\n self.cache_contents = load_from_cache()\n\n if not self.cache_contents:\n if self.cache_contents is not None:\n G_LOGGER.warning(\n \"Calibration cache was provided, but is empty. \"\n \"Will regenerate scales by running calibration.\",\n mode=LogMode.ONCE,\n )\n self.cache_contents = None\n\n return self.cache_contents\n\n def write_calibration_cache(self, cache):\n self.cache_contents = cache.tobytes()\n\n if self._cache is None:\n return\n\n try:\n util.save_file(contents=self.cache_contents, dest=self._cache, description=\"calibration cache\")\n except Exception as err:\n G_LOGGER.error(f\"Could not write to calibration cache: {self._cache}.\\nNote: Error was: {err}\")\n\n def free(self):\n \"\"\"\n Frees all device buffers associated with this calibrator\n \"\"\"\n for device_buffer in self.device_buffers.values():\n device_buffer.free()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.free()\n\n # IInt8LegacyCalibrator methods\n if BaseClass == trt.IInt8LegacyCalibrator:\n\n def get_quantile(self):\n return util.default(quantile, 0.5)\n\n def get_regression_cutoff(self):\n return util.default(regression_cutoff, 0.5)\n\n def read_histogram_cache(self, length):\n pass\n\n def write_histogram_cache(self, ptr, length):\n pass\n\n # IInt8Calibrator methods\n if BaseClass == trt.IInt8Calibrator:\n\n def get_algorithm(self):\n return util.default(algo, trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2)\n\n def __repr__(self):\n return util.make_repr(\n \"Calibrator\",\n data_loader,\n cache=cache,\n BaseClass=BaseClass,\n batch_size=batch_size,\n quantile=quantile,\n regression_cutoff=regression_cutoff,\n algo=algo,\n )[0]\n\n return CalibratorClass()",
"def prob_calibration_function(truthvec, scorevec, reg_param_vec='default', knots='sample',\n method='logistic', force_prob=True, eps=1e-15, max_knots=200,\n transform_fn='none', random_state=942, verbose=False, cv_folds=5,\n unity_prior_weight=1, unity_prior_gridsize=20):\n from sklearn import linear_model\n from sklearn.metrics import log_loss, make_scorer\n\n if (unity_prior_weight>0):\n scorevec_coda, truthvec_coda = create_yeqx_bias_vectors(unity_prior_gridsize)\n coda_wt = unity_prior_weight/unity_prior_gridsize\n weightvec = np.concatenate((np.ones(len(scorevec)), coda_wt * np.ones(len(scorevec_coda))))\n scorevec = np.concatenate((scorevec, scorevec_coda))\n truthvec = np.concatenate((truthvec, truthvec_coda))\n\n if transform_fn != 'none':\n scorevec = transform_fn(scorevec)\n\n knot_vec = np.unique(scorevec)\n if (knots == 'sample'):\n num_unique = len(knot_vec)\n if (num_unique > max_knots):\n smallest_knot, biggest_knot = knot_vec[0], knot_vec[-1]\n inter_knot_vec = knot_vec[1:-1]\n random.seed(random_state)\n random.shuffle(inter_knot_vec)\n reduced_knot_vec = inter_knot_vec[:(max_knots-2)]\n reduced_knot_vec = np.concatenate((reduced_knot_vec, [smallest_knot, biggest_knot]))\n reduced_knot_vec = np.concatenate((reduced_knot_vec, np.linspace(0, 1, 21)))\n if (unity_prior_weight>0):\n reduced_knot_vec = np.concatenate((reduced_knot_vec, scorevec_coda))\n knot_vec = np.unique(reduced_knot_vec)\n if verbose:\n print(\"Originally there were {} knots. Reducing to {} while preserving first and last knot.\".format(num_unique, len(knot_vec)))\n X_mat = _natural_cubic_spline_basis_expansion(scorevec, knot_vec)\n\n if (method == 'logistic'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 5, 61)\n if verbose:\n print(\"Trying {} values of C between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec), np.max(reg_param_vec)))\n reg = linear_model.LogisticRegressionCV(Cs=reg_param_vec, cv=StratifiedKFold(cv_folds, shuffle=True),\n scoring=make_scorer(log_loss, needs_proba=True, greater_is_better=False))\n if (unity_prior_weight>0):\n reg.fit(X_mat, truthvec, weightvec)\n else:\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found C = {}\".format(reg.C_))\n\n if (method == 'ridge'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 7, 71)\n if verbose:\n print(\"Trying {} values of alpha between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec),np.max(reg_param_vec)))\n reg = linear_model.RidgeCV(alphas=reg_param_vec, cv=KFold(cv_folds, shuffle=True), scoring=make_scorer(mean_squared_error_trunc,needs_proba=False, greater_is_better=False))\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found alpha = {}\".format(reg.alpha_))\n\n def calibrate_scores(new_scores):\n new_scores = np.maximum(new_scores,knot_vec[0]*np.ones(len(new_scores)))\n new_scores = np.minimum(new_scores,knot_vec[-1]*np.ones(len(new_scores)))\n if transform_fn != 'none':\n new_scores = transform_fn(new_scores)\n basis_exp = _natural_cubic_spline_basis_expansion(new_scores,knot_vec)\n if (method == 'logistic'):\n outvec = reg.predict_proba(basis_exp)[:,1]\n if (method == 'ridge'):\n outvec = reg.predict(basis_exp)\n if force_prob:\n outvec = np.where(outvec < eps, eps, outvec)\n outvec = np.where(outvec > 1-eps, 1-eps, outvec)\n return outvec\n\n return calibrate_scores",
"def test_ebm_calibrated_classifier_cv():\n from sklearn.calibration import CalibratedClassifierCV # type: ignore\n\n X = np.array(\n [\n [0, 1, 0, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 0, 1, 0],\n [1, 0, 0, 0],\n [0, 0, 0, 1],\n ],\n dtype=np.uint8,\n )\n\n y = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1], dtype=np.uint8)\n\n clf = ExplainableBoostingClassifier()\n calib = CalibratedClassifierCV(clf)\n calib.fit(X, y)",
"def calibrate(self, calib_fns, calib_params, analytes=None, drift_correct=False):\n # can have calibration function stored in self and pass *coefs?\n if analytes is None:\n analytes = self.analytes\n\n if 'calibrated' not in self.data.keys():\n self.data['calibrated'] = {}\n\n for a in analytes:\n if drift_correct:\n P = self.drift_params(calib_params, a)\n else:\n P = calib_params[a].values[0]\n\n self.data['calibrated'][a] = \\\n calib_fns[a](P,\n self.data['ratios'][a])\n\n # coefs = calib_params[a]\n # if len(coefs) == 1:\n # self.data['calibrated'][a] = \\\n # self.data['ratios'][a] * coefs\n # else:\n # self.data['calibrated'][a] = \\\n # np.polyval(coefs, self.data['ratios'][a])\n # self.data['ratios'][a] * coefs[0] + coefs[1]\n self.setfocus('calibrated')\n return",
"def calib_raw(self, calib_func=\"kidsdata.kids_calib.get_calfact\", clean_raw=False, **kwargs):\n\n if getattr(self, \"__calib\", None) is None:\n self.__log.debug(\"calibration using {}\".format(calib_func))\n self.__check_attributes([\"I\", \"Q\"], read_missing=False)\n\n fmod = self.fmod\n mod_mask = self.mod_mask\n\n # Check about the 3rd bit and the fix_masq keyword\n if np.any(mod_mask & (1 << 2)) and kwargs.get(\"fix_masq\") is True:\n self.__log.error(\"fix_masq should not be used when 3rd bit is set\")\n\n self.__log.info(\"Calibrating with fmod={} and {}\".format(fmod, kwargs))\n calib_func = _import_from(calib_func)\n self.__calib = calib_func(self.I, self.Q, mod_mask, fmod=fmod, **kwargs)\n\n else:\n self.__log.error(\"calibrated data already present\")\n\n # Expand keys :\n # Does not double memory, but it will not be possible to\n # partially free memory : All attribute read at the same time\n # must be deleted together\n for ckey in self.__calib.keys():\n self.__dict__[ckey] = self.__calib[ckey]\n\n if clean_raw:\n self._clean_data(\"_KidsRawData__dataSd\")",
"def trainAndPredict(self):\r\n print(\"train\")\r\n filename= 'finalized_model.sav'\r\n # train the algorithm on training data and predict using the testing data\r\n model = self.svc_model.fit(self.X.T, self.Y)\r\n pickle.dump(model, open(filename, 'wb'))\r\n #model = pickle.load(open(filename, 'rb'))\r\n pred1 =model.predict(self.TestSet.T)\r\n # print the accuracy score of the model\r\n print(\"LinearSVC accuracy : \", accuracy_score(self.TestSetY, pred1, normalize=True))",
"def learnHyperLinear(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):\n #Check if test is empty, if it is, don't refit and predict data\n testAvailable = np.size(xTest,0)!=0\n\n # Parameters selection\n #====================\n cRange = np.logspace(-5,1,3)\n parameters = {'C': cRange}\n\n if penalty=='l1':\n dual=False\n else:\n dual=True\n\n #Creating Model and begin classification\n #=======================================\n classif = svm.LinearSVC(penalty=penalty, class_weight=CLASS_WEIGHT, dual=dual)\n clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=5, n_jobs=jobs, verbose=3, refit=testAvailable)\n print(\"Begin\\n...\")\n clf.fit(X,y)\n\n \n #Get results, print and write them into a file\n #============================================\n print(clf.best_params_, clf.best_score_)\n\n if testAvailable:\n scores = testModel(clf.best_estimator_,X,y,xTest,yTest,penalty)\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Linear',\\\n penalty,scoring, transformedData, scores=scores)\n else:\n print(\"No test, don't predict data\")\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Linear',\\\n penalty,scoring, transformedData, scores=None)",
"def cam_calibration():\n # read all calibration images in a folder with similar names\n images = glob.glob('./camera_cal/calibration*.jpg')\n\n # calibrate camera and read object-points (3D), image points (2D) and image shape\n objpoints, imgpoints, img_shape = calibrate_camera(images)\n print(\"DONE: Camera calibration\")\n # save calibration parameters' pickle file\n save_calib_params(objpoints, imgpoints, img_shape)\n print(\"Calibration parameters pickle file saved \")",
"def plot_calibration_curve(classifier_name, pred_csv_file, fig_index):\n\n from sklearn.metrics import brier_score_loss, precision_score, recall_score, f1_score\n from sklearn.calibration import CalibratedClassifierCV, calibration_curve\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n from sklearn.isotonic import isotonic_regression\n from sklearn.metrics import roc_auc_score, roc_curve, auc\n\n # # Calibrated with isotonic calibration\n # isotonic = CalibratedClassifierCV(base_estimator=None, cv=\"prefit\", method='isotonic')\n\n # # Calibrated with sigmoid calibration\n # sigmoid = CalibratedClassifierCV(base_estimator=None, cv=\"prefit\", method='sigmoid')\n\n # # Logistic regression with no calibration as baseline\n # lr = LogisticRegression(C=1., solver='lbfgs')\n\n fig = plt.figure(fig_index, figsize=(10, 10))\n ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)\n ax2 = plt.subplot2grid((3, 1), (2, 0))\n\n ax1.plot([0, 1], [0, 1], \"k:\", label=\"Perfectly calibrated\")\n\n # for name in [classifier_name, classifier_name + ' + Isotonic', classifier_name + ' + Sigmoid']:\n for name in [classifier_name, classifier_name + ' + Sigmoid']:\n # for name in [classifier_name]:\n\n y_test, prob_pos, y_pred, _, _ = read_pred_csv_file_to_arrays(pred_csv_file)\n\n if name == classifier_name + ' + Sigmoid':\n a, b = sigmoid_calibration(prob_pos, y_test, sample_weight=None)\n prob_pos = predict_sigmoid(a, b, prob_pos)\n print a, b\n y_pred = binary_predict(prob_pos, threshold = 0.5)\n\n\n if name == classifier_name + ' + Isotonic' :\n prob_pos = isotonic_regression(prob_pos, sample_weight=None, y_min=None, y_max=None,\n increasing=True)\n y_pred = binary_predict(prob_pos, threshold = 0.5)\n\n\n # print prob_pos[:20]\n # # plot roc curve for test: class 1 only\n # fpr, tpr, _ = roc_curve(y_test, prob_pos)\n # lw = 2\n # plt.plot(fpr, tpr, color='darkorange',\n # lw=lw, label='ROC curve (area = %0.2f)' %(roc_auc_score(y_test, prob_pos, average='macro')))\n # plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n # plt.xlim([0.0, 1.0])\n # plt.ylim([0.0, 1.05])\n # plt.xlabel('False Positive Rate')\n # plt.ylabel('True Positive Rate')\n # plt.title('Receiver operating characteristic example')\n # plt.legend(loc=\"lower right\")\n # plt.savefig('plots/roc_%s.png'%(name))\n # plt.clf()\n\n clf_score = brier_score_loss(y_test, prob_pos, pos_label=1)\n print(\"%s:\" % name)\n print(\"\\tBrier: %1.3f\" % (clf_score))\n print(\"\\tPrecision: %1.3f\" % precision_score(y_test, y_pred))\n print(\"\\tRecall: %1.3f\" % recall_score(y_test, y_pred))\n print(\"\\tF1: %1.3f\" % f1_score(y_test, y_pred))\n print(\"\\tROC: %1.3f\\n\" % roc_auc_score(y_test, prob_pos, average='macro'))\n\n fraction_of_positives, mean_predicted_value = \\\n calibration_curve(y_test, prob_pos, n_bins=10)\n\n ax1.plot(mean_predicted_value, fraction_of_positives, \"s-\",\n label=\"%s (%1.3f)\" % (name, clf_score))\n\n ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,\n histtype=\"step\", lw=2)\n\n ax1.set_ylabel(\"Fraction of positives\")\n ax1.set_ylim([-0.05, 1.05])\n ax1.legend(loc=\"lower right\")\n ax1.set_title('Calibration plots (reliability curve)')\n\n ax2.set_xlabel(\"Mean predicted value\")\n ax2.set_ylabel(\"Count\")\n ax2.legend(loc=\"upper center\", ncol=2)\n\n plt.tight_layout()\n plt.savefig('plots/calibration.png')\n plt.clf()",
"def fit_recurrent(self, x, y):\n # print('Stage 1')\n x_ = self.scaler_s1.fit_transform(x)\n\n self.basemodel.fit(x_, y)\n self.training_hit_probability = self._hitprobability(x_, y)\n\n # Learn the hit probability\n self.hitproba = HitProbability()\n self.hitproba.fit(x_, self.training_hit_probability)\n\n # Learn high confidence for all classes\n hm_y, auto_gamma = self._adjust_gamma(self.training_hit_probability)\n self.joint_class_hc = HC_LR()\n self.joint_class_hc.fit(x_, hm_y)\n\n # hm_subtypes = []\n # proba_subtypes = []\n\n # while np.mean(y_) > 0.01:\n # for label in np.unique(y):\n\n hm_1hot = []\n hm_1hot.append(self._one_hot(self.training_hit_probability, y)[0])\n y_ = y.copy()\n\n self.recurrent_base = []\n self.recurrent_hpc = []\n for ii in range(self.recurrent_modes):\n print('Stage 1 iter: ' + str(ii))\n #self.recurrent_base.append(BaseSvc())\n\n if np.sum(y_) > 2:\n self.basemodel = BaseSvc()\n hm_y, proba_tmp = self._fit_mode(x_, y_)\n hm_candidate = self._one_hot(proba_tmp, y_)[1]\n else:\n hm_candidate = np.zeros_like(y_)\n\n self.recurrent_base.append(self.basemodel)\n\n #if np.sum(hm_candidate) >= 2:\n hm_1hot.append(hm_candidate)\n\n # remove the selected subgroup from the target list\n y_[hm_1hot[-1] == 1] = 0\n\n # make the default base model the first\n self.basemodel = self.recurrent_base[0]\n\n print('Stage 2')\n # Stage 2\n # hm_1hot = hm_subtypes\n # train stage2\n self.confidencemodel.fit(x_, hm_1hot)",
"def calibrate(self, master):\n if master.polyorder == 'linear':\n self.fitfunction = \"A0 + A1 * D\"\n self.fit_fkt = self.calc_lin\n elif master.polyorder == 'quadratic':\n self.fit_fkt = self.calc_quad\n self.fitfunction = \"A0 + A1 * D + A2 * D**2\"\n elif master.polyorder == \"cubic\":\n self.fitfunction = \"A0 + A1 * D + A2 * D**2 + A3 * D**3\"\n self.fit_fkt = self.calc_cubic\n else:\n print(\"Polynomgrad nicht definiert\")\n \n self.mw = np.asarray(self.mw)\n if master.sensortype == \"Druck\":\n self.best, self.covar = curve_fit(self.fit_fkt, self.mw, master.Referencedata.caldat)\n else:\n print(\"Sensortyp noch nicht Hinterlegt\")",
"def _doCalibration(self):\n self._cmdCalibration(2)",
"def calibrate(\n model: onnx.ModelProto, dataset: List[Dict[str, np.ndarray]]\n) -> Dict[str, Tuple[float, float]]:\n augmented_model = ONNXCalibrator(model).build_calibration_model()\n return calibrator.calibrate(augmented_model, dataset)",
"def _fit_binary(estimator, X, y, classes=None):\n # print('X shape: ',X.shape)\n # print('y shape: ', y.shape)\n # print(y)\n unique_y = np.unique(y)\n if len(unique_y) == 1:\n if classes is not None:\n if y[0] == -1:\n c = 0\n else:\n c = y[0]\n warnings.warn(\"Label %s is present in all training examples.\" %\n str(classes[c]))\n estimator = _ConstantPredictor().fit(X, unique_y)\n else:\n estimator = clone(estimator)\n y[y==0]=-1\n y.reshape(-1,1)\n estimator.fit(X, y.reshape(-1,1))\n return estimator",
"def calibrate(self, Otrain, Ftrain, Feval):\n I = np.where((np.isnan(Otrain) == 0) & (np.isnan(Ftrain) == 0))[0]\n if len(I) == 0:\n return np.nan*np.zeros(Feval.shape)\n Ieval = np.where(np.isnan(Feval) == 0)[0]\n x = np.nan*np.zeros(Feval.shape)\n if len(Ieval) > 0:\n x[Ieval] = self._calibrate(Otrain[I], Ftrain[I], Feval[Ieval])\n return x",
"def experiment_linear_conv(_):\n # Min dft1-norm solution found (norm=1.9895)\n adv_norm_type = 'dftinf'\n dual_norm_type = 'dft1'\n baseline_norm_types = ['l1', 'linf']\n attack_step_dir = 'dftinf_sd' # 'dftinf'\n\n module_name = 'train'\n log_dir = 'runs_linear_conv_%s' % adv_norm_type\n exclude = '*'\n\n d_over_n = [1, 2, 4, 8, 16, 32] # separable >= 1\n dim = 100\n num_train = [int(dim / p) for p in d_over_n]\n\n # Config params\n shared_params = []\n shared_params += [\n ('config', './config.py'),\n ('seed', list(range(3))),\n ]\n\n # Data hyper-parameters\n shared_params += [\n ('temperature', 0.0001),\n ('num_test', 1), # 500\n ('dim', dim),\n ('num_train', num_train),\n ]\n\n # Adversarial configuration: test\n shared_params += nameit('adv', [\n ('norm_type', adv_norm_type),\n # ('lr', 0.1),\n ('niters', 1), # 10\n # ('eps_iter', attack_eps), # Overwritten by cvxpy\n # ('eps_tot', attack_eps), # Overwritten by cvxpy\n ('pre_normalize', True), # multi attacks\n ('post_normalize', True),\n ('eps_from_cvxpy', True),\n ('step_dir', attack_step_dir),\n ])\n\n # Logging to standard output\n shared_params += [\n ('log_interval', 10000), # 1000),\n ('log_keys', '\\'(\"%s\")\\'' % ('\",\"'.join([\n 'risk/train/zero_one',\n 'risk/train/adv/%s' % adv_norm_type,\n 'weight/linear/norm/%s' % dual_norm_type,\n 'margin/%s' % dual_norm_type,\n ]))),\n # Compare with cvxpy\n ('enable_cvxpy', True),\n ]\n\n # No 0 regularization coefficient\n reg_coeff = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]\n\n # Model hyper-parameters\n linear_noreg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', 'none'),\n ])\n linear_reg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', ['w_%s' % b for b in baseline_norm_types] +\n ['w_%s' % dual_norm_type]),\n ('reg_coeff', reg_coeff),\n ])\n deep_linear_params = nameit('model', [\n ('arch', 'deep_linear'),\n ('nlayers', 2),\n ('regularizer', 'none'),\n ])\n conv_linear_params = nameit('model', [\n ('arch', 'conv_linear'),\n ('nlayers', 2),\n ('regularizer', 'none'),\n ])\n\n params = []\n\n # cvxpy solution\n cvxpy_params = nameit('optim', [\n ('name', 'cvxpy'),\n ('norm', dual_norm_type),\n ('niters', 10000),\n ('lr', 0), # keep cvxpy sol fixed\n ])\n params += [OrderedDict(shared_params+linear_noreg_model_params+cvxpy_params)]\n\n # GD line search implicit bias\n gd_ls = nameit('optim', [\n ('name', 'gd_ls'),\n ('niters', 10000),\n ('bound_step', True),\n ])\n params += [OrderedDict(shared_params+deep_linear_params+gd_ls)]\n params += [OrderedDict(shared_params+conv_linear_params+gd_ls)]\n\n # CD, SignGD implicit bias\n cd_fixed_lr = nameit('optim', [\n ('name', ['cd', 'signgd']),\n ('niters', 10000),\n ('lr', [\n 1e-5, 3e-5, 1e-4, 3e-4, 1e-3, 3e-3, 1e-2, 3e-2, 1e-1,\n 3e-1, 1, 2, 3, 6, 9, 10, 20, 30, 50\n ]),\n ])\n params += [OrderedDict(shared_params+linear_noreg_model_params+cd_fixed_lr)]\n\n # Explicit regularization with line search\n explicit_reg = nameit('optim', [\n ('name', 'fista'),\n ('niters', 10000),\n ('bound_step', True),\n ('step_size', [1, 10, 100, 1000]),\n ])\n params += [OrderedDict(shared_params+linear_reg_model_params+explicit_reg)]\n\n # Adversarial training with line search\n adv_train_params = nameit('optim', [\n ('name', 'gd_ls'),\n ('niters', 10000),\n ('bound_step', True),\n ])\n adv_train_params += nameit('optim', nameit('adv_train', [\n ('enable', True),\n ('norm_type', adv_norm_type),\n # ('lr', 0.1),\n ('niters', 1), # niters, 1000\n ('pre_normalize', True),\n ('post_normalize', True),\n 
('step_dir', attack_step_dir),\n ('eps_from_cvxpy', True),\n ]))\n params += [OrderedDict(\n shared_params+linear_noreg_model_params+adv_train_params)]\n\n return params, log_dir, module_name, exclude",
"def fit(self, X, y):\n self.support_vectors_ = check_array(X)\n self.y = check_array(y, ensure_2d=False)\n random_state = check_random_state(self.random_state)\n self.kernel_args = {}\n if self.kernel == \"rbf\" and self.gamma is not None:\n self.kernel_args[\"gamma\"] = self.gamma\n elif self.kernel == \"poly\":\n self.kernel_args[\"degree\"] = self.degree\n self.kernel_args[\"coef0\"] = self.coef0\n elif self.kernel == \"sigmoid\":\n self.kernel_args[\"coef0\"] = self.coef0\n K = pairwise_kernels(X, metric=self.kernel, **self.kernel_args)\n self.dual_coef_ = np.zeros(X.shape[0])\n self.intercept_ = _svm.smo(\n K, y, self.dual_coef_, self.C, random_state, self.tol,\n self.numpasses, self.maxiter, self.verbose)\n # If the user was using a linear kernel, lets also compute and store\n # the weights. This will speed up evaluations during testing time.\n if self.kernel == \"linear\":\n self.coef_ = np.dot(self.dual_coef_ * self.y, self.support_vectors_)\n # only samples with nonzero coefficients are relevant for predictions\n support_vectors = np.nonzero(self.dual_coef_)\n self.dual_coef_ = self.dual_coef_[support_vectors]\n self.support_vectors_ = X[support_vectors]\n self.y = y[support_vectors]\n return self",
"def calibration(self, cal: int, /) -> None:",
"def refit_simple(x_train: np.ndarray, y: np.ndarray, interp: bool = True,\n p_val: float = 0.05, x_val: Optional[np.ndarray] = None, y_val: Optional[np.ndarray] = None\n ) -> Tuple[np.ndarray, float, np.ndarray, np.ndarray, np.ndarray]:\n sl_ok = np.ones(x_train.shape[1], dtype=bool)\n\n n = -1\n\n while True:\n n += 1\n assert sl_ok.sum() > 0, 'No features left to fit on iter'.format(n)\n\n logger.info('Iter {0} of final refit starts with {1} features'.format(n, sl_ok.sum()))\n\n x_train_ = x_train[:, sl_ok]\n # индексы в исходном массиве\n ok_idx = np.arange(x_train.shape[1])[sl_ok]\n\n clf = LogisticRegression(penalty='none', solver='lbfgs', warm_start=False,\n intercept_scaling=1)\n clf.fit(x_train_, y)\n\n # check negative coefs here if interp\n sl_pos_coef = np.zeros((x_train_.shape[1],), dtype=np.bool)\n if interp:\n sl_pos_coef = clf.coef_[0] >= 0\n\n # если хотя бы один неотрицательный - убирай самый большой и по новой\n if sl_pos_coef.sum() > 0:\n max_coef_idx = clf.coef_[0].argmax()\n sl_ok[ok_idx[max_coef_idx]] = False\n continue\n\n # если прошли все отрицательные смотрим на pvalue\n p_vals, b_var = calc_p_val(x_train_, clf.coef_[0], clf.intercept_[0])\n # без интерсепта\n p_vals_f = p_vals[:-1]\n\n model_p_vals = p_vals.copy()\n model_b_var = b_var.copy\n\n # если хотя бы один больше p_val - дропай самый большой и погнали по новой\n if p_vals_f.max() > p_val:\n max_p_val_idx = p_vals_f.argmax()\n sl_ok[ok_idx[max_p_val_idx]] = False\n continue\n\n if x_val is not None:\n # то же самое на валидационной выборке\n logger.info('Validation data checks')\n x_val_ = x_val[:, sl_ok]\n\n p_vals, b_var = calc_p_val_on_valid(x_val_, y_val)\n p_vals_f = p_vals[:-1]\n\n # если хотя бы один больше p_val - дропай самый большой и погнали по новой\n if p_vals_f.max() > p_val:\n max_p_val_idx = p_vals_f.argmax()\n sl_ok[ok_idx[max_p_val_idx]] = False\n continue\n\n weights = cast(np.ndarray, clf.coef_[0])\n intercept = cast(float, clf.intercept_[0])\n\n return weights, intercept, sl_ok, cast(np.ndarray, model_p_vals), cast(np.ndarray, model_b_var)"
] | [
"0.60893214",
"0.60358334",
"0.5759546",
"0.5748178",
"0.57063353",
"0.56848466",
"0.56386817",
"0.5601485",
"0.5578915",
"0.5504322",
"0.55038977",
"0.549859",
"0.54438186",
"0.5441058",
"0.5316333",
"0.5284502",
"0.52768564",
"0.5275868",
"0.52657294",
"0.52634394",
"0.5245846",
"0.52304536",
"0.5187408",
"0.5171467",
"0.5170929",
"0.5169425",
"0.5150441",
"0.51485896",
"0.5139914",
"0.5139014"
] | 0.6290439 | 0 |
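Note (illustrative, not part of the dataset rows): the query/document pair above describes a calibrated linear classifier estimator constructor. Below is a hedged usage sketch based only on the signature shown in that row; the feature columns, directories, hparams value, and training call are assumptions, and calibrated_linear_classifier is taken to be the constructor from the row above (assumed importable from the library the docstring belongs to).

import tensorflow as tf

# Real-valued inputs get piecewise-linear calibration; feature names are hypothetical.
feature_columns = [
    tf.feature_column.numeric_column('age'),
    tf.feature_column.numeric_column('income'),
]

estimator = calibrated_linear_classifier(   # constructor shown in the row above
    feature_columns=feature_columns,
    model_dir='/tmp/calibrated_linear_model',
    quantiles_dir='/tmp/quantiles',  # precomputed per-feature quantiles (see docstring)
    hparams=None,  # a per-feature calibration hyperparameter object would go here
)
# estimator.train(input_fn=train_input_fn)  # train_input_fn is hypothetical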
Calibrated linear estimator (model) for regression. This model uses a piecewise linear calibration function on each of the inputs (parametrized) and then combines (sums up) the results. Optionally calibration can be made monotonic. It usually requires a preprocessing step on the data, to calculate the quantiles of each used feature. This can be done locally or in one worker only before training, in a separate invocation of your program (or directly) in . Typically this can be saved (`save_dir` parameter) to the same directory where the data is. Hyperparameters are given in the form of the object tfl_hparams.CalibrationHParams. It takes in per-feature calibration parameters. Internally values will be converted to tf.float32. | def calibrated_linear_regressor(feature_columns=None,
model_dir=None,
quantiles_dir=None,
keypoints_initializers_fn=None,
optimizer=None,
config=None,
hparams=None):
return _CalibratedLinear(
n_classes=0,
feature_columns=feature_columns,
model_dir=model_dir,
quantiles_dir=quantiles_dir,
keypoints_initializers_fn=keypoints_initializers_fn,
optimizer=optimizer,
config=config,
hparams=hparams) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calibrated_linear_classifier(feature_columns=None,\n model_dir=None,\n quantiles_dir=None,\n keypoints_initializers_fn=None,\n optimizer=None,\n config=None,\n hparams=None):\n return _CalibratedLinear(\n n_classes=2,\n feature_columns=feature_columns,\n model_dir=model_dir,\n quantiles_dir=quantiles_dir,\n keypoints_initializers_fn=keypoints_initializers_fn,\n optimizer=optimizer,\n config=config,\n hparams=hparams)",
"def _calibrate(self, Otrain, Ftrain, Feval):\n raise NotImplementedError()",
"def calibrate_onc(data, path, model_name):\n \n #split test data (subsets 7-9) into new test (7-8)/train(9) sets\n calibration_train_set = data[((data.subset==7)|(data.subset==8))].copy()\n calibration_test_set = data[data.subset==9].copy()\n\n #define calibration model\n ir = IsotonicRegression(out_of_bounds=\"clip\")\n #fit the model to the probas from the training set\n ir.fit(calibration_train_set.score, calibration_train_set.y )\n \n #evaluate with the test set and save\n calibration_test_set.loc[:,'p_calibrated'] = ir.transform(calibration_test_set.score) \n \n #calibration_test_set.loc[:,'p_calibrated'] = p_calibrated\n \n #save\n with open(path + 'model_calibrated_' + model_name + '.pickle', 'wb') as picklefile: \n pickle.dump(ir,picklefile)\n \n with open(path + 'y_calibrated_' + model_name + '.pickle', 'wb') as picklefile: \n pickle.dump(calibration_test_set, picklefile)\n \n print_calibrated_results(calibration_test_set.y, calibration_test_set.score, calibration_test_set.p_calibrated)\n return calibration_test_set",
"def find_calibration_parameters(df, temperature, cal_mode, calibration_statistics, num_iterations, optimal_t=25):\n\n if \"temp\" in cal_mode:\n # create a column of T - optimal_T (mean temperature for each still bout minus the optimal temperature)\n # i.e. the deviation in T from the optimal\n df[\"T_dev\"] = temperature.data - optimal_t\n\n for i in range(num_iterations):\n # do linear regression:\n x_results, y_results, z_results = dataframe_regression(df, cal_mode, do_or_undo=\"do\")\n\n # results.params() gives the calibration parameters thus:\n # x_results.params() = [x_scale, x_offset, x_temp_offset] (last item only applies if temperature is used)\n df = dataframe_transformation(df, x_results.params, y_results.params, z_results.params,\n cal_mode)\n # update the \"matched\" arrays to reflect the new \"closest points\" after the dataframe transformation\n update_matched(df)\n\n # Regress the backup copy of the original input against the transformed version,\n # to calculate offset, scale and temperature offset scalar (if temperature used)\n x_results_final, y_results_final, z_results_final = dataframe_regression(df, cal_mode, do_or_undo=\"undo\")\n\n calibration_parameters = {\"x_offset\": x_results_final.params[1],\n \"x_scale\": x_results_final.params[0],\n \"y_offset\": y_results_final.params[1],\n \"y_scale\": y_results_final.params[0],\n \"z_offset\": z_results_final.params[1],\n \"z_scale\": z_results_final.params[0]\n }\n\n if \"temp\" in cal_mode:\n calibration_parameters[\"x_temp_offset\"] = x_results_final.params[2]\n calibration_parameters[\"y_temp_offset\"] = y_results_final.params[2]\n calibration_parameters[\"z_temp_offset\"] = z_results_final.params[2]\n else:\n calibration_parameters[\"x_temp_offset\"] = 0\n calibration_parameters[\"y_temp_offset\"] = 0\n calibration_parameters[\"z_temp_offset\"] = 0\n\n # if enhanced calibration statistics are required...\n if calibration_statistics:\n\n ######################\n\n # extract the error in the final regression fit for each axis\n calibration_parameters[\"x_rsquared\"] = x_results_final.rsquared\n calibration_parameters[\"y_rsquared\"] = y_results_final.rsquared\n calibration_parameters[\"z_rsquared\"] = z_results_final.rsquared\n\n x_bse = x_results_final.bse\n y_bse = y_results_final.bse\n z_bse = z_results_final.bse\n\n calibration_parameters[\"x_scale_se\"] = x_bse[0]\n calibration_parameters[\"y_scale_se\"] = y_bse[0]\n calibration_parameters[\"z_scale_se\"] = z_bse[0]\n\n calibration_parameters[\"x_offset_se\"] = x_bse[1]\n calibration_parameters[\"y_offset_se\"] = y_bse[1]\n calibration_parameters[\"z_offset_se\"] = z_bse[1]\n\n if \"temp\" in cal_mode:\n calibration_parameters[\"x_temp_offset_se\"] = x_bse[2]\n calibration_parameters[\"y_temp_offset_se\"] = y_bse[2]\n calibration_parameters[\"z_temp_offset_se\"] = z_bse[2]\n\n #########################\n\n return calibration_parameters",
"def mlr(df, exp_vars, resp_var, \n method='ols', \n fit_intercept=True,\n kcv=3,\n normalize=False):\n from sklearn import cross_validation\n from sklearn.linear_model import LinearRegression, RidgeCV\n from sklearn.linear_model import LassoCV, ElasticNetCV\n from sklearn.metrics import r2_score\n from sklearn.utils import resample\n import matplotlib.pyplot as plt\n import seaborn as sn\n import pandas as pd\n import numpy as np\n \n # Separate data\n X = df[exp_vars]\n y = df[resp_var]\n \n # Setup model\n if method == 'ols':\n model = LinearRegression(fit_intercept=fit_intercept, \n normalize=normalize)\n elif method == 'lasso':\n model = LassoCV(fit_intercept=fit_intercept, \n normalize=normalize, \n max_iter=10000,\n cv=kcv)\n elif method == 'ridge':\n model = RidgeCV(fit_intercept=fit_intercept, \n normalize=normalize, \n alphas=np.logspace(-10, 10, 21))\n elif method == 'el-net':\n model = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1],\n fit_intercept=fit_intercept, \n normalize=normalize,\n cv=kcv)\n else:\n raise ValueError('\"method\" parameter must be in [\"ols\", \"lasso\", \"ridge\", \"el-net\"]')\n \n # k-fold cross validation\n #cv_scores = cross_validation.cross_val_score(model, X, y, cv=kcv, scoring='r2')\n #print 'Mean r2 from %s-fold CV: %.3f\\n' % (kcv, cv_scores.mean())\n \n # Train model on full dataset\n model.fit(X, y)\n \n # Get y-hat\n y_pred = model.predict(X)\n \n # r2 based on calibration data\n r2 = r2_score(y, y_pred)\n print 'r2:', r2\n print ''\n \n # Summary of model\n print model\n print ''\n \n if method == 'lasso':\n print 'Lasso alpha:', model.alpha_\n print ''\n elif method == 'ridge':\n print 'Ridge alpha:', model.alpha_\n print ''\n elif method == 'el-net':\n print 'Elastic net alpha:', model.alpha_ \n print 'Elastic net L1 ratio:', model.l1_ratio_ \n print ''\n else: # OLS\n pass\n \n # Plot\n fig = plt.figure(figsize=(15,15))\n \n # Paired points for each site\n ax1 = plt.subplot2grid((2,2), (0,0), colspan=2)\n ax1.plot(range(0, len(X.index)), y, 'ro', label='Observed')\n ax1.plot(range(0, len(X.index)), y_pred, 'b^', label='Modelled')\n \n ax1.set_xticks(range(0, len(X.index)))\n ax1.set_xticklabels(X.index, rotation=90, fontsize=12)\n ax1.set_xlim(0, len(X.index)-1)\n \n ax1.set_xlabel('Site code', fontsize=16)\n ax1.set_ylabel(resp_var)\n ax1.set_title('Points paired for each location', fontsize=20)\n ax1.legend(loc='best', fontsize=16)\n \n # Modelled versus observed\n ax2 = plt.subplot2grid((2,2), (1,0), colspan=1)\n ax2.plot(y, y_pred, 'ro')\n ax2.set_xlabel('Observed', fontsize=16)\n ax2.set_ylabel('Modelled', fontsize=16)\n ax2.set_title('Modelled versus observed', fontsize=20)\n \n # Hist of residuals\n ax3 = plt.subplot2grid((2,2), (1,1), colspan=1)\n sn.distplot(y - y_pred, kde=True, ax=ax3)\n ax3.set_title('Histogram of residuals', fontsize=20)\n \n plt.tight_layout()\n \n # Get param estimates\n params = pd.Series(model.coef_, index=X.columns)\n\n # Estimate confidence using bootstrap\n # i.e. what is the std. dev. of the estimates for each parameter\n # based on 1000 resamplings\n err = np.std([model.fit(*resample(X, y)).coef_ for i in range(1000)], \n axis=0)\n\n # Build df\n res = pd.DataFrame({'effect':params,\n 'error':2*err})\n\n # Rough indicator of significance: are the estimated values more than\n # 2 std. devs. from 0 (~95% CI?). NB: this assumnes the \"marginal posterior\" \n # is normal, which I haven't tested for and which quite possibly isn't true\n # - use with care! 
\n res['signif'] = np.abs(res['effect']) > res['error']\n \n return res",
"def _calibration(\n name: str = CALIBRATION_NAME,\n eval_config: Optional[config_pb2.EvalConfig] = None,\n model_name: str = '',\n output_name: str = '',\n sub_key: Optional[metric_types.SubKey] = None,\n aggregation_type: Optional[metric_types.AggregationType] = None,\n class_weights: Optional[Dict[int, float]] = None,\n example_weighted: bool = False) -> metric_types.MetricComputations:\n key = metric_types.MetricKey(\n name=name,\n model_name=model_name,\n output_name=output_name,\n sub_key=sub_key,\n example_weighted=example_weighted)\n\n # Make sure weighted_labels_predictions_examples are calculated.\n computations = _weighted_labels_predictions_examples(\n eval_config=eval_config,\n model_name=model_name,\n output_name=output_name,\n sub_key=sub_key,\n aggregation_type=aggregation_type,\n class_weights=class_weights,\n example_weighted=example_weighted)\n weighted_labels_predictions_key = computations[-1].keys[-1]\n\n def result(\n metrics: Dict[metric_types.MetricKey, Any]\n ) -> Dict[metric_types.MetricKey, Any]:\n \"\"\"Returns calibration.\"\"\"\n metric = metrics[weighted_labels_predictions_key]\n if np.isclose(metric.total_weighted_labels, 0.0):\n value = float('nan')\n else:\n value = metric.total_weighted_predictions / metric.total_weighted_labels\n\n return {key: value}\n\n derived_computation = metric_types.DerivedMetricComputation(\n keys=[key], result=result)\n computations.append(derived_computation)\n return computations",
"def calibrate(self, Otrain, Ftrain, Feval):\n return Otrain[0] * np.ones(len(Feval))",
"def calibrate(self, master):\n if master.polyorder == 'linear':\n self.fitfunction = \"A0 + A1 * D\"\n self.fit_fkt = self.calc_lin\n elif master.polyorder == 'quadratic':\n self.fit_fkt = self.calc_quad\n self.fitfunction = \"A0 + A1 * D + A2 * D**2\"\n elif master.polyorder == \"cubic\":\n self.fitfunction = \"A0 + A1 * D + A2 * D**2 + A3 * D**3\"\n self.fit_fkt = self.calc_cubic\n else:\n print(\"Polynomgrad nicht definiert\")\n \n self.mw = np.asarray(self.mw)\n if master.sensortype == \"Druck\":\n self.best, self.covar = curve_fit(self.fit_fkt, self.mw, master.Referencedata.caldat)\n else:\n print(\"Sensortyp noch nicht Hinterlegt\")",
"def train_calibrate_predict(clf, X_t, y_t, X_v, y_v, params, jobs):\n\n # Indicate the classifier and the training set size\n print(\"Training a {} with None...\".format(clf.__class__.__name__))\n\n # Train the classifier\n clf = train_classifier(clf, X_t, y_t, params, jobs)\n\n # # Calibrate classifier\n # print(\"Calibrating probabilities of classifier...\")\n # start = time()\n # clf = CalibratedClassifierCV(best_pipe.named_steps['clf'], cv='prefit', method='isotonic')\n # clf.fit(best_pipe.named_steps['dm_reduce'].transform(X_calibrate), y_calibrate)\n # end = time()\n # print(\"Calibrated {} in {:.1f} minutes\".format(clf.__class__.__name__, (end - start) / 60))\n\n # Print the results of prediction for both training and testing\n train_score = predict_labels(clf, X_t, y_t)\n test_score = predict_labels(clf, X_v, y_v)\n print(\"Score of {} for training set: {:.4f}.\".format(clf.__class__.__name__, train_score))\n print(\"Score of {} for test set: {:.4f}.\".format(clf.__class__.__name__, test_score))\n\n # Return classifier, and score for train and test set\n return clf, train_score, test_score",
"def fit_model(X_train, X_test, y_train, y_test, model):\n \n if model == 'LinearRegression':\n \n regressor=LinearRegression()\n regressor.fit(X_train,y_train)\n y_pred =regressor.predict(X_test)\n r2 = r2_score(y_test, y_pred)\n \n elif model == 'Lasso':\n \n lasso = Lasso()\n lasso.fit(X_train, y_train)\n lasso_pred = lasso.predict(X_test)\n r2 = r2_score(y_test, lasso_pred)\n\n elif model == 'Ridge':\n \n ridge = Ridge()\n ridge.fit(X_train, y_train)\n ridge_pred = ridge.predict(X_test)\n r2 = r2_score(y_test, ridge_pred)\n \n \n else:\n model = make_pipeline(PolynomialFeatures(2), LinearRegression())\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n r2= r2_score(y_test,y_pred)\n\n\n return r2",
"def calibrate(self, poly_n=0, analytes=None, drift_correct=False,\n srm_errors=False, srms_used=['NIST610', 'NIST612', 'NIST614']):\n # MAKE CALIBRATION CLEVERER!?\n # USE ALL DATA OR AVERAGES?\n # IF POLY_N > 0, STILL FORCE THROUGH ZERO IF ALL\n # STDS ARE WITHIN ERROR OF EACH OTHER (E.G. AL/CA)\n # can store calibration function in self and use *coefs?\n # check for identified srms\n\n if analytes is None:\n analytes = self.analytes\n elif isinstance(analytes, str):\n analytes = [analytes]\n\n if not hasattr(self, 'srmtabs'):\n self.srm_id_auto(srms_used)\n\n # calibration functions\n def calib_0(P, x):\n return x * P[0]\n\n def calib_n(P, x):\n # where p is a list of polynomial coefficients n items long,\n # corresponding to [..., 2nd, 1st, 0th] order coefficients\n return np.polyval(P, x)\n\n # wrapper for ODR fitting\n def odrfit(x, y, fn, coef0, sx=None, sy=None):\n dat = odr.RealData(x=x, y=y,\n sx=sx, sy=sy)\n m = odr.Model(fn)\n mod = odr.ODR(dat, m, coef0)\n mod.run()\n return un.uarray(mod.output.beta, mod.output.sd_beta)\n\n # make container for calibration params\n if not hasattr(self, 'calib_params'):\n self.calib_params = pd.DataFrame(columns=self.analytes)\n\n # set up calibration functions\n if not hasattr(self, 'calib_fns'):\n self.calib_fns = {}\n\n for a in analytes:\n if poly_n == 0:\n self.calib_fns[a] = calib_0\n p0 = [1]\n else:\n self.calib_fns[a] = calib_n\n p0 = [1] * (poly_n - 1) + [0]\n\n # calculate calibrations\n if drift_correct:\n for n, g in self.srmtabs.loc[a, :].groupby(level=0):\n if srm_errors:\n p = odrfit(x=self.srmtabs.loc[a, 'meas_mean'].values,\n y=self.srmtabs.loc[a, 'srm_mean'].values,\n sx=self.srmtabs.loc[a, 'meas_err'].values,\n sy=self.srmtabs.loc[a, 'srm_err'].values,\n fn=self.calib_fns[a],\n coef0=p0)\n else:\n p = odrfit(x=self.srmtabs.loc[a, 'meas_mean'].values,\n y=self.srmtabs.loc[a, 'srm_mean'].values,\n sx=self.srmtabs.loc[a, 'meas_err'].values,\n fn=self.calib_fns[a],\n coef0=p0)\n uTime = g.index.get_level_values('uTime').values.mean()\n self.calib_params.loc[uTime, a] = p\n else:\n if srm_errors:\n p = odrfit(x=self.srmtabs.loc[a, 'meas_mean'].values,\n y=self.srmtabs.loc[a, 'srm_mean'].values,\n sx=self.srmtabs.loc[a, 'meas_err'].values,\n sy=self.srmtabs.loc[a, 'srm_err'].values,\n fn=self.calib_fns[a],\n coef0=p0)\n else:\n p = odrfit(x=self.srmtabs.loc[a, 'meas_mean'].values,\n y=self.srmtabs.loc[a, 'srm_mean'].values,\n sx=self.srmtabs.loc[a, 'meas_err'].values,\n fn=self.calib_fns[a],\n coef0=p0)\n self.calib_params.loc[0, a] = p\n\n # apply calibration\n for d in tqdm(self.data, desc='Calibration'):\n try:\n d.calibrate(self.calib_fns, self.calib_params, analytes, drift_correct=drift_correct)\n except:\n print(d.sample + ' failed - probably first or last SRM\\nwhich is outside interpolated time range.')\n\n self.focus_stage = 'calibrated'\n # # save calibration parameters\n # # self.save_calibration()\n return",
"def RegressionMain(full_raw, target_col, feature_cols, test_size, model='Ridge', max_train_size=200, embargo_size=1, logpx=True, \n resample_per='B', ewm_span=50, verbose=False, alpha_override=None, lasso_positive=False):\n \n # pre-process\n cols = np.hstack((target_col, feature_cols))\n data = full_raw[cols].copy(deep=True)\n raw_clean = data.asfreq(resample_per).dropna(how='any')\n if ewm_span is not None:\n data = data.ewm(span=ewm_span).mean()\n data = data.asfreq(resample_per).dropna(how='any')\n if logpx:\n data = data.apply(np.log)\n raw_clean = raw_clean.apply(np.log)\n \n dates, betas = [],[]\n \n # get alpha to use in model fits\n ## we only use first quarter of data to not cheat so hard\n x_full = data[feature_cols]\n y_full = data[target_col]\n x_raw_clean = raw_clean[feature_cols]\n \n x_find_alpha = x_full.iloc[:int(data.shape[0]/4)]\n y_find_alpha = y_full.iloc[:int(data.shape[0]/4)]\n tss = TimeSeriesSplit(n_splits=20)\n alpha_space = np.logspace(-6,2,25)\n if model == 'Ridge':\n cv = RidgeCV(alphas=alpha_space, cv=tss)\n cv.fit(x_find_alpha, y_find_alpha)\n alpha = cv.alpha_\n \n elif model == 'Lasso':\n cv = LassoCV(alphas=alpha_space, cv=tss)\n cv.fit(x_find_alpha, y_find_alpha)\n alpha = cv.alpha_\n \n else:\n alpha = 0.0001\n if alpha_override is not None: alpha = alpha_override\n if verbose: print(alpha)\n \n pred_full = pd.Series(name='Pred')\n for train_idx, embargo_idx, test_idx in _embargo_ts_splitter(data, test_size, max_train_size=max_train_size,\n embargo=embargo_size):\n x_train = x_full.iloc[train_idx]\n y_train = y_full.iloc[train_idx]\n beta = _regression_loop(y_train, x_train, model, lasso_positive, alpha=alpha)\n \n x_test = x_raw_clean.iloc[test_idx]\n pred = sm.add_constant(x_test).dot(beta).rename('Pred')\n pred_full = pred_full.append(pred)\n \n # save to return params\n # date associated with beta (for rebalancing) should be day after computation\n dates.append(data.index[test_idx[0]])\n betas.append(beta)\n \n #rescale if necessary\n if logpx:\n pred_full = pred_full.apply(np.exp)\n \n return pred_full, dates, betas",
"def calibrate(self, calib_fns, calib_params, analytes=None, drift_correct=False):\n # can have calibration function stored in self and pass *coefs?\n if analytes is None:\n analytes = self.analytes\n\n if 'calibrated' not in self.data.keys():\n self.data['calibrated'] = {}\n\n for a in analytes:\n if drift_correct:\n P = self.drift_params(calib_params, a)\n else:\n P = calib_params[a].values[0]\n\n self.data['calibrated'][a] = \\\n calib_fns[a](P,\n self.data['ratios'][a])\n\n # coefs = calib_params[a]\n # if len(coefs) == 1:\n # self.data['calibrated'][a] = \\\n # self.data['ratios'][a] * coefs\n # else:\n # self.data['calibrated'][a] = \\\n # np.polyval(coefs, self.data['ratios'][a])\n # self.data['ratios'][a] * coefs[0] + coefs[1]\n self.setfocus('calibrated')\n return",
"def _calibrate_without_loss(self, calib_dataset, calib_batch_size,\n calib_steps):\n # Create quantize calibration model\n if not self._optimized_model:\n logger.error(\n 'Should call `optimize_model()` before `_calibrate_without_loss`.')\n self._qcb_model, self._layer_metadata = self._quantizer.create_quantize_model(\n self._optimized_model,\n candidate_layers=self._candidate_layers,\n layer_metadata=self._layer_metadata,\n quantize_strategy=self._quantize_strategy,\n mode='QCB',\n target=self._target,\n dataset=calib_dataset,\n batch_size=calib_batch_size,\n steps=calib_steps,\n specific_layers=self._specific_layers)\n\n if calib_dataset is not None:\n logger.info(\"Start Quantize Calibration...\")\n collector = self._run_model_with_collector(self._qcb_model, calib_dataset,\n calib_batch_size, calib_steps)\n\n # Create quantize calibration evaluation model\n self._qcbev_model = model_utils.clone_model_with_weights(self._qcb_model)\n model_utils.set_layer_mode(self._qcbev_model, 'QCBEV')\n\n if type(self._quantize_strategy\n ) == vitis_pof2s_quantize_strategy.VitisPof2SQuantizeStrategy:\n # Freeze the quantize info into the model, now using most_common_quantize_info\n # last_quantize_info = collector.get_last_quantize_info()\n common_quantize_info = collector.get_most_common_quantize_info()\n self._freeze_quantize_info(common_quantize_info)\n elif type(self._quantize_strategy) in [\n vitis_fs_quantize_strategy.VitisFSQuantizeStrategy,\n vitis_fsx_quantize_strategy.VitisFSXQuantizeStrategy,\n vitis_gpu_quantize_strategy.VitisGPUQuantizeStrategy\n ]:\n # Freeze the quantize info into the model, now using most_common_quantize_info\n # last_quantize_info = collector.get_last_quantize_info()\n common_quantize_info = collector.get_entropy_percentile_amax(\n self._qcb_model)\n self._freeze_quantize_info(common_quantize_info)\n\n logger.info(\"Quantize Calibration Done.\")",
"def train_calibration(config):\n run_dates = pd.date_range(start=config.start_dates[\"train\"],\n end=config.end_dates[\"train\"],\n freq='1D').strftime(config.run_date_format)\n \n target_calib_models = {}\n print()\n print('Loading Data')\n\n for size_index,size in enumerate(config.size_threshold):\n target_calib_models[size] = {}\n train_files, target_files = [], []\n for date in run_dates: \n train_data_files = glob(config.train_data_path+ \\\n \"20{2}/netcdf/*{0}*unsmoothed*_{1}_*{2}*{3}*{4}.nc\".format(\n config.forecast_model_names,size,date,\n config.start_hour,config.end_hour))\n if len(train_data_files) < 1:\n continue\n if config.sector:\n target_data_files = glob(config.target_data_path+'{0}*{1}*{2}*.nc'.format(\n date,size,config.sector)) \n else:\n target_data_files = glob(config.target_data_path+'{0}*{1}*.nc'.format(\n date,size))\n if len(target_data_files) < 1:\n continue\n train_files.append(train_data_files[0])\n target_files.append(target_data_files[0])\n \n date_indices = [index for index in range(len(train_files))]\n percent_train_indices = int(len(train_files)*0.70)\n t_data = [Dataset(x).variables[\"Data\"][:] for x in train_files] \n tar_data = [Dataset(x).variables[\"24_Hour_All_12z_12z\"][:] for x in target_files] \n print()\n print('Number of files:')\n print('Train (70%): {0}'.format(int(len(t_data)*0.70)))\n print('Validate (30%): {0}'.format(int(len(t_data)*0.30)))\n print()\n for ind,model_name in enumerate(config.calibration_model_names):\n bs = []\n random_models = []\n print('Random Cross-Validation, {0} >{1}mm'.format(model_name,size)) \n random_seed = random.sample(range(1, 100), 10)\n for s,seed in enumerate(random_seed):\n np.random.seed(seed)\n print('Index',s, 'Random Seed', seed)\n train_indices = np.random.choice(date_indices, percent_train_indices, replace=False)\n test_indices = [ind for ind in date_indices if ind not in train_indices]\n \n train_data = np.array(t_data)[train_indices].ravel()\n target_train_data = np.array(tar_data)[train_indices].ravel()\n \n val_data = np.array(t_data)[test_indices].ravel()\n target_val_data = np.array(tar_data)[test_indices].ravel()\n \n model = deepcopy(config.calibration_model_objs[ind])\n model.fit(train_data,target_train_data)\n random_models.append(model)\n \n predict = model.transform(val_data)\n \n #plt.figure(figsize=(9, 6))\n #plt.plot(sorted(val_data),model.transform(sorted(val_data)))\n #plt.xlabel('data')\n #plt.ylabel('calibrated')\n #plt.show()\n #plt.close()\n\n print(brier_score(predict, target_val_data))\n bs.append(brier_score(predict, target_val_data))\n \n best_bs = np.argmin(bs)\n target_calib_models[size][model_name] = np.array(random_models)[best_bs]\n print('Lowest Brier Score: {0}'.format(np.array(bs)[best_bs]))\n print()\n print()\n return target_calib_models",
"def calibrate(\n model: onnx.ModelProto, dataset: List[Dict[str, np.ndarray]]\n) -> Dict[str, Tuple[float, float]]:\n augmented_model = ONNXCalibrator(model).build_calibration_model()\n return calibrator.calibrate(augmented_model, dataset)",
"def Calibrator(\n data_loader, cache=None, BaseClass=None, batch_size=None, quantile=None, regression_cutoff=None, algo=None\n):\n BaseClass = util.default(BaseClass, trt.IInt8EntropyCalibrator2)\n\n class CalibratorClass(BaseClass):\n \"\"\"\n Calibrator that supplies calibration data to TensorRT to calibrate the network for INT8 inference.\n \"\"\"\n\n def __init__(self):\n # Must explicitly initialize parent for any trampoline class! Will mysteriously segfault without this.\n BaseClass.__init__(self) # type: ignore\n\n self.data_loader = data_loader\n self._cache = cache\n self.device_buffers = OrderedDict()\n self.input_metadata = None\n self.reset()\n G_LOGGER.verbose(f\"Created calibrator [cache={self._cache}]\")\n\n self.batch_size = util.default(batch_size, 1)\n\n self.is_polygraphy_calibrator = True\n # The function that constructed this instance\n self.make_func = Calibrator\n\n def set_input_metadata(self, input_metadata):\n \"\"\"\n Sets the input metadata for the calibrator.\n\n This is passed along to the data loader and is also used for\n input data type and shape checks.\n\n NOTE: This generally does not need to be called manually if the calibrator is being used\n with Polygraphy's loaders, like ``CreateConfig`` or ``EngineFromNetwork``.\n\n Args:\n input_metadata (TensorMetadata):\n Mapping of input names to their data types and shapes.\n Passed along to the data loader if provided. This is required if\n using Polygraphy's included `DataLoader` to provide calibration data,\n or if data type and shape checking is desired.\n \"\"\"\n self.input_metadata = input_metadata\n if input_metadata is not None:\n with contextlib.suppress(AttributeError):\n self.data_loader.input_metadata = input_metadata\n\n def reset(self):\n \"\"\"\n Reset this calibrator for reuse.\n\n The calibrator will clear any dynamic ranges cached from previous calibration runs, and will\n attempt to rewind the data loader (note that generators cannot be rewound).\n\n Typically, this is only required if the same calibrator is used for multiple different networks.\n \"\"\"\n # Attempt to reset data loader\n self.data_loader_iter = iter(self.data_loader)\n self.num_batches = 0\n\n # Make sure calibrator will check the cache again when reset.\n self.cache_contents = None\n\n def get_batch_size(self):\n return self.batch_size\n\n def _get_batch_impl(self, names):\n try:\n buffers = next(self.data_loader_iter)\n except StopIteration:\n if not self.num_batches:\n G_LOGGER.critical(\n \"Calibrator data loader provided no data.\\nPossible reasons for this include:\\n(1) data loader \"\n \"has no data to provide\\n(2) data loader was a generator, and the calibrator is being \"\n \"used multiple times (generators cannot be rewound)\"\n )\n return None\n else:\n self.num_batches += 1\n\n util.check_sequence_contains(\n buffers.keys(),\n names,\n name=\"calibration input data provided by the data loader\",\n items_name=\"inputs\",\n )\n\n def check_buffer(name, buffer):\n if self.input_metadata is None:\n return\n\n expected_dtype, expected_shape = self.input_metadata[name]\n\n err_prefix = \"Received an unexpected input from the data loader during calibration. 
\"\n if buffer.dtype != expected_dtype:\n G_LOGGER.critical(\n err_prefix\n + f\"For input: '{name}', expected data type: {expected_dtype}, but received: {buffer.dtype}\"\n )\n\n if not util.is_valid_shape_override(buffer.shape, expected_shape):\n G_LOGGER.critical(\n err_prefix\n + f\"For input: '{name}', expected a shape compatible with: {expected_shape}, but received: {buffer.shape}\"\n )\n\n ptrs = []\n for name in names:\n buf = buffers[name]\n\n if isinstance(buf, cuda.DeviceView):\n check_buffer(name, buf)\n ptrs.append(buf.ptr)\n elif isinstance(buf, np.ndarray):\n check_buffer(name, buf)\n if name not in self.device_buffers:\n self.device_buffers[name] = cuda.DeviceArray(shape=buf.shape, dtype=buf.dtype)\n G_LOGGER.verbose(f\"Allocated: {self.device_buffers[name]}\")\n\n self.device_buffers[name].resize(buf.shape)\n buf = util.make_contiguous(buf)\n ptrs.append(self.device_buffers[name].copy_from(buf).ptr)\n elif isinstance(buf, int):\n ptrs.append(buf)\n else:\n G_LOGGER.critical(\n f\"Calibration data loader provided an unrecognized type: {type(buf).__name__} for input: {name}.\"\n \"\\nPlease provide either a NumPy array, Polygraphy DeviceView, or GPU pointer. \"\n )\n\n return ptrs\n\n def get_batch(self, names):\n ptrs = None\n try:\n ptrs = self._get_batch_impl(names)\n except PolygraphyException:\n pass\n if ptrs is None:\n self.free()\n return ptrs\n\n def read_calibration_cache(self):\n def load_from_cache():\n if self._cache is None or not util.get_file_size(self._cache):\n return None\n\n try:\n return util.load_file(self._cache, description=\"calibration cache\")\n except Exception as err:\n G_LOGGER.error(f\"Could not read from calibration cache: {self._cache}\\nNote: Error was: {err}\")\n return None\n\n if self.cache_contents is not None:\n return self.cache_contents\n\n self.cache_contents = load_from_cache()\n\n if not self.cache_contents:\n if self.cache_contents is not None:\n G_LOGGER.warning(\n \"Calibration cache was provided, but is empty. \"\n \"Will regenerate scales by running calibration.\",\n mode=LogMode.ONCE,\n )\n self.cache_contents = None\n\n return self.cache_contents\n\n def write_calibration_cache(self, cache):\n self.cache_contents = cache.tobytes()\n\n if self._cache is None:\n return\n\n try:\n util.save_file(contents=self.cache_contents, dest=self._cache, description=\"calibration cache\")\n except Exception as err:\n G_LOGGER.error(f\"Could not write to calibration cache: {self._cache}.\\nNote: Error was: {err}\")\n\n def free(self):\n \"\"\"\n Frees all device buffers associated with this calibrator\n \"\"\"\n for device_buffer in self.device_buffers.values():\n device_buffer.free()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.free()\n\n # IInt8LegacyCalibrator methods\n if BaseClass == trt.IInt8LegacyCalibrator:\n\n def get_quantile(self):\n return util.default(quantile, 0.5)\n\n def get_regression_cutoff(self):\n return util.default(regression_cutoff, 0.5)\n\n def read_histogram_cache(self, length):\n pass\n\n def write_histogram_cache(self, ptr, length):\n pass\n\n # IInt8Calibrator methods\n if BaseClass == trt.IInt8Calibrator:\n\n def get_algorithm(self):\n return util.default(algo, trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2)\n\n def __repr__(self):\n return util.make_repr(\n \"Calibrator\",\n data_loader,\n cache=cache,\n BaseClass=BaseClass,\n batch_size=batch_size,\n quantile=quantile,\n regression_cutoff=regression_cutoff,\n algo=algo,\n )[0]\n\n return CalibratorClass()",
"def fit(self, X, y):\n\n X, y = self._check_X_y(X, y, accept_sparse=['csr'])\n n_samples, n_features = X.shape\n n_components = self.transformer.n_components\n # valid hyper parameters and init parameters\n self._valid_params()\n self._init_params(X, y)\n\n loss = self.LOSSES[self.loss]\n alpha = self.alpha / self.C\n intercept_decay = self.intercept_decay / self.C\n random_state = check_random_state(self.random_state)\n is_sparse = sparse.issparse(X)\n\n it = _adagrad_fast(self.coef_, self.intercept_,\n get_dataset(X, order='c'), X, y, self.acc_grad_,\n self.acc_grad_norm_, self.acc_grad_intercept_,\n self.acc_grad_norm_intercept_, self.mean_, self.var_,\n loss, alpha, self.l1_ratio, intercept_decay,\n self.eta0, self.t_, self.max_iter, self.tol,\n self.eps, is_sparse, self.verbose,\n self.fit_intercept, self.shuffle,\n random_state, self.transformer,\n get_fast_random_feature(self.transformer))\n self.t_ += n_samples*(it+1)\n\n return self",
"def fit_least_squares(input_data, output_data):\n # This function's code follows the formula for finding the weights\n # that create the least mean-squared error, which is:\n # w = (((y_t)x)(inv((x_t)x))_t)\n\n xtx = numpy.matmul(numpy.transpose(input_data),input_data)\n xtx_inv = numpy.linalg.inv(xtx)\n ytx = numpy.matmul(numpy.transpose(output_data),input_data)\n\n return LinearModel(numpy.transpose(numpy.matmul(ytx,xtx_inv)))",
"def fit(model, input: str, output: str):\n print(f\"Fitting model of type: {model}\")\n\n # Define the model. Use a randomized search to efficiently explore the\n # hyperparameter space in a limited time.\n if model == \"logistic\":\n # Primal logistic regression\n param_dist = {\n 'C': loguniform(0.1, 100), 'max_iter': [250], 'fit_intercept': [True],\n 'intercept_scaling': [1], 'penalty': ['l2'], 'tol': loguniform(1e-6, 1e-4)\n }\n mdl_cv = RandomizedSearchCV(LogisticRegression(solver='lbfgs'), param_dist, cv=3, refit=True, verbose=2, n_iter=10)\n elif model == \"rand_forest\":\n # Random Forest classifier\n param_dist = {'max_depth': randint(3,8), 'max_features': randint(2,9), 'n_estimators': randint(50, 100),\n 'min_samples_split': randint(3, 7)}\n mdl_cv = RandomizedSearchCV(RandomForestClassifier(), param_dist, cv=3, refit=True, verbose=2, n_iter=10)\n elif model == \"gradient_boost\":\n # Extreme Gradient Boost classifier\n param_dist = {'max_depth': [3, 4], 'gamma': loguniform(1e-3, 1e-2), 'min_child_weight': randint(1, 10),\n 'learning_rate': loguniform(0.05, 0.3), 'n_estimators': randint(10, 40)}\n mdl_cv = RandomizedSearchCV(XGBClassifier(), param_dist, cv=3, refit=True, verbose=2, n_iter=10)\n else:\n raise NotImplementedError(f\"Don't know how to train model of type: {model}.\\nValid options are: logistic, rand_forest, gradient_boost.\")\n\n # Define x (input data) and y (target data)\n df = pd.read_csv(input)\n x = df.loc[:, df.columns != 'Exited']\n y = df.Exited\n print(f\"Data has x.shape = {x.shape} and y.shape = {y.shape}\")\n\n # Fit the model with randomized search\n mdl_cv.fit(x, y)\n\n # Print some results\n print(\"Best score:\", mdl_cv.best_score_)\n print(\"Best params:\", pprint.pformat(mdl_cv.best_params_))\n\n # Save to data store\n os.makedirs(os.path.dirname(output), exist_ok=True)\n with open(output, \"wb\") as f:\n joblib.dump(mdl_cv.best_estimator_, f)",
"def fit(self, X, y=None):\n # default to QuicGraphicalLassoCV\n estimator = self.estimator or QuicGraphicalLassoCV()\n\n self.lam_ = None\n self.estimator_ = None\n\n X = check_array(X, ensure_min_features=2, estimator=self)\n X = as_float_array(X, copy=False, force_all_finite=False)\n\n n_samples_, n_features_ = X.shape\n \n # perform first estimate\n estimator.fit(X)\n\n if self.method == \"binary\":\n # generate weights\n self.lam_ = self._binary_weights(estimator)\n\n # perform second step adaptive estimate\n self.estimator_ = QuicGraphicalLasso(\n lam=self.lam_ * estimator.lam_,\n mode=\"default\",\n init_method=\"cov\",\n auto_scale=False,\n )\n self.estimator_.fit(X)\n\n elif self.method == \"inverse_squared\":\n self.lam_ = self._inverse_squared_weights(estimator)\n\n # perform second step adaptive estimate\n self.estimator_ = QuicGraphicalLassoCV(\n lam=self.lam_ * self.estimator.lam_, auto_scale=False\n )\n self.estimator_.fit(X)\n\n elif self.method == \"inverse\":\n self.lam_ = self._inverse_weights(estimator)\n\n # perform second step adaptive estimate\n self.estimator_ = QuicGraphicalLassoCV(\n lam=self.lam_ * estimator.lam_, auto_scale=False\n )\n self.estimator_.fit(X)\n\n else:\n raise NotImplementedError(\n (\n \"Only method='binary', 'inverse_squared', or\",\n \"'inverse' have been implemented.\",\n )\n )\n\n self.is_fitted_ = True\n self.n_features_in_ = X.shape[1]\n return self",
"def fit(self):\n \n # Open an existing model and get the training & test dataset and targets\n train_test_df, target_df = self._get_model_and_data(target=True, set_feature_def=True)\n \n # Check that the estimator is an supervised ML algorithm\n if self.model.estimator_type not in [\"classifier\", \"regressor\"]:\n err = \"Incorrect usage. The estimator specified is not a known classifier or regressor: {0}\".format(self.model.estimator)\n raise Exception(err)\n \n # Check which validation strategy is to be used, if any\n # For an explanation of cross validation in scikit-learn see: http://scikit-learn.org/stable/modules/cross_validation.html#multimetric-cross-validation\n if self.model.time_series_split > 0:\n self.model.validation = \"timeseries\"\n # Set up cross validation to be performed using TimeSeriesSplit\n self.model.cv = TimeSeriesSplit(n_splits=self.model.time_series_split, max_train_size=self.model.max_train_size)\n elif self.model.cv > 0:\n self.model.validation = \"k-fold\"\n elif self.model.test_size > 0:\n self.model.validation = \"hold-out\"\n else:\n self.model.validation = \"external\"\n\n if self.model.validation == \"hold-out\": \n # Split the data into training and testing subsets\n self.X_train, self.X_test, self.y_train, self.y_test = \\\n train_test_split(train_test_df, target_df, test_size=self.model.test_size, random_state=self.model.random_state)\n else:\n self.X_train = train_test_df\n self.y_train = target_df\n \n # Add the training and test data to the model if required\n if self.model.retain_data:\n self.model.X_train = self.X_train\n self.model.y_train = self.y_train\n \n try:\n self.model.X_test = self.X_test\n self.model.y_test = self.y_test\n except AttributeError:\n pass\n \n # Scale the targets and increase stationarity if required\n if self.model.scale_target or self.model.make_stationary:\n # Set up the target transformer\n self.model.target_transformer = TargetTransformer(scale=self.model.scale_target, make_stationary=self.model.make_stationary, stationarity_lags=self.model.stationarity_lags,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Fit the transformer to the training targets\n self.model.target_transformer = self.model.target_transformer.fit(self.y_train)\n\n # Apply the transformer to the training targets\n self.y_train = self.model.target_transformer.transform(self.y_train)\n # Drop samples where the target cannot be transformed due to insufficient lags\n self.X_train = self.X_train.iloc[len(self.X_train)-len(self.y_train):] \n \n # Add lag observations to the samples if required\n if self.model.lags or self.model.lag_target:\n # Check if the current sample will be included as an input, or whether we only use lag observations for predictions\n extrapolate = 1 if self.model.current_sample_as_input else 0\n # Add the lag observations\n self.X_train = self._add_lags(self.X_train, self.y_train, extrapolate=extrapolate, update_features_df=True)\n # Drop targets for samples which were dropped due to null values after adding lags.\n if len(self.y_train) > len(self.X_train):\n self.y_train = self.y_train.iloc[len(self.y_train)-len(self.X_train):]\n\n # If this is a Keras estimator, we require the preprocessing to return a data frame instead of a numpy array\n prep_return = 'df' if self.model.using_keras else 'np'\n\n # Construct the preprocessor\n prep = Preprocessor(self.model.features_df, return_type=prep_return, scale_hashed=self.model.scale_hashed, 
scale_vectors=self.model.scale_vectors,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Setup a list to store steps for the sklearn pipeline\n pipe_steps = [('preprocessor', prep)]\n\n if self.model.dim_reduction:\n # Construct the dimensionality reduction object\n reduction = self.decomposers[self.model.reduction](**self.model.dim_reduction_args)\n \n # Include dimensionality reduction in the pipeline steps\n pipe_steps.append(('reduction', reduction))\n self.model.estimation_step = 2\n else:\n self.model.estimation_step = 1 \n\n # If this is a Keras estimator, update the input shape and reshape the data if required\n if self.model.using_keras:\n # Update the input shape based on the final number of features after preprocessing\n self._keras_update_shape(prep)\n\n # Add the Keras build function, architecture and prediction_periods to the estimator keyword arguments\n self.model.estimator_kwargs['build_fn'] = self._keras_build_fn\n self.model.estimator_kwargs['architecture'] = self.model.architecture\n self.model.estimator_kwargs['prediction_periods'] = self.model.prediction_periods\n\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(10)\n \n # Check than an identifier has been provided for sorting data if this is a sequence prediction problem\n if self.model.lags or len(self.model.first_layer_kwargs[\"input_shape\"]) > 1:\n assert len(self.model.original_features_df[self.model.original_features_df['variable_type'].isin([\"identifier\"])]) == 1, \\\n \"An identifier is mandatory when using lags or with sequence prediction problems. Define this field in your feature definitions.\"\n\n # Cater for multi-step predictions\n if self.model.prediction_periods > 1:\n # Transform y to a vector of values equal to prediction_periods\n self.y_train = utils.vectorize_array(self.y_train, steps=self.model.prediction_periods)\n # Drop values from x for which we don't have sufficient y values\n self.X_train = self.X_train.iloc[:-len(self.X_train)+len(self.y_train)]\n\n # Add a pipeline step to update the input shape and reshape the data if required\n # This transform will also add lag observations if specified through the lags parameter\n # If lag_target is True, an additional feature will be created for each sample using the previous value of y \n reshape = Reshaper(first_layer_kwargs=self.model.first_layer_kwargs, logfile=self.logfile)\n pipe_steps.append(('reshape', reshape))\n self.model.estimation_step += self.model.estimation_step\n\n # Avoid tensorflow error for keras models\n # https://github.com/tensorflow/tensorflow/issues/14356\n # https://stackoverflow.com/questions/40785224/tensorflow-cannot-interpret-feed-dict-key-as-tensor\n kerasbackend.clear_session()\n \n # Try assuming the pipeline involves a grid search\n try:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Prepare the grid search using the previously set parameter grid\n grid_search = GridSearchCV(estimator=estimator, param_grid=self.model.param_grid, **self.model.grid_search_args)\n \n # Add grid search to the pipeline steps\n pipe_steps.append(('grid_search', grid_search))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to 
the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n\n # Get the best parameters and the cross validation results\n grid_search = self.model.pipe.named_steps['grid_search']\n self.model.best_params = grid_search.best_params_\n self.model.cv_results = grid_search.cv_results_\n\n # Get the best estimator to add to the final pipeline\n estimator = grid_search.best_estimator_\n\n # Update the pipeline with the best estimator\n self.model.pipe.steps[self.model.estimation_step] = ('estimator', estimator)\n\n except AttributeError:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Add the estimator to the pipeline steps\n pipe_steps.append(('estimator', estimator))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n \n if self.model.validation == \"hold-out\": \n # Evaluate the model using the test data \n self.calculate_metrics(caller=\"internal\")\n \n if self.model.calc_feature_importances:\n # Select the dataset for calculating importances\n if self.model.validation == \"hold-out\":\n X = self.X_test\n y = self.y_test # Already a numpy array after calculate_metrics\n else:\n X = self.X_train\n y = self.y_train.values.ravel()\n \n # Calculate model agnostic feature importances\n self._calc_importances(X = X, y = y)\n\n # Persist the model to disk\n self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n # Prepare the output\n if self.model.validation != \"external\": \n message = [[self.model.name, 'Model successfully trained, tested and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model has a score of {1:.3f} against the test data.\"\\\n .format(self.model.estimator, self.model.score), self.model.score]]\n else:\n message = [[self.model.name, 'Model successfully trained and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model score unknown as test_size was <= 0.\"\\\n .format(self.model.estimator), 
np.NaN]]\n \n self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp', 'score_result', 'score'])\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"fit\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response",
"def fit(self, X, y):\n self.support_vectors_ = check_array(X)\n self.y = check_array(y, ensure_2d=False)\n random_state = check_random_state(self.random_state)\n self.kernel_args = {}\n if self.kernel == \"rbf\" and self.gamma is not None:\n self.kernel_args[\"gamma\"] = self.gamma\n elif self.kernel == \"poly\":\n self.kernel_args[\"degree\"] = self.degree\n self.kernel_args[\"coef0\"] = self.coef0\n elif self.kernel == \"sigmoid\":\n self.kernel_args[\"coef0\"] = self.coef0\n K = pairwise_kernels(X, metric=self.kernel, **self.kernel_args)\n self.dual_coef_ = np.zeros(X.shape[0])\n self.intercept_ = _svm.smo(\n K, y, self.dual_coef_, self.C, random_state, self.tol,\n self.numpasses, self.maxiter, self.verbose)\n # If the user was using a linear kernel, lets also compute and store\n # the weights. This will speed up evaluations during testing time.\n if self.kernel == \"linear\":\n self.coef_ = np.dot(self.dual_coef_ * self.y, self.support_vectors_)\n # only samples with nonzero coefficients are relevant for predictions\n support_vectors = np.nonzero(self.dual_coef_)\n self.dual_coef_ = self.dual_coef_[support_vectors]\n self.support_vectors_ = X[support_vectors]\n self.y = y[support_vectors]\n return self",
"def fit_models(self, config_file, features_dic, tstart = None, tend = None,\n output_folder = None):\n \n if output_folder == None:\n output_folder = str(Path(dir_path, 'rf_models'))\n \n try:\n config = envyaml(config_file)\n except:\n logging.warning('Using default config as no valid config file was provided')\n config_file = dir_path + '/default_config.yml'\n \n config = envyaml(config_file)\n \n #######################################################################\n # Read data\n #######################################################################\n \n logging.info('Loading input data')\n radartab = pd.read_parquet(str(Path(self.input_location, 'radar_x0y0.parquet')))\n gaugetab = pd.read_parquet(str(Path(self.input_location, 'gauge.parquet')))\n grp = pickle.load(open(str(Path(self.input_location, 'grouping_idx_x0y0.p')),'rb'))\n grp_vertical = grp['grp_vertical']\n vweights = 10**(config['VERT_AGG']['BETA'] * (radartab['HEIGHT']/1000.)) # vert. weights\n \n ###############################################################################\n # Compute additional data if needed\n ###############################################################################\n \n # currently the only supported additional features is zh (refl in linear units)\n # and DIST_TO_RAD{A-D-L-W-P} (dist to individual radars)\n # Get list of unique features names\n features = np.unique([item for sub in list(features_dic.values())\n for item in sub])\n\n for f in features:\n if 'zh' in f:\n logging.info('Computing derived variable {:s}'.format(f))\n radartab[f] = 10**(0.1 * radartab[f.replace('zh','ZH')])\n elif 'zv' in f:\n logging.info('Computing derived variable {:s}'.format(f))\n radartab[f] = 10**(0.1 * radartab[f.replace('zv','ZV')]) \n if 'DIST_TO_RAD' in f:\n info_radar = constants.RADARS\n vals = np.unique(radartab['RADAR'])\n for val in vals:\n dist = np.sqrt((radartab['X'] - info_radar['X'][val])**2+\n (radartab['Y'] - info_radar['Y'][val])**2) / 1000.\n radartab['DIST_TO_RAD' + str(val)] = dist\n \n ###############################################################################\n # Compute data filter\n ###############################################################################\n \n filterconf = config['FILTERING']\n logging.info('Computing data filter')\n logging.info('List of stations to ignore {:s}'.format(','.join(filterconf['STA_TO_REMOVE'])))\n logging.info('Start time {:s}'.format(str(tstart)))\n logging.info('End time {:s}'.format(str(tend))) \n logging.info('ZH must be > {:f} if R <= {:f}'.format(filterconf['CONSTRAINT_MIN_ZH'][1],\n filterconf['CONSTRAINT_MIN_ZH'][0])) \n logging.info('ZH must be < {:f} if R <= {:f}'.format(filterconf['CONSTRAINT_MAX_ZH'][1],\n filterconf['CONSTRAINT_MAX_ZH'][0])) \n\n ZH_agg = vert_aggregation(pd.DataFrame(radartab['ZH_mean']),\n vweights,\n grp_vertical,\n True, radartab['VISIB_mean'])\n cond1 = np.array(np.isin(gaugetab['STATION'], filterconf['STA_TO_REMOVE']))\n cond2 = np.logical_and(ZH_agg['ZH_mean'] < filterconf['CONSTRAINT_MIN_ZH'][1],\n 6 * gaugetab['RRE150Z0'].values >= filterconf['CONSTRAINT_MIN_ZH'][0])\n cond3 = np.logical_and(ZH_agg['ZH_mean'] > filterconf['CONSTRAINT_MAX_ZH'][1],\n 6 * gaugetab['RRE150Z0'].values <= filterconf['CONSTRAINT_MIN_ZH'][0])\n \n invalid = np.logical_or(cond1,cond2)\n invalid = np.logical_or(invalid,cond3)\n invalid = np.logical_or(invalid,cond3)\n invalid = np.array(invalid)\n if tend != None:\n tend_unix = (tend - datetime.datetime(1970,1,1) ).total_seconds()\n invalid[gaugetab['TIMESTAMP'] > tend_unix] = 
1\n if tstart != None:\n tstart_unix = (tstart - datetime.datetime(1970,1,1) ).total_seconds()\n invalid[gaugetab['TIMESTAMP'] < tstart_unix] = 1\n invalid[np.isnan(gaugetab['RRE150Z0'])] = 1\n \n ###############################################################################\n # Prepare training dataset\n ###############################################################################\n \n gaugetab = gaugetab[~invalid]\n \n for model in features_dic.keys():\n logging.info('Performing vertical aggregation of input features for model {:s}'.format(model)) \n features_VERT_AGG = vert_aggregation(radartab[features_dic[model]], \n vweights, grp_vertical,\n config['VERT_AGG']['VISIB_WEIGHTING'],\n radartab['VISIB_mean'])\n features_VERT_AGG = features_VERT_AGG[~invalid]\n \n ###############################################################################\n # Fit\n ###############################################################################\n # create name of variables used in the model\n features = []\n for f in features_VERT_AGG.columns:\n if '_max' in f:\n f = f.replace('_max','')\n elif '_min' in f:\n f = f.replace('_min','')\n elif '_mean' in f:\n f = f.replace('_mean','')\n features.append(f)\n\n reg = RandomForestRegressorBC(degree = 1, \n bctype = config['BIAS_CORR'],\n variables = features,\n beta = config['VERT_AGG']['BETA'],\n **config['RANDOMFOREST_REGRESSOR'])\n \n Y = np.array(gaugetab['RRE150Z0'] * 6)\n logging.info('')\n \n logging.info('Training model on gauge data')\n\n valid = np.all(np.isfinite(features_VERT_AGG),axis=1)\n reg.fit(features_VERT_AGG[valid], Y[valid])\n \n out_name = str(Path(output_folder, '{:s}_BETA_{:2.1f}_BC_{:s}.p'.format(model, \n config['VERT_AGG']['BETA'],\n config['BIAS_CORR'])))\n logging.info('Saving model to {:s}'.format(out_name))\n \n pickle.dump(reg, open(out_name, 'wb'))",
"def linear_model(data, precision = 4):\n # Handle input errors\n matrix_of_scalars(data, 'first')\n long_vector(data)\n positive_integer(precision)\n\n # Store independent and dependent variable values separately\n independent_variable = single_dimension(data, 1)\n dependent_variable = single_dimension(data, 2)\n\n # Create matrices for independent and dependent variables\n independent_matrix = []\n dependent_matrix = column_conversion(dependent_variable)\n\n # Iterate over inputted data\n for element in independent_variable:\n # Store linear and constant evaluations of original independent elements together as lists within independent matrix\n independent_matrix.append([element, 1])\n\n # Solve system of equations\n solution = system_solution(independent_matrix, dependent_matrix, precision)\n\n # Eliminate zeroes from solution\n coefficients = no_zeroes(solution, precision)\n\n # Generate evaluations for function, derivatives, and integral\n equation = linear_equation(*coefficients, precision)\n derivative = linear_derivatives(*coefficients, precision)['first']['evaluation']\n integral = linear_integral(*coefficients, precision)['evaluation']\n\n # Determine key points of graph\n points = key_coordinates('linear', coefficients, precision)\n\n # Generate values for lower and upper bounds\n five_numbers = five_number_summary(independent_variable, precision)\n min_value = five_numbers['minimum']\n max_value = five_numbers['maximum']\n q1 = five_numbers['q1']\n q3 = five_numbers['q3']\n\n # Calculate accumulations\n accumulated_range = accumulated_area('linear', coefficients, min_value, max_value, precision)\n accumulated_iqr = accumulated_area('linear', coefficients, q1, q3, precision)\n\n # Determine average values and their points\n averages_range = average_values('linear', coefficients, min_value, max_value, precision)\n averages_iqr = average_values('linear', coefficients, q1, q3, precision)\n\n # Create list of predicted outputs\n predicted = []\n for element in independent_variable:\n predicted.append(equation(element))\n \n # Calculate correlation coefficient for model\n accuracy = correlation_coefficient(dependent_variable, predicted, precision)\n\n # Package preceding results in multiple dictionaries\n evaluations = {\n 'equation': equation,\n 'derivative': derivative,\n 'integral': integral\n }\n points = {\n 'roots': points['roots'],\n 'maxima': points['maxima'],\n 'minima': points['minima'],\n 'inflections': points['inflections']\n }\n accumulations = {\n 'range': accumulated_range,\n 'iqr': accumulated_iqr\n }\n averages = {\n 'range': averages_range,\n 'iqr': averages_iqr\n }\n\n # Package all dictionaries in single dictionary to return\n result = {\n 'constants': coefficients,\n 'evaluations': evaluations,\n 'points': points,\n 'accumulations': accumulations,\n 'averages': averages,\n 'correlation': accuracy\n }\n return result",
"def calibrate(\n model: ModelProto, dataset: Iterable[Dict[str, np.ndarray]]\n) -> Dict[str, Tuple[float, float]]:\n ort.set_default_logger_severity(3)\n session = ort.InferenceSession(model.SerializeToString())\n\n reduces = [\n output.name\n for output in session.get_outputs()\n if (output.name.endswith(\"_ReduceMin\") or output.name.endswith(\"_ReduceMax\"))\n ]\n\n minimum = defaultdict(lambda: math.inf)\n maximum = defaultdict(lambda: -math.inf)\n if not os.environ.get(\"TQDM_DISABLE\"):\n dataset = tqdm.tqdm(dataset, desc=\"Calibration\")\n for inputs in dataset:\n reduce_vals = session.run(reduces, inputs)\n for reduce, reduce_val in zip(reduces, reduce_vals):\n if reduce.endswith(\"_ReduceMin\"):\n name = reduce[: reduce.rfind(\"_ReduceMin\")]\n if minimum[name] > reduce_val:\n minimum[name] = reduce_val\n elif reduce.endswith(\"_ReduceMax\"):\n name = reduce[: reduce.rfind(\"_ReduceMax\")]\n if maximum[name] < reduce_val:\n maximum[name] = reduce_val\n return {name: (float(minimum[name]), float(maximum[name])) for name in minimum}",
"def fit(self, X, y):\n # X = as_float_array(X)\n # X, y = check_array(X, dtype=np.float64)\n if not isinstance(X, sp.csr_matrix):\n X = sp.csr_matrix(X)\n\n self._validate_params()\n\n self.t_ = 1.0\n self.max_target_ = y.max()\n self.min_target_ = y.min()\n\n # convert member variables to ints for use in cython\n k0 = self._bool_to_int(self.k0)\n k1 = self._bool_to_int(self.k1)\n shuffle_training = self._bool_to_int(self.shuffle_training)\n verbose = self._bool_to_int(self.verbose)\n learning_rate_schedule = self._get_learning_rate_type(self.learning_rate_schedule)\n task = self._get_task(self.task)\n\n # use sklearn to create a validation dataset for lambda updates\n if self.verbose:\n print(\"Creating validation dataset of %.2f of training for adaptive regularization\"\n % self.validation_size)\n X_train, validation, train_labels, validation_labels = cross_validation.train_test_split(\n X, y, test_size=self.validation_size, random_state=self.seed)\n\n self.n_features_ = X_train.shape[1]\n\n # Convert datasets to sklearn sequential datasets for fast traversal\n X_train_dataset = _make_dataset(X_train, train_labels)\n validation_dataset = _make_dataset(validation, validation_labels)\n\n # Set up params\n self.w0 = 0.0\n self.w = np.zeros(self.n_features_, dtype=np.float64)\n rng = np.random.RandomState(self.seed)\n self.v = rng.normal(scale=self.init_stdev,\n size=(self.num_factors, self.n_features_)).astype(np.float64)\n\n self.fm_fast = FM_fast(self.w,\n self.v,\n self.num_factors,\n self.n_features_,\n self.num_iter,\n k0,\n k1,\n self.w0,\n self.t_,\n self.t0,\n self.power_t,\n self.min_target_,\n self.max_target_,\n self.eta0,\n learning_rate_schedule,\n shuffle_training,\n task,\n self.seed,\n verbose)\n\n self.fm_fast.fit(X_train_dataset, validation_dataset)\n return self",
"def fit_model(X, y,metric, model):\n cv_sets = ShuffleSplit(n_splits=10, test_size= 0.2, train_size= 0.8, random_state=42)\n \n\n if model == 'regression_tree':\n\n clf = DecisionTreeRegressor(random_state=42)\n\n # Creating a dictionary for the parameter 'max_depth' with a range from 1 to 10\n param = {\n 'max_depth': [1,2,3,4,5,6,7,8,9,10]\n }\n\n\n elif model == 'ridge':\n clf = Ridge(random_state=42, fit_intercept=False)\n param = {\n 'alpha': [0, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000]\n }\n\n\n if metric == 'r2':\n scoring_fnc = make_scorer(r_squared,greater_is_better=True)\n\n elif metric == 'rss':\n scoring_fnc = make_scorer(rss, greater_is_better=False)\n\n # Creating the grid search cv object --> GridSearchCV()\n grid = GridSearchCV(estimator=clf, param_grid=param, cv=cv_sets,scoring= scoring_fnc)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_",
"def learnHyperLinear(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):\n #Check if test is empty, if it is, don't refit and predict data\n testAvailable = np.size(xTest,0)!=0\n\n # Parameters selection\n #====================\n cRange = np.logspace(-5,1,3)\n parameters = {'C': cRange}\n\n if penalty=='l1':\n dual=False\n else:\n dual=True\n\n #Creating Model and begin classification\n #=======================================\n classif = svm.LinearSVC(penalty=penalty, class_weight=CLASS_WEIGHT, dual=dual)\n clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=5, n_jobs=jobs, verbose=3, refit=testAvailable)\n print(\"Begin\\n...\")\n clf.fit(X,y)\n\n \n #Get results, print and write them into a file\n #============================================\n print(clf.best_params_, clf.best_score_)\n\n if testAvailable:\n scores = testModel(clf.best_estimator_,X,y,xTest,yTest,penalty)\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Linear',\\\n penalty,scoring, transformedData, scores=scores)\n else:\n print(\"No test, don't predict data\")\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Linear',\\\n penalty,scoring, transformedData, scores=None)",
"def calibrate(x, y, z, temperature=None, budget=1000, noise_cutoff_mg=13, hdf5_file=None, calibration_statistics=False):\n\n args = {\"x\": x, \"y\": y, \"z\": z, \"temperature\": temperature, \"budget\": budget, \"noise_cutoff_mg\": noise_cutoff_mg,\n \"calibration_statistics\": calibration_statistics}\n params = [\"temperature\", \"budget\", \"noise_cutoff_mg\", \"calibration_statistics\"]\n calibration_diagnostics = do_if_not_cached(\"calibrate\", calibrate_slave, args, params, get_calibrate, set_calibrate, hdf5_file)\n\n # Regardless of how we get the results, extract the offset and scales\n calibration_parameters = [calibration_diagnostics[var] for var in [\"x_offset\", \"x_scale\", \"y_offset\", \"y_scale\", \"z_offset\", \"z_scale\"]]\n\n if temperature is not None:\n calibration_parameters = [calibration_diagnostics[var] for var in [\"x_temp_offset\", \"y_temp_offset\", \"z_temp_offset\"]]\n\n # Apply the best calibration factors to the data\n do_calibration(x, y, z, temperature, calibration_parameters)\n\n return x, y, z, calibration_diagnostics"
] | [
"0.6054098",
"0.5928749",
"0.5801186",
"0.57329446",
"0.57322824",
"0.5710905",
"0.57070893",
"0.56966364",
"0.56741047",
"0.56055945",
"0.55519617",
"0.5540491",
"0.5518398",
"0.5488559",
"0.54868513",
"0.5474618",
"0.54654014",
"0.54624677",
"0.54408914",
"0.54407585",
"0.54191893",
"0.54155797",
"0.5414061",
"0.5406579",
"0.53987086",
"0.5396133",
"0.53684217",
"0.5356617",
"0.53515744",
"0.53490186"
] | 0.6264422 | 0 |
For a given WABBIT parameter file, check for the most common stupid errors | def check_parameters_for_stupid_errors( file ):
import os
# print('~~~~~~~~~~~~~~~~~~~~~ini-file~~~~~~~~~~~')
# # read jobfile
# with open(file) as f:
# # loop over all lines
# for line in f:
# line = line.lstrip()
# line = line.rstrip()
# if len(line)>0:
# if ';' in line:
# line = line[0:line.index(";")]
# if len(line)>0:
# if '[' in line and ']' in line:
# print(bcolors.OKBLUE + line + bcolors.ENDC)
# else:
# print(line)
# print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print("We scan %s for stupid errors." % (file) )
# check if the file exists, at least
if not os.path.isfile(file):
raise ValueError("Stupidest error of all: we did not find the INI file.")
wavelet = get_ini_parameter(file, 'Wavelet', 'wavelet', str, default="CDF40")
# since 05 Jul 2023, g is set automatically, unless we do something stupid.
if wavelet == 'CDF20':
g_default = 2
elif wavelet=='CDF22':
g_default = 3
elif wavelet=='CDF40':
g_default = 4
elif wavelet=='CDF42':
g_default = 5
elif wavelet=='CDF44' or wavelet=='CDF62':
g_default = 7
else:
g_default = 1
jmax = get_ini_parameter(file, 'Blocks', 'max_treelevel', int)
jmin = get_ini_parameter(file, 'Blocks', 'min_treelevel', int)
adapt_mesh = get_ini_parameter(file, 'Blocks', 'adapt_tree', int)
ceps = get_ini_parameter(file, 'Blocks', 'eps')
bs = get_ini_parameter(file, 'Blocks', 'number_block_nodes', int, vector=True)
g = get_ini_parameter(file, 'Blocks', 'number_ghost_nodes', int, default=g_default)
g_rhs = get_ini_parameter(file, 'Blocks', 'number_ghost_nodes_rhs', int, default=g)
dealias = get_ini_parameter(file, 'Blocks', 'force_maxlevel_dealiasing', int)
dim = get_ini_parameter(file, 'Domain', 'dim', int)
L = get_ini_parameter(file, 'Domain', 'domain_size', vector=True)
discretization = get_ini_parameter(file, 'Discretization', 'order_discretization', str)
time_step_method = get_ini_parameter( file, 'Time', 'time_step_method', str, default="RungeKuttaGeneric")
CFL = get_ini_parameter( file, 'Time', 'CFL', float, default=1.0)
CFL_eta = get_ini_parameter( file, 'Time', 'CFL_eta', float, default=0.99)
CFL_nu = get_ini_parameter( file, 'Time', 'CFL_nu', float, default=0.99*2.79/(float(dim)*np.pi**2))
c0 = get_ini_parameter( file, 'ACM-new', 'c_0', float)
nu = get_ini_parameter( file, 'ACM-new', 'nu', float)
ceta = get_ini_parameter( file, 'VPM', 'C_eta', float, default=0.0)
penalized = get_ini_parameter( file, 'VPM', 'penalization', bool, default=False)
geometry = get_ini_parameter( file, 'VPM', 'geometry', str, default='default')
sponged = get_ini_parameter( file, 'Sponge', 'use_sponge', bool, default=False)
csponge = get_ini_parameter( file, 'Sponge', 'C_sponge', float, default=0.0)
sponge_type = get_ini_parameter( file, 'Sponge', 'sponge_type', str, default='default')
L_sponge = get_ini_parameter( file, 'Sponge', 'L_sponge', default=0.0)
time_max = get_ini_parameter( file, 'Time', 'time_max', float)
time_stepper = get_ini_parameter( file, 'Time', 'time_step_method', str, default="RungeKuttaGeneric")
CFL = get_ini_parameter( file, 'Time', 'CFL', float, default=0.5)
CFL_nu = get_ini_parameter( file, 'Time', 'CFL_nu', float, default=0.99*2.79/(float(dim)*np.pi**2) )
CFL_eta = get_ini_parameter( file, 'Time', 'CFL_eta', float, default=0.99)
filter_type = get_ini_parameter( file, 'Discretization', 'filter_type', str, default='no_filter')
filter_freq = get_ini_parameter( file, 'Discretization', 'filter_freq', int, default=-1)
dx = L[0]*2**-jmax/(bs[0])
keta = np.sqrt(ceta*nu)/dx
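    # K_eta = sqrt(nu*C_eta)/dx relates the penalization scale sqrt(nu*C_eta) to the finest grid spacing dx; it is reported as K_eta in the summary below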
print("======================================================================================")
print("Bs= %i g= %i g_rhs= %i dim= %i Jmax= %i L= %2.2f %s==> dx= %2.3e N_equi= %i N= %i per unit length%s" %
(bs[0],g,g_rhs, dim,jmax,L[0],bcolors.OKBLUE, dx, int(L[0]/dx), int(1.0/dx), bcolors.ENDC))
print("equidistant grids: Jmin=%i^%i, Jmax=%i^%i" % (int(bs[0]*2**jmin), dim, int(bs[0]*2**jmax), dim) )
print("discretization= %s" % (discretization))
print("T_max = %2.2f CFL= %2.2f CFL_eta= %2.2f CFL_nu= %2.3f time_stepper= %s" % (time_max, CFL, CFL_eta, CFL_nu, time_stepper))
print("use_penalization= %i geometry= %s C_eta= %2.2e %s ==> K_eta = %2.2f%s" %
(penalized, geometry, ceta, bcolors.OKBLUE, keta, bcolors.ENDC))
if sponged:
print("use_sponge=%i type=%s C_sponge=%2.2e L_sponge=%2.2f %s==> Ntau = %2.2f%s" %
(sponged, sponge_type, csponge, L_sponge, bcolors.OKBLUE, L_sponge/(c0*csponge), bcolors.ENDC))
print("C_0 = %2.2f delta_shock= %2.2f dx nu=%e" % (c0, c0*ceta/dx, nu))
print("C_eps = %2.2e wavelet= %s dealias=%i adapt_mesh=%i" % (ceps, wavelet, dealias, adapt_mesh))
print("dt_CFL= %2.3e" % (CFL*dx/c0))
print("filter_type= %s filter_freq=%i" % (filter_type, filter_freq))
print("======================================================================================")
if len(bs) > 1:
bs = bs[0]
if bs % 2 == 0:
warn('The block size is bs=%i which is an EVEN number.' % (bs) )
    if bs < 3:
        warn('The block size bs=%i is very small or even negative.' % (bs) )
if (wavelet == "CDF22") and g<3:
warn("Not enough ghost nodes for wavelet %s g=%i < 3" % (wavelet, g) )
if (wavelet == "CDF42") and g<5:
warn("Not enough ghost nodes for wavelet %s g=%i < 5" % (wavelet, g) )
if (wavelet == "CDF44" or wavelet == "CDF62") and g<7:
warn("Not enough ghost nodes for wavelet %s g=%i < 7" % (wavelet, g) )
if (wavelet == "CDF40") and g<4:
warn("Not enough ghost nodes for wavelet %s g=%i < 4" % (wavelet, g) )
if time_step_method == "RungeKuttaChebychev":
if CFL_eta < 999:
warn('are you sure you did not forget to adjustl CFL_eta for the RKC scheme???')
if CFL_nu < 999:
warn('are you sure you did not forget to adjustl CFL_nu for the RKC scheme???')
if CFL != 0.75:
warn('are you sure you did not forget to adjustl CFL for the RKC scheme??? often we used 0.75.')
if time_step_method == "RungeKuttaGeneric":
if CFL_eta > 1.0:
warn('are you sure you did not forget to adjustl CFL_eta for the RK scheme? it may be unstable.')
if CFL_nu > 0.99*2.79/(float(dim)*np.pi**2):
warn('are you sure you did not forget to adjustl CFL_nu for the RK scheme? it may be unstable.')
if CFL > 1.0:
warn('are you sure you did not forget to adjustl CFL for the RK scheme? it may be unstable.')
    # if somebody modifies the standard parameter file, users have to update the
    # ini files they use. This is often forgotten and obnoxious. Hence, if we find
    # values that no longer exist, warn the user.
if exists_ini_parameter( file, "Blocks", "number_data_fields" ) :
warn('Found deprecated parameter: [Blocks]::number_data_fields')
if exists_ini_parameter( file, "Physics", "initial_cond" ) :
warn('Found deprecated parameter: [Physics]::initial_cond')
if exists_ini_parameter( file, "Dimensionality", "dim" ) :
warn('Found deprecated parameter: [Dimensionality]::dim')
if exists_ini_parameter( file, "DomainSize", "Lx" ) :
warn('Found deprecated parameter: [DomainSize]::Lx')
if exists_ini_parameter( file, "Time", "time_step_calc" ) :
warn('Found deprecated parameter: [Time]::time_step_calc')
if exists_ini_parameter( file, "ACM", "forcing" ):
warn('Found deprecated parameter: [ACM]::forcing')
if exists_ini_parameter( file, "ACM", "forcing_type" ):
warn('Found deprecated parameter: [ACM]::forcing_type')
if exists_ini_parameter( file, "ACM", "p_mean_zero" ):
warn('Found deprecated parameter: [ACM]::p_mean_zero')
if exists_ini_parameter( file, "ACM", "compute_laplacian" ):
warn('Found deprecated parameter: [ACM]::compute_laplacian')
if exists_ini_parameter( file, "ACM", "compute_nonlinearity" ):
warn('Found deprecated parameter: [ACM]::compute_nonlinearity')
if exists_ini_parameter( file, "Blocks", "adapt_mesh" ):
warn('Found deprecated parameter: [Blocks]::adapt_mesh ===> adapt_tree')
HIT = get_ini_parameter( file, 'ACM-new', 'use_HIT_linear_forcing', bool, default=False)
if HIT:
print(type(HIT))
print(HIT)
warn('You use HIT linear forcing, which is HIGHLY EXPERIMENTAL')
jmax = get_ini_parameter( file, 'Blocks', 'max_treelevel', int)
if jmax > 18:
warn('WABBIT can compute at most 18 refinement levels, you set more!')
if sponged:
# default value is TRUE so if not found, all is well
mask_time_dependent = get_ini_parameter( file, 'VPM', 'mask_time_dependent_part', int, default=1)
if mask_time_dependent != 1:
warn("""you use sponge, but mask_time_dependent_part=0! The sponge
is treated as if it were time dependent because it does not have
to be at the maximum refinement level.""")
# loop over ini file and check that each non-commented line with a "=" contains the trailing semicolon ";"
with open(file) as f:
# loop over all lines
linenumber = 0
for line in f:
# remove trailing & leading spaces
line = line.strip()
linenumber += 1
if line != "" :
if line[0] != "!" and line[0] != "#" and line[0] != ";" :
if "=" in line and ";" not in line:
                        warn('It appears that line #%i does not contain a semicolon' % (linenumber) )
restart = get_ini_parameter( file, 'Physics', 'read_from_files', int)
print("read_from_files=%i" %(restart))
if restart == 1:
info("This simulation is being resumed from file")
infiles = get_ini_parameter( file, 'Physics', 'input_files', str)
infiles = infiles.split()
        for infile in infiles:
            print(infile)
            if not os.path.isfile(infile):
                raise ValueError("CRUCIAL: read_from_files=1 but input file %s NOT found!" % (infile))
else:
info("This simulation is being started from initial condition (and not from file)") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkParamsError(self):\n # check if parameter combinations match with the simulation filename.\n for i, f in enumerate(self.yadeDataFiles):\n # get the file name fore the suffix\n f = f.split('.' + f.split('.')[-1])[0]\n # get parameters from the remaining string\n paramsString = f.split('_')[-self.numParams:]\n # element wise comparison of the parameter vector\n if not (np.abs((np.float64(paramsString) - self.getSmcSamples()[-1][i])\n / self.getSmcSamples()[-1][i] < 1e-10).all()):\n raise RuntimeError(\n \"Parameters \" + \", \".join(\n [\"%s\" % v for v in self.getSmcSamples()[-1][i]]) + \" do not match with the data file name \" + f)",
"def _check_params(self):\n\t\tstrange_param_helper = False\n\t\tfor param in self.params:\n\t\t\n\t\t\t# It could be that the param encapsulates several values (e.g., \"FLUX_RADIUS(10)\")\n\t\t\t# So we have to dissect this\n\t\t\tmatch = re.compile(\"(\\w*)\\(\\d*\\)\").match(param)\n\t\t\tif match:\n\t\t\t\tcleanparam = match.group(1)\n\t\t\telse:\n\t\t\t\tcleanparam = param\n\t\t\t\t\n\t\t\tif cleanparam not in self.fullparamlist:\n\t\t\t\tlogger.warning(\"Parameter '%s' seems strange and might be unknown to SExtractor\" \\\n % (param))\n\t\t\t\tstrange_param_helper = True\n\t\t\t\t\n\t\tif strange_param_helper:\n\t\t\tlogger.warning(\"Known parameters are: %s\" % (self.fullparamtxt))",
"def check_params_set():\n critical = {'machineinfo' : MACHINEID, \n 'error_serverinfo' : ERROR_SERVER, \n 'serverinfo' : SERVER}\n for i, val in critical.iteritems():\n if not val:\n print \"ERROR: Set value for \\\"%s\\\" in baseconfig.cfg file first\\n\" % i\n sys.exit(1)",
"def checkSuccess():\n try:\n relin = np.genfromtxt(\"{}reliability.in\".format(basedir), dtype=str)\n # Accurate argument order\n args = [str(\"\\'\"+probpath+\"\\'\"), str(\"\\'\"+obpath+\"\\'\"),\n str(\"\\'\"+outfile+\"\\'\"),\n fcsthr, str(\"\\'\"+variable+\"\\'\"),\n rthresh, sixhour, nbrhd, rbox_bounds[0],\n rbox_bounds[1], rbox_bounds[2], rbox_bounds[3]]\n success = True # Assume success initially\n # Ensure that each argument was placed into the proper line of the\n # reliability input file\n for ind, line in enumerate(relin):\n # If an argument doesn't line up with the rel in arg, set False\n print(str(args[ind]).replace('\\\\', ''), line)\n if (str(args[ind]).replace('\\\\', '') != line):\n success = False\n print(success)\n except:\n success = False\n return success",
"def checkSuccess():\n try:\n relin = np.genfromtxt(\"{}reliability.in\".format(basedir), dtype=str)\n # Accurate argument order\n args = [str(\"\\'\"+probpath+\"\\'\"), str(\"\\'\"+obpath+\"\\'\"),\n str(\"\\'\"+outfile+\"\\'\"),\n fcsthr, str(\"\\'\"+variable+\"\\'\"),\n rthresh, sixhour, nbrhd, rbox_bounds[0],\n rbox_bounds[1], rbox_bounds[2], rbox_bounds[3]]\n success = True # Assume success initially\n # Ensure that each argument was placed into the proper line of the\n # reliability input file\n for ind, line in enumerate(relin):\n # If an argument doesn't line up with the rel in arg, set False\n print(str(args[ind]).replace('\\\\', ''), line)\n if (str(args[ind]).replace('\\\\', '') != line):\n success = False\n print(success)\n except:\n success = False\n return success",
"def test_bad_file():\n\n bad = ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))\n rv, out = getstatusoutput(f'{prg} -f {bad}')\n assert rv != 0\n assert re.match('usage:', out, re.I)\n assert re.search(f\"No such file or directory: '{bad}'\", out)",
"def __checkBinaryParametersAreSpecified(paramsObj):\n missing = []\n for paramName, paramVal in paramsObj._asdict().iteritems():\n if not paramVal:\n missing.append(paramName)\n if missing:\n raise ValueError(\"Missing parameters to load binary series files - \" +\n \"these must be given either as arguments or in a configuration file: \" +\n str(tuple(missing)))",
"def check_parameter_file(filename):\n\n # Load file\n with open(filename, \"r\") as fin:\n content = fin.read()\n\n # Check cols and splits strings\n\n bad_names = []\n line_numbers = []\n\n strs = [\"cols\", \"splits\", \"divs\"]\n\n for tstr in strs:\n\n start = content.find(tstr)\n\n while start != -1:\n\n cols_str = \"\".join(content[start:].split(\"\\n\")[0].split(\"=\")[-1].split(\" \"))\n\n semis = cols_str.count(\";\")\n\n # Get line number\n line_end = content.find(\"\\n\", start)\n line_number = content[:line_end].count(\"\\n\") + 1\n\n if tstr == \"divs\":\n colons = cols_str.count(\",\")\n else:\n colons = cols_str.count(\":\")\n\n if colons != (semis + 1):\n bad_names.append(tstr)\n line_numbers.append(line_number)\n\n start = content.find(tstr, start + 1)\n\n return bad_names, line_numbers",
"def check_parameters_valid(self) :\n for check_parameter in self.parameters :\n if (not self.parameters[check_parameter]['set']) :\n error_message = \"Missing key -> '\" + check_parameter + \"'\"\n if (Config.logger) :\n dtpo_log('fatal', error_message)\n raise ParseError(error_message)\n\n if self.parameters[check_parameter]['type'] == 'dir' :\n value = self.parameters[check_parameter]['value']\n return_string = check_directory_permissions(value)\n if return_string :\n error_message = \"{0} not accessible \" \\\n \"-> {1}\".format(\n check_parameter,\n return_string)\n raise ParseError(error_message)\n elif self.parameters[check_parameter]['type'] == 'file' :\n value = self.parameters[check_parameter]['value']\n try :\n file_pointer = open(value)\n file_pointer.close()\n except IOError as io_error :\n error_message = \"File {0} not accessible -> {2}\" \\\n .format(\n check_parameter,\n self.parameters[check_parameter]['value'],\n str(io_error))\n raise ParseError(error_message)",
"def checkParameters(self):\n self.DEBUG(\"EDPluginWaitMultiFile.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.expectedFile, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.expectedSize, \"Data Input is None\")",
"def test_checkParameters(self):\n self.failUnlessEqual(self.nice.opts['long'], \"Alpha\")\n self.failUnlessEqual(self.nice.opts['another'], \"Beta\")\n self.failUnlessEqual(self.nice.opts['longonly'], \"noshort\")\n self.failUnlessEqual(self.nice.opts['shortless'], \"Gamma\")",
"def test_readbadformat(self):\n\n self.assertRaises(ParseError, self.hw, self.badfile)",
"def f_check_adr_parameters_correctness(dict):\n\n if int(dict[\"operation_mode_num\"]) not in (0, 1, 2, 3, 4, 5, 6):\n print('\\n Error!!! Operation mode is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"FFT_size_samples\"]) not in (2048, 4096, 8192, 16384, 32768):\n print('\\n Error!!! FFT size is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"spectra_averaging\"]) < 16 or int(dict[\"spectra_averaging\"]) > 32768:\n print('\\n Error!!! Spectra averaging number is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"start_line_freq\"]) not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16): # 0 … (SFFT-1024)/1024\n print('\\n Error!!! Start frequency line is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"width_line_freq\"]) not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16):\n print('\\n Error!!! Frequency width line is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"width_line_freq\"]) > ((int(dict[\"FFT_size_samples\"]) - int(dict[\"start_line_freq\"]) * 1024) / 1024): # 1 … (SFFT-SLINE*1024)/1024\n print('\\n Error!!! Frequency width is bigger than FFT size allows!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"clock_source\"]) not in (0, 1):\n print('\\n Error!!! Clock source is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"sum_diff_mode_num\"]) not in (0, 1):\n print('\\n Error!!! Sum-diff mode is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"data_file_size\"]) < -1 or int(dict[\"data_file_size\"]) > 4096:\n print('\\n Error!!! File size value is wrong!\\n')\n sys.exit(' Program stopped!')\n\n '''\n if (int(dict[\"chan_diff_delay\"]) < 0 or int(parameters_dict[\"chan_diff_dalay\"]) > 1024):\n print('\\n Error!!! Channel difference delay is wrong!\\n')\n sys.exit(' Program stopped!')\n '''\n\n # print('\\n ADR parameters from file are correct!\\n')\n\n return dict",
"def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginExecVideov10.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")\n self.checkMandatoryParameters(self.getDataInput().getInputImagePath(), \"inputImage list is None\")\n for oneXSDataFile in self.getDataInput().getInputImagePath():\n self.checkMandatoryParameters(oneXSDataFile.getPath().getValue(), \"input Image does not exist\" + oneXSDataFile.marshal())",
"def checkbands() :\n dontThrowException = False \n success = s.checkConfig(dontThrowException)\n return success",
"def test_lpad_config_file_flags():\n with pytest.raises(FileNotFoundError, match=\"launchpad_file '' does not exist!\"):\n lpad([\"-l\", \"\", \"get_fws\"])\n\n with pytest.raises(FileNotFoundError, match=\"fworker_file 'missing_file' does not exist!\"):\n lpad([\"recover_offline\", \"-w\", \"missing_file\"])",
"def validate_config(config: NeedlemanWunschAlgorithmConfig):\n\n parameters_names_list = [\"SAME\", \"DIFF\", \"GAP_PENALTY\", \"MAX_NUMBER_PATHS\", \"MAX_SEQ_LENGTH\"]\n\n for param_name in parameters_names_list:\n if not isinstance(config[param_name], int):\n return False, f\"Parameter {param_name} is not int!\"\n \n for param_name in parameters_names_list[0:3]:\n if config[param_name] == 0:\n return False, f\"Parameter {param_name} can not be equal to 0!\"\n\n for param_name in parameters_names_list[3:]:\n if config[param_name] < 1:\n return False, f\"Parameter {param_name} can not be less than 1!\"\n\n if config.SAME <= config.DIFF:\n return False, f\"Parameter SAME must be greater than parameter DIFF!\"\n\n if config.MAX_SEQ_LENGTH > constants.MAXIMUM_SEQ_LEN:\n return False, f\"Value of parameter MAX_SEQ_LENGTH is too big. It should be less than {constants.MAXIMUM_SEQ_LEN}\"\n\n if config.MAX_NUMBER_PATHS > constants.MAXIMUM_NUMBER_PATHS:\n return False, f\"Value of parameter MAX_NUMBER_PATHS is too big. It should be less than {constants.MAXIMUM_NUMBER_PATHS}\"\n\n return True, \"\"",
"def check_mfdp_read(mfdp_params):\n print(\"opening mfdp file, checking data\")\n\n # 3 body interaction?\n three_body = (abs(mfdp_params.interaction_type) == 3)\n\n # there must be a better way than hard-coding all these indices, right?\n\n # parse TBME file for parameters\n directories = mfdp_params.two_body_interaction.split(\"/\")\n tbme_filename = directories[-1] # last one's the actual file\n tbme_type = int(tbme_filename[5])\n last_chunk = tbme_filename.split(\".\")[-1]\n [hbar_omega_verif_0, other_stuff] = last_chunk.split(\"_\")\n hbar_omega_verif_0 = float(hbar_omega_verif_0)\n N_1max_verif = int(other_stuff[0])\n N_12max_verif = int(other_stuff[1:])\n\n # parse output file name\n sections = mfdp_params.output_file.split(\"_\")\n the_rest = sections[2]\n dot_sections = the_rest.split(\".\")\n hbar_omega_verif_1 = float(dot_sections[1])\n\n # parse 3-body\n if three_body:\n # check 3-body file?\n pass\n\n message = \"\" # adjust error message as needed\n\n # check obvious things\n if mfdp_params.saved_pivot not in [\"F\", \"T\"]:\n message = \"saved_pivot must be either T or F\"\n if mfdp_params.two_body_file_type != tbme_type:\n message = \"TBME type does not match type from TMBE filename\"\n if mfdp_params.hbar_omega != hbar_omega_verif_0:\n message = \"freq does not match freq from TMBE filename\"\n if mfdp_params.hbar_omega != hbar_omega_verif_1:\n message = \"freq does not match freq from output filename\"\n if mfdp_params.N_1max != N_1max_verif:\n message = \"N_1max does not match value from TBME filename\"\n if mfdp_params.N_12max != N_12max_verif:\n message = \"N_12max does not match value from TBME filename\"\n if mfdp_params.eff_charge_p != 1.0:\n message = (\"effective charge of proton is 1.0, \"\n \"not \"+str(mfdp_params.eff_charge_p))\n if mfdp_params.eff_charge_n != 0.0:\n message = (\"effective charge of neutron is 0.0, \"\n \"not \"+str(mfdp_params.eff_charge_n))\n if mfdp_params.glp != 1.0:\n message = \"glp is always 1.0, not \"+str(mfdp_params.glp)\n if mfdp_params.gln != 0.0:\n message = \"gln is always 1.0, not \"+str(mfdp_params.gln)\n if mfdp_params.gsp != 5.586:\n message = \"gsp is always 5.586, not \"+str(mfdp_params.gsp)\n if mfdp_params.gsn != -3.826:\n message = \"gsn is always -3.826, not \"+str(mfdp_params.gsn)\n\n # mod 2 checks\n Z, N = mfdp_params.ZN\n if ((Z + N) % 2) == 0:\n if mfdp_params.total_2Jz != 0:\n message = (\"Z + N is even, so total_2Jz must be 0, \"\n \"not \"+str(mfdp_params.total_2Jz))\n else:\n if mfdp_params.total_2Jz != 1:\n message = (\"Z + N is odd, so total_2Jz must be 1, \"\n \"not \"+str(mfdp_params.total_2Jz))\n\n if mfdp_params.parity != (mfdp_params.Nhw % 2):\n message = \"we require parity = Nhw mod 2\"\n if mfdp_params.parity != (mfdp_params.nhw0 % 2):\n message = \"we require parity = nhw0 mod 2\"\n if mfdp_params.parity != (mfdp_params.nhw_min % 2):\n message = \"we require parity = nhw_min mod 2\"\n\n # raise last error detected\n if message:\n raise ValueError(\"Bad template MFDP file: \"+message)",
"def sanityCheck(parameters):\n if not parameters: printUsage(); sys.exit()\n\n # these may differ depending on type of processing to do\n padPath = parameters['padPath']\n dateStart = parameters['dateStart']\n dateStop = parameters['dateStop']\n sensor = parameters['sensor']\n abbr = parameters['abbr']\n whichAx = parameters['whichAx']\n pm = parameters['pm']\n tag = parameters['tag']\n Nfft = parameters['Nfft']\n No = parameters['No']\n \n if not os.path.isdir(padPath): print '%s does not exist' % padPath; sys.exit()\n if not(pm in ['+','-']): print 'bad pm flag (%s): it should be either (+) for demean OR (-) for keep mean' % pm; sys.exit()\n\n return padPath,dateStart,dateStop,sensor,abbr,whichAx,pm,tag,Nfft,No",
"def test_bad_file() -> None:\n\n bad = random_string()\n rv, out = getstatusoutput(f'{RUN} {bad}')\n assert rv != 0\n assert out.lower().startswith('usage:')\n assert re.search(f\"No such file or directory: '{bad}'\", out)",
"def manual_input_check(manual_params, machine, paths):\n print(\"checking manual input\")\n m = manual_params # so we don't have to type out manual_params everywhere\n\n int_dir = paths[0]\n ncsd_path = paths[1]\n working_dir = paths[2]\n # do we have a 3-body interaction?\n three_body = (abs(m.interaction_type) == 3)\n\n # first check if paths exist\n if not exists(int_dir):\n raise IOError(\n \"Interactions directory \" + int_dir + \" does not exist\")\n if not exists(working_dir):\n raise IOError(\n \"Working directory \" + working_dir + \" does not exist\")\n f2 = join(int_dir, m.two_body_interaction)\n if not exists(f2):\n raise IOError(\"Two body file \"+f2+\" does not exist\")\n if three_body:\n f3 = join(int_dir, m.three_body_interaction)\n if not exists(f3):\n raise IOError(\"Three body file \"+f3+\" does not exist\")\n if not exists(ncsd_path):\n raise IOError(\"NCSD file \"+ncsd_path+\" does not exist!\")\n\n # check that parameters make sense\n if not (m.N_12max >= m.N_1max):\n raise ValueError(\"N_12max must be >= N_1max\")\n if three_body:\n if not (m.N_123max >= m.N_12max):\n raise ValueError(\"N_123max must be >= N_12max\")\n\n # check that parameters match with filenames\n try:\n # TBME file\n tbme_filename = m.two_body_interaction\n last_chunk = tbme_filename.split(\".\")[-1]\n [hbar_omega_verif_0, other_stuff] = last_chunk.split(\"_\")\n hbar_omega_verif_0 = float(hbar_omega_verif_0)\n # see if str(N_1max) + str(N_1max) == other_stuff\n if other_stuff != str(m.N_1max) + str(m.N_12max):\n print(\"\\nYour TMBE file doesn't seem to match your parameters!\")\n print(\"N_1max = \"+str(m.N_1max))\n print(\"N_12max = \"+str(m.N_12max))\n print(\"TBME filename = \"+tbme_filename)\n print(\"relevant section = \"+other_stuff)\n yn = \"\"\n while yn not in [\"y\", \"n\"]:\n yn = input(\"Do you want to continue? (y/n): \")\n if yn == \"y\":\n pass\n else:\n sys.exit(0)\n # see if hbar_omega matches\n if hbar_omega_verif_0 != m.hbar_omega:\n print(\"\\nYour TMBE file doesn't seem to match your parameters!\")\n print(\"hbar_omega = \"+str(m.hbar_omega))\n print(\"TBME filename = \"+tbme_filename)\n print(\"hbar_omega from the file is\", hbar_omega_verif_0)\n yn = \"\"\n while yn not in [\"y\", \"n\"]:\n yn = input(\"Do you want to continue? (y/n): \")\n if yn == \"y\":\n pass\n else:\n sys.exit(0)\n except Exception as e:\n print(\"Minor error caught while parsing TMBE filename.\")\n print(\"Printing traceback as if it had caused a crash:\")\n traceback.print_exc()\n print(\"TBME filename that caused this error:\", tbme_filename)\n print(\"We assume everything's fine, but double-check!\\n\")\n\n if three_body:\n try:\n # three-body file\n three_filename = m.three_body_interaction\n [penultimate_chunk, last_chunk] = three_filename.split(\".\")[-2:]\n # get hbar_omega\n [hbar_omega_verif_1, other_stuff] = last_chunk.split(\"_\")\n hbar_omega_verif_1 = float(hbar_omega_verif_1)\n # get N_#max variables\n n_maxes = penultimate_chunk.split(\"_\")[-1]\n # see if str(N_1max) + str(N_1max) == other_stuff\n if n_maxes != str(m.N_123max) + str(m.N_12max) + str(m.N_1max):\n print(\n \"\\nYour 3-body file doesn't seem \"\n \"to match your parameters!\")\n print(\"N_1max = \"+str(m.N_1max))\n print(\"N_12max = \"+str(m.N_12max))\n print(\"N_123max = \"+str(m.N_123max))\n print(\"3-body filename = \"+three_filename)\n print(\"relevant section = \"+n_maxes)\n yn = \"\"\n while yn not in [\"y\", \"n\"]:\n yn = input(\"Do you want to continue? 
(y/n): \")\n if yn == \"y\":\n pass\n else:\n sys.exit(0)\n # see if hbar_omega matches\n if hbar_omega_verif_1 != m.hbar_omega:\n print(\n \"\\nYour 3-body file doesn't seem \"\n \"to match your parameters!\")\n print(\"hbar_omega = \"+str(m.hbar_omega))\n print(\"3-body filename = \"+three_filename)\n print(\"hbar_omega from the file is\", hbar_omega_verif_1)\n yn = \"\"\n while yn not in [\"y\", \"n\"]:\n yn = input(\"Do you want to continue? (y/n): \")\n if yn == \"y\":\n pass\n else:\n sys.exit(0)\n except Exception as e:\n print(\"Minor error caught while parsing 3-body filename.\")\n print(\"Printing traceback as if it had caused a crash:\")\n traceback.print_exc()\n print(\"3-body filename that caused the error:\", three_filename)\n print(\"We assume everything's fine, but double-check!\\n\")\n\n # check there's at least kappa_points kappa values\n kappa_vals = list(map(float, m.kappa_vals.split()))\n if len(kappa_vals) < m.kappa_points:\n raise ValueError(\n \"You must have at least kappa_points kappa values!\"\n \" kappa_points = \"+str(m.kappa_points))\n\n # and if kappa_points and kappa_vals disagree, make sure they know that\n if len(kappa_vals) > m.kappa_points:\n print(\n \"Did you mean to enter \"+str(len(kappa_vals)) +\n \" values for kappa_min, but set kappa_points to \" +\n str(m.kappa_points)+\"?\")\n user_input = \"\"\n while user_input not in [\"Y\", \"N\"]:\n user_input = input(\"Enter Y to proceed, N to cancel: \")\n if user_input == \"N\":\n print(\"Okay, exiting... Try again!\")\n sys.exit(0)\n\n kr_values = [-1, 1, 2, 3, 4]\n if m.kappa_restart not in kr_values:\n raise ValueError(\n \"kappa_restart must be one of\" + \" \".join(map(str, kr_values)))\n\n if m.saved_pivot not in [\"F\", \"T\"]:\n raise ValueError(\"saved_pivot must be either T or F\")\n\n if (m.irest == 1 or m.kappa_restart != -1 or m.nhw_restart != -1) \\\n and m.saved_pivot == \"F\":\n raise ValueError(\"why not use the saved pivot if you're restarting?\")\n\n # if this function runs, the input passes the test",
"def validate(settings):\n if not settings.filename.endswith(\".xml\"):\n print_error(\"{} must have a .xml extension to interoperate with build tool\".format(settings.filename))\n exit(1)\n \n failed = False\n \n print color(\"Validating: \", color_code(BLUE)), settings.filename\n if platform in [\"linux\", \"linux2\"] and not(settings.skip_permissions):\n stat_info = os.stat(settings.filename)\n gid = stat_info.st_gid\n mode = stat_info.st_mode & 0777\n group = getgrgid(gid)[0]\n if group != \"cs0220ta\":\n print_error(\"Wrong group, you MUST run `chgrp cs0220ta {}'\".format(settings.filename))\n failed = True\n if mode ^ 0660 != 0000:\n print_error(\"Wrong permissions, you MUST run `chmod 660 {}'\".format(settings.filename))\n failed = True\n \n invalid_lt = re.compile(\"<(?!/?(problem|usedin|version|authors?|year|topics?|types?|param|deps?|dependency|dependencies|body|solution|rubric|resource))\")\n invalid_amp = re.compile(r\"&(?!\\w{1,10};)\")\n invalid_char = re.compile(r\"[^\\x00-\\x7f]\")\n \n # Some more manual checking \n with open(settings.filename) as f:\n for num, line in enumerate(f):\n if len(string.rstrip(line)) > 80:\n print_warning(\"Line {} longer than 80 characters (has {})\".format(num+1, len(string.rstrip(line))))\n failed = True\n problem_lt = re.search(invalid_lt, line)\n if problem_lt:\n print_error(\"Invalid < character on line {} at character {}\".format(num+1, problem_lt.start()))\n print color(\"\\tA literal < can be escaped using \\\"<\\\" instead.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_amp = re.search(invalid_amp, line)\n if problem_amp:\n print_error(\"Invalid raw & character on line {} at character {}\".format(num+1, problem_amp.start()))\n print color(\"\\tA literal & can be escaped by using \\\"&\\\" instead.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_char = re.search(invalid_char, line)\n if problem_char:\n print_error(\"Invalid non-ASCII character on line {} at character {}\".format(num+1, problem_char.start()))\n failed = True\n \n try:\n tree = ET.parse(settings.filename)\n except Exception:\n print_error(\"XML in {} could not be parsed.\".format(settings.filename))\n print color(\"\\nPlease rerun validation once XML is fixed\", color_code(CYAN))\n exit(1)\n if tree.getroot().tag == 'assignment':\n print_error(\"This looks like an assignment xml file. Did you mean 22edit validate_doc?\")\n exit(1)\n try:\n problem = Problem(settings.filename)\n problem.parse_tree(tree, False)\n except ImproperXmlException as e:\n print_error(e.args[0])\n print color(\"\\nPlease rerun validation after fixing\", color_code(CYAN))\n exit(1)\n \n firstProblem = True\n for version in problem.get_versions():\n if not version.standalone and not firstProblem:\n continue\n firstProblem = False\n \n print color(\"\\n\\nVERSION {}:\\n\".format(version.vid),\n color_code(BLUE))\n validate_version(version, failed)",
"def check_sanity(params):\n \n for dpath in ['input_dir','output_dir']:\n if path.isdir(params[dpath]) == False:\n print('ERROR: Cannot find directory '+params[dpath])\n exit()\n \n if path.isfile(params['star_file']) == False:\n print('ERROR: Cannot find star file '+params['star_file'])\n exit()",
"def param_vals_test(param_dict):\n file_msg = param_dict['Prog_msg']\n ##\n ## Testing if `wget` exists in the system\n if is_tool('wget'):\n pass\n else:\n msg = '{0} You need to have `wget` installed in your system to run '\n msg += 'this script. You can download the entire dataset at {1}.\\n\\t\\t'\n msg += 'Exiting....'\n msg = msg.format(file_msg, param_dict['url_catl'])\n raise ValueError(msg)\n ##\n ## Checking that Esmeralda is not ran when doing 'SO' halos\n if (param_dict['halotype'] == 'so') and (param_dict['sample'] == 20):\n msg = '{0} The `halotype`==`so` and `sample`==`20` are no compatible '\n msg += 'input parameters.\\n\\t\\t'\n msg += 'Exiting...'\n msg = msg.format(file_msg)\n raise ValueError(msg)\n ##\n ## Checking that `hod_model_n` is set to zero for FoF-Halos\n if (param_dict['halotype'] == 'fof') and (param_dict['hod_n'] != 0):\n msg = '{0} The `halotype`==`{1}` and `hod_n`==`{2}` are no compatible '\n msg += 'input parameters.\\n\\t\\t'\n msg += 'Exiting...'\n msg = msg.format( file_msg,\n param_dict['halotype'],\n param_dict['hod_n'])\n raise ValueError(msg)\n ##\n ## Checking input different types of `test_train_opt`\n #\n # `sample_frac`\n if (param_dict['test_train_opt'] == 'sample_frac'):\n # `sample_frac`\n if not ((param_dict['sample_frac'] > 0) and\n (param_dict['sample_frac'] <= 1.)):\n msg = '{0} `sample_frac` ({1}) must be between (0,1]'.format(\n file_msg, param_dict['sample_frac'])\n raise ValueError(msg)\n # `test_size`\n if not ((param_dict['test_size'] > 0) and\n (param_dict['test_size'] < 1)):\n msg = '{0} `test_size` ({1}) must be between (0,1)'.format(\n file_msg, param_dict['test_size'])\n raise ValueError(msg)\n #\n # boxes_n\n if (param_dict['test_train_opt'] == 'boxes_n'):\n box_n_arr = num.array(param_dict['box_idx'].split('_')).astype(int)\n box_n_diff = num.diff(box_n_arr)\n # Larger than zero\n if not (all(box_n_arr >= 0)):\n msg = '{0} All values in `box_idx` ({1}) must be larger than 0!'\n msg = msg.format(file_msg, box_n_arr)\n raise ValueError(msg)\n # Difference between elements\n if not (all(box_n_diff > 0)):\n msg = '{0} The value of `box_idx` ({1}) is not valid!'.format(\n file_msg, param_dict['box_idx'])\n raise ValueError(msg)\n #\n # `box_test`\n if (param_dict['test_train_opt'] == 'box_sample_frac'):\n # Value of `box_test`\n if not (param_dict['box_test'] >= 0):\n msg = '{0} `box_test` ({1}) must be larger or equal to `0`.'\n msg = msg.format(file_msg, param_dict['box_test'])\n raise ValueError(msg)\n # Testing `test_size`\n # `test_size`\n if not ((param_dict['test_size'] > 0) and\n (param_dict['test_size'] < 1)):\n msg = '{0} `test_size` ({1}) must be between (0,1)'.format(\n file_msg, param_dict['test_size'])\n raise ValueError(msg)\n ##\n ## Checking that `kf_splits` is larger than `2`\n if (param_dict['kf_splits'] < 2):\n msg = '{0} The value for `kf_splits` ({1}) must be LARGER than `2`'\n msg += 'Exiting...'\n msg = msg.format(param_dict['Prog_msg'], param_dict['kf_splits'])\n raise ValueError(msg)\n ##\n ## Checking that `n_predict` is not smaller than `1`.\n if (param_dict['n_predict'] < 1):\n msg = '{0} The value for `n_predict` ({1}) must be LARGER than `1`'\n msg += 'Exiting...'\n msg = msg.format(param_dict['Prog_msg'], param_dict['n_predict'])\n raise ValueError(msg)",
"def test_bad_file():\n\n bad_file = random_string()\n letter = random.choice(string.ascii_lowercase)\n rv, out = getstatusoutput('{} {} -f {}'.format(prg, letter, bad_file))\n assert rv != 0\n expected = \"No such file or directory: '{}'\".format(bad_file)\n assert re.search(expected, out)",
"def test_invalidFile(self):\n self.assertRaises(cesmEnvLib.checkFile(\"blah\", \"write\"))",
"def check_file_valid_input(config, modname, fname, fdict, indent=''):\n\n cnts = [0] * NUMCNTS\n\n # check that any given filename pattern has a definition\n if pfwdefs.SW_FILEPAT in fdict:\n cnts2 = check_filepat_valid(config, fdict[pfwdefs.SW_FILEPAT], modname, fname, indent+' ')\n cnts = [x + y for x, y in zip(cnts, cnts2)] # increment counts\n\n # check that it has filepat, filename, depends, or query wcl (required)\n # if filename is a pattern, can I check that all needed values exist?\n # todo check depends happens in same block previous to this module\n if (('listonly' not in fdict or not miscutils.convertBool(fdict['listonly'])) and\n pfwdefs.SW_FILEPAT not in fdict and pfwdefs.FILENAME not in fdict and\n 'fullname' not in fdict and 'query_fields' not in fdict and pfwdefs.DATA_DEPENDS not in fdict):\n error(indent, \"module %s, %s, %s - Missing terms needed to determine input filename\" % (modname, pfwdefs.SW_INPUTS, fname))\n cnts[ERRCNT_POS] += 1\n\n # check that it has pfwdefs.DIRPAT : err\n # can I check that all values for pfwdefs.DIRPAT exist?\n if pfwdefs.DIRPAT not in fdict:\n error(indent, \"module %s, %s, %s - Missing %s\" % (modname, pfwdefs.SW_INPUTS, fname, pfwdefs.DIRPAT))\n cnts[ERRCNT_POS] += 1\n\n return cnts",
"def test_read_file_invalid():\n\tfrom ..skySurvey import SkySurvey\n\tfile_list = 0\n\ttry:\n\t\tSkySurvey(file_list = file_list)\n\texcept TypeError:\n\t\tassert True\n\telse:\n\t\tassert False",
"def _check_config(self):",
"def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'"
] | [
"0.6367045",
"0.63627934",
"0.626574",
"0.6190518",
"0.6190518",
"0.61401314",
"0.60877657",
"0.607748",
"0.6049413",
"0.59709966",
"0.59606194",
"0.5958436",
"0.5945568",
"0.58771724",
"0.5862091",
"0.5859376",
"0.5834433",
"0.58315766",
"0.5829629",
"0.5814439",
"0.5794336",
"0.5775559",
"0.57703763",
"0.5763447",
"0.5701511",
"0.56845623",
"0.5671896",
"0.56680655",
"0.56629497",
"0.56588084"
] | 0.73345405 | 0 |
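A minimal usage sketch for the function documented in the row above (illustrative only, not part of the dataset): it assumes check_parameters_for_stupid_errors is importable from WABBIT's Python tools under the hypothetical module name wabbit_tools, and that "params_acm.ini" is a placeholder parameter file in the working directory.

# Hypothetical pre-flight check of a WABBIT INI file before submitting a job;
# the function prints a parameter summary and warns about suspicious settings.
import wabbit_tools

wabbit_tools.check_parameters_for_stupid_errors("params_acm.ini")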
Check if a given parameter in the INI file exists or not. Can be used to detect deprecated entries that somebody removed. | def exists_ini_parameter( inifile, section, keyword ):
found_section = False
found_parameter = False
# read jobfile
with open(inifile) as f:
# loop over all lines
for line in f:
            # once the section was found, stop when the next section begins
if found_section and line[0] == "[":
found_section = False
# until we find the section
if "["+section+"]" in line:
found_section = True
            # the keyword only counts if we are inside the right section
if found_section and keyword+"=" in line:
found_parameter = True
return found_parameter | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_config(cfg):",
"def exists_ini_section( inifile, section ):\n found_section = False\n\n # read jobfile\n with open(inifile) as f:\n # loop over all lines\n for line in f:\n # until we find the section\n if \"[\"+section+\"]\" in line and line[0]!=\";\" and line[0]!=\"!\" and line[0]!=\"#\":\n found_section = True\n\n\n return found_section",
"def _check_config(self):",
"def _verify_ini(self, config_file=None):\n\n imgur_values = ['ClientID', 'ClientSecret', 'AccessToken', 'RefreshToken']\n mysql_values = ['Host', 'User', 'Password', 'Database']\n missing_values = []\n\n if not config_file:\n print(\"No Config Filed Supplied. Aborting\")\n sys.exit(1)\n\n for val in imgur_values:\n if val not in config_file['IMGURAPI']:\n missing_values.append('IMGURAPI: ' + val)\n\n for val in mysql_values:\n if val not in config_file['MYSQL']:\n missing_values.append('MYSQL: ' + val)\n\n if missing_values:\n print('ERROR: ini file is missing required values. \\n Missing Values:')\n for val in missing_values:\n print(val)\n sys.exit(1)",
"def validate_missing_information(conf):\n failed = False\n\n for field, _ in REQUIRED_SETTINGS.items():\n if field not in conf:\n print 'error: missing configuration for \"{0}\"'.format(field)\n failed = True\n\n if failed:\n sys.exit(1)",
"def is_config_exist(self) -> bool:\n pass",
"def security_vars_exists():\n return os.path.exists(SECURITY_PATH)",
"def has_section(self, section):\r\n return self.configparser.has_section(section)",
"def is_config_exist(self) -> bool:\n return True",
"def validate_file(inp, name=''):\n validate_string(inp, name)\n assert (os.path.exists(inp)), name + ' settings with value ' + inp + ' should exist.'",
"def exists(self, path):\n raise TestException(self.settings_merged)",
"def has_option(self, section, option):\n raise NotImplementedError()",
"def check_config(config):\n pass",
"def _check(self, config: Dict):\n if 'path' not in config:\n raise FileNotFoundError(\"File not found.\")",
"def check_filename(name, fileinfos): \n try:\n if not name in fileinfos.keys():\n raise ValueError(\"Error: The XML file could not be found.\")\n except ValueError as err:\n print(err)\n exit(1)",
"def available(\n\t\t\tconfig_file):\n\t\treturn",
"def check_key_exists(self) -> None:\n omitted_configs = self.necessary_config_names - set(self.config.keys())\n assert len(omitted_configs) == 0, omitted_configs",
"def _search_for_key_file(path_to_key_file):\n\n return True if os.path.exists(path_to_key_file) else False",
"def test_load_non_existing_help_nc_params(self) -> None:\n with self.assertRaises(FileNotFoundError):\n load_help_nc_params(\"unknown_param\")",
"def check_configure_scan(project_path):\n for file_name in CONFIGURE_AC_NAMES:\n file_path = os.path.join(project_path, file_name)\n if os.path.exists(file_path):\n return file_path\n return None",
"def check_parameter_existence(self, d: dict, params: list):\n for param_name in params:\n if param_name not in d:\n raise Exception('Expecting the parameter \"' + param_name\n + '\" but cannot find it.')",
"def check_settings(self):\r\n pass",
"def has_configuration(config_file=CONFIG_FILE):\n return os.path.exists(config_file)",
"def check_params_set():\n critical = {'machineinfo' : MACHINEID, \n 'error_serverinfo' : ERROR_SERVER, \n 'serverinfo' : SERVER}\n for i, val in critical.iteritems():\n if not val:\n print \"ERROR: Set value for \\\"%s\\\" in baseconfig.cfg file first\\n\" % i\n sys.exit(1)",
"def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n pass",
"def check_settings_existence(self):\n options = [\n 'AUTH_LDAP_SERVER_URI',\n 'AUTH_LDAP_USER_SEARCH_BASE',\n 'AUTH_LDAP_USER_USERNAME_ATTR',\n 'AUTH_LDAP_PROTOCOL_VERSION',\n 'AUTH_LDAP_BIND_DN',\n 'AUTH_LDAP_BIND_PASSWORD',\n ]\n for option in options:\n if not hasattr(settings, option):\n logger.error('LDAP::check_settings_existence\\tSetting %s is '\n 'not provided', option)\n sys.exit(1)",
"def config_has_section(section):\n return __CONFIG.has_section(section)",
"def is_shed_tool_conf(self):",
"def config_has_option(section, option):\n return __CONFIG.has_option(section, option)",
"def check_settings(self):\n pass"
] | [
"0.6333489",
"0.63048834",
"0.61332923",
"0.60334665",
"0.6019133",
"0.59849584",
"0.58812857",
"0.5851556",
"0.58060586",
"0.57881004",
"0.5765527",
"0.5734839",
"0.5717389",
"0.56993365",
"0.568473",
"0.56755203",
"0.56699747",
"0.56562424",
"0.56547135",
"0.56511873",
"0.56482416",
"0.56441206",
"0.564066",
"0.5627988",
"0.5602319",
"0.56001186",
"0.5599453",
"0.5595488",
"0.5568614",
"0.55676794"
] | 0.6938629 | 0 |
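A brief, illustrative usage sketch for the row above (assuming the function is in scope and "params_acm.ini" is a placeholder file); it mirrors how the deprecation checks in check_parameters_for_stupid_errors call it.

# Hypothetical check for a renamed/deprecated INI entry:
if exists_ini_parameter("params_acm.ini", "Blocks", "adapt_mesh"):
    print("Found deprecated parameter: [Blocks]::adapt_mesh ===> adapt_tree")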
We look for the latest .h5 files to resume the simulation and prepare the INI file accordingly. Some errors are caught. | def prepare_resuming_backup( inifile ):
import numpy as np
import os
import glob
import flusi_tools
# does the ini file exist?
if not os.path.isfile(inifile):
raise ValueError("Inifile not found!")
Tmax = get_ini_parameter(inifile, "Time", "time_max", float)
dim = get_ini_parameter(inifile, "Domain", "dim", int)
    # This code currently only works with ACMs
physics_type = get_ini_parameter(inifile, "Physics", "physics_type", str)
if physics_type != "ACM-new":
raise ValueError("ERROR! backup resuming is available only for ACM")
if dim == 2:
state_vector_prefixes = ['ux', 'uy', 'p']
else:
state_vector_prefixes = ['ux', 'uy', 'uz', 'p']
# if used, take care of passive scalar as well
if exists_ini_parameter( inifile, 'ACM-new', 'use_passive_scalar' ):
scalar = get_ini_parameter(inifile, 'ACM-new', 'use_passive_scalar', bool, default=False)
if scalar:
n_scalars = get_ini_parameter(inifile, 'ConvectionDiffusion', 'N_scalars', int, default=0)
for i in range(n_scalars):
state_vector_prefixes.append( "scalar%i" % (i+1) )
# find list of H5 files for first prefix.
files = glob.glob( state_vector_prefixes[0] + "*.h5" )
files.sort()
if not files:
raise ValueError( "Something is wrong: no h5 files found for resuming" )
# first, we try the latest snapshots (obviously)
# it can happen (disk quota) that the code cannot complete writing this backup.
index = -1
timestamp = flusi_tools.get_timestamp_name( files[index] )
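    # the integer timestamp in the file name encodes the physical time in microseconds, hence the division by 1e6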
t0 = float(timestamp) / 1e6
# is this complete ?
snapshot_complete = True
for prefix in state_vector_prefixes:
if not os.path.isfile( prefix + '_' + timestamp + '.h5'):
snapshot_complete = False
print('For snapshot %s we did not find %s!! -> trying another one' % (timestamp, prefix))
# if not, we try the second latest, if it exists
if not snapshot_complete:
if len(files) >= 2:
index = -2
timestamp = flusi_tools.get_timestamp_name( files[index] )
t0 = float(timestamp) / 1e6
snapshot_complete = True
for prefix in state_vector_prefixes:
if not os.path.isfile( prefix + '_' + timestamp + '.h5'):
snapshot_complete = False
print('For snapshot %s we did not find all required input files!! -> trying another one' % (timestamp))
else:
raise ValueError("We did not find a complete snapshot to resume from...you'll have to start over.")
    # if we were still unable to resume, it is time to give up (if both snapshots are
    # incomplete, you probably simply did not save enough data)
if not snapshot_complete:
raise ValueError("We did not find a complete snapshot to resume from (tried -1 and -2)...you'll have to start over.")
print('Latest file is: ' + files[index])
print('Latest file is at time: %f' % (t0))
    # if we find the dt.t file, we know at what time the job ended.
# otherwise, just resume the latest H5 files
if os.path.isfile('dt.t'):
d = np.loadtxt('dt.t')
t1 = d[-1,0]
print('Last time stamp in logs is: %f' % (t1))
# time check when resuming a backup
if t0 > t1:
print( "Something is wrong: the latest H5 file is at LATER time than the log files. Is this the right data?" )
if t0 < 1.0e-6:
print("Something is wrong: the latest H5 file is almost at t=0. That means no backup has been saved?" )
if t1 > t0:
print('Warning: the latest H5 file is younger than the last entry in the log: we will have to compute some times twice.')
if abs(t1-t0) < 1.0e-4:
print('Good news: timestamp in H5 file and time in log file match!')
        if t1 >= 0.9999*Tmax or t0 >= 0.9999*Tmax:
            raise ValueError( "Something is wrong: the run seems to be already finished!" )
# check if all required input files exist
for prefix in state_vector_prefixes:
if not os.path.isfile( prefix + '_' + timestamp + '.h5'):
raise ValueError( "file not found!!!! " + prefix + '_' + timestamp + '.h5' )
# create the string we will put in the ini file
infiles_string = ""
for prefix in state_vector_prefixes:
infiles_string += prefix + '_' + timestamp + '.h5' + ' '
# remove trailing space:
infiles_string = infiles_string.strip()
    # add the trailing semicolon
infiles_string += ';'
# information (debug)
print(infiles_string)
f1 = open( inifile, 'r')
f2 = open( inifile+'.tmptmp', 'w')
found, okay1, okay2 = False, False, False
for line in f1:
# remove trailing space:
line_cpy = line.strip()
if '[Physics]' in line_cpy:
found = True
if 'read_from_files=' in line_cpy and found and line_cpy[0] != ";":
line = "read_from_files=1;\n"
okay1 = True
if 'input_files=' in line_cpy and found and line_cpy[0] != ";":
line = "input_files=" + infiles_string + "\n"
okay2 = True
f2.write( line )
f1.close()
f2.close()
if okay1 and okay2:
os.rename( inifile+'.tmptmp', inifile ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def preprocess_phase(self):\r\n if not self.C.restart: # start preprocessing job from scratch\r\n if (\r\n os.path.exists(self.valid_h5_path)\r\n or os.path.exists(self.test_h5_path)\r\n or os.path.exists(self.train_h5_path)\r\n ):\r\n raise OSError(\r\n f\"There currently exist(s) pre-created *.h5 file(s) in the \"\r\n f\"dataset directory. If you would like to proceed with \"\r\n f\"creating new ones, please delete them and rerun the \"\r\n f\"program. Otherwise, check your input file.\"\r\n )\r\n self.preprocess_valid_data()\r\n self.preprocess_test_data()\r\n self.preprocess_train_data()\r\n else: # restart existing preprocessing job\r\n # as some datasets may have already been preprocessed, check for this\r\n if os.path.exists(self.train_h5_path + \".chunked\") or os.path.exists(self.test_h5_path):\r\n print(\r\n f\"-- Restarting preprocessing job from 'train.h5' \"\r\n f\"(skipping over 'test.h5' and 'valid.h5' as they seem \"\r\n f\"to be finished).\",\r\n flush=True,\r\n )\r\n self.preprocess_train_data()\r\n elif os.path.exists(self.test_h5_path + \".chunked\") or os.path.exists(self.valid_h5_path):\r\n print(\r\n f\"-- Restarting preprocessing job from 'test.h5' \"\r\n f\"(skipping over 'valid.h5' as it appears to be \"\r\n f\"finished).\",\r\n flush=True,\r\n )\r\n self.preprocess_test_data()\r\n self.preprocess_train_data()\r\n elif os.path.exists(self.valid_h5_path + \".chunked\"):\r\n print(f\"-- Restarting preprocessing job from 'valid.h5'\", flush=True)\r\n self.preprocess_valid_data()\r\n self.preprocess_test_data()\r\n self.preprocess_train_data()\r\n else:\r\n raise ValueError(\r\n \"Warning: Nothing to restart! Check input \"\r\n \"file and/or submission script.\"\r\n )",
"def setup(self):\n EventGenerator.setup(self)\n\n if self.egs5_dir is None:\n self.egs5_dir = self.get_install_dir()\n logger.debug(\"Using EGS5 from install dir: \" + self.egs5_dir)\n\n ## data directory\n self.egs5_data_dir = os.path.join(self.egs5_dir, \"data\")\n ## config directory\n self.egs5_config_dir = os.path.join(self.egs5_dir, \"config\")\n\n logger.debug(\"egs5_data_dir=%s\" % self.egs5_data_dir)\n logger.debug(\"egs5_config_dir=%s\" % self.egs5_config_dir)\n\n if os.path.exists(\"data\"):\n os.unlink(\"data\")\n os.symlink(self.egs5_data_dir, \"data\")\n\n if os.path.exists(\"pgs5job.pegs5inp\"):\n os.unlink(\"pgs5job.pegs5inp\")\n os.symlink(self.egs5_config_dir + \"/src/esa.inp\", \"pgs5job.pegs5inp\")\n\n logger.debug(\"Reading run parameters: {}\".format(self.run_params))\n ## run parameters\n self.run_param_data = RunParameters(self.run_params)\n\n # Set target thickness from job parameter or use the default from run parameters\n if self.target_thickness is not None:\n self.target_z = self.target_thickness\n logger.debug(\"Target thickness set from job param: {}\".format(self.target_z))\n else:\n self.target_z = self.run_param_data.get(\"target_z\")\n logger.debug(\"Target thickness set from run_params: {}\".format(self.target_z))\n\n ebeam = self.run_param_data.get(\"beam_energy\")\n electrons = self.run_param_data.get(\"num_electrons\") * self.bunches\n\n seed_data = \"%d %f %f %d\" % (self.seed, self.target_z, ebeam, electrons)\n logger.debug(\"Seed data (seed, target_z, ebeam, electrons): {}\".format(seed_data))\n seed_file = open(\"seed.dat\", 'w')\n seed_file.write(seed_data)\n seed_file.close()",
"def setup(self):\n print(\"Looking for \", self.filename)\n if os.path.exists(self.filename):\n n, ext = os.path.splitext(self.filename)[:2]\n if ext == \".h5\" or ext == \".hdf5\":\n with h5py.File(self.filename, \"r\") as file:\n keys = list(file.keys())\n self.data = file[keys[0]].value\n print(\"Behavior Data length is \", self.data.shape[2])\n\n else:\n raise FileNotFoundError",
"def init_hdf5_file(datasets, init_start, init_end, hdf5_file):\n schema = tokio.connectors.hdf5.SCHEMA.get(SCHEMA_VERSION)\n for dataset_name, dataset in datasets.items():\n hdf5_dataset_name = schema.get(dataset_name)\n if hdf5_dataset_name is None:\n if '/_' not in dataset_name:\n warnings.warn(\"Dataset key %s is not in schema\" % dataset_name)\n continue\n if hdf5_dataset_name not in hdf5_file:\n # attempt to convert dataset into a timeseries\n timeseries = hdf5_file.to_timeseries(dataset_name=hdf5_dataset_name)\n\n # if dataset -> timeseries failed, create and commit a new, empty timeseries\n if timeseries is None:\n timeseries = tokio.timeseries.TimeSeries(dataset_name=hdf5_dataset_name,\n start=init_start,\n end=init_end,\n timestep=dataset.timestep,\n num_columns=dataset.dataset.shape[1])\n hdf5_file.commit_timeseries(timeseries=timeseries)\n print(\"Initialized %s in %s with size %s\" % (\n hdf5_dataset_name,\n hdf5_file.name,\n timeseries.dataset.shape))",
"def setup(self):\n # Call the baseclass setup to resolve any selections\n super().setup()\n\n self.outcont = None\n\n # If we are returning the same file for every iteration,\n # then load that file now.\n if self.only_prefix:\n filename = self.prefix\n\n split_ext = os.path.splitext(filename)\n if split_ext[1] not in [\".h5\", \".hdf5\"]:\n filename = split_ext[0] + \".h5\"\n\n # Load file into outcont attribute\n self.outcont = self._load_file(filename)\n\n else:\n self.prefix = os.path.splitext(self.prefix)[0]",
"def setup(self):\n EGS5.setup(self)\n if not len(self.inputs):\n raise Exception(\"Missing required input LHE file.\")",
"def setUp(self):\n input_files = glob.glob(PATH_TO_INPUT_FILES) # Read input (csv) files from current (sw/test) directory.\n if not self.sessionizing:\n self.sessionizing = Sessionizing()\n self.sessionizing.initialize(*input_files)\n if not self.sites_session_counter:\n self.merge_and_sort_input_files(*input_files)\n self.process_input_files()",
"def read_h5(self):\n infile = h5py.File(self.inf_name,'r')\n\n vardict = self.labdict\n #store data with the correct labels\n for k in infile['plasma/1d'].keys():\n try:\n vardict[k] = infile[self.labdict[k]].value\n except:\n vardict[k] = []\n\n vardict['a_ions']=infile['/plasma/anum'].value\n vardict['znum']=infile['/plasma/znum'].value\n \n\n self.rho_in = vardict['rho']\n self._rho_vol = infile['distributions/rhoDist/abscissae/dim1'].value[1:]\n self._volumes = infile['distributions/rhoDist/shellVolume'].value\n self.nrho_in = np.size(self.rho_in)\n\n if vardict['a_ions'][0]!='/':\n self.nspec = len(vardict['a_ions'])\n else:\n self.nspec = vardict['ni'].shape[1]\n print(\"Number of ions: \", self.nspec)\n if len(vardict['a_ions'])!=len(vardict['znum']):\n print(\"ERROR! array of A and Z don't have the same length\")\n\n self.A = vardict['a_ions']\n self.Z = vardict['znum']\n self.nion = self.nspec\n \n self.te_in = vardict['te'][:]\n self.ne_in = vardict['ne'][:]\n self.ti_in = vardict['ti'][:]\n ni1_in = vardict['ni'][:,0]\n self.ni_in = np.zeros((self.nion, self.nrho_in),dtype=float)\n self.ni_in[0,:] = ni1_in\n if self.nion==2:\n ni2_in = vardict['ni'][:,1]\n self.ni_in[1,:] = ni2_in\n elif self.nion==3:\n ni2_in = vardict['ni'][:,1]\n ni3_in = vardict['ni'][:,2]\n self.ni_in[1,:] = ni2_in\n self.ni_in[2,:] = ni3_in\n\n try:\n self.vt_in = vardict['vtor']\n except:\n self.vt_in = np.zeros(self.nrho_in,dtype=float)\n\n try:\n self.zeff_in = vardict['zeff'][:]\n except:\n self.zeff_in = np.zeros(self.nrho_in,dtype=float)\n\n self.ni = np.zeros((self.nion, self.nrho),dtype = float)\n self.spline()",
"def loadEEGh5(subject, stimulus_class, data_dir,\n\teeg_epochs=True, resp_mean = True, binarymat=False, binaryfeatmat = True, envelope=True, pitch=True, gabor_pc10=False, \n\tspectrogram=True, binned_pitches=True, spectrogram_scaled=True, scene_cut=True):\t \n\n\tstim_dict = dict()\n\tresp_dict = dict()\n\twith h5py.File('%s/fullEEGmatrix.hf5'%(data_dir),'r') as fh:\n\t\tprint(stimulus_class)\n\t\tall_stim = [k for k in fh['/%s' %(stimulus_class)].keys()]\n\t\tprint(all_stim)\n\t\t\t\n\t\tfor idx, wav_name in enumerate(all_stim): \n\t\t\tprint(wav_name)\n\t\t\tstim_dict[wav_name] = []\n\t\t\tresp_dict[wav_name] = []\n\t\t\ttry:\n\t\t\t\tepochs_data = fh['/%s/%s/resp/%s/epochs' %(stimulus_class, wav_name, subject)][:]\n\t\t\t\tphnfeatmat = fh['/%s/%s/stim/phn_feat_timings' %(stimulus_class, wav_name)][:]\n\t\t\t\tntimes = phnfeatmat.shape[1] #always resample to the size of phnfeat \n\t\t\t\tif binarymat:\n\t\t\t\t\tphnmat = fh['/%s/%s/stim/phn_timings' %(stimulus_class, wav_name)][:] \n\t\t\t\t\tstim_dict[wav_name].append(phnmat)\n\t\t\t\t\tntimes = phnmat.shape[1]\n\t\t\t\t\tprint('phnmat shape is:')\n\t\t\t\t\tprint(phnmat.shape)\n\t\t\t\tif binaryfeatmat:\n\t\t\t\t\tstim_dict[wav_name].append(phnfeatmat)\n\t\t\t\t\tprint('phnfeatmat shape is:')\n\t\t\t\t\tprint(phnfeatmat.shape)\n\t\t\t\tif envelope:\n\t\t\t\t\tenvs = fh['/%s/%s/stim/envelope' %(stimulus_class, wav_name)][:] \n\t\t\t\t\tenvs = scipy.signal.resample(envs, ntimes) #resampling to size of phnfeat\n\t\t\t\t\tstim_dict[wav_name].append(envs.T)\n\t\t\t\t\tprint('envs shape is:')\n\t\t\t\t\tprint(envs.shape)\n\t\t\t\tif pitch:\n\t\t\t\t\tpitch_mat = fh['/%s/%s/stim/pitches' %(stimulus_class, wav_name)][:] \n\t\t\t\t\tpitch_mat = scipy.signal.resample(pitch_mat, ntimes) #resample to size of phnfeat\n\t\t\t\t\tpitch_mat = np.atleast_2d(pitch_mat)\n\t\t\t\t\tstim_dict[wav_name].append(pitch_mat)\n\t\t\t\t\tprint('pitch_mat shape is:')\n\t\t\t\t\tprint(pitch_mat.shape)\t\n\t\t\t\tif binned_pitches:\n\t\t\t\t\tbinned_p = fh['/%s/%s/stim/binned_pitches' %(stimulus_class, wav_name)][:] \n\t\t\t\t\t#binned_p = scipy.signal.resample(binned_p, ntimes) #resample to size of phnfeat\n\t\t\t\t\tbinned_p = np.atleast_2d(binned_p)\n\t\t\t\t\tstim_dict[wav_name].append(binned_p.T)\n\t\t\t\t\tprint('binned pitch shape is:')\n\t\t\t\t\tprint(binned_p.shape)\t\t\t\t\n\t\t\t\tif gabor_pc10:\n\t\t\t\t\tgabor_pc10_mat = fh['/%s/%s/stim/gabor_pc10' %(stimulus_class, wav_name)][:]\n\t\t\t\t\tstim_dict[wav_name].append(gabor_pc10_mat.T)\n\t\t\t\t\tprint('gabor_mat shape is:')\n\t\t\t\t\tprint(gabor_pc10_mat.shape) \n\t\t\t\tif spectrogram:\n\t\t\t\t\tspecs = fh['/%s/%s/stim/spec' %(stimulus_class, wav_name)][:] \n\t\t\t\t\tspecs = scipy.signal.resample(specs, ntimes, axis=1)\n\t\t\t\t\tnew_freq = 15 #create new feature size, from 80 to 15. Easier to fit STRF with the specified time delay\n\t\t\t\t\tspecs = scipy.signal.resample(specs, new_freq, axis=0)\n\t\t\t\t\tstim_dict[wav_name].append(specs)\n\t\t\t\t\tprint('specs shape is:')\n\t\t\t\t\tprint(specs.shape)\n\t\t\t\t\tfreqs = fh['/%s/%s/stim/freqs' %(stimulus_class, wav_name)][:]\n\t\t\t\tif spectrogram_scaled:\n\t\t\t\t\tspecs = fh['/%s/%s/stim/spec' %(stimulus_class, wav_name)][:] \n\t\t\t\t\tspecs = scipy.signal.resample(specs, ntimes, axis=1)\n\t\t\t\t\tnew_freq = 15 #create new feature size, from 80 to 15. 
Easier to fit STRF with the specified time delay\n\t\t\t\t\tspecs = scipy.signal.resample(specs, new_freq, axis=0)\n\t\t\t\t\tspecs = specs/np.abs(specs).max()\n\t\t\t\t\tstim_dict[wav_name].append(specs)\n\t\t\t\t\tprint('specs shape is:')\n\t\t\t\t\tprint(specs.shape)\n\t\t\t\tif scene_cut:\n\t\t\t\t\ts_cuts = fh['/%s/%s/stim/scene_cut' %(stimulus_class, wav_name)][:] \n\t\t\t\t\ts_cuts = scipy.signal.resample(s_cuts, ntimes, axis=1)\n\t\t\t\t\tstim_dict[wav_name].append(s_cuts)\n\t\t\t\t\tprint('scene cut shape is:')\n\t\t\t\t\tprint(s_cuts.shape)\n\t\t\t\n\t\t\t\t\t#return freqs once\n\t\t\t\t\tfreqs = fh['/%s/%s/stim/freqs' %(stimulus_class, wav_name)][:]\n\t\t\texcept Exception:\n\t\t\t\ttraceback.print_exc()\n\t\t\t\t\n\t\t\tif eeg_epochs:\n\t\t\t\ttry: \n\t\t\t\t\tepochs_data = fh['/%s/%s/resp/%s/epochs' %(stimulus_class, wav_name, subject)][:]\n\t\t\t\t\tif resp_mean:\n\t\t\t\t\t\tprint('taking the mean across repeats')\n\t\t\t\t\t\tepochs_data = epochs_data.mean(0)\n\t\t\t\t\t\tepochs_data = scipy.signal.resample(epochs_data.T, ntimes).T #resample to size of phnfeat\n\t\t\t\t\telse:\n\t\t\t\t\t\tepochs_data = scipy.signal.resample(epochs_data, ntimes, axis=2)\n\t\t\t\t\tprint(epochs_data.shape)\n\t\t\t\t\tresp_dict[wav_name].append(epochs_data)\n\t\t\t\t\t\n\t\t\t\texcept Exception:\n\t\t\t\t\ttraceback.print_exc()\n\t\t\t\t\t# print('%s does not have neural data for %s'%(subject, wav_name))\n\n\t\t\t\t\t# epochs_data = []\n\n\tif spectrogram:\n\t\treturn resp_dict, stim_dict, freqs\n\n\tif spectrogram_scaled:\n\t\treturn resp_dict, stim_dict, freqs\n\t\t\n\telse:\n\t\treturn resp_dict, stim_dict",
"def main(\n simulate_late_report=True, simulate_format_error=True, simulate_unseen_error=True,\n simulate_repeted_error=True, simulate_timeout_error=True):\n\n # get script directory\n #dirName = os.path.dirname(os.path.realpath(__file__))\n\n # load users\n #filename = dirName + '/users.pkl'\n filename = './users.pkl'\n users = []\n with open(filename, 'rb') as usersFile:\n users = pickle.load(usersFile)\n\n # load value mappings\n #filename = dirName + '/mappings.pkl'\n filename = './mappings.pkl'\n mappings = {}\n with open(filename, 'rb') as mappingsFile:\n mappings = pickle.load(mappingsFile)\n\n\n # load models and scalers (for each user)\n models = {}\n scalers = {}\n for user in users:\n #filename = dirName + '/' + user + '.pkl'\n filename = './' + user + '.pkl'\n with open(filename, 'rb') as modelFile:\n models[user] = pickle.load(modelFile)\n\n #filename = dirName + '/' + user + '_scaler.pkl'\n filename = './' + user + '_scaler.pkl'\n with open(filename, 'rb') as scalerFile:\n scalers[user] = pickle.load(scalerFile)\n \n line_counter = -1\n while True: # repeat until empty line\n line_counter += 1\n line = sys.stdin.readline() # read line from stdin (including \\n character)\n # count your time\n loop_start_time = time.time()\n\n if not line or line.strip() == 'exit': # if line is empty or exit string, break loop\n # +----------------------------------------------------+\n # | before the end of script, you can report anomalies |\n # +----------------------------------------------------+\n #if line_counter > 1:\n # report last line as anomaly to demonstrate functionality\n #sys.stdout.write('%i\\n' % (line_counter - 1))\n #sys.stdout.flush()\n # write `ok\\n` for system not to wait for another output\n sys.stdout.write('ok\\n')\n sys.stdout.flush()\n # +----------------------------------------------------+\n # break to end infinite loop\n break\n\n # convert JSON serialized string to object (Python dict)\n activity_log = json.loads(line)\n\n # timestamp of event\n timestamp = datetime.datetime.fromtimestamp(int(activity_log[\"unix_timestamp\"]))\n\n\n user = activity_log['user']\n\t# get day and time and map values to numbers according to learned mappings\n mappedLog = prepLine(activity_log, mappings)\n\t# scale (subtract mean and divide by variance or sthg like that)\n mappedLogNorm = scalers[user].transform(mappedLog)\n\t# predict if normal or anomaly\n prediction = models[user].predict(mappedLogNorm)\n\t# if anomaly, print it's id\n if prediction == -1:\n sys.stdout.write(str(activity_log['id']) + '\\n')\n sys.stdout.flush()\n \n\n # +----------------------------------------------------+\n # write `ok\\n` to continue loop (only if we didn't exceed time limit)\n if time.time() - loop_start_time < 2:\n sys.stdout.write('ok\\n')\n # don't forget to flush stdout\n sys.stdout.flush()",
"def setUp(self):\n\n super().setUp()\n self.h5fname = self._getTempFileName()\n self.h5file = tb.open_file(\n self.h5fname, self.open_mode, title=self._getName(),\n **self.open_kwargs)",
"def lemon_prepare():\n\n # Path Configuration\n\n this_dir = os.path.dirname(__file__)\n data_dir = os.path.join(this_dir,'..','_data')\n root_path = os.path.abspath(os.path.join(data_dir,'lemon'))\n os.makedirs(data_dir,exist_ok=True)\n\n # Download lemon Database\n\n urls = ['https://fcp-indi.s3.amazonaws.com/data/Projects/INDI/MPI-LEMON/Compressed_tar/EEG_MPILMBB_LEMON/EEG_Raw_BIDS_ID/sub-032301.tar.gz',\n 'https://fcp-indi.s3.amazonaws.com/data/Projects/INDI/MPI-LEMON/Compressed_tar/EEG_MPILMBB_LEMON/EEG_Raw_BIDS_ID/sub-032302.tar.gz',\n 'https://fcp-indi.s3.amazonaws.com/data/Projects/INDI/MPI-LEMON/Compressed_tar/EEG_MPILMBB_LEMON/EEG_Raw_BIDS_ID/sub-032303.tar.gz',\n 'https://fcp-indi.s3.amazonaws.com/data/Projects/INDI/MPI-LEMON/name_match.csv']\n\n for url in urls:\n download(url,os.path.join(data_dir,'lemon'))\n\n # Generate all filepaths\n\n filepaths = _get_files(root_path)\n\n\n # Label Correction\n name_match = read_csv(os.path.join(root_path,'name_match.csv'))\n \n # Unpack files\n\n # TAR FILES\n tars = [x for x in filepaths if 'tar.gz' in x ]\n\n # SUBJECTS\n old_ids = [parse_from_regex(x,'(sub-.*?).tar.gz',['id']) for x in tars]\n old_ids = [x['id'] for x in old_ids]\n new_ids = [name_match.loc[(name_match.INDI_ID==x),'Initial_ID']._values[0] for x in old_ids]\n\n # EEG FILES\n not_tars = [x for x in filepaths if '.vhdr' in x ]\n not_tars_ids = [parse_from_regex(x,'RSEEG\\\\/(sub-.*?).vhdr',['id']) for x in not_tars]\n not_tars_ids = [x['id'] for x in not_tars_ids] \n\n\n assert len(tars) == len(old_ids) == len(new_ids)\n\n if set(new_ids) == set(not_tars_ids): # all done\n return\n else:\n for file,old,new in zip(tars,old_ids,new_ids):\n if not new in not_tars_ids: # skip already prepared files\n shutil.unpack_archive(file,root_path)\n olddir = os.path.join(root_path,old)\n subject_files = _get_files(olddir)\n for subfile in subject_files: # fix sub-id\n new_path = subfile.replace(old,new)\n dir,_ = os.path.split(new_path)\n os.makedirs(dir,exist_ok=True)\n shutil.move(subfile,new_path)\n shutil.rmtree(olddir)\n print('LEMON PREPARE DONE!')",
"def test_stop_resume(self):\n self.create_sample_data_set_dir(\"node59p1_step1.dat\", TELEM_DIR, \"node59p1.dat\",\n copy_metadata=False)\n driver_config = self._driver_config()['startup_config']\n sio_mule_config = driver_config['harvester'][DataSourceKey.PHSEN_ABCDEF_SIO_MULE]\n fullfile = os.path.join(sio_mule_config['directory'], sio_mule_config['pattern'])\n mod_time = os.path.getmtime(fullfile)\n\n # Create and store the new driver state\n self.memento = {DataSourceKey.PHSEN_ABCDEF_SIO_MULE: {\n \"node59p1.dat\": {\n DriverStateKey.FILE_SIZE: 911,\n DriverStateKey.FILE_CHECKSUM: '8b7cf73895eded0198b3f3621f962abc',\n DriverStateKey.FILE_MOD_DATE: mod_time,\n DriverStateKey.PARSER_STATE: {\n StateKey.IN_PROCESS_DATA: [],\n StateKey.UNPROCESSED_DATA:[[0, 172]],\n StateKey.FILE_SIZE: 911\n }\n }\n }}\n\n self.driver = self._get_driver_object(memento=self.memento)\n\n # create some data to parse\n self.clear_async_data()\n self.create_sample_data_set_dir(\"node59p1_step2.dat\", TELEM_DIR, \"node59p1.dat\",\n copy_metadata=False)\n\n self.driver.start_sampling()\n\n # verify data is produced\n self.assert_data(PhsenParserDataParticle, 'test_data_2.txt.result.yml',\n count=2, timeout=10)",
"def setUp(self):\n\n self.hw = HMMERWrapper\n\n modpath = os.path.abspath(os.path.dirname(__file__))\n self.seqfile = os.path.join(modpath, 'data', 'P00929.fasta')\n self.badfile = os.path.join(modpath, 'data', 'bad.fasta')",
"def run(pars, #parameter files\n #directory of scenario files\n scen_dir = r'C:\\LS\\03_TOOLS\\_git\\COVID_01\\scenarios',\n \n #map to scenario files\n scen_d = {\n 'NoNPI':'NPI_Scenario1_None.R',\n 'BI1918':'NPI_Scenario2_Bootsma_1918Influenza.R',\n 'SouthKorea':'NPI_Scenario3_SouthKorea.R',\n 'Reduced':'NPI_Scenario4_ReducedGamma.R', \n }\n ):\n \n \n \n #===========================================================================\n # precheck \n #===========================================================================\n assert len(pars)==4, 'unexpected inputs count'\n print('pars: \\n%s'%pars)\n \n #check the R Environment variables\n assert 'R_USER' in os.environ\n assert 'R_HOME' in os.environ\n \n #print('R_USER=%s \\nR_HOME=%s'%(os.getenv('R_USER'), os.getenv('R_HOME')))\n\n \n \n \n \n #===========================================================================\n # setup\n #===========================================================================\n s = setup.Setup(setup_name = 'mid_utah_'+pars[2],\n spatial_setup = WestCoastSpatialSetup(),\n nsim = int(pars[1]),\n ti = datetime.date(2020, 3, 6),\n tf = datetime.date(2020, 10, 1),\n interactive = False,\n write_csv = True,\n dt = 1/4)\n \n #===========================================================================\n # set the scenario parmaters\n #===========================================================================\n\n \n \n assert pars[2] in scen_d, 'unrecognized scenario: %s'%pars[2]\n \n rfp = os.path.join(scen_dir, scen_d[pars[2]])\n assert os.path.exists(rfp)\n \n s.script_npi = rfp\n \n print('set script_npi=%s'%s.script_npi)\n\n #===========================================================================\n # execute\n #===========================================================================\n\n print()\n print()\n print(f\">>> Starting {s.nsim} model runs on {pars[3]} processes\")\n print(f\">>> Setup *** {s.setup_name} *** from {s.ti} to {s.tf} !\")\n print(f\">>> writing to folder : {s.datadir}{s.setup_name}\")\n print()\n print()\n \n tic = time.time()\n \n res_l = seir.run_parallel(s, int(pars[3]))\n print(f\">>> Runs done in {time.time()-tic} seconds...\")",
"def reffile_setup(self):\n # Prepare to find files listed as 'config'\n # and set up PSF path\n\n # set up as dictionary of dictionaries\n self.configfiles = {}\n self.psfpath = {}\n self.psfbasename = {}\n self.psfpixfrac = {}\n self.reference_file_dir = {}\n\n for instrument in 'nircam niriss fgs'.split():\n self.configfiles[instrument] = {}\n self.psfpath[instrument] = os.path.join(self.datadir, instrument, 'gridded_psf_library')\n self.psfbasename[instrument] = instrument\n self.reference_file_dir[instrument] = os.path.join(self.datadir, instrument, 'reference_files')\n\n # Set instrument-specific file paths\n if instrument == 'nircam':\n self.psfpixfrac[instrument] = 0.25\n elif instrument == 'niriss':\n self.psfpixfrac[instrument] = 0.1\n elif instrument == 'fgs':\n self.psfpixfrac[instrument] = 0.1\n\n # Set global file paths\n self.configfiles[instrument]['filter_throughput'] = os.path.join(self.modpath, 'config', 'placeholder.txt')\n\n for instrument in 'miri nirspec'.split():\n self.configfiles[instrument] = {}\n self.psfpixfrac[instrument] = 0\n self.psfbasename[instrument] = 'N/A'\n\n # create empty dictionaries\n list_names = 'superbias linearity gain saturation ipc astrometric photom pam dark lindark'.split()\n for list_name in list_names:\n setattr(self, '{}_list'.format(list_name), {})\n\n self.det_list = {}\n self.det_list['nircam'] = ['A1', 'A2', 'A3', 'A4', 'A5', 'B1', 'B2', 'B3', 'B4', 'B5']\n self.det_list['niriss'] = ['NIS']\n self.det_list['fgs'] = ['G1', 'G2']\n self.det_list['nirspec'] = ['NRS']\n self.det_list['miri'] = ['MIR']\n\n for instrument in 'nircam niriss fgs miri nirspec'.split():\n for list_name in list_names:\n getattr(self, '{}_list'.format(list_name))[instrument] = {}\n\n if self.offline:\n # no access to central store. 
Set all files to none.\n for list_name in list_names:\n if list_name in 'dark lindark'.split():\n default_value = ['None']\n else:\n default_value = 'None'\n for det in self.det_list[instrument]:\n getattr(self, '{}_list'.format(list_name))[instrument][det] = default_value\n\n elif instrument == 'nircam':\n rawdark_dir = os.path.join(self.datadir, 'nircam/darks/raw')\n lindark_dir = os.path.join(self.datadir, 'nircam/darks/linearized')\n for det in self.det_list[instrument]:\n self.dark_list[instrument][det] = glob(os.path.join(rawdark_dir, det, '*.fits'))\n self.lindark_list[instrument][det] = glob(os.path.join(lindark_dir, det, '*.fits'))\n\n elif instrument in ['nirspec', 'miri']:\n for key in 'subarray_def_file fluxcal filtpupil_pairs readpatt_def_file crosstalk ' \\\n 'dq_init_config saturation_config superbias_config refpix_config ' \\\n 'linearity_config filter_throughput'.split():\n self.configfiles[instrument][key] = 'N/A'\n default_value = 'none'\n for list_name in list_names:\n for det in self.det_list[instrument]:\n getattr(self, '{}_list'.format(list_name))[instrument][det] = default_value\n\n else: # niriss and fgs\n for det in self.det_list[instrument]:\n if det == 'G1':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/raw', FGS1_DARK_SEARCH_STRING))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/linearized', FGS1_DARK_SEARCH_STRING))\n\n elif det == 'G2':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/raw', FGS2_DARK_SEARCH_STRING))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/linearized', FGS2_DARK_SEARCH_STRING))\n\n elif det == 'NIS':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'niriss/darks/raw',\n '*uncal.fits'))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'niriss/darks/linearized',\n '*linear_dark_prep_object.fits'))",
"def run_gen_and_econ(self):\n try:\n super().run_gen_and_econ()\n except SAMExecutionError as e:\n logger.error(\"Skipping site {}; received sam error: {}\"\n .format(self._site, str(e)))\n self.outputs = {}",
"def main():\n tpd_file_name = get_nonexisting_file(\"Enter name of new tpd file: \")\n tpd = TrainPredictData(tpd_file_name)\n\n print \"You can now enter the file paths of the the newly created tpd file.\"\n print \"If you want to skip a data set, just press enter without typing anything.\"\n\n train_raw_path = get_existing_file(\"Enter training raw path: \", skip=True)\n if train_raw_path is not None:\n train_raw_key = extract_h5_key(train_raw_path, \"Enter training raw h5 key: \")\n tpd.set_train_raw(train_raw_path, train_raw_key)\n\n train_gt_path = get_existing_file(\"Enter training gt path: \", skip=True)\n if train_gt_path is not None:\n train_gt_key = extract_h5_key(train_gt_path, \"Enter training gt h5 key: \")\n tpd.set_train_gt(train_gt_path, train_gt_key)\n\n train_pred_path = get_existing_file(\"Enter training pred path: \", skip=True)\n if train_pred_path is not None:\n train_pred_key = extract_h5_key(train_pred_path, \"Enter training pred h5 key: \")\n tpd.set_train_pred(train_pred_path, train_pred_key)\n\n train_feat_path = get_existing_file(\"Enter training feature path: \", skip=True)\n while train_feat_path is not None:\n train_feat_key = extract_h5_key(train_feat_path, \"Enter training feature path: \")\n tpd.add_train_feature(train_feat_path, train_feat_key)\n train_feat_path = get_existing_file(\"Enter training feature path: \", skip=True)\n\n test_raw_path = get_existing_file(\"Enter test raw path: \", skip=True)\n if test_raw_path is not None:\n test_raw_key = extract_h5_key(test_raw_path, \"Enter test raw h5 key: \")\n tpd.set_test_raw(test_raw_path, test_raw_key)\n\n test_gt_path = get_existing_file(\"Enter test gt path: \", skip=True)\n if test_gt_path is not None:\n test_gt_key = extract_h5_key(test_gt_path, \"Enter test gt h5 key: \")\n tpd.set_test_gt(test_gt_path, test_gt_key)\n\n test_pred_path = get_existing_file(\"Enter test pred path: \", skip=True)\n if test_pred_path is not None:\n test_pred_key = extract_h5_key(test_pred_path, \"Enter test pred h5 key: \")\n tpd.set_test_pred(test_pred_path, test_pred_key)\n\n test_feat_path = get_existing_file(\"Enter test feature path: \", skip=True)\n while test_feat_path is not None:\n test_feat_key = extract_h5_key(test_feat_path, \"Enter test feature path: \")\n tpd.add_test_feature(test_feat_path, test_feat_key)\n test_feat_path = get_existing_file(\"Enter test feature path: \", skip=True)\n\n return 0",
"def config():\n experiment_dir = './experiments'\n simulation_steps = 1000\n device = 'cpu'\n path_to_molecules = os.path.join(experiment_dir, 'data/ethanol.xyz')\n simulation_dir = os.path.join(experiment_dir, 'simulation')\n training_dir = os.path.join(experiment_dir, 'training')\n model_path = os.path.join(training_dir, 'best_model')\n overwrite = True",
"def _setup(self, emma_conf):\n settings = configparser.RawConfigParser()\n settings.read('settings.conf')\n self.root = settings.get(\"Datasets\", \"datasets_path\")\n\n # Assign trace set paths\n if self.format == \"cw\": # .npy\n path = join(self.root, self.id)\n self.trace_set_paths = sorted([join(self.id, f) for f in listdir(path) if isfile(join(path, f)) and '_traces.npy' in f])\n elif self.format == \"sigmf\": # .meta\n self.trace_set_paths = None\n raise NotImplementedError\n elif self.format == \"gnuradio\": # .cfile\n self.trace_set_paths = None\n raise NotImplementedError\n elif self.format == \"ascad\": # ASCAD .h5\n if ':' not in self.id:\n raise EMMAConfException(\"No group specified. Specify the H5 group to use by using a colon, e.g. file:group\")\n file, _, group = self.id.rpartition(\":\")\n path = join(self.root, 'ASCAD/ASCAD_data/ASCAD_databases/%s.h5' % file)\n\n # Make sure we never use training set when attacking or classifying\n self.trace_set_paths = emma.io.io.get_ascad_paths(path, group)\n else:\n raise Exception(\"Unknown input format '%s'\" % self.format)\n\n # Limit trace set paths\n self.trace_set_paths = self.trace_set_paths[0:emma_conf.max_num_tracesets]\n assert(len(self.trace_set_paths) > 0)\n\n # Assign reference signal\n reference_trace_set = emma.io.io.get_trace_set(join(self.root, self.trace_set_paths[0]), self.format, ignore_malformed=False, remote=False) # TODO add parameter to allow choosing reference trace set index. Fixed now to 0.\n\n self.traces_per_set = len(reference_trace_set.traces)\n self.reference_signal = reference_trace_set.traces[self.reference_index].signal",
"def resume(self, tag=\"current\"):\n\n if not self.is_resumable(tag):\n logging.warning(\"This exeriment is not resumable!\")\n self.force_restart(tag)\n\n else:\n logging.info(\"Loading the experiment from {}\".format(self._dir_name))\n\n save_dir = os.path.join(self._dir_name, tag)\n\n if self._model is not None:\n self._model.load(save_dir)\n\n if self._config is not None:\n file_name = os.path.join(save_dir, \"config.p\")\n self._config.load(file_name)\n\n if self._logger is not None:\n file_name = os.path.join(save_dir, \"logger\")\n self._logger.load(file_name)\n\n if self._train_statistics is not None:\n file_name = os.path.join(save_dir, \"train_statistics.p\")\n self._train_statistics.load(file_name)\n\n if self._data_iterator is not None:\n file_name = os.path.join(save_dir, \"data_iterator.p\")\n self._data_iterator.load(file_name)",
"def prepare(self):\n\n # step 0: load only when not loaded yet\n if TRAINING in self.data \\\n and VALIDATION in self.data:\n return\n\n # step 1: load the file names\n file_list = sorted(glob.glob(self.location+\"*.mhd\"))\n # count the number of data points\n\n # make a stratified validation set\n # note, the seed decides the validation set, but it is deterministic in the names\n random.seed(317070)\n patient_names = [self.patient_name_from_file_name(f) for f in file_list]\n validation_patients = random.sample(patient_names, int(VALIDATION_SET_SIZE*len(patient_names)))\n\n # make the static data empty\n for s in self.datasets:\n self.data[s] = []\n self.labels[s] = []\n self.names[s] = []\n\n # load the filenames and put into the right dataset\n labels_as_dict = defaultdict(list)\n\n with open(paths.LUNA_LABELS_PATH, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(reader) # skip the header\n for row in reader:\n label = (float(row[1]), float(row[2]), float(row[3]), float(row[4]))\n labels_as_dict[str(row[0])].append(label)\n\n for patient_file in file_list:\n patient_name = self.patient_name_from_file_name(patient_file)\n\n if patient_name in validation_patients:\n s = VALIDATION\n else:\n s = TRAINING\n label = labels_as_dict[str(patient_name)]\n if self.only_positive and not label:\n continue\n self.data[s].append(patient_file)\n \n if self.pick_nodule:\n self.labels[s].append([random.choice(label)]) \n else:\n self.labels[s].append(label)\n \n \n self.names[s].append(patient_name)\n\n # give every patient a unique number\n last_index = -1\n for s in self.datasets:\n self.indices[s] = range(last_index+1,last_index+1+len(self.data[s]))\n if len(self.indices[s]) > 0:\n last_index = self.indices[s][-1]\n print s, len(self.indices[s]), \"samples\"",
"def run(self):\n\n # driver=\"H5FD_CORE\" another driver for Solid State devs?\n theFile = tables.open_file(self.hdfFileName, \"w\")\n theFile.create_group(\"/\", \"transitionLogs\")\n theLog = theFile.create_earray(where=theFile.root,\n name=\"log\",\n atom=tables.StringAtom(itemsize=120),\n shape=(0,),\n title=\"log messages\",\n filters=tables.Filters(complevel=9,\n complib='zlib'))\n speciesTables = {}\n\n try:\n # do a loop!\n while True:\n try:\n msg = self.transitionsPipe.recv()\n # msg=messagequeue.get()\n except EOFError:\n break\n cmd = msg[0]\n if cmd == \"parameters\":\n # expect two dictionaries\n parameters, runParameters = msg[1], msg[2]\n\n if type(parameters) is dict:\n if \"/parameters\" in theFile:\n parameterTable = theFile.root.parameters\n else:\n parameterTable = theFile.create_table(\n \"/\",\n \"parameters\",\n HDFLoggingProcess.parameterTableFormat)\n parameterRow = parameterTable.row\n varTypeEnum = parameterTable.coldescrs[\"varType\"].enum\n varTypeDict = {int: varTypeEnum[\"INT\"],\n str: varTypeEnum[\"STR\"],\n float: varTypeEnum[\"FLOAT\"],\n bool: varTypeEnum[\"BOOL\"]}\n runType = varTypeEnum[\"RUN\"]\n\n for k, v in parameters.items():\n varType = varTypeDict[type(v)]\n parameterRow[\"varName\"] = str(k)\n parameterRow[\"varType\"] = varType\n parameterRow[\"varValue\"] = str(v)\n parameterRow.append()\n\n for k, v in runParameters.items():\n parameterRow[\"varName\"] = str(k)\n parameterRow[\"varType\"] = runType\n parameterRow[\"varValue\"] = str(v)\n parameterRow.append()\n\n parameterTable.close()\n del parameterRow, parameterTable\n elif type(parameters) is scenario:\n print(\"writing scenarios\")\n parameters.writeToHDF(theFile.root, 'scenario')\n else:\n print(\"unsupported type: {}\".format(type(parameters)))\n\n # need a table def and a transition log\n elif cmd == \"registerTransitionType\":\n # change lists to enumerations!\n # expect list of extra columns as msg[2]\n theColumns = {}\n for name, col in msg[2].items():\n if type(col) is dict:\n # this is an enumeration type used\n # for the from/to state\n col = tables.EnumCol(tables.Enum(col),\n \"start\",\n \"uint16\")\n elif type(col) is str:\n # column of type defined by string\n col = eval(col) # ToDo: remove eval\n theColumns[name] = col\n\n # gets species name and table format as dict\n transitions = type(\"transitions\",\n (tables.IsDescription,),\n theColumns)\n speciesTables[msg[1]] = theFile.create_table(\n \"/transitionLogs\",\n msg[1],\n transitions,\n filters=tables.Filters(\n complevel=9,\n complib=\"lzo\",\n least_significant_digit=3))\n\n elif cmd == \"changeFile\":\n # close tables and file\n for t in speciesTables.values():\n t.close()\n del t\n del speciesTables\n theLog.close()\n del theLog\n theFile.close()\n del theFile\n\n # set new file name\n self.hdfFileName = msg[1]\n # open new one\n # potentially a driver=\"H5FD_CORE\" ?\n theFile = tables.open_file(self.hdfFileName, \"w\")\n theFile.create_group(\"/\", \"transitionLogs\")\n theLog = theFile.create_earray(\n where=theFile.root,\n name=\"log\",\n atom=tables.StringAtom(itemsize=120),\n shape=(0,),\n title=\"log messages\",\n filters=tables.Filters(complevel=9,\n complib='zlib'))\n speciesTables = {}\n # expecting replay of species tables\n\n elif cmd == \"logTransition\":\n # gets species name and values in order as defined by the\n # table format\n # todo: check the format!\n table = speciesTables[msg[1]]\n row = table.row\n agentId, t1, t2, fromState, toState, effort = msg[2]\n row[\"agentId\"] = agentId\n 
row[\"timeStamp\"] = t2\n row[\"fromState\"] = fromState\n row[\"toState\"] = toState\n row[\"dwellTime\"] = t2-t1\n row[\"effort\"] = effort\n\n if len(msg) > 2:\n # are there any extra parameters?\n for name, value in msg[3].items():\n if type(value) is str:\n row[name] = numpy.array(value.encode(),\n dtype=\"S\")\n else:\n row[name] = value\n row.append()\n del table, row\n\n # also a progress table\n elif cmd == \"progress\":\n # if not there, create new table\n if \"/progress\" not in theFile:\n theFile.create_table(\n '/',\n 'progress',\n HDFLoggingProcess.hdfProgressTable)\n # add values as they are...\n theFile.root.progress.append([msg[1]])\n\n elif cmd == \"message\":\n theLog.append(numpy.array([str(msg[1])], dtype=\"S120\"))\n\n elif cmd == \"end\":\n break\n\n else:\n print(\"unknown type {}\".format(msg[0]))\n except:\n raise\n finally:\n # messagequeue.close()\n self.transitionsPipe.close()\n del self.transitionsPipe\n # print(\"finished \", messagepipe)\n # done, be pedantic about closing all resources\n for t in speciesTables.values():\n t.close()\n del t\n del speciesTables\n theLog.close()\n del theLog\n theFile.close()\n del theFile",
"def bootstrap(self):\n\n\t\t#---paths.yaml specifies directories which might be absent so make them\n\t\tif not os.path.isdir(self.postdir): os.mkdir(self.postdir)\n\t\tif not os.path.isdir(self.plotdir): os.mkdir(self.plotdir)\n\t\t#---parse the simulations found in each \"spot\"\n\t\tfor spot in self.spots: self.treeparser(spot)\n\t\t#---if there is a part named edr then we use it to get simulation times\n\t\t#---! edr files are required to infer times for slicing however we might also use xtc or trr later\n\t\tassert 'edr' in zip(*self.spots.keys())[1]\n\t\tself.treeparser_edr()\n\t\t#---data are stored in dictionaries by spot name\n\t\tall_top_keys = [i for j in [k.keys() for k in self.toc.values()] for i in j]\n\n\t\t#---! under development\n\t\tfor key in ['post','groups','slices']:\n\t\t\tif key not in self.members_with_specific_parts:\n\t\t\t\tself.__dict__[key] = {i:{} for i in all_top_keys}\n\t\t\telse: self.__dict__[key] = {(spot,i):{} \n\t\t\t\tfor spot in self.toc for i in self.toc[spot]}\n\t\tself.save()",
"def main():\n try:\n # parse argument\n s3_region = sys.argv[1]\n s3_bucket = sys.argv[2]\n s3_prefix = sys.argv[3]\n s3_yaml_name = sys.argv[4]\n launch_name = sys.argv[5]\n\n # create boto3 session/client and download yaml/json file\n session = boto3.session.Session()\n\n s3_endpoint_url = os.environ.get(\"S3_ENDPOINT_URL\", None)\n \n if s3_endpoint_url is not None:\n LOG.info('Endpoint URL {}'.format(s3_endpoint_url))\n rospy.set_param('S3_ENDPOINT_URL', s3_endpoint_url)\n else:\n # create boto3 session/client and download yaml/json file\n ec2_client = session.client('ec2', s3_region)\n LOG.info('Checking internet connection...')\n response = ec2_client.describe_vpcs()\n if not response['Vpcs']:\n log_and_exit(\"No VPC attached to instance\", SIMAPP_SIMULATION_WORKER_EXCEPTION,\n SIMAPP_EVENT_ERROR_CODE_500)\n LOG.info('Verified internet connection')\n\n s3_client = session.client('s3', region_name=s3_region, endpoint_url=s3_endpoint_url, config=get_boto_config())\n\n yaml_key = os.path.normpath(os.path.join(s3_prefix, s3_yaml_name))\n local_yaml_path = os.path.abspath(os.path.join(os.getcwd(), s3_yaml_name))\n s3_client.download_file(Bucket=s3_bucket, Key=yaml_key, Filename=local_yaml_path)\n # Get values passed in yaml files. Default values are for backward compatibility and for single racecar racing\n default_yaml_values = {RACE_TYPE_YAML_KEY: TIME_TRIAL_RACE_TYPE,\n MODEL_S3_BUCKET_YAML_KEY: s3_bucket,\n MODEL_S3_PREFIX_YAML_KEY: s3_prefix,\n CAR_COLOR_YAML_KEY: DEFAULT_COLOR,\n MODEL_METADATA_FILE_S3_YAML_KEY: None}\n yaml_dict = get_yaml_dict(local_yaml_path)\n yaml_values = get_yaml_values(yaml_dict, default_yaml_values)\n\n # Forcing the yaml parameter to list\n force_list_params = [MODEL_METADATA_FILE_S3_YAML_KEY, MODEL_S3_BUCKET_YAML_KEY, MODEL_S3_PREFIX_YAML_KEY,\n CAR_COLOR_YAML_KEY]\n\n for params in force_list_params:\n yaml_values[params] = force_list(yaml_values[params])\n\n # Populate the model_metadata_s3_key values to handle both training and evaluation for all race_formats\n if None in yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY]:\n # MODEL_METADATA_FILE_S3_KEY not passed as part of yaml file ==> This happens during evaluation\n # Assume model_metadata.json is present in the s3_prefix/model/ folder\n yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY] = list()\n for s3_prefix in yaml_values[MODEL_S3_PREFIX_YAML_KEY]:\n yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY].append(os.path.join(s3_prefix, 'model/model_metadata.json'))\n\n # Set multicar value if its a head to model racetype\n multicar = yaml_values[RACE_TYPE_YAML_KEY] == HEAD_TO_MODEL_RACE_TYPE\n # Validate the yaml values\n validate_yaml_values(yaml_values, multicar)\n # List of racecar names that should include second camera while launching\n racecars_with_stereo_cameras = list()\n\n # List of racecar names that should include lidar while launching\n racecars_with_lidars = list()\n\n # List of SimApp versions\n simapp_versions = list()\n\n for agent_index, model_s3_bucket in enumerate(yaml_values[MODEL_S3_BUCKET_YAML_KEY]):\n\n racecar_name = 'racecar_'+str(agent_index) if len(yaml_values[MODEL_S3_BUCKET_YAML_KEY]) > 1 else 'racecar'\n # Make a local folder with the racecar name to download the model_metadata.json\n if not os.path.exists(os.path.join(os.getcwd(), racecar_name)):\n os.makedirs(os.path.join(os.getcwd(), racecar_name))\n local_model_metadata_path = os.path.abspath(os.path.join(os.path.join(os.getcwd(), racecar_name),\n 'model_metadata.json'))\n json_key = 
yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY][agent_index]\n json_key = json_key.replace('s3://{}/'.format(model_s3_bucket), '')\n s3_client.download_file(Bucket=model_s3_bucket, Key=json_key, Filename=local_model_metadata_path)\n sensors, _, simapp_version = utils_parse_model_metadata.parse_model_metadata(local_model_metadata_path)\n simapp_versions.append(simapp_version)\n if Input.STEREO.value in sensors:\n racecars_with_stereo_cameras.append(racecar_name)\n if Input.LIDAR.value in sensors or Input.SECTOR_LIDAR.value in sensors:\n racecars_with_lidars.append(racecar_name)\n\n cmd = [''.join((\"roslaunch deepracer_simulation_environment {} \".format(launch_name),\n \"local_yaml_path:={} \".format(local_yaml_path),\n \"racecars_with_stereo_cameras:={} \".format(','.join(racecars_with_stereo_cameras)),\n \"racecars_with_lidars:={} multicar:={} \".format(','.join(racecars_with_lidars), multicar),\n \"car_colors:={} simapp_versions:={}\".format(','.join(yaml_values[CAR_COLOR_YAML_KEY]),\n ','.join(simapp_versions))))]\n Popen(cmd, shell=True, executable=\"/bin/bash\")\n \n except botocore.exceptions.ClientError as ex:\n log_and_exit(\"Download params and launch of agent node failed: s3_bucket: {}, yaml_key: {}, {}\"\n .format(s3_bucket, yaml_key, ex), \n SIMAPP_SIMULATION_WORKER_EXCEPTION,\n SIMAPP_EVENT_ERROR_CODE_400)\n except botocore.exceptions.EndpointConnectionError:\n log_and_exit(\"No Internet connection or s3 service unavailable\",\n SIMAPP_SIMULATION_WORKER_EXCEPTION,\n SIMAPP_EVENT_ERROR_CODE_500)\n except Exception as ex:\n log_and_exit(\"Download params and launch of agent node failed: s3_bucket: {}, yaml_key: {}, {}\"\n .format(s3_bucket, yaml_key, ex), \n SIMAPP_SIMULATION_WORKER_EXCEPTION,\n SIMAPP_EVENT_ERROR_CODE_500)",
"def main():\n parser = ArgumentParser(description=\"write to a file\")\n\n parser.add_argument(\"-i\",\"--input\", type=setup.is_valid_h5_file, required=True, nargs='+',\n help=\"path(s) of HDF5 master file(s)\")\n\n parser.add_argument(\"-b\",\"--beamcenter\", nargs=2, required=True,\n help=\"beam center in X and Y (two arguments)\")\n\n parser.add_argument(\"-r\",\"--oscillation\", type=float, default=1,\n help=\"oscillation angle per well, default = 1\")\n\n parser.add_argument(\"-d\",\"--distance\", type=float, default=100,\n help=\"detector distance in mm\")\n\n parser.add_argument(\"-w\",\"--wavelength\", type=float, default=1.216,\n help=\"Wavelength in Angstrom, default is 1.216\")\n\n parser.add_argument(\"-f\",\"--framesperdegree\", type=int, default=5,\n help=\"Number of frames per degree, default is 5\")\n\n parser.add_argument(\"-t\",\"--totalframes\", type=int, default=0,\n help=\"Total number of frames to be processed, default all\")\n\n parser.add_argument(\"--output\", default=os.getcwd(),\n help=\"Use this option to change output directory, default pwd\")\n\n parser.add_argument(\"-sg\",\"--spacegroup\", type=int, default=0,\n help=\"Space group\")\n\n parser.add_argument(\"-u\",\"--unitcell\", type=str, default=\"50 50 50 90 90 90\",\n help=\"unit cell\")\n\n argslist = parser.parse_args()\n for masterfile in argslist.input:\n master1= Master(argslist,masterfile)\n master1.printDataWells()",
"def prepare_and_save(path):\n \n raw, timestamp = ur.MNE_Read_EDF(path)\n \n #Use the time columns to create MNE events structure\n events_log, event_id = clean_log(path)\n event_sample_indexes = ur.parse_events(events_log, timestamp)\n events = ur.events_for_MNE(event_sample_indexes, event_id)\n \n #Add response correct/incorrect to events\n new_events, new_event_id = expand_events(path, events, event_id)\n #Crop the data to include only the time between start and stop of the experiment - many artifacts outside this interval \n raw_cropped = raw.copy().crop(tmin = events[0,0]/raw.info['sfreq'], tmax = events[-1,0]/raw.info['sfreq'])\n #Since the raw was cropped to the time of the first event its' new time is now 0. All following events are shifted.\n new_events[:,0] = new_events[:,0] - new_events[0,0]\n \n #Delete bad channels, ears and visually identified channels\n ears = [ch for ch in raw_cropped.ch_names if 'A' in ch]\n raw_cropped = raw_cropped.drop_channels(ears)\n \n subject_bads = {'Adrianna': ['T4'], 'BartekB' : ['Pz'], 'JeremiaszW' : [], 'KonradW' : ['T3'], 'Lucja' : ['T4', 'F8'], 'MaciekG':[], 'MariuszZ' : [], 'OlaS' :['P4'], 'Patrycja' :[]}\n bads = subject_bads[path.split('\\\\')[-3]]\n if len(bads) != 0:\n raw_cropped = raw_cropped.drop_channels(bads)\n \n #Apply average re-reference\n raw_cropped.save('raw_cropped/' + path.split('\\\\')[-3] +'_raw_cropped.fif', overwrite = True)\n return raw_cropped, new_events, new_event_id",
"def create_output_database():\n\n# Do not alter the hdf5 file if it already exists\n if os.path.exists(database_path):\n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" already exists and is ready to store the results of computations\")\n return None\n# Create hdf5 file. The flag \"-w\" means \"create file, fail if exists\" \n else:\n computations_database = h5py.File(database_path, \"w-\")\n\n# Create initial data datasets and write initial data into them \n for initial_condition in initial_conditions:\n for k in range (6,17):\n dataset_initial_path = initial_condition + \"/k = \" + str(k) + \" initial_data\"\n computations_database[dataset_initial_path] = initial_data(initial_condition, k)\n# Create data groups for storing the results of computations \n for flux in fluxes: \n group_path = initial_condition + \"/\" + flux\n computations_database.create_group(group_path)\n\n# Write the appropriate attributes that are needed for particular computations, \n# i.e. create the appropriate environment for each computational method \n computations_database[group_path].attrs[\"a\"] = 3.0\n computations_database[group_path].attrs[\"T\"] = 9.0\n if flux == \"Lax_Wendroff_Fourth_Order\": \n computations_database[group_path].attrs[\"CFL\"] = 0.2\n elif flux in [\"Fromm_CFL_0.5\", \"Fromm_van_Leer_CFL_0.5\"]:\n computations_database[group_path].attrs[\"CFL\"] = 0.5\n else:\n computations_database[group_path].attrs[\"CFL\"] = 0.9\n \n computations_database.close() \n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" has been created and is ready to store the results of computations\")",
"def gene_h5_train_file(data_path, h5_name):\n img = []\n y_cls_mask = []\n y_reg_cls_mask = []\n os.chdir(data_path)\n jpgfiles = glob.glob('*.jpg')\n idx = 1\n # the position of generator objector is very important\n gene_obj = image_output_pair(data_path, 1/255.0)\n while True:\n if idx == len(jpgfiles):\n break\n print '\\t{0}/{1}'.format(idx, len(jpgfiles))\n # the position of generator objector is very important\n # gene_obj = image_output_pair(data_path, 1/255.0)\n img_it, y_cls_mask_it, y_reg_cls_mask_it = gene_obj.next()\n img.append(img_it)\n y_cls_mask.append(y_cls_mask_it)\n y_reg_cls_mask.append(y_reg_cls_mask_it)\n idx += 1\n\n # img => (320, 320, 3)\n # after np.stack => (19041, 320, 320, 3)\n img_input = np.stack(img, axis=0)\n y_cls = np.stack(y_cls_mask, axis=0)\n y_reg = np.stack(y_reg_cls_mask, axis=0)\n print 'input data shape is {0}'.format(img_input.shape)\n print 'y_cls data shape is {0}'.format(y_cls.shape)\n print 'y_reg data shape is {0}'.format(y_reg.shape)\n \n # wirte data\n h5 = '/home/yuquanjie/Documents/train_' + h5_name\n file_write = h5py.File(h5, 'w')\n file_write.create_dataset('X_train', data=img_input)\n file_write.create_dataset('Y_train_cls', data=y_cls)\n file_write.create_dataset('Y_train_merge', data=y_reg)\n file_write.close()",
"def sequencePreparation(self):\n #Calculation of the number of frames in function of the duration + LED list for the acquisition\n if self.seqMode == \"rgbMode\":\n self._rgbSequenceInit()\n elif self.seqMode == 'rbMode':\n self._rbSequenceInit()\n else:\n print('Please select a valid mode of led sequence initialization')\n #Sending nb of frames to initialize the progress bar\n if type(self.nbFrames) == int:\n self.nbFramesSig.emit(self.nbFrames)\n\n print('acquisition Side : ', self.expRatio)\n #Saving the configuration of the experiment file (.json)\n self.savePath = cfgFileSaving(self.experimentName,\n self.nbFrames,\n self.duration,\n self.expRatio,\n self.acquMode,\n self.seqMode,\n self.rgbLedRatio,\n self.greenFrameInterval,\n round(1/self.cycleTime,2), #framerate\n self.folderPath,\n self.colorMode,\n self.mmc,\n 'Zyla') #WARNING > modulabilty (there is a way to get device label but it's not so easy)\n\n #initialization of the acquisition saving files : .tif (frames) and .txt (metadata)\n (self.tiffWriterList, self.textFile) = filesInit( self.savePath,\n self.experimentName,\n self.nbFrames,\n self.maxFrames)\n #send all informations to each LED driver\n self.arduinoSync()"
] | [
"0.60261214",
"0.5696248",
"0.5661924",
"0.5481591",
"0.5385601",
"0.5378313",
"0.5364455",
"0.5312231",
"0.5307268",
"0.52681684",
"0.5235969",
"0.5219296",
"0.5212105",
"0.5211143",
"0.52081215",
"0.51801795",
"0.5176859",
"0.5160141",
"0.51429856",
"0.5139128",
"0.50798786",
"0.5076746",
"0.50606847",
"0.5059945",
"0.50519454",
"0.50275457",
"0.50261736",
"0.5014504",
"0.50093734",
"0.5004886"
] | 0.57191813 | 1 |
Read a 2D/3D wabbit file and return a list with the number of blocks on each refinement level | def block_level_distribution_file( file ):
import h5py
import numpy as np
# open the h5 wabbit file
fid = h5py.File(file,'r')
# read treecode table
b = fid['block_treecode'][:]
treecode = np.array(b, dtype=float)
# close file
fid.close()
# number of blocks
Nb = treecode.shape[0]
# min/max level, needed to size the per-level counter array
jmin, jmax = get_max_min_level( treecode )
counter = np.zeros(jmax+1)
# fetch level for each block and count
for i in range(Nb):
J = treecode_level(treecode[i,:])
counter[J] += 1
return counter | {
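A minimal usage sketch for block_level_distribution_file, assuming it is imported together with the get_max_min_level and treecode_level helpers it calls, and using 'field_000000.h5' as a placeholder wabbit snapshot name:

import numpy as np

# count the blocks on each refinement level of one snapshot (placeholder file name)
counter = block_level_distribution_file('field_000000.h5')

# print the distribution; entry J holds the number of blocks on level J
for level, n in enumerate(counter):
    if n > 0:
        print("level %2i : %6i blocks" % (level, int(n)))

# the total block count is the sum over all levels
print("total blocks: %i" % int(np.sum(counter)))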
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readGR3File(inputFilename):\n print 'Reading ' + inputFilename + ' ...'\n infile = open(inputFilename, 'r')\n description = infile.readline().strip() # remove leading/trailing whitespace\n tmpStr = infile.readline()\n nTriangles, nNodes = (int(s) for s in tmpStr.split())\n print ' nTriangles={0:d} nNodes={1:d}'.format(nTriangles, nNodes)\n\n # nodes\n nodeArray = readNodeBlock(infile, nNodes)\n nodenum = np.array(nodeArray[:, 0].flatten(), dtype=int)\n nodexyz = np.zeros((nNodes, 3))\n nodexyz[:, :2] = nodeArray[:, 1:3]\n nodalValues = nodeArray[:, 3]\n\n print ' Nodal values min={0:g} max={1:g}'.format(min(nodalValues), max(nodalValues))\n\n # triangular elements\n triArray = readElemBlock(infile, nTriangles)\n\n trinum = triArray[:, 0].flatten()\n tritype = triArray[0, 1]\n trinodes = triArray[:, -3:] - 1 # three last columns, 0-based indexing\n #triangles = meshElements(trinodes,trinum,tritype)\n\n x = nodexyz[:, 0]\n y = nodexyz[:, 1]\n\n tmpStr = infile.readline()\n boundaries = []\n if len(tmpStr) > 0:\n # boundary information, if not end of file\n nOpenBndSegments = int(tmpStr.split()[0])\n nOpenBndNodesTot = int(infile.readline().split()[0])\n print ' nOpenBndSegments={0:d} nOpenBndNodesTot={1:d}'.format(nOpenBndSegments, nOpenBndNodesTot)\n for iBnd in range(nOpenBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n tag = bndHeader[-1]\n if tag.isdigit():\n tag = 'open' + tag\n print ' open bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary('open', tag, nodes))\n nLandBndSegments = int(infile.readline().split()[0])\n nLandBndNodesTot = int(infile.readline().split()[0])\n landBndTags = range(\n nOpenBndSegments + 1,\n nOpenBndSegments + nLandBndSegments + 1)\n print ' nLandBndSegments={0:d} nLandBndNodesTot={1:d}'.format(nLandBndSegments, nLandBndNodesTot)\n for iBnd in range(nLandBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n try:\n landType = int(bndHeader[1])\n except:\n print \"\"\"Land boundary type missing in gr3 file. Add 0/1 (land/island) after number of nodes in each land boudary, e.g.\n 1002 = Total number of closed boundary nodes\n 501 0 = Number of nodes in closed boundary 1\"\"\"\n raise Exception(\n 'Could not parse land boundary type (0/1 - land/island)\\n')\n landType = 'island' if landType == 1 else 'land'\n tag = landType + bndHeader[-1]\n print ' land bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n #tmpList = fromfile(infile,dtype=int,count=nBndNodes,sep=' ')\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary(landType, tag, nodes))\n\n infile.close()\n\n # for better interpolation, round coordinates to 1e-4\n nDig = 4\n x = np.round(x, nDig)\n y = np.round(y, nDig)\n\n return x, y, nodalValues, trinodes, boundaries, description",
"def depth_read(filename):\n f = open(filename,'rb')\n check = np.fromfile(f,dtype=np.float32,count=1)[0]\n assert check == TAG_FLOAT, ' depth_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check)\n width = np.fromfile(f,dtype=np.int32,count=1)[0]\n height = np.fromfile(f,dtype=np.int32,count=1)[0]\n size = width*height\n assert width > 0 and height > 0 and size > 1 and size < 100000000, ' depth_read:: Wrong input size (width = {0}, height = {1}).'.format(width,height)\n depth = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width))\n return depth",
"def read_state(path: str):\n state = State(0, (0, 0), [])\n n = 0\n with open(path, \"r\") as file:\n first_line = True\n line_counter = 0\n bin_width = 0\n bin_height = 0\n lines = file.readlines()\n for line in lines:\n line_counter += 1\n values = line.strip().split(' ')\n # Ignore comments in the file\n if values[0] != \"%\":\n # bin size is in the first line\n if first_line:\n if len(values) == 2:\n bin_width, bin_height = values\n try:\n bin_width = int(bin_width)\n except ValueError:\n print(f'File is not valid, in line {line_counter} {width} cannot be converted to int!')\n try:\n bin_height = int(bin_height)\n except ValueError:\n print(f'File is not valid, in line {line_counter} {height} cannot be converted to int!')\n state.bin_size = (bin_width, bin_height)\n state.open_new_bin()\n else:\n raise IOError(f'Wrong format of first line: \\n\\t {line} should be of format: \\n\\t bin_width'\n f'bin_height')\n first_line = False\n else:\n if len(values) == 2:\n width, height = values\n try:\n width = int(width)\n except ValueError:\n print(f'File is not valid, in line {line_counter} {width} cannot be converted to int!')\n try:\n height = int(height)\n except ValueError:\n print(f'File is not valid, in line {line_counter} {height} cannot be converted to int!')\n state.boxes_open.append(Box(width, height, n=n))\n n += 1\n elif len(values) == 5:\n width, height, box_x, box_y, bin_id = values\n while len(state.bins) < int(bin_id) + 1:\n state.bins.append(Bin(bin_width, bin_height))\n validation = state.bins[int(bin_id)].place_box_at_pnt(\n Box(int(width), int(height), n=n), Point(int(box_x), int(box_y)))\n n += 1\n if not validation:\n raise IOError(\n f'File contains no valid configuration, in line {line_counter} the box in bin {bin_id} with size {(width, height)} and position {(box_x, box_y)} is overlapping with some other box.')\n else:\n raise IOError(f'Wrong format of line {line_counter} should be of format: \\n\\t box_width '\n f'box_height box_x box_y bin_width bin_height bin_id \\n\\t or \\n\\t box_width '\n f'box_height')\n return state",
"def total_hpwl(file_name):\r\n\r\n nodes = {}\r\n netsx = {}\r\n netsy = {}\r\n counter = 0\r\n hpwl = 0\r\n\r\n with open(file_name + \".nodes\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if line.split()[0] not in nodes:\r\n nodes[line.split()[0]] = []\r\n nodes[line.split()[0]].append(line.split()[1])\r\n nodes[line.split()[0]].append(line.split()[2])\r\n\r\n with open(file_name + \".pl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n nodes[line.split()[0]].append(line.split()[1])\r\n nodes[line.split()[0]].append(line.split()[2])\r\n\r\n with open(file_name + \".nets\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if \"NetDegree\" in line:\r\n num_of_nodes = int(line.split()[2])\r\n net_name = \"n\" + str(counter)\r\n counter += 1\r\n netsx[net_name] = []\r\n netsy[net_name] = []\r\n elif re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if net_name in netsx:\r\n if len(netsx[net_name]) == 0:\r\n netsx[net_name].append(int(nodes[line.split()[0]][2]))\r\n netsx[net_name].append(int(nodes[line.split()[0]][2]) + int(nodes[line.split()[0]][0]))\r\n\r\n netsy[net_name].append(int(nodes[line.split()[0]][3]))\r\n netsy[net_name].append(int(nodes[line.split()[0]][3]) + int(nodes[line.split()[0]][1]))\r\n else:\r\n if int(nodes[line.split()[0]][2]) < netsx[net_name][0]:\r\n netsx[net_name][0] = int(nodes[line.split()[0]][2])\r\n\r\n if int(nodes[line.split()[0]][2]) + int(nodes[line.split()[0]][0]) > netsx[net_name][1]:\r\n netsx[net_name][1] = int(nodes[line.split()[0]][2]) + int(nodes[line.split()[0]][0])\r\n\r\n if int(nodes[line.split()[0]][3]) < netsy[net_name][0]:\r\n netsy[net_name][0] = int(nodes[line.split()[0]][3])\r\n\r\n if int(nodes[line.split()[0]][3]) + int(nodes[line.split()[0]][1]) > netsy[net_name][1]:\r\n netsy[net_name][1] = int(nodes[line.split()[0]][3]) + int(nodes[line.split()[0]][1])\r\n\r\n for net in netsx:\r\n hpwl += float(netsx[net][1] - netsx[net][0] + netsy[net][1] - netsy[net][0])\r\n\r\n return (hpwl)",
"def read_grid(filename):\r\n with open(filename) as infile:\r\n lines = infile.read().splitlines()\r\n\r\n grid = [[int(bit) for bit in line.split()] for line in lines]\r\n return grid",
"def read_grid(filename):\r\n with open(filename) as infile:\r\n lines = infile.read().splitlines()\r\n\r\n grid = [[int(bit) for bit in line.split()] for line in lines]\r\n return grid",
"def read_bounding_boxes(filename):\n f = open(filename)\n objects = []\n weight = 0\n height = 0\n for line in f:\n print(line)\n first_word = line.split(';')[0]\n if first_word == \"Dimensions\":\n weight = line.split(';')[1]\n height = line.split(';')[2]\n if first_word == \"Object\":\n objects.append((line.split(';')[1], line.split(';')[2], line.split(';')[4],\n line.split(';')[5], line.split(';')[6], line.split(';')[7]))\n return weight, height, objects",
"def get_initial_blocks(self):\n block = []\n index = 0\n for number in self.numbers_from_file(self.input_file_name):\n block.append(number)\n if len(block) == self.block_size:\n block.sort()\n self.write_block(index, block)\n block = []\n index += 1\n else:\n if block:\n block.sort()\n self.write_block(index, block)\n index += 1\n return 0, index",
"def read_flow(filename):\n with open(filename, 'rb') as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print('Magic number incorrect. Invalid .flo file')\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n data = np.fromfile(f, np.float32, count=int(2*w*h))\n # Reshape data into 3D array (columns, rows, bands)\n return np.resize(data, (h[0], w[0], 2))",
"def parse_file(filepath):\n with fitz.open(filepath) as doc:\n block_dict = {(idx + 1): page.getText(\"blocks\") for idx, page in enumerate(doc)}\n block_dict = {\n key: [block[4] for block in value] for key, value in block_dict.items()\n }\n return block_dict",
"def file_parser(file_name):\n h = 480\n w = 640\n out = []\n with open(file_name, 'r') as f:\n line_num = 1\n for line in f:\n if line_num < 17:\n # Read to where data starts\n line_num += 1\n continue\n elif line_num > 74:\n break\n # print(list(map(int, line.strip().split(\" \"))))\n vals = line.split()\n # print(list(\"\".join(line)))\n # print(line.split())\n assert(float(vals[2]) < 640)\n assert(float(vals[3]) < 480)\n point = [float(vals[2]) * w, float(vals[3]) * h]\n # print(point)\n out.append(point)\n line_num += 1\n\n out.append([0,0])\n out.append([w-1, 0])\n out.append([0, h-1])\n out.append([w-1, h-2])\n return out",
"def num_blocks(self): # -> int:\n ...",
"def readTestFile(self, filename):\n size = 0\n agentNum = 0\n block = {}\n agentList = []\n f = open(filename, 'r')\n for line in f:\n if line[0] != '#':\n c = line.split(' ')\n if c[0] == 'grid':\n size = int(line[5:7])\n elif c[0] =='block':\n block[(int(c[2]), int(c[1]))] = (int(c[3]) - int(c[1]) + 1, int(c[4]) - int(c[2]) + 1)\n elif c[0] == 'nets':\n agentNum = int(c[1])\n elif c[0] == 'net' or c[0] == 'xet':\n print(c)\n agentList.append([int(c[1]), (int(c[3]), int(c[2])), (int(c[6]), int(c[5]))])\n f.close()\n print(size)\n print(block)\n print(agentNum)\n print(agentList)\n return size, block, agentNum, agentList",
"def read_binning_file(file_name, lmax):\n\n bin_lo,bin_hi,bin_c = plt.loadtxt(file_name,unpack=True)\n id = np.where(bin_hi <lmax)\n bin_lo,bin_hi,bin_c=bin_lo[id],bin_hi[id],bin_c[id]\n if bin_lo[0]<2:\n bin_lo[0]=2\n bin_hi=bin_hi.astype(np.int)\n bin_lo=bin_lo.astype(np.int)\n bin_size=bin_hi-bin_lo+1\n return (bin_lo,bin_hi,bin_c,bin_size)",
"def readKuruczGrid(fname=''):\n\n with open(fname, 'r') as rfile:\n #\n # Skip the program part\n #\n for i in range(22):\n dum = rfile.readline()\n\n #\n # Read the wavelength grid\n #\n wav = []\n n = 10\n for i in range(153):\n dum = rfile.readline().split()\n for j in range(len(dum)):\n wav.append(float(dum[j]))\n\n #\n # Convert the wavelength in Angstrom to micron\n #\n wav = np.array(wav) * 1e-3\n #\n # Now read the grid of spectra\n #\n nwav = wav.shape[0]\n tgrid_list = []\n logg_list = []\n inu_list = []\n inucont_list = []\n\n #\n # Read the first section header\n #\n dum = rfile.readline()\n while dum.strip() != '':\n # print '>>>> ', dum, len(dum.strip())\n sdum = dum.split()\n tgrid_list.append(float(sdum[1]))\n logg_list.append(float(sdum[3]))\n\n #\n # Read the stellar spectrum\n #\n arr = []\n for i in range(152):\n dum = rfile.readline()\n for j in range(8):\n arr.append(float(dum[j * n:(j + 1) * n]))\n dum = rfile.readline()\n for j in range(5):\n arr.append(float(dum[j * n:(j + 1) * n]))\n inu_list.append(np.array(arr))\n #\n # Read the continuum spectrum\n #\n arr = []\n for i in range(152):\n dum = rfile.readline()\n for j in range(8):\n arr.append(float(dum[j * n:(j + 1) * n]))\n dum = rfile.readline()\n for j in range(5):\n arr.append(float(dum[j * n:(j + 1) * n]))\n inucont_list.append(np.array(arr))\n\n #\n # Read the next section header\n #\n dum = rfile.readline()\n\n teff_grid = np.array(tgrid_list)\n logg_grid = np.array(logg_list)\n inu = np.array(inu_list)\n inucont = np.array(inucont_list)\n\n return {'wav': wav, 'inu': inu, 'inucont': inucont, 'teff': teff_grid, 'logg': logg_grid, 'nwav': nwav}",
"def read_wabbit_hdf5(file, verbose=True, return_iteration=False):\n import h5py\n import numpy as np\n\n if verbose:\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Reading file %s\" % (file) )\n\n fid = h5py.File(file,'r')\n b = fid['coords_origin'][:]\n x0 = np.array(b, dtype=float)\n\n b = fid['coords_spacing'][:]\n dx = np.array(b, dtype=float)\n\n b = fid['blocks'][:]\n data = np.array(b, dtype=float)\n\n b = fid['block_treecode'][:]\n treecode = np.array(b, dtype=float)\n\n # get the dataset handle\n dset_id = fid.get('blocks')\n \n # from the dset handle, read the attributes\n time = dset_id.attrs.get('time')\n iteration = dset_id.attrs.get('iteration')\n box = dset_id.attrs.get('domain-size')\n version=dset_id.attrs.get('version')\n\n\n fid.close()\n\n jmin, jmax = get_max_min_level( treecode )\n N = data.shape[0]\n Bs = data.shape[1:]\n Bs = np.asarray(Bs[::-1]) # we have to flip the array since hdf5 stores in [Nz, Ny, Nx] order\n \n if version == 20200408 or version == 20231602:\n Bs = Bs-1\n #print(\"!!!Warning old (old branch: newGhostNodes) version of wabbit format detected!!!\")\n else:\n print(\"This file includes redundant points\")\n \n if verbose:\n print(\"Time=%e it=%i N=%i Bs[0]=%i Bs[1]=%i Jmin=%i Jmax=%i\" % (time, iteration, N, Bs[0], Bs[1], jmin, jmax) )\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n\n if return_iteration:\n return time, x0, dx, box, data, treecode, iteration[0]\n else:\n return time, x0, dx, box, data, treecode",
"def read(file):\n\n blocks = ['bus', 'load', 'fshunt', 'gen', 'branch', 'transf', 'area',\n 'twotermdc', 'vscdc', 'impedcorr', 'mtdc', 'msline', 'zone',\n 'interarea', 'owner', 'facts', 'swshunt', 'gne', 'Q']\n nol = [1, 1, 1, 1, 1, 4, 1,\n 0, 0, 0, 0, 0, 1,\n 0, 1, 0, 0, 0, 0]\n rawd = re.compile('rawd\\d\\d')\n\n retval = True\n version = 0\n b = 0 # current block index\n raw = {}\n for item in blocks:\n raw[item] = []\n\n data = []\n mdata = [] # multi-line data\n mline = 0 # line counter for multi-line models\n\n # parse file into raw with to_number conversions\n fid = open(file, 'r')\n for num, line in enumerate(fid.readlines()):\n line = line.strip()\n if num == 0: # get basemva and frequency\n data = line.split('/')[0]\n data = data.split(',')\n\n mva = float(data[1])\n freq = float(data[5])\n version = int(data[2])\n\n if not version:\n version = int(rawd.search(line).group(0).strip('rawd'))\n if version < 32 or version > 33:\n logging.warning('RAW file version is not 32 or 33. Error may occur.')\n continue\n elif num == 1: # store the case info line\n logging.info(line)\n continue\n elif num == 2:\n continue\n elif num >= 3:\n if line[0:2] == '0 ' or line[0:3] == ' 0 ': # end of block\n b += 1\n continue\n elif line[0] is 'Q': # end of file\n break\n data = line.split(',')\n\n data = [to_number(item) for item in data]\n mdata.append(data)\n mline += 1\n if mline == nol[b]:\n if nol[b] == 1:\n mdata = mdata[0]\n raw[blocks[b]].append(mdata)\n mdata = []\n mline = 0\n fid.close()\n\n # add device elements params and add to PSAT formatted dictionary\n\n for data in raw['bus']:\n \"\"\"version 32:\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10\n ID, NAME, BasekV, Type, Area Zone Owner Va, Vm, latitude longitude\n \"\"\"\n idx = data[0]\n ty = data[3]\n angle = data[8]\n try:\n lat = data[9]\n except:\n # logging.warning('<No Coordinates in .raw file>')\n param = {'idx': idx,\n 'name': data[1],\n 'Vn': data[2],\n 'type': data[3],\n 'area': data[4],\n 'voltage': data[7],\n 'region': data[5],\n 'owner': data[6],\n 'angle': angle,\n }\n psatlist = [data[0], data[2], data[7], angle, data[4], data[5]]\n else:\n param = {'idx': idx,\n 'name': data[1],\n 'Vn': data[2],\n 'type': data[3],\n 'area': data[4],\n 'voltage': data[7],\n 'region': data[5],\n 'owner': data[6],\n 'angle': angle,\n 'latitude': data[9],\n 'longitude': data[10]\n }\n psatlist = [data[0], data[2], data[7], angle, data[4], data[5], data[9], data[10]]\n Settings.Bus.append(psatlist)\n Settings.BusNames.append(data[1])\n # Add BusSTORE Dictionary For Later Reference\n Settings.BusStore[idx] = param\n\n xcoord = [34.560040, 34.938385, 34.360040, 40.5152473, 40.3142473, 36.527401, 36.857401, 36.687401, 36.856401,\n 40.487041, 36.903901, 36.702901, 35.832561, 33.386047, 33.185047, 37.105571, 37.104154, 33.706718,\n 37.103549, 36.703539, 37.103559, 36.703549, 36.033561, 35.631561, 36.032561, 35.732561, 36.525401,\n 36.857401, 49.869314, 50.969314, 51.979314, 52.481674, 54.973192, 56.276212, 41.734596, 34.551015,\n 34.652015, 34.537507, 34.587507, 34.157904, 33.714453, 33.762453, 39.548160, 39.496160, 34.313143,\n 34.545782, 34.380686, 34.111686, 34.137762, 34.118650, 34.158650, 33.918650, 33.718650, 34.018650,\n 34.018650, 34.018650, 34.018650, 34.018650, 34.312456, 34.315456, 34.243600, 34.566258, 34.565258,\n 46.064672, 46.565672, 45.514571, 45.606833, 45.806833, 44.890000, 45.596416, 45.295416, 45.891161,\n 47.954899, 46.511440, 45.913936, 45.713936, 46.669335, 47.954899, 47.624154, 43.784730, 44.482350,\n 42.006860, 42.934919, 
42.731919, 43.013135, 44.068350, 43.558350, 42.438350, 42.938350, 44.068350,\n 43.558350, 43.048350, 42.638350, 44.068350, 43.558350, 43.048350, 42.638350, 43.620189, 39.120428,\n 40.398031, 35.216200, 35.215200, 36.202099, 39.777745, 39.539598, 37.052929, 35.403217, 35.352217,\n 36.807243, 39.567450, 40.807689, 40.806689, 41.008689, 39.555494, 37.954721, 38.406721, 38.906721,\n 38.656721]\n ycoord = [-109.277313, -110.303798, -109.777313, -107.546455, -107.546455, -108.325669, -108.654569, -108.486669,\n -108.325669, -107.185575, -111.390408, -111.390408, -111.448566, -112.860397, -112.659397, -108.243555,\n -108.441191, -112.322033, -111.590816, -111.190816, -111.190816, -111.590806, -111.648566, -111.248566,\n -111.249566, -111.647566, -108.655669, -108.323669, -122.150895, -122.150895, -122.150895, -121.61684,\n -121.924221, -122.21370, -108.790427, -117.568105, -117.538105, -118.607375, -118.658375, -118.280282,\n -118.146319, -118.096319, -112.52797, -112.72797, -118.690631, -118.389938, -118.478496, -118.478496,\n -118.299917, -118.095428, -118.095428, -118.095428, -118.095428, -118.195428, -118.395428, -117.995428,\n -117.795428, -117.995428, -118.481217, -118.891217, -118.391667, -117.166428, -117.368428, -106.60906,\n -106.80906, -122.681289, -121.114785, -122.113785, -123.29000, -121.312202, -121.114202, -106.612578,\n -118.997945, -112.88531, -120.692286, -120.693974, -119.571501, -120.997945, -122.219492, -118.77463,\n -121.019484, -121.316546, -114.419206, -114.419206, -120.956476, -120.79484, -120.93484, -121.216546,\n -121.156546, -121.215484, -121.135484, -121.255484, -121.175484, -121.013484, -120.733484, -121.053484,\n -120.973484, -118.865882, -122.073631, -122.263453, -120.847567, -120.900567, -120.129849, -122.142965,\n -122.262993, -121.021929, -119.450452, -119.450452, -121.779037, -122.276225, -122.135718, -121.935718,\n -121.935718, -121.24000, -121.18379, -121.10879, -121.27379, -121.23979]\n\n #for idx, line in enumerate(Settings.Bus):\n # line.extend([xcoord[idx], ycoord[idx]])\n\n maxV = 1.1\n minV = 0.9\n maxQ = 1\n minQ = 0\n convimp = 0\n status = 1\n loss = 1\n\n for data in raw['load']:\n \"\"\"version 32:\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11\n Bus, Id, Status, Area, Zone, PL(MW), QL (MW), IP, IQ, YP, YQ, OWNER\n \"\"\"\n\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n voltage = Settings.BusStore[busidx]['voltage']\n param = {'bus': busidx,\n 'Vn': vn,\n 'Sn': mva,\n 'p': (data[5] + data[7] * voltage + data[9] * voltage ** 2) / mva,\n 'q': (data[6] + data[8] * voltage - data[10] * voltage ** 2) / mva,\n 'owner': data[11],\n 'type': Settings.BusStore[busidx]['type'],\n 'voltage': voltage\n }\n\n psatlist = [busidx, mva, vn, param['p'], param['q'], maxV, minV, convimp, status]\n Settings.PQ.append(psatlist)\n \"\"\"CONFIRM THAT OTHER BUSES HAVE 0 P and 0 Q which are not added\"\"\"\n\n for data in raw['fshunt']:\n \"\"\"\n 0, 1, 2, 3, 4\n Bus, name, Status, g (MW), b (Mvar)\n \"\"\"\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n param = {'bus': busidx,\n 'Vn': vn,\n 'status': data[2],\n 'Sn': mva,\n 'g': data[3] / mva,\n 'b': data[4] / mva,\n }\n\n psatlist = [busidx, mva, vn, freq, param['g'], param['b'], param['status']]\n Settings.Shunt.append(psatlist)\n\n gen_idx = 0\n type = 6\n\n for data in raw['gen']:\n \"\"\"\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11, 12, 13, 14, 15, 16,17,18,19\n I,ID,PG,QG,QT,QB,VS,IREG,MBASE,ZR,ZX,RT,XT,GTAP,STAT,RMPCT,PT,PB,O1,F1\n \"\"\"\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n gen_mva 
= data[8]\n gen_idx += 1\n status = data[14]\n leak = 0\n param = {'Sn': gen_mva,\n 'Vn': vn,\n 'u': status,\n 'idx': gen_idx,\n 'bus': busidx,\n 'pg': status * data[2] / mva,\n 'qg': status * data[3] / mva,\n 'qmax': data[4] / mva,\n 'qmin': data[5] / mva,\n 'v0': data[6],\n 'ra': data[9], # ra armature resistance\n 'xs': data[10], # xs synchronous reactance\n 'pmax': data[16] / mva,\n 'pmin': data[17] / mva,\n }\n\n if Settings.BusStore[busidx]['type'] == 3: #Check Bus Type for Slack\n refangle = 0\n refBus = 1\n PGuess = 1\n swlist = [busidx, gen_mva, vn, param['v0'], refangle, param['qmax'], param['qmin'],\n maxV, minV, PGuess, loss, refBus, status]\n SW = swlist\n Settings.SW.append(swlist)\n Settings.SWStore[busidx] = param\n Settings.SynStore[busidx] = param\n continue\n\n if busidx not in Settings.BusStore.keys():\n \"\"\" Need data from .dyr file. Create initial list, then append data from .dyr\"\"\"\n else:\n # psatlist = [busidx, gen_mva, vn, freq, type, leak, param['ra'],param['xs']]\n # Syn.append(psatlist)\n Settings.SynStore[busidx] = param\n pvlist = [busidx, gen_mva, vn, param['pg'], Settings.BusStore[busidx]['voltage'],\n param['qmax'], param['qmin'], maxV, minV, loss, status]\n Settings.PV.append(pvlist)\n\n\n for data in raw['branch']:\n \"\"\"\n I,J,ID,R,X,B,RATEA,RATEB,RATEC,GI,BI,GJ,BJ,ST,LEN,O1,F1,...,O4,F4\n \"\"\"\n param = {'bus1': data[0],\n 'bus2': data[1],\n 'id' : data[2],\n 'r': data[3],\n 'x': data[4],\n 'b': data[5],\n 'rate_a': data[6],\n 'rate_b': data[7],\n 'rate_c': data[8],\n 'Vn': Settings.BusStore[data[0]]['Vn'],\n 'Vn2': Settings.BusStore[data[1]]['Vn'],\n 'length': data[14],\n 'Ilim': EMPTY,\n 'Plim': EMPTY,\n 'Slim': EMPTY,\n 'status': data[13]\n }\n\n psatlist = [param['bus1'], param['bus2'], param['rate_c'], param['Vn'], freq, EMPTY,\n param['length'], param['r'], param['x'], param['b'], param['Ilim'], param['Plim'], EMPTY, EMPTY,\n param['Slim'], param['status']]\n Settings.Lineij.append([data[0], data[1], data[2]])\n Settings.Lineji.append([data[1], data[0], data[2]])\n Settings.LineOrd[param['bus1']].append(psatlist)\n Settings.branches += 1\n Settings.linecount += 1\n Settings.LineBusMatij[param['bus2']].append(Settings.branches)\n Settings.LineBusMatji[param['bus1']].append(Settings.branches)\n\n for data in raw['transf']:\n \"\"\"\n I,J,K,CKT,CW,CZ,CM,MAG1,MAG2,NMETR,'NAME',STAT,O1,F1,...,O4,F4\n R1-2,X1-2,SBASE1-2\n WINDV1,NOMV1,ANG1,RATA1,RATB1,RATC1,COD1,CONT1,RMA1,RMI1,VMA1,VMI1,NTP1,TAB1,CR1,CX1\n WINDV2,NOMV2\n \"\"\"\n if len(data[1]) < 5:\n ty = 2\n else:\n ty = 3\n if ty == 3:\n continue\n # raise NotImplementedError('Three-winding transformer not implemented')\n\n tap = data[2][0]\n phi = data[2][2]\n\n if tap == 1 and phi == 0:\n trasf = False\n else:\n trasf = True\n param = {'trasf': trasf,\n 'bus1': data[0][0],\n 'bus2': data[0][1],\n 'u': data[0][11],\n 'b': data[0][8],\n 'r': data[1][0],\n 'x': data[1][1],\n 'tap': tap,\n 'phi': phi,\n 'rate_a': data[2][3],\n 'Vn': Settings.BusStore[busidx]['Vn'],\n 'Vn2': Settings.BusStore[busidx]['Vn'],\n # 'length': data[?][?], FIND CORRECT INDEX\n 'Ilim': EMPTY,\n 'Plim': EMPTY,\n 'Slim': EMPTY,\n }\n psatlist = [param['bus1'], param['bus2'], param['rate_a'], param['Vn'], freq, EMPTY,\n EMPTY, param['r'], param['x'], param['b'], param['Ilim'], param['Plim'], EMPTY, EMPTY,\n param['Slim'], param['u']]\n\n Settings.LineOrd[param['bus1']].append(psatlist)\n Settings.linecount += 1\n Settings.transformers += 1\n # ADD Line Data(All Branch Types) to Sys Param Dict after .dyr Transformer 
Data Added\n # Re-Order Line Data for correct sequence\n for key in Settings.LineOrd:\n for item in Settings.LineOrd[key]:\n Settings.Line.append(item)\n\n for data in raw['area']:\n Settings.Areas.append(data[4])\n\n for data in raw['zone']:\n Settings.Regions.append(data[1])\n\n return retval",
"def count_cells(fpath):\n cells = []\n for i in range(40):\n fname = f\"{fpath}/Mesh2d_{i}.vtu\"\n if not os.path.exists(fname):\n print(f\"File {fname} does not exist.\")\n break\n mesh = meshio.read(fname)\n for cell_block in mesh.cells:\n if cell_block.type in (\"triangle\"):\n num_cells = len(cell_block)\n print(f\"{i:2d}: {num_cells:6d} elements, {len(mesh.points):6d} vertices\")\n cells.append(num_cells)\n continue\n return cells",
"def read_groups_particles(filename):\n \n f = open(filename,'r')\n\n Ntot = fromstring(f.read(4),int32)[0]\n Pos\t = fromstring(f.read(3*4*Ntot),float32)\n Pos.shape = (Ntot,3)\n f.close()\n \n return Pos",
"def read_level(level: str):\n positions = []\n z = 0\n y = 0\n for line in level.splitlines():\n if not line:\n z += 1\n y = 0\n continue\n\n for x, char in enumerate(line):\n positions.append(([x, y, z], char))\n y += 1\n\n for pos, _ in positions:\n pos[2] = z - pos[2]\n\n return positions",
"def read_faces(zone_id, Nmin, Nmax, bc_type, face, ifile):\n \n line = ifile.readline()\n readline = False\n if re.search(re_parant, line): # check for initial paranthesis\n readline = True\n\n ls = []\n for i in range(Nmin, Nmax + 1):\n if readline:\n line = ifile.readline()\n readline = True\n ln = line.split()\n if face == 0:\n nd = int(ln[0]) # Number of nodes\n nds = [int(x, 16) for x in ln[1:(nd + 1)]]\n cells = [int(x, 16) for x in ln[(nd + 1):]]\n else:\n nd = face\n nds = [int(x, 16) for x in ln[:nd]]\n cells = [int(x, 16) for x in ln[nd:]]\n \n face_list.append([nd, copy(nds), copy(cells), bc_type, zone_id])\n if len(nds) == 2:\n face_cell_map[(nds[0], nds[1])] = copy(cells)\n face_cell_map[(nds[1], nds[0])] = copy(cells)\n\n face_number = len(face_list)\n if min(cells) == 0: # A boundary zone\n if zone_id in boundary_nodes:\n boundary_nodes[zone_id] += nds\n boundary_faces[zone_id] += [face_number - 1]\n for nd in nds:\n if nd in boundary_nodes_face_map[zone_id]:\n boundary_nodes_face_map[zone_id][nd] += [face_number - 1]\n else:\n boundary_nodes_face_map[zone_id][nd] = [face_number - 1]\n else:\n boundary_nodes[zone_id] = nds\n boundary_faces[zone_id] = [face_number - 1]\n boundary_nodes_face_map[zone_id] = { nd: [face_number - 1]}\n\n for c in cells:\n if c > 0: \n if not c in cell_face_map:\n cell_face_map[c] = [face_number]\n else:\n # Preliminary cell_face_map. Needs shaping up later\n cell_face_map[c].append(face_number) \n\n if min(cells) == 0:\n boundary_nodes[zone_id] = list(Set(boundary_nodes[zone_id]))",
"def file_read(file_name):\n \n #open specified file in read mode\n in_file = open(file_name, \"r\")\n \n #create data lists\n sp_length_v3 = []\n sp_period_v3 = [] \n\n #save header to string and split into list\n header_string = in_file.readline()\n header_v3 = header_string.split()\n \n #save revelent data to respective lists\n for line in in_file:\n values = line.split()\n sp_length_v3.append(float(values[1]))\n sp_period_v3.append(float(values[2]))\n \n #close the file\n in_file.close()\n \n #return 3D lists of lists containing data\n ans = [sp_length_v3, sp_period_v3, header_v3]\n \n return ans",
"def read(self, filePath):\n \n result = {\n 'coordinates': {\n 'count': 0,\n 'nodes': []\n },\n 'element_groups': { \n 'number_of_elements': 0,\n 'count': 0,\n 'groups': []\n },\n 'bars': [],\n 'materials': {\n 'count': 0,\n 'materials': []\n },\n 'geometric_properties': {\n 'count': 0\n },\n 'bcnodes': {\n 'count': 0\n },\n 'loads': {\n 'count': 0\n }\n }\n # print(result['coordinates']['nodes'])\n \n with open(filePath,'r') as f:\n lines = f.readlines()\n elementCounter = 0\n groupCounter = 0\n geometricCounter = 0\n\n for line in lines:\n line = line.strip()\n el = line.split(' ')\n \n if len(line) == 0:\n continue\n\n if len(line) != 0 and line[0] == \"*\":\n section = line[1:].lower()\n continue\n \n if section == 'coordinates':\n if len(el) == 1 :\n result[section]['count'] = el[0]\n else:\n result[section]['nodes'].append(Node(int(el[0]), float(el[1]), float(el[2])))\n \n elif section == 'element_groups':\n if len(line) == 1:\n result[section]['count'] = int(el[0])\n else: \n result[section]['groups'].append(Group(el[0], el[1], el[2]))\n result[section]['number_of_elements'] += int(el[1])\n\n elif section == 'incidences':\n groups = result['element_groups']['groups']\n nodes = result['coordinates']['nodes']\n print(el)\n\n currentGroup = groups[groupCounter]\n if (currentGroup.amount == 0):\n groupCounter += 1\n currentGroup = groups[groupCounter]\n \n print(\"Group n: {} count: {}\".format(currentGroup.n, currentGroup.amount))\n \n bar = Bar(el[0], nodes[int(el[1])-1], nodes[int(el[2])-1], groups[groupCounter])\n print(\n \"\"\"\n Bar {} created \n Start node: {} End Node: {} Group: {}\n \"\"\".format(bar.id, bar.startNode.n, bar.endNode.n, bar.group))\n result['bars'].append(bar)\n currentGroup.amount -= 1\n \n elif section == 'materials':\n if len(el) == 1:\n result[section]['count'] = el[0]\n groupCounter = 0\n else:\n material = Material(el[0], el[1], el[2])\n result[section]['materials'].append(material)\n result['element_groups']['groups'][groupCounter].setMaterial(material)\n groupCounter += 1\n\n elif section == 'geometric_properties':\n if geometricCounter == 0:\n result[section]['count'] = el[0]\n else:\n result['element_groups']['groups'][geometricCounter - 1].setSectionArea(\n el[0]\n )\n geometricCounter += 1\n\n elif section == 'bcnodes':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n nodeIndex = next((e for e, item in enumerate(\n result['coordinates']['nodes']) if item.n == int(el[0])), None\n )\n result['coordinates']['nodes'][nodeIndex].setRestriction(int(el[1]))\n\n elif section == 'loads':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n load = Load(el[1], el[2])\n nodeIndex = next((e for e, item in enumerate(\n result['coordinates']['nodes']) if item.n == int(el[0])), None\n )\n result['coordinates']['nodes'][nodeIndex].addLoad(load)\n\n for bar in result['bars']:\n bar.createLocalArray()\n\n print('---------- Parsing complete! ----------')\n pprint(result)\n print('---------------------------------------')\n\n return result",
"def analyze_pressure_dump(filename, Lx=200., Ly=200, Lz=900., N=10, bin_divide_flag=False, Natoms=113579):\n myfile = open(filename+'.txt')\n trajectory = []\n traj_pd = []\n frames = []\n\n for _ in range(3):\n next(myfile)\n count = 0\n while EOF(myfile):\n count += 1\n s = next(myfile) # info with the time step\n\n x = np.zeros(N, dtype=[('Chunk',np.float32), ('Coord1',np.float32), ('Ncount',np.float32), ('density',np.float32), ('temp',np.float32), ('vx',np.float32), ('fx',np.float32),('c_pciKE[1]',np.float32), ('c_pciKE[2]',np.float32), ('c_pciKE[3]',np.float32), ('c_pciVIR[1]',np.float32), ('c_pciVIR[2]',np.float32), ('c_pciVIR[3]',np.float32), ('c_pgelELAS[1]',np.float32), ('c_pgelELAS[2]',np.float32), ('c_pgelELAS[3]',np.float32), ('c_pgelVIR[1]', np.float32), ('c_pgelVIR[2]', np.float32), ('c_pgelVIR[3]', np.float32), ('c_pgelPAIR[1]', np.float32), ('c_pgelPAIR[2]', np.float32), ('c_pgelPAIR[3]', np.float32)])\n\n# Chunk Coord1 Ncount density/number temp vx fx c_pciKE[1] c_pciKE[2] c_pciKE[3] c_pciVIR[1] c_pciVIR[2] c_pciVIR[3] c_pgelELAS[1] c_pgelELAS[2] c_pgelELAS[3] c_pgelVIR[1] c_pgelVIR[2] c_pgelVIR[3] c_pgelPAIR[1] c_pgelPAIR[2] c_pgelPAIR[3]\n\n list_line = re.findall(\"[-+]?\\d+[\\.]?\\d*[eE]?[-+]?\\d*\", s)\n frame, _, _ = list_line\n frames.append(int(frame))\n # print( \"reading lines\")\n\n for i in xrange(N):\n count += 1\n s = next(myfile)\n list_line = re.findall(\"[-+]?\\d+[\\.]?\\d*[eE]?[-+]?\\d*\", s)\n # print( \"reading line\", i, list_line)\n for il, l in enumerate(list_line):\n x[i][il] = float(l)\n\n trajectory.append(x)\n\n # names = x.dtype.fields.keys()\n # data = x.dtype.fields.values()\n\n df = pd.DataFrame.from_records(x)\n traj_pd.append(df)\n\n myfile.close()\n\n\n\n # # volume = 218.*44.*44.\n volume = Lx*Ly*Lz\n # N_atoms = 113579\n # if bin_divide_flag:\n # bin_volume = volume / float(N)\n # else:\n # bin_volume = 1.\n\n bin_volume = volume / float(N)\n # bin_volume = volume\n # bin_volume /= float(Natoms)\n\n Combine_PD = pd.concat(traj_pd)\n FINAL_PD = pd.DataFrame()\n\n FINAL_PD['Coord1'] = Combine_PD['Coord1']\n FINAL_PD['p_ciKE'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pciKE[1]'] + Combine_PD['c_pciKE[2]'] + Combine_PD['c_pciKE[3]'])/(3.*bin_volume)\n FINAL_PD['p_ciVIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pciVIR[1]'] + Combine_PD['c_pciVIR[2]'] + Combine_PD['c_pciVIR[3]'])/(3.*bin_volume)\n FINAL_PD['p_gelELAS'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelELAS[1]'] + Combine_PD['c_pgelELAS[2]'] + Combine_PD['c_pgelELAS[3]'])/(3.*bin_volume)\n\n FINAL_PD['p_gelVIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelVIR[1]'] + Combine_PD['c_pgelVIR[2]'] + Combine_PD['c_pgelVIR[3]'])/(3.*bin_volume)\n FINAL_PD['p_gelPAIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelPAIR[1]'] + Combine_PD['c_pgelPAIR[2]'] + Combine_PD['c_pgelPAIR[3]'])/(3.*bin_volume)\n\n # So now I have to\n # P_bin = (sigma_per_atom_xx + ... + sigma_per_atom_zz)/(bin_volume*3)\n # *N_atoms_per_bin\n # N_atoms_per_bin = number_density*N_atoms\n\n\n df_concat = FINAL_PD\n\n by_row_index = df_concat.groupby(df_concat.index)\n df_means = by_row_index.mean()\n by_row_index_2 = df_concat.groupby(df_concat.index)\n df_stds = by_row_index_2.std()\n\n # print( df_means.head())\n # print( df_stds.head())\n return df_means, df_stds",
"def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)",
"def find_blockages(self):\n debug.info(1,\"Finding blockages.\") \n for layer in [self.vert_layer_number,self.horiz_layer_number]:\n self.retrieve_blockages(layer)",
"def toBlock_txt(filename):\n blocks = []\n block = []\n \n for line in open(filename).readlines()[3:-3]:\n spline = line.split(\"\\t\")\n if not re.search(\"[0-9]\", line):\n if block != []:\n blocks.append(block)\n block = []\n else:\n for i in spline[2:-2]:\n if re.search(\"[0-9]\", i):\n block.append(float(\"\".join([chr(j) for j in map(ord, i) if j > 0])))\n\n return blocks",
"def reader(filename,only_length=False):\n print(\"Counting lines in file %s\"%filename)\n total_lines=0\n for n,line in enumerate(open(filename,\"r\")):\n total_lines+=1\n \n if only_length:\n return total_lines\n \n X,Y,Z,W,J=[np.zeros(total_lines) for _ in range(5)]\n \n for n, line in enumerate(open(filename, 'r')):\n if n%1000000==0:\n print(\"Reading line %d of %d from file %s\" %(n,total_lines,filename))\n split_line=np.array(line.split(\" \"), dtype=float) \n X[n]=split_line[0];\n Y[n]=split_line[1];\n Z[n]=split_line[2];\n W[n]=split_line[3];\n J[n]=int(split_line[4]);\n return X,Y,Z,W,J",
"def read_input(filename):\n with open(filename, 'r') as f:\n (N) = map(int, next(f).split())\n def parse_line(line):\n l = line.split()\n h = 0 if l[0] == 'H' else 1\n n = int(l[1])\n return [h, l[2:]]\n\n photos = transform_strings([parse_line(line) for line in f])\n return (len(photos), photos)",
"def readFlow(fn):\n with open(fn, 'rb') as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print('Magic number incorrect. Invalid .flo file')\n return None\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n # print 'Reading %d x %d flo file\\n' % (w, h)\n data = np.fromfile(f, np.float32, count=2*int(w)*int(h))\n # Reshape data into 3D array (columns, rows, bands)\n # The reshape here is for visualization, the original code is (w,h,2)\n return np.resize(data, (int(h), int(w), 2))"
] | [
"0.62876856",
"0.62518936",
"0.60057634",
"0.5994908",
"0.5979423",
"0.5979423",
"0.59490186",
"0.5920892",
"0.58702004",
"0.5859132",
"0.5837403",
"0.5836277",
"0.5833768",
"0.58279437",
"0.5771492",
"0.5762479",
"0.57591116",
"0.5756959",
"0.5705301",
"0.56720674",
"0.5665514",
"0.5642536",
"0.56392765",
"0.56271666",
"0.5609831",
"0.56093323",
"0.560319",
"0.560272",
"0.559966",
"0.55995566"
] | 0.707528 | 0 |
Read a wabbit-type HDF5 of block-structured data. Return time, x0, dx, box, data, treecode. Get number of blocks and blocksize as N, Bs = data.shape[0], data.shape[1] | def read_wabbit_hdf5(file, verbose=True, return_iteration=False):
import h5py
import numpy as np
if verbose:
print("~~~~~~~~~~~~~~~~~~~~~~~~~")
print("Reading file %s" % (file) )
fid = h5py.File(file,'r')
b = fid['coords_origin'][:]
x0 = np.array(b, dtype=float)
b = fid['coords_spacing'][:]
dx = np.array(b, dtype=float)
b = fid['blocks'][:]
data = np.array(b, dtype=float)
b = fid['block_treecode'][:]
treecode = np.array(b, dtype=float)
# get the dataset handle
dset_id = fid.get('blocks')
# from the dset handle, read the attributes
time = dset_id.attrs.get('time')
iteration = dset_id.attrs.get('iteration')
box = dset_id.attrs.get('domain-size')
version=dset_id.attrs.get('version')
fid.close()
jmin, jmax = get_max_min_level( treecode )
N = data.shape[0]
Bs = data.shape[1:]
Bs = np.asarray(Bs[::-1]) # we have to flip the array since hdf5 stores in [Nz, Ny, Nx] order
if version == 20200408 or version == 20231602:
Bs = Bs-1
#print("!!!Warning old (old branch: newGhostNodes) version of wabbit format detected!!!")
else:
print("This file includes redundant points")
if verbose:
print("Time=%e it=%i N=%i Bs[0]=%i Bs[1]=%i Jmin=%i Jmax=%i" % (time, iteration, N, Bs[0], Bs[1], jmin, jmax) )
print("~~~~~~~~~~~~~~~~~~~~~~~~~")
if return_iteration:
return time, x0, dx, box, data, treecode, iteration[0]
else:
return time, x0, dx, box, data, treecode | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_wabbit_hdf5( file, time, x0, dx, box, data, treecode, iteration = 0, dtype=np.float64 ):\n import h5py\n import numpy as np\n\n\n Level = np.size(treecode,1)\n if len(data.shape)==4:\n # 3d data\n Bs = np.zeros([3,1])\n N, Bs[0], Bs[1], Bs[2] = data.shape\n Bs = Bs[::-1]\n print( \"Writing to file=%s max=%e min=%e size=%i %i %i \" % (file, np.max(data), np.min(data), Bs[0], Bs[1], Bs[2]) )\n\n else:\n # 2d data\n Bs = np.zeros([2,1])\n N, Bs[0], Bs[1] = data.shape\n Bs = Bs[::-1]\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Writing file %s\" % (file) )\n print(\"Time=%e it=%i N=%i Bs[0]=%i Bs[1]=%i Level=%i Domain=[%d, %d]\" % (time, iteration, N, Bs[0], Bs[1],Level, box[0], box[1]) )\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n\n fid = h5py.File( file, 'w')\n\n fid.create_dataset( 'coords_origin', data=x0, dtype=dtype )\n fid.create_dataset( 'coords_spacing', data=dx, dtype=dtype )\n fid.create_dataset( 'blocks', data=data, dtype=dtype )\n fid.create_dataset( 'block_treecode', data=treecode, dtype=dtype )\n\n fid.close()\n\n fid = h5py.File(file,'a')\n dset_id = fid.get( 'blocks' )\n dset_id.attrs.create( \"version\", 20200902) # this is used to distinguish wabbit file formats\n dset_id.attrs.create('time', time, dtype=dtype)\n dset_id.attrs.create('iteration', iteration)\n dset_id.attrs.create('domain-size', box, dtype=dtype )\n dset_id.attrs.create('total_number_blocks', N )\n fid.close()",
"def read_wabbit_hdf5_dir(dir):\n import numpy as np\n import re\n import ntpath\n import os\n\n it=0\n data={'time': [],'x0':[],'dx':[],'treecode':[]}\n # we loop over all files in the given directory\n for file in os.listdir(dir):\n # filter out the good ones (ending with .h5)\n if file.endswith(\".h5\"):\n # from the file we can get the fieldname\n fieldname=re.split('_',file)[0]\n print(fieldname)\n time, x0, dx, box, field, treecode = read_wabbit_hdf5(os.path.join(dir, file))\n #increase the counter\n data['time'].append(time[0])\n data['x0'].append(x0)\n data['dx'].append(dx)\n data['treecode'].append(treecode)\n if fieldname not in data:\n # add the new field to the dictionary\n data[fieldname]=[]\n data[fieldname].append(field)\n else: # append the field to the existing data field\n data[fieldname].append(field)\n it=it+1\n # the size of the domain\n data['box']=box\n #return time, x0, dx, box, data, treecode\n return data",
"def dense_to_wabbit_hdf5(ddata, name , Bs, box_size = None, time = 0, iteration = 0, dtype=np.float64):\n # concatenate filename in the same style as wabbit does\n fname = name + \"_%12.12d\" % int(time*1e6) + \".h5\"\n Ndim = ddata.ndim\n Nsize = np.asarray(ddata.shape)\n level = 0\n Bs = np.asarray(Bs)# make sure Bs is a numpy array\n Bs = Bs[::-1] # flip Bs such that Bs=[BsY, BsX] the order is the same as for Nsize=[Ny,Nx]\n \n #########################################################\n # do some initial checks on the input data\n # 1) check if the size of the domain is given\n if box_size is None:\n box = np.ones(Ndim)\n else:\n box = np.asarray(box_size)\n\n if (type(Bs) is int):\n Bs = [Bs]*Ndim\n \n # 2) check if number of lattice points is block decomposable\n # loop over all dimensions\n for d in range(Ndim):\n # check if Block is devidable by Bs\n if (np.remainder(Nsize[d], Bs[d]-1) == 0):\n if(is_power2(Nsize[d]//(Bs[d]-1))):\n level = int(max(level, np.log2(Nsize[d]/(Bs[d]-1))))\n else:\n err(\"Number of Intervals must be a power of 2!\")\n else:\n err(\"datasize must be multiple of Bs!\")\n \n # 3) check dimension of array:\n if Ndim < 2 or Ndim > 3:\n err(\"dimensions are wrong\")\n #########################################################\n\n # assume periodicity:\n data = np.zeros(Nsize+1,dtype=dtype)\n if Ndim == 2:\n data[:-1, :-1] = ddata\n # copy first row and column for periodicity\n data[-1, :] = data[0, :]\n data[:, -1] = data[:, 0]\n else:\n data[:-1, :-1, :-1] = ddata\n # copy for periodicity\n data[-1, :, :] = data[0, :, :]\n data[:, -1, :] = data[:, 0, :]\n data[:, :, -1] = data[:, :, 0]\n\n # number of intervals in each dimension\n Nintervals = [int(2**level)]*Ndim # note [val]*3 means [val, val , val]\n Lintervals = box[:Ndim]/np.asarray(Nintervals)\n Lintervals = Lintervals[::-1]\n \n\n x0 = []\n treecode = []\n dx = []\n bdata = []\n if Ndim == 3:\n for ibx in range(Nintervals[0]):\n for iby in range(Nintervals[1]):\n for ibz in range(Nintervals[2]):\n x0.append([ibx, iby, ibz]*Lintervals)\n dx.append(Lintervals/(Bs-1))\n\n lower = [ibx, iby, ibz]* (Bs - 1)\n lower = np.asarray(lower, dtype=int)\n upper = lower + Bs\n\n treecode.append(blockindex2treecode([ibx, iby, ibz], 3, level))\n bdata.append(data[lower[0]:upper[0], lower[1]:upper[1], lower[2]:upper[2]])\n else:\n for ibx in range(Nintervals[0]):\n for iby in range(Nintervals[1]):\n x0.append([ibx, iby]*Lintervals)\n dx.append(Lintervals/(Bs-1))\n lower = [ibx, iby]* (Bs - 1)\n lower = np.asarray(lower, dtype=int)\n upper = lower + Bs\n treecode.append(blockindex2treecode([ibx, iby], 2, level))\n bdata.append(data[lower[0]:upper[0], lower[1]:upper[1]])\n\n\n x0 = np.asarray(x0,dtype=dtype)\n dx = np.asarray(dx,dtype=dtype)\n treecode = np.asarray(treecode, dtype=dtype)\n block_data = np.asarray(bdata, dtype=dtype)\n\n write_wabbit_hdf5(fname, time, x0, dx, box, block_data, treecode, iteration, dtype )\n return fname",
"def _readHDF5(self):\n\n h5 = h5py.File(self.pointInputFile, 'r')\n self.coords = h5['geometry/vertices'][:]\n self.stations = h5['stations'][:]\n self.dispRaw = h5['vertex_fields/displacement'][self.timeStep,:,:]\n h5.close()\n\n self.numStations = self.coords.shape[0]\n\n return",
"def load_data(file_path):\n with h5py.File(file_path) as f:\n # load meta info\n fs, channels, p_names, signals = _get_info(f)\n\n # load raw data\n data = [f['protocol{}/raw_data'.format(k + 1)][:] for k in range(len(p_names))]\n df = pd.DataFrame(np.concatenate(data), columns=channels)\n\n # load signals data\n signals_data = [f['protocol{}/signals_data'.format(k + 1)][:] for k in range(len(p_names))]\n df_signals = pd.DataFrame(np.concatenate(signals_data), columns=['signal_'+s for s in signals])\n df = pd.concat([df, df_signals], axis=1)\n\n # load timestamps\n if 'timestamp' in df:\n timestamp_data = [f['protocol{}/timestamp_data'.format(k + 1)][:] for k in range(len(p_names))]\n df['timestamps'] = np.concatenate(timestamp_data)\n\n # events data\n events_data = [f['protocol{}/mark_data'.format(k + 1)][:] for k in range(len(p_names))]\n df['events'] = np.concatenate(events_data)\n\n # set block names and numbers\n df['block_name'] = np.concatenate([[p]*len(d) for p, d in zip(p_names, data)])\n df['block_number'] = np.concatenate([[j + 1]*len(d) for j, d in enumerate(data)])\n return df, fs, channels, p_names",
"def data_reading(data_file):\n\n # The number of samples is needed to read the HDF5 file, which is stored in the name\n # Split by '/' to remove the directory, and by '.' to remove the file format\n file_name = data_file.split(\"/\")[-1].split(\".\")[-2]\n # The file name ends with the number of samples and before that the number of included neighbours\n n_samples = int(file_name.split(\"_\")[-1])\n n_neighbours = int(file_name.split(\"_\")[-2])\n\n # Read the data\n h5f = h5py.File(data_file, 'r')\n\n # The data set name is the name of the path where the data file can be found\n data = h5f[\"dataset_{}\".format(n_samples)][:]\n\n # Close the H5py file\n h5f.close()\n\n return data, n_samples, n_neighbours",
"def data_reading(data_file):\n\n # The number of samples is needed to read the HDF5 file, which is stored in the name\n # Split by '/' to remove the directory, and by '.' to remove the file format\n file_name = data_file.split(\"/\")[-1].split(\".\")[-2]\n # The file name ends with the number of samples and before that the number of included neighbours\n n_samples = int(file_name.split(\"_\")[-1])\n n_neighbours = int(file_name.split(\"_\")[-2])\n\n # Read the data\n h5f = h5py.File(data_file, 'r')\n\n # The data set name is the name of the path where the data file can be found\n data = h5f[\"dataset_{}\".format(n_samples)][:]\n\n # Close the H5py file\n h5f.close()\n\n return data, n_samples, n_neighbours",
"def get_box_data(index, hdf5_data):\n meta_data = dict()\n meta_data['height'] = []\n meta_data['label'] = []\n meta_data['left'] = []\n meta_data['top'] = []\n meta_data['width'] = []\n\n def print_attrs(name, obj):\n vals = []\n if obj.shape[0] == 1:\n vals.append(obj[0][0])\n else:\n for k in range(obj.shape[0]):\n vals.append(int(hdf5_data[obj[k][0]][0][0]))\n meta_data[name] = vals\n\n box = hdf5_data['/digitStruct/bbox'][index]\n hdf5_data[box[0]].visititems(print_attrs)\n return meta_data",
"def _block_info(data):\n # check that the data is an array of bytes\n if len(data) != 6:\n raise ValueError(\"'data' should be 6 bytes. Got {} instead.\".format(\n len(data)))\n return struct.unpack('<Hi', data)",
"def load_data(infile, nstep): \n \n f = h5py.File(infile, 'r')\n \n edges_grp = f['edges']\n xedges = np.asarray(edges_grp['x'][nstep], dtype=float)\n yedges = np.asarray(edges_grp['y'][nstep], dtype=float)\n\n time = np.asarray(f['time'][nstep])\n\n tables_grp = f['tables']\n rho_hist = np.asarray(tables_grp['rho'][nstep], dtype=float)\n vx_hist = np.asarray(tables_grp['vx'][nstep], dtype=float)\n vy_hist = np.asarray(tables_grp['vy'][nstep], dtype=float)\n vorticity = np.asarray(tables_grp['vorticity'][nstep], dtype=float) \n \n box_grp = f['box']\n lx = box_grp['x'][...]\n ly = box_grp['y'][...]\n \n #nsteps = f['nsteps'][...]\n f.close()\n\n return lx, ly, time, xedges, yedges, rho_hist, vx_hist, vy_hist, vorticity",
"def _read_bsurfs(self, data: bytes, n: int) -> int:\n bsurfs",
"def read_h5(self):\n infile = h5py.File(self.inf_name,'r')\n\n vardict = self.labdict\n #store data with the correct labels\n for k in infile['plasma/1d'].keys():\n try:\n vardict[k] = infile[self.labdict[k]].value\n except:\n vardict[k] = []\n\n vardict['a_ions']=infile['/plasma/anum'].value\n vardict['znum']=infile['/plasma/znum'].value\n \n\n self.rho_in = vardict['rho']\n self._rho_vol = infile['distributions/rhoDist/abscissae/dim1'].value[1:]\n self._volumes = infile['distributions/rhoDist/shellVolume'].value\n self.nrho_in = np.size(self.rho_in)\n\n if vardict['a_ions'][0]!='/':\n self.nspec = len(vardict['a_ions'])\n else:\n self.nspec = vardict['ni'].shape[1]\n print(\"Number of ions: \", self.nspec)\n if len(vardict['a_ions'])!=len(vardict['znum']):\n print(\"ERROR! array of A and Z don't have the same length\")\n\n self.A = vardict['a_ions']\n self.Z = vardict['znum']\n self.nion = self.nspec\n \n self.te_in = vardict['te'][:]\n self.ne_in = vardict['ne'][:]\n self.ti_in = vardict['ti'][:]\n ni1_in = vardict['ni'][:,0]\n self.ni_in = np.zeros((self.nion, self.nrho_in),dtype=float)\n self.ni_in[0,:] = ni1_in\n if self.nion==2:\n ni2_in = vardict['ni'][:,1]\n self.ni_in[1,:] = ni2_in\n elif self.nion==3:\n ni2_in = vardict['ni'][:,1]\n ni3_in = vardict['ni'][:,2]\n self.ni_in[1,:] = ni2_in\n self.ni_in[2,:] = ni3_in\n\n try:\n self.vt_in = vardict['vtor']\n except:\n self.vt_in = np.zeros(self.nrho_in,dtype=float)\n\n try:\n self.zeff_in = vardict['zeff'][:]\n except:\n self.zeff_in = np.zeros(self.nrho_in,dtype=float)\n\n self.ni = np.zeros((self.nion, self.nrho),dtype = float)\n self.spline()",
"def read_hdf5(filename, namelist=None, **kwargs):\n\n print('Reading %s...'%filename)\n\n fid = h5py.File(filename, mode='r')\n \n data = read_hdf5_tree(fid, namelist, **kwargs)\n\n fid.close()\n \n print('Finished reading %s.'%filename)\n return data",
"def readHtk(filename):\n with open(filename, \"rb\") as f:\n # Read header\n nSamples, sampPeriod, sampSize, parmKind = struct.unpack(\">iihh\", f.read(12))\n # Read data\n data = struct.unpack(\">%df\" % (nSamples * sampSize / 4), f.read(nSamples * sampSize))\n # return numpy.array(data).reshape(nSamples, int(sampSize / 4))\n return nSamples, sampPeriod, sampSize, parmKind, data",
"def _get_data_chunk(self):\n if self._start_pos < self.max_pos:\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self.max_pos))\n self.data = self.h5_main[self._start_pos:self._end_pos, self._current_sho_spec_slice]\n elif self._current_forc < self._num_forcs - 1:\n # Resest for next FORC\n self._current_forc += 1\n\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._current_met_spec_slice = slice(self.metrics_spec_inds_per_forc * self._current_forc,\n self.metrics_spec_inds_per_forc * (self._current_forc + 1))\n self._get_dc_offset()\n\n self._start_pos = 0\n self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self.max_pos))\n self.data = self.h5_main[self._start_pos:self._end_pos, self._current_sho_spec_slice]\n\n else:\n self.data = None\n\n return",
"def read_hdf5(path_to_file):\n\n print(\"\\nReading HDF5 file: \", path_to_file)\n file = h5py.File(path_to_file, 'r')\n\n # List the groups\n groups = list(file.keys())\n print(\"Groups available: \", groups)\n\n # Read Zemax Metadata\n zemax_metadata = {}\n print(\"\\nZemax Metadata:\")\n for key in file['Zemax Metadata'].attrs.keys():\n print('{} : {}'.format(key, file['Zemax Metadata'].attrs[key]))\n zemax_metadata[key] = file['Zemax Metadata'].attrs[key]\n\n # Read the analysis groups\n for group_name in groups:\n if group_name != 'Zemax Metadata':\n analysis_group = file[group_name]\n print('\\nAnalysis: ', group_name)\n # For each Analysis Group we loop over subgroups\n for subgroup_key in analysis_group.keys():\n subgroup = analysis_group[subgroup_key]\n print('Subgroup #', subgroup_key)\n # List the metadata of the subgroup\n for att_key in subgroup.attrs.keys():\n print(' {} : {}'.format(att_key, subgroup.attrs[att_key]))\n\n file.close()\n\n return zemax_metadata",
"def block_level_distribution_file( file ):\n import h5py\n import numpy as np\n\n # open the h5 wabbit file\n fid = h5py.File(file,'r')\n\n # read treecode table\n b = fid['block_treecode'][:]\n treecode = np.array(b, dtype=float)\n\n # close file\n fid.close()\n\n # number of blocks\n Nb = treecode.shape[0]\n\n # min/max level. required to allocate list!\n jmin, jmax = get_max_min_level( treecode )\n counter = np.zeros(jmax+1)\n\n # fetch level for each block and count\n for i in range(Nb):\n J = treecode_level(treecode[i,:])\n counter[J] += 1\n\n return counter",
"def read_h5file(self, fname, datasetname):\n with h5py.File(fname, 'r') as f:\n atom_pos = f.get(datasetname + '/r').value # atom position -> N x 3 array\n ion_list = f.get(\n datasetname + '/xyz').value # length = N, contain atom type id for each atom\n self.atom_pos = atom_pos[np.argsort(ion_list)]\n _, idx = np.unique(np.sort(ion_list), return_index=True)\n self.split_idx = np.append(idx, [len(ion_list)])\n\n # get atom factor table, sorted by atom type id\n atom_type = f.get(\n datasetname + '/T').value # atom type array, each type is represented by an integer\n self.num_atom_types = len(atom_type)\n ff_table = f.get(datasetname + '/ff').value\n self.ff_table = ff_table[np.argsort(atom_type)]\n\n self.q_sample = f.get(datasetname + '/halfQ').value\n self.num_q_samples = len(self.q_sample)\n self.compton_q_sample = f.get(datasetname + '/Sq_halfQ').value\n self.num_compton_q_samples = len(self.compton_q_sample)\n self.sBound = f.get(datasetname + '/Sq_bound').value\n self.nFree = f.get(datasetname + '/Sq_free').value",
"def read_treecode_hdf5(file):\n import h5py\n import numpy as np\n\n fid = h5py.File(file,'r')\n\n b = fid['block_treecode'][:]\n treecode = np.array(b, dtype=float)\n\n return treecode",
"def get_data_block_contents_bytes(self):\n bb = self.volume.blkdev.block_bytes\n if self.volume.is_ffs:\n return bb\n else:\n return bb - 24",
"def loadHDF5(ofile):\n \n ## Groups \n size_grp = ofile['size']\n beads_grp = ofile['beads']\n props_grp = ofile['props']\n \n ## Datasets\n size = np.asarray(size_grp['size'])\n x = np.asarray(beads_grp['x'])\n comx = np.asarray(props_grp['comx'])\n \n ## Filament list\n fil_grp = props_grp['filament_list']\n fil_list_1 = np.asarray(fil_grp['1'])\n \n print size\n print x\n print comx\n print fil_list_1\n \n return",
"def readBlocks(self):\n self.data_block_list = []\n self.data_block_list.append(Rhd2000DataBlock(self))\n #read data blocks untill the EOF\n while True:\n try:\n self.data_block_list.append(Rhd2000DataBlock(self))\n except:\n break",
"def parse_hdf5(inp, close=True, **kwargs):\n import json\n import h5py\n # Path\n path = kwargs.pop('path', '/')\n # Open\n if isinstance(inp, basestring):\n hdf5 = h5py.File(inp, 'r')\n else:\n hdf5 = inp\n # Data\n data = hdf5[path+'data'][()]\n # Meta\n if 'meta' in hdf5[path].keys():\n meta = json.loads(hdf5[path+'meta'][()])\n # Headers\n for jj,heads in enumerate(meta['headers']):\n try:\n meta['headers'][jj] = fits.Header.fromstring(meta['headers'][jj])\n except TypeError: # dict\n if not isinstance(meta['headers'][jj], dict):\n raise IOError(\"Bad meta type\")\n else:\n meta = None\n # Units\n units = json.loads(hdf5[path+'units'][()])\n for key,item in units.items():\n if item == 'dimensionless_unit':\n units[key] = u.dimensionless_unscaled\n else:\n units[key] = getattr(u, item)\n # Other arrays\n try:\n sig = data['sig']\n except (NameError, IndexError):\n sig = None\n try:\n co = data['co']\n except (NameError, IndexError):\n co = None\n # Finish\n if close:\n hdf5.close()\n return XSpectrum1D(data['wave'], data['flux'], sig=sig, co=co,\n meta=meta, units=units, **kwargs)",
"def read_hdf5(self, file_name,\r\n projections_start=None,\r\n projections_end=None,\r\n projections_step=None,\r\n slices_start=None,\r\n slices_end=None,\r\n slices_step=None,\r\n pixels_start=None,\r\n pixels_end=None,\r\n pixels_step=None,\r\n white_start=None,\r\n white_end=None,\r\n dark_start=None,\r\n dark_end=None,\r\n dtype='float32'):\r\n print \"Reading data...\"\r\n self.file_name = file_name\r\n\r\n # Initialize f to null.\r\n f = None\r\n\r\n # Get the file_name in lower case.\r\n lFn = file_name.lower()\r\n\r\n # Split the string with the delimeter '.'\r\n end = lFn.split('.')\r\n\r\n # If the string has an extension.\r\n if len(end) > 1:\r\n # Check.\r\n if end[len(end) - 1] == 'h5' or end[len(end) - 1] == 'hdf':\r\n f = Hdf5()\r\n\r\n # If f != None the call read on it.\r\n if not f == None:\r\n # Read data from exchange group.\r\n self.data = f.read(file_name,\r\n array_name='exchange/data',\r\n x_start=projections_start,\r\n x_end=projections_end,\r\n x_step=projections_step,\r\n y_start=slices_start,\r\n y_end=slices_end,\r\n y_step=slices_step,\r\n z_start=pixels_start,\r\n z_end=pixels_end,\r\n z_step=pixels_step).astype(dtype)\r\n\r\n # Read white field data from exchange group.\r\n print white_start, white_end, slices_start, slices_end\r\n self.white = f.read(file_name,\r\n array_name='exchange/data_white',\r\n x_start=white_start,\r\n x_end=white_end,\r\n y_start=slices_start,\r\n y_end=slices_end,\r\n y_step=slices_step,\r\n z_start=pixels_start,\r\n z_end=pixels_end,\r\n z_step=pixels_step).astype(dtype)\r\n\r\n # Read dark field data from exchange group.\r\n self.dark = f.read(file_name,\r\n array_name='exchange/data_dark',\r\n x_start=dark_start,\r\n x_end=dark_end,\r\n y_start=slices_start,\r\n y_end=slices_end,\r\n y_step=slices_step,\r\n z_start=pixels_start,\r\n z_end=pixels_end,\r\n z_step=pixels_step).astype(dtype)\r\n\r\n # Assign the rotation center.\r\n self.center = self.data.shape[2] / 2\r\n else:\r\n print 'Unsupported file.'",
"def _read_block_v20(self, size, blk_size=5):\n arr = np.empty(size)\n for offset in range(0, size, blk_size):\n values = self._next_noncomment().split()\n arr[offset:offset+blk_size] = np.array(values, dtype=np.float64)\n return arr",
"def ReadData( fName = '/tmp/chartdata' ):\n blocks = common.ReadDataFromFile( fName )\n\n return blocks",
"def from_neuropype_h5(filename: str, chunk_names: List[str] = []) -> List[Tuple[str, dict]]:\n import numpy as np\n import h5py\n from pandas import DataFrame\n f = h5py.File(filename, 'r')\n\n chunks = []\n if 'chunks' in f.keys():\n chunks_group = f['chunks']\n ch_keys = [_ for _ in chunks_group.keys() if _ in chunk_names]\n for ch_key in ch_keys:\n chunk_group = chunks_group.get(ch_key)\n\n # Process data\n block_group = chunk_group.get('block')\n data_ = block_group.get('data')\n if isinstance(data_, h5py.Dataset):\n data = data_[()]\n else:\n # Data is a group. This only happens with sparse matrices.\n import scipy.sparse\n data = scipy.sparse.csr_matrix((data_['data'][:], data_['indices'][:], data_['indptr'][:]),\n data_.attrs['shape'])\n\n axes_group = block_group.get('axes')\n axes = []\n for ax_ix, axis_key in enumerate(axes_group.keys()):\n axis_group = axes_group.get(axis_key)\n ax_type = axis_group.attrs.get('type')\n new_ax = {'name': axis_key, 'type': ax_type}\n if ax_type == 'axis':\n new_ax.update(dict(x=np.arange(data.shape[ax_ix])))\n elif ax_type == 'time':\n nom_rate = axis_group.attrs.get('nominal_rate')\n if np.isnan(nom_rate):\n nom_rate = None\n new_ax.update(dict(nominal_rate=nom_rate,\n times=axis_group.get('times')[()]))\n elif ax_type == 'frequency':\n new_ax.update(dict(frequencies=axis_group.get('frequencies')[()]))\n elif ax_type == 'space':\n new_ax.update(dict(names=axis_group.get('names')[()],\n naming_system=axis_group.attrs['naming_system'],\n positions=axis_group.get('positions')[()],\n coordinate_system=axis_group.attrs['coordinate_system'],\n units=axis_group.get('units')[()]))\n elif ax_type == 'feature':\n new_ax.update(dict(names=axis_group.get('names')[()],\n units=axis_group.get('units')[()],\n properties=axis_group.get('properties')[()],\n error_distrib=axis_group.get('error_distrib')[()],\n sampling_distrib=axis_group.get('sampling_distrib')[()]))\n elif ax_type == 'instance':\n new_ax.update({'times': axis_group.get('times')[()]})\n if 'instance_type' in axis_group.attrs:\n new_ax.update({'instance_type': axis_group.attrs['instance_type']})\n _dat = axis_group.get('data')[()]\n if not _dat.dtype.names:\n new_ax.update({'data': axis_group.get('data')[()]})\n else:\n _df = DataFrame(_dat)\n # Convert binary objects to string objects\n str_df = _df.select_dtypes([np.object])\n str_df = str_df.stack().str.decode('utf-8').unstack()\n for col in str_df:\n _df[col] = str_df[col]\n new_ax.update({'data': _df})\n\n elif ax_type == 'statistic':\n new_ax.update(dict(param_types=axis_group.get('param_types')[()]))\n elif ax_type == 'lag':\n new_ax.update(dict(xlags=axis_group.get('lags')[()]))\n if new_ax is not None:\n axes.append(new_ax)\n\n chunks.append((ch_key, dict(data=data, axes=axes,\n props=_recurse_get_dict_from_group(chunk_group.get('props')))))\n\n return chunks",
"def get_lh5_header(in_file, verbose=False):\n hf = h5py.File(in_file)\n\n # pretty print the raw structure, with all attributes\n if verbose:\n def print_groups(name, obj):\n if isinstance(obj, h5py.Group):\n print(f\"GROUP /{name}\")\n indent = \" \"\n if isinstance(obj, h5py.Dataset):\n print(\" DATASET\", obj.shape, obj.name)\n indent = \" \"\n for att, val in obj.attrs.items():\n print(f\"{indent}ATTRIBUTE {att}:\", val)\n print(\" \")\n hf.visititems(print_groups) # accesses __call__\n \n # find each LH5 \"Table\" contained in the file, and create a DataFrame header\n tables = {}\n for g_top in hf.keys():\n \n h5group = hf[f\"/{g_top}\"]\n attrs = {att:val for att, val in h5group.attrs.items()}\n \n # LH5 table condition\n if \"datatype\" in attrs.keys() and \"table{\" in attrs[\"datatype\"]:\n \n # call our nice iterator at this group level\n table = {g_top:[]}\n for (path, name, size, dtype, units, spec) in get_datasets(h5group):\n table[g_top].append((name, size, dtype, units, spec))\n \n hdr = pd.DataFrame(table[g_top], columns=['name','size','dtype',\n 'units','spec'])\n \n # fix waveform datatype to match flattened_data\n if 'waveform' in hdr['name'].values:\n wf_dt = h5group['waveform/values/flattened_data'].dtype\n hdr.loc[hdr['name'] == 'waveform', ['dtype']] = wf_dt\n \n tables[g_top] = hdr\n\n return tables",
"def hdf5_data(self):\n if self._hdf5_data is None:\n self._hdf5_data = self.parent.request_data(Hdf5DataSpec)\n return self._hdf5_data",
"def read_data(path):\n with h5py.File(path, 'r') as hf:\t\n input_ = np.array(hf.get('input'))\n label_ = np.array(hf.get('label'))\n return input_, label_"
] | [
"0.7095553",
"0.684908",
"0.67961204",
"0.62631047",
"0.608267",
"0.6046284",
"0.6046284",
"0.5913353",
"0.585708",
"0.5756927",
"0.57358474",
"0.56931156",
"0.5675092",
"0.5658997",
"0.55911225",
"0.5576085",
"0.5573528",
"0.55517644",
"0.55503565",
"0.5536557",
"0.5531724",
"0.5523873",
"0.5522037",
"0.55155987",
"0.54862285",
"0.54741365",
"0.54573774",
"0.54388076",
"0.54351604",
"0.54303324"
] | 0.74340147 | 0 |
Read a wabbit-type HDF5 of block-structured data. Same as read_wabbit_hdf5, but reads ONLY the treecode array. | def read_treecode_hdf5(file):
import h5py
import numpy as np
fid = h5py.File(file,'r')
b = fid['block_treecode'][:]
treecode = np.array(b, dtype=float)
return treecode | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_wabbit_hdf5(file, verbose=True, return_iteration=False):\n import h5py\n import numpy as np\n\n if verbose:\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Reading file %s\" % (file) )\n\n fid = h5py.File(file,'r')\n b = fid['coords_origin'][:]\n x0 = np.array(b, dtype=float)\n\n b = fid['coords_spacing'][:]\n dx = np.array(b, dtype=float)\n\n b = fid['blocks'][:]\n data = np.array(b, dtype=float)\n\n b = fid['block_treecode'][:]\n treecode = np.array(b, dtype=float)\n\n # get the dataset handle\n dset_id = fid.get('blocks')\n \n # from the dset handle, read the attributes\n time = dset_id.attrs.get('time')\n iteration = dset_id.attrs.get('iteration')\n box = dset_id.attrs.get('domain-size')\n version=dset_id.attrs.get('version')\n\n\n fid.close()\n\n jmin, jmax = get_max_min_level( treecode )\n N = data.shape[0]\n Bs = data.shape[1:]\n Bs = np.asarray(Bs[::-1]) # we have to flip the array since hdf5 stores in [Nz, Ny, Nx] order\n \n if version == 20200408 or version == 20231602:\n Bs = Bs-1\n #print(\"!!!Warning old (old branch: newGhostNodes) version of wabbit format detected!!!\")\n else:\n print(\"This file includes redundant points\")\n \n if verbose:\n print(\"Time=%e it=%i N=%i Bs[0]=%i Bs[1]=%i Jmin=%i Jmax=%i\" % (time, iteration, N, Bs[0], Bs[1], jmin, jmax) )\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n\n if return_iteration:\n return time, x0, dx, box, data, treecode, iteration[0]\n else:\n return time, x0, dx, box, data, treecode",
"def read_wabbit_hdf5_dir(dir):\n import numpy as np\n import re\n import ntpath\n import os\n\n it=0\n data={'time': [],'x0':[],'dx':[],'treecode':[]}\n # we loop over all files in the given directory\n for file in os.listdir(dir):\n # filter out the good ones (ending with .h5)\n if file.endswith(\".h5\"):\n # from the file we can get the fieldname\n fieldname=re.split('_',file)[0]\n print(fieldname)\n time, x0, dx, box, field, treecode = read_wabbit_hdf5(os.path.join(dir, file))\n #increase the counter\n data['time'].append(time[0])\n data['x0'].append(x0)\n data['dx'].append(dx)\n data['treecode'].append(treecode)\n if fieldname not in data:\n # add the new field to the dictionary\n data[fieldname]=[]\n data[fieldname].append(field)\n else: # append the field to the existing data field\n data[fieldname].append(field)\n it=it+1\n # the size of the domain\n data['box']=box\n #return time, x0, dx, box, data, treecode\n return data",
"def write_wabbit_hdf5( file, time, x0, dx, box, data, treecode, iteration = 0, dtype=np.float64 ):\n import h5py\n import numpy as np\n\n\n Level = np.size(treecode,1)\n if len(data.shape)==4:\n # 3d data\n Bs = np.zeros([3,1])\n N, Bs[0], Bs[1], Bs[2] = data.shape\n Bs = Bs[::-1]\n print( \"Writing to file=%s max=%e min=%e size=%i %i %i \" % (file, np.max(data), np.min(data), Bs[0], Bs[1], Bs[2]) )\n\n else:\n # 2d data\n Bs = np.zeros([2,1])\n N, Bs[0], Bs[1] = data.shape\n Bs = Bs[::-1]\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Writing file %s\" % (file) )\n print(\"Time=%e it=%i N=%i Bs[0]=%i Bs[1]=%i Level=%i Domain=[%d, %d]\" % (time, iteration, N, Bs[0], Bs[1],Level, box[0], box[1]) )\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n\n fid = h5py.File( file, 'w')\n\n fid.create_dataset( 'coords_origin', data=x0, dtype=dtype )\n fid.create_dataset( 'coords_spacing', data=dx, dtype=dtype )\n fid.create_dataset( 'blocks', data=data, dtype=dtype )\n fid.create_dataset( 'block_treecode', data=treecode, dtype=dtype )\n\n fid.close()\n\n fid = h5py.File(file,'a')\n dset_id = fid.get( 'blocks' )\n dset_id.attrs.create( \"version\", 20200902) # this is used to distinguish wabbit file formats\n dset_id.attrs.create('time', time, dtype=dtype)\n dset_id.attrs.create('iteration', iteration)\n dset_id.attrs.create('domain-size', box, dtype=dtype )\n dset_id.attrs.create('total_number_blocks', N )\n fid.close()",
"def read_hdf5(filename, namelist=None, **kwargs):\n\n print('Reading %s...'%filename)\n\n fid = h5py.File(filename, mode='r')\n \n data = read_hdf5_tree(fid, namelist, **kwargs)\n\n fid.close()\n \n print('Finished reading %s.'%filename)\n return data",
"def read_hdf5(path_to_file):\n\n print(\"\\nReading HDF5 file: \", path_to_file)\n file = h5py.File(path_to_file, 'r')\n\n # List the groups\n groups = list(file.keys())\n print(\"Groups available: \", groups)\n\n # Read Zemax Metadata\n zemax_metadata = {}\n print(\"\\nZemax Metadata:\")\n for key in file['Zemax Metadata'].attrs.keys():\n print('{} : {}'.format(key, file['Zemax Metadata'].attrs[key]))\n zemax_metadata[key] = file['Zemax Metadata'].attrs[key]\n\n # Read the analysis groups\n for group_name in groups:\n if group_name != 'Zemax Metadata':\n analysis_group = file[group_name]\n print('\\nAnalysis: ', group_name)\n # For each Analysis Group we loop over subgroups\n for subgroup_key in analysis_group.keys():\n subgroup = analysis_group[subgroup_key]\n print('Subgroup #', subgroup_key)\n # List the metadata of the subgroup\n for att_key in subgroup.attrs.keys():\n print(' {} : {}'.format(att_key, subgroup.attrs[att_key]))\n\n file.close()\n\n return zemax_metadata",
"def _readHDF5(self):\n\n h5 = h5py.File(self.pointInputFile, 'r')\n self.coords = h5['geometry/vertices'][:]\n self.stations = h5['stations'][:]\n self.dispRaw = h5['vertex_fields/displacement'][self.timeStep,:,:]\n h5.close()\n\n self.numStations = self.coords.shape[0]\n\n return",
"def load_data(file_path):\n with h5py.File(file_path) as f:\n # load meta info\n fs, channels, p_names, signals = _get_info(f)\n\n # load raw data\n data = [f['protocol{}/raw_data'.format(k + 1)][:] for k in range(len(p_names))]\n df = pd.DataFrame(np.concatenate(data), columns=channels)\n\n # load signals data\n signals_data = [f['protocol{}/signals_data'.format(k + 1)][:] for k in range(len(p_names))]\n df_signals = pd.DataFrame(np.concatenate(signals_data), columns=['signal_'+s for s in signals])\n df = pd.concat([df, df_signals], axis=1)\n\n # load timestamps\n if 'timestamp' in df:\n timestamp_data = [f['protocol{}/timestamp_data'.format(k + 1)][:] for k in range(len(p_names))]\n df['timestamps'] = np.concatenate(timestamp_data)\n\n # events data\n events_data = [f['protocol{}/mark_data'.format(k + 1)][:] for k in range(len(p_names))]\n df['events'] = np.concatenate(events_data)\n\n # set block names and numbers\n df['block_name'] = np.concatenate([[p]*len(d) for p, d in zip(p_names, data)])\n df['block_number'] = np.concatenate([[j + 1]*len(d) for j, d in enumerate(data)])\n return df, fs, channels, p_names",
"def read_hdf5(file_path):\n if not os.path.exists(file_path):\n logging.fatal(\"Cannot read feature file {}.\".format(file_path))\n exit()\n hdf5_file = h5py.File(file_path, 'r')\n data = np.array(hdf5_file['data'])\n hdf5_file.close()\n\n return data",
"def read(self, simtype):\n\n if simtype == 'original':\n if self.filename.endswith(\".pkl\"):\n logging.debug(\"Loading pickle file %s\", self.filename)\n data = pd.read_pickle(self.filename)\n\n elif self.filename.endswith(\".hdf5\"):\n logging.debug(\"Loading HDF5 file %s\", self.filename)\n with h5py.File(self.filename, \"r\") as data_file:\n #print('treeIndex', data_file[\"treeIndex\"].keys())\n #print('haloTrees', data_file[\"haloTrees\"].keys())\n \n # Find dimensionality of keys\n columns_1dim = [] \n columns_2dim = [] \n for column in self.columns:\n if len(data_file[\"/haloTrees/%s\" % column].shape) == 1:\n columns_1dim.append(column)\n else:\n columns_2dim.append(column)\n \n # 1D keys\n data = pd.DataFrame(\n {\n column: data_file[\"/haloTrees/%s\" % column].value\n for column in columns_1dim\n },\n columns=columns_1dim\n ).set_index(\"nodeIndex\")\n del columns_1dim\n\n # 2D keys\n for column in columns_2dim:\n if column == 'position':\n pos = data_file[\"/haloTrees/%s\" % column].value\n data['X'] = pd.Series(pos[:, 0], index=data.index)\n data['Y'] = pd.Series(pos[:, 1], index=data.index)\n data['Z'] = pd.Series(pos[:, 2], index=data.index)\n del columns_2dim\n\n data.rename(index=str,\n columns={\"snapshotNumber\": \"snapnum\"})\n ## eliminate fake elements with isIntegrated=1\n #data = data[data.isInterpolated != 1]\n\n else:\n raise TypeError(\"Unknown filetype %s\" % self.filename)\n if simtype == 'EAGLE':\n if self.filename.endswith(\".pkl\"):\n logging.debug(\"Loading pickle file %s\", self.filename)\n data = pd.read_pickle(self.filename)\n\n elif self.filename.endswith(\".hdf5\"):\n logging.debug(\"Loading HDF5 file %s\", self.filename)\n data_file = h5py.File(self.filename, 'r')\n column_mt = []\n column_sh = []\n for column in self.columns:\n if column in data_file['MergerTree']:\n column_mt.append(column)\n else:\n column_sh.append(column)\n\n data = pd.DataFrame(\n {\n column: data_file[\"/MergerTree/%s\" % column].value\n for column in column_mt\n },\n columns=column_mt\n ).set_index(\"HaloID\")\n #.set_index(data_file[\"/Merger/HaloID\"].value)\n\n for column in column_sh:\n data[column] = pd.Series(data_file[\"/Subhalo/%s\" % column].value,\n index=data.index)\n data = data.rename(index=str,\n columns={\"SnapNum\": \"snapnum\", #\"HaloID\": \"nodeIndex\",\n \"DescendantID\" : \"descendantIndex\"})\n else:\n raise TypeError(\"Unknown filetype %s\" % self.filename)\n\n return data",
"def _read_h5_dataset(self):\n dev = self.getParentObj()\n top = dev.getFileDescriptor()\n for attr in self._attr_list:\n data = top.get(attr)\n if data is None:\n msg = \"Unable to open object (Object %s doesn't exist)\" % attr\n raise TaurusException(msg)\n top = data\n return data",
"def block_level_distribution_file( file ):\n import h5py\n import numpy as np\n\n # open the h5 wabbit file\n fid = h5py.File(file,'r')\n\n # read treecode table\n b = fid['block_treecode'][:]\n treecode = np.array(b, dtype=float)\n\n # close file\n fid.close()\n\n # number of blocks\n Nb = treecode.shape[0]\n\n # min/max level. required to allocate list!\n jmin, jmax = get_max_min_level( treecode )\n counter = np.zeros(jmax+1)\n\n # fetch level for each block and count\n for i in range(Nb):\n J = treecode_level(treecode[i,:])\n counter[J] += 1\n\n return counter",
"def dense_to_wabbit_hdf5(ddata, name , Bs, box_size = None, time = 0, iteration = 0, dtype=np.float64):\n # concatenate filename in the same style as wabbit does\n fname = name + \"_%12.12d\" % int(time*1e6) + \".h5\"\n Ndim = ddata.ndim\n Nsize = np.asarray(ddata.shape)\n level = 0\n Bs = np.asarray(Bs)# make sure Bs is a numpy array\n Bs = Bs[::-1] # flip Bs such that Bs=[BsY, BsX] the order is the same as for Nsize=[Ny,Nx]\n \n #########################################################\n # do some initial checks on the input data\n # 1) check if the size of the domain is given\n if box_size is None:\n box = np.ones(Ndim)\n else:\n box = np.asarray(box_size)\n\n if (type(Bs) is int):\n Bs = [Bs]*Ndim\n \n # 2) check if number of lattice points is block decomposable\n # loop over all dimensions\n for d in range(Ndim):\n # check if Block is devidable by Bs\n if (np.remainder(Nsize[d], Bs[d]-1) == 0):\n if(is_power2(Nsize[d]//(Bs[d]-1))):\n level = int(max(level, np.log2(Nsize[d]/(Bs[d]-1))))\n else:\n err(\"Number of Intervals must be a power of 2!\")\n else:\n err(\"datasize must be multiple of Bs!\")\n \n # 3) check dimension of array:\n if Ndim < 2 or Ndim > 3:\n err(\"dimensions are wrong\")\n #########################################################\n\n # assume periodicity:\n data = np.zeros(Nsize+1,dtype=dtype)\n if Ndim == 2:\n data[:-1, :-1] = ddata\n # copy first row and column for periodicity\n data[-1, :] = data[0, :]\n data[:, -1] = data[:, 0]\n else:\n data[:-1, :-1, :-1] = ddata\n # copy for periodicity\n data[-1, :, :] = data[0, :, :]\n data[:, -1, :] = data[:, 0, :]\n data[:, :, -1] = data[:, :, 0]\n\n # number of intervals in each dimension\n Nintervals = [int(2**level)]*Ndim # note [val]*3 means [val, val , val]\n Lintervals = box[:Ndim]/np.asarray(Nintervals)\n Lintervals = Lintervals[::-1]\n \n\n x0 = []\n treecode = []\n dx = []\n bdata = []\n if Ndim == 3:\n for ibx in range(Nintervals[0]):\n for iby in range(Nintervals[1]):\n for ibz in range(Nintervals[2]):\n x0.append([ibx, iby, ibz]*Lintervals)\n dx.append(Lintervals/(Bs-1))\n\n lower = [ibx, iby, ibz]* (Bs - 1)\n lower = np.asarray(lower, dtype=int)\n upper = lower + Bs\n\n treecode.append(blockindex2treecode([ibx, iby, ibz], 3, level))\n bdata.append(data[lower[0]:upper[0], lower[1]:upper[1], lower[2]:upper[2]])\n else:\n for ibx in range(Nintervals[0]):\n for iby in range(Nintervals[1]):\n x0.append([ibx, iby]*Lintervals)\n dx.append(Lintervals/(Bs-1))\n lower = [ibx, iby]* (Bs - 1)\n lower = np.asarray(lower, dtype=int)\n upper = lower + Bs\n treecode.append(blockindex2treecode([ibx, iby], 2, level))\n bdata.append(data[lower[0]:upper[0], lower[1]:upper[1]])\n\n\n x0 = np.asarray(x0,dtype=dtype)\n dx = np.asarray(dx,dtype=dtype)\n treecode = np.asarray(treecode, dtype=dtype)\n block_data = np.asarray(bdata, dtype=dtype)\n\n write_wabbit_hdf5(fname, time, x0, dx, box, block_data, treecode, iteration, dtype )\n return fname",
"def read_h5_file_beads(folder, filen):\n \n ### file path\n \n fpath = folder + filen + '.h5'\n assert os.path.exists(fpath), \"The out.h5 file does NOT exist for \" + fpath\n fl = h5py.File(fpath, 'r')\n \n ### bead information\n \n xu = np.array(fl['/beads/xu'], dtype=np.float32)\n #pol = np.array(fl['/beads/pol'], dtype=np.float32)\n cid = np.array(fl['/beads/cid'], dtype=np.int32)\n \n ### simulation information\n \n lx = fl['/info/box/x'][...]\n ly = fl['/info/box/y'][...]\n dt = fl['/info/dt'][...]\n nsteps = fl['/info/nsteps'][...]\n nfils = fl['/info/nfils'][...]\n nbeads = fl['/info/nbeads'][...]\n nsamp = fl['/info/nsamp'][...]\n nbpf = fl['/info/nbpf'][...]\n \n ### simulation parameters\n \n density = fl['/param/density'][...]\n kappa = fl['/param/kappa'][...]\n km = fl['/param/km'][...]\n pa = fl['/param/pa'][...]\n pp = fl['/param/pp'][...]\n bl = fl['/param/bl'][...]\n sigma = fl['/param/sigma'][...]\n \n fl.close()\n \n ### generate classes to submerge data\n \n sim = misc_tools.Simulation(lx, ly, dt, nsteps, nfils, nbeads, nsamp, nbpf, \\\n density, kappa, km, pa, pp, bl, sigma)\n beads = misc_tools.Beads(xu, cid)\n \n return sim, beads",
"def read(filename: str) -> orm.Data:\n return from_bands_inspect(load(hdf5_file=filename))",
"def _read(self, item):\n return read_hdf5(self.file_name, title=self._get_h5_path(item))",
"def read_hdf5(filename, **extras):\n groups = {'sampling': {}, 'obs': {}}\n res = {}\n with h5py.File(filename, \"r\") as hf:\n # loop over the groups\n for group, d in groups.items():\n # read the arrays in that group into the dictionary for that group\n for k, v in hf[group].items():\n d[k] = np.array(v)\n # unserialize the attributes and put them in the dictionary\n for k, v in hf[group].attrs.items():\n try:\n d[k] = json.loads(v)\n except:\n d[k] = v\n # do top-level attributes.\n for k, v in hf.attrs.items():\n try:\n res[k] = json.loads(v)\n except:\n res[k] = v\n res.update(groups['sampling'])\n res['obs'] = groups['obs']\n try:\n res['obs']['filters'] = load_filters([str(f) for f in res['obs']['filters']])\n except:\n pass\n try:\n res['rstate'] = pickle.loads(res['rstate'])\n except:\n pass\n try:\n mp = [names_to_functions(p.copy()) for p in res['model_params']]\n res['model_params'] = mp\n except:\n pass\n\n return res",
"def read_data(path):\n with h5py.File(path, 'r') as hf:\t\n input_ = np.array(hf.get('input'))\n label_ = np.array(hf.get('label'))\n return input_, label_",
"def read_lh5(in_file, key=None, cols=None, ilo=0, ihi=None):\n if \".lh5\" not in in_file:\n print(\"Error, unknown file:\", in_file)\n exit()\n \n # open the file in context manager to avoid weird crashes \n t_start = time.time()\n with h5py.File(os.path.expanduser(in_file)) as hf:\n \n header = get_lh5_header(f_lh5, verbose=False)\n\n # pick off first table by default, or let the user specify the name\n table = list(header.keys())[0] if key is None else key\n df_hdr = header[table] \n \n # this function reads the Table into memory\n df = read_table(table, hf, df_hdr, ilo, ihi)\n\n # t_elapsed = time.time() - t_start\n # print(\"elapsed: {t_elapsed:.4f} sec\")\n \n return df",
"def read_h5file(self, fname, datasetname):\n with h5py.File(fname, 'r') as f:\n atom_pos = f.get(datasetname + '/r').value # atom position -> N x 3 array\n ion_list = f.get(\n datasetname + '/xyz').value # length = N, contain atom type id for each atom\n self.atom_pos = atom_pos[np.argsort(ion_list)]\n _, idx = np.unique(np.sort(ion_list), return_index=True)\n self.split_idx = np.append(idx, [len(ion_list)])\n\n # get atom factor table, sorted by atom type id\n atom_type = f.get(\n datasetname + '/T').value # atom type array, each type is represented by an integer\n self.num_atom_types = len(atom_type)\n ff_table = f.get(datasetname + '/ff').value\n self.ff_table = ff_table[np.argsort(atom_type)]\n\n self.q_sample = f.get(datasetname + '/halfQ').value\n self.num_q_samples = len(self.q_sample)\n self.compton_q_sample = f.get(datasetname + '/Sq_halfQ').value\n self.num_compton_q_samples = len(self.compton_q_sample)\n self.sBound = f.get(datasetname + '/Sq_bound').value\n self.nFree = f.get(datasetname + '/Sq_free').value",
"def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n return data",
"def _loadHDF5File(self, filename):\n matfile = h5py.File(filename)\n\n self.StokesI = np.transpose(matfile['StokesI'][:,:])\n self.StokesQ = np.transpose(matfile['StokesQ'][:,:])\n self.StokesU = np.transpose(matfile['StokesU'][:,:])\n self.StokesV = np.transpose(matfile['StokesV'][:,:])\n self.detectorPosition = matfile['detectorPosition'][:,0]\n self.detectorDirection = matfile['detectorDirection'][:,0]\n self.detectorVisang = matfile['detectorVisang'][0,0]\n\n try: self.wall = matfile['wall'][:,:]\n except KeyError: pass\n\n try: self.separatrix = matfile['separatrix'][:,:]\n except KeyError: pass",
"def load_h5py_file(fname, offsets = [0, 0, 0]):\n # Load the data\n f = h5py.File(fname, 'r') # r for read only\n print(\"Available fields: \", list(f.keys())) # f is a dictionary. Let's look at the keys\n\n # Create variables from loaded dictionary\n neural_data = f['ripple_data'][:,0:32]\n emg_data = f['ripple_data'][:,32:]\n force_data = f['data'][0:6,:].transpose()\n fs = f['mySampleRate'][:]\n\n # Transform matrix for force data\n TF = [[1.117\t, -0.096747,\t 1.7516, 0.03441, -0.88072, 0.042127, -0.89026],\n [0.3134, 0.0041349, 0.0045219, -0.055942, 1.5273, 0.037719,-1.5227],\n [0.135\t, 1.4494, -0.061075, 1.6259, 0.083867, 1.5999, 0.0058155]]\n TF = np.array(TF)\n\n # Read force data\n force_data = np.concatenate((np.ones((len(force_data),1)), force_data), axis=1)\n force_data = force_data @ TF.transpose()\n\n # Make baseband zero\n force_data[:,0] = force_data[:,0] - offsets[0]\n force_data[:,1] = force_data[:,1] - offsets[1]\n force_data[:,2] = force_data[:,2] - offsets[2]\n\n # Use sent and received pulse signals to allign DAQ and RIPPLE data\n pulse_sent = f['data'][6,:].transpose()\n ps_ind, = np.nonzero(pulse_sent>1)\n ps_ind = ps_ind[0]\n\n pulse_received = f['ttl_data'][:,0]\n pr_ind, = np.nonzero(pulse_received>2000)\n pr_ind = pr_ind[0]\n\n p_diff = ps_ind - pr_ind\n\n # Align data\n if p_diff > 0:\n pulse_sent = np.concatenate((pulse_sent[p_diff:], np.zeros((p_diff,))), axis=0)\n trailing = np.mean(force_data[-int(fs*0.1):], axis=0) * np.ones((p_diff,1))\n force_data = np.concatenate((force_data[p_diff:,:], trailing))\n else:\n pulse_sent = np.concatenate((np.zeros((-p_diff,)), pulse_sent[:p_diff]), axis=0)\n leading = np.mean(force_data[:int(fs * 0.1)], axis=0) * np.ones((-p_diff, 1))\n force_data = np.concatenate((leading, force_data[:p_diff,:]))\n\n # Choose force channel for analysis\n force_data = force_data[:,1]\n force_data = -force_data # Invert the sign (increased as applied force increased)\n\n # Choose EMG data\n emg_data = emg_data[:,(5,15)]-emg_data[:,(23,25)]\n\n # Re-order EMG data so that 1. Dorsal 2. Biceps 3. Ventral 4. Triceps\n positions3 = (0,1)\n emg_data = emg_data[:,positions3]\n\n # Corresponding time vectors\n time = f['ripple_time'][:]\n return neural_data, emg_data, force_data, time, fs",
"def plot_wabbit_file( file, savepng=False, savepdf=False, cmap='rainbow', caxis=None,\n caxis_symmetric=False, title=True, mark_blocks=True, block_linewidth=1.0,\n gridonly=False, contour=False, ax=None, fig=None, ticks=True,\n colorbar=True, dpi=300, block_edge_color='k',\n block_edge_alpha=1.0, shading='auto',\n colorbar_orientation=\"vertical\",\n gridonly_coloring='mpirank', flipud=False, fileContainsGhostNodes=False):\n\n import numpy as np\n import matplotlib.patches as patches\n import matplotlib.pyplot as plt\n import h5py\n\n cb = []\n # read procs table, if we want to draw the grid only\n if gridonly:\n fid = h5py.File(file,'r')\n\n # read procs array from file\n b = fid['procs'][:]\n procs = np.array(b, dtype=float)\n\n if gridonly_coloring in ['refinement-status', 'refinement_status']:\n b = fid['refinement_status'][:]\n ref_status = np.array(b, dtype=float)\n\n if gridonly_coloring == 'lgt_id':\n b = fid['lgt_ids'][:]\n lgt_ids = np.array(b, dtype=float)\n \n fid.close()\n\n # read data\n time, x0, dx, box, data, treecode = read_wabbit_hdf5( file )\n\n # get number of blocks and blocksize\n N, Bs = data.shape[0], data.shape[1:]\n\n # we need these lists to modify the colorscale, as each block usually gets its own\n # and we would rather like to have a global one.\n h, c1, c2 = [], [], []\n\n\n if fig is None:\n fig = plt.gcf()\n fig.clf()\n\n if ax is None:\n ax = fig.gca()\n\n # clear axes\n ax.cla()\n\n # if only the grid is plotted, we use grayscale for the blocks, and for\n # proper scaling we need to know the max/min level in the grid\n jmin, jmax = get_max_min_level( treecode )\n\n\n\n if gridonly:\n #----------------------------------------------------------------------\n # Grid data only (CPU distribution, level, or grid only)\n #----------------------------------------------------------------------\n cm = plt.cm.get_cmap(cmap)\n\n # loop over blocks and plot them individually\n for i in range(N):\n # draw some other qtys (mpirank, lgt_id or refinement-status)\n if gridonly_coloring in ['mpirank', 'cpu']:\n color = cm( procs[i]/max(procs) )\n\n elif gridonly_coloring in ['refinement-status', 'refinement_status']:\n color = cm((ref_status[i]+1.0) / 2.0)\n\n elif gridonly_coloring == 'level':\n level = treecode_level( treecode[i,:] )\n if (jmax-jmin>0):\n c = 0.9 - 0.75*(level-jmin)/(jmax-jmin)\n color = [c,c,c]\n else:\n color ='w'\n \n \n elif gridonly_coloring == 'file-index':\n color = cm( float(i)/float(N) )\n\n tag = \"%i\" % (i)\n x = Bs[1]/2*dx[i,1]+x0[i,1]\n if not flipud:\n y = Bs[0]/2*dx[i,0]+x0[i,0]\n else:\n y = box[0] - Bs[0]/2*dx[i,0]+x0[i,0]\n plt.text( x, y, tag, fontsize=6, horizontalalignment='center', verticalalignment='center')\n \n elif gridonly_coloring == 'lgt_id':\n color = cm( lgt_ids[i]/max(lgt_ids) )\n\n tag = \"%i\" % (lgt_ids[i])\n x = Bs[1]/2*dx[i,1]+x0[i,1]\n if not flipud:\n y = Bs[0]/2*dx[i,0]+x0[i,0]\n else:\n y = box[0] - Bs[0]/2*dx[i,0]+x0[i,0]\n \n plt.text( x, y, tag, fontsize=6, horizontalalignment='center', verticalalignment='center')\n \n elif gridonly_coloring == 'treecode':\n color = 'w'\n tag = \"\"\n for jj in range(treecode.shape[1]):\n if treecode[i,jj] != -1:\n tag += \"%1.1i\" % treecode[i,jj]\n\n print(tag)\n \n x = Bs[1]/2*dx[i,1]+x0[i,1]\n if not flipud:\n y = Bs[0]/2*dx[i,0]+x0[i,0]\n else:\n y = box[0] - Bs[0]/2*dx[i,0]+x0[i,0]\n plt.text( x, y, tag, fontsize=6, horizontalalignment='center', verticalalignment='center')\n \n \n elif gridonly_coloring == 'none':\n color = 'w'\n else:\n raise ValueError(\"ERROR! 
The value for gridonly_coloring is unkown\")\n\n # draw colored rectangles for the blocks\n if not fileContainsGhostNodes: \n ax.add_patch( patches.Rectangle( (x0[i,1],x0[i,0]), (Bs[1]-1)*dx[i,1], (Bs[0]-1)*dx[i,0],\n fill=True, edgecolor=block_edge_color, alpha=block_edge_alpha,\n facecolor=color))\n else:\n ax.add_patch( patches.Rectangle( (x0[i,1]+6*dx[i,1],x0[i,0]+6*dx[i,0]), (Bs[1]-1-6*2)*dx[i,1], (Bs[0]-1-6*2)*dx[i,0],\n fill=True, edgecolor=block_edge_color, alpha=block_edge_alpha,\n facecolor=color))\n cb = None\n hplot = None\n\n else:\n #----------------------------------------------------------------------\n # Plot real data.\n #----------------------------------------------------------------------\n # loop over blocks and plot them individually\n for i in range(N):\n\n if not flipud :\n [X, Y] = np.meshgrid( np.arange(Bs[0])*dx[i,0]+x0[i,0], np.arange(Bs[1])*dx[i,1]+x0[i,1])\n else:\n [X, Y] = np.meshgrid( box[0]-np.arange(Bs[0])*dx[i,0]+x0[i,0], np.arange(Bs[1])*dx[i,1]+x0[i,1])\n\n # copy block data\n block = data[i,:,:].copy().transpose()\n\n if contour:\n # --- contour plot ----\n hplot = ax.contour( Y, X, block, [0.1, 0.2, 0.5, 0.75] )\n\n else:\n # --- pseudocolor plot ----\n #hplot=plt.pcolormesh(X,X,X)\n hplot = ax.pcolormesh( Y, X, block, cmap=cmap, shading=shading )\n\n # use rasterization for the patch we just draw\n hplot.set_rasterized(True)\n\n # unfortunately, each patch of pcolor has its own colorbar, so we have to take care\n # that they all use the same.\n h.append(hplot)\n a = hplot.get_clim()\n c1.append(a[0])\n c2.append(a[1])\n\n if mark_blocks:\n # empty rectangle to mark the blocks border\n ax.add_patch( patches.Rectangle( (x0[i,1],x0[i,0]), (Bs[1]-1)*dx[i,1], (Bs[0]-1)*dx[i,0],\n fill=False, edgecolor=block_edge_color, alpha=block_edge_alpha,\n linewidth=block_linewidth))\n\n # unfortunately, each patch of pcolor has its own colorbar, so we have to take care\n # that they all use the same.\n if caxis is None:\n if not caxis_symmetric:\n # automatic colorbar, using min and max throughout all patches\n for hplots in h:\n hplots.set_clim( (min(c1),max(c2)) )\n else:\n # automatic colorbar, but symmetric, using the SMALLER of both absolute values\n c= min( [abs(min(c1)), max(c2)] )\n for hplots in h:\n hplots.set_clim( (-c,c) )\n else:\n # set fixed (user defined) colorbar for all patches\n for hplots in h:\n hplots.set_clim( (min(caxis),max(caxis)) )\n\n # add colorbar, if desired\n cb = None\n if colorbar:\n cb = plt.colorbar(h[0], ax=ax, orientation=colorbar_orientation)\n\n if title:\n plt.title( \"t=%f Nb=%i Bs=(%i,%i)\" % (time,N,Bs[1],Bs[0]) )\n\n\n if not ticks:\n ax.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelbottom=False) # labels along the bottom edge are off\n\n ax.tick_params(\n axis='y', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n right=False, # ticks along the bottom edge are off\n left=False, # ticks along the top edge are off\n labelleft=False) # labels along the bottom edge are off\n\n# plt.xlim([0.0, box[0]])\n# plt.ylim([0.0, box[1]])\n\n ax.axis('tight')\n ax.set_aspect('equal')\n fig.canvas.draw()\n\n if not gridonly:\n if savepng:\n plt.savefig( file.replace('h5','png'), dpi=dpi, transparent=True, 
bbox_inches='tight' )\n\n if savepdf:\n plt.savefig( file.replace('h5','pdf'), bbox_inches='tight', dpi=dpi )\n else:\n if savepng:\n plt.savefig( file.replace('.h5','-grid.png'), dpi=dpi, transparent=True, bbox_inches='tight' )\n\n if savepdf:\n plt.savefig( file.replace('.h5','-grid.pdf'), bbox_inches='tight' )\n\n return ax,cb,hplot",
"def read_h5(self):\n infile = h5py.File(self.inf_name,'r')\n\n vardict = self.labdict\n #store data with the correct labels\n for k in infile['plasma/1d'].keys():\n try:\n vardict[k] = infile[self.labdict[k]].value\n except:\n vardict[k] = []\n\n vardict['a_ions']=infile['/plasma/anum'].value\n vardict['znum']=infile['/plasma/znum'].value\n \n\n self.rho_in = vardict['rho']\n self._rho_vol = infile['distributions/rhoDist/abscissae/dim1'].value[1:]\n self._volumes = infile['distributions/rhoDist/shellVolume'].value\n self.nrho_in = np.size(self.rho_in)\n\n if vardict['a_ions'][0]!='/':\n self.nspec = len(vardict['a_ions'])\n else:\n self.nspec = vardict['ni'].shape[1]\n print(\"Number of ions: \", self.nspec)\n if len(vardict['a_ions'])!=len(vardict['znum']):\n print(\"ERROR! array of A and Z don't have the same length\")\n\n self.A = vardict['a_ions']\n self.Z = vardict['znum']\n self.nion = self.nspec\n \n self.te_in = vardict['te'][:]\n self.ne_in = vardict['ne'][:]\n self.ti_in = vardict['ti'][:]\n ni1_in = vardict['ni'][:,0]\n self.ni_in = np.zeros((self.nion, self.nrho_in),dtype=float)\n self.ni_in[0,:] = ni1_in\n if self.nion==2:\n ni2_in = vardict['ni'][:,1]\n self.ni_in[1,:] = ni2_in\n elif self.nion==3:\n ni2_in = vardict['ni'][:,1]\n ni3_in = vardict['ni'][:,2]\n self.ni_in[1,:] = ni2_in\n self.ni_in[2,:] = ni3_in\n\n try:\n self.vt_in = vardict['vtor']\n except:\n self.vt_in = np.zeros(self.nrho_in,dtype=float)\n\n try:\n self.zeff_in = vardict['zeff'][:]\n except:\n self.zeff_in = np.zeros(self.nrho_in,dtype=float)\n\n self.ni = np.zeros((self.nion, self.nrho),dtype = float)\n self.spline()",
"def read_uvh5(\n self,\n filename,\n antenna_nums=None,\n antenna_names=None,\n ant_str=None,\n bls=None,\n frequencies=None,\n freq_chans=None,\n times=None,\n time_range=None,\n polarizations=None,\n blt_inds=None,\n keep_all_metadata=True,\n read_data=True,\n data_array_dtype=np.complex128,\n multidim_index=False,\n background_lsts=True,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n strict_uvw_antpos_check=False,\n ):\n if not os.path.exists(filename):\n raise IOError(filename + \" not found\")\n\n # open hdf5 file for reading\n with h5py.File(filename, \"r\") as f:\n # extract header information\n header = f[\"/Header\"]\n self._read_header(\n header,\n filename,\n run_check_acceptability=run_check_acceptability,\n background_lsts=background_lsts,\n )\n\n if not read_data:\n # don't read in the data. This means the object is incomplete,\n # but that may not matter for many purposes.\n return\n\n # Now read in the data\n dgrp = f[\"/Data\"]\n self._get_data(\n dgrp,\n antenna_nums,\n antenna_names,\n ant_str,\n bls,\n frequencies,\n freq_chans,\n times,\n time_range,\n polarizations,\n blt_inds,\n data_array_dtype,\n keep_all_metadata,\n multidim_index,\n run_check,\n check_extra,\n run_check_acceptability,\n strict_uvw_antpos_check,\n )\n\n return",
"def readH5 (dataset):\n if dataset.attrs['type']==PhotoZTemplateSED.typestr:\n return PhotoZTemplateSED()\n else:\n return None",
"def loadh5(fname, path='/data'):\n fp = open_read(fname)\n slab = fp.get_node(path)\n mat = slab.read()\n fp.close()\n return mat",
"def ReadHDF5(self,filename):\n\n if self.elements is not None and self.points is not None:\n self.__reset__()\n\n DictOutput = loadmat(filename)\n\n # GENERIC READER - READS EVERYTHING FROM HDF5 AND ASSIGNS IT TO MESH OBJECT\n for key, value in DictOutput.items():\n if isinstance(DictOutput[key],np.ndarray):\n if \"elements\" in key or \"edge\" in key or \"face\" in key:\n setattr(self, key, np.ascontiguousarray(value).astype(np.uint64))\n else:\n setattr(self, key, np.ascontiguousarray(value))\n else:\n setattr(self, key, value)\n\n if isinstance(self.element_type,np.ndarray):\n self.element_type = str(self.element_type[0])\n if isinstance(self.nelem,np.ndarray):\n self.nelem = int(self.nelem[0])\n\n for key in self.__dict__.keys():\n if isinstance(self.__dict__[str(key)],np.ndarray):\n if self.__dict__[str(key)].size == 1:\n self.__dict__[str(key)] = np.asscalar(self.__dict__[str(key)])",
"def read_data_test(path):\n with h5py.File(path, 'r') as hf:\n input_ = np.array(hf.get('data'))\n label_ = np.array(hf.get('label'))\n\t\n return input_, label_",
"def data_reading(data_file):\n\n # The number of samples is needed to read the HDF5 file, which is stored in the name\n # Split by '/' to remove the directory, and by '.' to remove the file format\n file_name = data_file.split(\"/\")[-1].split(\".\")[-2]\n # The file name ends with the number of samples and before that the number of included neighbours\n n_samples = int(file_name.split(\"_\")[-1])\n n_neighbours = int(file_name.split(\"_\")[-2])\n\n # Read the data\n h5f = h5py.File(data_file, 'r')\n\n # The data set name is the name of the path where the data file can be found\n data = h5f[\"dataset_{}\".format(n_samples)][:]\n\n # Close the H5py file\n h5f.close()\n\n return data, n_samples, n_neighbours"
] | [
"0.7511299",
"0.68497235",
"0.6839176",
"0.6466713",
"0.6055406",
"0.6010037",
"0.6009357",
"0.5869854",
"0.5803738",
"0.57437724",
"0.57119524",
"0.5656739",
"0.5625802",
"0.5617597",
"0.5598882",
"0.55979973",
"0.55816346",
"0.5550571",
"0.5539923",
"0.5537304",
"0.548983",
"0.5444579",
"0.5440177",
"0.5422461",
"0.5405808",
"0.53972566",
"0.53576946",
"0.5348217",
"0.53456146",
"0.53300023"
] | 0.7173856 | 1 |
Write data from wabbit to an HDF5 file | def write_wabbit_hdf5( file, time, x0, dx, box, data, treecode, iteration = 0, dtype=np.float64 ):
    import h5py
    import numpy as np

    # number of digits per treecode row = maximum refinement level that can be encoded
    Level = np.size(treecode,1)

    if len(data.shape)==4:
        # 3D data: blocks are stored as [Nblocks, Nz, Ny, Nx]; flip Bs so it prints as [Bs_x, Bs_y, Bs_z]
        Bs = np.zeros([3,1])
        N, Bs[0], Bs[1], Bs[2] = data.shape
        Bs = Bs[::-1]
        print( "Writing to file=%s max=%e min=%e size=%i %i %i " % (file, np.max(data), np.min(data), Bs[0], Bs[1], Bs[2]) )
    else:
        # 2D data: blocks are stored as [Nblocks, Ny, Nx]
        Bs = np.zeros([2,1])
        N, Bs[0], Bs[1] = data.shape
        Bs = Bs[::-1]

    print("~~~~~~~~~~~~~~~~~~~~~~~~~")
    print("Writing file %s" % (file) )
    print("Time=%e it=%i N=%i Bs[0]=%i Bs[1]=%i Level=%i Domain=[%d, %d]" % (time, iteration, N, Bs[0], Bs[1], Level, box[0], box[1]) )
    print("~~~~~~~~~~~~~~~~~~~~~~~~~")

    # write the datasets: block origins, spacings, field data and treecodes
    fid = h5py.File( file, 'w')
    fid.create_dataset( 'coords_origin', data=x0, dtype=dtype )
    fid.create_dataset( 'coords_spacing', data=dx, dtype=dtype )
    fid.create_dataset( 'blocks', data=data, dtype=dtype )
    fid.create_dataset( 'block_treecode', data=treecode, dtype=dtype )
    fid.close()

    # reopen in append mode and attach time, iteration and domain size as attributes
    # of the 'blocks' dataset (this is where read_wabbit_hdf5 looks for them)
    fid = h5py.File(file,'a')
    dset_id = fid.get( 'blocks' )
    dset_id.attrs.create( "version", 20200902) # this is used to distinguish wabbit file formats
    dset_id.attrs.create('time', time, dtype=dtype)
    dset_id.attrs.create('iteration', iteration)
    dset_id.attrs.create('domain-size', box, dtype=dtype )
    dset_id.attrs.create('total_number_blocks', N )
    fid.close()
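
# --- illustrative usage sketch (added for clarity, not part of the original function) ---
# Minimal call with a single 2D block. write_wabbit_hdf5 is the function defined above;
# the filename, block size and the all -1 treecode row are made-up example values,
# not something prescribed by the wabbit file format.
if __name__ == "__main__":
    import numpy as np
    Bs = 17                                    # points per block (example value)
    block = np.zeros((1, Bs, Bs))              # block data, layout [Nblocks, Ny, Nx]
    x0 = np.array([[0.0, 0.0]])                # origin of each block
    dx = np.array([[1.0/(Bs-1), 1.0/(Bs-1)]])  # grid spacing of each block
    treecode = -np.ones((1, 1))                # placeholder treecode for one block
    write_wabbit_hdf5("phi_000000000000.h5", 0.0, x0, dx, [1.0, 1.0], block, treecode)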
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_as_hdf5(self, filename):",
"def write_hdf5(filename, data):\n \n if '.h5' in filename:\n fid = h5py.File(filename, 'w')\n else:\n filename = filename+'.h5'\n fid = h5py.File(filename, 'w')\n\n print('Writing %s...'%filename)\n\n write_hdf5_group(fid, data)\n\n fid.close()\n print('Finished writting %s.'%filename)\n return",
"def write(data: orm.Data, filename: str) -> None:\n save(to_bands_inspect(data), hdf5_file=filename)",
"def write_data_to_h5(data, filename):\n f = h5py.File(filename, 'w', libver='latest')\n dset = f.create_dataset('array', shape=(data.shape), data=data, compression='gzip', compression_opts=9)\n f.close()",
"def to_hdf5(self, filename):\n\n f = h5py.File(filename, 'w')\n f['xyz'] = self.xyz\n f.close()\n\n return",
"def save_hdf5(self, filename):\n filename += '.h5'\n try:\n hf = h5py.File(filename, 'w')\n hf.create_dataset('Array', data=self.flat_array)\n hf.close()\n except TypeError as err:\n if isinstance(self.mess_inst, MessagesGUI):\n self.mess_inst.message('TypeError [{}] when attempting to save HDF5'.format(err))\n else:\n print('TypeError [{}] when attempting to save HDF5'.format(err))",
"def to_hdf5(self, filename):\n\n f = h5py.File(filename, 'w')\n f['xyz'] = np.vstack(np.squeeze(self.xyz))\n f.close()\n\n return",
"def export_to_hdf5(self, path, mode='a', libver='earliest'):\n\n # Open file and write version.\n with h5py.File(str(path), mode, libver=libver) as f:\n f.attrs['filetype'] = np.string_('data_wmp')\n f.attrs['version'] = np.array(WMP_VERSION)\n\n g = f.create_group(self.name)\n\n # Write scalars.\n g.create_dataset('spacing', data=np.array(self.spacing))\n g.create_dataset('sqrtAWR', data=np.array(self.sqrtAWR))\n g.create_dataset('E_min', data=np.array(self.E_min))\n g.create_dataset('E_max', data=np.array(self.E_max))\n\n # Write arrays.\n g.create_dataset('data', data=self.data)\n g.create_dataset('windows', data=self.windows)\n g.create_dataset('broaden_poly',\n data=self.broaden_poly.astype(np.int8))\n g.create_dataset('curvefit', data=self.curvefit)",
"def write_h5(fname: str, data: dict) -> None:\n try:\n with h5py.File(fname, 'w') as f:\n recursively_save_dict_contents_to_group(f,'/',data)\n except IOError as e:\n print(f\"Cannot write HDF5 file {fname}\")\n print(f\"IOError: {e}\")",
"def to_hdf5(self, path: Union[str, Path]):\n import h5py # : import-outside-toplevel\n\n with h5py.File(path, \"w\") as hdf:\n for k, v in self._to_list_dict().items():\n if k in self._cal_paras:\n hdf.create_dataset(k, data=v.tolist())\n elif v:\n hdf[k] = v",
"def write_data(data, filename):\n f = h5py.File(filename, 'w', libver='latest')\n dset = f.create_dataset('array', shape=(data.shape), data = data, compression='gzip', compression_opts=9)\n f.close()",
"def writeH5Dataset( self, foldername, time, nameConvention = \"grid\" ):\n filename = \"{0}/{1}_{2:06}.h5\".format(foldername,nameConvention,time)\n file = h5py.File(filename,'w',driver='mpio',comm=self.global_comm)\n dset = file.create_dataset(\"dset\",self._layout.fullShape, dtype = self._f.dtype)\n slices = tuple([slice(s,e) for s,e in zip(self._layout.starts,self._layout.ends)])\n dset[slices]=self._f[:]\n attr_data = np.array(self._layout.dims_order)\n dset.attrs.create(\"Layout\", attr_data, (self._nDims,), h5py.h5t.STD_I32BE)\n file.close()",
"def read_wabbit_hdf5(file, verbose=True, return_iteration=False):\n import h5py\n import numpy as np\n\n if verbose:\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Reading file %s\" % (file) )\n\n fid = h5py.File(file,'r')\n b = fid['coords_origin'][:]\n x0 = np.array(b, dtype=float)\n\n b = fid['coords_spacing'][:]\n dx = np.array(b, dtype=float)\n\n b = fid['blocks'][:]\n data = np.array(b, dtype=float)\n\n b = fid['block_treecode'][:]\n treecode = np.array(b, dtype=float)\n\n # get the dataset handle\n dset_id = fid.get('blocks')\n \n # from the dset handle, read the attributes\n time = dset_id.attrs.get('time')\n iteration = dset_id.attrs.get('iteration')\n box = dset_id.attrs.get('domain-size')\n version=dset_id.attrs.get('version')\n\n\n fid.close()\n\n jmin, jmax = get_max_min_level( treecode )\n N = data.shape[0]\n Bs = data.shape[1:]\n Bs = np.asarray(Bs[::-1]) # we have to flip the array since hdf5 stores in [Nz, Ny, Nx] order\n \n if version == 20200408 or version == 20231602:\n Bs = Bs-1\n #print(\"!!!Warning old (old branch: newGhostNodes) version of wabbit format detected!!!\")\n else:\n print(\"This file includes redundant points\")\n \n if verbose:\n print(\"Time=%e it=%i N=%i Bs[0]=%i Bs[1]=%i Jmin=%i Jmax=%i\" % (time, iteration, N, Bs[0], Bs[1], jmin, jmax) )\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n\n if return_iteration:\n return time, x0, dx, box, data, treecode, iteration[0]\n else:\n return time, x0, dx, box, data, treecode",
"def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)",
"def SaveObjectAsHDF5(FolderPath,WaveObject):\n WaveData = WaveObject.DataY\n FilePath = FolderPath + GetFileSaveName(WaveObject)\n HDF5Util.WriteHDF5Array(FilePath,WaveData,attr=WaveObject.Note)",
"def writeHedr(self):\n path = os.path.join(self.dir,self.name)\n out = file(path,'r+b')\n out.seek(16) #--Skip to Hedr record data\n self.tes3.hedr.getSize()\n self.tes3.hedr.dump(out)\n out.close()\n #--Done\n self.getHeader()\n self.setMTime()",
"def write(self,data): \n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # We will store these in a separate file and link them to the level2s\n fname = data.filename.split('/')[-1]\n \n if os.path.exists(self.outfile):\n output = h5py.File(self.outfile,'a')\n else:\n output = h5py.File(self.outfile,'w')\n\n # Set permissions and group\n if self.set_permissions:\n try:\n os.chmod(self.outfile,0o664)\n shutil.chown(self.outfile, group=self.permissions_group)\n except PermissionError:\n self.logger(f'{fname}:{self.name}: Warning, couldnt set the file permissions.')\n\n # Store datasets in root\n data_out = {'tod':self.all_tod,\n 'weights':self.all_weights,\n 'mask':self.all_mask,\n 'cal_factors':self.all_cal_factors,\n 'frequency':self.all_frequency,\n 'auto_rms':self.all_auto}\n\n for dname, dset in data_out.items():\n if dname in output:\n del output[dname]\n output.create_dataset(dname, data=dset)\n\n output.attrs['version'] = __level3_version__\n output['cal_factors'].attrs['source'] = self.cal_source\n output['cal_factors'].attrs['calibrator_obsid'] = self.nearest_calibrator\n\n output.close()\n \n if self.level3 in data.keys():\n del data[self.level3]\n data[self.level3] = h5py.ExternalLink(self.outfile,'/')",
"def to_hdf5(self, filepath, **kwargs):\n hdf = pd.HDFStore(filepath, **kwargs)\n hdf.put(self.INDEXDATAFRAME, self.df, format='fixed', data_columns=True)\n hdf.close()",
"def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n x = []\n g0 = []\n offt = []\n unused_bit = []\n pa = []\n pb = []\n wa = []\n wb = []\n nan = np.full(3, np.nan)\n encoding = model._encoding\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n if element.g0 is None:\n x.append(element.x)\n g0.append(-1)\n else:\n x.append(nan)\n g0.append(element.g0)\n\n offti = element.offt\n if isinstance(offti, integer_types):\n offti = str(offti)\n offt.append(offti.encode(encoding))\n pa.append(element.pa)\n pb.append(element.pb)\n wa.append(element.wa)\n wb.append(element.wb)\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('pid', data=pids)\n #print('x =', x)\n #print('g0 =', g0)\n h5_file.create_dataset('x', data=x)\n h5_file.create_dataset('g0', data=g0)\n h5_file.create_dataset('offt', data=offt)\n\n h5_file.create_dataset('pa', data=pa)\n h5_file.create_dataset('pb', data=pb)\n\n h5_file.create_dataset('wa', data=wa)\n h5_file.create_dataset('wb', data=wb)",
"def _update_hdf5_file(self, field_name, saveformat, data, timestep, t):\n assert saveformat == \"hdf5\"\n fullname, metadata = self._get_datafile_name(field_name, saveformat, timestep)\n\n # Create \"good enough\" hash. This is done to avoid data corruption when restarted from\n # different number of processes, different distribution or different function space\n local_hash = sha1()\n local_hash.update(str(data.function_space().mesh().num_cells()))\n local_hash.update(str(data.function_space().ufl_element()))\n local_hash.update(str(data.function_space().dim()))\n local_hash.update(str(MPI.size(mpi_comm_world())))\n\n # Global hash (same on all processes), 10 digits long\n global_hash = MPI.sum(mpi_comm_world(), int(local_hash.hexdigest(), 16))\n global_hash = str(int(global_hash%1e10)).zfill(10)\n\n #key = (field_name, saveformat)\n #datafile = self._datafile_cache.get(key)\n #if datafile is None:\n # datafile = HDF5File(mpi_comm_world(), fullname, 'w')\n # self._datafile_cache[key] = datafile\n\n # Open HDF5File\n if not os.path.isfile(fullname):\n datafile = HDF5File(mpi_comm_world(), fullname, 'w')\n else:\n datafile = HDF5File(mpi_comm_world(), fullname, 'a')\n\n # Write to hash-dataset if not yet done\n if not datafile.has_dataset(global_hash) or not datafile.has_dataset(global_hash+\"/\"+field_name):\n datafile.write(data, str(global_hash)+\"/\"+field_name)\n\n if not datafile.has_dataset(\"Mesh\"):\n datafile.write(data.function_space().mesh(), \"Mesh\")\n\n # Write vector to file\n # TODO: Link vector when function has been written to hash\n datafile.write(data.vector(), field_name+str(timestep)+\"/vector\")\n\n # HDF5File.close is broken in 1.4\n if dolfin_version() == \"1.4.0+\":\n datafile.close()\n del datafile\n # Link information about function space from hash-dataset\n hdf5_link(fullname, str(global_hash)+\"/\"+field_name+\"/x_cell_dofs\", field_name+str(timestep)+\"/x_cell_dofs\")\n hdf5_link(fullname, str(global_hash)+\"/\"+field_name+\"/cell_dofs\", field_name+str(timestep)+\"/cell_dofs\")\n hdf5_link(fullname, str(global_hash)+\"/\"+field_name+\"/cells\", field_name+str(timestep)+\"/cells\")\n\n return metadata",
"def write2hdf5(filename, dict2store, compression=\"lzf\"):\n\twith h5py.File(filename,'w') as hf:\n\t\tfor key,value in dict2store.iteritems():\n\t\t\thf.create_dataset(key, data=value,compression=compression)",
"def save_frame_to_hdf5_file(fsrc, key = 'images', compression = 0):\n preparation = \"\"\n preparation += \"from h5py import File;\"\n preparation += \"from tempfile import gettempdir;\"\n preparation += \"import os;\"\n preparation += \"root = gettempdir()\"\n preparation += \"filename_dst = os.path.join(root,'test_destination.hdf5')\"\n preparation += \"filename_dst = os.path.join(root,'test_destination.hdf5')\"\n testcode = ''",
"def SaveObjectWrapper(args):\n SaveObjectAsHDF5(*args)",
"def _write(self, h5_group, _) -> None:\n # Convert text from unicode to byte-string to avoid error in h5py\n data = np.asarray(self.data, dtype=np.string_)\n h5_field = h5_group.create_dataset(h5_group.attrs[\"fieldname\"], self.data.shape, dtype=data.dtype)\n h5_field[...] = data",
"def write(self, h5, name=None):\n if isinstance(h5, str):\n fname = os.path.expandvars(os.path.expanduser(h5))\n h5 = File(fname, 'w')\n pmd_field_init(h5, externalFieldPath='/ExternalFieldPath/%T/')\n g = h5.create_group('/ExternalFieldPath/1/')\n else:\n g = h5\n \n write_pmd_field(g, self.data, name=name)",
"def read_wabbit_hdf5_dir(dir):\n import numpy as np\n import re\n import ntpath\n import os\n\n it=0\n data={'time': [],'x0':[],'dx':[],'treecode':[]}\n # we loop over all files in the given directory\n for file in os.listdir(dir):\n # filter out the good ones (ending with .h5)\n if file.endswith(\".h5\"):\n # from the file we can get the fieldname\n fieldname=re.split('_',file)[0]\n print(fieldname)\n time, x0, dx, box, field, treecode = read_wabbit_hdf5(os.path.join(dir, file))\n #increase the counter\n data['time'].append(time[0])\n data['x0'].append(x0)\n data['dx'].append(dx)\n data['treecode'].append(treecode)\n if fieldname not in data:\n # add the new field to the dictionary\n data[fieldname]=[]\n data[fieldname].append(field)\n else: # append the field to the existing data field\n data[fieldname].append(field)\n it=it+1\n # the size of the domain\n data['box']=box\n #return time, x0, dx, box, data, treecode\n return data",
"def saveh5(fname, mat, name='data'):\n fp = open_write(fname)\n save_vec(mat, fp, fp.root, name)\n fp.close()",
"def _setup_h5(self, data_gen_parms):\n\n '''\n Build the group structure down to the channel group\n '''\n # Set up the basic group structure\n root_grp = VirtualGroup('')\n root_parms = dict()\n root_parms['translator'] = 'FAKEBEPS'\n root_parms['data_type'] = data_gen_parms['data_type']\n root_grp.attrs = root_parms\n\n meas_grp = VirtualGroup('Measurement_')\n chan_grp = VirtualGroup('Channel_')\n\n meas_grp.attrs.update(data_gen_parms)\n\n # Create the Position and Spectroscopic datasets for the Raw Data\n ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals = self._build_ancillary_datasets()\n\n raw_chunking = calc_chunks([self.n_pixels,\n self.n_spec_bins],\n np.complex64(0).itemsize,\n unit_chunks=[1, self.n_bins])\n\n ds_raw_data = VirtualDataset('Raw_Data', data=None,\n maxshape=[self.n_pixels, self.n_spec_bins],\n dtype=np.complex64,\n compression='gzip',\n chunking=raw_chunking,\n parent=meas_grp)\n\n chan_grp.add_children([ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals,\n ds_raw_data])\n meas_grp.add_children([chan_grp])\n root_grp.add_children([meas_grp])\n\n hdf = HDFwriter(self.h5_path)\n hdf.delete()\n h5_refs = hdf.write(root_grp)\n\n # Delete the MicroDatasets to save memory\n del ds_raw_data, ds_spec_inds, ds_spec_vals, ds_pos_inds, ds_pos_vals\n\n # Get the file and Raw_Data objects\n h5_raw = get_h5_obj_refs(['Raw_Data'], h5_refs)[0]\n h5_chan_grp = h5_raw.parent\n\n # Get the Position and Spectroscopic dataset objects\n h5_pos_inds = get_h5_obj_refs(['Position_Indices'], h5_refs)[0]\n h5_pos_vals = get_h5_obj_refs(['Position_Values'], h5_refs)[0]\n h5_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_refs)[0]\n h5_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_refs)[0]\n\n # Link the Position and Spectroscopic datasets as attributes of Raw_Data\n link_as_main(h5_raw, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)\n\n '''\n Build the SHO Group\n '''\n sho_grp = VirtualGroup('Raw_Data-SHO_Fit_', parent=h5_chan_grp.name)\n\n # Build the Spectroscopic datasets for the SHO Guess and Fit\n sho_spec_starts = np.where(h5_spec_inds[h5_spec_inds.attrs['Frequency']].squeeze() == 0)[0]\n sho_spec_labs = get_attr(h5_spec_inds, 'labels')\n ds_sho_spec_inds, ds_sho_spec_vals = build_reduced_spec_dsets(h5_spec_inds,\n h5_spec_vals,\n keep_dim=sho_spec_labs != 'Frequency',\n step_starts=sho_spec_starts)\n\n sho_chunking = calc_chunks([self.n_pixels,\n self.n_sho_bins],\n sho32.itemsize,\n unit_chunks=[1, 1])\n ds_sho_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n ds_sho_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n\n sho_grp.add_children([ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals])\n\n # Write the SHO group and datasets to the file and delete the MicroDataset objects\n h5_sho_refs = hdf.write(sho_grp)\n del ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals\n\n # Get the dataset handles for the fit and guess\n h5_sho_fit = get_h5_obj_refs(['Fit'], h5_sho_refs)[0]\n h5_sho_guess = get_h5_obj_refs(['Guess'], h5_sho_refs)[0]\n\n # Get the dataset handles for the SHO Spectroscopic datasets\n h5_sho_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_sho_refs)[0]\n h5_sho_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_sho_refs)[0]\n\n # Link the Position and Spectroscopic 
datasets as attributes of the SHO Fit and Guess\n link_as_main(h5_sho_fit, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n link_as_main(h5_sho_guess, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n\n '''\n Build the loop group\n '''\n loop_grp = VirtualGroup('Fit-Loop_Fit_', parent=h5_sho_fit.parent.name)\n\n # Build the Spectroscopic datasets for the loops\n loop_spec_starts = np.where(h5_sho_spec_inds[h5_sho_spec_inds.attrs['DC_Offset']].squeeze() == 0)[0]\n loop_spec_labs = get_attr(h5_sho_spec_inds, 'labels')\n ds_loop_spec_inds, ds_loop_spec_vals = build_reduced_spec_dsets(h5_sho_spec_inds,\n h5_sho_spec_vals,\n keep_dim=loop_spec_labs != 'DC_Offset',\n step_starts=loop_spec_starts)\n\n # Create the loop fit and guess MicroDatasets\n loop_chunking = calc_chunks([self.n_pixels, self.n_loops],\n loop_fit32.itemsize,\n unit_chunks=[1, 1])\n ds_loop_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n ds_loop_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n # Add the datasets to the loop group then write it to the file\n loop_grp.add_children([ds_loop_fit, ds_loop_guess, ds_loop_spec_inds, ds_loop_spec_vals])\n h5_loop_refs = hdf.write(loop_grp)\n\n # Delete the MicroDatasets\n del ds_loop_spec_vals, ds_loop_spec_inds, ds_loop_guess, ds_loop_fit\n\n # Get the handles to the datasets\n h5_loop_fit = get_h5_obj_refs(['Fit'], h5_loop_refs)[0]\n h5_loop_guess = get_h5_obj_refs(['Guess'], h5_loop_refs)[0]\n h5_loop_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_loop_refs)[0]\n h5_loop_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_loop_refs)[0]\n\n # Link the Position and Spectroscopic datasets to the Loop Guess and Fit\n link_as_main(h5_loop_fit, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n link_as_main(h5_loop_guess, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n\n self.h5_raw = USIDataset(h5_raw)\n self.h5_sho_guess = USIDataset(h5_sho_guess)\n self.h5_sho_fit = USIDataset(h5_sho_fit)\n self.h5_loop_guess = USIDataset(h5_loop_guess)\n self.h5_loop_fit = USIDataset(h5_loop_fit)\n self.h5_spec_vals = h5_spec_vals\n self.h5_spec_inds = h5_spec_inds\n self.h5_sho_spec_inds = h5_sho_spec_inds\n self.h5_sho_spec_vals = h5_sho_spec_vals\n self.h5_loop_spec_inds = h5_loop_spec_inds\n self.h5_loop_spec_vals = h5_loop_spec_vals\n self.h5_file = h5_raw.file\n\n return",
"def write_hdf5(data, labels, output_filename):\n\n x = data.astype(numpy.float32)\n y = labels.astype(numpy.float32)\n\n with h5py.File(output_filename, 'w') as h:\n h.create_dataset('data', data=x, shape=x.shape)\n h.create_dataset('label', data=y, shape=y.shape)\n # h.create_dataset()",
"def store_hdf_h5py(self, compression_name, **compression_options):\n\n types = [('counter', uint8), ('timestamp', uint64),\n ('acceleration', uint16)]\n number_lines = len(self.values)\n data = recarray(number_lines, dtype=types)\n data['counter'] = asarray(self.counters)\n data['timestamp'] = asarray(self.timestamps)\n data['acceleration'] = asarray(self.acceleration)\n\n filepath = self.filepath.with_name(\n f\"{self.filepath.stem} h5py {compression_name}\").with_suffix(\n \".hdf5\")\n with File(filepath, 'w') as hdf:\n hdf.create_dataset(\"acceleration\",\n data=data,\n shape=(number_lines, ),\n **compression_options)"
] | [
"0.739396",
"0.718801",
"0.7148199",
"0.69659054",
"0.68264276",
"0.6792738",
"0.6753575",
"0.66656506",
"0.66584826",
"0.6568733",
"0.65683955",
"0.6483694",
"0.64607877",
"0.63932073",
"0.63770145",
"0.632899",
"0.63053787",
"0.63048476",
"0.63000256",
"0.6252908",
"0.62099403",
"0.61890405",
"0.615163",
"0.6149115",
"0.6141714",
"0.61226296",
"0.6107201",
"0.61007667",
"0.6098579",
"0.60933083"
] | 0.7852353 | 0 |
Read all h5 files in directory dir. Return time, x0, dx, box, data, treecode. Use data["phi"][it] to reference quantity phi at iteration it | def read_wabbit_hdf5_dir(dir):
    import numpy as np
    import re
    import ntpath
    import os

    it = 0
    data = {'time': [], 'x0': [], 'dx': [], 'treecode': []}

    # we loop over all files in the given directory
    for file in os.listdir(dir):
        # filter out the good ones (ending with .h5)
        if file.endswith(".h5"):
            # the fieldname is the part of the filename before the first underscore
            fieldname = re.split('_', file)[0]
            print(fieldname)
            time, x0, dx, box, field, treecode = read_wabbit_hdf5(os.path.join(dir, file))
            # store the grid metadata of this snapshot
            data['time'].append(time[0])
            data['x0'].append(x0)
            data['dx'].append(dx)
            data['treecode'].append(treecode)
            if fieldname not in data:
                # add the new field to the dictionary
                data[fieldname] = []
                data[fieldname].append(field)
            else:
                # append the field to the existing data field
                data[fieldname].append(field)
            # count the number of snapshots read
            it = it + 1

    # the size of the domain (taken from the last file read)
    data['box'] = box
    # return time, x0, dx, box, data, treecode
    return data
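
# --- illustrative usage sketch (added for clarity, not part of the original function) ---
# Read every *.h5 snapshot in a directory and access the field "phi" at snapshot
# index it, as the docstring above suggests. The directory name is a made-up example,
# files named phi_*.h5 are assumed to be present, and read_wabbit_hdf5 (called
# internally) must be available.
if __name__ == "__main__":
    data = read_wabbit_hdf5_dir("./simulation_output/")
    it = 0
    phi_blocks = data["phi"][it]   # block data of field "phi" at snapshot index it
    t = data["time"][it]           # corresponding simulation time
    print("phi at t=%e is stored on %i blocks" % (t, phi_blocks.shape[0]))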
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_h5_file_arvind_format(folder, filen):\n \n ### file path\n \n fpath = folder + filen + '.h5'\n assert os.path.exists(fpath), \"The out.h5 file does NOT exist for \" + fpath\n fl = h5py.File(fpath, 'r')\n \n ### cell information\n \n xu = np.array(fl['/positions/xu'], dtype=np.float32)\n pol = np.array(fl['/positions/ori'], dtype=np.float32)\n pol = np.array([xt.T for xt in pol[:]])\n \n ### simulation information\n \n lx = fl['/info/box/x'][...]\n ly = fl['/info/box/y'][...]\n dt = fl['/info/dt'][...]\n nsteps = fl['/info/nsteps'][...]\n nfils = fl['/info/nfils'][...]\n nbeads = fl['/info/nbeads'][...]\n nsamp = fl['/info/nsamp'][...]\n nbpf = fl['/info/nbpf'][...]\n \n ### simulation parameters\n \n density = fl['/param/density'][...]\n kappa = fl['/param/kappa'][...]\n km = fl['/param/km'][...]\n pa = fl['/param/pa'][...]\n pp = fl['/param/pp'][...]\n bl = fl['/param/bl'][...]\n sigma = fl['/param/sigma'][...]\n \n fl.close()\n \n ### generate classes to submerge data\n \n sim = misc_tools.Simulation(lx, ly, dt, nsteps, nfils, nbeads, nsamp, nbpf, \\\n density, kappa, km, pa, pp, bl, sigma)\n fils = misc_tools.Cells(xu, pol, nbpf, sim)\n \n return sim, fils",
"def read_h5_file(folder, filen):\n \n ### file path\n \n fpath = folder + filen + '.h5'\n assert os.path.exists(fpath), \"The out.h5 file does NOT exist for \" + fpath\n fl = h5py.File(fpath, 'r')\n \n ### cell information\n \n xu = np.array(fl['/cells/comu'], dtype=np.float32)\n \n ### simulation information\n \n lx = fl['/info/box/x'][...]\n ly = fl['/info/box/y'][...]\n dt = fl['/info/dt'][...]\n nsteps = fl['/info/nsteps'][...]\n nfils = fl['/info/nfils'][...]\n nbeads = fl['/info/nbeads'][...]\n nsamp = fl['/info/nsamp'][...]\n nbpf = fl['/info/nbpf'][...]\n \n ### simulation parameters\n \n density = fl['/param/density'][...]\n kappa = fl['/param/kappa'][...]\n km = fl['/param/km'][...]\n pa = fl['/param/pa'][...]\n pp = fl['/param/pp'][...]\n bl = fl['/param/bl'][...]\n sigma = fl['/param/sigma'][...]\n \n fl.close()\n \n ### generate classes to submerge data\n \n sim = misc_tools.Simulation(lx, ly, dt, nsteps, nfils, nbeads, nsamp, nbpf, \\\n density, kappa, km, pa, pp, bl, sigma)\n fils = misc_tools.Cells(xu, nbpf, sim)\n \n return sim, fils",
"def read_data(path):\n with h5py.File(path, \"r\") as f:\n transformed_poses = np.array(f['transformed_poses'])\n extracted_poses = np.array(f['poses'])\n target = np.array(f['target'])\n \n return extracted_poses, transformed_poses, target",
"def read_hdf5(path_to_file):\n\n print(\"\\nReading HDF5 file: \", path_to_file)\n file = h5py.File(path_to_file, 'r')\n\n # List the groups\n groups = list(file.keys())\n print(\"Groups available: \", groups)\n\n # Read Zemax Metadata\n zemax_metadata = {}\n print(\"\\nZemax Metadata:\")\n for key in file['Zemax Metadata'].attrs.keys():\n print('{} : {}'.format(key, file['Zemax Metadata'].attrs[key]))\n zemax_metadata[key] = file['Zemax Metadata'].attrs[key]\n\n # Read the analysis groups\n for group_name in groups:\n if group_name != 'Zemax Metadata':\n analysis_group = file[group_name]\n print('\\nAnalysis: ', group_name)\n # For each Analysis Group we loop over subgroups\n for subgroup_key in analysis_group.keys():\n subgroup = analysis_group[subgroup_key]\n print('Subgroup #', subgroup_key)\n # List the metadata of the subgroup\n for att_key in subgroup.attrs.keys():\n print(' {} : {}'.format(att_key, subgroup.attrs[att_key]))\n\n file.close()\n\n return zemax_metadata",
"def read_dhalo_trees(basename):\n \n # Read in the tree file(s)\n ifile = 0\n nfiles = 1\n data = collections.OrderedDict()\n while ifile < nfiles:\n treefile = h5py.File(\"%s.%d.hdf5\" % (basename, ifile), \"r\")\n if ifile == 0:\n nfiles = treefile[\"fileInfo\"].attrs[\"numberOfFiles\"]\n for uname in treefile[\"haloTrees\"].keys():\n name = str(uname)\n if ifile == 0:\n data[name] = []\n data[name].append(treefile[\"haloTrees\"][name][...])\n treefile.close()\n ifile += 1\n \n # Combine arrays from separate files and return\n for name in data.keys():\n data[name] = np.concatenate(data[name], axis=0)\n return data",
"def read_data(path):\n with h5py.File(path, 'r') as hf:\t\n input_ = np.array(hf.get('input'))\n label_ = np.array(hf.get('label'))\n return input_, label_",
"def read_h5(self):\n infile = h5py.File(self.inf_name,'r')\n\n vardict = self.labdict\n #store data with the correct labels\n for k in infile['plasma/1d'].keys():\n try:\n vardict[k] = infile[self.labdict[k]].value\n except:\n vardict[k] = []\n\n vardict['a_ions']=infile['/plasma/anum'].value\n vardict['znum']=infile['/plasma/znum'].value\n \n\n self.rho_in = vardict['rho']\n self._rho_vol = infile['distributions/rhoDist/abscissae/dim1'].value[1:]\n self._volumes = infile['distributions/rhoDist/shellVolume'].value\n self.nrho_in = np.size(self.rho_in)\n\n if vardict['a_ions'][0]!='/':\n self.nspec = len(vardict['a_ions'])\n else:\n self.nspec = vardict['ni'].shape[1]\n print(\"Number of ions: \", self.nspec)\n if len(vardict['a_ions'])!=len(vardict['znum']):\n print(\"ERROR! array of A and Z don't have the same length\")\n\n self.A = vardict['a_ions']\n self.Z = vardict['znum']\n self.nion = self.nspec\n \n self.te_in = vardict['te'][:]\n self.ne_in = vardict['ne'][:]\n self.ti_in = vardict['ti'][:]\n ni1_in = vardict['ni'][:,0]\n self.ni_in = np.zeros((self.nion, self.nrho_in),dtype=float)\n self.ni_in[0,:] = ni1_in\n if self.nion==2:\n ni2_in = vardict['ni'][:,1]\n self.ni_in[1,:] = ni2_in\n elif self.nion==3:\n ni2_in = vardict['ni'][:,1]\n ni3_in = vardict['ni'][:,2]\n self.ni_in[1,:] = ni2_in\n self.ni_in[2,:] = ni3_in\n\n try:\n self.vt_in = vardict['vtor']\n except:\n self.vt_in = np.zeros(self.nrho_in,dtype=float)\n\n try:\n self.zeff_in = vardict['zeff'][:]\n except:\n self.zeff_in = np.zeros(self.nrho_in,dtype=float)\n\n self.ni = np.zeros((self.nion, self.nrho),dtype = float)\n self.spline()",
"def load_data(infile, nstep): \n \n f = h5py.File(infile, 'r')\n \n edges_grp = f['edges']\n xedges = np.asarray(edges_grp['x'][nstep], dtype=float)\n yedges = np.asarray(edges_grp['y'][nstep], dtype=float)\n\n time = np.asarray(f['time'][nstep])\n\n tables_grp = f['tables']\n rho_hist = np.asarray(tables_grp['rho'][nstep], dtype=float)\n vx_hist = np.asarray(tables_grp['vx'][nstep], dtype=float)\n vy_hist = np.asarray(tables_grp['vy'][nstep], dtype=float)\n vorticity = np.asarray(tables_grp['vorticity'][nstep], dtype=float) \n \n box_grp = f['box']\n lx = box_grp['x'][...]\n ly = box_grp['y'][...]\n \n #nsteps = f['nsteps'][...]\n f.close()\n\n return lx, ly, time, xedges, yedges, rho_hist, vx_hist, vy_hist, vorticity",
"def read_data_test(path):\n with h5py.File(path, 'r') as hf:\n input_ = np.array(hf.get('data'))\n label_ = np.array(hf.get('label'))\n\t\n return input_, label_",
"def read_h5(fpath):\n _check_h5_installed()\n import h5py as h5\n\n f = h5.File(fpath, 'r')\n res = dict()\n\n if 'est_n_dips' in f.keys():\n res['est_n_dips'] = list(f['est_n_dips'][:])\n else:\n res['est_n_dips'] = 'Not available.'\n\n if 'exponents' in f.keys():\n res['exponents'] = f['exponents'][:]\n else:\n res['exponents'] = 'Not available.'\n\n if 'ch_names' in f.keys():\n _temp = list(f['ch_names'][:].flatten())\n res['ch_names'] = list(x.decode('utf-8', 'ignore') for x in _temp)\n del _temp\n else:\n res['ch_names'] = 'Not available.'\n\n for _k in ['prob_map', 'est_locs', 'model_sel', 'est_dip_mom_std']:\n if _k in f.keys():\n res[_k] = list(f[_k][_key][:] for _key in sorted(f[_k].keys(),\n key=lambda x: int(x)))\n else:\n res[_k] = 'Not available.'\n\n for _k in ['final_dip_mom_std', 'tmin', 'tmax', 'fmin', 'fmax', 'subsample']:\n if _k in f.keys():\n res[_k] = f[_k][()]\n else:\n res[_k] = None\n\n for _k in ['lambda', 'noise_std', 'dip_mom_std', 'max_n_dips',\n 'subject', 'subject_viz', 'data_path', 'fwd_path',\n 'cov_path', 'src_path', 'lf_path', 'fwd_fixed_ori']:\n if _k in f.keys():\n res[_k] = f[_k][()]\n else:\n res[_k] = 'Not available.'\n\n if 'est_dip_moms' in f.keys():\n est_dip_moms_temp = np.asarray(list(f['est_dip_moms'][_key][:] for _key in sorted(f['est_dip_moms'].keys(),\n key=lambda x: int(x))))\n if f['fwd_fixed_ori'][()]:\n est_dip_moms_aux = np.zeros((res['est_locs'][-1].shape[0], est_dip_moms_temp.shape[0]))\n for i in range(est_dip_moms_temp.shape[0]):\n _temp = est_dip_moms_temp[i, :].reshape(-1, 1)\n for j in range(res['est_locs'][-1].shape[0]):\n est_dip_moms_aux[j, i] += _temp[j]\n elif f['fwd_fixed_ori'][()] == 'Not available.':\n print('Uknown forward source orientation. Skipping dipole moments.')\n else:\n est_dip_moms_aux = np.zeros((res['est_locs'][-1].shape[0], est_dip_moms_temp.shape[0], 3))\n for i in range(est_dip_moms_temp.shape[0]):\n _temp = est_dip_moms_temp[i, :].reshape(-1, 3)\n for j in range(res['est_locs'][-1].shape[0]):\n est_dip_moms_aux[j, i, :] += _temp[j]\n res['est_dip_moms'] = est_dip_moms_aux\n f.close()\n return res",
"def read_hdf5(filename, namelist=None, **kwargs):\n\n print('Reading %s...'%filename)\n\n fid = h5py.File(filename, mode='r')\n \n data = read_hdf5_tree(fid, namelist, **kwargs)\n\n fid.close()\n \n print('Finished reading %s.'%filename)\n return data",
"def _readHDF5(self):\n\n h5 = h5py.File(self.pointInputFile, 'r')\n self.coords = h5['geometry/vertices'][:]\n self.stations = h5['stations'][:]\n self.dispRaw = h5['vertex_fields/displacement'][self.timeStep,:,:]\n h5.close()\n\n self.numStations = self.coords.shape[0]\n\n return",
"def open_h5meta(filepath):\n data = dict()\n h5meta_content = read_h5meta(filepath)\n for file in h5meta_content[\"filelist\"]:\n data[file] = read_detector_data(file)\n\n return data",
"def read_wabbit_hdf5(file, verbose=True, return_iteration=False):\n import h5py\n import numpy as np\n\n if verbose:\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Reading file %s\" % (file) )\n\n fid = h5py.File(file,'r')\n b = fid['coords_origin'][:]\n x0 = np.array(b, dtype=float)\n\n b = fid['coords_spacing'][:]\n dx = np.array(b, dtype=float)\n\n b = fid['blocks'][:]\n data = np.array(b, dtype=float)\n\n b = fid['block_treecode'][:]\n treecode = np.array(b, dtype=float)\n\n # get the dataset handle\n dset_id = fid.get('blocks')\n \n # from the dset handle, read the attributes\n time = dset_id.attrs.get('time')\n iteration = dset_id.attrs.get('iteration')\n box = dset_id.attrs.get('domain-size')\n version=dset_id.attrs.get('version')\n\n\n fid.close()\n\n jmin, jmax = get_max_min_level( treecode )\n N = data.shape[0]\n Bs = data.shape[1:]\n Bs = np.asarray(Bs[::-1]) # we have to flip the array since hdf5 stores in [Nz, Ny, Nx] order\n \n if version == 20200408 or version == 20231602:\n Bs = Bs-1\n #print(\"!!!Warning old (old branch: newGhostNodes) version of wabbit format detected!!!\")\n else:\n print(\"This file includes redundant points\")\n \n if verbose:\n print(\"Time=%e it=%i N=%i Bs[0]=%i Bs[1]=%i Jmin=%i Jmax=%i\" % (time, iteration, N, Bs[0], Bs[1], jmin, jmax) )\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n\n if return_iteration:\n return time, x0, dx, box, data, treecode, iteration[0]\n else:\n return time, x0, dx, box, data, treecode",
"def read_h5file(self, fname, datasetname):\n with h5py.File(fname, 'r') as f:\n atom_pos = f.get(datasetname + '/r').value # atom position -> N x 3 array\n ion_list = f.get(\n datasetname + '/xyz').value # length = N, contain atom type id for each atom\n self.atom_pos = atom_pos[np.argsort(ion_list)]\n _, idx = np.unique(np.sort(ion_list), return_index=True)\n self.split_idx = np.append(idx, [len(ion_list)])\n\n # get atom factor table, sorted by atom type id\n atom_type = f.get(\n datasetname + '/T').value # atom type array, each type is represented by an integer\n self.num_atom_types = len(atom_type)\n ff_table = f.get(datasetname + '/ff').value\n self.ff_table = ff_table[np.argsort(atom_type)]\n\n self.q_sample = f.get(datasetname + '/halfQ').value\n self.num_q_samples = len(self.q_sample)\n self.compton_q_sample = f.get(datasetname + '/Sq_halfQ').value\n self.num_compton_q_samples = len(self.compton_q_sample)\n self.sBound = f.get(datasetname + '/Sq_bound').value\n self.nFree = f.get(datasetname + '/Sq_free').value",
"def list_h5(walk_dir):\n\n file_list = []\n for root, subdirs, files in os.walk(walk_dir):\n\n for filename in files:\n file_path = os.path.join(root, filename)\n if file_path[-2:] == 'h5':\n file_list.append(file_path)\n\n return file_list",
"def load_data(file_path):\n with h5py.File(file_path) as f:\n # load meta info\n fs, channels, p_names, signals = _get_info(f)\n\n # load raw data\n data = [f['protocol{}/raw_data'.format(k + 1)][:] for k in range(len(p_names))]\n df = pd.DataFrame(np.concatenate(data), columns=channels)\n\n # load signals data\n signals_data = [f['protocol{}/signals_data'.format(k + 1)][:] for k in range(len(p_names))]\n df_signals = pd.DataFrame(np.concatenate(signals_data), columns=['signal_'+s for s in signals])\n df = pd.concat([df, df_signals], axis=1)\n\n # load timestamps\n if 'timestamp' in df:\n timestamp_data = [f['protocol{}/timestamp_data'.format(k + 1)][:] for k in range(len(p_names))]\n df['timestamps'] = np.concatenate(timestamp_data)\n\n # events data\n events_data = [f['protocol{}/mark_data'.format(k + 1)][:] for k in range(len(p_names))]\n df['events'] = np.concatenate(events_data)\n\n # set block names and numbers\n df['block_name'] = np.concatenate([[p]*len(d) for p, d in zip(p_names, data)])\n df['block_number'] = np.concatenate([[j + 1]*len(d) for j, d in enumerate(data)])\n return df, fs, channels, p_names",
"def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n label = np.array(hf.get('label'))\n return data, label",
"def read_uvh5(\n self,\n filename,\n antenna_nums=None,\n antenna_names=None,\n ant_str=None,\n bls=None,\n frequencies=None,\n freq_chans=None,\n times=None,\n time_range=None,\n polarizations=None,\n blt_inds=None,\n keep_all_metadata=True,\n read_data=True,\n data_array_dtype=np.complex128,\n multidim_index=False,\n background_lsts=True,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n strict_uvw_antpos_check=False,\n ):\n if not os.path.exists(filename):\n raise IOError(filename + \" not found\")\n\n # open hdf5 file for reading\n with h5py.File(filename, \"r\") as f:\n # extract header information\n header = f[\"/Header\"]\n self._read_header(\n header,\n filename,\n run_check_acceptability=run_check_acceptability,\n background_lsts=background_lsts,\n )\n\n if not read_data:\n # don't read in the data. This means the object is incomplete,\n # but that may not matter for many purposes.\n return\n\n # Now read in the data\n dgrp = f[\"/Data\"]\n self._get_data(\n dgrp,\n antenna_nums,\n antenna_names,\n ant_str,\n bls,\n frequencies,\n freq_chans,\n times,\n time_range,\n polarizations,\n blt_inds,\n data_array_dtype,\n keep_all_metadata,\n multidim_index,\n run_check,\n check_extra,\n run_check_acceptability,\n strict_uvw_antpos_check,\n )\n\n return",
"def ReadData(self, path):\n os.chdir(path)\n folders=os.listdir()\n if 'data.hdf5' in folders:\n print('Loading data from hdf5 file! Might take some time, be patient!')\n file=h5py.File('data.hdf5','r+')\n data=(np.array(list(file['imgs'])),np.array(list(file['lables'])))\n self.real_labels=list(file['real_labels'])\n file.close()\n\n else:\n print('1. Collecting data.')\n err_logs = []\n img=[]\n lable=[]\n for folder in tqdm(folders):\n\n os.chdir(os.path.join(path,folder))\n for file in os.listdir():\n try:\n dat=(plt.imread(open(file,'rb')))\n img.append(resize_image(dat, (resize_x, resize_y),\n mode='constant',\n ))\n lable.append(folder)\n if folder not in self.real_labels:\n self.real_labels.append(folder)\n \n except OSError:\n err_logs.append([folder, file])\n print('\\nError logs:')\n for e in range(len(err_logs)):\n print('\\tFolder: {} | Some OSError for file: {}'.format(err_logs[e][0],\n err_logs[e][0]))\n \n \n print('2. Encoding data to categorical.')\n # Encode Letters into numerical categories.\n le = LabelEncoder()\n le.fit(lable)\n lable = le.transform(lable)\n lable = np.array(lable).reshape(-1, 1)\n \n print('3. Onehot encoding.')\n # Onehot encoding.\n ohe = OneHotEncoder(sparse=False)\n ohe.fit(lable)\n lable = ohe.transform(lable)\n \n # Shaffle data.\n print('4. Shuffle data.')\n img, lable = shuffle(img, lable)\n\t\t \n print('5. Saving data.')\n data=(np.asarray(img), np.asarray(lable))\n os.chdir(path)\n \n file=h5py.File('data.hdf5','w')\n x=file.create_dataset('imgs',data=np.array(img))\n y=file.create_dataset('lables',data=np.array(lable))\n print(self.real_labels)\n rl=file.create_dataset('real_labels',data=np.string_(self.real_labels))\n file.close()\n print('Data set is stored in Data.hdf5 file. ')\n\n return data",
"def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n label = np.array(hf.get('label'))\n return data, label",
"def read_sim_info(folder, filen):\n \n ### file path\n \n fpath = folder + filen + '.h5'\n assert os.path.exists(fpath), \"The out.h5 file does NOT exist for \" + fpath\n fl = h5py.File(fpath, 'r') \n \n ### simulation information\n \n lx = fl['/info/box/x'][...]\n ly = fl['/info/box/y'][...]\n dt = fl['/info/dt'][...]\n nsteps = fl['/info/nsteps'][...]\n nfils = fl['/info/nfils'][...]\n nbeads = fl['/info/nbeads'][...]\n nsamp = fl['/info/nsamp'][...]\n nbpf = fl['/info/nbpf'][...]\n \n ### simulation parameters\n \n density = fl['/param/density'][...]\n kappa = fl['/param/kappa'][...]\n km = fl['/param/km'][...]\n pa = fl['/param/pa'][...]\n pp = fl['/param/pp'][...]\n bl = fl['/param/bl'][...]\n sigma = fl['/param/sigma'][...]\n \n fl.close()\n \n ### generate classes to submerge data\n \n sim = misc_tools.Simulation(lx, ly, dt, nsteps, nfils, nbeads, nsamp, nbpf, \\\n density, kappa, km, pa, pp, bl, sigma)\n \n return sim",
"def read_hdf5(ID_images:str, path:str = \"data/dataset/\"):\n images, labels = [], []\n\n #open the HDF5 file\n file = h5py.File(path +ID_images+\"_lens.h5\", \"r\")\n\n images = np.array(file[\"/images\"]).astype(\"float64\")\n labels = pd.read_hdf(path +ID_images+'_meta.h5', \"table\")\n\n return images, labels",
"def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n return data",
"def read(files, save):\n\t# NOTE all soundings are size obs long, they must be filled in with zeros for this data format...\n\t# create the HDF5 document\n\tdoc = h5(save)\n\tsize = 450 # this hopefully exceeds the size of the arrays # CPIN Files are much shorter...\n\tdoc.create(pres=size, temp=size, dewpt=size, rh=size, r=size, u=size, v=size, z=size, lat=1, lon=1, theta=size, thte=size,\n\t\twspd=size, wdir=size, gamma=size, stab=size, N=size, rich=size, thtdef=size, cpin=size)\n\t# those last two do not have to be included...\n\t# Z=geopotenital height\n\n\t# now read the files!\n\tfor f in sorted(files):\n\t\tfname = f.split('/')[-1]\n\t\t# if 'smth' not in fname and NCAR not in fname: continue\n\t\tl.info('reading ' + fname)\n\t\t# launch time comes from line 2 of the file, the last element\n\t\tdf = open(f, 'r')\n\t\ttxt = df.read(2000).split('\\n') # way more than we need\n\t\tdf.close()\n\t\tlatln = txt[0].split() # keys 1,2 will be what we want\n\t\ttry:\n\t\t\ttm = s2t(txt[1].split()[-1] + 'UTC', '%Y%m%d%H%M%Z')\n\t\texcept:\n\t\t\t# drat.\n\t\t\tprint txt.split('\\n')[1]\n\t\t\tcontinue\n\t\ttry:\n\t\t\tif 'cpin' in fname:\n\t\t\t\tz, p, t, td, rh, r, wb, tv, tht, thte, thtw, ws, wd, u, v, vflg, gamma, stab, N, rich, thtdef, cpin = np.loadtxt(f, skiprows=4, unpack=True)\n\t\t\t\t# r is mixing ratio\n\t\t\telse:\n\t\t\t\tz, p, t, td, rh, r, wb, tv, tht, thte, thtw, ws, wd, u, v, vflg, gamma, stab, N, rich = np.loadtxt(f, skiprows=4, unpack=True)\n\t\t\t\t# r is mixing ratio\n\t\texcept:\n\t\t\tl.warning('This file could not be read')\n\t\t\tcontinue\n\n\t\t# and append this data! I will trust the time seconds, instead of recomputing the time\n\t\t# but, before that, we have to make them all the same size - size long\n\t\tnl = np.zeros(size - t.shape[0]) - 999.00 # -999 array to fluff the end\n\t\tp = np.concatenate((p, nl))\n\t\tt = np.concatenate((t, nl))\n\t\ttd = np.concatenate((td, nl))\n\t\trh = np.concatenate((rh, nl))\n\t\tr = np.concatenate((r, nl))\n\t\ttv = np.concatenate((tv, nl))\n\t\ttht = np.concatenate((tht, nl))\n\t\tthte = np.concatenate((thte, nl))\n\t\tws = np.concatenate((ws, nl))\n\t\twd = np.concatenate((wd, nl))\n\t\tgamma = np.concatenate((gamma, nl))\n\t\tstab = np.concatenate((stab, nl))\n\t\tN = np.concatenate((N, nl))\n\t\trich = np.concatenate((rich, nl))\n\t\tu = np.concatenate((u, nl))\n\t\tv = np.concatenate((v, nl))\n\t\tz = np.concatenate((z, nl))\n\t\tif 'cpin' in fname:\n\t\t\tcpin = np.concatenate((cpin, nl))\n\t\t\tthtdef = np.concatenate((thtdef, nl))\n\t\t\tdoc.append(tm, persist=True, pres=p, temp=t, dewpt=td, rh=rh, r=r, u=u, v=v, z=z, lat=[latln[1]], lon=[latln[2]],\n\t\t\t\ttheta=tht, thte=thte, wspd=ws, wdir=wd, gamma=gamma, stab=stab, N=N, rich=rich, cpin=cpin, thtdef=thtdef)\n\t\telse:\n\t\t\tdoc.append(tm, persist=True, pres=p, temp=t, dewpt=td, rh=rh, r=r, u=u, v=v, z=z, lat=[latln[1]], lon=[latln[2]],\n\t\t\t\ttheta=tht, thte=thte, wspd=ws, wdir=wd, gamma=gamma, stab=stab, N=N, rich=rich)\n\tdoc.close()",
"def convert_calculations(filename, hdf5_data):\n x1 = []\n\n with open(filename, 'r') as inp:\n for line in inp:\n x1.append(line)\n\n idx = 1\n dset = require_dataset(hdf5_data, structure.H5_ENV_VOLUME, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_VOLUME_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_GRAVITY, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_GRAVITY_ATTR)\n idx += 1\n\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_DEPTH, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_DEPTH_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_WAVE_POINT, (2,), dtype=settings.NEMOH_FLOAT)\n x2 = x1[idx].split()\n dset[0] = float(x2[0])\n dset[1] = float(x2[1])\n set_hdf5_attributes(dset, structure.H5_ENV_WAVE_POINT_ATTR)\n\n idx = 6\n\n num_bodies = int(x1[idx].split()[0])\n\n for i in range(num_bodies):\n\n body = structure.H5_BODIES + structure.H5_BODY_BASE + str(i+1) + '/'\n idx += 2\n\n mesh_x = []\n\n mesh_path = os.path.join(os.path.abspath(os.path.dirname(filename)), str(x1[idx].split()[0]).strip(' \\t\\n\\r'))\n\n with open(mesh_path, 'r') as mesh_file:\n for line in mesh_file:\n mesh_x.append(line)\n\n idx += 1\n x2 = x1[idx].split()\n\n num_points = int(x2[0])\n num_panels = int(x2[1])\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_POINTS, (1, ), dtype=settings.NEMOH_INT)\n dset[0] = num_points\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_POINTS_ATTR)\n\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_PANELS, (1, ), dtype=settings.NEMOH_INT)\n dset[0] = num_panels\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_PANELS_ATTR)\n\n mesh_idx = 0\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_MESH, (num_points+num_panels+1, 4), dtype=settings.NEMOH_FLOAT)\n mesh_x2 = mesh_x[mesh_idx].split()\n set_hdf5_attributes(dset, structure.H5_BODY_MESH_ATTR)\n\n dset[0, 0] = int(mesh_x2[0])\n dset[0, 1] = int(mesh_x2[1])\n\n for j in range(1, num_points+num_panels+1):\n mesh_idx += 1\n mesh_x2 = mesh_x[mesh_idx].split()\n dset[j, :] = [float(x) for x in mesh_x2[:4]]\n\n if j == num_points:\n mesh_idx += 1\n\n idx += 1\n num = int(x1[idx].split()[0])\n dset = require_dataset(hdf5_data, body + structure.H5_FREEDOM_DEGREE, (num, 7), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREEDOM_DEGREE_ATTR)\n for j in range(num):\n idx += 1\n x2 = x1[idx].split()\n dset[j, :] = np.array([float(x) for x in x2[:7]])\n\n idx += 1\n num = int(x1[idx].split()[0])\n dset = require_dataset(hdf5_data, body + structure.H5_GENERALISED_FORCES, (num, 7), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_GENERALISED_FORCES_ATTR)\n for j in range(num):\n idx += 1\n x2 = x1[idx].split()\n dset[j, :] = [float(x) for x in x2[:7]]\n\n idx += 1\n num = int(x1[idx].split()[0])\n for j in range(num):\n idx += 1\n\n idx += 2\n x2 = x1[idx].split()\n\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_FREQUENCIES_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_MIN_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, 
structure.H5_MAX_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(x2[2])\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_DIRECTIONS_ATTR)\n dset[0] = int(x2[0])\n\n dset = require_dataset(hdf5_data, structure.H5_MIN_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(x2[1])\n\n dset = require_dataset(hdf5_data, structure.H5_MAX_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(x2[2])\n\n idx += 2\n x2 = x1[idx].split()\n\n dset = require_dataset(hdf5_data, structure.H5_COMPUTE_IRF, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_COMPUTE_IRF_ATTR)\n dset[0] = int(x2[0])\n\n dset = require_dataset(hdf5_data, structure.H5_IRF_TIME_STEP, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_TIME_STEP_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_IRF_DURATION, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_DURATION_ATTR)\n dset[0] = float(x2[2])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_SHOW_PRESSURE, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_SHOW_PRESSURE_ATTR)\n dset[0] = int(x2[0])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_NUMBER, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_NUMBER_ATTR)\n dset[0] = float(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MIN, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MIN_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MAX, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MAX_ATTR)\n dset[0] = float(x2[2])\n\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_X, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_X_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_Y, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_Y_ATTR)\n dset[0] = int(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_X, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_X_ATTR)\n dset[0] = float(x2[2])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_Y, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_Y_ATTR)\n dset[0] = float(x2[3])",
"def printAllColumnsInH5(pathToData):\n\n #Check if a correct path is given\n\n if not os.path.isfile(pathToData):\n raise ValueError(\"h5 file not found. Wrong path given?\")\n elif os.path.isfile(pathToData):\n Data = h5.File(pathToData, 'r')\n\n\n Files = Data.keys()\n\n for File in Files:\n print()\n print('Filename = %s' %(File))\n print('----------------------')\n\n #Every time you see Xr*' '\n #It means I add X spaces to line it\n print('\\t column name%sunit%slength'%(29*' ',16*' '))\n print('\\t '+'-----------------'*4)\n \n #In this file give me all the column names\n columns = Data[File].keys()\n \n #for every column in the columns\n for nrc,column in enumerate(columns):\n #always want the column name printed in 40 char\n spaces = ' '*(40 - len(column))\n length = Data[File][column].shape[0]\n #always want the unit name printed over 20 chars\n unit = Data[File][column].attrs['units']\n spaces2 = ' '*(20 - len(unit))\n #--\n length = Data[File][column].shape[0]\n\n print('\\t %s%s%s%s%s'%(column,spaces, unit,spaces2, length))\n #Every 4 lines print a dashed line to read output easier\n if (nrc%5==4):\n print('\\t '+'-----------------'*4)\n Data.close()",
"def load_raw_data(dir, matlab=False):\n\n\tcurrent_dir = os.getcwd() \n\t\n\tos.chdir(dir)\n\t\n\tfile_names = []\n\tdata = {}\n\t\n\t\n\t## For text files\n\tif not matlab:\n\t\tfiles = glob.glob('*.txt')\n\t\t\n\t\tassert len(files) > 0, 'No *.txt files found!'\n\n\t\tif len(glob.glob('*.mat')) > 0:\n\t\t\tprint('WARNING: matlab files also found in directory: \\t%s'%dir)\n\t\t\n\t\tfor f in files:\n\t\t\tf_name = f.lower()\n\t\t\n\t\t\tif f_name.find('mark') > -1:\n\t\t\t\tdata['markers'] = np.loadtxt(f_name, skiprows=1)\n\t\t\t\tfile_names.append(f)\n\t\t\t\n\t\t\telif f_name.find('spike') > -1:\n\t\t\t\tdata['spikes'] = np.loadtxt(f_name, skiprows=1)\n\t\t\t\tfile_names.append(f)\n\t\t\t\n\t\t\telif f_name.find('shape') > -1:\n\t\t\t\tdata['shape'] = np.loadtxt(f_name, skiprows=1)\n\t\t\t\tfile_names.append(f)\n\t\n\n\t## For matlab files\n\t# These matlab files have more useful data than is extracted here.\n\telif matlab:\n\t\tfiles = glob.glob('*.mat')\n\t\t\n\t\tassert len(files) > 0, 'No matlab files found!'\n\t\t\n\t\tif len(glob.glob('*.txt')) > 0:\n\t\t\tprint('WARNING: text files also found in directory: \\t%s' %dir)\n\n\t\tfor f in files:\n\t\t\tf_name = f.lower()\n\t\t\t\n\t\t\t\n\t\t\tif f_name.find('mark') > -1:\n\t\t\t\t\n\t\t\t\tmark_file = h5py.File(f) # Loads hfd5 file\n\t\t\t\tmark_key = mark_file.keys()[0] # Gets name of relevant file for extract\n\t\t\t\t\n\t\t\t\t# Extract times of the markers\n\t\t\t\tdata['markers'] = np.array(mark_file['%s/times' %mark_key])\n\t\t\t\tdata['markers'] = np.reshape(data['markers'], -1) # turn to 1D array, as first axis redundant\n\t\t\t\t\n\t\t\t\t# Extract the numerical codes of the markers, which are listed one-to-one\n\t\t\t\t# with the times extracted above. Useful for an integrity check.\n\t\t\t\t# Zero index necessary as marker codes has three empty columns\n\t\t\t\tdata['marker_codes'] = np.array(mark_file['%s/codes' %mark_key][0])\n\t\t\t\tdata['marker_codes'] = np.reshape(data['marker_codes'], -1) # turn to 1D array, as first axis redundant\n\t\t\t\tfile_names.append(f)\n\n\t\t\telif f_name.find('spike') > -1:\n\n\t\t\t\tspike_file = h5py.File(f) # Loads hfd5 file\n\t\t\t\tspike_key = spike_file.keys()[0] # Gets name of relevant file for extract\n\t\t\t\t\n\t\t\t\t# Extract times of the spikes\n\t\t\t\tdata['spikes'] = np.array(spike_file['%s/times' %spike_key])\n\t\t\t\tdata['spikes'] = np.reshape(data['spikes'], -1) # turn to 1D array, as first axis redundant\n\n\n\t\t\t\t#Extract trace for each spike. 
First Dim-trace, second-spikes.\n\t\t\t\tspike_traces = np.array(spike_file['%s/values' %spike_key])\n\t\t\t\t\n\t\t\t\t# Calculate Average shape (for all templates, which are coded in '/codes')\n\t\t\t\tavg_spike_trace = np.mean(spike_traces, axis=1)\n\t\t\t\tsem_avg_spike_trace = stats.sem(spike_traces, axis=1, ddof=1)\n\t\t\t\t\n\t\t\t\tdata['shape'] = avg_spike_trace\n\t\t\t\tdata['shape_SEM'] = sem_avg_spike_trace\n\t\t\t\tfile_names.append(f) \n\t\t\t\t\n\t\t\t\t\t\t\n\tos.chdir(current_dir)\n\n\t\t\t\n\tif len(data.keys()) != len(files):\n\t\tmesg = 'Not all of your file names are recognised; they may not have been imported appropriately.'\n\t\tmesg2 = 'File names must contain the key words \"mark\", \"spike\" and/or \"shape.\"'\n\t\tprint(mesg)\n\t\tprint(mesg2)\n\t\tprint('\\nFollowing files loaded successfully:\\n')\n\t\tfor i in file_names: print(i)\n\t\treturn data\n\n\t\n\telif len(data.keys()) == len(files):\n\t\tprint('All files imported and assigned')\n\t\tprint('\\nFollowing files loaded successfully:\\n')\n\t\tfor i in file_names: print(i)\n\t\treturn data",
"def read_h5_file_beads(folder, filen):\n \n ### file path\n \n fpath = folder + filen + '.h5'\n assert os.path.exists(fpath), \"The out.h5 file does NOT exist for \" + fpath\n fl = h5py.File(fpath, 'r')\n \n ### bead information\n \n xu = np.array(fl['/beads/xu'], dtype=np.float32)\n #pol = np.array(fl['/beads/pol'], dtype=np.float32)\n cid = np.array(fl['/beads/cid'], dtype=np.int32)\n \n ### simulation information\n \n lx = fl['/info/box/x'][...]\n ly = fl['/info/box/y'][...]\n dt = fl['/info/dt'][...]\n nsteps = fl['/info/nsteps'][...]\n nfils = fl['/info/nfils'][...]\n nbeads = fl['/info/nbeads'][...]\n nsamp = fl['/info/nsamp'][...]\n nbpf = fl['/info/nbpf'][...]\n \n ### simulation parameters\n \n density = fl['/param/density'][...]\n kappa = fl['/param/kappa'][...]\n km = fl['/param/km'][...]\n pa = fl['/param/pa'][...]\n pp = fl['/param/pp'][...]\n bl = fl['/param/bl'][...]\n sigma = fl['/param/sigma'][...]\n \n fl.close()\n \n ### generate classes to submerge data\n \n sim = misc_tools.Simulation(lx, ly, dt, nsteps, nfils, nbeads, nsamp, nbpf, \\\n density, kappa, km, pa, pp, bl, sigma)\n beads = misc_tools.Beads(xu, cid)\n \n return sim, beads",
"def _read_h5_dataset(self):\n dev = self.getParentObj()\n top = dev.getFileDescriptor()\n for attr in self._attr_list:\n data = top.get(attr)\n if data is None:\n msg = \"Unable to open object (Object %s doesn't exist)\" % attr\n raise TaurusException(msg)\n top = data\n return data"
] | [
"0.66390574",
"0.65969133",
"0.65183514",
"0.64874375",
"0.64845943",
"0.6476374",
"0.6446031",
"0.6411749",
"0.63776684",
"0.63614887",
"0.63193595",
"0.62539995",
"0.6228941",
"0.6222934",
"0.61189467",
"0.61138064",
"0.60822386",
"0.6060295",
"0.60524553",
"0.60471475",
"0.6034819",
"0.6032639",
"0.60120404",
"0.5990184",
"0.5974157",
"0.59627205",
"0.5951419",
"0.5932819",
"0.5929363",
"0.590585"
] | 0.7388911 | 0 |
This generic function adds the local convergence rate as nice labels between | def add_convergence_labels(dx, er):
import numpy as np
import matplotlib.pyplot as plt
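# annotate the geometric midpoint of each consecutive pair of points with the local convergence order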
for i in range(len(dx)-1):
x = 10**( 0.5 * ( np.log10(dx[i]) + np.log10(dx[i+1]) ) )
y = 10**( 0.5 * ( np.log10(er[i]) + np.log10(er[i+1]) ) )
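# local order: slope of the log-log fit through points i and i+1 (relies on convergence_order being in scope)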
order = "%2.1f" % ( convergence_order(dx[i:i+1+1],er[i:i+1+1]) )
plt.text(x, y, order, horizontalalignment='center', verticalalignment='center',
bbox=dict(facecolor='w', alpha=0.75, edgecolor='none'), fontsize=7 ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _show_learning_rate():\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6.4 * 2, 4.8))\n\n # Visualize c_prime\n c_prime_list = np.linspace(1, 100, num=11)\n x_label = f\"c'\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[0]\n x_list = c_prime_list\n\n # MNIST\n y_list = [161, 16, 14, 15, 20, 21, 24, 27, 30, 30, 35]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [63, 12, 12, 15, 18, 19, 22, 25, 26, 28, 30]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [1297, 724, 221, 80, 52, 51, 54, 54, 52, 60, 60]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n # Visualize t0\n t0_list = np.linspace(1, 100, num=11)\n x_label = f\"t0\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[1]\n x_list = t0_list\n\n # MNIST\n y_list = [16, 16, 16, 16, 16, 17, 16, 16, 16, 16, 16]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [765, 765, 767, 772, 772, 773, 789, 789, 793, 796, 799]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n plt.show()",
"def lr_scheduler(epochs):\n\n switch_points = [0, 99, 149]\n for i in [2, 1, 0]:\n if epochs >= switch_points[i]:\n return 0.001 * pow(0.1, i)",
"def plot_convergence(S, NN, S_lb=np.array([]), S_ub=np.array([]),\r\n SExact=np.array([]), X_Label='Sample size',\r\n Y_Label='Sensitivity', labelinput=[]):\r\n\r\n # Options for the graphic\r\n pltfont = {'fontname': 'Bitstream Vera Sans', 'fontsize': 15} # font for axes\r\n pltfont_leg = {'family': 'Bitstream Vera Sans', 'size': 15} # font for legend\r\n # Options for the legend\r\n sorting = 1 # If 1, inputs will be displayed in the legend\r\n # according to their influence, i.e. from most sensitive to least sensitive\r\n # (if 0 they will be displayed according to their original order)\r\n nb_legend = 5 # number of input names that will be displayed in the legend\r\n end_length = 0.3 # adjust the space left for the legend\r\n\r\n # Options for the colours:\r\n # You can produce a coloured plot or a black and white one\r\n # (printer-friendly). Furthermore, you can use matplotlib colourmaps or\r\n # repeat 5 'easy-to-distinguish' colours (see http://colorbrewer2.org/).\r\n # The variable 'col' must be a np.ndarray\r\n # Option 1a - coloured using colorbrewer: uncomment the following lines:\r\n col = np.array([[228, 26, 28], [55, 126, 184], [77, 175, 74],\r\n [152, 78, 163], [255, 127, 0]])/256\r\n # Option 1b - coloured using matplotlib colormap: uncomment the following line:\r\n # colorscale = plt.cm.jet\r\n # col = colorscale(np.linspace(0, 1, 5))\r\n # Option 1a - B&W using matlab colorbrewer: uncomment the following lines:\r\n # col = np.array([[37, 37, 37], [90, 90, 90], [150, 150, 150],\r\n # [189, 189, 189], [217, 217, 217]])/256\r\n # Option 1b - B&W using matlab colormap: uncomment the following line:\r\n # colorscale = plt.cm.gray\r\n # col = colorscale(np.linspace(0, 1, 5))\r\n\r\n ###########################################################################\r\n # Check inputs\r\n ###########################################################################\r\n if not isinstance(S, np.ndarray):\r\n raise ValueError('\"S\" must be a numpy.array.')\r\n if S.dtype.kind != 'f' and S.dtype.kind != 'i' and S.dtype.kind != 'u':\r\n raise ValueError('\"S\" must contain floats or integers.')\r\n\r\n if not isinstance(NN, np.ndarray):\r\n raise ValueError('\"NN\" must be a numpy.array.')\r\n if NN.dtype.kind != 'i':\r\n raise ValueError('\"NN\" must contain integers.')\r\n if any(i < 0 for i in np.diff(NN)):\r\n raise ValueError('elements in \"NN\" must be sorted in ascending order')\r\n if any(i < 0 for i in NN):\r\n raise ValueError('elements in \"NN\" must be positive')\r\n NN_shape = NN.shape\r\n if len(NN_shape) > 1:\r\n raise ValueError('\"NN\" must be of shape (R,).')\r\n R = len(NN)\r\n if R <= 1:\r\n raise ValueError('\"NN\" must have at least 2 elements')\r\n\r\n Ns = S.shape\r\n if Ns[0] != R:\r\n raise ValueError('number of rows in \"S\" must be equal to the number of elements in \"NN\"')\r\n M = Ns[1]\r\n ###########################################################################\r\n # Check optional inputs\r\n ###########################################################################\r\n if len(S_lb) != 0:\r\n if not isinstance(S_lb, np.ndarray):\r\n raise ValueError('\"S_lb\" must be a numpy.array.')\r\n if S_lb.dtype.kind != 'f' and S_lb.dtype.kind != 'i' and S_lb.dtype.kind != 'u':\r\n raise ValueError('\"S_lb\" must contain floats or integers.')\r\n Ns_lb = S_lb.shape\r\n if Ns_lb[0] != R:\r\n raise ValueError('\"S\" and \"S_lb\" must have the same number of rows')\r\n if Ns_lb[1] != M:\r\n raise ValueError('\"S\" and \"S_lb\" must have the same number of 
colums')\r\n\r\n if len(S_ub) != 0:\r\n if not isinstance(S_ub, np.ndarray):\r\n raise ValueError('\"S_ub\" must be a numpy.array.')\r\n if S_ub.dtype.kind != 'f' and S_ub.dtype.kind != 'i' and S_ub.dtype.kind != 'u':\r\n raise ValueError('\"S_ub\" must contain floats or integers.')\r\n Ns_ub = S_ub.shape\r\n if Ns_ub[0] != R:\r\n raise ValueError('\"S\" and \"S_ub\" must have the same number of rows')\r\n if Ns_ub[1] != M:\r\n raise ValueError('\"S\" and \"S_ub\" must have the same number of colums')\r\n\r\n if len(SExact) != 0:\r\n if not isinstance(SExact, np.ndarray):\r\n raise ValueError('\"SExact\" must be a numpy.array.')\r\n if SExact.dtype.kind != 'f' and SExact.dtype.kind != 'i' and SExact.dtype.kind != 'u':\r\n raise ValueError('\"SExact\" must contain floats or integers.')\r\n NS_E = SExact.shape\r\n if len(NS_E) > 1:\r\n raise ValueError('\"SExact\" must be of shape (M, )')\r\n if NS_E[0] != M:\r\n raise ValueError('number of elements in \"SExact\" must be equal' +\r\n 'to number of columns in \"S\"')\r\n\r\n if not isinstance(X_Label, str):\r\n raise ValueError('\"X_Label\" must be a string.')\r\n if not isinstance(Y_Label, str):\r\n raise ValueError('\"Y_Label\" must be a string.')\r\n\r\n if not labelinput:\r\n labelinput = [np.nan]*M\r\n for i in range(M):\r\n labelinput[i] = 'X' + str(i+1)\r\n else:\r\n if not isinstance(labelinput, list):\r\n raise ValueError('\"labelinput\" must be a list with M elements.')\r\n if not all(isinstance(i, str) for i in labelinput):\r\n raise ValueError('Elements in \"labelinput\" must be strings.')\r\n if len(labelinput) != M:\r\n raise ValueError('\"labelinput\" must have M elements.')\r\n\r\n ###########################################################################\r\n # Create plot\r\n ###########################################################################\r\n R = len(NN)\r\n A = len(col)\r\n L = int(np.ceil(M/A))\r\n clrs = repmat(col, L, 1)\r\n\r\n # Set horizontal and vertical limits:\r\n if NN[0] - np.mean(np.diff(NN)) > 0:\r\n H1 = NN[0] - np.mean(np.diff(NN))\r\n else:\r\n H1 = 0\r\n H2 = NN[-1] + end_length*(NN[-1] - NN[0])\r\n\r\n # Set minimum and maximum for y-axis\r\n if len(S_lb) != 0:\r\n V1 = min(-0.1, np.min(S_lb.flatten()))\r\n else:\r\n V1 = min(-0.1, np.min(S.flatten()))\r\n if len(S_ub) != 0:\r\n V2 = max(1.1, np.max(S_ub.flatten()))\r\n else:\r\n V2 = max(1.1, np.max(S.flatten()))\r\n\r\n labelinput_new = [np.nan]*M\r\n\r\n if sorting:\r\n Sidx = np.flip(np.argsort(S[-1, :]), axis=0)\r\n S = S[:, Sidx]\r\n for i in range(M):\r\n labelinput_new[i] = labelinput[Sidx[i]]\r\n if len(S_ub) != 0:\r\n S_ub = S_ub[:, Sidx]\r\n if len(S_lb) != 0:\r\n S_lb = S_lb[:, Sidx]\r\n if len(SExact) != 0:\r\n SExact = SExact[Sidx]\r\n\r\n if nb_legend < M:\r\n labelinput_new = labelinput_new[0:nb_legend]\r\n labelinput_new[-1] = labelinput_new[-1] + '...'\r\n\r\n # plt.figure()\r\n\r\n # For each index, plot final estimated value:\r\n for i in range(M):\r\n plt.plot(NN[-1], S[-1, i], 'o', markerfacecolor=clrs[i],\r\n markeredgecolor='k', markersize=10)\r\n\r\n # Draw an horizontal line at 0:\r\n plt.plot([H1, H2], [0, 0], 'k')\r\n\r\n for i in range(M):\r\n # Plot trajectory with increasing number of samples:\r\n plt.plot(NN, S[:, i], color=clrs[i], linewidth=2.5)\r\n plt.box(on=True)\r\n\r\n if len(SExact) != 0:\r\n plt.plot([H1, H2], [SExact[i], SExact[i]], '--', color=clrs[i],\r\n linewidth=2)\r\n\r\n # plot confidence bounds\r\n if len(S_lb) != 0:\r\n for i in range(M):\r\n plt.plot(NN, S_lb[:, i], '--', color=clrs[i], 
linewidth=1.2)\r\n\r\n if len(S_ub) != 0:\r\n for i in range(M):\r\n plt.plot(NN, S_ub[:, i], '--', color=clrs[i], linewidth=1.2)\r\n\r\n # Axes labels:\r\n plt.xlabel(X_Label, **pltfont)\r\n plt.ylabel(Y_Label, **pltfont)\r\n\r\n plt.legend(labelinput_new, loc='upper right', prop=pltfont_leg)\r\n\r\n # Tick labels for horizontal axis:\r\n xtick_label = [np.nan]*R\r\n for k in range(R):\r\n xtick_label[k] = '%d' % (NN[k])\r\n plt.xlim(H1, H2)\r\n plt.ylim(V1, V2)\r\n plt.xticks(NN, label=xtick_label, **pltfont)\r\n plt.grid(linestyle='--')",
"def label(self):\r\n if isinstance(self.Lbeta, str):\r\n result = self.Lbeta\r\n else:\r\n result = 'T%.2d' % int(round(self.Lbeta))\r\n result += 'E%.2d' % int(round(self.E))\r\n result += 'G%.2d' % int(round(self.minTauG))\r\n result += self.insulation\r\n return result",
"def custom_scoring(y_te, y_pred):\n #weights computed with training data set\n w = np.array([0.02409584, 0.00787456, 0.03685528, 0.01760536, 0.04589969, 0.8483942 , 0.01724058, 0.00203449]);\n \n ## F1 SCORES\n #evaluate F1 score, precision and recall for each label, \n #along with custom proportionally weighted F1 score\n #and built in weighted and macro F1 scores\n F1_tab, Ptab, Rtab, pf1 = F1_score(y_te, y_pred, w)\n f = F1Score(8, threshold = 0.5, average = 'weighted')\n f.update_state(y_te, y_pred)\n wf1 = f.result().numpy() #weighted f1 score\n f.reset_states()\n f = F1Score(8, threshold = 0.5, average = 'macro')\n f.update_state(y_te, y_pred)\n mf1 = f.result().numpy() #macro f1 score\n f.reset_states()\n\n ##EDIT DISTANCE\n #edit_dist_av = LevDistMultilabels(y_true, y_pred)\n\n ##ACCURACY\n #evaluate accuracy per label\n acc_tab = Acc(y_te, y_pred)\n\n return wf1, mf1, pf1, F1_tab, Ptab, Rtab, acc_tab",
"def getLabel(self, names, values, withRescale = False, pow10first=False, sigma = 0.45):\n #lb_name = (names[-1] == ',') and names[:-1] or names[-1]\n lb = names + \" = \"\n lb += \",\".join([str(i) for i in values])\n if len(values)==2:\n L, k = values\n if len(values)==3:\n L, k, W = values\n\n if withRescale:\n if len(values)==2:\n lb = names + \"=\"\n lb += str(values[0])\n if pow10first:\n lb += r\", $10^{%d}$\" %(int(round(numpy.log10(1.0*k/L))))\n else:\n lb += \", %.3e\" %(1.0*k/L)\n if len(values)==3:\n lb = r\"$k, W_s =$\"\n #lb += str(L)\n if pow10first:\n lb += r\"$10^{%d}$\" %(int(round(numpy.log10(1.0*k/L))))\n else:\n lb += \"%.2e\" %(1.0*k/L)\n lb += \",%.2f\" %(W*(1.0*k/L)**sigma)\n #lb += str(W)\n #lb += \",%.3e\" %((1.0*k/L)**(-sigma)/L)\n #for nm, val in zip(a,b):\n # exec(nm + \"= \" + str(val))\n #if len(values) == 2:\n # lb += str(1.0*k/L)**sigma\n #elif len(values) == 3:\n # lb += str((1.0*k/L)**sigma*W)[0:5]\n return lb",
"def convergence():\n fig, axes = plt.subplots(nrows=2, figsize=figsize(aspect=1.2))\n\n # label names\n label1 = str(league.lambda1)\n label2_list = [str(lambda2) for lambda2 in league.lambda2_list]\n\n # point spread and point total subplots\n subplots = [\n (False, [-0.5, 0.5], league.spreads, 'probability spread > 0.5'),\n (True, [200.5], league.totals, 'probability total > 200.5'),\n ]\n\n for ax, (commutes, lines, values, ylabel) in zip(axes, subplots):\n\n # train margin-dependent Elo model\n melo = Melo(lines=lines, commutes=commutes, k=1e-4)\n melo.fit(league.times, league.labels1, league.labels2, values)\n\n line = lines[-1]\n\n for label2 in label2_list:\n\n # evaluation times and labels\n times = np.arange(league.times.size)[::1000]\n labels1 = times.size * [label1]\n labels2 = times.size * [label2]\n\n # observed win probability\n prob = melo.probability(times, labels1, labels2, lines=line)\n ax.plot(times, prob)\n\n # true (analytic) win probability\n if ax.is_first_row():\n prob = skellam.sf(line, int(label1), int(label2))\n ax.axhline(prob, color='k')\n else:\n prob = poisson.sf(line, int(label1) + int(label2))\n ax.axhline(prob, color='k')\n\n # axes labels\n if ax.is_last_row():\n ax.set_xlabel('Iterations')\n ax.set_ylabel(ylabel)\n\n set_tight(w_pad=.5)",
"def update_learning_rate(self):\n for scheduler in self.schedulers:\n if self.opt.lr_policy == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = self.optimizers[0].param_groups[0]['lr']\n print('learning rate = %.7f' % lr)",
"def learnign_rate_examples():\n #######\n bad_larning_rate = 0.1\n not_bad_learning_rate = 1e-4\n good_learning_rate = 1e-3\n #######\n return bad_larning_rate, not_bad_learning_rate, good_learning_rate",
"def get_learning_rate():\n return 0.00001",
"def __learning_rate(self, lr0, epoch):\n \n \"\"\"\n Dan's Methos\n \"\"\"\n lrs = lr0 * 0.001\n c = np.power((lrs/lr0), 1.0/self.__maxEpoch)\n \n return lr0*np.power(c, epoch)",
"def display_convergence_error(train_losses, valid_losses):\n if len(valid_losses) > 0:\n plt.plot(len(train_losses), train_losses, color=\"red\")\n plt.plot(len(valid_losses), valid_losses, color=\"blue\")\n plt.legend([\"Train\", \"Valid\"])\n else:\n plt.plot(len(train_losses), train_losses, color=\"red\")\n plt.legend([\"Train\"])\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()",
"def display_label(self) -> str:\n return \"linear (variable levels)\"",
"def decay_proportion_plot(Lmax=1000, p1=database['K+'], p=75, target_rate=53957518.001):\r\n L_range = np.linspace(0, 1000, 10000)\r\n prop = []\r\n for L in L_range:\r\n prop.append(decay_proportion(L, p1, p, target_rate))\r\n# charac_L = p*c*(p1.tau*1e-3/c)/p1.mass\r\n fig = plt.figure(figsize=[12, 3])\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.plot(L_range, prop, 'r', lw=2)\r\n ax.set_xlim(0, Lmax)\r\n ax.set_ylim(0)\r\n ax.set_xlabel('Target Distance', fontsize=20)\r\n ax.set_ylabel(r'$K^+$ flux', fontsize=20)\r\n# ax.xaxis.set_major_locator(plt.MultipleLocator(charac_L/4))\r\n# ax.xaxis.set_minor_locator(plt.MultipleLocator(charac_L/20))\r\n# ax.xaxis.set_major_formatter(plt.FuncFormatter(multiple_formatter_non_int(1, charac_L, 'L_{K^+}')))\r\n ax.set_xticks([0])\r\n ax.set_yticks([target_rate])\r\n ax.yaxis.set_major_locator(plt.MultipleLocator(target_rate/1))\r\n ax.yaxis.set_minor_locator(plt.MultipleLocator(target_rate/1))\r\n ax.yaxis.set_major_formatter(plt.FuncFormatter(multiple_formatter_non_int(1, target_rate, 'R_t')))\r\n ax.legend(fontsize=20)\r\n ax.minorticks_off()\r\n# ax.grid()\r\n plt.show()\r\n return",
"def lr_schedule(epoch,lr):\r\n learning_rate = lr\r\n if epoch > 10:\r\n learning_rate *= 0.1\r\n if epoch > 20:\r\n learning_rate *= 0.1\r\n if epoch > 50:\r\n learning_rate *= 0.01\r\n\r\n # tf.summary.scalar('learning rate', data=learning_rate, step=epoch)\r\n return learning_rate",
"def animate_pointwise_convergence(dirname=DEFAULT_DIR):\n pass",
"def augmenter_score():\n\n global label_score\n global score\n\n score += 1\n label_score.config(text= \"score : \" + str(score))",
"def learning_rate_range():\n # Lower and upper bounds\n #######\n lower_bound = 0.1 \n upper_bound = 1e-6\n #######\n return lower_bound, upper_bound",
"def L_curveTV(f,lam_init = 2.0, q = 0.9):\n lam = lam_init\n max_iter = 50\n residual_list = np.zeros(max_iter)\n size_list = np.zeros(max_iter)\n error = np.zeros(max_iter)\n alt_error = np.zeros(max_iter)\n \n for i in range(max_iter): #range(max_iter):\n u = ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc=True, tol = 1.0e-5)\n #u, _, j = projected_gradient_alt(f,lam, tau = 0.2, tol = 1.0e-4)\n lam = lam_init * (q ** i)\n residual_list[i] = np.linalg.norm(u - f)\n size_list[i] = np.linalg.norm(u)\n error[i] = np.linalg.norm(u - f) * np.linalg.norm(u)\n #plt.loglog(residual_list,size_list)\n #plt.show()\n opt_idx = np.argmin(error)\n t = 1.0 / (1.0 + lam_init * (q ** opt_idx))\n lam = lam_init * (q ** opt_idx)\n u = ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n return u, t",
"def update_learning_rate(self) -> None:\n optimizer = list(self.optimizers.values())[0]\n old_lr = optimizer.param_groups[0]['lr']\n for name, scheduler in self.schedulers.items():\n if name == 'generator' and self.opt.generator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n elif name == 'discriminator' and self.opt.discriminator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = optimizer.param_groups[0]['lr']\n print('learning rate %.7f -> %.7f' % (old_lr, lr))\n return",
"def updateLearnRate(\n self, phi, phi_prime, eligibility_trace, discount_factor, nnz, terminal\n ):\n\n if self.learn_rate_decay_mode == \"dabney\":\n # We only update learn_rate if this step is non-terminal; else phi_prime becomes\n # zero and the dot product below becomes very large, creating a very\n # small learn_rate\n if not terminal:\n # Automatic learning rate: [Dabney W. 2012]\n # http://people.cs.umass.edu/~wdabney/papers/alphaBounds.pdf\n candid_learn_rate = np.dot(\n discount_factor * phi_prime - phi, eligibility_trace\n )\n if candid_learn_rate < 0:\n self.learn_rate = np.minimum(\n self.learn_rate, -1.0 / candid_learn_rate\n )\n elif self.learn_rate_decay_mode == \"boyan\":\n self.learn_rate = (\n self.initial_learn_rate\n * (self.boyan_N0 + 1.0)\n / (self.boyan_N0 + (self.episode_count + 1) ** 1.1)\n )\n # divide by l1 of the features; note that this method is only called if phi != 0\n self.learn_rate /= np.sum(np.abs(phi))\n elif self.learn_rate_decay_mode == \"boyan_const\":\n # New little change from not having +1 for episode count\n self.learn_rate = (\n self.initial_learn_rate\n * (self.boyan_N0 + 1.0)\n / (self.boyan_N0 + (self.episode_count + 1) ** 1.1)\n )\n elif self.learn_rate_decay_mode == \"const\":\n self.learn_rate = self.initial_learn_rate\n else:\n self.logger.warn(\"Unrecognized decay mode \")",
"def plot_perf(ax, best_per_lr, learning_rate_updates_epoch, mode=\"loss\"):\n colors = [ \"b\", \"r\", \"g\", \"c\", \"m\", \"y\", \"k\", \"w\"]\n ind = 2*np.arange(len(best_per_lr))\n ybars = [elem[1] for elem in best_per_lr]\n width = 1\n rect = plt.bar(ind, ybars, width, color=colors[0:len(ybars)], alpha=0.5)\n ax.set_ylim([min(ybars)*0.8,max(ybars)*1.2])\n ax.set_ylabel(\"Best models %s\"%mode)\n ax.set_xticks(ind+width*0.5)\n tlabels = [\"Epoch %d\"%best_per_lr[0][0]]\n if len(best_per_lr) > 1:\n for i, elem in enumerate(best_per_lr[1:]):\n tlabels.append(\"Epoch %d\"%(elem[0]+learning_rate_updates_epoch[i]))\n ax.set_xticklabels(tlabels)\n ax.set_yticks([])\n autolabel(ax, rect)",
"def adjust_learning_rate(self):\n out_base_lr = self.args.base_lr\n for param_group in self.optimizer.param_groups:\n in_lr = param_group[\"initial_lr\"]\n out_lr = in_lr\n if self.args.lr_decay_type == \"cos\": # cosine lr schedule\n out_lr *= 0.5 * (1.0 + np.cos(np.pi * self.epoch / self.args.epochs))\n else: # stepwise lr schedule\n for milestone in self.args.lr_step_schedule:\n out_lr *= 0.1 if self.epoch >= milestone else 1.0\n param_group[\"lr\"] = out_lr\n if in_lr == self.args.base_lr:\n out_base_lr = out_lr\n if self.train_logger is not None:\n self.train_logger.scalar_summary(\n \"metrics/%s/epoch\" % self.full_name, self.epoch, step=self.iteration, increment_counter=False\n )\n self.train_logger.scalar_summary(\n \"metrics/%s/lr\" % self.full_name, out_base_lr, step=self.iteration, increment_counter=False\n )\n print(\"Epoch\", self.epoch, \"Learning rate\", out_base_lr)\n return out_base_lr",
"def relabelling(run):\n np.random.seed((run ** 5 + 1323002) % 123123) # np.random.seed() alternatively\n\n Xtr, Str, Xts, Yts = data_cache[dset]\n X_train, X_val, y_train, y_val = train_test_split(Xtr, Str, test_size=prop)\n # clf1 is the first classifier while clf2 is the second\n if dset == 2:\n clf1 = svm.SVC(C=2.5, gamma=0.000225, probability=True, max_iter=max_itera)\n else:\n clf1 = svm.SVC(gammma = 'scale',probability=True, max_iter=max_itera)\n if run == 1:\n print(\"learn pre training model:\")\n clf1.fit(X_train, y_train)\n if run == 1:\n print(\"calculating weighting and fit final model:\")\n bb = clf1.predict_proba(X_train)\n nn = len(y_train)\n ind = np.where(abs(bb[:, 1] - y_train) >= 0.5)\n y_train[ind] = 1 - y_train[ind]\n ind_p = int(nn / 3)\n ind5 = np.hstack((np.argsort(-bb[:, 1])[0:ind_p], np.argsort(-bb[:, 0])[0:ind_p]))\n if dset == 2:\n clf2 = svm.SVC(gamma=0.000225, max_iter=max_itera)\n else:\n clf2 = svm.SVC(gamma=0.00865, max_iter=max_itera)\n clf2.fit(X_train[ind5, :], y_train[ind5])\n return clf2.score(Xts, Yts)",
"def convergence_info(res, parinfo, dof):\n\n if res.status == -16:\n print('status = %s : A parameter or function value has become infinite or an undefined number.' % res.status)\n if -15 <= res.status <= -1:\n print('status = %s : MYFUNCT or iterfunct functions return to terminate the fitting process. ' % res.status)\n if res.status == 0:\n print('status = %s : Improper input parameters.' % res.status)\n if res.status == 1:\n print('status = %s : Both actual and predicted relative reductions in the sum of squares are at most ftol.' % res.status)\n if res.status == 2:\n print('status = %s : Relative error between two consecutive iterates is at most xtol.' % res.status)\n if res.status == 3:\n print('status = %s : Conditions for status = 1 and status = 2 both hold.' % res.status)\n if res.status == 4:\n print('status = %s : The cosine of the angle between fvec and any column of the jacobian is at most gtol in absolute value.' % res.status)\n if res.status == 5:\n print('status = %s : The maximum number of iterations has been reached.' % res.status)\n if res.status == 6:\n print('status = %s : ftol is too small.' % res.status)\n if res.status == 7:\n print('status = %s : xtol is too small.' % res.status)\n if res.status == 8:\n print('status = %s : gtol is too small.' % res.status)\n\n x_red = round((res.fnorm / dof),4)\n print('Iterations: %s' % res.niter)\n print('Value of the summed squared residuals: %s' % res.fnorm)\n print('Reduced chi squared: %s' % x_red)\n print('Fitted parameters with uncertainties:')\n # scaled uncertainties\n pcerror = res.perror * np.sqrt(res.fnorm / dof)\n teff = round(float(res.params[0]),0)\n logg = round(float(res.params[1]),3)\n feh = round(float(res.params[2]),3)\n vt = round(float(res.params[3]),2)\n vmac = round(float(res.params[4]),2)\n vsini = round(float(res.params[5]),1)\n #scaled error\n erteff = round(float(pcerror[0]),0)\n erlogg = round(float(pcerror[1]),3)\n erfeh = round(float(pcerror[2]),3)\n ervt = round(float(pcerror[3]),2)\n ervmac = round(float(pcerror[4]),2)\n ervsini = round(float(pcerror[5]),1)\n # Save only the scaled error\n parameters = [teff, erteff, logg, erlogg, feh, erfeh, vt, ervt, vmac, ervmac, vsini, ervsini, x_red, res.status]\n for i, x in enumerate(res.params):\n print( \"\\t%s: %s +- %s (scaled error)\" % (parinfo[i]['parname'], round(x, 3), round(pcerror[i], 3)))\n #print( \"\\t%s: %s +- %s (scaled error +- %s)\" % (parinfo[i]['parname'], round(x, 3), round(res.perror[i], 3), round(pcerror[i], 3)))\n return parameters",
"def sp_recovery_rate(model_df):\n new_rr_map = {'1+(100)': 0.75,\n '1(95%)': 0.70,\n '1(90%)': 0.65,\n '2(85%)': 0.625,\n '2(80%)': 0.60,\n '2(75%)': 0.55,\n '2(70%)': 0.5,\n '3(65%)': 0.45,\n '3(60%)': 0.4,\n '3(55%)': 0.35,\n '3(50%)': 0.3,\n '4(45%)': 0.285,\n '4(40%)': 0.27,\n '4(35%)': 0.235,\n '4(30%)': 0.20,\n '5(25%)': 0.175,\n '5(20%)': 0.15,\n '5(15%)': 0.10,\n '5(10%)': 0.05,\n '6(5%)': 0.035,\n '6(0%)': 0.02,\n '3H': 0.40,\n '1': 0.65}\n \n LienOne_map = {'AU':0.50,'AT':0.50,'BE':0.50,\n 'CA':0.50,'DK':0.50,'FI':0.50,'FR':0.50,\n 'DE':0.50,'HK':0.50,'IE':0.50,'IS':0.50,\n 'JP':0.50,'LU':0.50,'NL':0.50,'NO':0.50,\n 'PO':0.50,'PT':0.50,'SG':0.50,'ES':0.50,\n 'SE':0.50,'CH':0.50,'GB':0.50,'US':0.50,\n 'BR':0.39,'CZ':0.39,'GR':0.39,'IT':0.39,\n 'MX':0.39,'ZA':0.39,'TR':0.39,'UA':0.39}\n LienTwo_map = {'AU':0.18,'AT':0.18,'BE':0.18,\n 'CA':0.18,'DK':0.18,'FI':0.18,'FR':0.18,\n 'DE':0.18,'HK':0.18,'IE':0.18,'IS':0.18,\n 'JP':0.18,'LU':0.18,'NL':0.18,'NO':0.18,\n 'PO':0.18,'PT':0.18,'SG':0.18,'ES':0.18,\n 'SE':0.18,'CH':0.18,'GB':0.18,'US':0.18,\n 'BR':0.13,'CZ':0.13,'GR':0.13,'IT':0.13,\n 'MX':0.13,'ZA':0.13,'TR':0.13,'UA':0.13}\n \n bond_map = {'US':0.41}\n \n \n # if it the Recovery rate exists lookup in AAA table\n model_df['S&P Recovery Rate (AAA)'] = model_df['S&P Recovery'].map(new_rr_map)\n #map(dict(new_rr[['S&P Recovery Rating\\nand Recovery\\nIndicator of\\nCollateral Obligations','“AAA”']].values))\n \n # doesn't exist, but first lien, use first lien table\n model_df.loc[pd.isna(model_df['S&P Recovery']) & (model_df['Lien Type']== 'First Lien'),'S&P Recovery Rate (AAA)'] =\\\n model_df.loc[pd.isna(model_df['S&P Recovery']) & (model_df['Lien Type']== 'First Lien'),'Issuer Country'].\\\n map(LienOne_map)\n #map(dict(lien[['Country Abv','RR']].values))\n \n \n # doesn't exist, but 2nd lien, use 2nd lien table\n model_df.loc[pd.isna(model_df['S&P Recovery']) & (model_df['Lien Type']== 'Second Lien'),'S&P Recovery Rate (AAA)'] = \\\n model_df.loc[pd.isna(model_df['S&P Recovery']) & (model_df['Lien Type']== 'Second Lien'),'Issuer Country'].\\\n map(LienTwo_map)\n #map(dict(lien[['Country Abv','RR.2nd']].values))\n \n # the bonds\n model_df.loc[pd.isna(model_df['S&P Recovery']) & pd.isna(model_df['Lien Type']),'S&P Recovery Rate (AAA)'] = \\\n model_df.loc[pd.isna(model_df['S&P Recovery']) & pd.isna(model_df['Lien Type']),'Issuer Country'].\\\n map(bond_map)\n #map(dict(bond_table[['Country Abv.1','RR.1']].values))\n\n return model_df",
"def begin_labeling(self):\n\n self.__fit_model()\n\n while True:\n\n self.__get_labels()\n self.__fit_model()\n\n print (\"\\n\")\n for stat in self.statistics:\n print (\"precision: {0} recall: {1}\".format(stat[0], stat[1]))\n\n another_round = input(\"\\nContinue active labeling? (y/n)\\n \")\n\n if another_round.upper() != \"Y\":\n\n break",
"def target_rate(L=102.4, p1=database['K+'], p=75, dec_reg_rate=19591295,\r\n decay_region=65):\r\n tau = p1.tau*1e-3/c\r\n decay_region_time = (decay_region*p1.mass)/(p*c)\r\n dec_reg_start_rate = dec_reg_rate/(1-np.exp(-(decay_region_time/tau)))\r\n return dec_reg_start_rate/(np.exp(-(((L*p1.mass)/(p*c))/tau)))",
"def adjust_learning_rate(opt, optimizer, epoch, F_txt):\n\tif opt.classifier_model == 'Baseline':\n\t\tlr = opt.lr * (0.5 ** (epoch // 30))\n\telse:\n\t\tlr = opt.lr * (0.1 ** (epoch // 10))\n\tprint('Learning rate: %f' %lr)\n\tprint('Learning rate: %f' %lr, file=F_txt)\n\tfor param_group in optimizer.param_groups:\n\t\tparam_group['lr'] = lr",
"def double_linear_con(progress):\n progress *= 2\n eps = 0.125\n if 1 - progress < eps:\n return eps\n return 1 - progress"
] | [
"0.5802661",
"0.57484186",
"0.5591566",
"0.55610377",
"0.554035",
"0.5481976",
"0.541913",
"0.54128134",
"0.5400878",
"0.53928256",
"0.52825266",
"0.5259621",
"0.5238246",
"0.52341664",
"0.52058315",
"0.519785",
"0.5196426",
"0.5191651",
"0.5189148",
"0.51671124",
"0.5163327",
"0.51619494",
"0.515962",
"0.5145193",
"0.5140278",
"0.51300853",
"0.51278037",
"0.5127246",
"0.51116574",
"0.5109257"
] | 0.6630967 | 0 |
This is a small function that returns the convergence order, i.e. the least squares fit to the log of the two passed lists. | def convergence_order(N, err):
import numpy as np
if len(N) != len(err):
raise ValueError('Convergence order args do not have same length')
A = np.ones([len(err), 2])
B = np.ones([len(err), 1])
# least-squares model: log(err) = p*log(N) + c, so the slope p is the convergence order
for i in range( len(N) ) :
A[i,0] = np.log(N[i])
B[i] = np.log(err[i])
x, residuals, rank, singval = np.linalg.lstsq(A, B, rcond=None)
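# x holds [slope, intercept] of the log-log fit; the slope is the convergence order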
return x[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def logfit(N, err):\n import numpy as np\n\n if len(N) != len(err):\n raise ValueError('Convergence order args do not have same length')\n\n A = np.ones([len(err), 2])\n B = np.ones([len(err), 1])\n # ERR = A*N + B\n for i in range( len(N) ) :\n A[i,0] = np.log10(N[i])\n B[i] = np.log10(err[i])\n\n x, residuals, rank, singval = np.linalg.lstsq(A, B, rcond=None)\n\n return x",
"def fitPowerRegressionCurveComparisons(self, xVals0, yVals0):\r\n xValCount = 0\r\n yValCount = 0\r\n if len(xVals0) > 2:\r\n xValCount += int(len(xVals0) / 2) - 1\r\n yValCount += int(len(xVals0) / 2) - 1\r\n else:\r\n return \"regression error\", 0.0\r\n xVals = []\r\n yVals = []\r\n xValIndex = xValCount + 1\r\n yValIndex = yValCount + 1\r\n for i in range(xValIndex, len(xVals0)):\r\n xVals.append(xVals0[i])\r\n for i in range(yValIndex, len(xVals0)):\r\n yVals.append(yVals0[i])\r\n n = len(xVals)\r\n sumLnxLny = 0.0\r\n sumLnx = 0.0\r\n sumLny = 0.0\r\n sumLnx2 = 0.0\r\n sumLny2 = 0.0\r\n for i in range(0, n - 1):\r\n lnx = np.log(xVals[i])\r\n lny = np.log(yVals[i])\r\n sumLnxLny += (lnx * lny)\r\n for i in range(0, n - 1):\r\n lnx = np.log(xVals[i])\r\n sumLnx += lnx\r\n for i in range(0, n - 1):\r\n lny = np.log(yVals[i])\r\n sumLny += lny\r\n for i in range(0, n - 1):\r\n lny = np.log(yVals[i])\r\n sumLny2 += (lny * lny)\r\n for i in range(0, n - 1):\r\n lnx = np.log(xVals[i])\r\n sumLnx2 += (lnx * lnx)\r\n lnxBar = sumLnx / n\r\n lnyBar = sumLny / n\r\n sxx = sumLnx2 - (n * (lnxBar ** 2))\r\n syy = sumLny2 - (n * (lnyBar ** 2))\r\n sxy = sumLnxLny - (n * lnxBar * lnyBar)\r\n b = sxy / sxx\r\n a = pow(np.e, lnyBar - (b * lnxBar))\r\n r = sxy / (np.sqrt(sxx) * np.sqrt(syy))\r\n xx = np.array(xVals)\r\n yy = np.array(yVals)\r\n def power_law(xx, a, b):\r\n return a * np.power(xx, b)\r\n yHats = []\r\n for xPrime in xx:\r\n yHats.append(power_law(xPrime, a, b))\r\n eq = str(f' y = {str(round(a, 4))} (x) ^ {str(round(b, 4))} w/ correlation {str(round(100.0000 * r, 1))} %')\r\n if 'nan' in eq:\r\n eq_nan = 'could not calculate regression\\t\\t'\r\n self.eq = eq_nan\r\n return eq_nan\r\n else:\r\n self.ex_eq = eq\r\n return eq",
"def convergence(n0,l,nt,m,numthreads,display=False):\n# call stats and initialise variables\n qnetm,qmaxm,qvarm = ns.stats(n0,l,nt,m)\n qmax_ave = np.zeros(m)\n qmax_vec = np.zeros(m)\n\n# assign qmax_vec the qmax of qnetn value for n=1->m realizations\n# assign qmax_ave the value of the avegerage over the n realizations of qmax \n\n for n in range(1,m+1):\n qmax_vec[n-1] = float(np.amax(qnetm[:,n-1]))\n qmax_ave[n-1] = np.sum(qmax_vec)/(n)\n \n x = np.arange(1,m+1)\n\n# use polyfit to solve for k and a satisfying qmax_ave = a*m**(-k)\n# reduce problem to log(qmax_ave) = c - k*log(m) (c = log(a), and flip sgn(k) for now)\n\n k, c = np.polyfit(np.log(x),np.log(qmax_ave),1)\n\n# if display flag is true, create log-log plot of qmax_ave vs x=1->m \n\n if display:\n #plt.figure()\n #plt.loglog(x,qmax_ave,'b')\n #plt.loglog(x,np.exp(b+k*x),'r')\n #plt.show()\n \n plt.figure()\n plt.plot(np.log(x),np.log(qmax_ave),'b')\n plt.plot(np.log(x),c + k*np.log(x),'r')\n plt.xlabel('log(x) x=1->m')\n plt.ylabel('log(qmax_ave)')\n plt.title('log-log plot of m against qmax_ave with rate of convergence fit')\n plt.legend(loc='best')\n plt.show()\n\n return -k",
"def plot_convergence_distance_loglog(xs, a, xi, n, coeff_func, func_name, f, b,\n label, name, save=False, dirname=DEFAULT_DIR):\n betas = []\n for x in xs:\n print(x)\n series = legendre_series(x, coeff_func(a))\n degrees = np.arange(n)\n values = np.array([next(series) for _ in degrees])\n errors = np.abs(f(x, a) - values)\n\n a_min = -convergence_rate(x, a, b)\n alpha, beta = convergence_line_log(degrees, errors, a_min)\n betas.append(beta)\n\n # Fit a line\n xi_log = np.log10(xi)\n z = np.polyfit(xi_log, np.log10(betas), 1)\n p = np.poly1d(z)\n\n fig = plt.figure()\n plt.xlabel(r\"$\\xi$\")\n plt.ylabel(rf\"$\\beta({label})$\")\n plt.loglog(xi, np.array(betas), '.', label=r\"$\\beta$\")\n # TODO: improve label, variable names\n plt.loglog(xi, 10 ** p(xi_log),\n label=\"\\n\".join((rf\"$\\rho={-z[0]:.5f}$\", rf\"$D={10**z[1]:.5f}$\")))\n plt.legend()\n\n if save:\n fpath = os.path.join(dirname, \"convergence_distances_loglog\", func_name, str(a))\n os.makedirs(fpath, exist_ok=True)\n plt.savefig(os.path.join(fpath, f\"{name}.png\"))\n else:\n plt.show()\n plt.close(fig)",
"def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients",
"def fit_model(x1, x2, order=None, max_order=10,\r\n criterion=utils.bayesian_information_criterion):\r\n c_old = np.inf\r\n n_process = 2\r\n Ntotal = n_process * x1.shape[-1]\r\n\r\n # If model order was provided as an input:\r\n if order is not None:\r\n lag = order + 1\r\n Rxx = utils.autocov_vector(np.vstack([x1, x2]), nlags=lag)\r\n coef, ecov = alg.lwr_recursion(np.array(Rxx).transpose(2, 0, 1))\r\n\r\n # If the model order is not known and provided as input:\r\n else:\r\n for lag in range(1, max_order):\r\n Rxx_new = utils.autocov_vector(np.vstack([x1, x2]), nlags=lag)\r\n coef_new, ecov_new = alg.lwr_recursion(\r\n np.array(Rxx_new).transpose(2, 0, 1))\r\n order_new = coef_new.shape[0]\r\n c_new = criterion(ecov_new, n_process, order_new, Ntotal)\r\n if c_new > c_old:\r\n # Keep the values you got in the last round and break out:\r\n break\r\n\r\n else:\r\n # Replace the output values with the new calculated values and\r\n # move on to the next order:\r\n c_old = c_new\r\n order = order_new\r\n Rxx = Rxx_new\r\n coef = coef_new\r\n ecov = ecov_new\r\n else:\r\n e_s = (\"Model estimation order did not converge at max_order = %s\"\r\n % max_order)\r\n raise ValueError(e_s)\r\n\r\n return order, Rxx, coef, ecov",
"def fitPowerRegressionCurveExchanges(self, xVals0, yVals0):\r\n xValCount = 0\r\n yValCount = 0\r\n if len(xVals0) > 2:\r\n xValCount += int(len(xVals0) / 2) - 1\r\n yValCount += int(len(xVals0) / 2) - 1\r\n else:\r\n return \"regression error\", 0.0\r\n xVals = []\r\n yVals = []\r\n xValIndex = xValCount + 1\r\n yValIndex = yValCount + 1\r\n for i in range(xValIndex, len(xVals0)):\r\n xVals.append(xVals0[i])\r\n for i in range(yValIndex, len(xVals0)):\r\n yVals.append(yVals0[i])\r\n n = len(xVals)\r\n sumLnxLny = 0.0\r\n sumLnx = 0.0\r\n sumLny = 0.0\r\n sumLnx2 = 0.0\r\n sumLny2 = 0.0\r\n for i in range(0, n - 1):\r\n lnx = np.log(xVals[i])\r\n lny = np.log(yVals[i])\r\n sumLnxLny += (lnx * lny)\r\n for i in range(0, n - 1):\r\n lnx = np.log(xVals[i])\r\n sumLnx += lnx\r\n for i in range(0, n - 1):\r\n lny = np.log(yVals[i])\r\n sumLny += lny\r\n for i in range(0, n - 1):\r\n lny = np.log(yVals[i])\r\n sumLny2 += (lny * lny)\r\n for i in range(0, n - 1):\r\n lnx = np.log(xVals[i])\r\n sumLnx2 += (lnx * lnx)\r\n lnxBar = sumLnx / n\r\n lnyBar = sumLny / n\r\n sxx = sumLnx2 - (n * (lnxBar ** 2))\r\n syy = sumLny2 - (n * (lnyBar ** 2))\r\n sxy = sumLnxLny - (n * lnxBar * lnyBar)\r\n b = sxy / sxx\r\n a = pow(np.e, lnyBar - (b * lnxBar))\r\n r = sxy / (np.sqrt(sxx) * np.sqrt(syy))\r\n xx = np.array(xVals)\r\n yy = np.array(yVals)\r\n def power_law(xx, a, b):\r\n return a * np.power(xx, b)\r\n yHats = []\r\n for xPrime in xx:\r\n yHats.append(power_law(xPrime, a, b))\r\n eq = str(f' y = {str(round(a, 4))} (x) ^ {str(round(b, 4))} w/ correlation {str(round(100.0000 * r, 1))} %')\r\n if 'nan' in eq:\r\n eq_nan = 'could not calculate regression\\t\\t'\r\n self.eq = eq_nan\r\n return eq_nan\r\n else:\r\n self.ex_eq = eq\r\n return eq",
"def estimate_order_of_convergence(abscissae, errors):\n assert len(abscissae) == len(errors)\n if len(abscissae) <= 1:\n raise RuntimeError, \"Need more than one value to guess order of convergence.\"\n\n coefficients = np.polyfit(np.log10(abscissae), np.log10(errors), 1)\n return 10**coefficients[-1], coefficients[-2]",
"def __pll(L,h,x, L2=0):\n if len(x.shape)>1: h = h.reshape(-1,1);\n pll = -np.log(1+np.exp(-2*x*(L.dot(x)+h))).sum(0)\n if L2>0: pll += L2*(L**2).sum()\n return pll",
"def logp(self, xs, ys, **kwargs):\n ind = np.isclose(self.predict(xs, **kwargs),ys)\n axis = tuple(range(1,len(xs.shape)))\n return np.log(np.prod(ind, axis=axis)) # default behavior",
"def compare_lists(l1, l2):\n score = 0\n total = len(l1)\n weight = 110\n\n for item in range(len(l2)):\n if item in range(len(l1)):\n score += math.log(weight/total) * (weight)\n else:\n score += math.log(0.5/total) * (1)\n weight -= 10\n return score",
"def brute_leastsquare_fit(fun, x_data, y_data,weight_data=None,p_names=None,p_min_max_steps_dict=None,\r\n const_params=[], visualize=False):\r\n \r\n if p_names == None or p_min_max_steps_dict==None:\r\n raise Exception ('p_names and p_min_max_steps must be given!'+ \r\n 'structure of p_min_max_steps_dict: {\"pname0\":[min0,max0,brute_steps0]}')\r\n \r\n params = Parameters() ### initialize LMfit parameters\r\n for p_name in p_names:\r\n min_val=p_min_max_steps_dict[p_name][0]\r\n max_val=p_min_max_steps_dict[p_name][1]\r\n steps=p_min_max_steps_dict[p_name][2]\r\n params.add(p_name,value=min_val,\r\n min=min_val,\r\n max=max_val,\r\n brute_step=(max_val-min_val)/(steps-1))\r\n \r\n ### define function to be minimized for fit \r\n \r\n def cost_function_fit(p=params):\r\n def minimize_fun(pars):\r\n \r\n v=pars.valuesdict()\r\n arglist=[]\r\n for p_name in p_names:\r\n arglist.append(v[p_name])\r\n \r\n for const_param in const_params:\r\n arglist.append(const_param)\r\n \r\n ret=np.array((fun(x_data,*arglist)-y_data),dtype=float)\r\n if weight_data is not None:\r\n ret=ret*np.sqrt(weight_data)\r\n return(ret)\r\n brute_result=lmfit.minimize(minimize_fun,params,method='brute',nan_policy='omit')\r\n best_result=copy.deepcopy(brute_result)\r\n for candidate in brute_result.candidates[0:5]:\r\n trial = lmfit.minimize(minimize_fun, params=candidate.params,method='leastsq',nan_policy='omit')\r\n if trial.chisqr < best_result.chisqr:\r\n best_result = trial\r\n \r\n return((best_result,brute_result))\r\n \r\n best_result,brute_result = cost_function_fit()\r\n arg_list=[]\r\n for p_name in p_names:\r\n arg_list.append(best_result.params.valuesdict()[p_name])\r\n for const_param in const_params:\r\n arg_list.append(const_param)\r\n \r\n \r\n if visualize == True:\r\n plot_brute_leastsquares_results(brute_result,leastsq_fit_result=best_result)\r\n plt.figure()\r\n plt.plot(x_data,y_data,label='data',color='blue')\r\n plt.plot(x_data,fun(x_data,*arg_list),label='Fit',color='red')\r\n plt.title(best_result.params.valuesdict())\r\n plt.show()\r\n return (arg_list[0:len(p_names)])",
"def compute_log_likelihood(self, indicators, weights, l2):\n scores, _ = self.predict_probability(self.train_feature_x, weights)\n probs = self.predict_probability(self.train_feature_x, weights)\n lp = np.sum((indicators-1)*scores + np.log(probs)) - l2* np.sum(weights[1:]**2)\n return lp",
"def logrels(rets):\n return np.log(rets + 1)",
"def fit_exp_data(x_vals, y_vals):\n log_vals = []\n for y in y_vals:\n log_vals.append(math.log(y, 2)) #get log base 2\n fit = np.polyfit(x_vals, log_vals, 1)\n return fit, 2",
"def experiment_linear_tradeoff_linf(_):\n adv_norm_type = 'linf'\n dual_norm_type = 'l1'\n # Min l1-norm solution found (norm=0.6876)\n attack_eps = 1/0.6876\n attack_step_dir = 'sign_grad'\n module_name = 'train'\n log_dir = 'runs_linear_tradeoff_%s' % adv_norm_type\n exclude = '*'\n\n d_over_n = [32]\n dim = 100\n num_train = [int(dim / p) for p in d_over_n]\n\n # Config params\n shared_params = []\n shared_params += [\n ('config', './config.py'),\n ('seed', list(range(3))),\n ]\n\n # Data hyper-parameters\n shared_params += [\n ('temperature', 0.0001),\n ('num_test', 500),\n ('dim', dim),\n ('num_train', num_train),\n ]\n\n # Adversarial configuration: test\n shared_params += nameit('adv', [\n ('norm_type', adv_norm_type),\n ('lr', 0.1),\n ('niters', 10),\n # ('eps_iter', attack_eps), # Overwritten by cvxpy\n # ('eps_tot', attack_eps), # Overwritten by cvxpy\n ('pre_normalize', True), # multi attacks\n ('post_normalize', True),\n ('eps_from_cvxpy', True),\n ('step_dir', attack_step_dir),\n ])\n\n # Logging to standard output\n shared_params += [\n ('log_interval', 10000), # 1000),\n ('log_keys', '\\'(\"%s\")\\'' % ('\",\"'.join([\n 'risk/train/zero_one',\n 'risk/train/adv/%s' % adv_norm_type,\n 'weight/linear/norm/%s' % dual_norm_type,\n 'margin/%s' % dual_norm_type,\n ]))),\n # Compare with cvxpy\n ('enable_cvxpy', True),\n ]\n params = []\n\n # reg_coeff = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]\n reg_coeff = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 10]\n # Between 1e-3 and 1e-1 for d/n=10 the adv robustness drops\n reg_coeff += [3e-3, 5e-3, 3e-2, 5e-2, 3e-1, 5e-1]\n\n # Model hyper-parameters\n linear_noreg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', 'none'),\n ])\n linear_reg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', ['w_%s' % dual_norm_type]),\n ('reg_coeff', reg_coeff),\n ])\n\n # Explicit regularization with line search\n # njobs=3*6*20*4*2=2880\n explicit_reg = nameit('optim', [\n ('name', 'fista'),\n ('niters', 10000),\n ('bound_step', True),\n ('step_size', [1, 10, 100, 1000]),\n ])\n params += [OrderedDict(shared_params+linear_reg_model_params+explicit_reg)]\n\n # Adversarial training with line search\n for i in [1] + list(np.arange(0.1, 2, 0.2)): # [0.1, 0.3, 0.5, 0.7, 1, 1.3]:\n adv_train_params = nameit('optim', [\n ('name', 'gd_ls'),\n ('niters', 10000),\n ('bound_step', True),\n ])\n adv_train_params += nameit('optim', nameit('adv_train', [\n ('enable', True),\n ('norm_type', adv_norm_type),\n ('lr', 0.1),\n ('niters', 10), # niters, 1000\n ('pre_normalize', True),\n ('post_normalize', True),\n ('step_dir', attack_step_dir),\n ('eps_iter', float(attack_eps) * i),\n ('eps_tot', float(attack_eps) * i),\n ]))\n params += [OrderedDict(\n shared_params+linear_noreg_model_params+adv_train_params)]\n\n return params, log_dir, module_name, exclude",
"def logsum_pair(logx, logy):\n if logx == logzero():\n return logy\n elif logx > logy:\n return logx + np.log1p(np.exp(logy-logx))\n else:\n return logy + np.log1p(np.exp(logx-logy))",
"def _simple_logistic_regression(x,y,beta_start=None,verbose=False,\n CONV_THRESH=1.e-3,MAXIT=500):\n if len(x) != len(y):\n raise ValueError, \"x and y should be the same length!\"\n if beta_start is None:\n beta_start = NA.zeros(2,x.dtype.char)\n iter = 0; diff = 1.; beta = beta_start # initial values\n if verbose:\n print 'iteration beta log-likliehood |beta-beta_old|' \n while iter < MAXIT:\n beta_old = beta \n p = NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))\n l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likliehood\n s = NA.array([NA.sum(y-p), NA.sum((y-p)*x)]) # scoring function\n # information matrix\n J_bar = NA.array([[NA.sum(p*(1-p)),NA.sum(p*(1-p)*x)],\n [NA.sum(p*(1-p)*x),NA.sum(p*(1-p)*x*x)]])\n beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta\n diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences\n if verbose:\n print iter+1, beta, l, diff\n if diff <= CONV_THRESH: break\n iter = iter + 1\n return beta, J_bar, l",
"def gradient_descent(self, x, y):\n # Initialize weights vector\n self.weights = np.zeros(len(x[0]))\n\n # Storing number of training example in a variable \n n = len(x)\n\n # Initiate variables to keep track of the current and smallest loss recorded\n lowest_loss = sys.float_info.max\n current_loss = sys.float_info.max\n\n # Initiate variables to keep track of step sizes\n norm = sys.float_info.max\n smallest_norm = sys.float_info.max\n\n # Initiate list variable that stores all previous weights\n prev_weights = []\n\n # Initiate list that stores all the errors. \n errors = []\n \n # Variable to keep track of the number of iterations that returns a bigger loss than current loss\n k_loss_iteration = 1\n\n # Learning loop\n for i in range(self.max_iter):\n\n # Append current weights\n prev_weights.append(np.array(self.weights))\n \n # Minimizing Loss Function Error by adjusting weights using Gradient Descent\n self.weights += self.learning_rate * (sum([x[i] * (y[i] - self.logistic_function(self.weights.dot(x[i]))) for i in range(n)]) - 2 * self.l2 * self.weights)\n\n # Compute the error of the Cost Function and store it in a list\n current_loss = self.cost(x,y)\n\n if len(errors) > 1 and current_loss > errors[-1]:\n k_loss_iteration += 1\n else: \n k_loss_iteration = 1\n\n errors.append(current_loss)\n \n # Track smallest loss\n if current_loss < lowest_loss:\n lowest_loss = current_loss\n\n # Compute the L2 Norm of the difference between current weights and previous weights\n norm = np.linalg.norm(self.weights - prev_weights[-1])\n\n # Track smallest step size and set it as error threshold\n if norm < smallest_norm:\n smallest_norm = norm\n\n # If this L2 norm is smaller than the error_threshold it means that it converged, hence we can break. In other words, repeat until the step size is too small\n if self.error_threshold != None and norm < self.error_threshold:\n print(\"Converged after {} iterations!\".format(i))\n break\n\n # stop if error hasn't gone down in k iterations\n if k_loss_iteration >= 10:\n print(k_loss_iteration + \" iterations of loss not decreasing on {}th itertion.\".format(i))\n break\n\n # Log final weights\n print(\"Final norm: \" + str(norm) + \"\\nSmallest step size recorded: \" + str(smallest_norm) + \"\\nFinal error: \" + str(current_loss) + \"\\nLowest error recorded: \" + str(lowest_loss) + \"\\nNumber of epochs: \" + str(len(errors)) + \"\\nFinal weights: \" + str(self.weights))",
"def calculateCoefficientsTrainExp(np.ndarray[double, ndim=2, mode=\"c\"] x_logs not None, np.ndarray[double, ndim=2, mode=\"c\"] derivatives not None, np.ndarray[double, ndim=1] x_log_eigenvals not None, np.ndarray[double, ndim=2, mode=\"c\"] coefficients not None):\n cdef int n, dd, d\n\n n, dd = x_logs.shape[0], x_logs.shape[1]\n d = np.sqrt(dd)\n \n\n out = c_calculateCoefficientsTrainExp (&x_logs[0,0], &derivatives[0,0], &x_log_eigenvals[0], &coefficients[0,0], n, dd, d)\n\n return out",
"def compute_cost(A2, Y, params):\n m = Y.shape[1]\n \n logprobs1 = -np.dot(Y, np.log(A2).T)\n logprobs2 = -np.dot(1-Y, np.log(1-A2).T)\n cost = 1/m * (logprobs1 + logprobs2)\n \n cost = np.asscalar(cost)\n return cost",
"def trainLogRegres(train_x, train_y, opts):\n startTime = time.time() # calculate training time\n\n numSamples, numFeatures = np.shape(train_x)\n alpha = opts['alpha']\n maxIter = opts['maxIter']\n weights = np.ones((numFeatures, 1))\n\n for k in range(maxIter):\n if opts['optimizeType'] == 'stocGradDescent': # stochastic gradient descent\n for i in range(numSamples):\n output = sigmoid(train_x[i, :] * weights)\n loss = train_y[i, 0] - output\n weights = weights + alpha * train_x[i, :].transpose() * loss\n elif opts[\n 'optimizeType'] == 'smoothStocGradDescent': # smooth stochastic gradient descent. randomly select samples to optimize for reducing cycle fluctuations.\n dataIndex = list(range(numSamples))\n for i in range(numSamples):\n alpha = 4.0 / (1.0 + k + i) + 0.01\n randIndex = int(np.random.uniform(0, len(dataIndex)))\n output = sigmoid(train_x[randIndex, :] * weights)\n loss = train_y[randIndex, 0] - output\n weights = weights + alpha * train_x[randIndex, :].transpose() * loss\n del (dataIndex[randIndex])\n print('Congratulations, training complete! Took %fs!' % (time.time() - startTime))\n return weights",
"def logistic_regression(x,y,beta_start=None,verbose=False,CONV_THRESH=1.e-3,\n MAXIT=500):\n if x.shape[-1] != len(y):\n raise ValueError, \"x.shape[-1] and y should be the same length!\"\n try:\n N, npreds = x.shape[1], x.shape[0]\n except: # single predictor, use simple logistic regression routine.\n return _simple_logistic_regression(x,y,beta_start=beta_start,\n CONV_THRESH=CONV_THRESH,MAXIT=MAXIT,verbose=verbose)\n if beta_start is None:\n beta_start = NA.zeros(npreds+1,x.dtype.char)\n X = NA.ones((npreds+1,N), x.dtype.char)\n X[1:, :] = x\n Xt = NA.transpose(X)\n iter = 0; diff = 1.; beta = beta_start # initial values\n if verbose:\n print 'iteration beta log-likliehood |beta-beta_old|' \n while iter < MAXIT:\n beta_old = beta \n ebx = NA.exp(NA.dot(beta, X))\n p = ebx/(1.+ebx)\n l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likeliehood\n s = NA.dot(X, y-p) # scoring function\n J_bar = NA.dot(X*p,Xt) # information matrix\n beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta\n diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences\n if verbose:\n print iter+1, beta, l, diff\n if diff <= CONV_THRESH: break\n iter = iter + 1\n if iter == MAXIT and diff > CONV_THRESH: \n print 'warning: convergence not achieved with threshold of %s in %s iterations' % (CONV_THRESH,MAXIT)\n return beta, J_bar, l",
"def expectation(N,K,log_M):\n\tg0 = log_M[0,0]\n\tg = log_M[1:]\n\ta = forward(g0,g,N,K)\n\tb = backward(g,N,K)\n\tprint \"Forward:\"\n\tprint a\n\tprint \"Backward:\"\n\tprint b\n\t# log-normalizing constant\n\tlogZ = misc.logsumexp(a[N-1,:])\n\n\tE = defaultdict(float)\n\n\t# The first factor needs to be special case'd\n\t# E[ f( y_0 ) ] = p(y_0 | y_[1:N], x) * f(y_0)\n\tc = exp(g0 + b[0,:] - logZ).clip(0.0, 1.0)\n\tfor y in xrange(K):\n\t\tp = c[y]\n\t\tif p < 1e-40: continue # skip really small updates.\n\t\tfor k in f[0, None, y]:\n\t\t\tE[k] += p\n\n\tfor t in xrange(1,N):\n\t\t# vectorized computation of the marginal for this transition factor\n\t\tc = exp((add.outer(a[t-1,:], b[t,:]) + g[t-1,:,:] - logZ)).clip(0.0, 1.0)\n\n\t\tfor yp in xrange(K):\n\t\t\tfor y in xrange(K):\n\t\t\t\t# we can also use the following to compute ``p`` but its quite\n\t\t\t\t# a bit slower than the computation of vectorized quantity ``c``.\n\t\t\t\t#p = exp(a[t-1,yp] + g[t-1,yp,y] + b[t,y] - logZ).clip(0.0, 1.0)\n\t\t\t\tp = c[yp, y]\n\t\t\t\tif p < 1e-40: continue # skip really small updates.\n\t\t\t\t# expectation of this factor is p*f(t, yp, y)\n\t\t\t\tfor k in f[t, yp, y]:\n\t\t\t\t\tE[k] += p\n\n\treturn E",
"def _ls_solver(A, B, warm_start=None):\n # TODO - do conjugate gradient if n is too large\n return np.linalg.lstsq(A.T, B.T)[0].T",
"def get_y_logl(self, y_list):",
"def addlogs(a,b):\n \n if a>b:\n return a + np.log(1+np.exp(b-a))\n else:\n return b + np.log(1+np.exp(a-b))",
"def cost(self, A, b, w):\n f = 0\n if self.glm == 'Gaussian':\n tt = np.dot(A, w) - b\n # nao é loglik mesmo, é só mse\n loglik = 0.5 * np.linalg.norm(tt) ** 2.0\n elif self.glm == 'Poisson':\n xb = np.maximum(np.minimum(np.dot(A, w), 100), -100)#avoid overflow\n loglik = -(b * xb - np.exp(xb)).sum()\n elif self.glm == 'Gamma':\n loglik = 0\n for i in np.arange(0, A.shape[0]):\n loglik += scipy.stats.gamma.logpdf(b[i], 1.0 / np.dot(A[i, :], w))\n elif self.glm == 'Binomial':\n ov_lim = 50\n Xbeta = np.maximum(np.minimum(np.dot(A, w), ov_lim), -ov_lim)#avoid overflow\n loglik = -1 * np.sum(((b * Xbeta) - np.log(1 + np.exp(Xbeta))))\n if self.mean:\n loglik /= float(A.shape[0])\n if not np.isnan(loglik):\n f += loglik\n else:\n print(\"****** WARNING: loglik is nan.\")\n return f",
"def _log_fold_change_pairs(self, idx0, idx1, base):\n logfc = np.zeros(shape=(len(idx0), len(idx1), self._theta_mle.shape[1]))\n for i, xi in enumerate(idx0):\n for j, xj in enumerate(idx1):\n logfc[i, j, :] = self._theta_mle[xi, :] - self._theta_mle[xj, :]\n\n if base == np.e:\n return logfc\n else:\n return logfc / np.log(base)",
"def LikelihoodFromTrace (trace, A, B, prec=1e-14):\n\n if A.shape[0]==1:\n # uniformization based solution\n tr = np.sort(trace)\n lambd = np.max(np.abs(np.diag(B)))\n loglambda = math.log(lambd)\n P = B/lambd + ml.eye(B.shape[0])\n a = np.sum(-B,1)\n eps = max(prec, 10**(math.log(prec)/math.log(10.0) + math.log(lambd)/math.log(10.0)))\n lpoi = -lambd*tr\n logtr = np.log(tr)\n poi = np.exp(lpoi)\n spoi = np.array(poi)\n fx = poi*(A*a)[0,0]\n k = 1\n first = 0\n coeffv = ml.matrix(A)\n maxIter = 10000\n while k<maxIter:\n coeffv = coeffv * P\n lpoi[first:] += loglambda + logtr[first:] - math.log(k)\n poi[first:] = np.exp(lpoi[first:])\n spoi[first:] += poi[first:]\n fx[first:] += poi[first:] * (coeffv*a)[0,0]\n k += 1\n nfirst = (spoi[first:]<1-eps).nonzero()[0]\n if len(nfirst)==0:\n break\n first += nfirst[0]\n return np.sum(np.log(fx))/len(logtr)\n else:\n D0 = A\n D1 = B\n N = D0.shape[0]\n L = len(trace)\n \n # first we calculate matrix e^(D0*x(i))*D1 for each sample\n ix = np.argsort(trace)\n tr = trace[ix]\n lambd = np.max(np.abs(np.diag(D0)))\n loglambda = math.log(lambd)\n P = D0/lambd + ml.eye(N)\n eps = max(prec, 10**(math.log(prec)/math.log(10.0) + math.log(lambd)/math.log(10.0)))\n lpoi = -lambd*tr;\n logtr = np.log(tr)\n poi = np.exp(lpoi)\n spoi = np.array(poi)\n coeffv = ml.matrix(D1)\n fx = np.kron(poi,coeffv)\n k = 1\n first = 0\n maxIter = 10000\n while k<maxIter:\n coeffv = P * coeffv\n lpoi[first:] += loglambda + logtr[first:] - math.log(k)\n poi[first:] = np.exp(lpoi[first:])\n spoi[first:] += poi[first:] \n fx[:,first*N:] += np.kron(poi[first:],coeffv)\n k += 1 \n nfirst = (spoi[first:]<1-eps).nonzero()[0]\n if len(nfirst)==0:\n break\n first += nfirst[0]\n alpha = DTMCSolve ((-D0).I*D1)\n l = np.array(alpha)\n sc = 0\n ixrev = np.argsort(ix)\n for i in range(L):\n l = l.dot(fx[:,ixrev[i]*N:(ixrev[i]+1)*N])\n if i % 10 ==0:\n # sometimes we need to rescale the results to avoid \"nan\"s\n scale = math.ceil(math.log2(np.sum(l)))\n if scale>1:\n l /= 2**scale\n sc += scale\n if scale<-10:\n scale += 10\n l /= 2**scale\n sc += scale\n return (math.log(np.sum(l))+sc*math.log(2)) / len(logtr)"
] | [
"0.6337905",
"0.62787294",
"0.6042211",
"0.5989017",
"0.5960942",
"0.5899967",
"0.58939946",
"0.58498496",
"0.5838031",
"0.5715212",
"0.56407213",
"0.56352633",
"0.5627826",
"0.56239766",
"0.5622977",
"0.55958897",
"0.5592456",
"0.55830264",
"0.55826944",
"0.55535156",
"0.5552824",
"0.5534111",
"0.5528307",
"0.5521143",
"0.5504154",
"0.5493818",
"0.5492777",
"0.5482239",
"0.5462012",
"0.5459982"
] | 0.67130005 | 0 |
This is a small function that returns the logfit, i.e. the least squares fit to the log of the two passed lists. | def logfit(N, err):
import numpy as np
if len(N) != len(err):
raise ValueError('Convergence order args do not have same length')
A = np.ones([len(err), 2])
B = np.ones([len(err), 1])
    # least-squares fit in log-log space: log10(err) = x[0]*log10(N) + x[1], i.e. solve A*x = B
for i in range( len(N) ) :
A[i,0] = np.log10(N[i])
B[i] = np.log10(err[i])
x, residuals, rank, singval = np.linalg.lstsq(A, B, rcond=None)
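    # x[0] is the slope (observed order of convergence), x[1] the intercept in log-log space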
return x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fit_exp_data(x_vals, y_vals):\n log_vals = []\n for y in y_vals:\n log_vals.append(math.log(y, 2)) #get log base 2\n fit = np.polyfit(x_vals, log_vals, 1)\n return fit, 2",
"def logp(self, xs, ys, **kwargs):\n ind = np.isclose(self.predict(xs, **kwargs),ys)\n axis = tuple(range(1,len(xs.shape)))\n return np.log(np.prod(ind, axis=axis)) # default behavior",
"def logsum_pair(logx, logy):\n if logx == logzero():\n return logy\n elif logx > logy:\n return logx + np.log1p(np.exp(logy-logx))\n else:\n return logy + np.log1p(np.exp(logx-logy))",
"def logLikeNormal(fitFn, paramsVec, freqs, data, sigmas):\n #calculate the residual, which should already be weighted in the fitFn\n residual = fitFn(paramsVec, freqs, data, sigmas)\n\n #Return the log-liklihood of a normally distributed residual\n return -0.5*np.sum(np.log(2*np.pi*sigmas**2)+residual**2)",
"def get_y_logl(self, y_list):",
"def addlogs(a,b):\n \n if a>b:\n return a + np.log(1+np.exp(b-a))\n else:\n return b + np.log(1+np.exp(a-b))",
"def sum_log(*args):\n # if all(a == LOG_ZERO for a in args):\n # return LOG_ZERO\n a_max = np.max(args, 0)\n lsp = np.log(np.sum([np.exp(a - a_max) for a in args], 0))\n return a_max + lsp",
"def log_likelihood(parameters):\n if len(copula.bounds_param) == 1:\n params = [parameters]\n else:\n param1, param2 = parameters\n params = [param1, param2]\n logl = -np.sum(np.log(copula.get_pdf(psd_obs[0], psd_obs[1], params)))\n return logl",
"def log_reg(x_train, y_train):\n\n log_reg_classifier = LogisticRegression(max_iter=1000, solver='lbfgs')\n log_reg_classifier.fit(x_train, y_train)\n return log_reg_classifier\n\n # log_reg_classifier.fit(x_train, y_train)",
"def __pll(L,h,x, L2=0):\n if len(x.shape)>1: h = h.reshape(-1,1);\n pll = -np.log(1+np.exp(-2*x*(L.dot(x)+h))).sum(0)\n if L2>0: pll += L2*(L**2).sum()\n return pll",
"def loglf2py(store):\n loglike=0.0\n return loglinear.logl(store['xb'],store['xmatf'], store['beta'],store['yvec'],loglike)",
"def log_likelihood(self, y_list):\n if self.lambda_mat is None:\n raise ValueError(\"Can't compute model likelihood before fitting!\")\n\n # precision prior distribution given precision hyper-parameters\n prec_distr = stats.gamma(a=self.prec_distr[0],\n scale=self.prec_distr[1] ** -1.0)\n\n # likelihood of projection matrix precision priors given\n # precision hyper-parameters\n lambda_logl = np.sum(\n prec_distr.logpdf(self.lambda_mat['alpha']\n / self.lambda_mat['beta'])\n )\n\n # likelihood of projection matrix values given their precision priors\n a_logl = np.sum(\n stats.norm(loc=0, scale=(self.lambda_mat['beta']\n / self.lambda_mat['alpha']))\n .logpdf(self.A_mat['mu'])\n )\n\n # likelihood of latent feature matrix given kernel matrix,\n # projection matrix, and standard deviation hyper-parameter\n h_logl = np.sum(\n stats.norm(loc=self.A_mat['mu'].transpose() @ self.kernel_mat,\n scale=self.sigma_h)\n .logpdf(self.H_mat['mu'])\n )\n\n # likelihood of bias parameter precision priors given\n # precision hyper-parameters\n weight_prior_logl = np.sum(\n prec_distr.logpdf(np.array(self.weight_priors['alpha'])\n / np.array(self.weight_priors['beta']))\n )\n\n # likelihood of bias parameters given their precision priors\n weight_logl = np.sum(\n stats.norm(loc=0, scale=(np.array(self.weight_priors['beta'])\n / np.array(self.weight_priors['alpha'])))\n .logpdf(self.weight_mat['mu'])\n )\n\n # likelihood of predicted outputs given latent features, bias\n # parameters, and latent feature weight parameters\n f_logl = np.sum(\n stats.norm(\n loc=(self.weight_mat['mu'][1:, :].transpose()\n @ self.H_mat['mu']\n + np.vstack(self.weight_mat['mu'][0, :])),\n scale=1).logpdf(self.output_mat['mu'])\n )\n\n # likelihood of actual output labels given class separation margin\n # and predicted output labels\n y_logl = np.sum(self.get_y_logl(y_list))\n\n return (lambda_logl + a_logl + h_logl\n + weight_prior_logl + weight_logl + f_logl + y_logl)",
"def fit_data(self, data):\n d = log(-log(data))\n return dot(d, self._fit_matrix.T)",
"def logit_transform(params, bounds):\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n ret_array = np.ma.array(\n [np.ma.log(np.true_divide((x - a), (b - x))) for x, (a, b) in zip(params, bounds)])\n ret_array.set_fill_value(0)\n return np.ma.filled(ret_array)",
"def classifier_score_from_logits(logits):\n logits.shape.assert_has_rank(2)\n\n # Use maximum precision for best results.\n logits_dtype = logits.dtype\n if logits_dtype != dtypes.float64:\n logits = math_ops.to_double(logits)\n\n p = nn_ops.softmax(logits)\n q = math_ops.reduce_mean(p, axis=0)\n kl = _kl_divergence(p, logits, q)\n kl.shape.assert_has_rank(1)\n log_score = math_ops.reduce_mean(kl)\n final_score = math_ops.exp(log_score)\n\n if logits_dtype != dtypes.float64:\n final_score = math_ops.cast(final_score, logits_dtype)\n\n return final_score",
"def loglike(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n _, cov_re_logdet = np.linalg.slogdet(cov_re)\n\n # The residuals\n expval = np.dot(self.exog, fe_params)\n resid_all = self.endog - expval\n\n likeval = 0.\n\n # Handle the covariance penalty\n if self.cov_pen is not None:\n likeval -= self.cov_pen.func(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty\n if self.fe_pen is not None:\n likeval -= self.fe_pen.func(fe_params)\n\n xvx, qf = 0., 0.\n for k, lab in enumerate(self.group_labels):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n resid = resid_all[self.row_indices[lab]]\n\n # Part 1 of the log likelihood (for both ML and REML)\n ld = _smw_logdet(1., ex_r, ex2_r, cov_re, cov_re_inv,\n cov_re_logdet)\n likeval -= ld / 2.\n\n # Part 2 of the log likelihood (for both ML and REML)\n u = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv, resid)\n qf += np.dot(resid, u)\n\n # Adjustment for REML\n if self.reml:\n mat = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xvx += np.dot(exog.T, mat)\n\n if self.reml:\n likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.\n _,ld = np.linalg.slogdet(xvx)\n likeval -= ld / 2.\n likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.\n likeval += ((self.n_totobs - self.k_fe) *\n np.log(self.n_totobs - self.k_fe) / 2.)\n likeval -= (self.n_totobs - self.k_fe) / 2.\n else:\n likeval -= self.n_totobs * np.log(qf) / 2.\n likeval -= self.n_totobs * np.log(2 * np.pi) / 2.\n likeval += self.n_totobs * np.log(self.n_totobs) / 2.\n likeval -= self.n_totobs / 2.\n\n return likeval",
"def Log(A, B):\n return logm(inv(A).dot(B))",
"def _log_linear_interpolation(predictions):\n log_probs = utils.average_arrays([mx.nd.log(p) for p in predictions])\n return -mx.nd.log(mx.nd.softmax(log_probs))",
"def get_loglikelis(\n self, points: numpy.ndarray | Sequence[numpy.ndarray]\n ) -> numpy.ndarray:\n return numpy.log(numpy.asarray(self.weights)[points])",
"def log_Schechter_log(self, logl, alpha, logls, logl0):\n phi = (logl - logls) * (alpha+1) * np.log(10.) - np.power(10., logl-logls)\n lik = phi.copy()\n lik [logl < logl0] = -1e99\n return lik",
"def double_logits(input_logits):\n if len(input_logits.shape) == 0:\n value_logit = float(input_logits)\n return np.array([1 - value_logit, value_logit])\n\n input_shape = input_logits.shape\n twin_logits = np.ones(input_shape) - input_logits\n\n output_logits = np.stack((twin_logits, input_logits), axis=1)\n\n return output_logits",
"def _log_add(logx: float, logy: float) -> float:\n a, b = min(logx, logy), max(logx, logy)\n if a == -np.inf: # adding 0\n return b\n # Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)\n return math.log1p(math.exp(a - b)) + b # log1p(x) = log(x + 1)",
"def LinearLeastSquaresFit(x,y):\n \n xavg = np.zeros(len(x),float) #<x> average\n xavg = sum(x)/len(x)\n \n yavg = np.zeros(len(y),float) #<y> average\n yavg = sum(y)/len(y)\n \n x2avg = np.zeros(len(x),float) #<x^2> average\n x2avg = sum(x**2)/len(x)\n \n xyavg = np.zeros(len(x),float) #<xy> average\n xyavg = sum(x*y)/len(x)\n \n m = (xyavg - xavg*yavg)/(x2avg-xavg**2) #slope\n b = (x2avg*yavg-xavg*xyavg)/(x2avg-xavg**2) #intercept\n \n d = np.zeros(len(x),float)\n for n in range(len(x)):\n d[n] = y[n] -(m*x[n]+b)\n \n x2 = np.zeros(len(x),float)\n for n in range(len(x)):\n x2[n] = sum(d[n]**2)\n \n \n d2avg = np.zeros(len(d),float) #<d^2> average\n d2avg = sum(x2)/float(len(x))\n \n Dm = sqrt((1/float(len(x)-2))*(d2avg/(x2avg-xavg**2))) #slope error\n Db = sqrt((1/float(len(x)-2))*((d2avg*x2avg)/(x2avg-xavg**2))) # intercept error\n print \"slope=\", m, \"Slope Error=\", Dm,\"Intercept=\", b, \"Intercept Error=\", Db\n return \"slope=\", m, \"Slope Error=\", Dm,\"Intercept=\", b, \"Intercept Error=\",Db",
"def fitPowerRegressionCurveComparisons(self, xVals0, yVals0):\r\n xValCount = 0\r\n yValCount = 0\r\n if len(xVals0) > 2:\r\n xValCount += int(len(xVals0) / 2) - 1\r\n yValCount += int(len(xVals0) / 2) - 1\r\n else:\r\n return \"regression error\", 0.0\r\n xVals = []\r\n yVals = []\r\n xValIndex = xValCount + 1\r\n yValIndex = yValCount + 1\r\n for i in range(xValIndex, len(xVals0)):\r\n xVals.append(xVals0[i])\r\n for i in range(yValIndex, len(xVals0)):\r\n yVals.append(yVals0[i])\r\n n = len(xVals)\r\n sumLnxLny = 0.0\r\n sumLnx = 0.0\r\n sumLny = 0.0\r\n sumLnx2 = 0.0\r\n sumLny2 = 0.0\r\n for i in range(0, n - 1):\r\n lnx = np.log(xVals[i])\r\n lny = np.log(yVals[i])\r\n sumLnxLny += (lnx * lny)\r\n for i in range(0, n - 1):\r\n lnx = np.log(xVals[i])\r\n sumLnx += lnx\r\n for i in range(0, n - 1):\r\n lny = np.log(yVals[i])\r\n sumLny += lny\r\n for i in range(0, n - 1):\r\n lny = np.log(yVals[i])\r\n sumLny2 += (lny * lny)\r\n for i in range(0, n - 1):\r\n lnx = np.log(xVals[i])\r\n sumLnx2 += (lnx * lnx)\r\n lnxBar = sumLnx / n\r\n lnyBar = sumLny / n\r\n sxx = sumLnx2 - (n * (lnxBar ** 2))\r\n syy = sumLny2 - (n * (lnyBar ** 2))\r\n sxy = sumLnxLny - (n * lnxBar * lnyBar)\r\n b = sxy / sxx\r\n a = pow(np.e, lnyBar - (b * lnxBar))\r\n r = sxy / (np.sqrt(sxx) * np.sqrt(syy))\r\n xx = np.array(xVals)\r\n yy = np.array(yVals)\r\n def power_law(xx, a, b):\r\n return a * np.power(xx, b)\r\n yHats = []\r\n for xPrime in xx:\r\n yHats.append(power_law(xPrime, a, b))\r\n eq = str(f' y = {str(round(a, 4))} (x) ^ {str(round(b, 4))} w/ correlation {str(round(100.0000 * r, 1))} %')\r\n if 'nan' in eq:\r\n eq_nan = 'could not calculate regression\\t\\t'\r\n self.eq = eq_nan\r\n return eq_nan\r\n else:\r\n self.ex_eq = eq\r\n return eq",
"def logrels(rets):\n return np.log(rets + 1)",
"def likelihood(mean, logs, x):\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / np.exp(logs * 2.) + GaussianDiag.Log2PI)",
"def _loglike(self, y, f):\n ll = -0.5 * (tf.log(2 * self.variance * np.pi) +\n (y - f)**2 / self.variance)\n return ll",
"def negative_log_likelihood(logits, targets):\n # if we rescale the targets so off is -1 and on is 1\n # then we can multiply through the logits\n # and sigmoid gives us the probabilities :)\n # because 1-sigmoid(x) = sigmoid(-x)\n targets = [(2.0 * targ) - 1.0 for targ in targets]\n probs = [tf.sigmoid(logit * targ) for logit, targ in zip(logits, targets)]\n probs = [tf.reduce_sum(tf.log(prob), reduction_indices=1)\n for prob in probs]\n return -tf.reduce_mean(tf.pack(probs))",
"def get_loglikelis(\n self, points: numpy.ndarray | list[numpy.ndarray] | Sequence[Sequence[float]]\n ) -> numpy.ndarray:\n points = numpy.array(points)\n weight_likelis_list = [\n numpy.log(self.weights[i] * pdf.pdf(points))\n for i, pdf in enumerate(self.pdfs)\n ]\n weight_likelis = numpy.array(weight_likelis_list)\n # (num_weights, num_points) => (num_points, num_weights)\n weight_likelis = weight_likelis.transpose()\n\n # log-sum-exp trick\n max_likeli = numpy.nanmax(weight_likelis, axis=1)\n point_likeli = max_likeli + numpy.log(\n numpy.nansum(numpy.exp(weight_likelis - max_likeli[:, None]), axis=1)\n )\n\n return point_likeli",
"def fit_energylaw(showplots = False):\r\n #Data is from Cosmlc Ray Muon Spectrum In the Atmoephere M. Circella et al 1993 Fig 4\r\n #(at 15KM. conversion from depth to altitude using https://www.engineeringtoolbox.com/air-altitude-pressure-d_462.html)\r\n #Units are GeV/c vs (cm^2 s sr Gev / c) ^ -1\r\n data = np.array([[.4, .025], [.5, .017], [.7, .01], [1, .008], [1.25, .004], [1.8, .003], [2.5, .0015], [5,.00035], [18, .00001]])\r\n xbounds = [.1, 100]\r\n #Fit data to ax^b\r\n data_log = np.log(data)\r\n fits = np.polyfit(data_log[:,0], data_log[:,1], 1)\r\n a = np.exp(fits[1])\r\n b = fits[0]\r\n if(showplots):\r\n fitdata = np.polyfit(data_log[:,0], data_log[:,1], 1,cov=True)\r\n print(fitdata[1])\r\n x = np.linspace(.4, 50, 1000)\r\n plt.scatter(data[:,0], data[:,1], label=\"Data from Circella\")\r\n plt.loglog(x, a * x **b, color=\"green\", label=\"ax^b fit\")\r\n plt.xlabel(\"Muon Energy (GeV/c)\")\r\n plt.ylabel(\"Differential Intensity (cm^2 s sr Gev / c)^-1\")\r\n plt.title(\"Fitting Flux vs Energy at 15km from Circella et al.\")\r\n plt.legend()\r\n plt.show()\r\n f = lambda x: a * x**b\r\n return f, xbounds"
] | [
"0.71505827",
"0.64535785",
"0.63110274",
"0.62836516",
"0.61950827",
"0.613543",
"0.6083297",
"0.60600305",
"0.6027912",
"0.6003056",
"0.5989711",
"0.59861887",
"0.59799457",
"0.5965218",
"0.59623635",
"0.5953407",
"0.5946081",
"0.59381616",
"0.59283286",
"0.59197503",
"0.591844",
"0.5906441",
"0.58887786",
"0.5885081",
"0.5848473",
"0.5848453",
"0.58352387",
"0.5824276",
"0.58128476",
"0.5812461"
] | 0.6784488 | 1 |
Read and plot a 2D wabbit file. Not suitable for 3D data; use ParaView for that. | def plot_wabbit_file( file, savepng=False, savepdf=False, cmap='rainbow', caxis=None,
caxis_symmetric=False, title=True, mark_blocks=True, block_linewidth=1.0,
gridonly=False, contour=False, ax=None, fig=None, ticks=True,
colorbar=True, dpi=300, block_edge_color='k',
block_edge_alpha=1.0, shading='auto',
colorbar_orientation="vertical",
gridonly_coloring='mpirank', flipud=False, fileContainsGhostNodes=False):
import numpy as np
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import h5py
cb = []
# read procs table, if we want to draw the grid only
if gridonly:
fid = h5py.File(file,'r')
# read procs array from file
b = fid['procs'][:]
procs = np.array(b, dtype=float)
if gridonly_coloring in ['refinement-status', 'refinement_status']:
b = fid['refinement_status'][:]
ref_status = np.array(b, dtype=float)
if gridonly_coloring == 'lgt_id':
b = fid['lgt_ids'][:]
lgt_ids = np.array(b, dtype=float)
fid.close()
# read data
time, x0, dx, box, data, treecode = read_wabbit_hdf5( file )
# get number of blocks and blocksize
N, Bs = data.shape[0], data.shape[1:]
# we need these lists to modify the colorscale, as each block usually gets its own
# and we would rather like to have a global one.
h, c1, c2 = [], [], []
if fig is None:
fig = plt.gcf()
fig.clf()
if ax is None:
ax = fig.gca()
# clear axes
ax.cla()
# if only the grid is plotted, we use grayscale for the blocks, and for
# proper scaling we need to know the max/min level in the grid
jmin, jmax = get_max_min_level( treecode )
if gridonly:
#----------------------------------------------------------------------
# Grid data only (CPU distribution, level, or grid only)
#----------------------------------------------------------------------
cm = plt.cm.get_cmap(cmap)
# loop over blocks and plot them individually
for i in range(N):
# draw some other qtys (mpirank, lgt_id or refinement-status)
if gridonly_coloring in ['mpirank', 'cpu']:
color = cm( procs[i]/max(procs) )
elif gridonly_coloring in ['refinement-status', 'refinement_status']:
color = cm((ref_status[i]+1.0) / 2.0)
elif gridonly_coloring == 'level':
level = treecode_level( treecode[i,:] )
if (jmax-jmin>0):
c = 0.9 - 0.75*(level-jmin)/(jmax-jmin)
color = [c,c,c]
else:
color ='w'
elif gridonly_coloring == 'file-index':
color = cm( float(i)/float(N) )
tag = "%i" % (i)
x = Bs[1]/2*dx[i,1]+x0[i,1]
if not flipud:
y = Bs[0]/2*dx[i,0]+x0[i,0]
else:
y = box[0] - Bs[0]/2*dx[i,0]+x0[i,0]
plt.text( x, y, tag, fontsize=6, horizontalalignment='center', verticalalignment='center')
elif gridonly_coloring == 'lgt_id':
color = cm( lgt_ids[i]/max(lgt_ids) )
tag = "%i" % (lgt_ids[i])
x = Bs[1]/2*dx[i,1]+x0[i,1]
if not flipud:
y = Bs[0]/2*dx[i,0]+x0[i,0]
else:
y = box[0] - Bs[0]/2*dx[i,0]+x0[i,0]
plt.text( x, y, tag, fontsize=6, horizontalalignment='center', verticalalignment='center')
elif gridonly_coloring == 'treecode':
color = 'w'
tag = ""
for jj in range(treecode.shape[1]):
if treecode[i,jj] != -1:
tag += "%1.1i" % treecode[i,jj]
print(tag)
x = Bs[1]/2*dx[i,1]+x0[i,1]
if not flipud:
y = Bs[0]/2*dx[i,0]+x0[i,0]
else:
y = box[0] - Bs[0]/2*dx[i,0]+x0[i,0]
plt.text( x, y, tag, fontsize=6, horizontalalignment='center', verticalalignment='center')
elif gridonly_coloring == 'none':
color = 'w'
else:
raise ValueError("ERROR! The value for gridonly_coloring is unkown")
# draw colored rectangles for the blocks
if not fileContainsGhostNodes:
ax.add_patch( patches.Rectangle( (x0[i,1],x0[i,0]), (Bs[1]-1)*dx[i,1], (Bs[0]-1)*dx[i,0],
fill=True, edgecolor=block_edge_color, alpha=block_edge_alpha,
facecolor=color))
else:
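                # file contains ghost nodes: skip the 6 ghost layers on each side so only the block interior is drawn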
ax.add_patch( patches.Rectangle( (x0[i,1]+6*dx[i,1],x0[i,0]+6*dx[i,0]), (Bs[1]-1-6*2)*dx[i,1], (Bs[0]-1-6*2)*dx[i,0],
fill=True, edgecolor=block_edge_color, alpha=block_edge_alpha,
facecolor=color))
cb = None
hplot = None
else:
#----------------------------------------------------------------------
# Plot real data.
#----------------------------------------------------------------------
# loop over blocks and plot them individually
for i in range(N):
if not flipud :
[X, Y] = np.meshgrid( np.arange(Bs[0])*dx[i,0]+x0[i,0], np.arange(Bs[1])*dx[i,1]+x0[i,1])
else:
[X, Y] = np.meshgrid( box[0]-np.arange(Bs[0])*dx[i,0]+x0[i,0], np.arange(Bs[1])*dx[i,1]+x0[i,1])
# copy block data
block = data[i,:,:].copy().transpose()
if contour:
# --- contour plot ----
hplot = ax.contour( Y, X, block, [0.1, 0.2, 0.5, 0.75] )
else:
# --- pseudocolor plot ----
#hplot=plt.pcolormesh(X,X,X)
hplot = ax.pcolormesh( Y, X, block, cmap=cmap, shading=shading )
# use rasterization for the patch we just draw
hplot.set_rasterized(True)
# unfortunately, each patch of pcolor has its own colorbar, so we have to take care
# that they all use the same.
h.append(hplot)
a = hplot.get_clim()
c1.append(a[0])
c2.append(a[1])
if mark_blocks:
# empty rectangle to mark the blocks border
ax.add_patch( patches.Rectangle( (x0[i,1],x0[i,0]), (Bs[1]-1)*dx[i,1], (Bs[0]-1)*dx[i,0],
fill=False, edgecolor=block_edge_color, alpha=block_edge_alpha,
linewidth=block_linewidth))
# unfortunately, each patch of pcolor has its own colorbar, so we have to take care
# that they all use the same.
if caxis is None:
if not caxis_symmetric:
# automatic colorbar, using min and max throughout all patches
for hplots in h:
hplots.set_clim( (min(c1),max(c2)) )
else:
# automatic colorbar, but symmetric, using the SMALLER of both absolute values
c= min( [abs(min(c1)), max(c2)] )
for hplots in h:
hplots.set_clim( (-c,c) )
else:
# set fixed (user defined) colorbar for all patches
for hplots in h:
hplots.set_clim( (min(caxis),max(caxis)) )
# add colorbar, if desired
cb = None
if colorbar:
cb = plt.colorbar(h[0], ax=ax, orientation=colorbar_orientation)
if title:
plt.title( "t=%f Nb=%i Bs=(%i,%i)" % (time,N,Bs[1],Bs[0]) )
if not ticks:
ax.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
        ax.tick_params(
            axis='y',          # changes apply to the y-axis
            which='both',      # both major and minor ticks are affected
            bottom=False,      # ticks along the bottom edge are off
            top=False,         # ticks along the top edge are off
            right=False,       # ticks along the right edge are off
            left=False,        # ticks along the left edge are off
            labelleft=False)   # labels along the left edge are off
# plt.xlim([0.0, box[0]])
# plt.ylim([0.0, box[1]])
ax.axis('tight')
ax.set_aspect('equal')
fig.canvas.draw()
if not gridonly:
if savepng:
plt.savefig( file.replace('h5','png'), dpi=dpi, transparent=True, bbox_inches='tight' )
if savepdf:
plt.savefig( file.replace('h5','pdf'), bbox_inches='tight', dpi=dpi )
else:
if savepng:
plt.savefig( file.replace('.h5','-grid.png'), dpi=dpi, transparent=True, bbox_inches='tight' )
if savepdf:
plt.savefig( file.replace('.h5','-grid.pdf'), bbox_inches='tight' )
return ax,cb,hplot | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r",
"def SimpleArrayPlotHelper(self,filename):\n #levels = np.linspace(-100.0, 9900.0, 100, endpoint=True)\n plt.figure()\n #plt.contourf(orography_field,levels)\n plt.colorbar()\n pts.invert_y_axis()",
"def read_2d(self):\n try:\n self._brep['ti'].mean()\n except:\n self._getBivecSpline()\n \n self.rsig = {}\n for k in self.signals.keys():\n y = self._brep[k]['spline'](self.rho)\n self.rsig[k] = dict([('signal',y), \n ('rho', self.rho)])\n\n self._tosuperclass()\n \n print(\"\\n\")\n print(\"===================\")\n print(\"END READING 2D\")\n print(\"===================\")\n print(\"\\n\")",
"def plot_stream_from_file(strfile,smooth=1.0):\r\n\tme = \"LE_Plot.plot_stream_from_file: \"\r\n\t# if strfile is None: strfile = argv[1]\r\n\ttry:\r\n\t\tA = np.load(strfile+\".npy\"); grd = A.shape[1]\r\n\t\tx,y,gvx,gvy = A[0],A[1],A[2:grd+2],A[grd+2:]\r\n\t\toargs = np.loadtxt(strfile+\".hdr\")\r\n\t\tprint me+\"File found\",strfile+\".npy\"\r\n\texcept IOError:\r\n\t\traise IOError(me+\"File\\n \"+strfile+\".npy\\n not found. Abort.\") \r\n\tplot_stream( x,y,gvx,gvy, np.append(oargs,smooth), strfile )\r\n\treturn",
"def read_flow(filename):\n with open(filename, 'rb') as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print('Magic number incorrect. Invalid .flo file')\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n data = np.fromfile(f, np.float32, count=int(2*w*h))\n # Reshape data into 3D array (columns, rows, bands)\n return np.resize(data, (h[0], w[0], 2))",
"def load_nd2_plane(path:str,frame:str='cyx',axes:str='tz',idx:int=0):\n with ND2Reader(path) as images:\n images.bundle_axes = frame\n images.iter_axes = axes\n img = images[idx]\n return img.squeeze()",
"def plot_file(filename, params):\n\tarr = None\n\twith open(filename) as filep:\n\t\tarr = json.load(filep)\n\tplot_data(arr, params)",
"def file_parser(file_name):\n h = 480\n w = 640\n out = []\n with open(file_name, 'r') as f:\n line_num = 1\n for line in f:\n if line_num < 17:\n # Read to where data starts\n line_num += 1\n continue\n elif line_num > 74:\n break\n # print(list(map(int, line.strip().split(\" \"))))\n vals = line.split()\n # print(list(\"\".join(line)))\n # print(line.split())\n assert(float(vals[2]) < 640)\n assert(float(vals[3]) < 480)\n point = [float(vals[2]) * w, float(vals[3]) * h]\n # print(point)\n out.append(point)\n line_num += 1\n\n out.append([0,0])\n out.append([w-1, 0])\n out.append([0, h-1])\n out.append([w-1, h-2])\n return out",
"def read_flow(filename):\n f = open(filename, 'rb')\n magic = np.fromfile(f, np.float32, count=1)\n data2d = None\n\n if 202021.25 != magic:\n print 'Magic number incorrect. Invalid .flo file'\n raise ValueError\n else:\n w = np.fromfile(f, np.int32, count=1)[0]\n h = np.fromfile(f, np.int32, count=1)[0]\n #print \"Reading %d x %d flo file\" % (h, w)\n data2d = np.fromfile(f, np.float32, count=2 * w * h)\n # reshape data into 3D array (columns, rows, channels)\n data2d = np.resize(data2d, (h, w, 2))\n f.close()\n return data2d",
"def plot_w_from_file(filename='Save_Data_efficiency_vs_w_8_80.txt'):\r\n txt = open(filename)\r\n data = []\r\n for line in txt:\r\n line = line.strip()\r\n line = shlex.split(line)\r\n if len(line) > 0:\r\n data.append(line)\r\n plot_w, length, intensity = [], [], []\r\n for d in data:\r\n plot_w.append(float(d[0]))\r\n length.append(float(d[1]))\r\n intensity.append(float(d[2]))\r\n length, intensity = np.array(length), np.array(intensity)\r\n ratio = 1/(length*intensity)\r\n ratio *= (np.max(intensity)-np.min(intensity))/(np.max(ratio)-np.min(ratio))\r\n ratio += ((np.min(intensity))-np.min(ratio))\r\n fig = plt.figure(figsize=[12, 4])\r\n ax = fig.add_subplot(1, 1, 1)\r\n line1, = ax.plot(plot_w, length, 'r', lw=2, alpha=0.6, label='Target Distance')\r\n ax2 = ax.twinx()\r\n line2, = ax2.plot(plot_w, intensity, 'g', lw=2, alpha=0.6, label='Intensity Required')\r\n line3, = ax2.plot(plot_w, ratio, 'b', lw=2, alpha=0.6, label='Reciprocal Product (no scale)')\r\n ax.set_xlabel('Collimator Width / mm', fontsize=20)\r\n ax.set_ylabel('Target Distance / m', fontsize=20, color='r')\r\n ax2.set_ylabel(r'Intensity / I$_0$', fontsize=20, color='g')\r\n ax.set_xlim(np.min(plot_w), np.max(plot_w))\r\n ax.tick_params(axis='y', colors=line1.get_color())\r\n ax2.tick_params(axis='y', colors=line2.get_color())\r\n# ax.set_ylim(0)\r\n lines = [line1, line2, line3]\r\n ax2.legend(lines, [l.get_label() for l in lines], loc=[0.50, 0.39], fontsize=15)\r\n ax.minorticks_on()\r\n ax2.minorticks_on()\r\n ax.grid()\r\n ax.set_title('Minimum required target distance and proton intensity\\nas a function of beam stopper inner radius', fontsize=16)\r\n plt.show()\r\n fig.savefig(f'Width_vs_Length_Intensity_p_75_E_1e7_Thin_Finer.pdf', bbox_inches='tight')\r\n return",
"def read_2d_analysis_data(f):\n \n data = np.transpose(np.loadtxt(f, dtype=np.float64))\n x = data[0]\n y = data[1]\n\n return x, y",
"def test_read_layout():\n\n layout = read_layout(layout_path=f\"{test_data}/test_mwa.txt\")\n\n assert layout.shape[0] == 3\n assert layout[0][0] == -1.497849999999999966e02\n assert layout[1][0] == 2.658140000000000214e02\n assert layout[2][0] == 3.770110000000000241e02",
"def view(input_file, is_3d, plane, backend, realistic_diameters):\n # pylint: disable=import-outside-toplevel\n is_matplotlib = backend == 'matplotlib'\n if is_matplotlib:\n if is_3d:\n _, ax = matplotlib_utils.get_figure(params={'projection': '3d'})\n plot = partial(matplotlib_impl.plot_morph3d, ax=ax)\n else:\n _, ax = matplotlib_utils.get_figure()\n plot = partial(matplotlib_impl.plot_morph, ax=ax,\n plane=plane, realistic_diameters=realistic_diameters)\n else:\n from neurom.view import plotly_impl\n if is_3d:\n plot = plotly_impl.plot_morph3d\n else:\n plot = partial(plotly_impl.plot_morph, plane=plane)\n\n plot(load_morphology(input_file))\n if is_matplotlib:\n if not is_3d:\n plt.axis('equal')\n plt.show()",
"def plot(path, subjects):\n transformToXYZmm = np.array([[-3.125, 0, 0, 81.250], [0, 3.125, 0, -115.625], [0, 0, 6, -54.000], [0, 0, 0, 1.000]])\n data = data_load.load_data(path, subjects)\n dimx = int(data[0][\"meta\"][\"dimx\"][0])\n dimy = int(data[0][\"meta\"][\"dimy\"][0])\n dimz = int(data[0][\"meta\"][\"dimz\"][0])\n coordToCol = data[0][\"meta\"][\"coordToCol\"][0][0]\n images = {}\n max_val = 0\n voxels = np.load(\"data/general_selected_500_1.npy\")\n directory = os.listdir(\"data/input/\")\n bar = pyprind.ProgBar(len(directory), title='Info extraction and Image Building')\n bar2 = pyprind.ProgBar(len(images.keys()), title='Saving Pictures')\n for file in directory:\n file_name = \"data/input/{}\".format(file)\n fh = open(file_name)\n activation_values = np.asarray(list(map(lambda x: float(x), filter(lambda x: x != '', fh.read().split(\",\")))))\n fh.close()\n plot_matrix = np.zeros((dimx, dimy, dimz))\n for x in range(dimx):\n for y in range(dimy):\n for z in range(dimz):\n indice = coordToCol[x][y][z]\n if indice != 0:\n if indice in list(voxels):\n voxel_indice = list(voxels).index(indice)\n value = activation_values[voxel_indice]\n if abs(value) > max_val:\n max_val = abs(value)\n plot_matrix[x][y][z] = value\n image = nib.Nifti1Image(plot_matrix, transformToXYZmm)\n images[file_name] = image\n bar.update(force_flush=True)\n print(bar)\n for image in images:\n plotting.plot_glass_brain(images[image], display_mode='ortho', vmax=max_val, plot_abs=False, threshold=None, colorbar=True, output_file=\"{}-wom1.png\".format(image))\n bar2.update(force_flush=True)\n print(bar2)",
"def test_2d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_2d\"))\n assert dic['FILE_SIZE'] == 3686400\n assert data.shape == (600, 768)\n assert round(data[0, 40].real, 2) == 28.0\n assert round(data[0, 40].imag, 2) == -286.0\n assert round(data[13, 91].real, 2) == -7279.0\n assert round(data[13, 91].imag, 2) == -17680.0\n write_readback(dic, data)",
"def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)",
"def plot3d(self):\n plot_rupture_wire3d(self)",
"def view(filename):\n n, data, data_dB,sr,ch=inputwav(filename)\n t=np.linspace(0,n/sr,n)\n py.close()\n fig, (ax1) = py.subplots(nrows=1) \n ax1.plot(t[0:n:100],data[0:n:100],'k-',linewidth=1,label=filename)\n ax1.legend(loc=1)\n ax1.set_ylabel('Amplitude (Rel. Bit)')\n ax1.set_xlabel('Time (s)')",
"def _onLoad1DData(self, event):\r\n path = None\r\n dlg = wx.FileDialog(self, \"Choose a file\", os.getcwd(), \"\", \"*.txt\", wx.OPEN)\r\n if dlg.ShowModal() == wx.ID_OK:\r\n path = dlg.GetPath()\r\n mypath = os.path.basename(path)\r\n print mypath\r\n dlg.Destroy()\r\n\r\n file_x = []\r\n file_y = []\r\n file_dy = []\r\n file_dx = []\r\n if not path == None:\r\n self.path =path\r\n input_f = open(path,'r')\r\n buff = input_f.read()\r\n lines = buff.split('\\n')\r\n for line in lines:\r\n try:\r\n toks = line.split()\r\n x = float(toks[0])\r\n y = float(toks[1])\r\n #dx = math.sqrt(x)\r\n dx=1/x\r\n if dx >= x:\r\n dx = 0.9*x\r\n #dy = math.sqrt(y)\r\n dy=1/y\r\n if dy >= y:\r\n dy = 0.9*y\r\n file_x.append(x)\r\n file_y.append(y)\r\n file_dy.append(dy)\r\n file_dx.append(dx)\r\n\r\n except:\r\n print \"READ ERROR\", line\r\n\r\n # Sanity check\r\n if not len(file_x) == len(file_dx):\r\n raise ValueError, \"X and dX have different length\"\r\n if not len(file_y) == len(file_dy):\r\n raise ValueError, \"y and dy have different length\"\r\n # reset the graph before loading\r\n self.graph.reset()\r\n self.file_data.x = file_x\r\n self.file_data.y = file_y\r\n self.file_data.dy = file_dy\r\n #self.file_data.dy = None\r\n\r\n #self.file_data.dx = file_dx\r\n self.file_data.dx = None\r\n\r\n self.file_data.reset_view()\r\n\r\n self.file_data.name = \"Loaded 1D data\"\r\n self.graph.xaxis('\\\\rm{q} ', 'A^{-1}')\r\n self.graph.yaxis(\"\\\\rm{Intensity} \",\"cm^{-1}\")\r\n\r\n # Set the scale\r\n self.set_yscale('log')\r\n self.set_xscale('linear')\r\n #Add the default transformation of x and y into Property Dialog\r\n if self.get_xscale()=='log':\r\n xtrans=\"Log(x)\"\r\n if self.get_xscale()=='linear':\r\n xtrans=\"x\"\r\n if self.get_yscale()=='log':\r\n ytrans=\"Log(y)\"\r\n if self.get_yscale()=='linear':\r\n ytrans=\"y\"\r\n self.setTrans(xtrans,ytrans)\r\n\r\n #Plot the data\r\n self.graph.add(self.file_data)\r\n self. _onEVT_FUNC_PROPERTY()\r\n\r\n #self.graph.render(self)\r\n #self.subplot.figure.canvas.draw_idle()\r",
"def Read_FitsPoints(input_path,fname):\n\twave = []; dwave = []\n\tbreak_ind = 0 \t\n\n\tfilename_fullpath = input_path + fname\n\twith open(filename_fullpath) as f:\n\t\tlines = f.readlines()\n\t\tfor i in xrange(1,len(lines)):\n\t\t\tif re.search('a',lines[i]):\n\t\t\t\tbreak_ind = i -1\n\n\twave = np.loadtxt(filename_fullpath,usecols=[0])\n\tdwave = np.loadtxt(filename_fullpath,usecols=[1])\n\t\n\t# Section b\n\twave_b = wave[:break_ind]; dwave_b = dwave[:break_ind]\n\t\n\t# Section a\n\twave_a = wave[break_ind:]; dwave_a = dwave[break_ind:]\n\t\n\tpl.plot(wave_a,dwave_a,'o',label='Segment a')\n\tpl.plot(wave_b,dwave_b,'o',label='Segment b')\n\tpl.legend(loc='best')\n\tpl.ylim([-0.1,0.1])\n\tpl.xlabel(r'Wavelength $\\AA$')\n\tpl.ylabel(r'$\\Delta \\lambda \\AA$')\n\tpl.savefig(input_path + 'plots/' + fname + '.png')\n\n\tpl.clf()\n\n\treturn [wave_a,dwave_a], [wave_b,dwave_b]",
"def parse_surface_probe(fname, yname):\n\n wall_bdy = \"bottomwall\"\n tauwall_field = \"tau_wall\"\n pressure_field = \"pressure\"\n\n # Indices of wall face in element\n ss_node_ids = np.array([0, 1, 2, 3])\n\n # Read in the Exodus II mesh\n msh = Dataset(fname, \"r\")\n ss_names = get_name_list(msh, \"ss_names\")\n field_names = get_name_list(msh, \"name_nod_var\")\n wall_idx = ss_names.index(wall_bdy)\n tau_idx = field_names.index(tauwall_field)\n pressure_idx = field_names.index(pressure_field)\n\n # Get the coordinates and time\n x = msh.variables[\"coordx\"][:]\n y = msh.variables[\"coordy\"][:]\n time = msh.variables[\"time_whole\"][1:]\n\n # Element mapping and wall node ids\n nids = msh.variables[\"connect1\"][:]\n wall_elems = msh.variables[\"elem_ss%d\" % (wall_idx + 1)][:] - 1\n wall_nids_all = np.unique(nids[np.ix_(wall_elems, ss_node_ids)].flatten()) - 1\n\n # Get tau_wall and pressure on the wall\n tau_wall_all = msh.variables[\"vals_nod_var%d\" % (tau_idx + 1)][:][1:, wall_nids_all]\n pressure_all = msh.variables[\"vals_nod_var%d\" % (pressure_idx + 1)][:][\n 1:, wall_nids_all\n ]\n\n # Keep only the last time step in a dataframe\n df = pd.DataFrame()\n df[\"tau_wall\"] = tau_wall_all[-1, :]\n df[\"pressure\"] = pressure_all[-1, :]\n df[\"time\"] = time[-1]\n df[\"x\"] = x[wall_nids_all]\n df[\"y\"] = y[wall_nids_all]\n print(x[wall_nids_all])\n print(y[wall_nids_all])\n\n # Calculate coefficients\n u0, rho0, mu = utilities.parse_ic(yname)\n dynPres = rho0 * 0.5 * u0 * u0\n df[\"cf\"] = df[\"tau_wall\"] / dynPres\n df[\"cp\"] = df[\"pressure\"] / dynPres\n\n return df",
"def readogle(filename, **kw):\n \n # 2008-12-21 18:53 IJC: Created\n\n f = open(filename, 'r')\n raw = f.readlines()\n f.close()\n\n nstars = len(raw)\n\n raw2 = array([line.split() for line in raw])\n ra = raw2[:,1]\n dec = raw2[:,2]\n xref = raw2[:,3]\n yref = raw2[:,4]\n vmag = raw2[:,5]\n imag = raw2[:,7]\n \n xref = [map(float, [x]) for x in xref]\n yref = [map(float, [y]) for y in yref]\n vmag = [map(float, [v]) for v in vmag]\n imag = [map(float, [i]) for i in imag]\n\n return (ra, dec, xref, yref, vmag, imag)",
"def loadAndPlotDecomp1DMassData(dataFile='movingPointMassData/testPointMassDataDecmp000.pkl'):\n\n # Load in modules to handle the 3D plot (which I still do not well understand)\n from matplotlib.collections import PolyCollection as pc\n from mpl_toolkits.mplot3d import Axes3D\n\n # Load the data back (this is the decomposed version of the 1D moving mass data)\n inputDataFile = open(dataFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n gCenters = dataOut[1] # The centers of the Gaussaians\n\n # Load in the original data (the filename is included in the loaded bit) with is the original 1D analog signal\n inputDataFile = open(dataOut[3], \"rb\")\n dataOrig = pickle.load(inputDataFile) # The original 1D mass movement data\n inputDataFile.close()\n\n # Now I need to plot these things out, iterate over the original 1D mass data.\n for i in range(len(dataOrig[0])):\n\n # Plot out the original data\n plt.figure(1)\n plt.plot(dataOrig[0][i][1], dataOrig[0][i][0])\n\n # Now plot out the decoped bits\n segmentedValues = dataOut[0][i]\n fig = plt.figure(2)\n ax = Axes3D(fig) # Because I am using older version\n verts = []\n for j in range(dataOut[1].size):\n segmentedValues[0, j] = 0\n segmentedValues[-1, j] = 0\n # print(list(zip(segmentedValues[:,i],dArray)))\n verts.append(list(zip(segmentedValues[:, j], dataOrig[0][i][1])))\n poly = pc(verts)\n ax.add_collection3d(poly, gCenters, zdir='y')\n ax.set_xlim3d(0, 1.2)\n ax.set_zlim3d(0, 5)\n ax.set_ylim3d(0, 6)\n plt.show()",
"def deimos_spectrum2D_reader(file_name):\n\n hdulist = fits.open(file_name)\n data = Data(label='2D Spectrum')\n hdulist[1].header['CTYPE2'] = 'Spatial Y'\n wcs = WCS(hdulist[1].header)\n # original WCS has both axes named \"LAMBDA\", glue requires unique component names\n\n data.coords = coordinates_from_wcs(wcs)\n data.header = hdulist[1].header\n data.add_component(hdulist[1].data['FLUX'][0], 'Flux')\n data.add_component(1/np.sqrt(hdulist[1].data['IVAR'][0]), 'Uncertainty')\n return data",
"def read_from(self, filename):\n self.x, self.y = np.loadtxt(filename, unpack=True, usecols=(0, 1))",
"def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()",
"def read_wabbit_hdf5(file, verbose=True, return_iteration=False):\n import h5py\n import numpy as np\n\n if verbose:\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Reading file %s\" % (file) )\n\n fid = h5py.File(file,'r')\n b = fid['coords_origin'][:]\n x0 = np.array(b, dtype=float)\n\n b = fid['coords_spacing'][:]\n dx = np.array(b, dtype=float)\n\n b = fid['blocks'][:]\n data = np.array(b, dtype=float)\n\n b = fid['block_treecode'][:]\n treecode = np.array(b, dtype=float)\n\n # get the dataset handle\n dset_id = fid.get('blocks')\n \n # from the dset handle, read the attributes\n time = dset_id.attrs.get('time')\n iteration = dset_id.attrs.get('iteration')\n box = dset_id.attrs.get('domain-size')\n version=dset_id.attrs.get('version')\n\n\n fid.close()\n\n jmin, jmax = get_max_min_level( treecode )\n N = data.shape[0]\n Bs = data.shape[1:]\n Bs = np.asarray(Bs[::-1]) # we have to flip the array since hdf5 stores in [Nz, Ny, Nx] order\n \n if version == 20200408 or version == 20231602:\n Bs = Bs-1\n #print(\"!!!Warning old (old branch: newGhostNodes) version of wabbit format detected!!!\")\n else:\n print(\"This file includes redundant points\")\n \n if verbose:\n print(\"Time=%e it=%i N=%i Bs[0]=%i Bs[1]=%i Jmin=%i Jmax=%i\" % (time, iteration, N, Bs[0], Bs[1], jmin, jmax) )\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n\n if return_iteration:\n return time, x0, dx, box, data, treecode, iteration[0]\n else:\n return time, x0, dx, box, data, treecode",
"def show_flow(filename):\n flow = read_flow(filename)\n img = flow_to_image(flow)\n plt.imshow(img)\n plt.show()",
"def Plot_Spectrum(Path,borne1 = 0,borne2 = 0) :\n x,y=[],[]\n fs = open(Path, 'r') \n#index_array = 0\n while 1: \n txt = fs.readline()\n if txt =='': \n break\n x.append(float(txt[0:9]))\n y.append(float(txt[10:17]))\n #x[index_array],y[index_array] = float(txt[0:9]),float(txt[10:17])\n #index_array = index_array+1\n \n fs.close()\n x = np.array(x)\n y = np.array(y)\n if ((borne1 == 0) & (borne2 == 0)) :\n pass \n else :\n index_ok = ((x<borne2) & (x>borne1))\n x = x[index_ok]\n y = y[index_ok]\n plt.figure(1)\n plt.plot(x,y)\n plt.xlabel(r\"Nombre d'onde $(cm^{-1})$\")",
"def main(filename: str) -> None:\n # Template from HtDAP, based on composition \n return scatterplot(read(filename))"
] | [
"0.6419704",
"0.5728336",
"0.57164586",
"0.56596476",
"0.558751",
"0.5581649",
"0.55814993",
"0.55613047",
"0.5553209",
"0.5500591",
"0.54784054",
"0.5467062",
"0.5463366",
"0.54342484",
"0.5429662",
"0.53756815",
"0.53702176",
"0.536499",
"0.53567004",
"0.53496575",
"0.5330947",
"0.5328566",
"0.53117794",
"0.52961653",
"0.5294698",
"0.52757686",
"0.52733123",
"0.52602077",
"0.5242986",
"0.5234879"
] | 0.6046627 | 1 |
compute error given two flusi fields | def flusi_error_vs_flusi(fname_flusi1, fname_flusi2, norm=2, dim=2):
import numpy as np
import insect_tools
# read in flusi's reference solution
time_ref, box_ref, origin_ref, data_ref = insect_tools.read_flusi_HDF5( fname_flusi1 )
time, box, origin, data_dense = insect_tools.read_flusi_HDF5( fname_flusi2 )
    if len(data_ref) != len(data_dense):
        raise ValueError("ERROR! Both fields are not at the same resolution")
err = np.ndarray.flatten(data_dense-data_ref)
exc = np.ndarray.flatten(data_ref)
err = np.linalg.norm(err, ord=norm) / np.linalg.norm(exc, ord=norm)
print( "error was e=%e" % (err) )
return err | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error_compute(self):\n self.tt_error = np.linalg.norm(self.rel_error)\n if self.global_rank==0:print('Overall error is::',self.tt_error)\n return {'NMF': self.rel_error, 'tt': self.tt_error}",
"def _compute_error(self,expected_out,actual_out,error_func):\n\n error = error_func(expected_out,actual_out)\n return error",
"def calc_error_dist(self):\n pass",
"def calculate_error(self):\n self.network.index_nodes()\n self._calculate_dist()\n _, relative_error = self._relative_error()\n _, absolute_error = self._absolute_error()\n\n return absolute_error, relative_error",
"def get_global_consistency_error(A,B):\n n = float(A.size)\n\n TP = get_truepos(A,B)\n TN = get_trueneg(A,B)\n FP = get_falsepos(A,B)\n FN = get_falseneg(A,B)\n\n E1 = (FN*(FN+2*TP)/(TP+FN) + (FP*(FP+2*TN))/(TN+FP)) / n\n E2 = (FP*(FP+2*TP)/(TP+FP) + FN*(FN+2*TN)/(TN+FN)) / n\n \n return np.min( [E1, E2] )",
"def __error(self,node_set):\n error=0\n for n in node_set:\n if(n.seq_num!=0):\n error+=LA.norm(n.node_vol-node_set[n.neighbor.parent].node_vol-n.impedance*n.branch_cur)\n #print n.node_vol, '\\n', node_set[n.neighbor.parent].node_vol\n \n return error",
"def error_metric(phi_1, phi_2, spherical=False, xpts=None):\n if spherical:\n return sum(abs(phi_1-phi_2)*(xpts**2))/(2.0*sum(abs(phi_1)*(xpts**2)))\n else:\n return sum(abs(phi_1-phi_2))/(2.0*sum(phi_1))",
"def error(Y, X):\n return (Y - X) ** 2",
"def error(ff2, error2):\n\n\tupper_bound_squared = ff2 + error2\n\tupper_bound = upper_bound_squared ** 0.5\n\tff = ff2 ** 0.5\n\treturn upper_bound - ff",
"def error(self): \n if not self.terminal:\n err = sum([v**2 for v in self.state + self.q[:-1]])\n else:\n err = sum([v**2 for v in LIMITS[:9]] + [1.0 - LIMITS[9]**2])\n err *= (self.max_steps - self.steps)\n return err",
"def _compute_errors(self):\n self.errors = np.sqrt(self.data)\n self.errors[self.errors == 0.] = 1.",
"def error(self, documents):\n ###TODO\n sum_1 = 0.0\n for c_id,clust in self.fin_clust.items():\n n = self.sqnorm(self.means[c_id]) \n sum_1 = sum_1 + sum([self.distance(self.docs[dc],self.means[c_id],n) for dc in clust]) \n return round(sum_1,2)",
"def adjacent_error(self, field, exclude=False):\n\n self.log.info('Running the adjacent error computation for quantity %s', field)\n # If we need to exclude calculate the indices\n if exclude:\n start, end = self.get_slice(self.sims[0])\n excluded = '_excluded'\n else:\n start = 0\n end = None\n excluded = ''\n base = self.sims[0].conf['General']['results_dir']\n errpath = os.path.join(base, 'adjacenterror_%s%s.dat' % (field, excluded))\n with open(errpath, 'w') as errfile:\n self.log.info('Computing adjacent error for sweep %s', base)\n # For all other sims in the groups, compare to best estimate\n # and write to error file\n for i in range(1, self.num_sims):\n # Set reference sim\n ref_sim = self.sims[i]\n # Get the comparison vector\n vecs1, normvec = self.get_comp_vec(ref_sim, field, start, end)\n sim2 = self.sims[i - 1]\n vecs2, normvec2 = self.get_comp_vec(sim2, field, start, end)\n self.log.info(\"Computing adjacent error between numbasis %i and numbasis %i\",\n ref_sim.conf['Simulation'][ 'params']['numbasis'],\n sim2.conf['Simulation']['params']['numbasis'])\n # Get the array containing the magnitude of the difference vector at each point\n # in space\n mag_diff_vec = self.diff_sq(vecs1, vecs2)\n # Check for equal lengths between norm array and diff mag\n # array\n if len(mag_diff_vec) != len(normvec):\n self.log.error(\"The normalization vector has an incorrect number of elements!!!\")\n raise ValueError\n # Error as a percentage should be thkkk square root of the ratio of sum of mag diff vec\n # squared to mag efield squared\n error = np.sqrt(np.sum(mag_diff_vec) / np.sum(normvec))\n # self.log.info(str(error))\n errfile.write('%i,%f\\n' % (sim2.conf['Simulation']['params']['numbasis'], error))\n sim2.clear_data()\n ref_sim.clear_data()",
"def compute_errors(gt, pred, selector):\n gt = gt[selector]\n pred = pred[selector]\n\n thresh = np.maximum((gt / pred), (pred / gt))\n a1 = (thresh < 1.25 ).mean()\n a2 = (thresh < 1.25 ** 2).mean()\n a3 = (thresh < 1.25 ** 3).mean()\n\n rmse = (gt - pred) ** 2\n rmse = np.sqrt(rmse.mean())\n\n rmse_log = (np.log(gt) - np.log(pred)) ** 2\n rmse_log = np.sqrt(rmse_log.mean())\n\n abs_rel = np.mean(np.abs(gt - pred) / gt)\n\n sq_rel = np.mean(((gt - pred) ** 2) / gt)\n\n return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3",
"def _computeError(self, inputs, targets):\n return .5*np.sum((targets-self._pcnfwd(inputs))**2)",
"def abs_rel_err(a, b):\n abs_err = abs(a - b)\n # 1e-8 is to prevent division by zeros.\n # [] is to make sure that if a and b are float16, 1e-8 don't get\n # dowcasted to float16 as that give 0! This would add back the\n # division by zero\n rel_err = abs_err / np.maximum(abs(a) + abs(b), [1e-8])\n # The numpy.asarray are needed as if a or b is a sparse matrix\n # this would result in a numpy.matrix and not a numpy.ndarray\n # and the behave differently causing problem later.\n # In particular a_npy_matrix.flatten().shape == (1, n_element)\n abs_err = np.asarray(abs_err)\n rel_err = np.asarray(rel_err)\n return (abs_err, rel_err)",
"def global_error(self, field, exclude=False):\n\n self.log.info('Running the global error computation for quantity %s', field)\n # If we need to exclude calculate the indices\n if exclude:\n start, end = self.get_slice(self.sims[0])\n excluded = '_excluded'\n else:\n start = 0\n end = None\n excluded = ''\n # base = self.sims[0].conf['General']['base_dir']\n base = self.sims[0].conf['General']['results_dir']\n errpath = os.path.join(base, 'globalerror_%s%s.dat' % (field, excluded))\n with open(errpath, 'w') as errfile:\n self.log.info('Computing global error for sweep %s', base)\n # Set reference sim\n ref_sim = self.sims[-1]\n # Get the comparison vector\n vecs1, normvec = self.get_comp_vec(ref_sim, field, start, end)\n # For all other sims in the groups, compare to best estimate\n # and write to error file\n for i in range(0, self.num_sims - 1):\n sim2 = self.sims[i]\n vecs2, normvec2 = self.get_comp_vec(sim2, field, start, end)\n self.log.info(\"Computing global error between numbasis %i and numbasis %i\",\n ref_sim.conf['Simulation'][ 'params']['numbasis'],\n sim2.conf['Simulation']['params']['numbasis'])\n # Get the array containing the magnitude of the difference vector at each point\n # in space\n mag_diff_vec = self.diff_sq(vecs1, vecs2)\n # Check for equal lengths between norm array and diff mag\n # array\n if len(mag_diff_vec) != len(normvec):\n self.log.error( \"The normalization vector has an incorrect number of elements!!!\")\n raise ValueError\n # Error as a percentage should be the square root of the ratio of sum of mag diff vec\n # squared to mag efield squared\n error = np.sqrt(np.sum(mag_diff_vec) / np.sum(normvec))\n errfile.write('%i,%f\\n' % (sim2.conf['Simulation']['params']['numbasis'], error))\n sim2.clear_data()\n ref_sim.clear_data()",
"def _mer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total",
"def abs_rel_err(a, b):\r\n abs_err = abs(a - b)\r\n rel_err = abs_err / numpy.maximum(abs(a) + abs(b), 1e-8)\r\n # The numpy.asarray are needed as if a or b is a sparse matrix\r\n # this would result in a numpy.matrix and not a numpy.ndarray\r\n # and the behave differently causing problem later.\r\n # In particular a_npy_matrix.flatten().shape == (1, n_element)\r\n abs_err = numpy.asarray(abs_err)\r\n rel_err = numpy.asarray(rel_err)\r\n return (abs_err, rel_err)",
"def _get_error_rate_and_didi(preds, labels, didi, I):\n error = error_rate(preds, labels)\n ct_violation = utils.didi_c(preds, I) - 0.2 * didi\n return error, [ct_violation]",
"def _df_err(self):\n return self.n - self.k - 1",
"def error2(input_, output):\n error(input_, output)\n layers[-1][\"error2\"] = layers[-1][\"error\"].T @ layers[-1][\"error\"]",
"def difference(first, second, rf, rs, years=(1980, 2000),smooth=1, corpus='bok'):\n try:\n a_first = nb_ngram(first, years=years, smooth=smooth, corpus=corpus)\n a_second = nb_ngram(second, years=years, smooth=smooth, corpus=corpus)\n a = a_first.join(a_second) \n b_first = nb_ngram(rf, years=years, smooth=smooth, corpus=corpus)\n b_second = nb_ngram(rs, years=years, smooth=smooth, corpus=corpus)\n if rf == rs:\n b_second.columns = [rs + '2']\n b = b_first.join(b_second)\n s_a = a.mean()\n s_b = b.mean()\n f1 = s_a[a.columns[0]]/s_a[a.columns[1]]\n f2 = s_b[b.columns[0]]/s_b[b.columns[1]]\n res = f1/f2\n except:\n res = 'Mangler noen data - har bare for: ' + ', '.join([x for x in a.columns.append(b.columns)])\n return res",
"def _wer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total",
"def err_func(x,rv,valore,specn,lcrop,models='da2014'):\n tmp = tmp_func(x[0], x[1], rv, specn, lcrop, models)\n if tmp != 1: return abs(tmp[3]-(valore+1.)) #this is quantity that gets minimized \n else: return 1E30",
"def calcErr(dicth,dictl,cdli): \n \n \n errh = np.square((cdli.datain_h - np.dot(dicth, np.transpose(cdli.wh))))\n errl = np.square((cdli.datain_l - np.dot(dictl, np.transpose(cdli.wl))))\n \n \n return [errh, errl]",
"def errorEMat(E1, E2):\n E1_normalized = E1 / E1[2][2];\n E2_normalized = E2 / E2[2][2];\n return torch.norm(E1_normalized - E2_normalized)",
"def error(self,pt,eta):\n return self._data[self.__ptBin(pt)][self.__etaBin(eta)][1]",
"def compareError(original,ultimate):\n compare = [abs(original[i]-ultimate[i]) for i in xrange(len(original))]\n return compare",
"def error(self, X, y):\n ans = self.predict(X)\n return np.sum(np.logical_not(np.equal(ans,y))) / len(X)"
] | [
"0.6367832",
"0.62551594",
"0.61732644",
"0.6122448",
"0.60979503",
"0.60338694",
"0.60156256",
"0.6001351",
"0.59415543",
"0.592916",
"0.5922433",
"0.59000087",
"0.58998704",
"0.5872513",
"0.5853913",
"0.5831379",
"0.58297896",
"0.5827409",
"0.5809093",
"0.57668793",
"0.57640123",
"0.5727406",
"0.57271916",
"0.57125413",
"0.57034624",
"0.57016945",
"0.57001275",
"0.5691763",
"0.5685757",
"0.5620912"
] | 0.65988445 | 0 |
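
The relative error computed by flusi_error_vs_flusi above is the ratio of vector norms ||dense - ref|| / ||ref|| taken over the flattened fields. The following is a minimal self-contained sketch of just that norm ratio on small toy arrays; the toy fields and the helper name relative_error are illustrative only and are not part of the dataset rows.

import numpy as np

def relative_error(ref, dense, norm=2):
    # flatten both fields and take ||dense - ref|| / ||ref|| in the chosen norm
    err = np.ndarray.flatten(dense - ref)
    exc = np.ndarray.flatten(ref)
    return np.linalg.norm(err, ord=norm) / np.linalg.norm(exc, ord=norm)

ref = np.ones((8, 8))
dense = ref + 1e-3 * np.random.rand(8, 8)
print("error was e=%e" % relative_error(ref, dense))
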
Convert a WABBIT grid to a full dense grid in a single matrix. We assume here that interpolation has already been performed, i.e. all blocks are on the same (finest) level. | def to_dense_grid( fname_in, fname_out = None, dim=2 ):
import numpy as np
import insect_tools
import matplotlib.pyplot as plt
# read data
time, x0, dx, box, data, treecode = read_wabbit_hdf5( fname_in )
# convert blocks to complete matrix
field, box = dense_matrix( x0, dx, data, treecode, dim=dim )
# write data to FLUSI-type hdf file
if fname_out:
insect_tools.write_flusi_HDF5( fname_out, time, box, field)
else:
dx = [b/(np.size(field,k)) for k,b in enumerate(box)]
X = [np.arange(0,np.size(field,k))*dx[k] for k,b in enumerate(box)]
return field, box, dx, X | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_sparse_matrix(self, grid, format=None):\n S = self.centered_stencil()\n # print(\"grid :\")\n\n grid = tuple(grid)\n # print(grid)\n if not (np.asarray(S.shape) % 2 == 1).all():\n raise ValueError('all stencil dimensions must be odd')\n\n assert_condition(len(grid) == np.rank(S), ValueError,\n 'stencil rank must equal number of grid dimensions')\n assert_condition(min(grid) >= 1, ValueError,\n 'grid dimensions must be positive')\n\n N_v = np.prod(grid) # number of vertices in the mesh\n N_s = (S != 0).sum() # number of nonzero stencil entries\n\n # diagonal offsets\n diags = np.zeros(N_s, dtype=int)\n\n # compute index offset of each dof within the stencil\n strides = np.cumprod([1] + list(reversed(grid)))[:-1]\n indices = tuple(i.copy() for i in S.nonzero())\n for i,s in zip(indices,S.shape):\n i -= s // 2\n for stride,coords in zip(strides, reversed(indices)):\n diags += stride * coords\n\n #\n data = S[S != 0].repeat(N_v).reshape(N_s, N_v)\n indices = np.vstack(indices).T\n\n # zero boundary connections\n for index,diag in zip(indices,data):\n diag = diag.reshape(grid)\n for n,i in enumerate(index):\n if i > 0:\n s = [ slice(None) ]*len(grid)\n s[n] = slice(0,i)\n diag[s] = 0\n elif i < 0:\n s = [ slice(None) ]*len(grid)\n s[n] = slice(i,None)\n diag[s] = 0\n\n # remove diagonals that lie outside matrix\n mask = abs(diags) < N_v\n if not mask.all():\n diags = diags[mask]\n data = data[mask]\n\n # sum duplicate diagonals\n if len(np.unique(diags)) != len(diags):\n new_diags = np.unique(diags)\n new_data = np.zeros( (len(new_diags),data.shape[1]), dtype=data.dtype)\n for dia,dat in zip(diags,data):\n n = np.searchsorted(new_diags,dia)\n new_data[n,:] += dat\n\n diags = new_diags\n data = new_data\n\n return sprs.dia_matrix((data,diags), shape=(N_v, N_v)).asformat(format)",
"def to_basisgrid(self):\n \n bg = basisgrid.BasisGrid()\n \n for sensor in self.leaves:\n if not isinstance(sensor, sensors.PixelArraySensor):\n raise TypeError('basisgrid representation is only compatible '\n 'with detectors that are entirely comprised of '\n 'PixelArrayElements')\n \n p, s, f = sensor.psf \n bg.add_grid(p, s, f, sensor.shape)\n \n return bg",
"def GLDAS025Cellgrid():\n return GLDAS025Grids(only_land=False)",
"def GLDAS025LandGrid():\n return GLDAS025Grids(only_land=True)",
"def to_basisgrid(self):\n \n bg = basisgrid.BasisGrid()\n asic_shape = (185, 194)\n \n for sensor in self.leaves:\n if not isinstance(sensor, sensors.Cspad2x1):\n raise TypeError('basisgrid representation is only compatible '\n 'with detectors that are entirely comprised of '\n 'PixelArrayElements')\n \n p, s, f = sensor.psf \n \n # add the first ASIC of a 2x1...\n bg.add_grid(p, s, f, (185, 194))\n \n # then translate along the fast-scan dimension and add the second\n # DONT FORGET THE BIG PIXELS!!! (+3 pixels for gap)\n \n bg.add_grid(p + f * 197, s, f, (185, 194))\n \n return bg",
"def test_bilocal(self):\n sv=system_vars_c().init_xyzlike([ [8, [0.0, 0.0, 0.0]], [1, [1.0, 1.0, 1.0] ]])\n atom2rcut=np.array([5.0, 4.0])\n grids = dft.gen_grid.Grids(sv)\n grids.level = 2 # precision as implemented in pyscf\n grids.radi_method=leggauss_ab\n grids.build(atom2rcut=atom2rcut)\n self.assertEqual(len(grids.weights), 20648)",
"def unfold_grid(var):\n if (len(var.shape)==2): # 2-D variable\n work = N.concatenate((N.zeros((var.shape[0],24),float),var),1)\n work[39:68,0:24] = work[39:68,var.shape[1]:]\n work[39:68,var.shape[1]:] = 0.0\n elif (len(var.shape)==3): # 3-D variable\n work = (N.concatenate((N.zeros((var.shape[0],var.shape[1],24),float),\n var),2))\n work[:,39:68,0:24] = work[:,39:68,var.shape[2]:]\n work[:,39:68,var.shape[2]:] = 0.0\n\n return work",
"def unstructured_grid(self, X, Y, Z):\r\n uX = []\r\n uY = []\r\n uZ = []\r\n l = self.size[0] * self.size[1]\r\n self.Grid = vtk.vtkUnstructuredGrid()\r\n for k in range(self.size[2]):\r\n n = 0\r\n for j in range(self.size[1]):\r\n for i in range(self.size[0]):\r\n # SW-B\r\n swb = 8 * k * l + 4 * l + 2 * self.size[0] * (j + 1) + 2 * n\r\n uX.append(X[swb])\r\n uY.append(Y[swb])\r\n uZ.append(Z[swb])\r\n # SE-B\r\n seb = 8 * k * l + 4 * l + 2 * self.size[0] * (j + 1) + 2 * n + 1\r\n uX.append(X[seb])\r\n uY.append(Y[seb])\r\n uZ.append(Z[seb])\r\n # NE-B\r\n neb = 8 * k * l + 4 * l + 2 * self.size[0] * j + 2 * n + 1\r\n uX.append(X[neb])\r\n uY.append(Y[neb])\r\n uZ.append(Z[neb])\r\n # NW-B\r\n nwb = 8 * k * l + 4 * l + 2 * self.size[0] * j + 2 * n\r\n uX.append(X[nwb])\r\n uY.append(Y[nwb])\r\n uZ.append(Z[nwb])\r\n # SW-T\r\n swt = 8 * k * l + 2 * self.size[0] * (j + 1) + 2 * n\r\n uX.append(X[swt])\r\n uY.append(Y[swt])\r\n uZ.append(Z[swt])\r\n # SE-T\r\n sett = 8 * k * l + 2 * self.size[0] * (j + 1) + 2 * n + 1\r\n uX.append(X[sett])\r\n uY.append(Y[sett])\r\n uZ.append(Z[sett])\r\n # NE-T\r\n net = 8 * k * l + 2 * self.size[0] * j + 2 * n + 1\r\n uX.append(X[net])\r\n uY.append(Y[net])\r\n uZ.append(Z[net])\r\n # NW-T\r\n nwt = 8 * k * l + 2 * self.size[0] * j + 2 * n\r\n uX.append(X[nwt])\r\n uY.append(Y[nwt])\r\n uZ.append(Z[nwt])\r\n\r\n n += 1\r\n\r\n # Set points\r\n points = vtk.vtkPoints()\r\n points.SetNumberOfPoints(2 * self.size[0] * 2 * self.size[1] * 2 * self.size[2])\r\n for i in range(len(uX)):\r\n points.SetPoint(i, [uX[i], uY[i], uZ[i]])\r\n self.Grid.SetPoints(points)\r\n\r\n # Set cells\r\n cells = vtk.vtkCellArray()\r\n cell = vtk.vtkHexahedron()\r\n for i in range(len(uX)):\r\n if i > 0 and i % 8 == 0:\r\n cells.InsertNextCell(cell)\r\n cell.GetPointIds().SetId(i % 8, i)\r\n cells.InsertNextCell(cell)\r\n\r\n self.Grid.SetCells(cell.GetCellType(), cells)",
"def _matrix_store_smooth_downhill(self):\n \n import time\n from scipy import sparse as sparse\n from scipy.sparse import linalg as linalgs \n \n\n t = time.clock()\n\n\n size = 0\n for nl in self.neighbour_array_lo_hi:\n size += 3 # len(nl)\n\n row_array = np.empty(size, dtype = int)\n col_array = np.empty(size, dtype = int)\n slope_array = np.zeros(size)\n local_slope_array = np.zeros(64)\n\n\n idx=0 \n for row in range(0, len(self.neighbour_array_lo_hi)): \n neighbours = self.neighbour_array_lo_hi[row] \n npoints = self.tri.points[neighbours]\n\n ## work out (downhill) gradient to (max of three) nearby neighbours\n \n\n for col, column in enumerate(neighbours[0:3]): \n \n delta_h = self.height[column] - self.height[row] \n\n\n if delta_h < 0.0:\n delta_s2 = (self.x[column] - self.x[row])**2 + (self.y[column] - self.y[row])**2\n local_slope_array[col] = ( delta_h**2 / delta_s2 )**5\n\n elif delta_h == 0.0 and self.bmask[row] == False:\n local_slope_array[col] = 1.0e-20\n\n else:\n local_slope_array[col] = 1.0e-20 \n \n # Normalise this so that it conserves mass (note - low points will have no contributions here !) \n \n norm = local_slope_array[0:len(neighbours)].sum()\n if norm != 0.0:\n norm = 1.0 / norm\n\n for col, column in enumerate(neighbours[0:3]): \n row_array[idx] = row\n col_array[idx] = column \n slope_array[idx] = local_slope_array[col] * norm\n\n idx += 1\n\n # We can re-pack this array into a sparse matrix for v. fast computation of downhill operator \n\n slopeCOO = sparse.coo_matrix( (slope_array, (row_array, col_array)) ).T\n slopeMat = slopeCOO.tocsr() \n \n print \"SlopeMat.shape \", slopeMat.shape, size\n\n # slopeNormVec = np.array(slopeMat.sum(axis=1)).T[0]\n # slopeNormVec[slopeNormVec != 0.0] = 1.0 / slopeNormVec[slopeNormVec != 0.0]\n # slopeNormMat = sparse.eye(self.tri.npoints)\n # slopeNormMat.setdiag(slopeNormVec)\n # slopeMat = slopeNormMat.dot(slopeMat)\n\n slopeMat.eliminate_zeros()\n self.smoothDownhillMat = slopeMat\n\n return",
"def block_diag_full(W_):\n assert(W_.ndim == 3)\n bsize = W_.shape[0]\n full = np.concatenate([\n np.concatenate([ np.diag(W_[:,i,j]) for j in range(W_.shape[2]) ], axis=1)\n for i in range(W_.shape[1]) ], axis=0)\n return full",
"def _build_downhill_matrices(self, weight=0.6667):\n\n from scipy import sparse as sparse\n \n\n down_neighbour = np.empty(self.tri.npoints, dtype=np.int)\n\n for node in range (0,self.tri.npoints):\n down_neighbour[node] = self.neighbour_array_lo_hi[node][0]\n\n # Build a matrix of downhill-ness - one entry per node ! \n \n size = self.tri.npoints\n row_array = np.empty(size, dtype = int)\n col_array = np.empty(size, dtype = int)\n down_array = np.ones(size)\n accu_array = np.ones(size)\n\n\n for row in range(0, self.tri.npoints): \n row_array[row] = row\n col_array[row] = down_neighbour[row]\n \n accuMCOO = sparse.coo_matrix( (accu_array, (row_array, col_array)), shape=(size,size) ).T \n\n self.accumulatorMat = accuMCOO.tocsr() \n\n self._build_adjacency_matrix_1()\n self._build_adjacency_matrix_2()\n \n self.downhillMat = weight * self.adjacency1 + (1.0-weight) * self.adjacency2\n\n # A1 = self.downhillMat\n # A2 = self.downhillMat.dot(self.downhillMat)\n # A2a = A1 + A2\n # A4 = A2.dot(A2)\n # A4a = A2a + A2.dot(A2a)\n # A8 = A4.dot(A4)\n # A8a = A4a + A4.dot(A4a)\n # A16 = A8.dot(A8)\n # A16a = A8a + A8.dot(A8a)\n\n # self.downhillMat16 = A16\n # self.downhillMat8 = A8\n # self.downhillMat16a = A16a\n # self.downhillMat8a = A8a\n\n # We make it optional to build these as they are not sparse \n # This cleans up previously stored matrices\n\n self.downhillCumulativeMat = None\n self.sweepDownToOutflowMat = None\n \n return",
"def createDenseUnitsAndGrid(data, thresholdPoints=thresholdPoints, nbBins=nbBins):\n denseUnits1D = []\n grid = [] # this is used for rendering purposes - 绘制网格\n for curDim in range(data.shape[1]):\n minDim = min(data[:, curDim])\n maxDim = max(data[:, curDim])\n binSize = (maxDim - minDim) / nbBins\n points = data[:, curDim]\n g = [] # grid lines for current dimension - 当前特征的网格线\n g.append(minDim)\n for i in range(nbBins):\n endBin = minDim + binSize\n g.append(endBin)\n # Retrieve bin points per dimension\n if i == nbBins - 1: # last bin, make sure all points are included\n binPoints = np.where((points >= minDim) & (points <= maxDim))[0]\n endBin = maxDim\n else:\n binPoints = np.where((points >= minDim) & (points < endBin))[0]\n # Store only dense bins - 仅存储密集的单元\n if len(binPoints) > thresholdPoints:\n denseUnits1D.append([DenseUnit1D(curDim, i, minDim, endBin, binPoints)])\n minDim = endBin\n grid.append(g)\n return denseUnits1D, grid",
"def integration_matrix(grid):\n I_blocks = []\n\n for iseg in range(grid.num_segments):\n i1, i2 = grid.subset_segment_indices['all'][iseg, :]\n indices = grid.subset_node_indices['all'][i1:i2]\n nodes_given = grid.node_stau[indices]\n\n i1, i2 = grid.subset_segment_indices['all'][iseg, :]\n indices = grid.subset_node_indices['all'][i1:i2]\n nodes_eval = grid.node_stau[indices][1:]\n\n _, D_block = lagrange_matrices(nodes_given, nodes_eval)\n I_block = np.linalg.inv(D_block[:, 1:])\n I_blocks.append(I_block)\n\n I = block_diag(*I_blocks)\n\n return I",
"def fullGrid(state):\n return not ((state[:, :, 0] + state[:, :, 1]) == 0).any()",
"def _build_adjacency_matrix_1(self):\n\n from scipy import sparse as sparse\n \n down_neighbour = np.empty(self.tri.npoints)\n\n for node in range (0,self.tri.npoints):\n down_neighbour[node] = self.neighbour_array_lo_hi[node][0]\n\n # Build a matrix of downhill-ness - one entry per node ! \n \n size = self.tri.npoints\n row_array = np.empty(size)\n col_array = np.empty(size)\n down_array = np.ones(size)\n\n # Catch cases where node is local low point (i.e. it is its own low neighbour)\n\n for row in range(0, self.tri.npoints): \n row_array[row] = row\n col_array[row] = down_neighbour[row]\n if row == down_neighbour[row]:\n down_array[row] = 0.0\n \n\n downMCOO = sparse.coo_matrix( (down_array, (row_array, col_array)), shape=(size,size) ).T \n\n self.adjacency1 = downMCOO.tocsr() \n\n # Catch pathological cases - sometimes if there is a flat spot on the boundary, then \n # the filling method above will produce a non-square matrix. This is caused by\n # repetition of values in the COO list which are summed on conversion.\n\n if downMCOO.shape[0] != downMCOO.shape[1]:\n # This approach works but is a lot slower\n\n print \"\"\"\n Warning: the downhill matrices require a slow build method. This is probably\n Because there are degeneracies in the slope - particularly at the boundaries\n A small random perturbation is usually enough to fix this problem\n \"\"\"\n downMat = sparse.lil_matrix((size, size))\n\n for row in range(0, self.tri.npoints): \n downMat[down_neighbour[row],row] = 1.0\n\n for row in range(0, self.tri.npoints): \n if down_neighbour[row] == row:\n downMat[row,row] = 0.0\n \n self.adjacency1 = downMat.T.tocsr() \n \n return",
"def get_hardwired_speed_weights(self):\n \n phase_shift=self.speed_phase_shift\n \n # row 1 has the weights of speed cells to grid cell 1\n self.W_speed_east=np.zeros_like(self.W_ee) \n self.W_speed_west=np.zeros_like(self.W_ee) \n self.W_speed_north=np.zeros_like(self.W_ee) \n self.W_speed_south=np.zeros_like(self.W_ee) \n\n if self.use_eight_directions is True:\n self.W_speed_north_east=np.zeros_like(self.W_ee) \n self.W_speed_north_west=np.zeros_like(self.W_ee) \n self.W_speed_south_east=np.zeros_like(self.W_ee) \n self.W_speed_south_west=np.zeros_like(self.W_ee) \n\n\n for phase_idx,phase in enumerate(self.gp.phases):\n shifted_north_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/2.),self.gp.phases)\n shifted_south_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/2.),self.gp.phases)\n shifted_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(0),self.gp.phases)\n shifted_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi),self.gp.phases)\n\n self.W_speed_north[phase_idx,:]=self.W_ee[shifted_north_phase_idx,:]\n self.W_speed_south[phase_idx,:]=self.W_ee[shifted_south_phase_idx,:]\n self.W_speed_east[phase_idx,:]=self.W_ee[shifted_east_phase_idx,:]\n self.W_speed_west[phase_idx,:]=self.W_ee[shifted_west_phase_idx,:] \n \n if self.use_eight_directions is True:\n shifted_north_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/4),self.gp.phases)\n shifted_north_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi*3/4),self.gp.phases)\n shifted_south_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/4),self.gp.phases)\n shifted_south_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi*3/4),self.gp.phases)\n \n self.W_speed_north_east[phase_idx,:]=self.W_ee[shifted_north_east_phase_idx,:]\n self.W_speed_north_west[phase_idx,:]=self.W_ee[shifted_north_west_phase_idx,:]\n self.W_speed_south_east[phase_idx,:]=self.W_ee[shifted_south_east_phase_idx,:]\n self.W_speed_south_west[phase_idx,:]=self.W_ee[shifted_south_west_phase_idx,:]",
"def _compute_bc_space_data(\n grid, bary_grid, coarse_space, truncate_at_segment_edge, swapped_normals\n):\n from bempp.api.grid.grid import enumerate_vertex_adjacent_elements\n from scipy.sparse import coo_matrix\n\n coarse_support = _np.zeros(grid.entity_count(0), dtype=_np.bool_)\n coarse_support[coarse_space.support_elements] = True\n\n if not truncate_at_segment_edge:\n for global_dof_index in range(coarse_space.global_dof_count):\n local_dofs = coarse_space.global2local[global_dof_index]\n edge_index = grid.data().element_edges[local_dofs[0][1], local_dofs[0][0]]\n for v in range(2):\n vertex = grid.data().edges[v, edge_index]\n start = grid.vertex_neighbors.indexptr[vertex]\n end = grid.vertex_neighbors.indexptr[vertex + 1]\n for cell in grid.vertex_neighbors.indices[start:end]:\n coarse_support[cell] = True\n\n coarse_support_elements = _np.array([i for i, j in enumerate(coarse_support) if j])\n number_of_support_elements = len(coarse_support_elements)\n\n bary_support_elements = 6 * _np.repeat(coarse_support_elements, 6) + _np.tile(\n _np.arange(6), number_of_support_elements\n )\n\n support = _np.zeros(bary_grid.number_of_elements, dtype=_np.bool_)\n support[bary_support_elements] = True\n\n bary_support_size = len(bary_support_elements)\n\n bary_vertex_to_edge = enumerate_vertex_adjacent_elements(\n bary_grid, bary_support_elements, swapped_normals\n )\n\n edge_vectors = (\n bary_grid.vertices[:, bary_grid.edges[0, :]]\n - bary_grid.vertices[:, bary_grid.edges[1, :]]\n )\n\n edge_lengths = _np.linalg.norm(edge_vectors, axis=0)\n\n normal_multipliers = _np.repeat(coarse_space.normal_multipliers, 6)\n local2global = _np.zeros((bary_grid.number_of_elements, 3), dtype=\"uint32\")\n local_multipliers = _np.zeros((bary_grid.number_of_elements, 3), dtype=\"uint32\")\n\n local2global[support] = _np.arange(3 * bary_support_size).reshape(\n bary_support_size, 3\n )\n\n local_multipliers[support] = 1\n\n coarse_dofs = []\n bary_dofs = []\n values = []\n\n for global_dof_index in range(coarse_space.global_dof_count):\n local_dofs = coarse_space.global2local[global_dof_index]\n edge_index = grid.data().element_edges[local_dofs[0][1], local_dofs[0][0]]\n neighbors = grid.edge_neighbors[edge_index]\n other = neighbors[1] if local_dofs[0][0] == neighbors[0] else neighbors[0]\n if coarse_space.local_multipliers[local_dofs[0][0], local_dofs[0][1]] > 0:\n lower = local_dofs[0][0]\n upper = other\n else:\n lower = other\n upper = local_dofs[0][0]\n vertex1, vertex2 = grid.data().edges[:, edge_index]\n # Re-order the vertices so that they appear in anti-clockwise\n # order.\n for local_index, vertex_index in enumerate(grid.data().elements[:, upper]):\n if vertex_index == vertex1:\n break\n if vertex2 == grid.data().elements[(local_index - 1) % 3, upper]:\n vertex1, vertex2 = vertex2, vertex1\n\n # Get the local indices of vertex1 and vertex2 in upper and lower\n local_vertex1 = -1\n for index, value in enumerate(grid.data().elements[:, upper]):\n if value == vertex1:\n local_vertex1 = index\n break\n else:\n local_vertex1 = -1\n\n for index, value in enumerate(grid.data().elements[:, lower]):\n if value == vertex2:\n local_vertex2 = index\n break\n else:\n local_vertex2 = -1\n\n for vertex_index, bary_element, sign in [\n (vertex1, 6 * upper + 2 * local_vertex1, -1.0),\n (vertex2, 6 * lower + 2 * local_vertex2, 1.0),\n ]:\n # Find the reference element index in elements adjacent to that vertex\n for ind, elem in enumerate(bary_vertex_to_edge[vertex_index]):\n if bary_element == elem[0]:\n break\n\n 
# Now get all the relevant edges starting to count above\n # ind\n num_bary_elements = len(bary_vertex_to_edge[vertex_index])\n vertex_edges = []\n for index in range(num_bary_elements):\n elem_edge_pair = bary_vertex_to_edge[vertex_index][\n (index + ind) % num_bary_elements\n ]\n for n in range(1, 3):\n vertex_edges.append((elem_edge_pair[0], elem_edge_pair[n]))\n\n # We do not want the reference edge part of this list\n vertex_edges.pop(0)\n vertex_edges.pop(-1)\n\n # We now have a list of edges associated with the vertex counting from edge\n # after the reference edge onwards in anti-clockwise order. We can now\n # assign the coefficients\n\n nc = num_bary_elements // 2 # Number of elements on coarse grid\n # adjacent to vertex.\n\n count = 0\n for index, edge in enumerate(vertex_edges):\n if index % 2 == 0:\n count += 1\n elem_index, local_edge_index = edge[:]\n edge_length = edge_lengths[\n bary_grid.data().element_edges[local_edge_index, elem_index]\n ]\n bary_dofs.append(local2global[elem_index, local_edge_index])\n coarse_dofs.append(global_dof_index)\n values.append(sign * (nc - count) / (2 * nc * edge_length))\n sign *= -1\n\n # Now process the tangential rwgs close to the reference edge\n\n # Get the associated barycentric elements and fill the coefficients in\n # the matrix.\n\n bary_upper_minus = 6 * upper + 2 * local_vertex1\n bary_upper_plus = 6 * upper + 2 * local_vertex1 + 1\n bary_lower_minus = 6 * lower + 2 * local_vertex2\n bary_lower_plus = 6 * lower + 2 * local_vertex2 + 1\n\n # The edge that we need always has local edge index 2.\n # Can compute the edge length now.\n\n edge_length_upper = edge_lengths[\n bary_grid.data().element_edges[2, bary_upper_minus]\n ]\n edge_length_lower = edge_lengths[\n bary_grid.data().element_edges[2, bary_lower_minus]\n ]\n\n # Now assign the dofs in the arrays\n coarse_dofs.append(global_dof_index)\n coarse_dofs.append(global_dof_index)\n coarse_dofs.append(global_dof_index)\n coarse_dofs.append(global_dof_index)\n\n bary_dofs.append(local2global[bary_upper_minus, 2])\n bary_dofs.append(local2global[bary_upper_plus, 2])\n bary_dofs.append(local2global[bary_lower_minus, 2])\n bary_dofs.append(local2global[bary_lower_plus, 2])\n\n values.append(1.0 / (2 * edge_length_upper))\n values.append(-1.0 / (2 * edge_length_upper))\n values.append(-1.0 / (2 * edge_length_lower))\n values.append(1.0 / (2 * edge_length_lower))\n\n nentries = len(coarse_dofs)\n np_coarse_dofs = _np.zeros(nentries, dtype=_np.uint32)\n np_bary_dofs = _np.zeros(nentries, dtype=_np.uint32)\n np_values = _np.zeros(nentries, dtype=_np.float64)\n\n np_coarse_dofs[:] = coarse_dofs\n np_bary_dofs[:] = bary_dofs\n np_values[:] = values\n\n dof_transformation = coo_matrix(\n (np_values, (np_bary_dofs, np_coarse_dofs)),\n shape=(3 * bary_support_size, coarse_space.global_dof_count),\n dtype=_np.float64,\n ).tocsr()\n\n return (\n dof_transformation,\n support,\n normal_multipliers,\n local2global,\n local_multipliers,\n )",
"def convert_to_explicit_structured_grid(grid):\n converter = vtkUnstructuredGridToExplicitStructuredGrid()\n converter.SetInputData(grid)\n converter.SetInputArrayToProcess(0, 0, 0, 1, 'BLOCK_I')\n converter.SetInputArrayToProcess(1, 0, 0, 1, 'BLOCK_J')\n converter.SetInputArrayToProcess(2, 0, 0, 1, 'BLOCK_K')\n converter.Update()\n return converter.GetOutput()",
"def BlockToMatrix(self):\n for h in range(height):\n for w in range(width):\n if self.matrix[h][w] == 2:\n self.matrix[h][w] = 0\n for i in self.coords:\n self.matrix[i[1]][i[0]] = 2",
"def ulab_bilinear_interpolation():\n GRID_DATA[1::2, ::2] = SENSOR_DATA[:-1, :]\n GRID_DATA[1::2, ::2] += SENSOR_DATA[1:, :]\n GRID_DATA[1::2, ::2] /= 2\n GRID_DATA[::, 1::2] = GRID_DATA[::, :-1:2]\n GRID_DATA[::, 1::2] += GRID_DATA[::, 2::2]\n GRID_DATA[::, 1::2] /= 2",
"def _build_adjacency_matrix_2(self):\n\n from scipy import sparse as sparse\n \n down_neighbour = np.empty(self.tri.npoints)\n down_neighbour1 = np.empty(self.tri.npoints)\n\n for node in range (0,self.tri.npoints):\n down_neighbour[node] = self.neighbour_array_lo_hi[node][0]\n down_neighbour1[node] = self.neighbour_array_lo_hi[node][1]\n\n # Build a matrix of downhill-ness - one entry per node ! \n \n size = self.tri.npoints\n row_array = np.empty(size)\n col_array = np.empty(size)\n down_array = np.ones(size)\n\n # Catch cases where node is local low point (i.e. it is its own low neighbour)\n for row in range(0, self.tri.npoints): \n row_array[row] = row\n col_array[row] = down_neighbour1[row]\n if row == down_neighbour[row]:\n down_array[row] = 0.0 \n if row == down_neighbour1[row]:\n col_array[row] = down_neighbour[row]\n\n\n downMCOO = sparse.coo_matrix( (down_array, (row_array, col_array)), shape=(size,size) ).T \n self.adjacency2 = downMCOO.tocsr() \n\n # Catch pathological cases - sometimes if there is a flat spot on the boundary, then \n # the filling method above will produce a non-square matrix. This is caused by\n # repetition of values in the COO list which are summed on conversion.\n\n if downMCOO.shape[0] != downMCOO.shape[1]:\n # This approach works but is a lot slower\n\n print \"\"\"\n Warning: the downhill matrices require a slow build method. This is probably\n Because there are degeneracies in the slope - particularly at the boundaries\n A small random perturbation is usually enough to fix this problem\n \"\"\"\n downMat = sparse.lil_matrix((size, size))\n\n for row in range(0, self.tri.npoints): \n downMat[down_neighbour[row],row] = 1.0\n\n for row in range(0, self.tri.npoints): \n if row == down_neighbour[row] or row == down_neighbour1[row]:\n downMat[row,row] = 0.0\n \n self.adjacency2 = downMat.T.tocsr() \n\n return",
"def getGrid(x,y,w,h,x_step=1, y_step=1):\n X,Y = np.mgrid[x:x+w:x_step, y:y+h:y_step]\n return np.array(np.vstack((X.flatten(),Y.flatten())).transpose(), dtype=np.float32)",
"def transition_function(grid, neighbourstates, neighbourcounts, decay_grid,\n water_decay_grid):\n\n global water_counter\n global ignition_grid\n neighbourstates = np.array(neighbourstates)\n init_grid = initial_grid.astype(int)\n ig_grid = np.array(ignition_grid)\n windspeed_ignition_modifiers = wind_speed_rvalue(\"NE\", 10)\n new_ig_grid = []\n for i, row in enumerate(grid):\n new_ig_grid.append([\n ignite(cell, neighbourstates[:, i, j],\n windspeed_ignition_modifiers) for j, cell in enumerate(row)\n ])\n new_ig_grid = np.array(new_ig_grid)\n started_to_burn = []\n for i, row in enumerate(grid):\n started_to_burn.append([\n started_burning(cell, ig_grid[i, j], new_ig_grid[i, j])\n for j, cell in enumerate(row)\n ])\n grid[started_to_burn] = START_BURN\n ig_grid = np.add(new_ig_grid, ig_grid)\n full_burn = []\n for i, row in enumerate(grid):\n full_burn.append([\n fully_burning(cell, ig_grid[i, j], decay_grid[i, j])\n for j, cell in enumerate(row)\n ])\n grid[full_burn] = BURNING\n end_burning = []\n for i, row in enumerate(grid):\n end_burning.append([\n ending_burn(cell, decay_grid[i, j], decay_values[int(\n initial_grid[i, j])]) for j, cell in enumerate(row)\n ])\n grid[end_burning] = END_BURN\n decay_grid[(grid == BURNING) | (grid == END_BURN)] -= 1\n burnt_out = (decay_grid == 0) # find those which have decayed to 0\n grid[(decay_grid == 0\n )] = BURNT #set all that have decayed to zero to BURNT(7)\n water_counter += 1\n\n if (water_counter == 100):\n grid[120:160, 80:120] = initial_grid[120:160, 80:120]\n water_decay_grid[(grid != LAKE)] -= 1 # take one off their decay value\n grid[(water_decay_grid == 0)] = BURNT # switch their state to 5\n ignition_grid = ig_grid\n return grid",
"def initialize_weights_and_bias(self, X_train):\n n_samples, n_features = np.shape(X_train)\n n_output = 1 \n \n # This is the numeber of gridcells and we want to make one prediction pr cell. \n # It this doesn't work calculate the number of griddcells.\n\n self.b_h = [] #np.ones((self.n_hidden_layers, self.n_hidden[0]))\n self.W_h = []\n\n for i in range(len(self.n_hidden)):\n if (i == 0):\n self.W_h.append(self.random.normal(loc=0.0, scale=0.1, size=(n_features, self.n_hidden[0])))\n self.b_h.append(np.ones(self.n_hidden[0]))\n else:\n self.W_h.append(self.random.normal(loc=0.0, scale=0.1, size=(self.n_hidden[i-1], self.n_hidden[i])))\n self.b_h.append(np.ones(self.n_hidden[i])) \n \n self.b_out = [1]\n self.W_out = self.random.normal(loc=0.0, scale=0.1, size=(self.n_hidden[-1], n_output))",
"def render_grid(grid):\n rows = grid.shape[0]\n cols = grid.shape[1]\n for row in range(rows):\n for col in range(cols):\n if grid[row, col] != 1 and grid[row, col] != 0:\n grid[row, col] = 1\n return grid",
"def _griddata(self):\n res = self.cfg.resolution\n\n # Get area of data\n xmin, xmax = np.nanmin(self.x), np.nanmax(self.x)\n ymin, ymax = np.nanmin(self.y), np.nanmax(self.y)\n\n # Add padding\n width = xmax-xmin\n height = ymax-ymin\n pad = np.amax([self.cfg.grid_pad_fraction*width, self.cfg.grid_pad_fraction*height])\n xmin = np.floor(xmin - pad)\n xmax = np.ceil(xmax + pad)\n ymin = np.floor(ymin - pad)\n ymax = np.ceil(ymax + pad)\n\n # Create Grid and no data mask\n self.lrx = np.arange(xmin, xmax+res, res)\n self.lry = np.arange(ymin, ymax+res, res)\n self.dem_x, self.dem_y = np.meshgrid(self.lrx, self.lry)\n self.nonan = np.where(np.logical_or(np.isfinite(self.x), np.isfinite(self.y)))\n\n # Create regular grid\n gridding_algorithm = self.cfg.griddata[\"algorithm\"]\n if gridding_algorithm == \"scipy.griddata\":\n self.dem_z = griddata((self.x[self.nonan].flatten(), self.y[self.nonan].flatten()),\n self.als.elevation[self.nonan].flatten(),\n (self.dem_x, self.dem_y),\n **self.cfg.griddata[\"keyw\"])\n else:\n raise NotImplementedError(\"Gridding algorithm: %s\" % gridding_algorithm)\n\n self.dem_z = np.ma.array(self.dem_z)\n self.dem_mask = np.zeros(self.dem_z.shape, dtype=np.bool)",
"def grid(self):\r\n dimA = self.dimA ; dimC = self.dimA ; W_grid = self.W_grid\r\n \r\n self.tol = 10e-5\r\n self.Niter = 10000\r\n \r\n a0 = 100 / self.dimA\r\n c0 = 100 / self.dimA\r\n a_grid = np.mgrid[0:(dimA):1] ; a_grid = a0 * a_grid ; self.a_grid = a_grid\r\n c_grid = np.mgrid[0:(dimC):1] ; c_grid = c0 * c_grid ; self.c_grid = c_grid\r\n self.W_grid = W_grid",
"def GLDAS025Grids(only_land=False):\n\n resolution = 0.25\n glob_lons = np.arange(\n -180 + resolution / 2, 180 + resolution / 2, resolution\n )\n glob_lats = np.arange(\n -90 + resolution / 2, 90 + resolution / 2, resolution\n )\n lon, lat = np.meshgrid(glob_lons, glob_lats)\n glob_grid = BasicGrid(lon.flatten(), lat.flatten()).to_cell_grid(\n cellsize=5.0\n )\n\n if only_land:\n ds = Dataset(\n os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n \"GLDASp4_landmask_025d.nc4\",\n )\n )\n land_lats = ds.variables[\"lat\"][:]\n land_mask = ds.variables[\"GLDAS_mask\"][:].flatten().filled() == 0.0\n dlat = glob_lats.size - land_lats.size\n\n land_mask = np.concatenate((np.ones(dlat * glob_lons.size), land_mask))\n land_points = np.ma.masked_array(\n glob_grid.get_grid_points()[0], land_mask\n )\n\n land_grid = glob_grid.subgrid_from_gpis(\n land_points[~land_points.mask].filled()\n )\n return land_grid\n else:\n return glob_grid",
"def makeCMSgridNodes(x0, y0, azi, dx, dy, z):\n # convert from node calculation to centric calculation\n # first move origin from vertex of grid to center of first grid cell\n\n # first convert to FRF coordinates\n FRF = gp.FRFcoord(x0, y0, coordType='ncsp')\n # shift origin to cell center instead of cell vertex\n x0N = FRF['xFRF'] - dx[0]/2\n y0N = FRF['yFRF'] - dy[0]/2\n # create new dx/dy array spaced with half of each of the 2 cells\n dxN = dx[:-1] + np.diff(dx)/2\n dyN = dy[:-1] + np.diff(dy)/2 # new nodes at the grid center - needed to fit into\n # create new nodes in FRF x and FRF Y using cell centric locations for accurate interpolation\n outXfrf, outYfrf = createGridNodesinFRF(x0N, y0N, dxN, dyN, dx.shape[0], dy.shape[0])\n xFRF, yFRF = np.meshgrid(outXfrf, sorted(outYfrf))\n # new work no need to loop as above\n convert2 = gp.FRFcoord(xFRF.flatten(), yFRF.flatten(), coordType='FRF')\n lat = convert2['Lat'].reshape(xFRF.shape)\n lon = convert2['Lon'].reshape(xFRF.shape)\n easting = convert2['StateplaneE'].reshape(xFRF.shape)\n northing = convert2['StateplaneN'].reshape(yFRF.shape)\n # making i's and j's for cell numbers\n ii = np.linspace(1, xFRF.shape[1], xFRF.shape[1])\n jj = np.linspace(1, yFRF.shape[0], yFRF.shape[0])\n\n BathyPacket = {'i': ii,\n 'j': jj,\n 'latitude': lat,\n 'longitude': lon,\n 'easting': easting,\n 'northing': northing,\n 'xFRF': sorted(xFRF[0, :]),\n 'yFRF': yFRF[:, 0],\n 'azimuth': azi,\n 'x0': x0,\n 'y0': y0,\n 'DX': dxN,\n 'DY': dyN,\n 'ni': len(ii),\n 'nj': len(jj),\n 'elevation': z, # exported as [t, x,y] dimensions\n 'gridFname': 'CMS GRid',\n 'time': 0}\n\n return BathyPacket",
"def interp_matrix_new(qpnts, spnts, npgrid, nsamp, deg_max):\n # Initialize\n A = np.zeros((nsamp,npgrid))\n\n # Create matrix\n for i in xrange(nsamp):\n for j in xrange(npgrid):\n cosTheta = np.dot(spnts[i], qpnts[j])\n if(abs(cosTheta)>1):\n cosTheta = np.sign(cosTheta)\n A[i,j] = inv_funk_radon_even_kernel(cosTheta, deg_max)\n return A"
] | [
"0.5821582",
"0.5726809",
"0.5702125",
"0.5558751",
"0.5553554",
"0.5539239",
"0.5486648",
"0.5427895",
"0.5423916",
"0.537605",
"0.53138",
"0.53079635",
"0.52916086",
"0.52071506",
"0.5201484",
"0.5168702",
"0.5151044",
"0.51481164",
"0.51444805",
"0.51429015",
"0.513387",
"0.5127625",
"0.511065",
"0.5106199",
"0.5101384",
"0.5099263",
"0.5097908",
"0.5093512",
"0.5091243",
"0.5083005"
] | 0.6513067 | 0 |
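
As a usage note for to_dense_grid above: when fname_out is omitted, the dense field is returned together with the domain size, grid spacing and coordinate vectors, which can be plotted directly. A hedged sketch follows; the module name wabbit_tools and the input filename are assumptions made for illustration, not names confirmed by the corpus.

import matplotlib.pyplot as plt
import wabbit_tools  # assumed module holding to_dense_grid and the readers it calls

# without fname_out the function returns the dense field and its grid vectors
field, box, dx, X = wabbit_tools.to_dense_grid("ux_000000.h5", dim=2)
print("domain size:", box, "grid spacing:", dx)

plt.imshow(field.T, origin="lower", extent=[0.0, box[0], 0.0, box[1]])
plt.colorbar()
plt.show()
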
Compare two grids. The number returned is the fraction (between 0 and 1) of blocks from treecode1 which have also been found in treecode2 | def compare_two_grids( treecode1, treecode2 ):
import numpy as np
common_blocks = 0
for i in range(treecode1.shape[0]):
# we look for this tree code in the second array
code1 = treecode1[i,:]
for j in range(treecode2.shape[0]):
code2 = treecode2[j,:]
if np.linalg.norm( code2-code1 ) < 1.0e-13:
# found code1 in the second array
common_blocks += 1
break
print( "Nblocks1=%i NBlocks2=%i common blocks=%i" % (treecode1.shape[0], treecode2.shape[0], common_blocks) )
return common_blocks / treecode1.shape[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def PDiffGrids(A, B):\n if (A.xllcorner,A.yllcorner) == (B.xllcorner,B.yllcorner) and (A.ncols,A.nrows)==(B.ncols,B.nrows):\n Bx = numpy.where(B.data != B.nodata, B.data, 1.0)\n Bx = numpy.where(B.data != 0., B.data, 1.0)\n C = 100. * (A.data-Bx)/Bx\n New = grid(C, A.xllcorner, A.yllcorner, A.cellsize, 'pdif.grd', A.nodata)\n return New\n else:\n return \"Error: grid mismatch\"",
"def gridratio( grid1, grid2):\n\n nx1 = grid1.img_width\n ny1 = grid1.img_height\n nx2 = grid2.img_width\n ny2 = grid2.img_height\n\n ratio = 0.\n rms = 0.\n\n if nx1 != nx2:\n print(\"GridRatio: Nx1 != Nx2 (%d, %d)\" % (nx1, nx2))\n return ratio, rms\n\n if ny1 != ny2:\n print(\"GridRatio: Ny1 != Ny2 (%d, %d)\" % (ny1, ny2))\n return ratio, rms\n\n count = 0\n nonzero = np.zeros(nx1*ny1)\n\n # copy to ratio array\n gridratio = copy.deepcopy( grid1)\n\n for iii in range(nx1):\n for jjj in range(ny1):\n # put in zero as default\n gridratio.image[jjj,iii] = 0.\n if grid1.image[jjj,iii] > EPSILON:\n if grid2.image[jjj,iii] > EPSILON:\n nonzero[count] = grid1.image[jjj,iii]/grid2.image[jjj,iii]\n count = count + 1\n if count < 2:\n print (\"No overlap in non-zero samples\")\n return ratio, rms, gridratio\n\n nonzero = nonzero[0:count]\n asum = np.sum( nonzero)\n ratio = asum/float(count)\n rms = np.std( nonzero)\n print (\"Grid Ratio: %.4f +/- %.4f for %d samples\" % (ratio, rms/np.sqrt(count), count))\n # return the ratio grid \n return ratio, rms, gridratio",
"def grid_equal (grid1, grid2):\r\n s=0 \r\n for h in range(4):\r\n for m in range(4):\r\n if grid1[h][m]==grid2[h][m]:\r\n s+=1\r\n else:\r\n ()\r\n if s==16:\r\n return True\r\n else:\r\n return False",
"def compare_trees(first_soup: HTMLStrip, second_soup: HTMLStrip) -> float:\n first_tree = Tree.from_soup_object(first_soup.file_name, first_soup.original_soup)\n second_tree = Tree.from_soup_object(second_soup.file_name, second_soup.original_soup)\n\n common_paths_size: int = first_tree.num_of_common_paths(second_tree)\n target_size: int = second_tree.total_num_of_paths()\n similarity = float(common_paths_size)*100/target_size\n print(f'{similarity:.2f}')\n return similarity",
"def substructure_sim_exact(subtrees_1, subtrees_2):\n assert(len(subtrees_1) == len(subtrees_2))\n n = len(subtrees_1)\n f1 = np.zeros(n)\n for i in range(n):\n f1[i] = subtrees_1[i] == subtrees_2[i] # calculate the number of matching pairs\n\n return float(np.count_nonzero(f1)) / float(len(f1))",
"def npcr(mat1, mat2):\n\tnpcr = 0\n\tw, h = mat1.shape\n\tif mat1.shape != mat2.shape:\n\t\treturn -1\n\tfor i in range(w):\n\t\tfor j in range(h):\n\t\t\tif mat1[i,j] != mat2[i,j]:\n\t\t\t\tnpcr += 1\n\tnpcr /= (w*h)\n\treturn npcr*100",
"def compare_trees(tree1, tree2):\n \tresponse = {}\n \tstart_time = time.time()\n \ttry:\t\n \t\ttns = dendropy.TaxonNamespace() \t\n \t\n \t\ttree_obj1 = dendropy.Tree.get(data=tree1, schema=\"newick\",taxon_namespace=tns)\n \t\ttree_obj2 = dendropy.Tree.get(data=tree2, schema=\"newick\",taxon_namespace=tns)\n\n \t\ttree_obj1.encode_bipartitions()\n \t\ttree_obj2.encode_bipartitions()\n\n \t\t#-----------------------------------------------------------\n \t\t#This method returns the symmetric distance between two trees. \n \t\t#The symmetric distance between two trees is the sum of the number of splits found in one of the trees but not the other. \n \t\t#It is common to see this statistic called the Robinson-Foulds distance\n\n \t\tareSame = True if treecompare.symmetric_difference(tree_obj1, tree_obj2) == 0 else False\n \t\tstatus = 200\n \t\tmessage = \"Success\"\n \t\tresponse['are_same_tree'] = areSame\n \n \texcept Exception, e:\n \t\tif \"Incomplete or improperly-terminated tree statement\" in str(e): #invalid: \"((A,B),C,D));\" valid: ((A,B),(C,D)); \n \t\t\tmessage = \"NewickReaderIncompleteTreeStatementError: \" + str(e)\n \t \t\tstatus = 400\n \t\telif \"Unbalanced parentheses at tree statement\" in str(e): #invalid: \"((A,B),(C,D);\" valid: ((A,B),(C,D)); \n \t\t\tmessage = \"NewickReaderMalformedStatementError: \"+str(e) \n \t \t\tstatus = 400\n \t\telif \"Multiple occurrences of the same taxa\" in str(e): #invalid: \"((A,B),(C,C));\" valid: ((A,B),(C,D));\n \t\t\tmessage = \"NewickReaderDuplicateTaxonError: \"+str(e)\n \t \t\tstatus = 400\n \t\telif \"Unexpected end of stream\" in str(e): # invalid: \"((A,B),(C,D))\" valid: ((A,B),(C,D));\n \t\t\tmessage = \"UnexpectedEndOfStreamError: \"+str(e)\n \t \t\tstatus = 400\n \t\telse:\n \t\t\tmessage = \"Error: Failed to compare trees. \"+str(e)\n \t \t\tstatus = 500\n \t \t\n \tresponse['status_code'] = status\n \tresponse['message'] = message\n\n \tend_time = time.time()\n \texecution_time = end_time-start_time\n #service result creation time\n \tcreation_time = datetime.datetime.now().isoformat()\n \tmeta_data = {'creation_time': creation_time, 'execution_time': float('{:4.2f}'.format(execution_time)), 'source_urls':[\"http://dendropy.org/library/treecompare.html#module-dendropy.calculate.treecompare\"] }\n\n \tresponse['meta_data'] = meta_data\n \tprint response\n \treturn response",
"def compare_tile(t1, t2):\n matches = 0\n\n t1pos = get_all_perms(t1)\n t2pos = get_all_perms(t2)\n\n for t1 in t1pos:\n for t2 in t2pos:\n if t1.tolist() == t2.tolist():\n matches += 1\n\n return matches",
"def countMatches(g1, g2):\n if g1 is None or g2 is None or len(g1) == 0 or len(g1[0]) == 0: # sanity check\n return 0\n count = 0\n for i in range(len(g1)):\n for j in range(len(g1[0])):\n if g1[i][j] == g2[i][j] == 1 and search_grid(g1, g2, i, j):\n count = count + 1\n return count",
"def match_percentage(image1_pixels, image2_pixels):\n\n match, total = 0, 0\n for i in range(len(image1_pixels)):\n if image1_pixels[i] == image2_pixels[i]:\n match += 1\n total += 1\n else:\n total += 1\n return float(match) / float(total)",
"def compare_nodes(n1, n2):\n return n1['g_val'] + n1['h_val'] < n2['g_val'] + n2['h_val']",
"def percent_identity(align_1, align_2):\n matches = 0\n for i in range(len(align_1)):\n if align_1[i] == align_2[i]:\n matches+= 1\n percent_identity = matches / len(align_1)\n return percent_identity",
"def testSpeciesRichnessNear(self):\n self.assertAlmostEqual(1.0, self.tree1.get_number_individuals() / self.tree2.get_number_individuals(), 0)",
"def find_difference(seg1, seg2):\n letter_score = []\n for c1,c2 in zip(seg1, seg2):\n letter_score.append(float(len(breadth_first(key_graph, c1, c2)) - 1))\n return sum(letter_score)/len(letter_score)",
"def grid_equal (grid1, grid2):\r\n if grid1 == grid2:\r\n return True\r\n else:\r\n return False",
"def grid_equal (grid1, grid2):\r\n if grid1 == grid2:\r\n return True\r\n return False",
"def compare_pages(page1, page2):\n s1 = Measurements.link_to_set(page1)\n s2 = Measurements.link_to_set(page2)\n\n alph = Measurements.pages_to_alphabet([page1, page2])\n #Measurements.logger.debug(\"From page1:\",page1)\n str1 = Measurements.page_to_string(page1, alph)\n #Measurements.logger.debug(\"From page2:\"+page2)\n str2 = Measurements.page_to_string(page2, alph)\n # so sanh coi cai nay giong cai kia bao nhieu phan tram\n if len(s1) > len(s2):\n j = float(len(s1.intersection(s2)))/len(s1)\n else:\n j = float(len(s2.intersection(s1)))/len(s2)\n return j",
"def compare_branch_lengths(tree1, tree2):\n stack = [] # stack to store nodes in tree2\n\n for count, node in enumerate(tree1.postorder(include_self=False)):\n if node.is_tip():\n try:\n cur = tree2.find(node.name)\n except MissingNodeError:\n return False\n else:\n if node.id == stack[-1].id:\n cur = stack.pop()\n else:\n return False\n\n if _compare_length(node, cur) is False:\n return False\n if node.parent.id is None and cur.parent.id is None:\n cur.parent.id = node.parent.id = str(count)\n elif (node.parent.id is not None) ^ (cur.parent.id is not None):\n return False\n if cur.parent not in stack:\n stack.append(cur.parent)\n return True",
"def _compare(self, boxlist1, boxlist2):\n\n ycenter1, xcenter1, _, _ = BoxList.get_center_coordinates_and_sizes(boxlist1)\n ycenter2, xcenter2, _, _ = BoxList.get_center_coordinates_and_sizes(boxlist2)\n\n centers1 = tf.transpose(tf.stack((ycenter1, xcenter1)))\n centers2 = tf.transpose(tf.stack((ycenter2, ycenter2)))\n\n centers_diff = tf.expand_dims(centers1, 1) - tf.expand_dims(centers2, 0)\n neg_l2_distance = -tf.norm(centers_diff, axis=2)\n return neg_l2_distance\n #return box_list_ops.iou(boxlist1, boxlist2)",
"def score(stripe1, stripe2):\n scr = 0\n count = 0\n for p1, p2 in zip(stripe1, stripe2):\n r = abs(p1[0] - p2[0])\n g = abs(p1[1] - p2[1])\n b = abs(p1[2] - p2[2])\n scr += r + g + b\n return scr",
"def test_tree_intersection_on_whiteboard_example(one_wb, two_wb):\n expected = [200, 150, 141, 100]\n actual = tree_intersection(one_wb, two_wb)\n assert expected == actual",
"def _compare(self, actual, expected, num_vert):\n # get sparktk res in pandas form and iterate\n actual_pandas = actual.to_pandas()\n for (index, row) in actual_pandas.iterrows():\n # get the row id and deg cen result as floats\n # from the sparktk result\n row_id = float(row[\"id\"])\n row_res = float(row[\"degree_centrality\"])\n\n # now we get the expected result from our calculated edge_counts\n # if that vertex isn't in edge_counts it means we incurred no instances\n # of edges originating or ending there, therefore the edge_count is 0\n if int(row_id) in expected:\n expected_res_for_row = expected[int(row_id)]\n else:\n expected_res_for_row = 0\n\n # ensure that the expected res matches the actual res from sparktk\n self.assertAlmostEqual(row_res, expected_res_for_row / float(num_vert) - 1)",
"def compare(strokes1, strokes2):\n\n score = 0\n for stroke_i in strokes1:\n match = identify(strokes2, stroke_i)\n score += match\n\n # draw1 = concat(strokes1)\n # draw2 = concat(strokes2)\n # draw1_length,_ = draw1.euclidian_length()\n # draw2_length,_ = draw2.euclidian_length()\n\n # tot_length = draw1_length# + draw2_length\n\n return score",
"def search_grid(grid1, grid2, i, j):\n if i < 0 or j < 0 or i >= len(grid1) or j >= len(grid1[0]): # boundary check\n return True\n match = grid1[i][j] == grid2[i][j]\n if grid1[i][j] == 0 or grid2[i][j] == 0:\n return match\n # once a cell becomes a part of a matching region, set it to 0. This makes sure that the cell\n # is not counted for another matching region.\n grid1[i][j] = 0\n grid2[i][j] = 0\n match = search_grid(grid1, grid2, i - 1, j) and match\n match = search_grid(grid1, grid2, i, j - 1) and match\n match = search_grid(grid1, grid2, i + 1, j) and match\n match = search_grid(grid1, grid2, i, j + 1) and match\n return match",
"def same_landmark_images(path_1: str, path_2: str) -> float:\n img_1_greyscale = read_image_greyscale(path_1)\n img_2_greyscale = read_image_greyscale(path_2)\n img_1_rgb_separated = np.array([read_image_color(path_1, component) for component in RGB_COMPONENTS])\n img_2_rgb_separated = np.array([read_image_color(path_2, component) for component in RGB_COMPONENTS])\n\n similarity_hog = similarity_two_images_hog(img_1_greyscale, img_2_greyscale)\n similiarities_rgb = np.array([similarity_two_images_color(img_1_rgb_separated[i], img_2_rgb_separated[i])\n for i in range(0, len(RGB_COMPONENTS))])\n similarity_color = np.mean(similiarities_rgb)\n\n similarity_percentage = np.average([similarity_hog, similarity_color], weights=[1.2, 1])\n return float(similarity_percentage)",
"def score(grid):\n result = 0\n for r in range(WORLD_WIDTH):\n for c in range(WORLD_WIDTH):\n if grid[r, c] != DIRT:\n result += 1\n return result",
"def judge(genA: typing.Iterator[int], genB: typing.Iterator[int], steps: int) -> int:\n res = 0\n for na, nb in it.islice(zip(genA, genB), steps):\n la, lb = lower16(na), lower16(nb)\n if la == lb:\n res += 1\n return res",
"def testNumberIndividuals(self):\n self.assertEqual(self.tree1.get_number_individuals(), self.tree2.get_number_individuals())\n self.assertEqual(472518, self.tree1.get_number_individuals())",
"def compare(seq1, seq2):\n if seq1 == seq2:\n return 1\n len_diff = len(seq1) / len(seq2)\n if len_diff > 1:\n len_diff = 1 / len_diff\n\n ngrams1 = {tuple(ng) for ng in get_all_ngrams(seq1)}\n ngrams2 = {tuple(ng) for ng in get_all_ngrams(seq2)}\n\n overall = len(ngrams1 & ngrams2) / len(ngrams1 | ngrams2)\n if overall == 1 or overall == 0:\n return overall\n\n try:\n max_match = len(max(ngrams1 & ngrams2, key=len)) / len(seq1)\n except ValueError:\n return 0\n\n return (len_diff + max_match + overall) / 3",
"def compare(self):\n len0 = len(self.cluster_lists[0])\n len1 = len(self.cluster_lists[1])\n longer_index = 0 if len0 >= len1 else 1\n shorter_index = 1 if len1 <= len0 else 0\n self.stars_length = len(self.cluster_lists[shorter_index]) \n self.starlets_length = len(self.cluster_lists[longer_index]) \n # build the noeds for shorter cluster list, and get the\n # distribution of cluster size.\n for cluster in self.cluster_lists[shorter_index]:\n len_spectra = len(cluster.get_spectra())\n star = ClusterNode(cluster.id, len_spectra) \n self.stars[cluster.id] = star\n\n self.cluster_spectra_num[shorter_index] += len_spectra\n self.cluster_size_dist[shorter_index][len_spectra] = self.cluster_size_dist[shorter_index].get(len_spectra,0) + 1\n # build the noeds for longer cluster list, and get the\n # distribution of cluster size.\n for cluster in self.cluster_lists[longer_index]:\n len_spectra = len(cluster.get_spectra())\n starlet = ClusterNode(cluster.id, len_spectra) \n self.starlets[cluster.id] = starlet\n\n self.cluster_spectra_num[longer_index] += len_spectra\n self.cluster_size_dist[longer_index][len_spectra] = self.cluster_size_dist[longer_index].get(len_spectra,0) + 1\n # do the comparing, and network building\n for i in range (0, len(self.cluster_lists[shorter_index])):\n cluster0 = self.cluster_lists[shorter_index][i] \n for j in range (i, len(self.cluster_lists[longer_index])):\n cluster1 = self.cluster_lists[longer_index][j] \n (shared_spec_num, similarity) = self.calculate_similarity(cluster0, cluster1)\n if similarity == 0:\n continue\n self.similarity_dist[int(similarity*10)] = self.similarity_dist.get(int(similarity*10),0) + 1\n self.shared_spec_num += shared_spec_num\n\n self.stars[cluster0.id].add_nb_node(cluster1.id, similarity, shared_spec_num)\n self.starlets[cluster1.id].add_nb_node(cluster0.id, similarity, shared_spec_num)\n\n self.ave_star_size = self.cluster_spectra_num[shorter_index]/self.stars_length\n self.ave_starlet_size = self.cluster_spectra_num[longer_index]/self.starlets_length"
] | [
"0.65731466",
"0.63465333",
"0.6300452",
"0.6264339",
"0.61934185",
"0.61712897",
"0.5996527",
"0.5989321",
"0.5979154",
"0.5934525",
"0.59334546",
"0.59026194",
"0.58587474",
"0.58309686",
"0.58261055",
"0.5805267",
"0.57949764",
"0.5784748",
"0.5769305",
"0.5745499",
"0.5742937",
"0.5734661",
"0.56987286",
"0.56919754",
"0.56730723",
"0.56321454",
"0.5603429",
"0.5569566",
"0.55505544",
"0.554546"
] | 0.83875257 | 0 |
On all blocks of the data array, replace any function values by the level of the block | def overwrite_block_data_with_level(treecode, data):
if len(data.shape) == 4:
N = treecode.shape[0]
for i in range(N):
level = treecode_level(treecode[i,:])
data[i,:,:,:] = float( level )
elif len(data.shape) == 3:
N = treecode.shape[0]
for i in range(N):
level = treecode_level(treecode[i,:])
data[i,:,:] = float( level )
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def apply_block(self, block_id, func=..., edges=..., inplace=...): # -> None:\n ...",
"def postSI(self):\n # for cell in self.cells:\n # cell.resetTotOrdFlux()\n self.depth = 0",
"def replace(arr, fixers, data_tag='mydata', logger=None):\n # if logger not provided, create default\n if logger is None:\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n handler = logging.FileHandler('default.log')\n handler.setLevel(logging.INFO)\n logger.addHandler(handler)\n\n for fix in sorted(fixers):\n if fix in function_mapper:\n arr = function_mapper[fix](arr, fixers[fix])\n logger.info(data_tag + ' repaired ' + fix.lower() )\n return arr",
"def apply_fn(self,fn):\r\n \r\n self.check_Data()\r\n for split,data_ in self.processed_data.items():\r\n x = data_['x']\r\n x = np.array([fn(xi) for xi in x])\r\n data_['x'] = x",
"def sprout_leaves(t, vals):",
"def level_data(self):\n self.level(self.data)",
"def process_dataset(dataset, func):\n new_dataset = copy.copy(dataset)\n del new_dataset[\"val\"]\n new_dataset.update(func(dataset))\n return new_dataset",
"def replace(self, index, value):\n index += self.n\n self.data[index] = value\n index //= 2\n while index > 0:\n self.data[index] = self.func(self.data[2*index], self.data[2*index+1])\n index //= 2",
"def __fill_data(self,input_data,data,mask_pattern):\n\t\tsize = len(input_data)\n\t\tup = False\n\t\tdata_index = 0\n\t\tmask_func = self.__get_mask_func(mask_pattern) #Get the mask function based on mask pattern\n\t\tfor col in range(size-1,-1,-2):\n\t\t\tup = not up\n\t\t\tif up:\n\t\t\t\tif col >= size-8:\n\t\t\t\t\trow = size-1\n\t\t\t\t\twhile row >= 9:\n\t\t\t\t\t\tif input_data[row][col] is None:\n\t\t\t\t\t\t\tinput_data[row][col] = int(data[data_index])\n\t\t\t\t\t\t\tdata_index += 1\n\t\t\t\t\t\t\tif mask_func(row,col):\n\t\t\t\t\t\t\t\tinput_data[row][col] = self.__toggle(input_data[row][col])\n\t\t\t\t\t\tif col % 2 == 0:\n\t\t\t\t\t\t\tcol = col - 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcol = col + 1\n\t\t\t\t\t\t\trow = row - 1\n\t\t\t\telif col >= 8:\n\t\t\t\t\trow = size-1\n\t\t\t\t\twhile row >= 0:\n\t\t\t\t\t\tif input_data[row][col] is None:\n\t\t\t\t\t\t\tinput_data[row][col] = int(data[data_index])\n\t\t\t\t\t\t\tdata_index += 1\n\t\t\t\t\t\t\tif mask_func(row,col):\n\t\t\t\t\t\t\t\tinput_data[row][col] = self.__toggle(input_data[row][col])\n\t\t\t\t\t\tif col % 2 == 0:\n\t\t\t\t\t\t\tcol = col - 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcol = col + 1\n\t\t\t\t\t\t\trow = row - 1\n\t\t\t\telse:\n\t\t\t\t\trow = size-9\n\t\t\t\t\twhile row >= 9:\n\t\t\t\t\t\tif input_data[row][col] is None:\n\t\t\t\t\t\t\tinput_data[row][col] = int(data[data_index])\n\t\t\t\t\t\t\tdata_index += 1\n\t\t\t\t\t\t\tif mask_func(row,col):\n\t\t\t\t\t\t\t\tinput_data[row][col] = self.__toggle(input_data[row][col])\n\t\t\t\t\t\tif col % 2 == 0:\n\t\t\t\t\t\t\tcol = col - 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcol = col + 1\n\t\t\t\t\t\t\trow = row - 1\n\n\n\t\t\telse:\n\t\t\t\tif col >= size-8:\n\t\t\t\t\trow = 9\n\t\t\t\t\twhile row <= size-1:\n\t\t\t\t\t\tif input_data[row][col] is None:\n\t\t\t\t\t\t\tinput_data[row][col] = int(data[data_index])\n\t\t\t\t\t\t\tdata_index += 1\n\t\t\t\t\t\t\tif mask_func(row,col):\n\t\t\t\t\t\t\t\tinput_data[row][col] = self.__toggle(input_data[row][col])\n\t\t\t\t\t\tif col % 2 == 0:\n\t\t\t\t\t\t\tcol = col - 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcol = col + 1\n\t\t\t\t\t\t\trow = row + 1\n\t\t\t\telif col >= 8:\n\t\t\t\t\trow = 0\n\t\t\t\t\twhile row <= size-1:\n\t\t\t\t\t\tif input_data[row][col] is None:\n\t\t\t\t\t\t\tinput_data[row][col] = int(data[data_index])\n\t\t\t\t\t\t\tdata_index += 1\n\t\t\t\t\t\t\tif mask_func(row,col):\n\t\t\t\t\t\t\t\tinput_data[row][col] = self.__toggle(input_data[row][col])\n\t\t\t\t\t\tif col % 2 == 0:\n\t\t\t\t\t\t\tcol = col - 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcol = col + 1\n\t\t\t\t\t\t\trow = row + 1\t\t\t\n\t\t\t\telse:\n\t\t\t\t\trow = 9\n\t\t\t\t\twhile row <= size-9:\n\t\t\t\t\t\tif input_data[row][col] is None:\n\t\t\t\t\t\t\tinput_data[row][col] = int(data[data_index])\n\t\t\t\t\t\t\tdata_index += 1\n\t\t\t\t\t\t\tif mask_func(row,col):\n\t\t\t\t\t\t\t\tinput_data[row][col] = self.__toggle(input_data[row][col])\n\t\t\t\t\t\tif col % 2 == 0:\n\t\t\t\t\t\t\tcol = col - 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcol = col + 1\n\t\t\t\t\t\t\trow = row + 1\n\t\treturn input_data",
"def _adjustBlock(self, b):\n raise NotImplementedError",
"def mapDel(block, posMap):\n for (x, y) in block.coords:\n theFallener(x + block.x, y + block.y, 0, posMap)",
"def apply(self, f):\n for v in self.vertices:\n v.x, v.y, v.z = f(v.coords())",
"def modify_body(lines, PE_dims, var_map): \n loop_bodies = []\n # Locate the user statements\n for line_id in range(len(lines)):\n line = lines[line_id]\n if line.find('hls_pipeline') != -1:\n # extract the loop body\n body_start = line_id\n r_minus_l = -1\n nxt_line_id = line_id + 1 \n while nxt_line_id < len(lines):\n nxt_line = lines[nxt_line_id]\n if nxt_line.find('}') != -1:\n r_minus_l += 1\n if nxt_line.find('{') != -1:\n r_minus_l -= 1\n if r_minus_l == 0:\n body_end = nxt_line_id - 1\n break\n nxt_line_id += 1\n loop_body = lines[body_start : body_end + 1]\n #print(loop_body)\n loop_bodies.append({'pos': [body_start, body_end], 'lines': loop_body})\n \n # Modidy the loop bodies\n #for body in loop_bodies:\n body_offset = 0\n for idx in range(len(loop_bodies)):\n body = loop_bodies[idx]\n body_lines = body['lines'] \n group_names = []\n has_data_trans = True\n data_trans_info = extract_data_trans_info(body_lines, PE_dims)\n # Remove the in transfer\n while has_data_trans:\n has_data_trans = False\n for line_id in range(len(body_lines)):\n line = body_lines[line_id]\n if line.find('read_channel_intel') != -1:\n has_data_trans = True\n # Locate the read block and the write block\n block_start, block_end = locate_data_trans_block(line_id, body_lines)\n m = re.search(r'\\((.+?)\\)', line) \n fifo_name = m.group(1)\n group_name = fifo_name.split('_')[1]\n group_names.append(group_name)\n break\n if has_data_trans:\n body_lines = body_lines[:block_start] + body_lines[block_end + 1:]\n # Remove the out transfer\n has_data_trans = True\n while has_data_trans:\n has_data_trans = False\n for line_id in range(len(body_lines)):\n line = body_lines[line_id]\n if line.find('write_channel_intel') != -1:\n m = re.search(r'\\((.+?)\\)', line)\n fifo_name = m.group(1).split(',')[0]\n group_name = fifo_name.split('_')[1]\n if group_name in group_names:\n has_data_trans = True\n block_start, block_end = locate_data_trans_block(line_id, body_lines)\n if has_data_trans:\n body_lines = body_lines[:block_start] + body_lines[block_end + 1:]\n #print(body_lines)\n # Wrap the body with space loops\n for dim_idx in range(len(PE_dims)):\n dim = PE_dims[dim_idx] \n line = f'#pragma unroll\\nfor (int s{dim_idx} = 0; s{dim_idx} < {dim}; s{dim_idx}++) {{\\n'\n body_lines.insert(dim_idx, line) \n for dim in PE_dims:\n body_lines.append('}\\n')\n\n # Modify the index\n body_lines = modify_index(body_lines, var_map, PE_dims)\n #print(body_lines)\n\n # Insert the data transfer stmts\n body_lines = insert_data_trans(body_lines, data_trans_info, PE_dims)\n #loop_bodies[idx]['lines'] = body_lines\n\n # Replace the loop bodies\n body_pos = body['pos'] \n lines = lines[: body_offset + body_pos[0]] \\\n + body_lines \\\n + lines[body_offset + body_pos[1] + 1 :] \n body_offset += len(body_lines) - (body_pos[1] - body_pos[0] + 1)\n\n return lines",
"def ApplyMask(data,mask):\n \n # loop through portions\n for portion in data.keys():\n # match data keys and apply mask \n for key in data[portion].keys():\n if key in 'xyerr':\n if mask != 'UnMasked':\n data[portion][key].mask = data[portion]['UnMasked']\n data[portion][key].mask = data[portion][mask]\n\t\n return data",
"def _permutate(self, table, block):\n return list(map(lambda x: block[x], table))",
"def forEach(self, func):\n for x in range(self._width):\n for y in range(self._height):\n func(self.data[x, y], x, y)",
"def reflect(data, mapfunc = lambda x:x):\n data2 = np.zeros([tsize, npsi])\n # Copy the original data\n for i in np.arange(ntheta):\n data2[i,:] = data[i,:]\n # Now fill in the remainder\n for i in np.arange(ntheta, tsize):\n t0 = tsize - 1 - i\n data2[i,:] = mapfunc(data[t0,:])\n return data2",
"def schedule_nodeflow_update_all(graph, block_id, message_func, reduce_func, apply_func): # -> None:\n ...",
"def _apply_func(data, func, num_rows, base_row_index=0, increment=False):\n row = list(data[base_row_index])\n curr_index = base_row_index\n for _ in range(num_rows):\n data.append(func(row))\n if increment:\n curr_index += 1\n row = list(data[curr_index])\n return data",
"def update_blocks_closure(self, ln, block, fail_bool):\n\n if ln == Line.LINE_GREEN:\n # Check that block isnt already in that state\n if self.blocks_green_arr[block - 1].open == (not fail_bool):\n if fail_bool == True:\n self.blocks_green_arr[block - 1].num_faliures += 1\n else:\n self.blocks_green_arr[block - 1].num_faliures -= 1\n else:\n if fail_bool == True:\n self.blocks_green_arr[block - 1].num_faliures += 1\n else:\n self.blocks_green_arr[block - 1].num_faliures -= 1\n\n\n # Update block if fail\n if self.blocks_green_arr[block - 1].num_faliures > 0:\n if self.blocks_green_arr[block - 1].open:\n signals.ctc_update_failure_blocks_gui.emit(ln, fail_bool)\n self.blocks_green_arr[block - 1].open = False\n else:\n if not self.blocks_green_arr[block - 1].open:\n signals.ctc_update_failure_blocks_gui.emit(ln, fail_bool)\n self.blocks_green_arr[block - 1].open = True\n\n elif ln == Line.LINE_RED:\n # Check that block isnt already in that state\n if self.blocks_red_arr[block - 1].open == (not fail_bool):\n if fail_bool == True:\n self.blocks_red_arr[block - 1].num_faliures += 1\n else:\n self.blocks_red_arr[block - 1].num_faliures -= 1\n else:\n if fail_bool == True:\n self.blocks_red_arr[block - 1].num_faliures += 1\n else:\n self.blocks_red_arr[block - 1].num_faliures -= 1\n\n # Update block if fail\n if self.blocks_red_arr[block - 1].num_faliures > 0:\n if self.blocks_red_arr[block - 1].open:\n signals.ctc_update_failure_blocks_gui.emit(ln, fail_bool)\n self.blocks_red_arr[block - 1].open = False\n else:\n if not self.blocks_red_arr[block - 1].open:\n signals.ctc_update_failure_blocks_gui.emit(ln, fail_bool)\n self.blocks_red_arr[block - 1].open = True\n\n else:\n raise Exception(\"CTC : UPDATE BLOCK CLOSURES (maint. mode from SWTrack \\\n Cont. Send INVALID Line\")",
"def applyToEach(L,f):\n for i in range(len(L)):\n L[i] = f(L[i])",
"def manipulate_data(data):\n log.info(\"Doing some fun stuff here!\")\n return data",
"def ghost_point_processor(data, b_values, log=None):\n if __debug__ and log:\n log.debug(\"original data is \" + repr(data))\n for b_slice, b_data in b_values:\n if __debug__ and log:\n log.debug(\"b_slice is %s\"%(repr(b_slice)))\n log.debug(\"b_data is %s\"%(repr(b_data)))\n data[b_slice] = b_data\n if __debug__ and log:\n log.debug(\"new data is \" + repr(data))",
"def patch_context(data, i, j, k, r):\n idxs = (np.array([i+r,i-r,i,i,i,i]),\n np.array([j,j,j+r,j-r,j,j]),\n np.array([k,k,k,k,k+r,k-r]))\n ctx = data[idxs]\n return ctx",
"def flat_to_nested(self, data: dict, original_data, target, method):\n data[target] = method(original_data)\n return data",
"def preprocess_func(leaf):\n if isinstance(leaf, Repeated):\n new_leaf = leaf.count.value # evaluates and stores value directly\n # CAUTION: +1 as we now start counting at 0, but regex start counting at 1 for groups\n match_transformed.append(match.ends(new_leaf + 1))\n # recursive call\n leaf.structure.map(preprocess_func)\n # from here on everything is executed depth first (by recursion)\n substructs[new_leaf] = leaf.structure\n\n # elif isinstance(leaf, Count):\n else: #there should be no other case\n new_leaf = leaf.value # evaluates and stores value directly\n # CAUTION: +1 as we now start counting at 0, but regex start counting at 1 for groups\n match_transformed.append((match.ends(new_leaf + 1), match.captures(new_leaf + 1)))\n\n return new_leaf # new_leaf is int",
"def applyFuncOnValues(self, func):\r\n self._value = func(self._value)",
"def fold(vyper_module: vy_ast.Module) -> None:\n replace_builtin_constants(vyper_module)\n\n changed_nodes = 1\n while changed_nodes:\n changed_nodes = 0\n changed_nodes += replace_user_defined_constants(vyper_module)\n changed_nodes += replace_literal_ops(vyper_module)\n changed_nodes += replace_subscripts(vyper_module)\n changed_nodes += replace_builtin_functions(vyper_module)",
"def _mutate_node(self, node):\n self.idx += 1\n\n if self.idx != self.r:\n return\n\n # Exclude some things like signatures, etc.\n exclusions = ['signature', 'crc']\n for ex in exclusions:\n if ex in node._pfp__name.lower():\n return\n\n if type(node) == pfp.fields.Dom:\n return\n elif self._base_name(node) == 'Struct':\n # This is a container, interested in\n # its children nodes\n return\n elif self._base_name(node) == 'Array':\n print(\"%s is an Array of %s (%s)\" % (node._pfp__name,\n node.field_cls, node.width))\n # I can change the data at once:\n node.raw_data = \"cacaca\"\n\n # Or iterate through its elements:\n # for e in node:\n # e._pfp__set_value(e._pfp__value + 1)\n else:\n # CORE TYPE\n # This is supposed to cast\n print('CORE TYPE?')\n node._pfp__set_value(1337)",
"def channel_array_blocks(self, opening_width, block_len, block_from_bottom):\n \n params = self.params\n count = 0\n for i in range(len(params['widths'])):\n if params['subsampling']>0:\n back_square = self.coord[i*params['num']]\n else:\n back_square = self.coord[i*params['num']+1].copy()\n back_square = back_square-np.repeat([[params['space'],0]],[back_square.shape[0]],axis = 0)\n \n center_x = 0.5*(np.min(back_square[:,0])+np.max(back_square[:,0]))\n center_y = np.min(back_square[:,1]) \n block = Feature.define_polygon([[center_x-params['widths'][i]/2+opening_width,center_y+block_from_bottom],[center_x+params['widths'][i]/2-opening_width,center_y+block_from_bottom],\n [center_x+params['widths'][i]/2-opening_width,center_y+block_from_bottom+block_len],[center_x-params['widths'][i]/2+opening_width, center_y+block_from_bottom+block_len]])\n \n temp = Feature.reverse_feature(block, back_square)\n for j in range(params['num']):\n if ((params['subsampling']>0) and (np.mod(j,params['subsampling']) ==0)) or ((params['subsampling']<0) and (np.mod(j,-params['subsampling']) != 0)):\n new_coord = temp.coord\n new_coord = [x+np.repeat([[j*params['space'],0]],[x.shape[0]],axis = 0) for x in new_coord]\n self.coord[count] = new_coord\n count+=1\n #self.coord = [item for sublist in self.coord for item in sublist]\n temp = []\n for x in self.coord:\n if(isinstance(x, list)):\n for y in x: \n temp.append(y)\n else:\n temp.append(x)\n self.coord = temp\n\n \n \n '''params = self.params\n myarray2 = Feature.channel_array(length=block_len,num=params['num'],space = params['space'],space_series = params['space_series'],widths = [x-2*opening_width for x in params['widths']],origin=np.array(params['origin'])+np.array([0,-params['length']+block_len+block_from_bottom]), subsampling=params['subsampling'])\n new_feature = Feature()\n for i in range(len(self.coord)):\n back_square = self.coord[i]\n curr_feature = Feature()\n curr_feature.coord = [myarray2.coord[i]]\n\n temp = Feature.reverse_feature(curr_feature, back_square)\n if new_feature.coord:\n new_feature = Feature.combine_features(new_feature,temp)\n else:\n new_feature = temp\n self.coord = new_feature.coord'''\n return self"
] | [
"0.5757027",
"0.5234304",
"0.5134023",
"0.51197505",
"0.5062385",
"0.50325173",
"0.50212735",
"0.48631778",
"0.4849501",
"0.48456857",
"0.47853938",
"0.47664374",
"0.4762566",
"0.4750089",
"0.47483295",
"0.47385266",
"0.47344804",
"0.4733381",
"0.4728765",
"0.47240093",
"0.47203648",
"0.46957195",
"0.46893907",
"0.46824694",
"0.46772",
"0.46641028",
"0.46575868",
"0.46544465",
"0.46464995",
"0.46323672"
] | 0.7074382 | 0 |
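Usage note for the record above (overwrite_block_data_with_level): the sketch below is illustrative only and assumes the positive document's function, plus the treecode_level helper it calls, are both already defined in the current namespace; the treecode rows and the padding convention are assumptions made for the example, not taken from the source.

import numpy as np

# four 2D blocks of 3x3 points each; treecode digits padded with -1 for unused levels
# (assumption about how treecode_level counts the active digits)
treecode = np.array([[0, -1], [1, -1], [2, 3], [3, 0]], dtype=float)
data = np.random.rand(4, 3, 3)

data = overwrite_block_data_with_level(treecode, data)
# every block is now a constant field equal to its refinement level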
This routine performs a shell command on each .h5 file in a given directory! | def command_on_each_hdf5_file(directory, command):
import re
import os
import glob
if not os.path.exists(directory):
err("The given directory does not exist!")
files = glob.glob(directory+'/*.h5')
files.sort()
for file in files:
c = command % file
os.system(c) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_h5(walk_dir):\n\n file_list = []\n for root, subdirs, files in os.walk(walk_dir):\n\n for filename in files:\n file_path = os.path.join(root, filename)\n if file_path[-2:] == 'h5':\n file_list.append(file_path)\n\n return file_list",
"def h5ls(h5o, maxdepth=None, maxitems=None):\n _h5ls(h5o, 0, maxdepth=maxdepth, maxitems=maxitems, prefix='')",
"def calculate(d):\r\n\r\n # Set correct slashes for the OS\r\n if sys.platform == 'windows':\r\n slash = '\\\\'\r\n elif sys.platform == 'linux':\r\n slash = '/'\r\n else:\r\n print('#Error. Unknown platform.')\r\n return\r\n\r\n print('Files in the current directory and their md5-hashes:\\n')\r\n i = 0\r\n assert i == 0, '#Error. Variable i != 0.'\r\n\r\n for i in range(len(d[2])): # Go through the list of files\r\n full_path = d[0]+slash+d[2][i]\r\n print(full_path) # Get the list of files with full paths\r\n print(md5(full_path))\r\n size(full_path)",
"def loadDirectory(self, dirname):\r\n cachelist=os.listdir(dirname)\r\n testlist=fnmatch.filter(cachelist,'*.hdf5')\r\n \r\n for file_ in testlist:\r\n print(\"Using {0}\".format(file_))\r\n \r\n files = [h5py.File(os.path.join(dirname, fn),'r') for fn in testlist]\r\n return files",
"def h5root():\n with h5py.File('dummy.nxs', mode='w', driver=\"core\", backing_store=False) as f:\n yield f",
"def open_fast5_files(path, mode=\"r\"):\n for filename in find_fast5_files(path):\n try:\n hdf = Fast5File(filename, mode=mode)\n if sanity_check(hdf):\n yield hdf\n except OSError:\n try:\n hdf.close()\n except:\n pass",
"def dir_to_h5df(walk_dir, N):\n\n h5_file_list = list_h5(walk_dir)\n\n if (N == 'all') or (N > len(h5_file_list)):\n files_to_convert = h5_file_list\n else:\n files_to_convert = h5_file_list[:N]\n\n # Convert list of files names to list of dictionaries\n\n h5_df_list = []\n\n for filename in files_to_convert:\n f = h5py.File(filename, 'r')\n h5_df = pd.DataFrame(multi_indexer(h5_to_dict(f)))\n h5_df_list.append(h5_df)\n\n h5df = pd.concat(h5_df_list, ignore_index=True)\n\n return h5df",
"def run_convert(cmd_line_args=None):\n parser = argparse.ArgumentParser(\n description='Convert the features in hdf5 files',\n epilog=\"A hdf5 files is needed\")\n parser.add_argument(\"--outfile\", \"-o\", help=\"Destination to write data (hdf5 file)\", required=True)\n parser.add_argument(\"--infile\", \"-i\", help=\"Source HDF5 files to process\", required=True)\n\n if cmd_line_args is None:\n args = parser.parse_args()\n else:\n args = parser.parse_args(cmd_line_args)\n\n hdf5_process(args.infile, args.outfile)",
"def main():\n parser = ArgumentParser(description=\"write to a file\")\n\n parser.add_argument(\"-i\",\"--input\", type=setup.is_valid_h5_file, required=True, nargs='+',\n help=\"path(s) of HDF5 master file(s)\")\n\n parser.add_argument(\"-b\",\"--beamcenter\", nargs=2, required=True,\n help=\"beam center in X and Y (two arguments)\")\n\n parser.add_argument(\"-r\",\"--oscillation\", type=float, default=1,\n help=\"oscillation angle per well, default = 1\")\n\n parser.add_argument(\"-d\",\"--distance\", type=float, default=100,\n help=\"detector distance in mm\")\n\n parser.add_argument(\"-w\",\"--wavelength\", type=float, default=1.216,\n help=\"Wavelength in Angstrom, default is 1.216\")\n\n parser.add_argument(\"-f\",\"--framesperdegree\", type=int, default=5,\n help=\"Number of frames per degree, default is 5\")\n\n parser.add_argument(\"-t\",\"--totalframes\", type=int, default=0,\n help=\"Total number of frames to be processed, default all\")\n\n parser.add_argument(\"--output\", default=os.getcwd(),\n help=\"Use this option to change output directory, default pwd\")\n\n parser.add_argument(\"-sg\",\"--spacegroup\", type=int, default=0,\n help=\"Space group\")\n\n parser.add_argument(\"-u\",\"--unitcell\", type=str, default=\"50 50 50 90 90 90\",\n help=\"unit cell\")\n\n argslist = parser.parse_args()\n for masterfile in argslist.input:\n master1= Master(argslist,masterfile)\n master1.printDataWells()",
"def read_hdf5(filename, namelist=None, **kwargs):\n\n print('Reading %s...'%filename)\n\n fid = h5py.File(filename, mode='r')\n \n data = read_hdf5_tree(fid, namelist, **kwargs)\n\n fid.close()\n \n print('Finished reading %s.'%filename)\n return data",
"def move_fast5_files(args):\n # Create pandas dataframe with x columns.\n fast5_df = pd.DataFrame(columns=['fast5_file', 'subfolder', 'mv_command'])\n\n fast5_df['fast5_file'] = [fast5_file for fast5_file in os.listdir(READS_DIR) if fast5_file.endswith(\".fast5\")]\n fast5_df['subfolder'] = [standardise_int_length(int(i / 4000)) for i in xrange(len(fast5_df))]\n fast5_df['mv_command'] = [\"mv %s %s/\" % (fast5_file, subfolder)\n for fast5_file, subfolder in izip(fast5_df.fast5_file, fast5_df.subfolder)]\n\n subdirectories = fast5_df.subfolder.unique().tolist()\n print(subdirectories)\n for subdirectory in subdirectories:\n # Create directory\n if os.path.isdir(subdirectory):\n # If directory already exists, make sure nothing is inside\n if len(os.listdir(subdirectory)) > 0:\n sys.exit(\"Directory '%s' exists with files inside\" % subdirectory)\n else:\n os.mkdir(subdirectory)\n\n processes = (subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n for cmd in fast5_df.mv_command.tolist())\n\n # We use the islice command to split our list of mv commands into five smaller lists.\n running_processes = list(itertools.islice(processes, args.num_threads))\n while running_processes:\n for i, process in enumerate(running_processes):\n if process.poll() is not None: # Means that the process is complete!\n stdout, stderr = process.communicate() # Get the output of the completed process\n if not stderr == \"\": # Print stderr if it exists.\n print stderr\n running_processes[i] = next(processes, None)\n # Run the next number in the list.\n if running_processes[i] is None: # No more commands waiting to be processed.\n del running_processes[i] # Not a valid process.\n break\n\n return subdirectories",
"def searchDirectory(_dirname):\n for (path, dirnames, filenames) in os.walk(_dirname):\n for filename in filenames:\n if os.path.splitext(filename)[-1] == \".h\":\n fullname = os.path.join(path, filename)\n md5 = makeMD5(fullname)\n updateMD5(fullname, md5)\n\n if os.path.isfile(fullname + \".tmp\"):\n os.remove(fullname + \".tmp\")",
"def hdf5_container(tmpdir):\n filename = tmpdir.join(\"test.h5\").strpath\n hdcon = SensitivityCubeHDF5Container(filename, mode=\"w\")\n\n # Clever trick to close the file when we're done with it \n yield hdcon\n hdcon.close()",
"def open_h5meta(filepath):\n data = dict()\n h5meta_content = read_h5meta(filepath)\n for file in h5meta_content[\"filelist\"]:\n data[file] = read_detector_data(file)\n\n return data",
"def read_wabbit_hdf5_dir(dir):\n import numpy as np\n import re\n import ntpath\n import os\n\n it=0\n data={'time': [],'x0':[],'dx':[],'treecode':[]}\n # we loop over all files in the given directory\n for file in os.listdir(dir):\n # filter out the good ones (ending with .h5)\n if file.endswith(\".h5\"):\n # from the file we can get the fieldname\n fieldname=re.split('_',file)[0]\n print(fieldname)\n time, x0, dx, box, field, treecode = read_wabbit_hdf5(os.path.join(dir, file))\n #increase the counter\n data['time'].append(time[0])\n data['x0'].append(x0)\n data['dx'].append(dx)\n data['treecode'].append(treecode)\n if fieldname not in data:\n # add the new field to the dictionary\n data[fieldname]=[]\n data[fieldname].append(field)\n else: # append the field to the existing data field\n data[fieldname].append(field)\n it=it+1\n # the size of the domain\n data['box']=box\n #return time, x0, dx, box, data, treecode\n return data",
"def load_h5(filename: str, **kwargs):\n return open_h5(filename, 'r', **kwargs)",
"def run_summarize_h5lmt(args):\n print \"Running %s %s\" % ('bin/summarize_h5lmt.py', ' '.join(args))\n output_str = tokiotest.run_bin(tokiobin.summarize_h5lmt, args)\n assert output_str > 0\n\n if '--json' in args:\n if '--summary' in args:\n verify_json(output_str, ['bins', 'summary'])\n else:\n verify_json(output_str, ['bins'])",
"def md5(dir):\n\n # ugly way to avoid circular imports\n from . import settings\n\n files = [ \n settings.DATA['nation']['file_name'],\n settings.DATA['regions']['file_name'],\n settings.DATA['provinces']['file_name'],\n ]\n\n hash_md5 = hashlib.md5()\n for f in files:\n with open(dir+'/'+f, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n \n return hash_md5.hexdigest()",
"def clean_chunk_files(dirpath):\n workdir = os.getcwd()\n os.chdir(dirpath)\n for filename in glob.glob(\"[0-9]*_[0-9]*_[0-9]*.hdf5\"):\n os.remove(filename)\n os.chdir(workdir)",
"def print_h5(fname: str) -> None:\n try:\n with h5py.File(fname, 'r') as h:\n print(fname)\n recursively_print_structure(h, ' ')\n except IOError as e:\n print(f\"Cannot open HDF5 file {fname}\")\n print(f\"IOError: {e}\")",
"def calculate_mean(data_dir):\n data = ([each for each in os.listdir(data_dir)\n if each.endswith('.h5')])\n all_data = []\n for num_data in data:\n processed_data = os.path.join(data_dir, num_data)\n file = h5py.File(processed_data, 'r') \n data = file.get('Processed_data') \n all_data.append(data)\n all_data = np.array(all_data)\n all_data = np.mean(all_data, axis=0)\n return all_data",
"def calculate_mean_dark(data_dir):\n\n data = ([each for each in os.listdir(data_dir)\n if each.endswith('.h5')])\n \n all_data = []\n for num_data in data:\n #print(num_data)\n processed_data = os.path.join(data_dir, num_data)\n file = h5py.File(processed_data, 'r') \n data = file.get('Processed_data')\n all_data.append(data)\n #print\n\n all_data = np.array(all_data)\n all_data = np.mean(all_data, axis=0)\n return all_data",
"def read_hdf5(path_to_file):\n\n print(\"\\nReading HDF5 file: \", path_to_file)\n file = h5py.File(path_to_file, 'r')\n\n # List the groups\n groups = list(file.keys())\n print(\"Groups available: \", groups)\n\n # Read Zemax Metadata\n zemax_metadata = {}\n print(\"\\nZemax Metadata:\")\n for key in file['Zemax Metadata'].attrs.keys():\n print('{} : {}'.format(key, file['Zemax Metadata'].attrs[key]))\n zemax_metadata[key] = file['Zemax Metadata'].attrs[key]\n\n # Read the analysis groups\n for group_name in groups:\n if group_name != 'Zemax Metadata':\n analysis_group = file[group_name]\n print('\\nAnalysis: ', group_name)\n # For each Analysis Group we loop over subgroups\n for subgroup_key in analysis_group.keys():\n subgroup = analysis_group[subgroup_key]\n print('Subgroup #', subgroup_key)\n # List the metadata of the subgroup\n for att_key in subgroup.attrs.keys():\n print(' {} : {}'.format(att_key, subgroup.attrs[att_key]))\n\n file.close()\n\n return zemax_metadata",
"def find_fast5_files(path):\n for dirpath, dirnames, filenames in os.walk(path):\n for fname in filenames:\n if fname.endswith('.fast5'):\n yield os.path.join(dirpath, fname)",
"def main(argv):\n\n parser = argparse.ArgumentParser(description=\"Convert ascii file(s) to hdf5\")\n\n parser.add_argument('-f', '--force', action='store_true',\n help='overwrite hdf5 file if it exists')\n\n parser.add_argument('-c', '--clobber', action='store_true',\n help='remove input ascii file')\n\n parser.add_argument('-v', '--verbose', action='store_true',\n help='verbose output')\n\n parser.add_argument('name', nargs='*', type=str,\n help='ascii file(s) to convert')\n\n args = parser.parse_args(argv)\n\n for inpname in args.name:\n if not os.path.isfile(inpname):\n print('{} not found, skipping'.format(inpname))\n continue\n\n outpname = replace_ext(inpname, '.hdf5')\n\n if not args.force and os.path.isfile(outpname):\n print('{} exists, not overwriting (use -f to overwrite)'.format(outpname))\n continue\n\n ascii2hdf5(inpname, outpname, clobber=args.clobber,\n overwrite=args.force, verbose=args.verbose)",
"def read_h5_file(folder, filen):\n \n ### file path\n \n fpath = folder + filen + '.h5'\n assert os.path.exists(fpath), \"The out.h5 file does NOT exist for \" + fpath\n fl = h5py.File(fpath, 'r')\n \n ### cell information\n \n xu = np.array(fl['/cells/comu'], dtype=np.float32)\n \n ### simulation information\n \n lx = fl['/info/box/x'][...]\n ly = fl['/info/box/y'][...]\n dt = fl['/info/dt'][...]\n nsteps = fl['/info/nsteps'][...]\n nfils = fl['/info/nfils'][...]\n nbeads = fl['/info/nbeads'][...]\n nsamp = fl['/info/nsamp'][...]\n nbpf = fl['/info/nbpf'][...]\n \n ### simulation parameters\n \n density = fl['/param/density'][...]\n kappa = fl['/param/kappa'][...]\n km = fl['/param/km'][...]\n pa = fl['/param/pa'][...]\n pp = fl['/param/pp'][...]\n bl = fl['/param/bl'][...]\n sigma = fl['/param/sigma'][...]\n \n fl.close()\n \n ### generate classes to submerge data\n \n sim = misc_tools.Simulation(lx, ly, dt, nsteps, nfils, nbeads, nsamp, nbpf, \\\n density, kappa, km, pa, pp, bl, sigma)\n fils = misc_tools.Cells(xu, nbpf, sim)\n \n return sim, fils",
"def count_files_md5hash_indir(self, dir_path):\n for file_name in os.listdir(dir_path):\n file_path = \"{}/{}\".format(dir_path, file_name)\n self.md5hash.add(count_md5hash_file(file_path))",
"def calculate_md5sum_of_a_file(context, file_name, file_path):\n command = \"md5sum \" + file_path + \"/\" + file_name + \" | awk {'print $1'}\"\n return context.cme_session.send_ssh_command(command=command)",
"def apply_to_all_files(basedir,func=lambda x: x,ext='.h5'):\n cnt = 0\n # iterate over all files in all subdirectories\n for root, dirs, files in os.walk(basedir):\n files = glob.glob(os.path.join(root,'*'+ext))\n # count files\n cnt += len(files)\n # apply function to all files\n for f in files :\n func(f)\n \n# if cnt > 2000:\n# break\n \n return cnt",
"def HDF5_to_HDF5(self, **kwds):\n # split extension from HDF5 file\n if isinstance(self.filename, str):\n fileBasename,fileExtension=os.path.splitext(self.filename)\n else:\n fileBasename,fileExtension=os.path.splitext(self.filename.filename)\n # output HDF5 file\n hdf5_file = os.path.expanduser(f'{fileBasename}.h5')\n # copy everything from the HDF5 file\n with h5py.File(self.filename,mode='r') as source:\n dest = h5py.File(hdf5_file,mode='w')\n # value checks on output HDF5\n if not hasattr(dest, 'create_dataset'):\n raise ValueError('dest must be a group, got {!r}'.format(dest))\n # for each key in the root of the hdf5 file structure\n for k in source.keys():\n self.copy_from_HDF5(source[k], dest, name=k, **kwds)"
] | [
"0.69875836",
"0.6177355",
"0.59026027",
"0.57904077",
"0.5747277",
"0.57445157",
"0.5651457",
"0.5643685",
"0.56236434",
"0.5591664",
"0.55789",
"0.555372",
"0.5539781",
"0.5523432",
"0.5506336",
"0.54694766",
"0.54544675",
"0.5440774",
"0.5433494",
"0.5421979",
"0.54104507",
"0.5410175",
"0.5400371",
"0.53840476",
"0.5358579",
"0.53563875",
"0.53382015",
"0.52922636",
"0.528459",
"0.52807933"
] | 0.8061157 | 0 |
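Usage note for the record above (command_on_each_hdf5_file): a minimal sketch, assuming the function is available as defined and that the command string carries exactly one %s placeholder, which the body substitutes with each file path via command % file. The directory name here is hypothetical.

# list the contents of every *.h5 file found in ./run_000 with the standard h5ls tool
command_on_each_hdf5_file("./run_000", "h5ls %s")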
Convert directory with flusi h5 files to wabbit h5 files | def flusi_to_wabbit_dir(dir_flusi, dir_wabbit , *args, **kwargs ):
import re
import os
import glob
if not os.path.exists(dir_wabbit):
os.makedirs(dir_wabbit)
if not os.path.exists(dir_flusi):
err("The given directory does not exist!")
files = glob.glob(dir_flusi+'/*.h5')
files.sort()
for file in files:
fname_wabbit = dir_wabbit + "/" + re.split("_\d+.h5",os.path.basename(file))[0]
flusi_to_wabbit(file, fname_wabbit , *args, **kwargs ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def h5root():\n with h5py.File('dummy.nxs', mode='w', driver=\"core\", backing_store=False) as f:\n yield f",
"def read_wabbit_hdf5_dir(dir):\n import numpy as np\n import re\n import ntpath\n import os\n\n it=0\n data={'time': [],'x0':[],'dx':[],'treecode':[]}\n # we loop over all files in the given directory\n for file in os.listdir(dir):\n # filter out the good ones (ending with .h5)\n if file.endswith(\".h5\"):\n # from the file we can get the fieldname\n fieldname=re.split('_',file)[0]\n print(fieldname)\n time, x0, dx, box, field, treecode = read_wabbit_hdf5(os.path.join(dir, file))\n #increase the counter\n data['time'].append(time[0])\n data['x0'].append(x0)\n data['dx'].append(dx)\n data['treecode'].append(treecode)\n if fieldname not in data:\n # add the new field to the dictionary\n data[fieldname]=[]\n data[fieldname].append(field)\n else: # append the field to the existing data field\n data[fieldname].append(field)\n it=it+1\n # the size of the domain\n data['box']=box\n #return time, x0, dx, box, data, treecode\n return data",
"def write_wabbit_hdf5( file, time, x0, dx, box, data, treecode, iteration = 0, dtype=np.float64 ):\n import h5py\n import numpy as np\n\n\n Level = np.size(treecode,1)\n if len(data.shape)==4:\n # 3d data\n Bs = np.zeros([3,1])\n N, Bs[0], Bs[1], Bs[2] = data.shape\n Bs = Bs[::-1]\n print( \"Writing to file=%s max=%e min=%e size=%i %i %i \" % (file, np.max(data), np.min(data), Bs[0], Bs[1], Bs[2]) )\n\n else:\n # 2d data\n Bs = np.zeros([2,1])\n N, Bs[0], Bs[1] = data.shape\n Bs = Bs[::-1]\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Writing file %s\" % (file) )\n print(\"Time=%e it=%i N=%i Bs[0]=%i Bs[1]=%i Level=%i Domain=[%d, %d]\" % (time, iteration, N, Bs[0], Bs[1],Level, box[0], box[1]) )\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n\n fid = h5py.File( file, 'w')\n\n fid.create_dataset( 'coords_origin', data=x0, dtype=dtype )\n fid.create_dataset( 'coords_spacing', data=dx, dtype=dtype )\n fid.create_dataset( 'blocks', data=data, dtype=dtype )\n fid.create_dataset( 'block_treecode', data=treecode, dtype=dtype )\n\n fid.close()\n\n fid = h5py.File(file,'a')\n dset_id = fid.get( 'blocks' )\n dset_id.attrs.create( \"version\", 20200902) # this is used to distinguish wabbit file formats\n dset_id.attrs.create('time', time, dtype=dtype)\n dset_id.attrs.create('iteration', iteration)\n dset_id.attrs.create('domain-size', box, dtype=dtype )\n dset_id.attrs.create('total_number_blocks', N )\n fid.close()",
"def dir_to_h5df(walk_dir, N):\n\n h5_file_list = list_h5(walk_dir)\n\n if (N == 'all') or (N > len(h5_file_list)):\n files_to_convert = h5_file_list\n else:\n files_to_convert = h5_file_list[:N]\n\n # Convert list of files names to list of dictionaries\n\n h5_df_list = []\n\n for filename in files_to_convert:\n f = h5py.File(filename, 'r')\n h5_df = pd.DataFrame(multi_indexer(h5_to_dict(f)))\n h5_df_list.append(h5_df)\n\n h5df = pd.concat(h5_df_list, ignore_index=True)\n\n return h5df",
"def hdf5_container(tmpdir):\n filename = tmpdir.join(\"test.h5\").strpath\n hdcon = SensitivityCubeHDF5Container(filename, mode=\"w\")\n\n # Clever trick to close the file when we're done with it \n yield hdcon\n hdcon.close()",
"def HDF5_to_HDF5(self, **kwds):\n # split extension from HDF5 file\n if isinstance(self.filename, str):\n fileBasename,fileExtension=os.path.splitext(self.filename)\n else:\n fileBasename,fileExtension=os.path.splitext(self.filename.filename)\n # output HDF5 file\n hdf5_file = os.path.expanduser(f'{fileBasename}.h5')\n # copy everything from the HDF5 file\n with h5py.File(self.filename,mode='r') as source:\n dest = h5py.File(hdf5_file,mode='w')\n # value checks on output HDF5\n if not hasattr(dest, 'create_dataset'):\n raise ValueError('dest must be a group, got {!r}'.format(dest))\n # for each key in the root of the hdf5 file structure\n for k in source.keys():\n self.copy_from_HDF5(source[k], dest, name=k, **kwds)",
"def ToH5(self,h5File=None):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n if h5File == None:\r\n h5File=self.h5File\r\n\r\n #Delete .h5 File if exists\r\n if os.path.exists(h5File): \r\n logger.debug(\"{0:s}{1:s}: Delete ...\".format(logStr,h5File)) \r\n os.remove(h5File)\r\n\r\n #Determine .h5 BaseKey\r\n\r\n relPath2XmlromCurDir=os.path.normpath(os.path.relpath(os.path.normpath(self.xmlFile),start=os.path.normpath(os.path.curdir))) # ..\\..\\..\\..\\..\\3S\\Modelle\\....XML\r\n #print(repr(relPath2XmlromCurDir)) # '..\\\\..\\\\..\\\\..\\\\..\\\\3S\\\\Modelle\\\\....XML'\r\n h5KeySep='/'\r\n h5KeyCharForDot='_'\r\n h5KeyCharForMinus='_'\r\n relPath2XmlromCurDirH5BaseKey=re.sub('\\.',h5KeyCharForDot,re.sub(r'\\\\',h5KeySep,re.sub('-',h5KeyCharForMinus,re.sub('.xml','',relPath2XmlromCurDir,flags=re.IGNORECASE))))\r\n #__/__/__/__/__/3S/Modelle/...\r\n\r\n warnings.filterwarnings('ignore',category=pd.io.pytables.PerformanceWarning) #your performance may suffer as PyTables will pickle object types that it cannot map directly to c-types \r\n warnings.filterwarnings('ignore',category=tables.exceptions.NaturalNameWarning) #\\lib\\site-packages\\tables\\path.py:100: NaturalNameWarning: object name is not a valid Python identifier: '3S'; it does not match the pattern ``^[a-zA-Z_][a-zA-Z0-9_]*$``; you will not be able to use natural naming to access this object; using ``getattr()`` will still work, though)\r\n \r\n #Write .h5 File\r\n logger.debug(\"{0:s}pd.HDFStore({1:s}) ...\".format(logStr,h5File)) \r\n with pd.HDFStore(h5File) as h5Store: \r\n #for tableName,table in self.dataFrames.items():\r\n for tableName in sorted(self.dataFrames.keys()):\r\n table=self.dataFrames[tableName]\r\n h5Key=relPath2XmlromCurDirH5BaseKey+h5KeySep+tableName \r\n logger.debug(\"{0:s}{1:s}: Writing DataFrame {2:s} with h5Key={3:s}\".format(logStr,h5File,tableName,h5Key)) \r\n try:\r\n h5Store.put(h5Key,table)#,format='table') \r\n except Exception as e:\r\n logger.error(\"{0:s}{1:s}: Writing DataFrame {2:s} with h5Key={3:s} FAILED!\".format(logStr,h5File,tableName,h5Key)) \r\n raise e\r\n \r\n\r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n \r\n finally:\r\n h5Store.close()\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))",
"def transform_to_h5():\n # this took about 10 minutes for set1\n for setname in ['set1', 'set2']:\n filename = os.path.join(ltrc_dirname, '{}.h5'.format(setname))\n f = h5py.File(filename, 'w')\n\n for name in ['train', 'valid', 'test']:\n g = f.create_group(name)\n filename = os.path.join(ltrc_dirname, '{}.{}.txt'.format(setname, name))\n X, y, q = load_svmlight_file(filename, query_id=True)\n g.create_dataset('X', data=X.todense(), compression='gzip')\n g.create_dataset('y', data=y, compression='gzip')\n g.create_dataset('q', data=q, compression='gzip')\n f.close()\n # Now you can do this\n # f['/valid/X'].shape\n # Out[24]: (71083, 699)",
"def recad_dir(pattern, vmin, vmax, shape, discard_vol=False,\n only_new=True):\n file_list = glob(pattern)\n file_list.sort()\n for file_name in file_list:\n h5_name = os.path.splitext(file_name)[0] + '.h5'\n print file_name\n if only_new:\n if os.path.exists(h5_name):\n print \"already exists\"\n continue\n recad_to_h5_chunk(file_name, vmin, vmax, shape)\n if discard_vol and os.path.exists(h5_name):\n os.remove(file_name)\n os.remove(file_name + '.info')\n os.remove(file_name + '.xml')",
"def list_h5(walk_dir):\n\n file_list = []\n for root, subdirs, files in os.walk(walk_dir):\n\n for filename in files:\n file_path = os.path.join(root, filename)\n if file_path[-2:] == 'h5':\n file_list.append(file_path)\n\n return file_list",
"def convert(self, out_path: str)->None:\n tape_data_hdf5 = self.createTapeHDF5Dict()\n \n self.deleteFile(out_path)\n self.to_hdf5(tape_data_hdf5, out_path)\n print(\"HDF5 file has been successfully saved at {}\".format(out_path))",
"def make_libfile():\n # wfc3_obsmodes_uvis\n wfc3_uvis = [\n \"f218w\",\n \"f225w\",\n \"f275w\",\n \"f336w\",\n \"f390m\",\n \"f390w\",\n \"f410m\",\n \"f438w\",\n \"f467m\",\n \"f475w\",\n \"f547m\",\n \"f555w\",\n \"f606w\",\n \"f621m\",\n \"f625w\",\n \"f689m\",\n \"f763m\",\n \"f775w\",\n \"f814w\",\n \"f845m\",\n ]\n\n wfc3_ir = [\n \"f098m\",\n \"f105w\",\n \"f110w\",\n \"f125w\",\n \"f127m\",\n \"f139m\",\n \"f140w\",\n \"f153m\",\n \"f160w\",\n ]\n\n wfpc2 = [\n \"f122m\",\n \"f157w\",\n \"f336w\",\n \"f410m\",\n \"f467m\",\n \"f547m\",\n \"f439w\",\n \"f569w\",\n \"f675w\",\n \"f791w\",\n \"f170w\",\n \"f185w\",\n \"f218w\",\n \"f255w\",\n \"f300w\",\n \"f380w\",\n \"f555w\",\n \"f622w\",\n \"f450w\",\n \"f606w\",\n \"f702w\",\n \"f814w\",\n ]\n\n acs_wfc = [\n \"f435w\",\n \"f475w\",\n \"f550m\",\n \"f555w\",\n \"f606w\",\n \"f625w\",\n \"f775w\",\n \"f814w\",\n ]\n # galex\n galex = [\"fuv\", \"nuv\"]\n\n # Open hd5 file for writing\n hf = h5py.File(__ROOT__ + \"filters.hd5\", \"w\")\n\n # Create group for nice hierarchical structure\n f = hf.create_group(\"filters\")\n\n # Define arrays for \"contents\" / descriptive information\n tablenames = []\n observatories = []\n instruments = []\n names = []\n norms = []\n cwaves = []\n pwaves = []\n comments = []\n\n # Loop through WFC3_UVIS filters\n for filt in wfc3_uvis:\n\n # define uvis 1 and uvis2 modes\n mode_1 = \"wfc3, uvis1, \" + filt\n mode_2 = \"wfc3, uvis2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of uvis1 and uvis2\")\n\n # Loop through WFC3_IR filters\n for filt in wfc3_ir:\n\n # define ir mode\n mode = \"wfc3, ir, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n 
cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # Loop through WFPC2 filters\n for filt in wfpc2:\n\n # define chips 1, 2, 3, 4 modes\n mode_1 = \"wfpc2, 1, \" + filt\n mode_2 = \"wfpc2, 2, \" + filt\n mode_3 = \"wfpc2, 3, \" + filt\n mode_4 = \"wfpc2, 4, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n bp_3 = stsyn.band(mode_3)\n bp_4 = stsyn.band(mode_4)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave), bp_3(wave), bp_4(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFPC2_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFPC2\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of 1, 2, 3, 4\")\n\n # Loop through ACS filters\n for filt in acs_wfc:\n\n # define wfc1, wfc2 modes\n mode_1 = \"acs, wfc1, \" + filt\n mode_2 = \"acs, wfc2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_ACS_WFC_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"ACS_WFC\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of wfc1 and wfc2\")\n\n # Loop through GALEX filters:\n for filt in galex:\n # define ir mode\n mode = \"galex,\" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"GALEX_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant 
information\n tablenames.append(filter_name)\n observatories.append(\"GALEX\")\n instruments.append(\"GALEX\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # smash the contents arrays together\n contents = np.array(\n list(\n zip(\n tablenames,\n observatories,\n instruments,\n names,\n norms,\n cwaves,\n pwaves,\n comments,\n )\n ),\n dtype=[\n (\"TABLENAME\", \"S40\"),\n (\"OBSERVATORY\", \"S30\"),\n (\"INSTRUMENT\", \"S30\"),\n (\"NAME\", \"S10\"),\n (\"NORM\", \"<f8\"),\n (\"CWAVE\", \"<f8\"),\n (\"PWAVE\", \"<f8\"),\n (\"COMMENT\", \"S100\"),\n ],\n )\n\n # add the contents array as an hd5 dataset\n hf.create_dataset(\"content\", data=contents)\n\n # close the file\n hf.close()",
"def save_as_hdf5(self, filename):",
"def read_wabbit_hdf5(file, verbose=True, return_iteration=False):\n import h5py\n import numpy as np\n\n if verbose:\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Reading file %s\" % (file) )\n\n fid = h5py.File(file,'r')\n b = fid['coords_origin'][:]\n x0 = np.array(b, dtype=float)\n\n b = fid['coords_spacing'][:]\n dx = np.array(b, dtype=float)\n\n b = fid['blocks'][:]\n data = np.array(b, dtype=float)\n\n b = fid['block_treecode'][:]\n treecode = np.array(b, dtype=float)\n\n # get the dataset handle\n dset_id = fid.get('blocks')\n \n # from the dset handle, read the attributes\n time = dset_id.attrs.get('time')\n iteration = dset_id.attrs.get('iteration')\n box = dset_id.attrs.get('domain-size')\n version=dset_id.attrs.get('version')\n\n\n fid.close()\n\n jmin, jmax = get_max_min_level( treecode )\n N = data.shape[0]\n Bs = data.shape[1:]\n Bs = np.asarray(Bs[::-1]) # we have to flip the array since hdf5 stores in [Nz, Ny, Nx] order\n \n if version == 20200408 or version == 20231602:\n Bs = Bs-1\n #print(\"!!!Warning old (old branch: newGhostNodes) version of wabbit format detected!!!\")\n else:\n print(\"This file includes redundant points\")\n \n if verbose:\n print(\"Time=%e it=%i N=%i Bs[0]=%i Bs[1]=%i Jmin=%i Jmax=%i\" % (time, iteration, N, Bs[0], Bs[1], jmin, jmax) )\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n\n if return_iteration:\n return time, x0, dx, box, data, treecode, iteration[0]\n else:\n return time, x0, dx, box, data, treecode",
"def clean_chunk_files(dirpath):\n workdir = os.getcwd()\n os.chdir(dirpath)\n for filename in glob.glob(\"[0-9]*_[0-9]*_[0-9]*.hdf5\"):\n os.remove(filename)\n os.chdir(workdir)",
"def convert(filepath, duration=100):\n\n # Getting images from HDF5 file\n h5_file = h5py.File(filepath, 'r')\n images = h5_file['entry']['data']['data']\n # Converting to PIL.Image\n images = [Image.fromarray(i).convert() for i in images]\n # Saving as GIF\n images[0].save(filepath.split('/')[-1] + '.gif',\n save_all=True, append_images=images[1:],\n duration=duration, loop=0)\n return",
"def packFiles(source, filesPerBlock, dest):\n\tfileCount = 1\n\t\n\ttmpFileName = \"tmp.h5\"\t\n\n\n\toutFile = createBlockFile(tmpFileName)\t\n\tfor dirname, subdirs, files in os.walk(source):\t\n\t print 'Scanning ' + dirname + '...'\t\n\t for f in files:\t\n\t if f.endswith('.h5'):\t\n\t inFile = h5py.File(os.path.join(dirname, f), 'r')\t\n\t outFile.copy(inFile, outFile['songs'], f)\t\n\t inFile.close()\t\n\t fileCount = fileCount + 1\t\n\t if(fileCount > filesPerBlock):\t\n\t outFile.close()\t\n\t upload(tmpFileName, bucket)\t\n\t fileCount = 1\t\n\t outFile = createBlockFile(tmpFileName)\t\n\n \toutFile.close()\n \tif fileCount > 1:\n\t \tupload(tmpFileName, bucket)\n\n\tos.remove(tmpFileName)",
"def h5ls(h5o, maxdepth=None, maxitems=None):\n _h5ls(h5o, 0, maxdepth=maxdepth, maxitems=maxitems, prefix='')",
"def pack_audio_files_to_hdf5(args):\n\n # Arguments & parameters\n dataset_dir = args.dataset_dir\n workspace = args.workspace\n data_type = args.data_type\n mini_data = args.mini_data\n\n sample_rate = config.sample_rate\n audio_length = config.audio_length\n classes_num = config.classes_num\n lb_to_idx = config.lb_to_idx\n frames_per_second = config.frames_per_second\n frames_num = frames_per_second * config.audio_duration\n\n has_strong_target = data_type in ['testing', 'evaluation']\n\n # Paths\n audios_dir = os.path.join(dataset_dir, data_type)\n weak_label_csv_path = os.path.join(dataset_dir, 'metadata', \n get_weak_csv_filename(data_type))\n\n if data_type == 'testing':\n strong_label_csv_path = os.path.join(dataset_dir, 'metadata', \n 'groundtruth_strong_label_testing_set.csv')\n elif data_type == 'evaluation':\n strong_label_csv_path = os.path.join(dataset_dir, 'metadata', \n 'groundtruth_strong_label_evaluation_set.csv')\n\n if mini_data:\n packed_hdf5_path = os.path.join(workspace, 'features', \n 'minidata_{}.waveform.h5'.format(data_type))\n else:\n packed_hdf5_path = os.path.join(workspace, 'features', \n '{}.waveform.h5'.format(data_type))\n create_folder(os.path.dirname(packed_hdf5_path))\n\n # Read metadata\n weak_meta_list = read_weak_csv(weak_label_csv_path, data_type)\n\n # Use a small amount of data for debugging\n if mini_data:\n random.seed(1234)\n random.shuffle(weak_meta_list)\n weak_meta_list = weak_meta_list[0 : 100]\n\n audios_num = len(weak_meta_list)\n\n feature_time = time.time()\n with h5py.File(packed_hdf5_path, 'w') as hf:\n hf.create_dataset(\n name='audio_name', \n shape=(audios_num,), \n dtype='S80')\n\n hf.create_dataset(\n name='waveform', \n shape=(audios_num, audio_length), \n dtype=np.int32)\n\n hf.create_dataset(\n name='weak_target', \n shape=(audios_num, classes_num), \n dtype=np.float32)\n\n if has_strong_target:\n strong_meta_dict = read_strong_csv(strong_label_csv_path) \n \n hf.create_dataset(\n name='strong_target', \n shape=(0, frames_num, classes_num), \n maxshape=(None, frames_num, classes_num), \n dtype=np.bool)\n\n for n in range(audios_num):\n print(n)\n weak_meta_dict = weak_meta_list[n]\n audio_name = weak_meta_dict['audio_name']\n audio_path = os.path.join(audios_dir, audio_name)\n (audio, fs) = librosa.core.load(audio_path, sr=sample_rate, mono=True)\n audio = pad_truncate_sequence(audio, audio_length)\n\n hf['audio_name'][n] = audio_name.encode()\n hf['waveform'][n] = float32_to_int16(audio)\n hf['weak_target'][n] = weak_target = get_weak_target(\n weak_meta_dict['labels'], lb_to_idx)\n\n if has_strong_target:\n strong_target = get_strong_target(\n weak_meta_dict['audio_name'][1:], strong_meta_dict, \n frames_num, frames_per_second, lb_to_idx)\n \n hf['strong_target'].resize((n + 1, frames_num, classes_num))\n hf['strong_target'][n] = strong_target\n\n print('Write hdf5 to {}'.format(packed_hdf5_path))\n print('Time: {:.3f} s'.format(time.time() - feature_time))",
"def numpy_to_h5py(in_dir=config.dir_npy, split = config.split):\n\n in_files=[x[:-13] for x in os.listdir(in_dir) if x.endswith('_voc_stft.npy') and not x.startswith('._')]\n\n random.shuffle(in_files)\n\n\n num_files = len(in_files)\n\n split_idx = int(num_files*split)\n\n trn_files = in_files[:split_idx]\n\n val_files = in_files[split_idx:]\n\n num_val_files = len(val_files)\n\n print('Processing %d training files' % split_idx)\n logger.info('Processing %d training files' % split_idx)\n\n logger.info('Training file: %s' % config.h5py_file_train)\n\n voc_shape_trn = [split_idx, 5170,config.input_features]\n\n mix_shape_trn = [split_idx, 5170,config.input_features]\n\n feats_shape_trn = [split_idx, 5170,config.output_features]\n\n hdf5_file = h5py.File(config.h5py_file_train, mode='w')\n\n hdf5_file.create_dataset(\"voc_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"back_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"mix_stft\", mix_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"feats\", feats_shape_trn, np.float32)\n\n\n i = 0\n\n for f in trn_files:\n\n voc_stft = np.load(in_dir+f+'_voc_stft.npy')\n\n voc_stft = voc_stft.astype('float32')\n\n mix_stft = np.load(in_dir+f+'_mix_stft.npy')\n\n mix_stft = mix_stft.astype('float32')\n\n back_stft = np.load(in_dir+f+'_back_stft.npy')\n\n back_stft = back_stft.astype('float32')\n\n synth_feats = np.load(in_dir+f+'_synth_feats.npy')\n\n synth_feats = synth_feats.astype('float32')\n\n hdf5_file[\"voc_stft\"][i,...] = voc_stft\n\n hdf5_file[\"mix_stft\"][i,...] = mix_stft\n\n hdf5_file[\"back_stft\"][i,...] = back_stft\n\n hdf5_file[\"feats\"][i,...] = synth_feats\n\n i+=1\n utils.progress(i, split_idx)\n\n logger.info('Processed training file: %s' % f)\n\n hdf5_file.close()\n\n print('Processing %d validation files' % num_val_files)\n logger.info('Processing %d validation files' % num_val_files)\n\n logger.info('Validation file: %s' % config.h5py_file_val)\n\n voc_shape_trn = [num_val_files, 5170,config.input_features]\n\n mix_shape_trn = [num_val_files, 5170,config.input_features]\n\n feats_shape_trn = [num_val_files, 5170,config.output_features]\n\n hdf5_file = h5py.File(config.h5py_file_val, mode='w')\n\n hdf5_file.create_dataset(\"voc_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"mix_stft\", mix_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"back_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"feats\", feats_shape_trn, np.float32)\n\n\n i = 0\n\n for f in val_files:\n\n voc_stft = np.load(in_dir+f+'_voc_stft.npy')\n\n voc_stft = voc_stft.astype('float32')\n\n mix_stft = np.load(in_dir+f+'_mix_stft.npy')\n\n mix_stft = mix_stft.astype('float32')\n\n synth_feats = np.load(in_dir+f+'_synth_feats.npy')\n\n synth_feats = synth_feats.astype('float32')\n\n back_stft = np.load(in_dir+f+'_back_stft.npy')\n\n back_stft = back_stft.astype('float32')\n\n hdf5_file[\"voc_stft\"][i,...] = voc_stft\n\n hdf5_file[\"mix_stft\"][i,...] = mix_stft\n\n hdf5_file[\"back_stft\"][i,...] = back_stft\n\n hdf5_file[\"feats\"][i,...] = synth_feats\n\n i+=1\n utils.progress(i, num_val_files)\n\n logger.info('Processed validation file: %s' % f)\n\n hdf5_file.close()\n # return original_ffts",
"def hdfpath_to_nifti1image(file_path, h5path):\n with h5py.File(file_path, 'r') as f:\n return hdfgroup_to_nifti1image(f[h5path])",
"def command_on_each_hdf5_file(directory, command):\n import re\n import os\n import glob\n\n if not os.path.exists(directory):\n err(\"The given directory does not exist!\")\n\n files = glob.glob(directory+'/*.h5')\n files.sort()\n for file in files:\n c = command % file\n os.system(c)",
"def h5_to_pb(h5_model, output_dir, model_name, out_prefix=\"output_\", log_tensorboard=True): \n if osp.exists(output_dir) == False:\n os.mkdir(output_dir)\n \n out_nodes = list()\n \n ## get all tensor node.\n for i in range(len(h5_model.outputs)):\n out_nodes.append(out_prefix+str(i+1))\n tf.identity(h5_model.output[i], out_prefix+str(i+1))\n \n sess = K.get_session()\n \n ## Conver to pb file\n init_graph = sess.graph.as_graph_def()\n main_graph = graph_util.convert_variables_to_constants(sess, init_graph, out_nodes)\n graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False)\n \n if log_tensorboard:\n import_pb_to_tensorboard.import_to_tensorboard(osp.join(output_dir, model_name), output_dir)",
"def generate_all(files, alignement_h5f, input_h5f,\n nframes=7, vad=None):\n def try_remove(fname):\n try:\n os.remove(fname)\n except:\n pass\n try:\n directory = os.path.dirname(os.path.abspath(input_h5f))\n\n # create temporary files:\n _, fb_h5f = tempfile.mkstemp(dir=directory)\n _, fb_mvn_h5f = tempfile.mkstemp(dir=directory)\n os.remove(fb_h5f)\n os.remove(fb_mvn_h5f)\n\n # generate mfccs:\n h5features_compute(files, alignement_h5f, featfunc=do_mfccs)\n\n # generate stacked mvn fbanks:\n h5features_compute(files, fb_h5f, featfunc=do_fbank)\n mean_variance_normalisation(fb_h5f, fb_mvn_h5f, vad=vad)\n h5features_feats2stackedfeats(fb_mvn_h5f, input_h5f, nframes=nframes)\n finally:\n try_remove(fb_h5f)\n try_remove(fb_mvn_h5f)",
"def prepare_hdf5_file(hdf5_file, n_train, n_valid, n_test):\n n_total = n_train + n_valid + n_test\n splits = create_splits(n_train, n_valid, n_test)\n hdf5_file.attrs['split'] = H5PYDataset.create_split_array(splits)\n vlen_dtype = h5py.special_dtype(vlen=numpy.dtype('uint8'))\n hdf5_file.create_dataset('encoded_images', shape=(n_total,),\n dtype=vlen_dtype)\n hdf5_file.create_dataset('targets', shape=(n_total, 1), dtype=numpy.int16)\n hdf5_file.create_dataset('filenames', shape=(n_total, 1), dtype='S32')",
"def transition_to_buffered(self, h5_filepath):\n print(\"transition to buffered\")",
"def dataset_to_hdf5(raw_path: Path, target_path: Path):\n _prepare_target_dir(target_path)\n\n logging.info(\"Creating hdf5 blob file...\")\n with pd.HDFStore(target_path) as store:\n logging.info(\"Converting session by session...\")\n session_paths = [p for p in raw_path.glob(\"*/*/\")]\n\n # loop all session folders\n sessions_index = []\n for p in tqdm(session_paths):\n # Skip files (.DS_Store) and excluded session\n if (not os.path.isdir(p)) or (p.name in EXCLUDE_SESSIONS):\n logging.debug(f\"Skipping {p.resolve()}\")\n continue\n\n # Derive subject and session from path\n subject = p.parent.name\n session = p.name\n session_no = session.split(\"_\")[-1] #\n\n # Read\n df_act = _read_activity(p)\n df_sens = _read_sensors(p)\n\n # Join task/scenario information to sensor data\n df_sens = _join_activity(df_act, df_sens)\n\n # Save to hdf5. Renaming, because keys can't start with digits\n store.put(\n f\"subject_{subject}/session_{subject}_{session_no}/activity\",\n df_act,\n format=\"f\",\n )\n store.put(\n f\"subject_{subject}/session_{subject}_{session_no}/sensors_100hz\",\n df_sens,\n format=\"f\",\n )\n\n # Compose index table\n sessions_index.append(\n {\n \"subject\": subject,\n \"session\": f\"{subject}_session_{session_no}\",\n \"key\": f\"subject_{subject}/session_{subject}_{session_no}\",\n \"task_type\": df_sens[\"task_type\"].max(),\n }\n )\n\n # Save index table to hdf5\n df_index = pd.DataFrame(sessions_index)\n store.put(f\"index\", df_index, format=\"f\")",
"def write_h5(fname: str, data: dict) -> None:\n try:\n with h5py.File(fname, 'w') as f:\n recursively_save_dict_contents_to_group(f,'/',data)\n except IOError as e:\n print(f\"Cannot write HDF5 file {fname}\")\n print(f\"IOError: {e}\")",
"def _get_h5_path(self, name):\n return posixpath.join(self.h5_path, name)",
"def convert_all_in_bmp(self, path, new_path):\n DbWorker.mkdir(new_path)\n for i in os.listdir(path):\n self.convert_and_save_image(path+'/'+i, new_path)"
] | [
"0.63210124",
"0.62002695",
"0.60205424",
"0.59618264",
"0.58658123",
"0.58440155",
"0.57978594",
"0.57177514",
"0.57131207",
"0.56950307",
"0.5659476",
"0.5612546",
"0.55923796",
"0.5582833",
"0.5499074",
"0.5433757",
"0.54336375",
"0.54284495",
"0.53850937",
"0.5381837",
"0.53781873",
"0.5359606",
"0.5359068",
"0.53517044",
"0.5342685",
"0.5336275",
"0.53290826",
"0.53094774",
"0.527634",
"0.52763104"
] | 0.74297935 | 0 |
Convert a flusi data file to a wabbit data file. | def flusi_to_wabbit(fname_flusi, fname_wabbit, level, dim=2, dtype=np.float64):
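# usage sketch (the file names here are hypothetical): convert a 2D flusi field to a wabbit grid at level 5,
#   flusi_to_wabbit("ux_00100.h5", "ux", level=5, dim=2)
# the output file name is assembled by dense_to_wabbit_hdf5 from the prefix plus a 12-digit time stamp in microseconds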
import numpy as np
import insect_tools
# read in flusi's reference solution
time, box, origin, data_flusi = insect_tools.read_flusi_HDF5( fname_flusi, dtype=dtype )
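# keep only the last two entries of the domain size (presumably the in-plane directions of the 2D field)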
box = box[1:]
data_flusi = np.squeeze(data_flusi).T
Bs = field_shape_to_bs(data_flusi.shape,level)
dense_to_wabbit_hdf5(data_flusi, fname_wabbit , Bs, box, time, dtype=dtype) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def flusi_to_wabbit_dir(dir_flusi, dir_wabbit , *args, **kwargs ):\n import re\n import os\n import glob\n\n if not os.path.exists(dir_wabbit):\n os.makedirs(dir_wabbit)\n if not os.path.exists(dir_flusi):\n err(\"The given directory does not exist!\")\n\n files = glob.glob(dir_flusi+'/*.h5')\n files.sort()\n for file in files:\n\n fname_wabbit = dir_wabbit + \"/\" + re.split(\"_\\d+.h5\",os.path.basename(file))[0]\n\n flusi_to_wabbit(file, fname_wabbit , *args, **kwargs )",
"def to_file(self, file_path, smirnoff_data):\n pass",
"def rewrite_all_file(self, data):\r\n with open(self.file_name, 'w', encoding='utf-8') as self.file:\r\n self.file.write(data)",
"def data_to_file(data, ta_file):\n file_handle = file(ta_file, \"w\")\n file_handle.write(data_to_string(data))\n file_handle.close()",
"def bin_writer(fpath, fname, data):\n path = fpath + fname + '.dat'\n with open(path, 'ab') as file:\n for row in data:\n file.write(row.encode('utf-8'))\n return None",
"def write_data_to_wav(self, file_name: str, data):\r\n # apply scale and convert to int16\r\n data = np.int16(data/np.max(np.abs(data)) * self.wav_scale)\r\n # write to file\r\n write(file_name, self.audio_sample_rate, data)\r\n print('Sound ', file_name, ' has been saved')",
"def to_file(self, file_path, smirnoff_data):\n xml_string = self.to_string(smirnoff_data)\n with open(file_path, \"w\") as of:\n of.write(xml_string)",
"def data_to_waves(self, data):\n raise NotImplementedError",
"def dat2bin(filename):\r\n\r\n with open(filename) as fdat, open(filename + '.bin', 'wb+') as fbin:\r\n while True:\r\n a = fdat.read(8).strip()\r\n # reached EOF, stop converting\r\n if a == '':\r\n break\r\n\r\n # less than 8 bit, stop converting\r\n if len(a) < 8:\r\n break\r\n\r\n # converting to binary\r\n fbin.write(bitstring.BitArray('0b' + a).bytes)",
"def tofile(self, f):\n raise NotImplementedError(\"ScalableRedisLocalBloomFilter not support tofile\")",
"def tofile(self, f):\n raise NotImplementedError(\"RedisLocalBloomFilter not support tofile\")",
"def _to_data_file(converted_papers):\n\n temp_dir = _TEMP_DIR()\n if not os.path.isdir(temp_dir):\n os.makedirs(temp_dir)\n else: # Clean dir\n shutil.rmtree(temp_dir)\n os.makedirs(temp_dir)\n\n with open(os.path.join(temp_dir, _DATA_FILE), 'w') as f:\n for converted_paper in converted_papers:\n if not converted_paper:\n continue\n f.write(converted_paper)\n f.write('\\n')\n\n return temp_dir",
"def compress(self, file):\n\t\t\n\t\ttext = file.read() \n\t\ttext = text.rstrip() #elimina los espacios en blanco del final\n\n\t\t\n\t\tfrequency = self.make_frequency_dict(text)#obtenemos la frencuencia de cada numero en el texto\n\t\tself.make_heap(frequency)\n\t\tself.merge_nodes()\n\t\tself.make_codes()\n\t\tencoded_text = self.get_encoded_text(text)\n\t\tpadded_encoded_text = self.pad_encoded_text(encoded_text)\n\n\t\tb = self.get_byte_array(padded_encoded_text)\n\n\t\treturn b",
"def bwt_binary_conversion():\n #call bwt function\n sequence,bwt_pattern, file, seq_list = transform()\n seq = bwt_pattern\n #retrieve the dictionnary containing binary code and the binary sequence\n huffman_code, binary_seq = huffman_construction(seq)\n \n #add zeroes\n added = 0 \n while len(binary_seq) % 8 !=0:\n added +=1 \n binary_seq += '0'\n \n #convert binary sequence in utf-8 sequence\n comp_seq = \"\"\n for bit in range(0, len(binary_seq), 8):\n byte = binary_seq[bit:bit+8]\n code = int(byte, 2)\n comp_seq += chr(code)\n \n #save the number of zeroes added \n huffman_code[\"add\"]= added\n \n #save the dictionnary in the file and the compressed sequence\n created_file = os.path.splitext(file)[0]\n file_comp = open(created_file + \"_bwt_compressed.txt\", \"w\") \n json.dump(huffman_code, file_comp)\n file_comp.write(\"\\n\"+comp_seq) \n \n file_comp.close()\n \n messagebox.showinfo(\"Information\", \"Your compression has been saved in \"+created_file +\"_bwt_compressed.txt file.\")\n \n return seq, binary_seq, comp_seq",
"def _toFile(self):\n pass",
"def flush(self) -> None:\n if self.single_file:\n if (\n self.user_data\n or self.chat_data\n or self.bot_data\n or self.callback_data\n or self.conversations\n ):\n self._dump_singlefile()\n else:\n if self.user_data:\n self._dump_file(f\"{self.filename}_user_data\", self.user_data)\n if self.chat_data:\n self._dump_file(f\"{self.filename}_chat_data\", self.chat_data)\n if self.bot_data:\n self._dump_file(f\"{self.filename}_bot_data\", self.bot_data)\n if self.callback_data:\n self._dump_file(f\"{self.filename}_callback_data\", self.callback_data)\n if self.conversations:\n self._dump_file(f\"{self.filename}_conversations\", self.conversations)",
"def spew(path, data):\n with open(path, 'w+') as f:\n f.write(data)",
"def writeDataToFile(self):\n if self.data is not None:\n self.notify.debug('Data is now synced with disk at %s' % \\\n self.filepath)\n if self.wantAnyDbm:\n self.data.sync()\n else:\n try:\n backuppath = self.filepath+ '.bu'\n if os.path.exists(self.filepath):\n os.rename(self.filepath,backuppath)\n \n outfile = open(self.filepath, 'w')\n cPickle.dump(self.data,outfile)\n outfile.close()\n \n if os.path.exists(backuppath):\n os.remove(backuppath)\n except EnvironmentError:\n self.notify.warning(str(sys.exc_info()[1]))\n else:\n self.notify.warning('No data to write. Aborting sync.')",
"def write_data():",
"def bufr_to_dataframe(file=''):\n \n if debug:\n print(\"Running bufr_to_dataframe for: \", file)\n \n check_read_file (file = file, read= False)\n f = open(file)\n #source_file = [l for l in file.split('/') if '.bfr' in l][0]\n read_data = []\n \n \"\"\" Name of the columns as they will appear in the pandas dataframe (not necessarily CDM compliant) \"\"\"\n #column_names = ['report_timestamp' , 'iday', 'station_id', 'latitude', 'longitude', 'pressure', 'value','varno@body']\n \n lat, lon, alt, blockNumber, stationNumber, statid = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan\n \n obs_id, report_id = -1, 0 # progressive observation id\n stations_id = [] \n \n while 1:\n #lista = [] # temporary list\n bufr = codes_bufr_new_from_file(f)\n \n if bufr is None:\n break\n \n codes_set(bufr, 'unpack', 1) # eCcodes must expand all the descriptors and unpack the data section\n \n date = '19'+codes_get_array(bufr, \"typicalDate\")[0][2:]\n timePeriod = codes_get_array(bufr, \"typicalTime\")[0] \n \n year, month, day = date[0:4], date[4:6] , date[6:8]\n hour, minutes = timePeriod[0:2] , timePeriod[2:4]\n \n idate = datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M')\n iday = int(year + month + day )\n\n pressure = codes_get_array(bufr, \"pressure\") \n temperature = codes_get_array(bufr, \"airTemperature\") \n wind_direction = codes_get_array(bufr, \"windDirection\")\n wind_speed = codes_get_array(bufr, \"windSpeed\")\n \n try: # not all the bufr files have the dewpoint \n dew_point = codes_get_array(bufr, \"dewpointTemperature\")\n except:\n dew_point= np.empty((1, len(temperature)))\n dew_point[:] = np.nan\n \n num_lev = len(pressure) # number of distinct pressure levels \n \n try:\n geopotential = codes_get_array(bufr, \"nonCoordinateGeopotentialHeight\") \n except:\n geopotential = np.full( (1,len(temperature)) , np.nan )[0,:]\n \n if report_id == 0:\n ''' Check again but these values should remain the same for all cnt, so it makes no sense to read them every time '''\n lat = codes_get(bufr, \"latitude\")\n lon = codes_get(bufr, \"longitude\")\n alt = float(codes_get(bufr, \"heightOfStation\"))\n blockNumber = codes_get(bufr, \"blockNumber\")\n stationNumber = codes_get(bufr, \"stationNumber\")\n #statid = str(blockNumber*1000+stationNumber) # changed to int instead of str\n statid = blockNumber*1000+stationNumber\n if statid not in stations_id:\n stations_id.append(statid) \n \n codes_release(bufr)\n \n miss_value = -1.e100 \n \n for i in range(len(temperature)):\n obs_id = obs_id + 1 \n airT = temperature[i]\n winds = wind_speed[i]\n windd = wind_direction[i]\n press = pressure[i]\n gph = geopotential[i]\n dp = dew_point[i]\n if press == miss_value:\n press = np.nan \n if dp == miss_value:\n dp = np.nan\n if airT == miss_value : # replacing none values with numpy nans\n airT = np.nan \n if winds == miss_value:\n winds = np.nan\n if gph == miss_value:\n gph = np.nan \n if windd == 2147483647 or windd == -2147483647:\n windd = np.nan \n \n \n for value,var in zip( [gph, airT, winds, windd, dp], ['gph', 'temperature', 'wind_speed', 'wind_direction', 'dew_point'] ):\n obs_id = obs_id + 1 \n if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1\n z_type = 1 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph 
and z_type==2 \n z_type = 2 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n else:\n z_type = -2147483648 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n\n\n report_id += 1\n \n df = pd.DataFrame(data= read_data, columns= column_names) \n \n df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects \n df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length )\n \n df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan)\n \n df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) \n \n return df, stations_id",
"def process_data(data):\n bio = BytesIO()\n bio.write(data)\n bio.seek(0)\n process(bio)",
"def _save_data(data, file):\n with jsonlines.open(file, mode='w') as writer:\n for conversation in data:\n writer.write(conversation.to_json())",
"def write_wabbit_hdf5( file, time, x0, dx, box, data, treecode, iteration = 0, dtype=np.float64 ):\n import h5py\n import numpy as np\n\n\n Level = np.size(treecode,1)\n if len(data.shape)==4:\n # 3d data\n Bs = np.zeros([3,1])\n N, Bs[0], Bs[1], Bs[2] = data.shape\n Bs = Bs[::-1]\n print( \"Writing to file=%s max=%e min=%e size=%i %i %i \" % (file, np.max(data), np.min(data), Bs[0], Bs[1], Bs[2]) )\n\n else:\n # 2d data\n Bs = np.zeros([2,1])\n N, Bs[0], Bs[1] = data.shape\n Bs = Bs[::-1]\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Writing file %s\" % (file) )\n print(\"Time=%e it=%i N=%i Bs[0]=%i Bs[1]=%i Level=%i Domain=[%d, %d]\" % (time, iteration, N, Bs[0], Bs[1],Level, box[0], box[1]) )\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n\n fid = h5py.File( file, 'w')\n\n fid.create_dataset( 'coords_origin', data=x0, dtype=dtype )\n fid.create_dataset( 'coords_spacing', data=dx, dtype=dtype )\n fid.create_dataset( 'blocks', data=data, dtype=dtype )\n fid.create_dataset( 'block_treecode', data=treecode, dtype=dtype )\n\n fid.close()\n\n fid = h5py.File(file,'a')\n dset_id = fid.get( 'blocks' )\n dset_id.attrs.create( \"version\", 20200902) # this is used to distinguish wabbit file formats\n dset_id.attrs.create('time', time, dtype=dtype)\n dset_id.attrs.create('iteration', iteration)\n dset_id.attrs.create('domain-size', box, dtype=dtype )\n dset_id.attrs.create('total_number_blocks', N )\n fid.close()",
"def output_beat_to_file(file_name, e):\n print(\"Writing to file:\", file_name)\n routine = gp.compile(e,pset)\n with open(file_name+\".raw\",'w') as f:\n for t in range(200000):\n f.write(chr(int(routine(t+1))%256))\n # Now convert to wav\n subprocess.call(SOX_COMMAND + \" \" + file_name + \".raw\" + \" \" + file_name + \".wav\", shell=True)\n subprocess.call(LAME_COMMAND + \" \" + file_name + \".wav\", shell=True)",
"def test_process_5_1_surround_file(self):\n test_path = pathlib.Path(__file__).parent.absolute() / 'data/surround.wav'\n self.default_kwargs['input_file'] = test_path\n self.encoder = FileEncoder(**self.default_kwargs)\n self.encoder.process()",
"def write_data():\n\n data_location = os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", DATA_DIR))\n\n sbi_file_name = os.path.join(data_location, SBI_FILE)\n\n sbi = SbiInfo(sbi_file_name)\n\n # the test file is stored in the same directory as the script\n test_file = os.path.splitext(os.path.join(os.path.dirname(__file__), SBI_FILE))[0] + \".pkl\"\n _logger.info(\"Writing header object to {}\".format(os.path.join(os.path.dirname(__file__),\n test_file)))\n sbi.data.to_pickle(test_file)",
"def save_data(dfin, outfile=\"./FPeng_prepped\"):\n dfin.to_csv(outfile+'.csv', sep='\\t', index=False)\n # s3.meta.client.upload_file(outfile+\".csv\", 'p3-engine', 'ETL/FPeng_prepped.csv')\n print(\"csv...\", end=\" \")\n\n dfin.to_pickle(outfile+'.pkl' ,protocol=4)\n # s3.meta.client.upload_file(outfile+'.pkl', 'p3-engine', 'ETL/FPeng_prepped.pkl')\n print(\"pkl...\", end=\" \")\n #dfin.to_msgpack(outfile+'.msg')\n #print(\"msg...\", end=\" \")\n\n #s3.meta.client.upload_file(outfile+\".msg\", 'p3-engine', 'ETL/FPeng_prepped.msg')\n\n # print(\"to s3 complete\", end=\" \")",
"def test_basic_mech_write(self):\n\n unit = btmux.parse_from_file(os.path.join(BTMUX_SAMPLE_DIR, 'AS7-D'))\n fobj = StringIO()\n write_to_file(unit, fobj)\n #print fobj.getvalue()\n # TODO: Compare to a golden standard.",
"def convert_txt_to_data():\n pass",
"def load_waifu2x_json(self, data: list):\n self.conv1.load_waifu2x_json(data[0])\n self.conv2.load_waifu2x_json(data[1])\n self.conv3.load_waifu2x_json(data[2])\n self.conv4.load_waifu2x_json(data[3])\n self.conv5.load_waifu2x_json(data[4])\n self.conv6.load_waifu2x_json(data[5])\n self.conv7.load_waifu2x_json(data[6])"
] | [
"0.6466376",
"0.5578025",
"0.5258005",
"0.52233136",
"0.5194225",
"0.5156527",
"0.51492476",
"0.51063263",
"0.50713205",
"0.5061902",
"0.50611764",
"0.5058425",
"0.5032972",
"0.5028185",
"0.50022244",
"0.48981017",
"0.4894581",
"0.48687062",
"0.48647705",
"0.48561457",
"0.48449826",
"0.48369515",
"0.48325542",
"0.4801509",
"0.48008054",
"0.4785059",
"0.47836712",
"0.4779644",
"0.4769922",
"0.47671428"
] | 0.6721087 | 0 |
This function creates a _.h5 file with the wabbit block structure from a given dense data matrix. To do so, the dense data is divided into equal blocks, similar to the sparse_to_dense option in wabbit-post. | def dense_to_wabbit_hdf5(ddata, name, Bs, box_size=None, time=0, iteration=0, dtype=np.float64):
# concatenate filename in the same style as wabbit does
fname = name + "_%12.12d" % int(time*1e6) + ".h5"
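# e.g. name="ux" and time=0.25 yield "ux_000000250000.h5" (12-digit time stamp in microseconds)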
Ndim = ddata.ndim
Nsize = np.asarray(ddata.shape)
level = 0
if type(Bs) is int:
    Bs = [Bs]*Ndim # a scalar block size means the same Bs in every direction
Bs = np.asarray(Bs) # make sure Bs is a numpy array
Bs = Bs[::-1] # flip Bs such that Bs=[BsY, BsX]; the order then matches Nsize=[Ny,Nx]
#########################################################
# do some initial checks on the input data
# 1) check if the size of the domain is given
if box_size is None:
box = np.ones(Ndim)
else:
box = np.asarray(box_size)
# 2) check if number of lattice points is block decomposable
# loop over all dimensions
for d in range(Ndim):
# check if the number of grid points is divisible by Bs-1
if (np.remainder(Nsize[d], Bs[d]-1) == 0):
if(is_power2(Nsize[d]//(Bs[d]-1))):
level = int(max(level, np.log2(Nsize[d]/(Bs[d]-1))))
else:
err("Number of Intervals must be a power of 2!")
else:
err("datasize must be multiple of Bs!")
# 3) check dimension of array:
if Ndim < 2 or Ndim > 3:
err("dimensions are wrong")
#########################################################
# assume periodicity:
data = np.zeros(Nsize+1,dtype=dtype)
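# the grid is extended by one point per direction so that neighbouring blocks can share the point on
# their common face and the last block in each direction also carries its (periodic) end point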
if Ndim == 2:
data[:-1, :-1] = ddata
# copy first row and column for periodicity
data[-1, :] = data[0, :]
data[:, -1] = data[:, 0]
else:
data[:-1, :-1, :-1] = ddata
# copy for periodicity
data[-1, :, :] = data[0, :, :]
data[:, -1, :] = data[:, 0, :]
data[:, :, -1] = data[:, :, 0]
# number of intervals in each dimension
Nintervals = [int(2**level)]*Ndim # note [val]*3 means [val, val , val]
Lintervals = box[:Ndim]/np.asarray(Nintervals)
Lintervals = Lintervals[::-1]
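# Lintervals is the physical edge length of one block; the grid spacing inside a block is Lintervals/(Bs-1)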
x0 = []
treecode = []
dx = []
bdata = []
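# visit every block, record its origin, spacing and treecode, and cut the corresponding Bs-sized
# patch out of the extended data array (adjacent patches overlap by exactly one point)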
if Ndim == 3:
for ibx in range(Nintervals[0]):
for iby in range(Nintervals[1]):
for ibz in range(Nintervals[2]):
x0.append([ibx, iby, ibz]*Lintervals)
dx.append(Lintervals/(Bs-1))
lower = [ibx, iby, ibz]* (Bs - 1)
lower = np.asarray(lower, dtype=int)
upper = lower + Bs
treecode.append(blockindex2treecode([ibx, iby, ibz], 3, level))
bdata.append(data[lower[0]:upper[0], lower[1]:upper[1], lower[2]:upper[2]])
else:
for ibx in range(Nintervals[0]):
for iby in range(Nintervals[1]):
x0.append([ibx, iby]*Lintervals)
dx.append(Lintervals/(Bs-1))
lower = [ibx, iby]* (Bs - 1)
lower = np.asarray(lower, dtype=int)
upper = lower + Bs
treecode.append(blockindex2treecode([ibx, iby], 2, level))
bdata.append(data[lower[0]:upper[0], lower[1]:upper[1]])
x0 = np.asarray(x0,dtype=dtype)
dx = np.asarray(dx,dtype=dtype)
treecode = np.asarray(treecode, dtype=dtype)
block_data = np.asarray(bdata, dtype=dtype)
write_wabbit_hdf5(fname, time, x0, dx, box, block_data, treecode, iteration, dtype )
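# usage sketch (array and names are made up): a 64x64 field on the unit square with 17-point blocks,
#   field = np.random.rand(64, 64)
#   fname = dense_to_wabbit_hdf5(field, "ux", Bs=[17, 17], box_size=[1.0, 1.0], time=0.0)
# this yields level 2, i.e. 16 blocks, and writes "ux_000000000000.h5"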
return fname | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_dense_grid( fname_in, fname_out = None, dim=2 ):\n import numpy as np\n import insect_tools\n import matplotlib.pyplot as plt\n\n # read data\n time, x0, dx, box, data, treecode = read_wabbit_hdf5( fname_in )\n\n # convert blocks to complete matrix\n field, box = dense_matrix( x0, dx, data, treecode, dim=dim )\n\n # write data to FLUSI-type hdf file\n if fname_out:\n insect_tools.write_flusi_HDF5( fname_out, time, box, field)\n else: \n dx = [b/(np.size(field,k)) for k,b in enumerate(box)]\n X = [np.arange(0,np.size(field,k))*dx[k] for k,b in enumerate(box)]\n return field, box, dx, X",
"def write_wabbit_hdf5( file, time, x0, dx, box, data, treecode, iteration = 0, dtype=np.float64 ):\n import h5py\n import numpy as np\n\n\n Level = np.size(treecode,1)\n if len(data.shape)==4:\n # 3d data\n Bs = np.zeros([3,1])\n N, Bs[0], Bs[1], Bs[2] = data.shape\n Bs = Bs[::-1]\n print( \"Writing to file=%s max=%e min=%e size=%i %i %i \" % (file, np.max(data), np.min(data), Bs[0], Bs[1], Bs[2]) )\n\n else:\n # 2d data\n Bs = np.zeros([2,1])\n N, Bs[0], Bs[1] = data.shape\n Bs = Bs[::-1]\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Writing file %s\" % (file) )\n print(\"Time=%e it=%i N=%i Bs[0]=%i Bs[1]=%i Level=%i Domain=[%d, %d]\" % (time, iteration, N, Bs[0], Bs[1],Level, box[0], box[1]) )\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n\n fid = h5py.File( file, 'w')\n\n fid.create_dataset( 'coords_origin', data=x0, dtype=dtype )\n fid.create_dataset( 'coords_spacing', data=dx, dtype=dtype )\n fid.create_dataset( 'blocks', data=data, dtype=dtype )\n fid.create_dataset( 'block_treecode', data=treecode, dtype=dtype )\n\n fid.close()\n\n fid = h5py.File(file,'a')\n dset_id = fid.get( 'blocks' )\n dset_id.attrs.create( \"version\", 20200902) # this is used to distinguish wabbit file formats\n dset_id.attrs.create('time', time, dtype=dtype)\n dset_id.attrs.create('iteration', iteration)\n dset_id.attrs.create('domain-size', box, dtype=dtype )\n dset_id.attrs.create('total_number_blocks', N )\n fid.close()",
"def read_wabbit_hdf5(file, verbose=True, return_iteration=False):\n import h5py\n import numpy as np\n\n if verbose:\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Reading file %s\" % (file) )\n\n fid = h5py.File(file,'r')\n b = fid['coords_origin'][:]\n x0 = np.array(b, dtype=float)\n\n b = fid['coords_spacing'][:]\n dx = np.array(b, dtype=float)\n\n b = fid['blocks'][:]\n data = np.array(b, dtype=float)\n\n b = fid['block_treecode'][:]\n treecode = np.array(b, dtype=float)\n\n # get the dataset handle\n dset_id = fid.get('blocks')\n \n # from the dset handle, read the attributes\n time = dset_id.attrs.get('time')\n iteration = dset_id.attrs.get('iteration')\n box = dset_id.attrs.get('domain-size')\n version=dset_id.attrs.get('version')\n\n\n fid.close()\n\n jmin, jmax = get_max_min_level( treecode )\n N = data.shape[0]\n Bs = data.shape[1:]\n Bs = np.asarray(Bs[::-1]) # we have to flip the array since hdf5 stores in [Nz, Ny, Nx] order\n \n if version == 20200408 or version == 20231602:\n Bs = Bs-1\n #print(\"!!!Warning old (old branch: newGhostNodes) version of wabbit format detected!!!\")\n else:\n print(\"This file includes redundant points\")\n \n if verbose:\n print(\"Time=%e it=%i N=%i Bs[0]=%i Bs[1]=%i Jmin=%i Jmax=%i\" % (time, iteration, N, Bs[0], Bs[1], jmin, jmax) )\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n\n if return_iteration:\n return time, x0, dx, box, data, treecode, iteration[0]\n else:\n return time, x0, dx, box, data, treecode",
"def saveh5(fname, mat, name='data'):\n fp = open_write(fname)\n save_vec(mat, fp, fp.root, name)\n fp.close()",
"def edf_gaze_data_to_hdf(self, \n\t\t\talias = None, \n\t\t\twhich_eye = 0, \n\t\t\tpupil_hp = 0.01, \n\t\t\tpupil_lp = 6,\n\t\t\tsample_rate = 1000.,\n\t\t\tminimal_frequency_filterbank = 0.0025, \n\t\t\tmaximal_frequency_filterbank = 0.1, \n\t\t\tnr_freq_bins_filterbank = 9, \n\t\t\tn_cycles_filterbank = 1, \n\t\t\tcycle_buffer_filterbank = 3,\n\t\t\ttf_decomposition_filterbank ='lp_butterworth' \n\t\t\t):\n\t\t\n\t\t# shell()\n\t\t\n\t\tif not hasattr(self, 'edf_operator'):\n\t\t\tself.add_edf_file(edf_file_name = alias)\n\t\t\n\t\tif alias == None:\n\t\t\talias = os.path.split(self.edf_operator.inputFileName)[-1]\n\t\tself.logger.info('Adding gaze data from %s to group %s to %s' % (os.path.split(self.edf_operator.inputFileName)[-1], alias, self.input_object))\n\t\t\n\t\t#\n\t\t#\tgaze data in blocks\n\t\t#\n\t\twith pd.get_store(self.input_object) as h5_file:\n\t\t\t# shell()\n\t\t\t# recreate the non-gaze data for the block, that is, its sampling rate, eye of origin etc.\n\t\t\tblocks_data_frame = pd.DataFrame([dict([[i,self.edf_operator.blocks[j][i]] for i in self.edf_operator.blocks[0].keys() if i not in ('block_data', 'data_columns')]) for j in range(len(self.edf_operator.blocks))])\n\t\t\th5_file.put(\"/%s/blocks\"%alias, blocks_data_frame)\n\t\t\t\n\t\t\t# gaze data per block\n\t\t\tif not 'block_data' in self.edf_operator.blocks[0].keys():\n\t\t\t\tself.edf_operator.take_gaze_data_for_blocks()\n\t\t\tfor i, block in enumerate(self.edf_operator.blocks):\n\t\t\t\tbdf = pd.DataFrame(block['block_data'], columns = block['data_columns'])\n\t\t\t\n\t\t\t\t#\n\t\t\t\t# preprocess pupil:\n\t\t\t\t#\n\t\t\t\tfor eye in blocks_data_frame.eye_recorded[i]: # this is a string with one or two letters, 'L', 'R' or 'LR'\n\t\t\t\t# create dictionary of data per block:\n\t\t\t\t\tgazeX = bdf[eye+'_gaze_x']\n\t\t\t\t\tgazeY = bdf[eye+'_gaze_y']\n\t\t\t\t\tpupil = bdf[eye+'_pupil']\n\t\t\t\t\teye_dict = {'timepoints':bdf.time, 'gaze_X':gazeX, 'gaze_Y':gazeY, 'pupil':pupil,}\n\t\t\t\t\t\n\t\t\t\t\t# create instance of class EyeSignalOperator, and include the blink data as detected by the Eyelink 1000:\n\t\t\t\t\tif hasattr(self.edf_operator, 'blinks_from_message_file'):\n\t\t\t\t\t\tblink_dict = self.read_session_data(alias, 'blinks_from_message_file')\n\t\t\t\t\t\tblink_dict[blink_dict['eye'] == eye]\n\t\t\t\t\t\tsac_dict = self.read_session_data(alias, 'saccades_from_message_file')\n\t\t\t\t\t\tsac_dict[sac_dict['eye'] == eye]\n\t\t\t\t\t\teso = EyeSignalOperator(input_object=eye_dict, eyelink_blink_data=blink_dict,sample_rate=sample_rate, eyelink_sac_data = sac_dict)\n\t\t\t\t\telse:\n\t\t\t\t\t\teso = EyeSignalOperator(input_object=eye_dict,sample_rate=sample_rate)\n\t\n\t\t\t\t\t# interpolate blinks:\n\t\t\t\t\teso.interpolate_blinks(method='linear')\n\t\t\t\t\teso.interpolate_blinks2()\n\n\t\t\t\t\t# low-pass and band-pass pupil data:\n\t\t\t\t\teso.filter_pupil(hp=pupil_hp, lp=pupil_lp)\n\n\t\t\t\t\t# regress blink and saccade responses\n\t\t\t\t\teso.regress_blinks()\n\n\t\t\t\t\tfor dt in ['lp_filt_pupil','lp_filt_pupil_clean','bp_filt_pupil','bp_filt_pupil_clean']:\n\t\t\t\t\t\t# percent signal change filtered pupil data:\n\t\t\t\t\t\teso.percent_signal_change_pupil(dtype=dt)\n\t\t\t\t\t\teso.zscore_pupil(dtype=dt)\n\t\t\t\t\t\teso.dt_pupil(dtype=dt)\n\t\t\t\t\t\n\t\t\t\t\t# add to existing dataframe:\n\t\t\t\t\tbdf[eye+'_pupil_int'] = eso.interpolated_pupil\n\t\t\t\t\tbdf[eye+'_pupil_hp'] = eso.hp_filt_pupil\n\t\t\t\t\tbdf[eye+'_pupil_lp'] = 
eso.lp_filt_pupil\n\n\t\t\t\t\tbdf[eye+'_pupil_lp_psc'] = eso.lp_filt_pupil_psc\n\t\t\t\t\tbdf[eye+'_pupil_lp_diff'] = np.concatenate((np.array([0]),np.diff(eso.lp_filt_pupil)))\n\t\t\t\t\tbdf[eye+'_pupil_bp'] = eso.bp_filt_pupil\n\t\t\t\t\tbdf[eye+'_pupil_bp_dt'] = eso.bp_filt_pupil_dt\n\t\t\t\t\tbdf[eye+'_pupil_bp_zscore'] = eso.bp_filt_pupil_zscore\n\t\t\t\t\tbdf[eye+'_pupil_bp_psc'] = eso.bp_filt_pupil_psc\n\t\t\t\t\tbdf[eye+'_pupil_baseline'] = eso.baseline_filt_pupil\n\n\t\t\t\t\tbdf[eye+'_gaze_x_int'] = eso.interpolated_x\n\t\t\t\t\tbdf[eye+'_gaze_y_int'] = eso.interpolated_y\n\n\t\t\t\t\t# blink/saccade regressed versions\n\t\t\t\t\tbdf[eye+'_pupil_lp_clean'] = eso.lp_filt_pupil_clean\n\t\t\t\t\tbdf[eye+'_pupil_lp_clean_psc'] = eso.lp_filt_pupil_clean_psc\n\t\t\t\t\tbdf[eye+'_pupil_lp_clean_zscore'] = eso.lp_filt_pupil_clean_zscore\n\t\t\t\t\tbdf[eye+'_pupil_bp_clean'] = eso.bp_filt_pupil_clean\n\t\t\t\t\tbdf[eye+'_pupil_bp_clean_psc'] = eso.bp_filt_pupil_clean_psc\n\t\t\t\t\tbdf[eye+'_pupil_bp_clean_zscore'] = eso.bp_filt_pupil_clean_zscore\n\t\t\t\t\n\t\t\t\t\t# plot interpolated pupil time series:\n\t\t\t\t\tfig = pl.figure(figsize = (16, 2.5))\n\t\t\t\t\tx = np.linspace(0,eso.raw_pupil.shape[0]/sample_rate, eso.raw_pupil.shape[0])\n\t\t\t\t\tpl.plot(x, eso.raw_pupil, 'b', rasterized=True)\n\t\t\t\t\tpl.plot(x, eso.interpolated_pupil, 'g', rasterized=True)\n\t\t\t\t\tpl.ylabel('pupil size (raw)')\n\t\t\t\t\tpl.xlabel('time (s)')\n\t\t\t\t\tpl.legend(['raw', 'int + filt'])\n\t\t\t\t\tfig.savefig(os.path.join(os.path.split(self.input_object)[0], 'blink_interpolation_1_{}_{}_{}.pdf'.format(alias, i, eye)))\n\t\t\t\t\t\n\t\t\t\t\t# plot results blink detection next to hdf5:\n\t\t\t\t\tfig = pl.figure(figsize = (16, 2.5))\n\t\t\t\t\tpl.plot(eso.pupil_diff, rasterized=True)\n\t\t\t\t\tpl.plot(eso.peaks, eso.pupil_diff[eso.peaks], '+', mec='r', mew=2, ms=8, rasterized=True)\n\t\t\t\t\tpl.ylim(ymin=-200, ymax=200)\n\t\t\t\t\tpl.ylabel('diff pupil size (raw)')\n\t\t\t\t\tpl.xlabel('samples')\n\t\t\t\t\tfig.savefig(os.path.join(os.path.split(self.input_object)[0], 'blink_interpolation_2_{}_{}_{}.pdf'.format(alias, i, eye)))\n\n\t\t\t\t\t# try time-frequency decomposition of the baseline signal\n\t\t\t\t\ttry:\n\t\t\t\t\t\teso.time_frequency_decomposition_pupil(\n\t\t\t\t\t\t\t\tminimal_frequency = minimal_frequency_filterbank, \n\t\t\t\t\t\t\t\tmaximal_frequency = maximal_frequency_filterbank, \n\t\t\t\t\t\t\t\tnr_freq_bins = nr_freq_bins_filterbank, \n\t\t\t\t\t\t\t\tn_cycles = n_cycles_filterbank, \n\t\t\t\t\t\t\t\tcycle_buffer = cycle_buffer_filterbank,\n\t\t\t\t\t\t\t\ttf_decomposition=tf_decomposition_filterbank,\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\tself.logger.info('Performed T-F analysis of type %s'%tf_decomposition_filterbank)\n\t\t\t\t\t\tfor freq in eso.band_pass_filter_bank_pupil.keys():\n\t\t\t\t\t\t\tbdf[eye+'_pupil_filterbank_bp_%2.5f'%freq] = eso.band_pass_filter_bank_pupil[freq]\n\t\t\t\t\t\t\tself.logger.info('Saved T-F analysis %2.5f'%freq)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tself.logger.error('Something went wrong with T-F analysis of type %s'%tf_decomposition_filterbank)\n\t\t\t\t\t\tpass\n\t\t\t\t\t\n\t\t\t\t# put in HDF5:\n\t\t\t\th5_file.put(\"/%s/block_%i\"%(alias, i), bdf)",
"def save_as_hdf5(self, filename):",
"def transform_to_h5():\n # this took about 10 minutes for set1\n for setname in ['set1', 'set2']:\n filename = os.path.join(ltrc_dirname, '{}.h5'.format(setname))\n f = h5py.File(filename, 'w')\n\n for name in ['train', 'valid', 'test']:\n g = f.create_group(name)\n filename = os.path.join(ltrc_dirname, '{}.{}.txt'.format(setname, name))\n X, y, q = load_svmlight_file(filename, query_id=True)\n g.create_dataset('X', data=X.todense(), compression='gzip')\n g.create_dataset('y', data=y, compression='gzip')\n g.create_dataset('q', data=q, compression='gzip')\n f.close()\n # Now you can do this\n # f['/valid/X'].shape\n # Out[24]: (71083, 699)",
"def hdf5_container(tmpdir):\n filename = tmpdir.join(\"test.h5\").strpath\n hdcon = SensitivityCubeHDF5Container(filename, mode=\"w\")\n\n # Clever trick to close the file when we're done with it \n yield hdcon\n hdcon.close()",
"def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n x = []\n g0 = []\n offt = []\n unused_bit = []\n pa = []\n pb = []\n wa = []\n wb = []\n nan = np.full(3, np.nan)\n encoding = model._encoding\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n if element.g0 is None:\n x.append(element.x)\n g0.append(-1)\n else:\n x.append(nan)\n g0.append(element.g0)\n\n offti = element.offt\n if isinstance(offti, integer_types):\n offti = str(offti)\n offt.append(offti.encode(encoding))\n pa.append(element.pa)\n pb.append(element.pb)\n wa.append(element.wa)\n wb.append(element.wb)\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('pid', data=pids)\n #print('x =', x)\n #print('g0 =', g0)\n h5_file.create_dataset('x', data=x)\n h5_file.create_dataset('g0', data=g0)\n h5_file.create_dataset('offt', data=offt)\n\n h5_file.create_dataset('pa', data=pa)\n h5_file.create_dataset('pb', data=pb)\n\n h5_file.create_dataset('wa', data=wa)\n h5_file.create_dataset('wb', data=wb)",
"def _generate_testdata_h5(cls, test_filepath):\n # Generate some test data\n data = numpy.indices( (10, 100, 200, 3) )\n assert data.shape == (4, 10, 100, 200, 3)\n data = data.astype( numpy.uint32 )\n cls.original_data = data\n\n # Choose names\n cls.dvid_dataset = \"datasetA\"\n cls.data_uuid = \"abcde\"\n cls.data_name = \"indices_data\"\n cls.volume_location = \"/datasets/{dvid_dataset}/volumes/{data_name}\".format( **cls.__dict__ )\n cls.node_location = \"/datasets/{dvid_dataset}/nodes/{data_uuid}\".format( **cls.__dict__ )\n cls.voxels_metadata = voxels.VoxelsMetadata.create_default_metadata(data.shape, data.dtype, \"cxyzt\", 1.0, \"\")\n\n # Write to h5 file\n with H5MockServerDataFile( test_filepath ) as test_h5file:\n test_h5file.add_node( cls.dvid_dataset, cls.data_uuid )\n test_h5file.add_volume( cls.dvid_dataset, cls.data_name, data, cls.voxels_metadata )\n\n test_h5file.add_node( \"datasetB\", \"12345\" )\n test_h5file.add_volume( \"datasetB\", cls.data_name, data, cls.voxels_metadata )",
"def write_hdf5(filename, data):\n \n if '.h5' in filename:\n fid = h5py.File(filename, 'w')\n else:\n filename = filename+'.h5'\n fid = h5py.File(filename, 'w')\n\n print('Writing %s...'%filename)\n\n write_hdf5_group(fid, data)\n\n fid.close()\n print('Finished writting %s.'%filename)\n return",
"def make_libfile():\n # wfc3_obsmodes_uvis\n wfc3_uvis = [\n \"f218w\",\n \"f225w\",\n \"f275w\",\n \"f336w\",\n \"f390m\",\n \"f390w\",\n \"f410m\",\n \"f438w\",\n \"f467m\",\n \"f475w\",\n \"f547m\",\n \"f555w\",\n \"f606w\",\n \"f621m\",\n \"f625w\",\n \"f689m\",\n \"f763m\",\n \"f775w\",\n \"f814w\",\n \"f845m\",\n ]\n\n wfc3_ir = [\n \"f098m\",\n \"f105w\",\n \"f110w\",\n \"f125w\",\n \"f127m\",\n \"f139m\",\n \"f140w\",\n \"f153m\",\n \"f160w\",\n ]\n\n wfpc2 = [\n \"f122m\",\n \"f157w\",\n \"f336w\",\n \"f410m\",\n \"f467m\",\n \"f547m\",\n \"f439w\",\n \"f569w\",\n \"f675w\",\n \"f791w\",\n \"f170w\",\n \"f185w\",\n \"f218w\",\n \"f255w\",\n \"f300w\",\n \"f380w\",\n \"f555w\",\n \"f622w\",\n \"f450w\",\n \"f606w\",\n \"f702w\",\n \"f814w\",\n ]\n\n acs_wfc = [\n \"f435w\",\n \"f475w\",\n \"f550m\",\n \"f555w\",\n \"f606w\",\n \"f625w\",\n \"f775w\",\n \"f814w\",\n ]\n # galex\n galex = [\"fuv\", \"nuv\"]\n\n # Open hd5 file for writing\n hf = h5py.File(__ROOT__ + \"filters.hd5\", \"w\")\n\n # Create group for nice hierarchical structure\n f = hf.create_group(\"filters\")\n\n # Define arrays for \"contents\" / descriptive information\n tablenames = []\n observatories = []\n instruments = []\n names = []\n norms = []\n cwaves = []\n pwaves = []\n comments = []\n\n # Loop through WFC3_UVIS filters\n for filt in wfc3_uvis:\n\n # define uvis 1 and uvis2 modes\n mode_1 = \"wfc3, uvis1, \" + filt\n mode_2 = \"wfc3, uvis2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of uvis1 and uvis2\")\n\n # Loop through WFC3_IR filters\n for filt in wfc3_ir:\n\n # define ir mode\n mode = \"wfc3, ir, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n 
cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # Loop through WFPC2 filters\n for filt in wfpc2:\n\n # define chips 1, 2, 3, 4 modes\n mode_1 = \"wfpc2, 1, \" + filt\n mode_2 = \"wfpc2, 2, \" + filt\n mode_3 = \"wfpc2, 3, \" + filt\n mode_4 = \"wfpc2, 4, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n bp_3 = stsyn.band(mode_3)\n bp_4 = stsyn.band(mode_4)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave), bp_3(wave), bp_4(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFPC2_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFPC2\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of 1, 2, 3, 4\")\n\n # Loop through ACS filters\n for filt in acs_wfc:\n\n # define wfc1, wfc2 modes\n mode_1 = \"acs, wfc1, \" + filt\n mode_2 = \"acs, wfc2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_ACS_WFC_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"ACS_WFC\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of wfc1 and wfc2\")\n\n # Loop through GALEX filters:\n for filt in galex:\n # define ir mode\n mode = \"galex,\" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"GALEX_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant 
information\n tablenames.append(filter_name)\n observatories.append(\"GALEX\")\n instruments.append(\"GALEX\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # smash the contents arrays together\n contents = np.array(\n list(\n zip(\n tablenames,\n observatories,\n instruments,\n names,\n norms,\n cwaves,\n pwaves,\n comments,\n )\n ),\n dtype=[\n (\"TABLENAME\", \"S40\"),\n (\"OBSERVATORY\", \"S30\"),\n (\"INSTRUMENT\", \"S30\"),\n (\"NAME\", \"S10\"),\n (\"NORM\", \"<f8\"),\n (\"CWAVE\", \"<f8\"),\n (\"PWAVE\", \"<f8\"),\n (\"COMMENT\", \"S100\"),\n ],\n )\n\n # add the contents array as an hd5 dataset\n hf.create_dataset(\"content\", data=contents)\n\n # close the file\n hf.close()",
"def block_level_distribution_file( file ):\n import h5py\n import numpy as np\n\n # open the h5 wabbit file\n fid = h5py.File(file,'r')\n\n # read treecode table\n b = fid['block_treecode'][:]\n treecode = np.array(b, dtype=float)\n\n # close file\n fid.close()\n\n # number of blocks\n Nb = treecode.shape[0]\n\n # min/max level. required to allocate list!\n jmin, jmax = get_max_min_level( treecode )\n counter = np.zeros(jmax+1)\n\n # fetch level for each block and count\n for i in range(Nb):\n J = treecode_level(treecode[i,:])\n counter[J] += 1\n\n return counter",
"def to_hdf5(self, filename):\n\n f = h5py.File(filename, 'w')\n f['xyz'] = np.vstack(np.squeeze(self.xyz))\n f.close()\n\n return",
"def to_hdf5(self, filename):\n\n f = h5py.File(filename, 'w')\n f['xyz'] = self.xyz\n f.close()\n\n return",
"def writeH5Dataset( self, foldername, time, nameConvention = \"grid\" ):\n filename = \"{0}/{1}_{2:06}.h5\".format(foldername,nameConvention,time)\n file = h5py.File(filename,'w',driver='mpio',comm=self.global_comm)\n dset = file.create_dataset(\"dset\",self._layout.fullShape, dtype = self._f.dtype)\n slices = tuple([slice(s,e) for s,e in zip(self._layout.starts,self._layout.ends)])\n dset[slices]=self._f[:]\n attr_data = np.array(self._layout.dims_order)\n dset.attrs.create(\"Layout\", attr_data, (self._nDims,), h5py.h5t.STD_I32BE)\n file.close()",
"def create_model(max_vocab_len, max_seq_len, h5_file=None, debug=False):\n assert h5_file is not None\n block_net = BlockNet(max_vocab_len)\n x1 = keras.layers.Input(shape=(max_seq_len,), dtype='int64', name=\"x1\") #字编号\n x2 = keras.layers.Input(shape=(max_seq_len,), dtype='int64', name=\"x2\") \n m1 = keras.layers.Input(shape=(max_seq_len,), dtype='int64', name=\"m1\") #类型\n m2 = keras.layers.Input(shape=(max_seq_len,), dtype='int64', name=\"m2\") \n mark1 = keras.layers.Input(shape=(max_seq_len,2), dtype='int64', name=\"mark1\") #分块后的mark\n mark2 = keras.layers.Input(shape=(max_seq_len,2), dtype='int64', name=\"mark2\")\n block1 = keras.layers.Input(shape=(max_seq_len,), dtype='int64', name=\"block1\") #分块后的块ID\n block2 = keras.layers.Input(shape=(max_seq_len,), dtype='int64', name=\"block2\")\n scale1 = keras.layers.Input(shape=(max_seq_len,), dtype='float32', name=\"scale1\") #分块后的块权重\n scale2 = keras.layers.Input(shape=(max_seq_len,), dtype='float32', name=\"scale2\")\n \n output = block_net([x1, x2, m1, m2, mark1, mark2, block1, block2, scale1, scale2])\n \n model = keras.Model(inputs=[x1, x2, m1, m2, mark1, mark2, block1, block2, scale1, scale2], outputs=output)\n model.build(input_shape=[(None, max_seq_len), (None, max_seq_len),\n (None, max_seq_len), (None, max_seq_len), \n (None, max_seq_len, 2), (None, max_seq_len, 2), \n (None, max_seq_len), (None, max_seq_len), \n (None, max_seq_len), (None, max_seq_len)])\n \n if os.path.exists(h5_file.format(max_vocab_len)) or os.path.isfile(h5_file.format(max_vocab_len)):\n model.load_weights(h5_file.format(max_vocab_len))\n \n model.compile(optimizer=keras.optimizers.Adam(),\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[accuracy, f1])\n# metrics=[keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), f1])\n \n model.summary()\n \n if debug:\n init_logging(debugPath)\n BlockNet.debug = False\n EmbeddingsLayer.debug = True\n return model",
"def flusi_to_wabbit(fname_flusi, fname_wabbit , level, dim=2, dtype=np.float64 ):\n import numpy as np\n import insect_tools\n import matplotlib.pyplot as plt\n\n\n # read in flusi's reference solution\n time, box, origin, data_flusi = insect_tools.read_flusi_HDF5( fname_flusi, dtype=dtype )\n box = box[1:]\n \n data_flusi = np.squeeze(data_flusi).T\n Bs = field_shape_to_bs(data_flusi.shape,level)\n dense_to_wabbit_hdf5(data_flusi, fname_wabbit , Bs, box, time, dtype=dtype)",
"def loadh5(fname, path='/data'):\n fp = open_read(fname)\n slab = fp.get_node(path)\n mat = slab.read()\n fp.close()\n return mat",
"def write_batch_to_h5(splits, h5_file, data_sizes, new_data, new_labels):\n # check that data and labels are the same size\n assert new_data.shape[0] == new_labels.shape[0]\n # make a copy of data_sizes\n data_sizes = data_sizes[:]\n # pick which bin to assign data to\n bin_id = pick_splits(splits)\n bin_name = str(bin_id)\n # get slice indexes\n start_i = data_sizes[bin_id]\n end_i = start_i + new_data.shape[0]\n # resize HDF5 datasets\n h5_file[\"data_\" + bin_name].resize(end_i, 0)\n h5_file[\"labels_\" + bin_name].resize(end_i, 0)\n # write data\n h5_file[\"data_\" + bin_name][start_i:end_i, ...] = new_data\n h5_file[\"labels_\" + bin_name][start_i:end_i, ...] = new_labels\n # create and return updated dictionary of bin counts\n data_sizes[bin_id] = end_i\n return data_sizes",
"def read_wabbit_hdf5_dir(dir):\n import numpy as np\n import re\n import ntpath\n import os\n\n it=0\n data={'time': [],'x0':[],'dx':[],'treecode':[]}\n # we loop over all files in the given directory\n for file in os.listdir(dir):\n # filter out the good ones (ending with .h5)\n if file.endswith(\".h5\"):\n # from the file we can get the fieldname\n fieldname=re.split('_',file)[0]\n print(fieldname)\n time, x0, dx, box, field, treecode = read_wabbit_hdf5(os.path.join(dir, file))\n #increase the counter\n data['time'].append(time[0])\n data['x0'].append(x0)\n data['dx'].append(dx)\n data['treecode'].append(treecode)\n if fieldname not in data:\n # add the new field to the dictionary\n data[fieldname]=[]\n data[fieldname].append(field)\n else: # append the field to the existing data field\n data[fieldname].append(field)\n it=it+1\n # the size of the domain\n data['box']=box\n #return time, x0, dx, box, data, treecode\n return data",
"def write_postprocessing_section(params, hdf5_data):\n\n if params.irf is not None:\n x2 = (' '.join(params.irf)).split()\n dset = require_dataset(hdf5_data, structure.H5_COMPUTE_IRF, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_COMPUTE_IRF_ATTR)\n dset[0] = int(float(x2[0]))\n\n dset = require_dataset(hdf5_data, structure.H5_IRF_TIME_STEP, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_TIME_STEP_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_IRF_DURATION, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_DURATION_ATTR)\n dset[0] = float(x2[2])\n\n if params.show_pressure is not None:\n dset = require_dataset(hdf5_data, structure.H5_SHOW_PRESSURE, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_SHOW_PRESSURE_ATTR)\n dset[0] = int(float(x2[0]))\n\n if params.kochin_function is not None:\n x2 = (' '.join(params.kochin_function)).split()\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_NUMBER, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_NUMBER_ATTR)\n dset[0] = float(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MIN, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MIN_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MAX, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MAX_ATTR)\n dset[0] = float(x2[2])\n\n if params.free_surface_elevation:\n x2 = (' '.join(params.free_surface_elevation)).split()\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_X, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_X_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_Y, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_Y_ATTR)\n dset[0] = int(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_X, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_X_ATTR)\n dset[0] = float(x2[2])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_Y, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_Y_ATTR)\n dset[0] = float(x2[3])",
"def test_hdf5_design_matrix():\n skip_if_no_h5py()\n import h5py\n\n # save random data to HDF5\n handle, filename = tempfile.mkstemp()\n dataset = random_one_hot_dense_design_matrix(np.random.RandomState(1),\n num_examples=10, dim=5,\n num_classes=3)\n with h5py.File(filename, 'w') as f:\n f.create_dataset('X', data=dataset.get_design_matrix())\n f.create_dataset('y', data=dataset.get_targets())\n\n # instantiate Train object\n trainer = yaml_parse.load(design_matrix_yaml % {'filename': filename})\n trainer.main_loop()\n\n # cleanup\n os.remove(filename)",
"def _setup_h5(self, data_gen_parms):\n\n '''\n Build the group structure down to the channel group\n '''\n # Set up the basic group structure\n root_grp = VirtualGroup('')\n root_parms = dict()\n root_parms['translator'] = 'FAKEBEPS'\n root_parms['data_type'] = data_gen_parms['data_type']\n root_grp.attrs = root_parms\n\n meas_grp = VirtualGroup('Measurement_')\n chan_grp = VirtualGroup('Channel_')\n\n meas_grp.attrs.update(data_gen_parms)\n\n # Create the Position and Spectroscopic datasets for the Raw Data\n ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals = self._build_ancillary_datasets()\n\n raw_chunking = calc_chunks([self.n_pixels,\n self.n_spec_bins],\n np.complex64(0).itemsize,\n unit_chunks=[1, self.n_bins])\n\n ds_raw_data = VirtualDataset('Raw_Data', data=None,\n maxshape=[self.n_pixels, self.n_spec_bins],\n dtype=np.complex64,\n compression='gzip',\n chunking=raw_chunking,\n parent=meas_grp)\n\n chan_grp.add_children([ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals,\n ds_raw_data])\n meas_grp.add_children([chan_grp])\n root_grp.add_children([meas_grp])\n\n hdf = HDFwriter(self.h5_path)\n hdf.delete()\n h5_refs = hdf.write(root_grp)\n\n # Delete the MicroDatasets to save memory\n del ds_raw_data, ds_spec_inds, ds_spec_vals, ds_pos_inds, ds_pos_vals\n\n # Get the file and Raw_Data objects\n h5_raw = get_h5_obj_refs(['Raw_Data'], h5_refs)[0]\n h5_chan_grp = h5_raw.parent\n\n # Get the Position and Spectroscopic dataset objects\n h5_pos_inds = get_h5_obj_refs(['Position_Indices'], h5_refs)[0]\n h5_pos_vals = get_h5_obj_refs(['Position_Values'], h5_refs)[0]\n h5_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_refs)[0]\n h5_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_refs)[0]\n\n # Link the Position and Spectroscopic datasets as attributes of Raw_Data\n link_as_main(h5_raw, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)\n\n '''\n Build the SHO Group\n '''\n sho_grp = VirtualGroup('Raw_Data-SHO_Fit_', parent=h5_chan_grp.name)\n\n # Build the Spectroscopic datasets for the SHO Guess and Fit\n sho_spec_starts = np.where(h5_spec_inds[h5_spec_inds.attrs['Frequency']].squeeze() == 0)[0]\n sho_spec_labs = get_attr(h5_spec_inds, 'labels')\n ds_sho_spec_inds, ds_sho_spec_vals = build_reduced_spec_dsets(h5_spec_inds,\n h5_spec_vals,\n keep_dim=sho_spec_labs != 'Frequency',\n step_starts=sho_spec_starts)\n\n sho_chunking = calc_chunks([self.n_pixels,\n self.n_sho_bins],\n sho32.itemsize,\n unit_chunks=[1, 1])\n ds_sho_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n ds_sho_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n\n sho_grp.add_children([ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals])\n\n # Write the SHO group and datasets to the file and delete the MicroDataset objects\n h5_sho_refs = hdf.write(sho_grp)\n del ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals\n\n # Get the dataset handles for the fit and guess\n h5_sho_fit = get_h5_obj_refs(['Fit'], h5_sho_refs)[0]\n h5_sho_guess = get_h5_obj_refs(['Guess'], h5_sho_refs)[0]\n\n # Get the dataset handles for the SHO Spectroscopic datasets\n h5_sho_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_sho_refs)[0]\n h5_sho_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_sho_refs)[0]\n\n # Link the Position and Spectroscopic 
datasets as attributes of the SHO Fit and Guess\n link_as_main(h5_sho_fit, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n link_as_main(h5_sho_guess, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n\n '''\n Build the loop group\n '''\n loop_grp = VirtualGroup('Fit-Loop_Fit_', parent=h5_sho_fit.parent.name)\n\n # Build the Spectroscopic datasets for the loops\n loop_spec_starts = np.where(h5_sho_spec_inds[h5_sho_spec_inds.attrs['DC_Offset']].squeeze() == 0)[0]\n loop_spec_labs = get_attr(h5_sho_spec_inds, 'labels')\n ds_loop_spec_inds, ds_loop_spec_vals = build_reduced_spec_dsets(h5_sho_spec_inds,\n h5_sho_spec_vals,\n keep_dim=loop_spec_labs != 'DC_Offset',\n step_starts=loop_spec_starts)\n\n # Create the loop fit and guess MicroDatasets\n loop_chunking = calc_chunks([self.n_pixels, self.n_loops],\n loop_fit32.itemsize,\n unit_chunks=[1, 1])\n ds_loop_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n ds_loop_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n # Add the datasets to the loop group then write it to the file\n loop_grp.add_children([ds_loop_fit, ds_loop_guess, ds_loop_spec_inds, ds_loop_spec_vals])\n h5_loop_refs = hdf.write(loop_grp)\n\n # Delete the MicroDatasets\n del ds_loop_spec_vals, ds_loop_spec_inds, ds_loop_guess, ds_loop_fit\n\n # Get the handles to the datasets\n h5_loop_fit = get_h5_obj_refs(['Fit'], h5_loop_refs)[0]\n h5_loop_guess = get_h5_obj_refs(['Guess'], h5_loop_refs)[0]\n h5_loop_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_loop_refs)[0]\n h5_loop_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_loop_refs)[0]\n\n # Link the Position and Spectroscopic datasets to the Loop Guess and Fit\n link_as_main(h5_loop_fit, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n link_as_main(h5_loop_guess, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n\n self.h5_raw = USIDataset(h5_raw)\n self.h5_sho_guess = USIDataset(h5_sho_guess)\n self.h5_sho_fit = USIDataset(h5_sho_fit)\n self.h5_loop_guess = USIDataset(h5_loop_guess)\n self.h5_loop_fit = USIDataset(h5_loop_fit)\n self.h5_spec_vals = h5_spec_vals\n self.h5_spec_inds = h5_spec_inds\n self.h5_sho_spec_inds = h5_sho_spec_inds\n self.h5_sho_spec_vals = h5_sho_spec_vals\n self.h5_loop_spec_inds = h5_loop_spec_inds\n self.h5_loop_spec_vals = h5_loop_spec_vals\n self.h5_file = h5_raw.file\n\n return",
"def write_data_to_h5(data, filename):\n f = h5py.File(filename, 'w', libver='latest')\n dset = f.create_dataset('array', shape=(data.shape), data=data, compression='gzip', compression_opts=9)\n f.close()",
"def dense_block(x, blocks, name):\r\n for i in range(blocks):\r\n x = conv_block(x, 20, name=name + '_block' + str(i + 1))\r\n return x",
"def prepare_hdf5_file(hdf5_file, n_train, n_valid, n_test):\n n_total = n_train + n_valid + n_test\n splits = create_splits(n_train, n_valid, n_test)\n hdf5_file.attrs['split'] = H5PYDataset.create_split_array(splits)\n vlen_dtype = h5py.special_dtype(vlen=numpy.dtype('uint8'))\n hdf5_file.create_dataset('encoded_images', shape=(n_total,),\n dtype=vlen_dtype)\n hdf5_file.create_dataset('targets', shape=(n_total, 1), dtype=numpy.int16)\n hdf5_file.create_dataset('filenames', shape=(n_total, 1), dtype='S32')",
"def to_new_board_state(datafile_name):\n datafile = h5py.File(datafile_name,'a')\n Nkeys = len(datafile.keys())\n print('Processing file', datafile_name)\n ctr = 0\n for k in datafile.keys():\n try:\n if data['new_bs']: continue\n if (10*ctr) / Nkeys < 10: print(100*ctr/Nkeys,'% processed')\n data = datafile[k]\n board_state = data['board_state'][:]\n if data['new_bs']: del data['new_bs']\n \n # The new board state is temporarily stored in bs. bs is a Mx2 tensor.\n # bs[0] is simply the 'salida' (first move).\n bs = np.zeros_like(board_state[:,2:])\n bs[0] = board_state[0,2:]\n h1 = bs[0,0]\n h2 = bs[0,1]\n \n # The point of the new coding is for each move to ONLY store the\n # position of the move (1, 2, -1) and the change in the corresponding\n # head. This is what is done below for each.\n for n, m in enumerate(board_state[1:], 1):\n d = m[2:]\n p = m[1]\n if p == 2:\n d = d if d[0] == h2 else d[::-1]\n h2 = d[1]\n bs[n] = np.array([p, h2])\n elif p == 1:\n d = d if d[1] == h1 else d[::-1]\n h1 = d[0]\n bs[n] = np.array([p, h1])\n elif p == -1:\n bs[n] = np.array([-1, 0])\n else:\n bs[n] = np.zeros(2)\n \n data['new_bs'] = bs\n ctr += 1 \n except:\n print('Deleting', k)\n del datafile[k]",
"def dense_block(x):\n h1 = _conv_block(x, 32)\n h1 = tf.keras.layers.Concatenate()([x, h1])\n\n h2 = _conv_block(h1, 32)\n h2 = tf.keras.layers.Concatenate()([x, h1, h2])\n\n h3 = _conv_block(h2, 32)\n h3 = tf.keras.layers.Concatenate()([x, h1, h2, h3])\n\n h4 = _conv_block(h3, 32)\n h4 = tf.keras.layers.Concatenate()([x, h1, h2, h3, h4])\n\n h5 = _conv_block(h4, 32, activation=False)\n\n h5 = tf.keras.layers.Lambda(lambda x: x * 0.2)(h5)\n h = tf.keras.layers.Add()([h5, x])\n\n return h",
"def print_structure(weight_file_path):\r\n f = h5py.File(\"./mnist_nn_quantized_zeroone_FC.h5\")\r\n file = open(\"datafile.txt\",\"a\")\r\n\r\n try:\r\n if len(f.attrs.items()):\r\n print(\"{} contains: \".format(weight_file_path))\r\n print(\"Root attributes:\")\r\n for key, value in f.attrs.items():\r\n print(\" {}: {}\".format(key, value))\r\n\r\n if len(f.items())==0:\r\n return \r\n\r\n for layer, g in f.items():\r\n print(\" {}\".format(layer))\r\n print(\" Attributes:\")\r\n for key, value in g.attrs.items():\r\n print(\" {}: {}\".format(key, value))\r\n\r\n print(\" Dataset:\")\r\n for p_name in g.keys():\r\n param = g[p_name]\r\n subkeys = param.keys()\r\n for k_name in param.keys():\r\n file.write(\" {}/{}: {}\".format(p_name, k_name, (param.get(k_name)[:]+1)/2))\r\n #print(\" {}/{}: {}\".format(p_name, k_name, param.get(k_name)[:]))\r\n \r\n finally:\r\n f.close()"
] | [
"0.7129957",
"0.7095275",
"0.61546594",
"0.59248036",
"0.56526315",
"0.5634567",
"0.56303704",
"0.5613793",
"0.56120116",
"0.5585933",
"0.5540948",
"0.5519374",
"0.54959834",
"0.5490673",
"0.54355556",
"0.5431243",
"0.5422526",
"0.54188895",
"0.5410818",
"0.5400482",
"0.5395064",
"0.53657037",
"0.53653777",
"0.5355253",
"0.53481627",
"0.5339075",
"0.53369945",
"0.53283584",
"0.53148675",
"0.53040904"
] | 0.7183506 | 0 |
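
The negative examples above all exercise the same h5py round trip: open a file, call create_dataset, read the data back by slicing. A minimal self-contained sketch of that shared pattern (file and dataset names are illustrative, not taken from any entry):

import h5py
import numpy as np

# Write a small design-matrix-like array, then read it back.
data = np.random.rand(10, 5)
with h5py.File("example.h5", "w") as f:
    f.create_dataset("X", data=data, compression="gzip")

with h5py.File("example.h5", "r") as f:
    restored = f["X"][:]      # slicing pulls the dataset into memory as a numpy array

assert np.allclose(data, restored)
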
convert hash_str to hash_dec | def hash2dec(hash_str: str) -> int:
length = len(hash_str)
bases = [32 ** i for i in range(length)][::-1]
dec = 0
for i, d in enumerate(hash_str):
dec += ch2int[d] * bases[i]
return dec | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hash_string_to_int(\r\n k: bytes,\r\n e: str,\r\n) -> int:\r\n return int.from_bytes(hash_string(k, e), 'big')",
"def strhash(s: str) -> int:\n h = hashlib.md5(s.encode('utf-8'))\n h = int(h.hexdigest(), base=16)\n return h",
"def dec2hash(hash_dec: int, pre: int) -> str:\n bases = [32 ** i for i in range(pre)][::-1]\n\n hash_str = \"\"\n v = hash_dec\n for b in bases:\n a = v // b\n v = v % b\n hash_str += ch32[a]\n return hash_str",
"def consistent_unhash(_hash:str) -> str:\n decoded_hash = base64.b64decode(_hash).decode('utf-8')\n _hash_dict = ujson.loads(decoded_hash)\n return _hash_dict",
"def customHashFunc(str):\n return sum(ord(chr) for chr in str)%128",
"def decode_and_hexlify_hashes(hash_str: str) -> typing.Union[str, None]:\n\n return binascii.hexlify(base64.b64decode(hash_str.encode())).decode() if hash_str else None",
"def get_string_sha256(str_to_convert):\n hasher = hashlib.sha256()\n hasher.update(bytearray(str_to_convert.encode('ascii')))\n return base64.b64encode(hasher.digest())",
"def _hash(self, string, hash_type):\n hash_types = {\n 'TABLE_OFFSET': 0,\n 'HASH_A': 1,\n 'HASH_B': 2,\n 'TABLE': 3\n }\n seed1 = 0x7FED7FED\n seed2 = 0xEEEEEEEE\n\n for ch in string.upper():\n if not isinstance(ch, int): ch = ord(ch)\n value = self.encryption_table[(hash_types[hash_type] << 8) + ch]\n seed1 = (value ^ (seed1 + seed2)) & 0xFFFFFFFF\n seed2 = ch + seed1 + seed2 + (seed2 << 5) + 3 & 0xFFFFFFFF\n\n return seed1",
"def get_hash_code(s):\n h = 0\n n = len(s)\n for i, c in enumerate(s):\n h = h + ord(c) * 31 ** (n - 1 - i)\n return StrUtil.convert_4_bytes(h)",
"def computeHash(string):\n\tif isBytes(string):\n\t\tstring = string.decode(\"latin-1\")\n\thash_ = 63689\n\tfor char in string:\n\t\thash_ = hash_ * 378551 + ord(char)\n\treturn hash_ % 65536",
"def hash(string):\n hs = 0\n for s in string:\n hs += ord(s)\n return hs",
"def get_hash(hash_function, x: str):\n hash_function.update(x.encode())\n return int.from_bytes(hash_function.digest(), byteorder=\"big\")",
"def __str_to_hash(string_to_hash: str, errors: str = 'ignore') -> str:\n string_hash = string_to_hash.encode(encoding=\"utf-8\", errors=errors)\n return hashlib.md5(string_hash).hexdigest()",
"def str_to_hash(self, param):\n param = param.encode('utf-8')\n my_hash = hashlib.md5(param)\n return my_hash.hexdigest()",
"def chord_hash(input_string):\n h = hashlib.sha1() # 160 bit string\n encoded_data = input_string.encode('utf-8')\n h.update(encoded_data)\n hex_string = h.hexdigest()\n hex_value = int(hex_string, 16)\n hash_integer_value = hex_value >> (160 - m)\n return hash_integer_value",
"def sha256_2_string(string_to_hash):\n\n # Solution for (1a)\n import hashlib\n first_sha = hashlib.sha256(string_to_hash.encode(\"utf8\"))\n second_sha = hashlib.sha256(first_sha.digest())\n return second_sha.hexdigest()\n\n # Placeholder for (1a)\n return \"deadbeef\"",
"def hash_djb2(string):\n hashval = ctypes.c_uint(5381)\n for char in string:\n hashval.value = ((hashval.value << 5) + hashval.value) + ord(char)\n return hashval.value & 0x7FFFFFFF",
"def hex2dec(string_num):\n if hex_pattern.match(string_num):\n return int(string_num.upper(), 16)\n else:\n return -1",
"def hash_string(input_str):\n input_b = str.encode(input_str)\n input_hash = hashlib.md5(input_b.lower())\n input_hash_str = input_hash.hexdigest()\n\n return input_hash_str",
"def binstr2dec(bstr):\n return int(bstr, base=2)",
"def hash(self, string):\n return self.__scaffydb.hash(string)",
"def get_binary_sha256_hash(hash: str) -> str:\n result = \"\"\n\n for character in hash:\n character_number = int(character, base=16)\n binary_number = bin(character_number)\n # CAVEAT: each hash character is 4 bit size since SHA256 hash is hexidecimal string, so 4 * 64 = 256 bit\n formatted_binary_number = binary_number[2:].ljust(4, \"0\")\n result += formatted_binary_number\n\n return result",
"def coerce(self, value):\n if isinstance(value, bytes) and len(value) == self.bit_length:\n return HashString.from_b256(value)\n elif len(value) == self.b16_length:\n return HashString.from_b16(value)\n elif self.b64_length - len(value) <= 4:\n return HashString.from_b64(value)",
"def decode_hash(self, hash_id, definition, language=\"en\"):\n return self._manifest.decode_hash(hash_id, definition, language)",
"def keyhash(string):\n return hashlib.sha1(string.encode('utf-8')).hexdigest()",
"def hash_string(to_hash):\n\n chars = string.printable\n\n hashed = \"\"\n\n total = 1\n\n counter = 1\n\n for letter in to_hash:\n\n total *= (chars.index(letter) * counter * len(to_hash)*13)\n\n counter += 1\n\n if counter%3 == 0:\n\n total *= total\n\n total = str(total)[:30]\n\n temp_int = \"\"\n\n for i in range(len(total)):\n\n temp_int += total[i]\n\n if i % 2 != 0:\n\n hashed += chars[int(temp_int)]\n\n temp_int = \"\"\n\n return hashed",
"def hash_str(string):\n\n return hmac.new(secret, string).hexdigest()",
"def hash_string(\r\n k: bytes,\r\n e: str,\r\n) -> bytes:\r\n return hmac.new(k, e.encode('utf-8'), hashlib.sha256).digest()",
"def get_hash(s):\n hash_object = hashlib.md5(s.encode())\n return hash_object.hexdigest()",
"def hex_hash(s):\n if not s:\n return '0'\n s = s.encode('utf-8')\n return '{:x}'.format(adler32(s) & 0xffffffff)"
] | [
"0.676085",
"0.6701529",
"0.6644186",
"0.64678264",
"0.64379567",
"0.64147437",
"0.6400152",
"0.63800716",
"0.6376264",
"0.63746643",
"0.63039637",
"0.62607664",
"0.62387604",
"0.62307084",
"0.62161714",
"0.61618865",
"0.61301386",
"0.61214674",
"0.6107932",
"0.60902554",
"0.6083278",
"0.604153",
"0.5975589",
"0.59665096",
"0.5954215",
"0.59438753",
"0.59430236",
"0.59320086",
"0.59249496",
"0.59176564"
] | 0.81465983 | 0 |
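
hash2dec above reads a geohash string as a base-32 numeral: each character is looked up in ch2int and weighted by 32**position. The ch32/ch2int tables are module-level and not shown in this row, so the sketch below assumes they hold the standard geohash alphabet (the same base32 string used by the encode() negatives):

ch32 = "0123456789bcdefghjkmnpqrstuvwxyz"   # assumed alphabet
ch2int = {c: i for i, c in enumerate(ch32)}

def hash2dec(hash_str: str) -> int:
    length = len(hash_str)
    bases = [32 ** i for i in range(length)][::-1]
    dec = 0
    for i, d in enumerate(hash_str):
        dec += ch2int[d] * bases[i]
    return dec

assert hash2dec("0") == 0
assert hash2dec("z") == 31    # last symbol of the 32-character alphabet
assert hash2dec("10") == 32   # positional: 1 * 32 + 0
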
convert hash_dec to hash_str | def dec2hash(hash_dec: int, pre: int) -> str:
bases = [32 ** i for i in range(pre)][::-1]
hash_str = ""
v = hash_dec
for b in bases:
a = v // b
v = v % b
hash_str += ch32[a]
return hash_str | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hash_str(c, hash_length):\n if isinstance(c, float):\n if numpy.isnan(c):\n return c\n raise ValueError(f\"numpy.nan expected, not {c}\")\n m = hashlib.sha256()\n m.update(c.encode(\"utf-8\"))\n r = m.hexdigest()\n if len(r) >= hash_length:\n return r[:hash_length]\n return r",
"def hash_str(self):\n return '___'.join([self.key.kind(), self.key.string_id(),\n self._Hash()])",
"def __str__(self: Hash) -> str:\n return self.to_hex()",
"def printable_hash(h):\n return int(h).to_bytes(32, byteorder='big', signed=False).hex()",
"def toHashable(self) -> str:\r\n\r\n return self.toHashBase().encode('utf-8')",
"def hash_string(to_hash):\n\n chars = string.printable\n\n hashed = \"\"\n\n total = 1\n\n counter = 1\n\n for letter in to_hash:\n\n total *= (chars.index(letter) * counter * len(to_hash)*13)\n\n counter += 1\n\n if counter%3 == 0:\n\n total *= total\n\n total = str(total)[:30]\n\n temp_int = \"\"\n\n for i in range(len(total)):\n\n temp_int += total[i]\n\n if i % 2 != 0:\n\n hashed += chars[int(temp_int)]\n\n temp_int = \"\"\n\n return hashed",
"def get_binary_sha256_hash(hash: str) -> str:\n result = \"\"\n\n for character in hash:\n character_number = int(character, base=16)\n binary_number = bin(character_number)\n # CAVEAT: each hash character is 4 bit size since SHA256 hash is hexidecimal string, so 4 * 64 = 256 bit\n formatted_binary_number = binary_number[2:].ljust(4, \"0\")\n result += formatted_binary_number\n\n return result",
"def hash(self) -> str:\r\n ...",
"def __str_to_hash(string_to_hash: str, errors: str = 'ignore') -> str:\n string_hash = string_to_hash.encode(encoding=\"utf-8\", errors=errors)\n return hashlib.md5(string_hash).hexdigest()",
"def str_sha(raw_sha):\n return hexlify(raw_sha)[:12]",
"def __get_hashstr(_config_object: dict):\n hashobj = hashlib.md5()\n json_str = json.dumps(_config_object, sort_keys=True).encode('utf-8')\n hashobj.update(json_str)\n dig = hashobj.hexdigest()\n return dig\n # return hashobj.update(json.dumps(_config_object, sort_keys=True).encode('utf-8')).hexdigest()",
"def get_string_sha256(str_to_convert):\n hasher = hashlib.sha256()\n hasher.update(bytearray(str_to_convert.encode('ascii')))\n return base64.b64encode(hasher.digest())",
"def _hash_encoder(data: bytes) -> str:\n return base64.urlsafe_b64encode(data).rstrip(b\"=\").decode('ascii')",
"def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"",
"def hex_form(hash):\n final_hash = ''\n for i in range(len(hash)):\n final_hash += format(hash[i], '02x')\n return final_hash",
"def hash_string(self):\n return self._hash_string",
"def hash2dec(hash_str: str) -> int:\n length = len(hash_str)\n bases = [32 ** i for i in range(length)][::-1]\n\n dec = 0\n for i, d in enumerate(hash_str):\n dec += ch2int[d] * bases[i]\n return dec",
"def hashStr(data):\n \n s, d = map_addr_int(data[2], data[3]) \n sp, dp = map_port(data[4], data[5]) \n\n data[2], data[3] = struct.pack('>I', s), struct.pack('>I', d)\n data[4], data[5] = struct.pack('>I', sp)[2:], struct.pack('>I', dp)[2:]\n data[6] = struct.pack('>I', int(data[6]))[-1]\n hash_str = (data[2]\n + data[3]\n + data[4]\n + data[5]\n + data[6]\n )\n return hash_str",
"def str_to_hash(self, param):\n param = param.encode('utf-8')\n my_hash = hashlib.md5(param)\n return my_hash.hexdigest()",
"def sha256_hexoutput(in_str):\r\n return sha256(in_str.encode('ascii')).hexdigest()",
"def hashing_info(string):#KEY HASHING FUNCTION\n nodeInfo = string.encode('utf-8')\n\n #md5 -> 2^7 = 128 bits\n hash_object = hashlib.md5()\n hash_object.update(nodeInfo)\n\n tmp = hash_object.hexdigest()\n tmp = int(tmp,16)\n\n result = tmp >> (128-16)\n return result",
"def coerce(self, value):\n if isinstance(value, bytes) and len(value) == self.bit_length:\n return HashString.from_b256(value)\n elif len(value) == self.b16_length:\n return HashString.from_b16(value)\n elif self.b64_length - len(value) <= 4:\n return HashString.from_b64(value)",
"def encoded_hash(sha):\n return urlsafe_b64encode(sha.digest()).decode('ascii').rstrip('=')",
"def __str__(self) -> str:\n return self.hash",
"def get_hash_code(s):\n h = 0\n n = len(s)\n for i, c in enumerate(s):\n h = h + ord(c) * 31 ** (n - 1 - i)\n return StrUtil.convert_4_bytes(h)",
"def hash(self) -> bytes:",
"def get_hash(s):\n hash_object = hashlib.md5(s.encode())\n return hash_object.hexdigest()",
"def to_h(self):\n return str(self).encode('hex')",
"def hash_string(password):\n return hash(password)",
"def raw_password_to_string(raw_string):\n return hashlib.sha256(str(raw_string).encode('utf-8')).hexdigest()"
] | [
"0.67123157",
"0.6692818",
"0.66889405",
"0.66310173",
"0.6559617",
"0.65580744",
"0.6501489",
"0.64891833",
"0.64672464",
"0.6397518",
"0.6363674",
"0.63632846",
"0.6330039",
"0.63063854",
"0.6293308",
"0.6279598",
"0.627666",
"0.6234947",
"0.62323284",
"0.62225",
"0.6185577",
"0.61724156",
"0.6160998",
"0.6101712",
"0.6082281",
"0.60603446",
"0.6055222",
"0.6031537",
"0.60276926",
"0.6013504"
] | 0.7153087 | 0 |
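
dec2hash is the fixed-width inverse: it emits base-32 digits from the most significant position down, so leading zeros pad the string to the requested precision and dec2hash(hash2dec(s), len(s)) recovers s. A quick check of that round-trip property, using compact divmod-based equivalents of the two functions (same assumed alphabet as above):

ch32 = "0123456789bcdefghjkmnpqrstuvwxyz"
ch2int = {c: i for i, c in enumerate(ch32)}

def to_int(s):                      # equivalent of hash2dec
    n = 0
    for ch in s:
        n = n * 32 + ch2int[ch]
    return n

def to_str(n, pre):                 # equivalent of dec2hash
    digits = []
    for _ in range(pre):
        n, r = divmod(n, 32)
        digits.append(ch32[r])
    return "".join(reversed(digits))

for code in ("ezs42", "u4pruyd", "000000"):
    assert to_str(to_int(code), len(code)) == code
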
convert lat, lon coordinate to decimal geohash representation (pre=6) | def coords2geohash_dec(*, lat: float, lon: float, pre: int = 6) -> int:
return hash2dec(encoder(lat, lon, pre)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _decode(geohash):\n lat_val, lng_val, lat_err, lng_err = _decode_val_err(geohash)\r\n precision = _get_precision(lng_err)\n lat_val = \"%.*f\" % (precision, lat_val)\r\n lng_val = \"%.*f\" % (precision, lng_val)\r\n return lat_val, lng_val",
"def geohash_encode(latitude, longitude, precision=12):\n lat_interval, lon_interval = (-90.0, 90.0), (-180.0, 180.0)\n base32 = '0123456789bcdefghjkmnpqrstuvwxyz'\n geohash = []\n bits = [16, 8, 4, 2, 1]\n bit = 0\n ch = 0\n even = True\n while len(geohash) < precision:\n if even:\n mid = (lon_interval[0] + lon_interval[1]) / 2\n if longitude > mid:\n ch |= bits[bit]\n lon_interval = (mid, lon_interval[1])\n else:\n lon_interval = (lon_interval[0], mid)\n else:\n mid = (lat_interval[0] + lat_interval[1]) / 2\n if latitude > mid:\n ch |= bits[bit]\n lat_interval = (mid, lat_interval[1])\n else:\n lat_interval = (lat_interval[0], mid)\n even = not even\n if bit < 4:\n bit += 1\n else:\n geohash += base32[ch]\n bit = 0\n ch = 0\n return ''.join(geohash)",
"def decode(geohash):\r\n try:\r\n lat_val, lng_val, lat_err, lng_err = decode_val_err(geohash)\r\n precision = _get_precision(lng_err)\r\n lat_val = \"%.*f\" % (precision, lat_val)\r\n lng_val = \"%.*f\" % (precision, lng_val)\r\n return lat_val, lng_val\r\n except:\r\n print(\"Unable to decode!\") # TODO better error message\r",
"def geohash_dec2coords(*, geohash_dec: int, pre: int = 6) -> Tuple[float, float]:\n res = decoder(dec2hash(geohash_dec, pre=pre))\n return round(sum(res[0]) / 2, max(3, pre - 3)), round(\n sum(res[1]) / 2, max(3, pre - 3)\n )",
"def encode(latitude, longitude, precision=12):\n lat_interval, lon_interval = (-90.0, 90.0), (-180.0, 180.0)\n geohash = []\n bits = [ 16, 8, 4, 2, 1 ]\n bit = 0\n ch = 0\n even = True\n while len(geohash) < precision:\n if even:\n mid = (lon_interval[0] + lon_interval[1]) / 2\n if longitude > mid:\n ch |= bits[bit]\n lon_interval = (mid, lon_interval[1])\n else:\n lon_interval = (lon_interval[0], mid)\n else:\n mid = (lat_interval[0] + lat_interval[1]) / 2\n if latitude > mid:\n ch |= bits[bit]\n lat_interval = (mid, lat_interval[1])\n else:\n lat_interval = (lat_interval[0], mid)\n even = not even\n if bit < 4:\n bit += 1\n else:\n geohash += __base32[ch]\n bit = 0\n ch = 0\n return ''.join(geohash)",
"def _encode(lat_val, lng_val, length=12):\r\n lat_bits = _coordinate2bits(lat_val, -90, 90, length * 5 // 2)\r\n lng_bits = _coordinate2bits(lng_val, -180, 180, (length * 5 + 1) // 2)\r\n bits = ''.join(itertools.chain.from_iterable(\r\n itertools.zip_longest(lng_bits, lat_bits, fillvalue='')))\r\n numbers = [int(bits[i:i+5], 2) for i in range(0, len(bits), 5)]\r\n hashstr = ''.join(BASE32[i] for i in numbers)\r\n return hashstr",
"def encode(latitude, longitude, precision=12):\r\n lat_interval, lon_interval = (-90.0, 90.0), (-180.0, 180.0)\r\n geohash = []\r\n bits = [ 16, 8, 4, 2, 1 ]\r\n bit = 0\r\n ch = 0\r\n even = True\r\n while len(geohash) < precision:\r\n if even:\r\n mid = (lon_interval[0] + lon_interval[1]) / 2\r\n if longitude > mid:\r\n ch |= bits[bit]\r\n lon_interval = (mid, lon_interval[1])\r\n else:\r\n lon_interval = (lon_interval[0], mid)\r\n else:\r\n mid = (lat_interval[0] + lat_interval[1]) / 2\r\n if latitude > mid:\r\n ch |= bits[bit]\r\n lat_interval = (mid, lat_interval[1])\r\n else:\r\n lat_interval = (lat_interval[0], mid)\r\n even = not even\r\n if bit < 4:\r\n bit += 1\r\n else:\r\n geohash += __base32[ch]\r\n bit = 0\r\n ch = 0\r\n return ''.join(geohash)",
"def encode(lat_val, lng_val, length=12):\r\n hashstr = ''\r\n lat_lo, lat_hi = -90, 90\r\n lng_lo, lng_hi = -180, 180\r\n is_lng = True\r\n masks = [16, 8, 4, 2, 1] # use bit operation to make base32 convert fast\r\n\r\n d = 0\r\n bit = 0\r\n while len(hashstr) < length:\r\n if is_lng:\r\n mid = (lng_lo + lng_hi) / 2\r\n if lng_val > mid:\r\n d |= masks[bit]\r\n lng_lo = mid\r\n else:\r\n lng_hi = mid\r\n else:\r\n mid = (lat_lo + lat_hi) / 2\r\n if lat_val > mid:\r\n d |= masks[bit]\r\n lat_lo = mid\r\n else:\r\n lat_hi = mid\r\n\r\n is_lng = not is_lng\r\n if bit < 4:\r\n bit += 1\r\n else:\r\n hashstr += BASE32[d]\r\n bit = 0\r\n d = 0\r\n return hashstr",
"def convert(coords):\n lat = coords[:4]\n lon = coords[4:]\n\n lat = lat[:2] + \".\" + lat[2:]\n\n if int(lon[0]) > 5:\n lon = \"-\" + lon[:2] + \".\" + lon[2:]\n else:\n lon = \"-1\" + lon[:2] + \".\" + lon[2:]\n\n return (float(lat), float(lon))",
"def _geohash2bits(geohash):\r\n bits = ''.join([_char2bits(c) for c in geohash])\r\n return bits",
"def get_hash(self):\n s = super(Point, self).get_hash()\n for c in self.coordinate:\n s += \"_%f\" % c\n return s",
"def get_position_geohash(points):\n\n # takes in a list as a parameter of [(lat, lng) ... (lat, lng)]\n coords_data = [] # to store the dictionary generated\n\n # do something like a for loop over here\n for point in points:\n geohash_sql = \"SELECT * \" + \\\n \"FROM nyc_crimes_by_geohash \" + \\\n \"WHERE geohash=\" + \\\n \"ST_GeoHash(st_makepoint(%s, %s), 7);\" % \\\n (point[0], point[1])\n\n # execute the raw sql, and there should only be one result... so get that.\n geohash_query = db.engine.execute(geohash_sql).fetchone()\n\n if geohash_query is None:\n # if the geohash isn't found, need to do something,\n # query PostGIS for the geohash (not in db)\n # then assume that there are no crimes in the area\n geohash_of_point = \"SELECT ST_GeoHash(geometry(Point(%s, %s)), 7);\" \\\n % (point[0], point[1])\n\n geohash_found = db.engine.execute(geohash_of_point).fetchone()\n\n geohash_query = [0, geohash_found[0], 0, 0.0]\n\n geohash_query_data = {\n 'geohash': geohash_query[1],\n 'total_crimes': geohash_query[2],\n 'crime_index': float(geohash_query[3]),\n 'point': point\n }\n coords_data.append(geohash_query_data)\n\n # return something like [{dicte}, {dictw}], or {dict}, based on total pts\n return coords_data",
"def hash_point(self, point) -> int:\n\n hash_value = 7\n hash_value = 53 * hash_value + hash(point.id)\n hash_value = 53 * hash_value + hash(point.cat)\n hash_value = 53 * hash_value + int(point.lat * point.lat)\n hash_value = 53 * hash_value + int(point.lon * point.lon)\n return hash_value",
"def lonlat_to_osgb (lon, lat, digits=3):\n\t# NOTE: last test actually fails, due to being off by 1. That's 1\n\t# metre, and I'm not going to worry about it.\n\teast, north = lonlat_to_eastnorth (lon, lat)\n\treturn eastnorth_to_osgb (east, north, digits)",
"def decode_val_err(geohash):\r\n\r\n lat_lo, lat_hi = -90, 90\r\n lng_lo, lng_hi = -180, 180\r\n is_lng = True\r\n masks = [16, 8, 4, 2, 1] # use bit operation to make base32 convert fast\r\n\r\n for c in geohash:\r\n d = CHARMAP[c]\r\n for mask in masks:\r\n if is_lng:\r\n mid = (lng_lo + lng_hi) / 2\r\n if d & mask:\r\n lng_lo = mid\r\n else:\r\n lng_hi = mid\r\n else:\r\n mid = (lat_lo + lat_hi) / 2\r\n if d & mask:\r\n lat_lo = mid\r\n else:\r\n lat_hi = mid\r\n is_lng = not is_lng\r\n\r\n lat_val = (lat_lo + lat_hi) / 2\r\n lng_val = (lng_lo + lng_hi) / 2\r\n lat_err = (lat_hi - lat_lo) / 2\r\n lng_err = (lng_hi - lng_lo) / 2\r\n\r\n return lat_val, lng_val, lat_err, lng_err",
"def convert_degrees_to_decimal(lat, lon):\n # separate by non numbers\n # 32°44′52″N\n # 97°5′34″W\n\n lat_list = [\"\".join(x) for _, x in itertools.groupby(lat, key=str.isdigit)]\n lat = float(lat_list[0]) + (float(lat_list[2]) / 60) + (float(lat_list[4]) / 3600)\n\n lon_list = [\"\".join(x) for _, x in itertools.groupby(lon, key=str.isdigit)]\n lon = -(float(lon_list[0]) + (float(lon_list[2]) / 60) + (float(lon_list[4]) / 3600))\n\n return '{}, {}'.format(round(lat, 6), round(lon, 6))",
"def _decode_val_err(geohash):\r\n bits = _geohash2bits(geohash)\r\n lat_bits = itertools.islice(bits, 1, None, 2)\r\n lat_val, lat_err = _bits2coordinate(lat_bits, -90, 90)\r\n lng_bits = itertools.islice(bits, 0, None, 2)\r\n lng_val, lng_err = _bits2coordinate(lng_bits, -180, 180)\r\n return lat_val, lng_val, lat_err, lng_err",
"def _normalize_location(lat: float, lon: float):\n latitude = \"{0:.3f}\".format(round(lat, 3))\n longitude = \"{0:.3f}\".format(round(lon, 3))\n return latitude + \":\" + longitude",
"def lat_lng(row):\r\n lat = row[\"latitude\"]\r\n lng = row[\"longitude\"]\r\n n = int(lat/GRANULARITY)\r\n nlat_start = n * GRANULARITY\r\n nlat_end = nlat_start + GRANULARITY\r\n nlg=int(lng/GRANULARITY)\r\n nlng_start = nlg * GRANULARITY\r\n nlng_end = nlng_start + GRANULARITY\r\n latlng=[(nlat_start,nlng_start), (nlat_start,nlng_end), (nlat_end,nlng_end), (nlat_end,nlng_start)]\r\n return latlng",
"def lon_to_int(lon):\n lon = int((Decimal(lon) * 10000000).quantize(Decimal('1'), rounding=ROUND_HALF_UP))\n return (lon + 1800000000) % 3600000000 - 1800000000",
"def gpgga_convert(line):\r\n gps = line.strip().split(',')\r\n #check data\r\n if gps[6] == '0' :\r\n return\r\n fix = ''\r\n if gps[6] == '1':\r\n fix = 'GPS fix'\r\n elif gps[6] == '2':\r\n fix = 'DGPS fix'\r\n elif gps[6] == '4':\r\n fix = 'RTK Fix coordinate (centimeter precision)'\r\n elif gps[6] == '5':\r\n fix = 'RTK Float (decimeter precision)'\r\n #utc = gps[1][0:2] + ':' + gps[1][2:4] + ':' + gps[1][4:6]\r\n lat = ddm_dd_convert(gps[2], gps[3])\r\n long = ddm_dd_convert(gps[4], gps[5]) \r\n return [lat, long, fix]",
"def coordinates(latitude, longitude):\r\n location = geolocator.reverse(latitude + \", \" + longitude)\r\n data = location.raw\r\n data = data['address']\r\n state_code = data['state']\r\n return state_code",
"def _coord_to_bin(self,code):\n\t\tbinary = \"\"\n\t\tfor num in code:\n\t\t\tbinary += '{0:02b}'.format(int(num))\n\t\tassert ( len(binary) == 16 )\n\t\treturn binary",
"def encodeCoordinate(number):\n \n number = round(number, 4) # Rounding the coordinate to 4 decimal places, equivalent to a precision of 10m \n number = int(number * 10000) # Multiplying the coordinate by 10000 in order to transform to an integer\n \n array = [None]*3 # Creating an array to store the bytes \n \n if number < 0 : # The if statement treats the case when the coordinate is negative \n number = -number\n array[0] = (number>>16) & 0xff | 0b10000000 # we fill the first byte of the encoded message and the 24th bit is turned to 1 to signify a negative number \n else :\n array[0] = (number>>16) & 0xff # filling byte 0\n\n array[1] = (number>>8) & 0xff # filling byte 1\n array[2] = number & 0xff # filling byte 2\n\n return bytes(array) # returning the coordinate in byte format, necessary for LoRa transmition ",
"def OSGB36toWGS84(lat, lng):\n\n a = 6377563.396\n b = 6356256.909\n eSquared = ab2ecc(a, b)\n\n phi = math.radians(lat)\n lmb = math.radians(lng)\n\n v = a / (math.sqrt(1 - eSquared * sinSquared(phi)))\n H = 0\n x = (v + H) * math.cos(phi) * math.cos(lmb)\n y = (v + H) * math.cos(phi) * math.sin(lmb)\n z = ((1 - eSquared) * v + H) * math.sin(phi)\n\n tx = 446.448\n ty = -124.157\n tz = 542.060\n s = -0.0000204894\n rx = math.radians(0.00004172222)\n ry = math.radians(0.00006861111)\n rz = math.radians(0.00023391666)\n\n xB = tx + (x * (1 + s)) + (-rx * y) + (ry * z)\n yB = ty + (rz * x) + (y * (1 + s)) + (-rx * z)\n zB = tz + (-ry * x) + (rx * y) + (z * (1 + s))\n\n a = 6378137.000\n b = 6356752.3141\n eSquared = ab2ecc(a, b)\n\n lambdaB = math.degrees(math.atan(yB / xB))\n p = math.sqrt((xB * xB) + (yB * yB))\n phiN = math.atan(zB / (p * (1 - eSquared)))\n for i in xrange(1,10):\n v = a / (math.sqrt(1 - eSquared * sinSquared(phiN)))\n phiN1 = math.atan((zB + (eSquared * v * math.sin(phiN))) / p)\n phiN = phiN1\n\n phiB = math.degrees(phiN)\n\n return (phiB, lambdaB)",
"def convert(self, lat, lon):\r\n a = self.a\r\n b = self.b\r\n long0 = self.long0\r\n k0 = self.k0\r\n dx = self.dx\r\n\r\n e = (1 - b ** 2 / a ** 2) ** 0.5\r\n e2 = e ** 2 / (1 - e ** 2)\r\n n = (a - b) / (a + b)\r\n nu = a / (1 - (e ** 2) * (sin(lat) ** 2)) ** 0.5\r\n p = lon - long0\r\n\r\n A = a * (1 - n + (5 / 4.0) * (n ** 2 - n ** 3) + (81 / 64.0)*(n ** 4 - n ** 5))\r\n B = (3 * a * n / 2.0) * (1 - n + (7 / 8.0) * (n ** 2 - n ** 3) + (55 / 64.0) * (n ** 4 - n ** 5))\r\n C = (15 * a * (n ** 2) / 16.0) * (1 - n + (3 / 4.0) * (n ** 2 - n ** 3))\r\n D = (35 * a * (n ** 3) / 48.0) * (1 - n + (11 / 16.0) * (n ** 2 - n ** 3))\r\n E = (315 * a * (n ** 4) / 51.0) * (1 - n)\r\n\r\n S = A * lat - B * sin(2 * lat) + C * sin(4 * lat) - D * sin(6 * lat) + E * sin(8 * lat)\r\n\r\n K1 = S * k0\r\n K2 = k0 * nu * sin(2 * lat)/4.0\r\n K3 = (k0 * nu * sin(lat) * (cos(lat) ** 3) / 24.0) * \\\r\n (5 - tan(lat) ** 2 + 9 * e2 * (cos(lat) ** 2) + 4 * (e2 ** 2) * (cos(lat) ** 4))\r\n\r\n y = K1 + K2 * (p ** 2) + K3 * (p ** 4)\r\n\r\n K4 = k0 * nu * cos(lat)\r\n K5 = (k0 * nu * (cos(lat) ** 3) / 6.0) * (1 - tan(lat) ** 2 + e2 * (cos(lat) ** 2))\r\n\r\n x = K4 * p + K5 * (p ** 3) + dx\r\n return x, y",
"def get_img_coord_str(img):\n\n lat = convert_to_degress(get_gps_details(img)['GPSLatitude'])\n if get_gps_details(img)['GPSLatitudeRef'] == 'S':\n lat = -lat\n\n longitude = convert_to_degress(get_gps_details(img)['GPSLongitude'])\n if get_gps_details(img)['GPSLongitudeRef'] == 'W':\n longitude = -longitude\n\n return str(lat) + ',' + str(longitude)",
"def convert_hex_coords(hex_coords, unit=1):\n x = (hex_coords[0] - hex_coords[1]/2) * unit\n y = (hex_coords[1] * np.sqrt(3)/2) * unit\n return (x, y)",
"def _point_hash((x, y)):\n x, y = map(float, (x, y))\n return hash((atan2(x, y), hypot(x, y)))",
"def coord2pixel(tf, lat, lon):\n x = int(round((lon-tf[0])/tf[1]))\n y = int(round((lat-tf[3])/tf[5]))\n\n return x, y"
] | [
"0.7197927",
"0.7073706",
"0.6947506",
"0.68074983",
"0.67318535",
"0.66749907",
"0.66701984",
"0.65034956",
"0.64117384",
"0.6199895",
"0.61259615",
"0.59396446",
"0.5933387",
"0.59319395",
"0.57969904",
"0.5766618",
"0.5754867",
"0.5753793",
"0.5708975",
"0.56883526",
"0.5653677",
"0.56459475",
"0.55952555",
"0.5592698",
"0.5548429",
"0.5539943",
"0.55345434",
"0.5533947",
"0.550872",
"0.5481587"
] | 0.7521665 | 0 |
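
coords2geohash_dec composes two steps: a geohash encoder (the encoder() helper is assumed to implement the same bisection algorithm as the encode() negatives above) followed by the base-32-to-integer conversion from the hash2dec row. A self-contained sketch of that pipeline, checked against the canonical example point 42.605, -5.603, whose 5-character geohash is "ezs42":

BASE32 = "0123456789bcdefghjkmnpqrstuvwxyz"

def encode(lat, lon, pre):
    # Interleave longitude/latitude bisection bits, 5 bits per output character.
    lat_lo, lat_hi, lon_lo, lon_hi = -90.0, 90.0, -180.0, 180.0
    code, ch, nbits, even = "", 0, 0, True
    while len(code) < pre:
        if even:                              # longitude bit
            mid = (lon_lo + lon_hi) / 2
            if lon > mid:
                ch, lon_lo = ch * 2 + 1, mid
            else:
                ch, lon_hi = ch * 2, mid
        else:                                 # latitude bit
            mid = (lat_lo + lat_hi) / 2
            if lat > mid:
                ch, lat_lo = ch * 2 + 1, mid
            else:
                ch, lat_hi = ch * 2, mid
        even = not even
        nbits += 1
        if nbits == 5:
            code += BASE32[ch]
            ch, nbits = 0, 0
    return code

def hash2dec(s):
    n = 0
    for c in s:
        n = n * 32 + BASE32.index(c)
    return n

code = encode(42.605, -5.603, 5)
assert code == "ezs42"
assert hash2dec(code) == 14672002   # the integer coords2geohash_dec would return for pre=5
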
convert decimal geohash to lat, lon coordinate (we require pre=6) | def geohash_dec2coords(*, geohash_dec: int, pre: int = 6) -> Tuple[float, float]:
res = decoder(dec2hash(geohash_dec, pre=pre))
return round(sum(res[0]) / 2, max(3, pre - 3)), round(
sum(res[1]) / 2, max(3, pre - 3)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _decode(geohash):\n lat_val, lng_val, lat_err, lng_err = _decode_val_err(geohash)\r\n precision = _get_precision(lng_err)\n lat_val = \"%.*f\" % (precision, lat_val)\r\n lng_val = \"%.*f\" % (precision, lng_val)\r\n return lat_val, lng_val",
"def coords2geohash_dec(*, lat: float, lon: float, pre: int = 6) -> int:\n return hash2dec(encoder(lat, lon, pre))",
"def decode(geohash):\r\n try:\r\n lat_val, lng_val, lat_err, lng_err = decode_val_err(geohash)\r\n precision = _get_precision(lng_err)\r\n lat_val = \"%.*f\" % (precision, lat_val)\r\n lng_val = \"%.*f\" % (precision, lng_val)\r\n return lat_val, lng_val\r\n except:\r\n print(\"Unable to decode!\") # TODO better error message\r",
"def convert(coords):\n lat = coords[:4]\n lon = coords[4:]\n\n lat = lat[:2] + \".\" + lat[2:]\n\n if int(lon[0]) > 5:\n lon = \"-\" + lon[:2] + \".\" + lon[2:]\n else:\n lon = \"-1\" + lon[:2] + \".\" + lon[2:]\n\n return (float(lat), float(lon))",
"def _decode_val_err(geohash):\r\n bits = _geohash2bits(geohash)\r\n lat_bits = itertools.islice(bits, 1, None, 2)\r\n lat_val, lat_err = _bits2coordinate(lat_bits, -90, 90)\r\n lng_bits = itertools.islice(bits, 0, None, 2)\r\n lng_val, lng_err = _bits2coordinate(lng_bits, -180, 180)\r\n return lat_val, lng_val, lat_err, lng_err",
"def gpgga_convert(line):\r\n gps = line.strip().split(',')\r\n #check data\r\n if gps[6] == '0' :\r\n return\r\n fix = ''\r\n if gps[6] == '1':\r\n fix = 'GPS fix'\r\n elif gps[6] == '2':\r\n fix = 'DGPS fix'\r\n elif gps[6] == '4':\r\n fix = 'RTK Fix coordinate (centimeter precision)'\r\n elif gps[6] == '5':\r\n fix = 'RTK Float (decimeter precision)'\r\n #utc = gps[1][0:2] + ':' + gps[1][2:4] + ':' + gps[1][4:6]\r\n lat = ddm_dd_convert(gps[2], gps[3])\r\n long = ddm_dd_convert(gps[4], gps[5]) \r\n return [lat, long, fix]",
"def geohash_encode(latitude, longitude, precision=12):\n lat_interval, lon_interval = (-90.0, 90.0), (-180.0, 180.0)\n base32 = '0123456789bcdefghjkmnpqrstuvwxyz'\n geohash = []\n bits = [16, 8, 4, 2, 1]\n bit = 0\n ch = 0\n even = True\n while len(geohash) < precision:\n if even:\n mid = (lon_interval[0] + lon_interval[1]) / 2\n if longitude > mid:\n ch |= bits[bit]\n lon_interval = (mid, lon_interval[1])\n else:\n lon_interval = (lon_interval[0], mid)\n else:\n mid = (lat_interval[0] + lat_interval[1]) / 2\n if latitude > mid:\n ch |= bits[bit]\n lat_interval = (mid, lat_interval[1])\n else:\n lat_interval = (lat_interval[0], mid)\n even = not even\n if bit < 4:\n bit += 1\n else:\n geohash += base32[ch]\n bit = 0\n ch = 0\n return ''.join(geohash)",
"def decode_val_err(geohash):\r\n\r\n lat_lo, lat_hi = -90, 90\r\n lng_lo, lng_hi = -180, 180\r\n is_lng = True\r\n masks = [16, 8, 4, 2, 1] # use bit operation to make base32 convert fast\r\n\r\n for c in geohash:\r\n d = CHARMAP[c]\r\n for mask in masks:\r\n if is_lng:\r\n mid = (lng_lo + lng_hi) / 2\r\n if d & mask:\r\n lng_lo = mid\r\n else:\r\n lng_hi = mid\r\n else:\r\n mid = (lat_lo + lat_hi) / 2\r\n if d & mask:\r\n lat_lo = mid\r\n else:\r\n lat_hi = mid\r\n is_lng = not is_lng\r\n\r\n lat_val = (lat_lo + lat_hi) / 2\r\n lng_val = (lng_lo + lng_hi) / 2\r\n lat_err = (lat_hi - lat_lo) / 2\r\n lng_err = (lng_hi - lng_lo) / 2\r\n\r\n return lat_val, lng_val, lat_err, lng_err",
"def _geohash2bits(geohash):\r\n bits = ''.join([_char2bits(c) for c in geohash])\r\n return bits",
"def lat_lng(row):\r\n lat = row[\"latitude\"]\r\n lng = row[\"longitude\"]\r\n n = int(lat/GRANULARITY)\r\n nlat_start = n * GRANULARITY\r\n nlat_end = nlat_start + GRANULARITY\r\n nlg=int(lng/GRANULARITY)\r\n nlng_start = nlg * GRANULARITY\r\n nlng_end = nlng_start + GRANULARITY\r\n latlng=[(nlat_start,nlng_start), (nlat_start,nlng_end), (nlat_end,nlng_end), (nlat_end,nlng_start)]\r\n return latlng",
"def encode(latitude, longitude, precision=12):\n lat_interval, lon_interval = (-90.0, 90.0), (-180.0, 180.0)\n geohash = []\n bits = [ 16, 8, 4, 2, 1 ]\n bit = 0\n ch = 0\n even = True\n while len(geohash) < precision:\n if even:\n mid = (lon_interval[0] + lon_interval[1]) / 2\n if longitude > mid:\n ch |= bits[bit]\n lon_interval = (mid, lon_interval[1])\n else:\n lon_interval = (lon_interval[0], mid)\n else:\n mid = (lat_interval[0] + lat_interval[1]) / 2\n if latitude > mid:\n ch |= bits[bit]\n lat_interval = (mid, lat_interval[1])\n else:\n lat_interval = (lat_interval[0], mid)\n even = not even\n if bit < 4:\n bit += 1\n else:\n geohash += __base32[ch]\n bit = 0\n ch = 0\n return ''.join(geohash)",
"def lon_to_int(lon):\n lon = int((Decimal(lon) * 10000000).quantize(Decimal('1'), rounding=ROUND_HALF_UP))\n return (lon + 1800000000) % 3600000000 - 1800000000",
"def get_latlon():\n\t\n iss.compute() # Get the lat/long values from ephem\n long_value = [float(i) for i in str(iss.sublong).split(\":\")]\n if long_value[0] < 0:\n long_value[0] = abs(long_value[0])\n cam.exif_tags['GPS.GPSLongitudeRef'] = \"W\"\n else:\n cam.exif_tags['GPS.GPSLongitudeRef'] = \"E\"\n cam.exif_tags['GPS.GPSLongitude'] = '%d/1,%d/1,%d/10' % (long_value[0], long_value[1], long_value[2]*10)\n lat_value = [float(i) for i in str(iss.sublat).split(\":\")]\n if lat_value[0] < 0:\n lat_value[0] = abs(lat_value[0])\n cam.exif_tags['GPS.GPSLatitudeRef'] = \"S\"\n else:\n cam.exif_tags['GPS.GPSLatitudeRef'] = \"N\"\n cam.exif_tags['GPS.GPSLatitude'] = '%d/1,%d/1,%d/10' % (lat_value[0], lat_value[1], lat_value[2]*10)\n return (iss.sublat / degree, iss.sublong / degree)",
"def encode(latitude, longitude, precision=12):\r\n lat_interval, lon_interval = (-90.0, 90.0), (-180.0, 180.0)\r\n geohash = []\r\n bits = [ 16, 8, 4, 2, 1 ]\r\n bit = 0\r\n ch = 0\r\n even = True\r\n while len(geohash) < precision:\r\n if even:\r\n mid = (lon_interval[0] + lon_interval[1]) / 2\r\n if longitude > mid:\r\n ch |= bits[bit]\r\n lon_interval = (mid, lon_interval[1])\r\n else:\r\n lon_interval = (lon_interval[0], mid)\r\n else:\r\n mid = (lat_interval[0] + lat_interval[1]) / 2\r\n if latitude > mid:\r\n ch |= bits[bit]\r\n lat_interval = (mid, lat_interval[1])\r\n else:\r\n lat_interval = (lat_interval[0], mid)\r\n even = not even\r\n if bit < 4:\r\n bit += 1\r\n else:\r\n geohash += __base32[ch]\r\n bit = 0\r\n ch = 0\r\n return ''.join(geohash)",
"def encode(lat_val, lng_val, length=12):\r\n hashstr = ''\r\n lat_lo, lat_hi = -90, 90\r\n lng_lo, lng_hi = -180, 180\r\n is_lng = True\r\n masks = [16, 8, 4, 2, 1] # use bit operation to make base32 convert fast\r\n\r\n d = 0\r\n bit = 0\r\n while len(hashstr) < length:\r\n if is_lng:\r\n mid = (lng_lo + lng_hi) / 2\r\n if lng_val > mid:\r\n d |= masks[bit]\r\n lng_lo = mid\r\n else:\r\n lng_hi = mid\r\n else:\r\n mid = (lat_lo + lat_hi) / 2\r\n if lat_val > mid:\r\n d |= masks[bit]\r\n lat_lo = mid\r\n else:\r\n lat_hi = mid\r\n\r\n is_lng = not is_lng\r\n if bit < 4:\r\n bit += 1\r\n else:\r\n hashstr += BASE32[d]\r\n bit = 0\r\n d = 0\r\n return hashstr",
"def lonlat_to_osgb (lon, lat, digits=3):\n\t# NOTE: last test actually fails, due to being off by 1. That's 1\n\t# metre, and I'm not going to worry about it.\n\teast, north = lonlat_to_eastnorth (lon, lat)\n\treturn eastnorth_to_osgb (east, north, digits)",
"def parsenwspt(text):\n lat = int(text[0:4]) / 100\n lon = int(text[4:])\n if lon < 1000:\n lon += 10000\n return (lon / -100, lat)",
"def geo2cell(geofile, posfile):",
"def desiredENU2geo(self, x_L, y_L, z):\n\t\tx = cos(self.local_rot)*x_L - sin(self.local_rot)*y_L\n\t\ty = sin(self.local_rot)*x_L + cos(self.local_rot)*y_L\n\n\t\tlat0 = self.origin[0]\n\t\tlon0 = self.origin[1]\n\n\t\tlat, lon, alt = pm.enu2geodetic(x, y, z, lat0, lon0, self.h0)\n\t\treturn lat, lon, alt",
"def get_coordinates(geotags) -> Tuple[float, float]:\n lat = get_decimal_from_dms(\n geotags['GPSLatitude'],\n geotags['GPSLatitudeRef'],\n )\n lon = get_decimal_from_dms(\n geotags['GPSLongitude'],\n geotags['GPSLongitudeRef'],\n )\n\n return lat, lon",
"def parse_lon_lat(grid, lon, lat):\n if lat is None:\n lat = grid.origin_latitude[\"data\"][0]\n if lon is None:\n lon = grid.origin_longitude[\"data\"][0]\n return lon, lat",
"def convert(self, lat, lon):\r\n a = self.a\r\n b = self.b\r\n long0 = self.long0\r\n k0 = self.k0\r\n dx = self.dx\r\n\r\n e = (1 - b ** 2 / a ** 2) ** 0.5\r\n e2 = e ** 2 / (1 - e ** 2)\r\n n = (a - b) / (a + b)\r\n nu = a / (1 - (e ** 2) * (sin(lat) ** 2)) ** 0.5\r\n p = lon - long0\r\n\r\n A = a * (1 - n + (5 / 4.0) * (n ** 2 - n ** 3) + (81 / 64.0)*(n ** 4 - n ** 5))\r\n B = (3 * a * n / 2.0) * (1 - n + (7 / 8.0) * (n ** 2 - n ** 3) + (55 / 64.0) * (n ** 4 - n ** 5))\r\n C = (15 * a * (n ** 2) / 16.0) * (1 - n + (3 / 4.0) * (n ** 2 - n ** 3))\r\n D = (35 * a * (n ** 3) / 48.0) * (1 - n + (11 / 16.0) * (n ** 2 - n ** 3))\r\n E = (315 * a * (n ** 4) / 51.0) * (1 - n)\r\n\r\n S = A * lat - B * sin(2 * lat) + C * sin(4 * lat) - D * sin(6 * lat) + E * sin(8 * lat)\r\n\r\n K1 = S * k0\r\n K2 = k0 * nu * sin(2 * lat)/4.0\r\n K3 = (k0 * nu * sin(lat) * (cos(lat) ** 3) / 24.0) * \\\r\n (5 - tan(lat) ** 2 + 9 * e2 * (cos(lat) ** 2) + 4 * (e2 ** 2) * (cos(lat) ** 4))\r\n\r\n y = K1 + K2 * (p ** 2) + K3 * (p ** 4)\r\n\r\n K4 = k0 * nu * cos(lat)\r\n K5 = (k0 * nu * (cos(lat) ** 3) / 6.0) * (1 - tan(lat) ** 2 + e2 * (cos(lat) ** 2))\r\n\r\n x = K4 * p + K5 * (p ** 3) + dx\r\n return x, y",
"def project_xy_to_latlng(x, y):\n if x and y: # neither are blank\n d = {}\n latlng = NYSP1983_PROJ(int(x), int(y), inverse=True)\n d['longitude'], d['latitude'] = [round(c, 5) for c in latlng] # round em\n return d\n else:\n return {'longitude': None, 'latitude': None}",
"def _encode(lat_val, lng_val, length=12):\r\n lat_bits = _coordinate2bits(lat_val, -90, 90, length * 5 // 2)\r\n lng_bits = _coordinate2bits(lng_val, -180, 180, (length * 5 + 1) // 2)\r\n bits = ''.join(itertools.chain.from_iterable(\r\n itertools.zip_longest(lng_bits, lat_bits, fillvalue='')))\r\n numbers = [int(bits[i:i+5], 2) for i in range(0, len(bits), 5)]\r\n hashstr = ''.join(BASE32[i] for i in numbers)\r\n return hashstr",
"def coordinates(latitude, longitude):\r\n location = geolocator.reverse(latitude + \", \" + longitude)\r\n data = location.raw\r\n data = data['address']\r\n state_code = data['state']\r\n return state_code",
"def _normalize_location(lat: float, lon: float):\n latitude = \"{0:.3f}\".format(round(lat, 3))\n longitude = \"{0:.3f}\".format(round(lon, 3))\n return latitude + \":\" + longitude",
"def get_lat(x):\n lat, lon = x.split(',')\n return float(lat)",
"def get_coord_from_address(code_postal, adresse=None):\n headers = {\"Content-Type\": \"application/json\"}\n if adresse != None:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(adresse) + \"&postcode=\" + str(code_postal)))\n else:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(code_postal)))\n print(url)\n r = requests.get(url, headers=headers, data=\"\")\n js = json.loads(r.text)\n if code_postal == 75001:\n x = js['features'][1]['geometry']['coordinates']\n else:\n \tx = js['features'][0]['geometry']['coordinates']\n longitude = x[0]\n latitude = x[1]\n pos = []\n pos.append(longitude)\n pos.append(latitude)\n print(pos)\n return pos",
"def key_to_coordinates(key):\n stripkey = key.strip(\"(\").strip(\")\").split(\", \")\n point_coordinates = tuple(float(elem) for elem in stripkey)\n return point_coordinates",
"def cr2lonlat_for_geotif(path):\n old_cs, new_cs, gta, local_vars = _create_xform(path)\n transform = osr.CoordinateTransformation(old_cs, new_cs)\n\n def composite(c, r):\n \"\"\"xform from (c, r) to (lon, lat)\"\"\"\n x, y = gta * (c, r)\n lat, lon = transform.TransformPoint(x, y)[:2]\n if not -90 <= lat <= 90:\n raise ValueError('illegal lat value, did you switch coordinates')\n return lon, lat\n \n return composite"
] | [
"0.7526719",
"0.7387216",
"0.7375263",
"0.63441426",
"0.6267863",
"0.624697",
"0.6224477",
"0.6146233",
"0.6016872",
"0.5977346",
"0.592548",
"0.5918737",
"0.5868537",
"0.5856564",
"0.5801432",
"0.5772634",
"0.57722",
"0.5735534",
"0.57101923",
"0.570122",
"0.5676153",
"0.5670575",
"0.5656275",
"0.5651629",
"0.56320643",
"0.5616057",
"0.560557",
"0.55856776",
"0.5579728",
"0.5564494"
] | 0.7478197 | 1 |
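
geohash_dec2coords reverses that pipeline: convert the integer back to a base-32 string, decode the string to latitude/longitude intervals, and return the rounded cell centre. The exact return convention of decoder() (latitude interval first) is an assumption inferred from the lat, lon order of the return statement above. A sketch of the same computation for the integer produced in the previous example:

BASE32 = "0123456789bcdefghjkmnpqrstuvwxyz"

def dec2hash(n, pre):
    digits = []
    for _ in range(pre):
        n, r = divmod(n, 32)
        digits.append(BASE32[r])
    return "".join(reversed(digits))

def decode(code):
    # Undo the interleaved bisection, returning (lat interval, lon interval).
    lat_lo, lat_hi, lon_lo, lon_hi = -90.0, 90.0, -180.0, 180.0
    even = True
    for c in code:
        d = BASE32.index(c)
        for mask in (16, 8, 4, 2, 1):
            if even:                          # longitude bit
                mid = (lon_lo + lon_hi) / 2
                if d & mask:
                    lon_lo = mid
                else:
                    lon_hi = mid
            else:                             # latitude bit
                mid = (lat_lo + lat_hi) / 2
                if d & mask:
                    lat_lo = mid
                else:
                    lat_hi = mid
            even = not even
    return (lat_lo, lat_hi), (lon_lo, lon_hi)

(lat_lo, lat_hi), (lon_lo, lon_hi) = decode(dec2hash(14672002, 5))
lat = round((lat_lo + lat_hi) / 2, 3)    # max(3, pre - 3) == 3 for pre == 5
lon = round((lon_lo + lon_hi) / 2, 3)
assert abs(lat - 42.605) < 1e-9 and abs(lon - (-5.603)) < 1e-9
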
uploads file to Google Cloud storage | def _cloud_storage_upload(local_file, bucket, filename_on_bucket):
client = storage.Client()
bucket = client.get_bucket(bucket)
blob = bucket.blob(filename_on_bucket)
blob.upload_from_filename(local_file)
print('uploaded ', bucket, filename_on_bucket) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upload_to_gcs():\n client = storage.Client(project=\"filmreccommendations\")\n bucket = client.get_bucket(\"filmreccommendations.appspot.com\")\n blob = bucket.blob(os.path.basename(PICKLE_FILENAME))\n blob.upload_from_filename(PICKLE_FILENAME)",
"def gcloud_upload_file(file):\n if not file:\n return None\n\n public_url = storage.upload_file(\n file.read(),\n file.filename,\n file.content_type\n )\n\n current_app.logger.info(\n \"Uploaded file %s as %s.\", file.filename, public_url)\n\n return public_url",
"def __upload(self, filename):\n # Save to local path\n save_img = self.__frame.copy()\n\n # Initialize the bucket for after usage\n image_blob = None\n\n # Make the Google Cloud Storage client\n # and set the storage path\n if self.__yaml[\"bucket\"] is not None:\n client = storage.Client()\n bucket = client.get_bucket(self.__yaml[\"bucket\"])\n image_blob = bucket.blob(filename)\n\n # Upload and save the image\n try:\n if self.__yaml[\"output_path\"] is not None:\n # Save image in local\n LOGGER.info(f\"Saved {filename} in local folder\", )\n path = os.path.sep.join((self.__yaml[\"output_path\"], filename))\n cv2.imwrite(path, save_img)\n\n # Upload to Google Cloud Storage\n # if the user set the \"bucket\" option\n if self.__yaml[\"bucket\"] is not None:\n image_blob.upload_from_filename(os.path.sep.join((self.__yaml[\"output_path\"],\n filename)),\n content_type=\"image/jpeg\")\n\n LOGGER.info(f\"Saved {filename} to google cloud storage\")\n elif self.__yaml[\"bucket\"] is not None:\n # Convert numpy array to bytes\n temp_file = Image.fromarray(cv2.cvtColor(save_img, cv2.COLOR_BGR2RGB))\n temp_file_bytes = io.BytesIO()\n temp_file.save(temp_file_bytes,\n format=\"JPEG\")\n\n # Read the bytes from beginning\n temp_file_bytes.seek(0)\n image_blob.upload_from_file(temp_file_bytes,\n content_type=\"image/jpeg\")\n\n LOGGER.info(f\"Saved {filename} to google cloud storage\")\n except Exception as error:\n # If errors occur, just print the error messages\n # and don't exit the program\n LOGGER.warning(error)",
"def _upload(auth_http, project_id, bucket_name, file_path, object_name, acl):\n with open(file_path, 'rb') as f:\n data = f.read()\n content_type, content_encoding = mimetypes.guess_type(file_path)\n\n headers = {\n 'x-goog-project-id': project_id,\n 'x-goog-api-version': API_VERSION,\n 'x-goog-acl': acl,\n 'Content-Length': '%d' % len(data)\n }\n if content_type: headers['Content-Type'] = content_type\n if content_type: headers['Content-Encoding'] = content_encoding\n\n try:\n response, content = auth_http.request(\n 'http://%s.storage.googleapis.com/%s' % (bucket_name, object_name),\n method='PUT',\n headers=headers,\n body=data)\n except httplib2.ServerNotFoundError, se:\n raise Error(404, 'Server not found.')\n\n if response.status >= 300:\n raise Error(response.status, response.reason)\n\n return content",
"def upload_file(file_stream, filename, content_type):\n client = storage.Client(project=PROJECT_ID)\n bucket = client.bucket(CLOUD_STORAGE_BUCKET)\n\n if content_type=='audio/aac':\n file_fullname = filename+'.m4a'\n\n blob = bucket.blob(file_fullname)\n\n blob.upload_from_string(\n file_stream,\n content_type=content_type)\n\n url = 'gs://{}/{}'.format(CLOUD_STORAGE_BUCKET, file_fullname)\n\n return url",
"def put_upload(self):\n # print \"starting upload...\", self.current_upload['filepath']\n self.touch()\n self.log(\"STARTING_UPLOAD\", level=INFO)\n try:\n Backend.put_file(self.fileobj, self.current_upload[\"gcs_url\"])\n except exceptions.FilePutError as err:\n self.handle_put_error(err, self.fileobj)\n raise",
"def upload_blob(bucket_name, src_file, dst_file_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket('fansipan-website-290191')\n blob = bucket.blob('uploaded/'+dst_file_name)\n blob.upload_from_string(src_file, content_type='image/jpg')\n print('File uploaded to uploaded/{}.'.format(dst_file_name))",
"def upload_from_file(self, file_obj, name_on_storage, **keyword_args):\n blob = self.bucket.blob(name_on_storage)\n blob.upload_from_file(file_obj, **keyword_args)\n print(f\"Upload object {name_on_storage}\")",
"def _upload_to_gcs(self, file_to_upload):\n hook = GCSHook(\n gcp_conn_id=self.gcp_conn_id,\n impersonation_chain=self.impersonation_chain,\n )\n is_data_file = file_to_upload.get(\"file_name\") != self.schema_filename\n metadata = None\n if is_data_file and self.upload_metadata:\n metadata = {\"row_count\": file_to_upload[\"file_row_count\"]}\n\n object_name = file_to_upload.get(\"file_name\")\n if is_data_file and self.partition_columns:\n # Add partition column values to object_name\n partition_values = file_to_upload.get(\"partition_values\")\n head_path, tail_path = os.path.split(object_name)\n partition_subprefix = [\n f\"{col}={val}\" for col, val in zip(self.partition_columns, partition_values)\n ]\n object_name = os.path.join(head_path, *partition_subprefix, tail_path)\n\n hook.upload(\n self.bucket,\n object_name,\n file_to_upload.get(\"file_handle\").name,\n mime_type=file_to_upload.get(\"file_mime_type\"),\n gzip=self.gzip if is_data_file else False,\n metadata=metadata,\n )",
"def upload_finish(self, cloud_file):",
"def gcloud_upload_file(audio_data, gcloud_bucket_name):\n bucket = gce_storage_client.get_bucket(gcloud_bucket_name)\n remote_filepath = \"%s\" % uuid4()\n\n blob = bucket.blob(remote_filepath)\n\n # Upload the audio\n blob.upload_from_string(audio_data)\n\n url = blob.public_url\n if isinstance(url, six.binary_type):\n url = url.decode('utf-8')\n\n return url",
"def upload_blob(bucket_name, source_file_name, destination_blob_name):\n bucket_name = \"teststorechakra\"\n source_file_name = \"/Users/demo/Documents/learn/gcp/Setting_gcp_datalabs.sh\"\n destination_blob_name = \"testcloud sdk\"\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print(\n \"File {} uploaded to {}.\".format(\n source_file_name, destination_blob_name\n )\n )",
"def upload_blob(bucket_name, source_file_name, destination_blob_name):\n # bucket_name = \"your-bucket-name\"\n # source_file_name = \"local/path/to/file\"\n # destination_blob_name = \"storage-object-name\"\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n uri = f\"gs://{bucket_name}/{destination_blob_name}\"\n\n return uri",
"def upload_blob(self, bucket_name, file_name, contents):\n\n bucket = self.storage_client.bucket(bucket_name)\n blob = bucket.blob(file_name)\n blob.upload_from_string(contents)\n print(\n \"File {} uploaded to bucket {} as file {}.\".format(\n file_name, bucket_name, file_name\n )\n )",
"def upload_to_gcs(file_name, tmp_obj_name, google_cloud_storage_conn_id, gcs_bucket):\n\n gcs_hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=google_cloud_storage_conn_id)\n gcs_hook.upload(bucket=gcs_bucket,\n object=file_name,\n filename=tmp_obj_name,\n gzip=True)\n logging.info(f'new file created {file_name}')",
"def upload_from_filename(self, file_name, name_on_storage, **keyword_args):\n blob = self.bucket.blob(name_on_storage)\n blob.upload_from_filename(file_name, **keyword_args)\n print(f\"Upload file {file_name} and name as {name_on_storage}\")",
"def upload_file(\n self, bucket_id: uplink.Path, filename: uplink.Path, file: uplink.Body\n ):\n pass",
"def upload(self, filename, file_path):\n return",
"def upload():\n uploaded_file = request.files.get('file')\n\n if not uploaded_file:\n return 'No file uploaded.', 400\n\n # Create a Cloud Storage client.\n gcs = storage.Client()\n\n # Get the bucket that the file will be uploaded to.\n bucket = gcs.get_bucket('foodie_helper_bucket_1')\n #app.config['CLOUD_STORAGE_BUCKET']\n # Create a new blob and upload the file's content.\n blob = bucket.blob(uploaded_file.filename)\n\n blob.upload_from_string(\n uploaded_file.read(),\n content_type=uploaded_file.content_type\n )\n\n # The public URL can be used to directly access the uploaded file via HTTP.\n result = runImage(blob.public_url)\n machineResult = getConcept(result)\n return render_template('results.html', url=blob.public_url, machineResult=machineResult)\n #return render_template('results.html', url=\"https://www.foodiesfeed.com/wp-content/uploads/2019/02/pizza-ready-for-baking.jpg\", machineResult=\"Pizza\")",
"def _upload_to_bucket(self, filename, ext_filename):\n if ext_filename is None:\n return\n\n if self.s3:\n self.bucket.upload_file(filename, ext_filename)\n logging.info('Uploaded {} to S3 with name {}'.format(filename, ext_filename))\n if self.gs:\n try:\n client = storage.Client()\n bucket = client.get_bucket(self.bucket_name)\n blob = storage.Blob(ext_filename, bucket)\n blob.upload_from_filename(filename)\n logging.info('Uploaded to {}'.format(ext_filename))\n except:\n logging.warning('Uploading file to bucket failed')",
"def upload_blob(bucket_name, source_file_name, destination_blob_name):\r\n bucket_name = \"my-photos\"\r\n source_file_name = \"./puppy.png\"\r\n estination_blob_name = \"puppy01\"\r\n\r\n storage_client = storage.Client()\r\n bucket = storage_client.bucket(bucket_name)\r\n blob = bucket.blob(destination_blob_name)\r\n\r\n blob.upload_from_filename(source_file_name)\r\n\r\n print(\r\n \"File {} uploaded to {}.\".format(\r\n source_file_name, destination_blob_name\r\n )\r\n )",
"def upload(filename, records):\n client = storage.Client()\n bucket = client.bucket(TEST_BUCKET)\n if records is not None:\n blob = bucket.blob(filename)\n blob.upload_from_string(convert_to_csv(records))\n return bucket",
"def uploadGCS(self, imageName):\n imageIndex = self.imageNames.index(imageName)\n blob = self.gcsBucket.blob('{0}/{1}'.format(self.meta['collectionAsset'],imageName))\n blob.upload_from_filename(self.meta['sources'][imageIndex])\n blob.make_public()\n \n return {'primaryPath': 'gs://{gcsBucket}/{collectionName}/{imageNa}'.format(gcsBucket=self.meta['gcsBucket'],collectionName=self.meta['collectionAsset'],imageNa=imageName)}",
"def upload_blob(source_file_name, destination_blob_name, bucket_name=\"bts-ml-data\"):\n # bucket_name = \"your-bucket-name\"\n # source_file_name = \"local/path/to/file\"\n # destination_blob_name = \"storage-object-name\"\n\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print(\n \"File {} uploaded to {}.\".format(\n source_file_name, destination_blob_name\n )\n )",
"def upload_file(file_name):\n blob_client = blob_svc_client.get_blob_client(container=container_name, \n blob=file_name.rsplit('/', maxsplit=1)[-1])\n\n # Create blob on storage\n print(f'uploading file - {file_name}')\n with open(file_name, \"rb\") as data:\n blob_client.upload_blob(data, overwrite=True)\n return file_name",
"def upload_blob(source_file_name, destination_blob_name, is_redact=False):\n # source_file_name = \"local/path/to/file\"\n # destination_blob_name = \"storage-object-name\"\n\n storage_client = storage.Client()\n \n # storage the file in the right bucket\n bucket_name = PUBLIC_BUCKET if is_redact else PRIVATE_BUCKET\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n blob.upload_from_filename(source_file_name)",
"def _upload_to_gcs(self, files_to_upload):\n # Compose mime_type using file format passed as param\n mime_type = 'application/' + self.export_format['file_format']\n hook = GoogleCloudStorageHook(\n google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,\n delegate_to=self.delegate_to)\n for object, tmp_file_handle in files_to_upload.items():\n hook.upload(self.bucket, object, tmp_file_handle.name, mime_type)",
"def upload(self, file_path, bucket_name, file_name):\n\n self.client.upload_file(file_path, bucket_name, file_name)",
"def upload_blob(bucket_name, source_file_name, destination_blob_name):\n # bucket_name = \"your-bucket-name\"\n # source_file_name = \"local/path/to/file\"\n # destination_blob_name = \"storage-object-name\"\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n img = cv2.imread(source_file_name)\n _, img_str = cv2.imencode('.jpg', img)\n img_bytes = img_str.tobytes()\n # blob.upload_from_filename(source_file_name)\n blob.upload_from_string(img_bytes)\n\n print(\n \"File {} uploaded to {}.\".format(\n source_file_name, destination_blob_name\n )\n )",
"def upload(self, *route, **req_data):\n # Read the FieldStorage.\n file_desc = req_data['file']\n file_mimetype = req_data['mimetype']\n if not isinstance(file_desc, FieldStorage):\n # Python is dangerous when the type is incorrectly assumed.\n return Response(b'invalid request body', status='400 Bad Request')\n\n # Persist the file.\n data_id = get_bucket().put(file_desc.value)\n to_store = StoredFile(\n id=uuid4().hex,\n data_id=data_id,\n mimetype=file_mimetype,\n content_length=len(file_desc.value),\n original_name=file_desc.filename\n )\n StoredFile.collection().put(to_store)\n\n log_activity('%s uploaded file %s'%(\n context.user.link, to_store.access_link\n ))\n\n # Respond.\n return Response(\n bytes(to_store.access_url, 'utf-8'),\n status='201 Created'\n )"
] | [
"0.7862477",
"0.7417298",
"0.73990583",
"0.7352191",
"0.7321791",
"0.7267003",
"0.6985354",
"0.69010127",
"0.6875503",
"0.68445647",
"0.68404883",
"0.6832378",
"0.6829256",
"0.67942363",
"0.67374986",
"0.67214787",
"0.67042154",
"0.66991466",
"0.6689875",
"0.6675743",
"0.66370505",
"0.65937495",
"0.6588665",
"0.65826523",
"0.6573707",
"0.6543756",
"0.6525026",
"0.6506007",
"0.65042436",
"0.64868224"
] | 0.74267185 | 1 |
Lists all the catalystport bindings | def get_all_catalystport_bindings():
LOG.debug("get_all_catalystport_bindings() called")
session = db.get_session()
try:
bindings = session.query
(catalyst_models.CatalystPortBinding).all()
return bindings
except exc.NoResultFound:
return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bindings(self):\n return self.__bindings",
"def list_ports(state):\n\tstate.report()",
"def list_ports(self):\n return self.ironic_client.port.list()",
"def port_list(self):\n return self._port_list",
"def get_all_port(self, conf, dpid):\n\t\tpass",
"def getBindings(self):\n return self.getBindingManager().getBindings()",
"def list_port(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/ports.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server, while listing ports.\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get port list Failed with status %s\"\n % response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Port List : %s \" % output)\n return output[\"ports\"]",
"def get_active_bindings(self):\n\n return list(self._active_bindings.values())",
"def list_ports():\n print '\\nHere is the list of available ports on this machine:'\n # lp.comports returns a list of (port, description, hardware ID) tuples\n iterator = sorted(lp.comports())\n for port, desc, hwid in iterator:\n print port\n exit()",
"def ListAccessBindings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _get_bindings_list_yang_name(self, bindings_list=None):\n\n yang_name_list = []\n\n for bindings_tuple in bindings_list:\n if self._module_name == bindings_tuple[2]:\n yang_name_list.append(bindings_tuple[0].split('.')[-1].replace('_', '-'))\n \n return yang_name_list",
"def list_network_profile_bindings(self, **params):\r\n return self.get(self.network_profile_bindings_path, params=params)",
"def list_connections(self, show_passthrough=True):\n return self._exprmapper.list_connections(show_passthrough)",
"def list_conf(self, kwargs):\n self.display(\n self.engine.query(\n self.engine.ALL_FILTER(),\n ALL, base=','.join([\"CN=Configuration\", self.engine.base_dn])\n ),\n True\n )",
"def ssh_list_connections(cls):\n for name in cls._ssh_connections.keys():\n print (name)",
"def get_port_binding():\n import docker\n client = docker.from_env()\n return [c.attrs['NetworkSettings']['Ports']['5555/tcp'][0]\n for c in client.containers.list(\n filters={'label': 'org.label-schema.name=profemag/femag'})]",
"def test_listSSLPort(self):\n store = Store(filesdir=self.mktemp())\n factory = DummyFactory(store=store)\n port = SSLPort(\n store=store, factory=factory, portNumber=1234, interface=u\"foo\",\n certificatePath=store.filesdir.child(\"bar\"))\n self.assertSuccessStatus(self._makeConfig(store), [\"list\"])\n self.assertEqual(\n \"%d) %r listening on:\\n\" % (factory.storeID, factory) +\n \" %d) SSL, interface %s, port %d, certificate %s\\n\" % (\n port.storeID, port.interface, port.portNumber,\n port.certificatePath.path),\n sys.stdout.getvalue())",
"def getConnectionList(self):\n return []",
"def display_port(self):\n ports=os.popen(\"sudo netstat -ntlp\").read().strip().splitlines()[2:]\n for port in ports:\n split=re.split('[\\s]+',port)\n self.portDic[\"Protcol\"]=split[0]\n self.portDic[\"Receive Q\"]=split[1]\n self.portDic[\"Send Q\"]=split[2]\n split_port=split[3].split(\":\")\n if split_port[1]==\"\":\n self.portDic[\"port\"]=\"No Port\" \n else:\n self.portDic[\"port\"]=split_port[1]\n self.portDic[\"Foreign Address\"]=split[4]\n self.portDic[\"State\"]=split[5]\n split_ID=split[6].split(\"/\")\n self.portDic[\"PID\"]=split_ID[0]\n self.portDic[\"Programme Name\"]=split_ID[1]\n self.portList.append(self.portDic.copy())\n return self.portList",
"def test_get_bindings_for_deployment(self):\n pass",
"def list_policy_profile_bindings(self, **params):\r\n return self.get(self.policy_profile_bindings_path, params=params)",
"def ls():\n cfgmgr = ConfigManager()\n apps = cfgmgr['apps']\n for i in apps:\n print(fc(\"- {g}{appname}{rst}\", appname=i))",
"def getListOfPorts(self):\n return _libsbml.CompModelPlugin_getListOfPorts(self)",
"def listAll(self):\n red = self.dbConnect()\n return red.keys()",
"def list(self):\n\t\tif self.client is None:\n\t\t\traise UsageError(\"Not connected!\")\n\t\treturn self.client.list_conns()",
"def exposed_ports(self) -> list[\"Port\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"exposedPorts\", _args)\n _ctx = Port(_ctx)._select_multiple(\n _description=\"description\",\n _port=\"port\",\n _protocol=\"protocol\",\n )\n return _ctx.execute_sync(list[Port])",
"def get_port_list(self):\r\n self.ports = Manager().dict()\r\n self.value = Manager().dict()\r\n self.sensors = dict()\r\n for p in self.device.ports['input']:\r\n if p.enabled:\r\n self.ports[p.number] = p\r\n self.value[p.number] = 'Connexion à la carte'\r\n self.sensors[p.number] = Sensor.get(p._type)",
"def test_listSSLPortWithoutAttributes(self):\n store = Store()\n factory = DummyFactory(store=store)\n port = SSLPort(store=store, factory=factory)\n self.assertSuccessStatus(self._makeConfig(store), [\"list\"])\n self.assertEqual(\n \"%d) %r listening on:\\n\" % (factory.storeID, factory) +\n \" %d) SSL, any interface, NO PORT, NO CERTIFICATE\\n\" % (\n port.storeID,),\n sys.stdout.getvalue())",
"def list_programs():\n return list(INFO)",
"def list_ports(bridge):\n cp = _run('ovs-vsctl', 'list-ports', bridge)\n return cp.stdout.splitlines()"
] | [
"0.6451805",
"0.62612075",
"0.58983856",
"0.5897845",
"0.579027",
"0.5786138",
"0.5704574",
"0.56907016",
"0.5677487",
"0.56535304",
"0.56521446",
"0.56430465",
"0.5607622",
"0.56039107",
"0.5600516",
"0.55635554",
"0.55596274",
"0.5467314",
"0.5458655",
"0.5453633",
"0.5441122",
"0.53847986",
"0.53679264",
"0.53041327",
"0.5298101",
"0.52958304",
"0.52626693",
"0.52498794",
"0.5249111",
"0.5238492"
] | 0.78534424 | 0 |
Adds a catalystport binding | def add_catalystport_binding(port_id, vlan_id):
LOG.debug("add_catalystport_binding() called")
session = db.get_session()
binding = catalyst_models.CatalystPortBinding(port_id, vlan_id)
session.add(binding)
session.flush()
return binding | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_binding(ctx, binding_name, pool_name, acl_name, nat_type, twice_nat_id):\n\n entryFound = False\n table = 'NAT_BINDINGS'\n key = binding_name\n dataKey1 = 'access_list'\n dataKey2 = 'nat_pool'\n dataKey3 = 'nat_type'\n dataKey4 = 'twice_nat_id'\n\n if acl_name is None:\n acl_name = \"\"\n\n if len(binding_name) > 32:\n ctx.fail(\"Invalid binding name. Maximum allowed binding name is 32 characters !!\")\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n data = config_db.get_entry(table, key)\n if data:\n if data[dataKey1] == acl_name and data[dataKey2] == pool_name:\n click.echo(\"Trying to add binding, which is already present.\")\n entryFound = True\n\n binding_dict = config_db.get_table(table)\n if len(binding_dict) == 16:\n click.echo(\"Failed to add binding, as already reached maximum binding limit 16.\")\n entryFound = True\n\n if nat_type is not None:\n if nat_type == \"dnat\":\n click.echo(\"Ignored, DNAT is not yet suported for Binding \")\n entryFound = True\n else:\n nat_type = \"snat\"\n\n if twice_nat_id is None:\n twice_nat_id = \"NULL\"\n\n if entryFound is False:\n count = 0\n if twice_nat_id is not None:\n count = getTwiceNatIdCountWithStaticEntries(twice_nat_id, 'STATIC_NAT', count)\n count = getTwiceNatIdCountWithStaticEntries(twice_nat_id, 'STATIC_NAPT', count)\n count = getTwiceNatIdCountWithDynamicBinding(twice_nat_id, count, key)\n if count > 1:\n ctx.fail(\"Same Twice nat id is not allowed for more than 2 entries!!\")\n\n config_db.set_entry(table, key, {dataKey1: acl_name, dataKey2: pool_name, dataKey3: nat_type, dataKey4: twice_nat_id})",
"def get_catalystport_binding(vland_id):\n LOG.debug(\"get_catlystport_binding() called\")\n session = db.get_session()\n try:\n binding = (session.query(catalyst_models.CatalystPortBinding). \\\n filter_by(vland_id).all())\n return binding\n except exc.NoresultFound:\n raise c_exc.CatalystPortBindingNotFound(vlan_id=vlan_id)",
"def add_port(self, port):\n self._main_model.add_port(port)",
"def add_port_acl(self, port, acl):\n raise NotImplementedError # pragma: no cover",
"def configure_dcbx_app(self, ports, **kwargs):\n pass",
"def bind_acl_to_ports(self, acl_name=None, ports=None):\n pass",
"def add_port(cls, port, ser):\n cls._open_ports[port] = ser",
"def addPort(self, *args):\n return _libsbml.CompModelPlugin_addPort(self, *args)",
"def add_port(self, port):\n self._ports.add(port)",
"def add_bindings(self, configuration, bind_to, typ, bindings):\n wanted = list(bindings.wanted(configuration[typ].values()))\n if not self.get_current(bind_to)[0]:\n log.info(\"Would bind <%s>(%s) to %s\", typ, ', '.join(wanted), bind_to.long_name)\n return\n\n for thing in wanted:\n bound = self.is_bound(typ, thing, bind_to.typ, bind_to.name)\n\n if not bound:\n log.info(\"Binding <%s>(%s) to %s\", typ, thing, bind_to.long_name)\n combined_typ, binding_name_str, name_str = self.combined_typ(bind_to.typ, typ)\n payload = {binding_name_str: bind_to.name, name_str: thing}\n payload.update(configuration[typ][thing].binding_options)\n self.post(combined_typ, {combined_typ: payload, \"params\": {\"action\": \"bind\"}}, content_type=self.content_type(combined_typ))\n else:\n log.debug(\"<%s(%s) already bound to %s\", typ, thing, bind_to.long_name)",
"def AddPortFlag(parser, required=False):\n help_text = \"\"\"\\\n Network port of the database.\n \"\"\"\n parser.add_argument('--port', help=help_text, required=required, type=int)",
"def _bind(self):\n\n pass",
"def addBoundConnection(self, connection):\r\n system_id = connection.system_id\r\n self.log.debug('Adding SMPP binding for %s' % system_id)\r\n if not system_id in self.bound_connections:\r\n self.bound_connections[system_id] = SMPPBindManager(system_id)\r\n self.bound_connections[system_id].addBinding(connection)\r\n bind_type = connection.bind_type\r\n self.log.info(\"Added %s bind for '%s'. Active binds: %s. Max binds: %s\" % (bind_type, system_id, self.getBoundConnectionCountsStr(system_id), self.config.systems[system_id]['max_bindings']))",
"def add_port_mac(self, context, port_dict):\n self._get_driver_for_provider(constants.l2gw\n ).add_port_mac(context, port_dict)",
"def add_binding(self, variable, value):\n # If there's already a binding, update it rather than add a new one.\n for binding in self.bindings:\n if binding.variable.name == variable:\n return self.update_binding(variable, value)\n variable = Variable(self.canvas, self, variable)\n binding = Binding(self.canvas, variable, value)\n self.bindings.append(binding)\n x, y = self.pos\n variable.set_pos(x + 10, y + len(self.bindings) * 20)\n if value.moves_with_binding:\n value.set_pos(x + 140, y + len(self.bindings) * 20)\n self.update()",
"def add_port(bridge, port, external_id=None):\n _run('ip', 'link', 'set', port, 'up')\n _run('ovs-vsctl', 'add-port', bridge, port)\n if external_id:\n ports = SimpleOVSDB('ovs-vsctl', 'port')\n for port in ports.find('name={}'.format(port)):\n ports.set(port['_uuid'],\n 'external_ids:{}'.format(external_id[0]),\n external_id[1])",
"def bind(self, address: Tuple[str, int]) -> None:\n ...",
"def remove_catalystport_binding(vlan_id):\n LOG.debug(\"remove_catalystport_binding() called\")\n session = db.get_session()\n try:\n binding = (session.query(catalyst_models.CatalystPortBinding).\n filter_by(vlan_id=vlan_id).all())\n for bind in binding:\n session.delete(bind)\n session.flush()\n return binding\n except exc.NoResultFound:\n pass",
"def add_service(torconfig, service, port=None):\n # picks a random port until it finds one avaible.\n while not service.tcp:\n port = port or new_port()\n try:\n service.tcp = reactor.listenTCP(port, service.factory)\n except error.CannotListenError:\n pass\n\n service.hs = txtorcon.HiddenService(\n torconfig, os.path.join(config.tor_data, service.name),\n ['%d 127.0.0.1:%d' % (service.port, port)])\n apaf.hiddenservices.append(service)",
"def visit_AttributeBinding(self, node):\n obj = self.stack[-1]\n py_ast = node.binding.expr.py_ast\n op = node.binding.op\n op_compiler = COMPILE_OP_MAP[op]\n code = op_compiler(py_ast, self.filename)\n binding = {\n 'operator': op,\n 'code': code,\n 'name': node.name,\n 'lineno': node.binding.lineno,\n 'filename': self.filename,\n 'block': self.block,\n }\n obj['bindings'].append(binding)",
"def extend_hosting_port_info(self, context, port_db, hosting_info):\n pass",
"def addBindingToFrame(var, val, frame):\n set_car(frame, cons(var, frame_variables(frame)))\n set_cdr(frame, cons(val, frame_values(frame)))\n return",
"def setup_logical_port_connectivity(self, context, port_db):\n pass",
"def add(env, identifier, **args):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n\n new_listener = {\n 'backendPort': args.get('backport'),\n 'backendProtocol': args.get('backprotocol') if args.get('backprotocol') else args.get('frontprotocol'),\n 'frontendPort': args.get('frontport'),\n 'frontendProtocol': args.get('frontprotocol'),\n 'loadBalancingMethod': args.get('method'),\n 'maxConn': args.get('connections', None),\n 'sessionType': args.get('sticky'),\n 'tlsCertificateId': args.get('sslcert')\n }\n\n try:\n mgr.add_lb_listener(uuid, new_listener)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')",
"def add_in_port(self, m: int, content: str, **opts) -> None:",
"def bind_sockets(port, address=..., family=..., backlog=..., flags=..., reuse_port=...):\n ...",
"def add_port(self, port):\n self.ports.append(port)\n if port.io_type not in self.port_seqs:\n self.port_seqs[port.io_type] = 0\n self.port_seqs[port.io_type] += 1\n port.sequence = self.port_seqs[port.io_type]\n return self",
"def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):\n project_id = project_id if project_id != '' else self.__project_id\n if ip_address == '':\n headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response\n ip = requests.get('http://ifconfig.co', headers)\n ip_address = ip.text.rstrip()\n logger.info(f'bind: looked up ip address: {ip_address}')\n #key = self.create_programatic_apikey(description=description,project_id=project_id)\n db_user = { 'username' : 'foo'\n ,'password' : 'changeme'\n ,'databaseName' : 'admin'\n ,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ] \n }\n user = self.create_database_user(db_user,project_id=project_id) \n cluster = self.get_cluster(cluster_name)\n cs = cluster['mongoURIWithOptions'].split('/',1)\n #conn_str = f'{cs[0]//{key['publicKey']}:{key['privateKey']}@{cs[1]}'\n return conn_str",
"def add_reserved_port(port):\n _free_ports.add(port)",
"def column_bind(arguments):\n return Component(\n \"ColumnBind\",\n arguments=arguments,\n options={\n \n },\n constraints=None)"
] | [
"0.6066342",
"0.6021122",
"0.5959869",
"0.5917959",
"0.5857227",
"0.5852962",
"0.5653113",
"0.56476676",
"0.55153096",
"0.5501996",
"0.5498605",
"0.5429331",
"0.5401681",
"0.53849334",
"0.5362603",
"0.53139096",
"0.52629244",
"0.5251845",
"0.5244616",
"0.5243897",
"0.52343386",
"0.520219",
"0.51781756",
"0.51657325",
"0.5161052",
"0.51316255",
"0.5123875",
"0.51179594",
"0.5111514",
"0.5097052"
] | 0.7463989 | 0 |
Removes a catalystport binding | def remove_catalystport_binding(vlan_id):
LOG.debug("remove_catalystport_binding() called")
session = db.get_session()
try:
binding = (session.query(catalyst_models.CatalystPortBinding).
filter_by(vlan_id=vlan_id).all())
for bind in binding:
session.delete(bind)
session.flush()
return binding
except exc.NoResultFound:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_binding(ctx, binding_name):\n\n entryFound = False\n table = 'NAT_BINDINGS'\n key = binding_name\n\n if len(binding_name) > 32:\n ctx.fail(\"Invalid binding name. Maximum allowed binding name is 32 characters !!\")\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n data = config_db.get_entry(table, key)\n if not data:\n click.echo(\"Trying to delete binding, which is not present.\")\n entryFound = True\n\n if entryFound == False:\n config_db.set_entry(table, key, None)",
"def remove_bindings(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n binding_table_name = 'NAT_BINDINGS'\n binding_dict = config_db.get_table(binding_table_name)\n if binding_dict:\n for binding_key_name in binding_dict:\n config_db.set_entry(binding_table_name, binding_key_name, None)",
"def removePort(self, *args):\n return _libsbml.CompModelPlugin_removePort(self, *args)",
"def remove_port(cls, port):\n if port in cls._open_ports:\n if cls._open_ports[port].is_open:\n cls._open_ports[port].close()\n del cls._open_ports[port]",
"def remove_port(self, port):\n if port not in self.ports:\n return False\n del self.ports[port]\n return True",
"def port_delete(switch, port):\n client.port.delete(switch, port)",
"def delete_port(self, port):\r\n return self.delete(self.port_path % (port))",
"def removeConnection(self, connection):\r\n if connection.system_id is None:\r\n self.log.debug(\"SMPP connection attempt failed without binding.\")\r\n else:\r\n system_id = connection.system_id\r\n bind_type = connection.bind_type\r\n self.bound_connections[system_id].removeBinding(connection)\r\n self.log.info(\"Dropped %s bind for '%s'. Active binds: %s. Max binds: %s\" % (bind_type, system_id, self.getBoundConnectionCountsStr(system_id), self.config.systems[system_id]['max_bindings']))\r\n # If this is the last binding for this service then remove the BindManager\r\n if self.bound_connections[system_id].getBindingCount() == 0:\r\n self.bound_connections.pop(system_id)",
"def del_port(bridge, port):\n _run('ovs-vsctl', 'del-port', bridge, port)",
"def remove_console_setting(db, linenum):\n config_db = db.cfgdb\n\n table = \"CONSOLE_PORT\"\n\n data = config_db.get_entry(table, linenum)\n if data:\n config_db.mod_entry(table, linenum, None)\n else:\n ctx = click.get_current_context()\n ctx.fail(\"Trying to delete console port setting, which is not present.\")",
"def unload_bindings(self):\n self.ignoreAll()",
"def delete(self, request, cluster_id, bind_id, service_id=None): # pylint: disable=arguments-differ\n bind = self.get_obj(cluster_id, service_id, bind_id)\n cm.api.unbind(bind)\n return Response(status=status.HTTP_204_NO_CONTENT)",
"def removeOutputBinding(self, factory, product):\n # remove the {product} monitor from my pile of observers\n self.removeObserver(observer=product.pyre_status)\n # and chain up\n return super().removeOutputBinding(factory=factory, product=product)",
"def teardown_logical_port_connectivity(self, context, port_db):\n pass",
"def delete_lag_ports(self, ports, lag):\n pass",
"def delete_port_acl(self, port, acl):\n raise NotImplementedError # pragma: no cover",
"def port_delete_end(self, payload):\n port = self.cache.get_port_by_id(payload['port_id'])\n if port:\n network = self.cache.get_network_by_id(port.network_id)\n self.cache.remove_port(port)\n self.call_driver('reload_allocations', network)",
"def remove_pools(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n entryFound = False\n pool_table_name = 'NAT_POOL'\n binding_table_name = 'NAT_BINDINGS'\n binding_dict = config_db.get_table(binding_table_name)\n pool_dict = config_db.get_table(pool_table_name)\n if pool_dict:\n for pool_key_name in pool_dict:\n entryFound = False\n for binding_name, binding_values in binding_dict.items():\n if binding_values['nat_pool'] == pool_key_name:\n click.echo(\"Pool {} is not removed, as it is mapped to Binding {}, remove the pool binding first !!\".format(pool_key_name,binding_name))\n entryFound = True\n break\n\n if entryFound == False: \n config_db.set_entry(pool_table_name, pool_key_name, None)",
"def delete_port_postcommit(self, context):\n if self.rpc_handler is None:\n return\n port = self._get_port_info(context)\n if port is not None:\n try:\n self.rpc_handler.delete_port(port)\n except:\n pass",
"def test_dhcp_bind_uninstall(self):\n self._common_uninstall_delete(\n 'esg_id|bind_id', dhcp_bind.delete,\n {'bind': {}},\n delete_args=['dhcpStaticBindingID'],\n delete_kwargs={\n 'uri_parameters': {'edgeId': 'esg_id', 'bindingID': 'bind_id'}\n }\n )",
"def remove_app(self):\n \n pass",
"def unbind(cls, name: str):\n if cls.instance() is None:\n return\n\n if not name in cls.instance().m_axis_bindings and not name in cls.instance().m_button_bindings:\n print( 'Unable to unbind: {}. Name not bound to axis or button.'.format( name ) )\n return\n\n if name in cls.instance().m_axis_bindings:\n axis = cls.instance().m_axis_bindings[ name ].axis\n del cls.instance().m_axis_bindings[ name ]\n del cls.instance().m_axis_name_table[ axis ]\n if name in cls.instance().m_button_bindings:\n button = cls.instance().m_button_bindings[ name ].button\n del cls.instance().m_button_bindings[ name ]\n del cls.instance().m_button_name_table[ button ]",
"async def _async_delete_port_mapping(self, external_port: int) -> None:\n entry = external_port\n self.removed_port_mappings.append(entry)",
"def delete_port_mac(self, context, port):\n self._get_driver_for_provider(constants.l2gw\n ).delete_port_mac(context, port)",
"def without_exposed_port(\n self,\n port: int,\n protocol: Optional[NetworkProtocol] = None,\n ) -> \"Container\":\n _args = [\n Arg(\"port\", port),\n Arg(\"protocol\", protocol, None),\n ]\n _ctx = self._select(\"withoutExposedPort\", _args)\n return Container(_ctx)",
"def port_nic_remove(switch, port):\n client.port.detach_nic(switch, port)",
"def _do_backend_unbind(self, backend, port_id):\n\n driver = self.backend_manager.get_backend_driver(backend)\n driver.unbind(port_id)",
"def removeConstraint(self, constraint: Constraint, /) -> None:\n ...",
"def unsetPortRef(self):\n return _libsbml.SBaseRef_unsetPortRef(self)",
"def RemoveIamPolicyBinding(zone_ref, member, role):\n policy = GetIamPolicy(zone_ref)\n iam_util.RemoveBindingFromIamPolicy(policy, member, role)\n return SetIamPolicy(zone_ref, policy)"
] | [
"0.73616654",
"0.6799897",
"0.6344198",
"0.6337611",
"0.617905",
"0.6130057",
"0.61269933",
"0.6104561",
"0.5963844",
"0.5883558",
"0.58721596",
"0.5811271",
"0.5778574",
"0.5777863",
"0.57622343",
"0.5755362",
"0.5706742",
"0.5679512",
"0.5677332",
"0.5649031",
"0.5630688",
"0.55856186",
"0.55844265",
"0.5553671",
"0.5516873",
"0.55082446",
"0.54849905",
"0.54827493",
"0.54577625",
"0.5456618"
] | 0.7389213 | 0 |
Test whether the numpy data type `dt` can be safely cast to an int. | def _safely_castable_to_int(dt):
int_size = np.dtype(int).itemsize
safe = (np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or (
np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size
)
return safe | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_integer(x):\n return (not isinstance(x, (bool, np.bool))) and \\\n isinstance(x, (numbers.Integral, int, np.int, np.long, long)) # no long type in python 3",
"def is_int(x):\n # From sktime: BSD 3-Clause\n # boolean are subclasses of integers in Python, so explicitly exclude them\n return isinstance(x, (int, np.integer)) and not isinstance(x, bool)",
"def is_int(value):\n return isinstance(value, int)",
"def is_convertible_to_int(v: Any) -> bool:\n\n try:\n test = int(v)\n return True\n except:\n return False",
"def isInteger(data):\n\tif type(data) == list or type(data) == np.ndarray:\n\t\tcol = pd.Series(data)\n\telse:\n\t\tcol = data\n\treturn col.dtype == np.int32 or col.dtype == np.int64",
"def _is_int(test_val):\n try:\n int(test_val)\n return True\n except ValueError:\n return False",
"def is_int(self):\n return self.value_type in (int, arrow.JuArrow)",
"def DataIsInteger(self):\n return self.data_type in (\n definitions.REG_DWORD, definitions.REG_DWORD_BIG_ENDIAN,\n definitions.REG_QWORD)",
"def is_int(x):\n # boolean are subclasses of integers in Python, so explicitly exclude them\n return isinstance(x, (int, np.integer)) and not isinstance(x, bool)",
"def isInteger(data):\n\ttry:\n\t\tfrom types import LongType, IntType\n\t\tif type(data) == LongType or type(data) == IntType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(int(0)):\n\t\t\treturn True\n\treturn False",
"def _check_index(idx):\n return isinstance(idx, _Int)",
"def check_for_int(check):",
"def is_integer(matrix):\n return numpy.issubdtype(matrix.dtype, numpy.integer)",
"def could_be_int(val):\n if val == None:\n return False\n\n if isinstance(val, int):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n i = int(val)\n if not isinstance(i, int):\n raise ValueError\n else:\n return True\n except:\n return False\n\n # otherwise\n return False",
"def is_int(x):\n return int(x) == x",
"def is_int(self): \n return (self._den == 1)",
"def check_if_input_is_int(self):\n try:\n int(self.input)\n except ValueError:\n return False\n else:\n return True",
"def _is_valid_target_int(self, target):\n if isinstance(target, (int, np.int, np.int8, np.int16, np.int32, np.int64)):\n return True\n else:\n return False",
"def is_int(n):\n try:\n int(n)\n return True\n except ValueError:\n return False",
"def _is_integer_like(input):\n if _is_boolean_like(input):\n return True\n if type(input) is int:\n return True\n if isinstance(input, _ScalarConstant):\n if input.dtype in _int_like_types:\n return True\n return False",
"def is_int(self, val):\n try:\n int(val)\n return True\n except ValueError:\n return False",
"def is_intscalar(x: Any) -> bool:\r\n return isinstance(x, (\r\n int,\r\n np.int8,\r\n np.int16,\r\n np.int32,\r\n np.int64,\r\n np.uint8,\r\n np.uint16,\r\n np.uint32,\r\n np.uint64,\r\n ))",
"def is_int(value):\n try:\n int(value)\n except ValueError:\n return False\n else:\n return True",
"def is_int(value):\n try:\n int(value)\n return True\n except ValueError:\n return False",
"def is_int(value):\n try:\n int(value)\n return True\n except ValueError:\n return False",
"def is_int(num):\n return int(num) == num",
"def isinteger(self):\n return self.den == 1",
"def is_int(self, size=None):\n return False",
"def isInteger(self):",
"def isInteger(self):"
] | [
"0.68389726",
"0.67490387",
"0.6626902",
"0.6598004",
"0.6508628",
"0.64784265",
"0.64210194",
"0.6398246",
"0.63903487",
"0.6372374",
"0.6356024",
"0.6314418",
"0.63074124",
"0.6301813",
"0.6293644",
"0.62889963",
"0.6231258",
"0.62195265",
"0.6181786",
"0.6180847",
"0.6174092",
"0.61200774",
"0.6115712",
"0.6110412",
"0.6110412",
"0.61072934",
"0.60870695",
"0.6042212",
"0.604044",
"0.604044"
] | 0.8405704 | 0 |
Starts a while loop that sends 'command' to tello every 5 seconds. | def _sendingCommand(self):
while True:
self.tello.send_command('command')
time.sleep(5) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Listen(self):\n while True:\n time.sleep(1)",
"def run(self):\n while True:\n time.sleep(RTM_READ_DELAY)\n for event in self._slack_client.rtm_read():\n self.handle_event(event)",
"def run():\n # 1 sec delay to allow DHT22 sensor to start as per datasheet\n sleep_ms(1000)\n last_run = ticks_ms()\n _read()\n\n while True:\n if ticks_diff(ticks_ms(), last_run) > _READING_DELAY_MS:\n last_run = ticks_ms()\n _read()\n\n _signal_alive()\n sleep_ms(1000)",
"def run(self):\n self.cmdloop()",
"def init_trig(motion):\n for x in range(40):\n # Loop for 40 iterations.\n time.sleep(0.1)\n # Delay for 10ms.\n if (GPIO.input(motion) == 1):\n # Determines if corresponding motion sensor is triggered.\n TCP.client(str(motion))\n # Sends direction of motion as message to the connected server.\n break",
"async def do(ctx, times : int, *, command):\n msg = copy.copy(ctx.message)\n msg.content = command\n for i in range(times):\n await bot.process_commands(msg)",
"def loop(self):\n while True:\n delay_until_next = self.tick()\n if not delay_until_next:\n break\n self._sleep(delay_until_next)",
"def run(self):\n while self._running:\n temp = self._sense.get_temperature()\n self.display_temperature(temp)\n sleep(10)\n self._sense.clear()",
"def run(self):\n while True:\n print(\"I'm running in the background\")\n time.sleep(self.interval)",
"def feed(self, amount=network.default_listen_time):\n asyncore.loop(timeout=amount, count=1)",
"def loop_forever(self):\n self.client.loop_forever()",
"def run(self):\n run1=0\n while (run1==0):\n Publisher().sendMessage(\"updatetext\", \"\")\n time.sleep(3)",
"def loop_forever(self):\n while self.running:\n time.sleep(0.01)",
"def loop():\n\n load_config_project()\n\n L.debug(\"running with version: %s\", sys.version)\n is_version_2 = sys.version.startswith(\"2\")\n while True:\n response = ''\n if num_version == 2:\n response = raw_input(\"Enter command:\")\n if num_version == 3:\n response = input(\"Enter command:\")\n\n if response != '':\n commander.parse(response)\n sleep(0.5)",
"def run(self):\n while True:\n # Do something\n print('Doing something imporant in the background')\n\n self.loadData()\n time.sleep(self.interval)",
"def loop_forever(self):\n self.running = True\n while self.running:\n time.sleep(0.1)",
"def run(self):\n while True:\n self.sm.run()\n time.sleep(0.05)",
"def run(self):\n time_to_quit = False\n while True:\n time_to_quit = self.run_to_yield_or_quit()\n if time_to_quit:\n print(self, 'quitting')\n break\n else:\n time.sleep(self.polling_interval)",
"async def check():\r\n while True:\r\n if rss.check_new():\r\n item = rss.most_recent()\r\n queue = format_message.format_notes(item)\r\n for message in queue:\r\n await client.send_message(client.get_channel(\"350634825516056577\"), message)\r\n await asyncio.sleep(28800) # Check every 8 hours\r",
"async def loop():\n # ArmDevice.storage.joints_pos = await get_positions() # Use this if encoders are wired up.\n # ArmDevice.storage.joints_pos = simulate_positions() # Use this for testing without position feedback.\n log.debug(\"command: {}\".format(ArmDevice.storage.command))\n ArmDevice.storage.controller.user_command(ArmDevice.storage.mode, *ArmDevice.storage.command)\n ArmDevice.storage.speeds = ArmDevice.storage.controller.update_duties(ArmDevice.storage.joints_pos)\n\n # publish speeds/duty cycles here\n log.debug(\"joints_pos: {}\".format(ArmDevice.storage.joints_pos))\n log.debug(\"speeds: {}\".format(ArmDevice.storage.speeds))\n await send_duties()",
"def ping_moonrat():\n threading.Timer(3600, ping_moonrat).start()\n text = \"Moonrat is still active\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel='G9P7X8Q0H',\n text=text,\n )",
"def loop(self,timeout=1):\n self.stream.loop(timeout)",
"def run(self):\n r = rospy.Rate(100)\n while not rospy.is_shutdown():\n r.sleep()",
"def handle_chat(self):\n while True:\n if self.chat_breakout:\n return\n\n time.sleep(1)\n messages = \"\"\n for i in range(5):\n try:\n messages += f\"{self.queue.popleft()}\\n\"\n except IndexError:\n # Queue is empty but no worries\n continue\n\n if messages != \"\":\n self.loop.create_task(\n self.ingame_cog.send_chat_to_discord(\n self.bot, self.channel, messages\n )\n )",
"def run_aqi(self):\r\n while True:\r\n self.get_aqi()\r\n time.sleep(30 - time.time() % 30)",
"def run_forever(self):\n while True:\n self.run_once()\n\n self.logger.info(f\"Sleeping for {self.config.sleep()} seconds\")\n time.sleep(self.config.sleep())",
"def run(self):\n while not rospy.is_shutdown():\n if self.state is not None:\n # get robot state\n x = self.state.pose.position.x\n y = self.state.pose.position.y\n quat = [\n self.state.pose.orientation.x,\n self.state.pose.orientation.y,\n self.state.pose.orientation.z,\n self.state.pose.orientation.w\n ]\n eulers = tf.transformations.euler_from_quaternion(quat, 'sxyz')\n # generate ping and send\n ping, heading = self.sim.gen_ping((x, y), eulers[2])\n self.__socket.send(self.__create_msg(ping))\n rospy.logdebug(\n \"Sent ping with heading: \" + str(np.rad2deg(heading)))\n self.rate.sleep()",
"def run_chat_client():\r\n while must_run:\r\n print_menu()\r\n action = select_user_action()\r\n perform_user_action(action)\r\n print(\"Thanks for watching. Like and subscribe! 👍\")",
"def __call__(self):\n hub.sleep(random.randint(1, self.interval))\n while True:\n self.send_req()\n self.reply_pending = True\n hub.sleep(self.interval)\n if self.reply_pending:\n self.no_response()",
"def auto_connect(address):\r\n _connected=False\r\n _timeout=100\r\n _count = 0\r\n\r\n while not _connected:\r\n try:\r\n _c = telnetlib.Telnet(address)\r\n if _c.sock is not None:\r\n _connected = True\r\n\r\n except socket.error:\r\n _count = _count + 1\r\n print \"Trima socket not ready, waiting to retry, attempt #\"+str(_count)\r\n time.sleep(5)\r\n\r\n print(\"Trima Telnet Connection Ready\")"
] | [
"0.63576066",
"0.6166633",
"0.60652816",
"0.60415244",
"0.60114336",
"0.59595996",
"0.595747",
"0.5923307",
"0.5898725",
"0.58431983",
"0.58387035",
"0.58356047",
"0.5832241",
"0.56968737",
"0.5675272",
"0.56747895",
"0.56568396",
"0.56439304",
"0.562048",
"0.56196946",
"0.5619503",
"0.5618823",
"0.5618439",
"0.56009734",
"0.55994874",
"0.5590761",
"0.5589073",
"0.5580813",
"0.55764973",
"0.55510527"
] | 0.7657067 | 0 |
Open the cmd window and initialize all the buttons and text. | def openCmdWindow(self):
panel = Toplevel(self.root)
panel.wm_title('Command Panel')
# create text input entry
text0 = tki.Label(panel,
text='This Controller map keyboard inputs to Tello control commands\n'
'Adjust the trackbar to reset distance and degree parameter',
font='Helvetica 10 bold'
)
text0.pack(side='top')
text1 = tki.Label(panel, text=
'W - Move Tello Up\t\t\tArrow Up - Move Tello Forward\n'
'S - Move Tello Down\t\t\tArrow Down - Move Tello Backward\n'
'A - Rotate Tello Counter-Clockwise\tArrow Left - Move Tello Left\n'
'D - Rotate Tello Clockwise\t\tArrow Right - Move Tello Right',
justify='left')
text1.pack(side='top')
self.btn_landing = tki.Button(
panel, text='Land', relief='raised', command=self.telloLanding)
self.btn_landing.pack(side='bottom', fill='both',
expand='yes', padx=10, pady=5)
self.btn_takeoff = tki.Button(
panel, text='Takeoff', relief='raised', command=self.telloTakeOff)
self.btn_takeoff.pack(side='bottom', fill='both',
expand='yes', padx=10, pady=5)
# binding arrow keys to drone control
self.tmp_f = tki.Frame(panel, width=100, height=2)
self.tmp_f.bind('<KeyPress-w>', self.on_keypress_w)
self.tmp_f.bind('<KeyPress-s>', self.on_keypress_s)
self.tmp_f.bind('<KeyPress-a>', self.on_keypress_a)
self.tmp_f.bind('<KeyPress-d>', self.on_keypress_d)
self.tmp_f.bind('<KeyPress-Up>', self.on_keypress_up)
self.tmp_f.bind('<KeyPress-Down>', self.on_keypress_down)
self.tmp_f.bind('<KeyPress-Left>', self.on_keypress_left)
self.tmp_f.bind('<KeyPress-Right>', self.on_keypress_right)
self.tmp_f.pack(side='bottom')
self.tmp_f.focus_set()
self.btn_landing = tki.Button(
panel, text='Flip', relief='raised', command=self.openFlipWindow)
self.btn_landing.pack(side='bottom', fill='both',
expand='yes', padx=10, pady=5)
self.distance_bar = Scale(panel, from_=0.02, to=5, tickinterval=0.01,
digits=3, label='Distance(m)',
resolution=0.01)
self.distance_bar.set(0.2)
self.distance_bar.pack(side='left')
self.btn_distance = tki.Button(panel, text='Reset Distance', relief='raised',
command=self.updateDistancebar,
)
self.btn_distance.pack(side='left', fill='both',
expand='yes', padx=10, pady=5)
self.degree_bar = Scale(panel, from_=1, to=360, tickinterval=10, label='Degree')
self.degree_bar.set(30)
self.degree_bar.pack(side='right')
self.btn_distance = tki.Button(panel, text='Reset Degree', relief='raised',
command=self.updateDegreebar)
self.btn_distance.pack(side='right', fill='both',
expand='yes', padx=10, pady=5) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def open(self):\n self.state = True\n self.mainwindow.sendMessage('a')\n print(\"opening \" + self.name)",
"def build_initial() :\r\n titleframe = T.Frame(ROOT)\r\n TITLE = T.Label(titleframe, text = \"Welcome to Microgp!\")\r\n var = T.StringVar()\r\n INSTRUCTIONS = T.Message(titleframe, textvariable = var, width = 100)\r\n var.set(\"By Erik and Sam\")\r\n instruct_b = T.Button(titleframe, text = \"Instructions\",\r\n command = get_instructions)\r\n instruct_b.pack(side = T.BOTTOM)\r\n TITLE.pack(side = T.TOP)\r\n INSTRUCTIONS.pack(side = T.BOTTOM)\r\n titleframe.pack()",
"def helpButton():\n nuke.tcl(\"start\", HELP_PAGE)",
"def open_launcher(self):\n vim.command('silent! botright split {0}'.format(self.name))\n self.setup_buffer()",
"def buttonPress(self, argv):\n self.entry.insert(END, argv)",
"def exec_init_cmd(self):\n\n sys.argv = ['-c']\n self.push(self.rc.c)",
"def __init__(self, master, text, command=None):\r\n Button.__init__(self, master=master, text=text, height=1, command=command)",
"def openTB1Settings(self):\n self.TB1_Window = QtWidgets.QDialog()\n self.TB1_ui = Ui_robotOneConfig()\n self.TB1_ui.setupUi(self.TB1_Window)\n self.TB1_Window.show()",
"def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()",
"def open(self):\n self._command = \"open\"",
"def initGui(self):\n\n icon_path = ':/plugins/MCDM/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'MCDM'),\n callback=self.run,\n parent=self.iface.mainWindow())\n\n # will be set False in run()\n self.first_start = True",
"def start(self):\n self.menu()",
"def display(self):\n\t\tprint('The button in the window was clicked!')",
"def focus_on(window):\n return Cmd(\"{}wincmd w\", window)",
"def start(self):\n print(\"*\"*20)\n print(\"*\" + \" \"*18 + \"*\")\n print(\"*\" + \" \"*4 + \"Connect 4X\" + \" \"*4 + \"*\")\n print(\"*\" + \" \" * 18 + \"*\")\n print(\"*\" * 20)\n print(\"\\nConsole Version 1.0.0\\n\")\n self.print_menu()\n self.get_input()",
"def startSelectedMode(self):\n selectedBtn = self.buttons_group.checkedButton()\n if selectedBtn is None:\n QMessageBox.information(self, 'Attention', 'Aucun mode selectionné.\\n'\n 'Vous devez choisir un mode avant de le lancer.')\n return\n\n user = self.mainwindow.currentUser\n try:\n widget = selectedBtn.constructor(user.get_mode(selectedBtn.id))\n except:\n widget = selectedBtn.constructor(None)\n widget.closeRequested.connect(self.closeWidget)\n\n self.mainwindow.setWindowTitle(\"Consmaster\" +\n ' [' + selectedBtn.text().replace('\\n', '') + ']')\n\n self.mainwindow.central_widget.addWidget(widget)\n self.mainwindow.central_widget.setCurrentWidget(widget)",
"def display_main(self):\n self.clear_terminal()\n self.main_menu()\n self.handle_selection_main()",
"def showGUI(self,**kwargs):\n self.baxter.menu.select(self.modes[0])",
"def startapp(self, command):\n e = self.emu\n e.alt(\"F2\")\n e.shortwait()\n e.clickat(self.screen.center)\n e.shortwait()\n e.type(command + \"\\n\")\n e.longwait()",
"def _doOpenTool(self):\n self._cmdOpenTool()",
"def __init__(self, win):\n Menu.__init__(self, ['START', 'QUIT'], FONT_LA_CASA, win)",
"def main_menu_for_testing():\n print(PROMPT_TEXT)",
"def do_activate(self):\n\n Gtk.Application.do_activate(self)\n self.initiate_plugins()\n self.other[\"menu_button\"].set_menu_model(self.prepare_menu())\n self.output_window.show_all()\n self.window.show_all()",
"def show(self):\n\n self.serial = self.parent.board.serial\n self.deiconify() # Show window\n self.visible = True\n\n self.input_entry.focus()\n\n self.start_repl()",
"def _open_window(self):\r\n\t\t# Creating the window\r\n\t\tself._window = Window(self, Locations.RESTAL)",
"def initGui(self):\n\n icon_path = ':/plugins/TMD/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Weather Today'),\n callback=self.run,\n parent=self.iface.mainWindow())\n\n # will be set False in run()\n self.first_start = True",
"def initGui(self):\n\n icon_path = ':/plugins/save_attributes/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u''),\n callback=self.run,\n parent=self.iface.mainWindow())\n\n # will be set False in run()\n self.first_start = True",
"def open(self, wait=True):\n self.gripper.actuate(1, 1)",
"def launchHelpWindow(self):\r\n self.popup(\"Help\",HELP,geom=\"350x200\")",
"def start(self):\n self.print_separator()\n self.stdscr.box()\n\n box = curses.newwin(4, self.maxx-8, self.pad, self.pad)\n box.addstr(1,1,\"hello\")\n while self.running:\n # Enter the main program loop\n key = self.stdscr.getkey()\n for fn in [self.stdscr.clear,\n lambda: self.handle_key(key),\n self.update_xy,\n self.print_pattern,\n self.print_separator,\n self.stdscr.box,\n self.generate_menu_items,\n self.print_menu_items,\n self.print_current_selection,\n self.stdscr.refresh]:\n fn()"
] | [
"0.61820495",
"0.61781216",
"0.6119948",
"0.60934174",
"0.60702914",
"0.5959592",
"0.5948371",
"0.5943641",
"0.593772",
"0.5910269",
"0.59019053",
"0.5892788",
"0.58925205",
"0.5891495",
"0.5889927",
"0.58737874",
"0.5858974",
"0.5835644",
"0.5834646",
"0.58337766",
"0.58289456",
"0.5808443",
"0.5792496",
"0.57748127",
"0.5762847",
"0.57525045",
"0.57489944",
"0.57435477",
"0.5724686",
"0.57101214"
] | 0.68487906 | 0 |
Open the flip window and initialize all the buttons and text. | def openFlipWindow(self):
panel = Toplevel(self.root)
panel.wm_title('Gesture Recognition')
self.btn_flipl = tki.Button(
panel, text='Flip Left', relief='raised', command=self.telloFlip_l)
self.btn_flipl.pack(side='bottom', fill='both',
expand='yes', padx=10, pady=5)
self.btn_flipr = tki.Button(
panel, text='Flip Right', relief='raised', command=self.telloFlip_r)
self.btn_flipr.pack(side='bottom', fill='both',
expand='yes', padx=10, pady=5)
self.btn_flipf = tki.Button(
panel, text='Flip Forward', relief='raised', command=self.telloFlip_f)
self.btn_flipf.pack(side='bottom', fill='both',
expand='yes', padx=10, pady=5)
self.btn_flipb = tki.Button(
panel, text='Flip Backward', relief='raised', command=self.telloFlip_b)
self.btn_flipb.pack(side='bottom', fill='both',
expand='yes', padx=10, pady=5) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def switch_state():\n\tDmg.OpenWindow()",
"def show(self):\r\n self.wf.Show()",
"def show(self, window):\r\n\r\n return",
"def finish_render():\n get_window().static_display = True\n get_window().flip_count = 0\n get_window().flip()",
"def cb_main_window(self, event):\n self.main_frame.Show()",
"def show_window(self):\n self.show()",
"def show(self):\n self.Show()",
"def flip(self):",
"def debwt_window(self: object) -> None:\n if self.file:\n debwt_window = Toplevel(self)\n debwt_window.title(\"Reversing Burros-Wheeler Transform\")\n debwt_window.geometry(\"1000x1000\")\n debwt_window.configure(bg='#ebebeb')\n controller = BWDecoder(self.file)\n controller.decode()\n protocol = self.DeBW_output(controller)\n prot= list(protocol)\n\n reconstructed = [\"Step 2: Creating the Burros-Wheeler Matrix\" for n in range(len(prot) - 2)]\n\n names = (step for step in [\"Step 1 : Visualizing the sequence\",\n *reconstructed,\n \"Step 3: The original sequence is the one that has a $ sign as a last column\",\n \"Please refer to the main menu to select another sequence\"])\n\n self.step_by_step(debwt_window, iter(prot), names)\n self.program_output(debwt_window, controller.debwt_output)\n\n else:\n self.no_file_error()",
"def show(self, initial = 0):\n self.Show(1)\n# self.update_title()\n# print 'showing'\n if initial:\n self.initial_show()",
"def flip(self):\n \n if self.faceup:\n self.faceup = False\n else:\n self.faceup = True",
"def open(self):\n self.state = True\n self.mainwindow.sendMessage('a')\n print(\"opening \" + self.name)",
"def window(main):\r\n main.title(\"BinCryptor 1.0\")\r\n main.update_idletasks()\r\n width = main.winfo_width() #Width of the current screen\r\n height = main.winfo_height() #Height of the current screen\r\n x = (main.winfo_screenwidth() // 2) - (width // 2)\r\n y = (main.winfo_screenheight() // 2) - (height // 2)\r\n main.geometry(f'{width}x{height}+{x}+{y}') #Adjusts the height and width\r",
"def open_preferences(self, event):\n editPrefFrame = Single_pref(parent=self, ID=997)\n editPrefFrame.Centre()\n editPrefFrame.Show()\n editPrefFrame.ShowModal()\n editPrefFrame.Destroy()",
"def show(self):\n # * displays the window, after using either the iconify or the withdraw methods\n self.wm_deiconify()\n # * this method can be called after the event which needs to happen before the window event\n self.wait_window()",
"def start(self):\n self.delegate.start_preview(fullscreen=False, window = (350, 10, self.size[0] - 350, self.size[1] - 10))",
"def flip_faceup(self):\r\n self.faceup = True",
"def openPremade(self):\n dialog = PremadeFrame(self)\n if dialog.ShowModal() == wx.ID_OK:\n newClass = dialog.selectedClass\n newFrame = newClass(self)\n self.frames.append(newFrame)\n self.names.append(newFrame.experimentName)\n newFrame.Show()\n newFrame.Maximize()\n self.Show(False)",
"def buttonPress(self):\n if self.inPlay and not self.shown:\n self.configure(image = Tile.images[0])",
"def on_activate(self, caller):\n self.window = GameWindow()\n self.add_window(self.window)",
"def _open_window(self):\r\n\t\t# Creating the window\r\n\t\tself._window = Window(self, Locations.RESTAL)",
"def onClick(self):\n self.app.setActiveMode(\"start\")",
"def do_activate(self):\n\n Gtk.Application.do_activate(self)\n self.initiate_plugins()\n self.other[\"menu_button\"].set_menu_model(self.prepare_menu())\n self.output_window.show_all()\n self.window.show_all()",
"def open_generatorWindow(self):\n self.window = generatorWindow(self)\n self.hide()",
"def setHorizontalFlip(self, flag):\n\t\tself.flipHorizontally = flag",
"def selection_form():\n sg.theme('DarkBlue') \n layout = [\n [\n sg.Button(\"Encrypt Files\"),\n sg.Button(\"View Images\")\n ]\n ]\n \n window = sg.Window('Encrypted Image Viewer', layout)\n while True:\n event, values = window.read()\n if event == \"Exit\" or event == sg.WIN_CLOSED:\n sys.exit()\n if event == \"Encrypt Files\":\n window.close()\n encryption_form()\n \n if event == \"View Images\":\n window.close()\n image_viewer()",
"def setupWindow(self):\n\n\t\tself.main_menu_window = MenuFrame.MainMenuFrame(self.uiCoordinator)\n\t\tself.menu_window = self.main_menu_window._mf\n\t\tself.score_window = self.main_menu_window._hf\n\t\tself.instructions_window = self.main_menu_window._if\n\t\tself.menu_window.playButton.focus_set()",
"def display(self):\n\t\tprint('The button in the window was clicked!')",
"def start():\r\n window = loop_menuDolar()\r\n window.close()",
"def flip(self):\n if self.is_face_up:\n arcade.load_texture(self.back_file)\n self.is_face_up = False\n else:\n arcade.load_texture(self.face_file)\n self.is_face_up = True"
] | [
"0.6244811",
"0.61130655",
"0.60361177",
"0.58716136",
"0.5835317",
"0.5802721",
"0.58015156",
"0.57977974",
"0.57581085",
"0.5736185",
"0.5727664",
"0.56942797",
"0.5663911",
"0.5637016",
"0.5626047",
"0.5598497",
"0.55893236",
"0.5587088",
"0.55717754",
"0.55463487",
"0.5534851",
"0.5523327",
"0.55169094",
"0.55123574",
"0.55033803",
"0.5497679",
"0.5496618",
"0.54909104",
"0.54730654",
"0.54510957"
] | 0.8400445 | 0 |