query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4-10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
This function adds ZIPCODE and ZIPCODE_AGI columns to the DataFrame.
|
import json

import googlemaps
import pandas as pd


def merge_zipcode_agi(df_turnstiles):
    # define URLs for MTA station data & IRS income info
    mta_url = "http://web.mta.info/developers/data/nyct/subway/Stations.csv"
    irs_url = "https://www.irs.gov/pub/irs-soi/18zpallagi.csv"

    # collect & clean MTA station info
    mta_station_info = pd.read_csv(mta_url)
    mta_station_info.rename(
        columns={
            "Stop Name": "STATION",
            "GTFS Latitude": "Lat",
            "GTFS Longitude": "Lon",
        },
        inplace=True,
    )
    # normalize a few station names that differ between the two tables
    mta_station_info["STATION"] = mta_station_info["STATION"].replace(
        "34 ST-PENN STATION", "34 ST-PENN STA"
    )
    mta_station_info["STATION"] = mta_station_info["STATION"].replace(
        "GRD CNTRL-42 ST", "34 ST-PENN STA"
    )
    mta_station_info["STATION"] = mta_station_info["STATION"].replace(
        "34 ST-HERALD SQ", "34 ST-HERALD"
    )

    # only query Google if there is no station_zips.json file stored locally
    try:
        with open("data/station_zips.json", "r") as f:
            station_zips = json.load(f)
        assert station_zips["23 ST"] == "10011"
    except Exception:
        # you may need to get your own API key (geocode_api_key is defined elsewhere);
        # get one at: https://developers.google.com/maps/documentation/geocoding/start
        gmaps = googlemaps.Client(key=geocode_api_key)
        # initialize dictionary to store zipcodes in
        station_zips = {}
        mta_station_names = list(df_turnstiles.STATION.unique())
        for station in mta_station_names:
            address = station + " Station New York City, NY"
            geocode_result = gmaps.geocode(address)
            # use try/except to keep going when Google can't find the zipcode
            try:
                # geocode_result is a nested JSON structure and must be accessed like this
                zipcode = geocode_result[0]["address_components"][6]["long_name"]
                if len(zipcode) == 5:
                    station_zips[station.upper()] = str(zipcode)
            except Exception:
                continue

    # add zipcode to df_turnstiles
    df_turnstiles["ZIPCODE"] = df_turnstiles["STATION"].map(station_zips)

    # get AGI (adjusted gross income) into df_turnstiles
    us_zips_agi = pd.read_csv(irs_url)
    us_zips_agi.rename(
        columns={"A00100": "adj_gross_inc"}, inplace=True
    )  # in 18zpallagi.csv, A00100 stands for AGI
    us_zips_agi = (
        us_zips_agi[["zipcode", "adj_gross_inc"]].groupby("zipcode").sum()
    )  # group by zipcode and sum AGI

    # now keep just the data for NYC zipcodes (not the full US)
    nyc_zips = pd.read_csv("data/ny_zips.csv")
    nyc_zips.dropna(axis=1, how="all", inplace=True)
    nyc_agi_by_zip = nyc_zips.join(us_zips_agi, how="inner", on="zipcode")

    # capitalize col names & cast ZIPCODE to str in order to merge into df_turnstiles
    nyc_agi_by_zip.columns = nyc_agi_by_zip.columns.str.upper()
    nyc_agi_by_zip["ZIPCODE"] = nyc_agi_by_zip.ZIPCODE.astype(str)

    # now merge the ZIPCODE_AGI data into df_turnstiles
    zipcode_agis = (
        nyc_agi_by_zip[["ZIPCODE", "ADJ_GROSS_INC"]]
        .set_index("ZIPCODE")
        .to_dict()["ADJ_GROSS_INC"]
    )
    df_turnstiles["ZIPCODE_AGI"] = df_turnstiles["ZIPCODE"].map(zipcode_agis)
    return df_turnstiles
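# Usage sketch (not in the original source): in this project's main() pipeline the call is
# simply df_turnstiles = merge_zipcode_agi(df_turnstiles), applied after the raw turnstile
# data has been fetched and cleaned. The .head() inspection below is illustrative only:
#     df_turnstiles = merge_zipcode_agi(df_turnstiles)
#     df_turnstiles[["STATION", "ZIPCODE", "ZIPCODE_AGI"]].head()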
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_loc_cols(df):\r\n\r\n\tdf['STATE'] = [int(i[1:3]) for i in df.gisjoin]\r\n\tdf['COUNTY'] = [int(i[4:7]) for i in df.gisjoin]\r\n\tdf['TRACT'] = [int(i[7:-4]) for i in df.gisjoin]\r\n\tdf['BLOCK'] = [int(i[-4:]) for i in df.gisjoin]\r\n\r\n\tif df.STATE[0] > 9:\r\n\t\traise Exception(\"Warning! Code might be incorrect for states with fips code > 9\")\r\n\r\n\treturn df",
"def merge_table(t1, t2):\r\n input1 = pd.merge(t1, t2, on=\"zip_code\", how=\"inner\")\r\n covid_zip = gpd.GeoDataFrame(input1)\r\n\r\n #change column name\r\n covid_zip.columns = [\"zip_code\", \"covid_cases\", \"time\", \"geometry\"]\r\n return covid_zip",
"def add_averages_to_map_dataframe(dataframe, map_dataframe):\n\n # List of boroughs\n boroughs = [\"BROOKLYN\", \"BRONX\", \"QUEENS\", \"MANHATTAN\", \"STATEN ISLAND\"]\n \n # Making a copy of the original map_dataframe\n map_dataframe_copy = map_dataframe.copy()\n \n # Renaming columns\n dataframe.columns = boroughs\n # Adding a new column named 'ZIPCODES' and its value will be the actual zipcodes from each borough\n dataframe['ZIPCODES'] = list(dataframe.index)\n \n # \n # Iterating through all columns (each column is a borough)\n # But a zipcode is only located in one of the 5 boroughs. So, one column will \n # hold a useful value while the other 4 will hold NaN\n # Dataframe looks like this:\n # BROOKLYN BRONX QUEENS MANHATTAN STATENISLAND\n # 11214 400k Nan NaN NaN NaN\n # 10303 NaN NaN NaN NaN 700k\n # We are finding that useful value and ignoring the NaN\n # Creating a list that looks like the following, however the zipcode is implicit:\n # AVERAGE\n # 400k (zipcode is 11214)\n # 700k (zipcode is 10303) \n averages = []\n for index, row in dataframe.iterrows():\n average = 0\n for bor in boroughs:\n if (not(math.isnan(row[bor]))):\n average = row[bor]\n averages.append(average)\n # Adding the values found for each zipcode as its own column in the dataframe\n dataframe['AVERAGES'] = averages\n \n # The ZIPCODE column on the dataframe might not exactly match the ZIPCODE column in the map_dataframe\n # (as they were taken from two different sources)\n # so, we need to match them up\n list_of_zipcodes_old = list(dataframe.index)\n list_of_zipcodes_new = list(map_dataframe_copy['ZIPCODE'])\n new_average_list = combine_lists(list_of_zipcodes_old, list_of_zipcodes_new, averages)\n\n # for zip in list_of_zipcodes_new:\n # if zip in list_of_zipcodes_old:\n # index = list_of_zipcodes_old.index(zip)\n # new_average_list.append(averages[index])\n # else:\n # new_average_list.append(0)\n \n # Dropping columns that are not needed (the boroughs)\n dataframe = dataframe.drop(boroughs, axis=1)\n\n # Adding a new column to the shape dataframe to hold the averages\n map_dataframe_copy['AVERAGES'] = new_average_list\n return map_dataframe_copy",
"def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:",
"def fix_zipcode(df):\n zipcode5 = []\n fixnum = 0\n for zipcode in df['Zip'].values:\n if isinstance(zipcode, str) and '-' in zipcode:\n zipcode5.append(int(zipcode.split('-')[0]))\n fixnum += 1\n else:\n zipcode = int(float(zipcode))\n zipcode5.append(zipcode)\n df['zip'] = zipcode5\n # print('Fixing %.2f %% of the data' % (fixnum * 100 / len(zipcode5)))\n return df",
"def add_ll(project_data):\n assert isinstance(project_data, pd.DataFrame)\n \n search = uszipcode.SearchEngine() #Set up SearchEngine() function from uszipcode\n location_list = list(project_data['Location']) #Get list of each report\n longitude_list = [] #Create list to store longitude\n latitude_list = [] #Create list to store latitude\n zip_list = [] #Create list to store zip code\n\n #Iterate through every location and update longitude, latitude, zip code lists\n for location in location_list:\n lo = (re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", location))[0] #Extract longitude from Location string\n la = (re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", location))[1] #Extract latitude from Location string\n zp = search.by_coordinates(float(la), float(lo), returns=1)[0].zipcode #Get zip code for coordinate\n longitude_list.append(lo)\n latitude_list.append(la)\n zip_list.append(zp)\n \n #Add the Longitude, Latitude, Zip Code data in new columns in dataframe\n project_data.insert(len(project_data.columns)-1, \"Longitude\", longitude_list, True)\n project_data.insert(len(project_data.columns)-1, \"Latitude\", latitude_list, True)\n project_data.insert(len(project_data.columns)-1, \"Zip\", zip_list, True)\n \n return project_data",
"def geocode(df, col):\r\n pass",
"def _extract_zipcode(self):\n self.col_etl = self.col_etl.apply(lambda x: x[0:2])\n self.col_etl.name = 'deliv_sector'",
"def add_cols_to_cleaned_df(df):\n\n core_cols = ['time','lat','lon','depth','year','month','week','dayofyear','float_id','cycle']\n template_cols = core_cols + bgc_data_columns\n template_df = pd.DataFrame(columns=template_cols)\n df = template_df.append(df)[template_cols]\n return df",
"def add_city_state_to_dataframe(dataframe):\n dataframe[['city', 'state']] = dataframe.apply(parse_city_state_from_row,\n axis=1)\n dataframe = dataframe[dataframe.state != \"NULL\"]\n return dataframe",
"def append_to_df_zipCity_no_address(\n df_address: pd.DataFrame, name: str, df_zipCity_no_address: pd.DataFrame\n) -> pd.DataFrame:\n zipCity_no_address = df_address.loc[\n (df_address[\"city\"].notnull())\n & (df_address[\"zip\"].notnull())\n & (df_address[\"AddressLine1\"].isnull())\n & (df_address[\"PostBox\"].isnull())\n & (df_address[\"Street\"].isnull())\n ][[\"memberid\"]]\n zipCity_no_address[\"source\"] = name\n zipCity_no_address[\"action\"] = \"not deleted\"\n df_zipCity_no_address = pd.concat(\n [df_zipCity_no_address, zipCity_no_address], ignore_index=True\n )\n return df_zipCity_no_address.drop_duplicates()",
"def _add_location_id(df: pd.DataFrame):\n if CommonFields.LOCATION_ID in df.columns:\n raise ValueError(\"location_id already in DataFrame\")\n df[CommonFields.LOCATION_ID] = df[CommonFields.FIPS].apply(pipeline.fips_to_location_id)",
"def _add_fips_if_missing(df: pd.DataFrame):\n if CommonFields.FIPS not in df.columns:\n df[CommonFields.FIPS] = df[CommonFields.LOCATION_ID].apply(pipeline.location_id_to_fips)",
"def __append_columns(self, new_dataframe):\n self.dataframe = pd.merge(self.dataframe, new_dataframe)",
"def get_countries_geo_df() -> geopandas.GeoDataFrame:\n\n geo_df: geopandas.GeoDataFrame = geopandas.read_file(\n GEO_DATA_DIR / \"ne_110m_admin_0_map_units\" / \"ne_110m_admin_0_map_units.shp\"\n )\n\n geo_df = geo_df.rename(columns={\"ADMIN\": CODE}, errors=\"raise\")\n\n # Keys are what's in the geo df, values are what we want to rename them to\n # Values must match the names in the original data source. If you don't like those\n # names, change them there and then come back and change the values here.\n geo_df[CODE] = (\n geo_df[CODE]\n .map(\n {\n \"Central African Republic\": \"Central African Rep.\",\n \"Democratic Republic of the Congo\": \"Dem. Rep. Congo\",\n \"Equatorial Guinea\": \"Eq. Guinea\",\n \"eSwatini\": \"Eswatini\",\n \"Georgia (Country)\": \"Georgia\",\n \"Republic of Serbia\": \"Serbia\",\n \"United Arab Emirates\": \"UAE\",\n \"United Kingdom\": \"Britain\",\n \"United Republic of Tanzania\": \"Tanzania\",\n \"Western Sahara\": \"W. Sahara\",\n \"United States of America\": \"United States\",\n }\n )\n .fillna(geo_df[CODE])\n )\n geo_df = geo_df[geo_df[CODE] != \"Antarctica\"]\n\n colonial_power_main_countries = {\n \"Britain\": \"England\",\n \"France\": \"France, Metropolitan\",\n \"Norway\": \"Norway\",\n \"Papua New Guinea\": \"Papua New Guinea\",\n }\n\n is_main_country_idx = geo_df[CODE].map(colonial_power_main_countries).isna() | (\n geo_df[\"NAME_SORT\"] == geo_df[CODE].map(colonial_power_main_countries)\n )\n\n geo_df[CODE] = geo_df[CODE].where(\n is_main_country_idx, geo_df[CODE].str.cat(geo_df[\"NAME_SORT\"], sep=\" - \"),\n )\n geo_df[\"name\"] = geo_df[CODE]\n\n geo_df = geo_df[\n [\n \"featurecla\",\n \"scalerank\",\n \"LABELRANK\",\n # \"SOVEREIGNT\",\n # \"SOV_A3\",\n # \"ADM0_DIF\",\n \"LEVEL\",\n # \"TYPE\",\n CODE,\n \"name\",\n # \"ADM0_A3\",\n # \"GEOU_DIF\",\n # \"GEOUNIT\",\n # \"GU_A3\",\n # \"SU_DIF\",\n # \"SUBUNIT\",\n # \"SU_A3\",\n # \"BRK_DIFF\",\n # \"NAME\",\n # \"NAME_LONG\",\n # \"BRK_A3\",\n # \"BRK_NAME\",\n # \"BRK_GROUP\",\n \"ABBREV\",\n # \"POSTAL\",\n # \"FORMAL_EN\",\n # \"FORMAL_FR\",\n # \"NAME_CIAWF\",\n # \"NOTE_ADM0\",\n # \"NOTE_BRK\",\n \"NAME_SORT\",\n # \"NAME_ALT\",\n # \"MAPCOLOR7\",\n # \"MAPCOLOR8\",\n # \"MAPCOLOR9\",\n # \"MAPCOLOR13\",\n # \"POP_EST\",\n # \"POP_RANK\",\n # \"GDP_MD_EST\",\n # \"POP_YEAR\",\n # \"LASTCENSUS\",\n # \"GDP_YEAR\",\n \"ECONOMY\",\n \"INCOME_GRP\",\n # \"WIKIPEDIA\",\n # \"FIPS_10_\",\n # \"ISO_A2\",\n # \"ISO_A3\",\n # \"ISO_A3_EH\",\n # \"ISO_N3\",\n # \"UN_A3\",\n # \"WB_A2\",\n # \"WB_A3\",\n # \"WOE_ID\",\n # \"WOE_ID_EH\",\n # \"WOE_NOTE\",\n # \"ADM0_A3_IS\",\n # \"ADM0_A3_US\",\n # \"ADM0_A3_UN\",\n # \"ADM0_A3_WB\",\n \"CONTINENT\",\n \"REGION_UN\",\n \"SUBREGION\",\n \"REGION_WB\",\n # \"NAME_LEN\",\n # \"LONG_LEN\",\n # \"ABBREV_LEN\",\n # \"TINY\",\n # \"HOMEPART\",\n # \"MIN_ZOOM\",\n # \"MIN_LABEL\",\n # \"MAX_LABEL\",\n # \"NE_ID\",\n # \"WIKIDATAID\",\n # \"NAME_AR\",\n # \"NAME_BN\",\n # \"NAME_DE\",\n # \"NAME_EN\",\n # \"NAME_ES\",\n # \"NAME_FR\",\n # \"NAME_EL\",\n # \"NAME_HI\",\n # \"NAME_HU\",\n # \"NAME_ID\",\n # \"NAME_IT\",\n # \"NAME_JA\",\n # \"NAME_KO\",\n # \"NAME_NL\",\n # \"NAME_PL\",\n # \"NAME_PT\",\n # \"NAME_RU\",\n # \"NAME_SV\",\n # \"NAME_TR\",\n # \"NAME_VI\",\n # \"NAME_ZH\",\n \"geometry\",\n ]\n ]\n\n return geo_df",
"def finalize_dataframe(self, dataframe: DataFrame):\n # Drop duplicates (some geospatial datasets, like ZCTAs, include redundant rows)\n geo_names = {'geometry'}\n non_geo_names = set(dataframe.columns) - geo_names\n dataframe = dataframe.drop_duplicates(subset=non_geo_names, ignore_index=True)\n\n # Insert NAs for annotated row values to avoid outlier values like -999,999,999\n dataframe.loc[dataframe['annotation'].notnull(), 'value'] = ''\n dataframe['value'] = pd.to_numeric(dataframe['value'], errors='coerce')\n\n # Create year date column\n dataframe['date'] = pd.to_datetime(\n dataframe['year'].astype('string') + '-12-31', format='%Y-%m-%d'\n )\n\n # Rename and reorder columns\n names_csv = resource_string(__name__, 'resources/names.csv')\n csv_reader = reader(StringIO(names_csv.decode('utf-8')))\n next(csv_reader) # Skip header row\n names = dict(csv_reader) # type: ignore\n if self.geometry in ['points', 'polygons'] and (set(dataframe.columns) & geo_names):\n name_order = [*names.values(), *geo_names]\n else:\n name_order = list(names.values())\n dataframe = dataframe.rename(columns=names)[name_order]\n\n return dataframe",
"def add_facility_id_unit_id_epa(df):\n if \"facility_id\" not in df.columns:\n df[\"facility_id\"] = np.NaN\n if \"unit_id_epa\" not in df.columns:\n df[\"unit_id_epa\"] = np.NaN\n return df",
"def append_to_df_zip_no_city(\n df_address: pd.DataFrame, name: str, df_zip_no_city: pd.DataFrame\n) -> pd.DataFrame:\n zip_no_city = df_address.loc[\n (df_address[\"city\"].isnull()) & df_address[\"zip\"].notnull()\n ][[\"memberid\"]]\n zip_no_city[\"source\"] = name\n zip_no_city[\"action\"] = \"not deleted\"\n df_city_no_zip = pd.concat([df_zip_no_city, zip_no_city], ignore_index=True)\n return df_city_no_zip.drop_duplicates()",
"def add_climatology_cols(df):\n return df",
"def append_to_df_city_no_zip(\n df_address: pd.DataFrame, name: str, df_city_no_zip: pd.DataFrame\n) -> pd.DataFrame:\n city_no_zip = df_address.loc[\n (df_address[\"zip\"].isnull()) & df_address[\"city\"].notnull()\n ][[\"memberid\"]]\n city_no_zip[\"source\"] = name\n city_no_zip[\"action\"] = \"not deleted\"\n df_city_no_zip = pd.concat([df_city_no_zip, city_no_zip], ignore_index=True)\n return df_city_no_zip.drop_duplicates()",
"def addRDGfaresinfo(df,lookupdf,postfix):\n #copy of data frame made to avoid SettingWithCopyWarning by making the copy explicit\n df_dt = df.copy()\n\n #datatyping of RDG info to match later match with superfile\n df_dt.loc[:,'Origin Code'] = df.loc[:,'Origin Code'].astype(str)\n df_dt.loc[:,'Destination Code'] = df.loc[:,'Destination Code'].astype(str)\n df_dt.loc[:,'Route Code'] = df.loc[:,'Route Code'].astype(str)\n df_dt.loc[:,'Product Code'] = df.loc[:,'Product Code'].astype(str)\n\n #merge of the RDG info with superfile\n df_dt = pd.merge(left=df_dt,right=lookupdf[['ORIGIN_CODE','DESTINATION_CODE','ROUTE_CODE','Lennon product code (CTOT)','FARE']],\n how='left',\n left_on=['Origin Code','Destination Code','Route Code','Product Code'],\n right_on=['ORIGIN_CODE','DESTINATION_CODE','ROUTE_CODE','Lennon product code (CTOT)'])\n\n #drop unnecessary columns from the merge process\n df_dt.drop(['ORIGIN_CODE','DESTINATION_CODE','ROUTE_CODE','Lennon product code (CTOT)'],axis=1,inplace=True)\n\n #rename columns as needed\n df_dt.rename(columns={'FARE':'RDG_FARES'+postfix,'Fares ticket type description':'RDG_Fares ticket type description'+postfix},inplace=True)\n\n return df_dt",
"def _add_column(df_main, serie, name):\n df = serie.to_frame(name=name)\n df_main_new = df_main.merge(df, left_on='customerId2', right_index='customerId2', how='left')\n return df_main_new",
"def zip_code(self, zip_code):\n\n self._zip_code = zip_code",
"def join_country_code_data(daily_data, country_code_data):\n #new columns: country, country_code, geometry\n return country_code_data.merge(daily_data, left_on = 'country', right_on = 'Country/Region').drop(['country'], axis=1)",
"def add_region_feature(data):\n\n data.loc[:, 'region'] = data.loc[:, 'district'].apply(\n lambda x: mapping.SOFIA_NEIGHBOURHOOD_TO_REGION_MAPPING[x]\n )\n\n return data",
"def zipcode(self, zipcode):\n self._zipcode = zipcode",
"def country_code_update(df):\n from pycountry import countries as ct\n new_df = country_grouping(df)\n # country names in the data set that are not fit ISO standard\n completion = pd.DataFrame(np.array([['Bolivia', 'BO'],\n ['Brunei', 'BN'],\n ['Congo (Brazzaville)', 'CG'],\n ['Congo (Kinshasa)', 'CD'],\n ['Cote d\\'Ivoire', 'CI'],\n ['Holy See', 'VA'],\n ['Iran', 'IR'],\n ['Korea, South', 'KR'],\n ['Moldova', 'MD'],\n ['Russia', 'RU'],\n ['Taiwan*', 'TW'],\n ['Tanzania', 'TZ'],\n ['US', 'US'],\n ['Venezuela', 'VE'],\n ['Vietnam', 'VN'],\n ['Syria', 'SY'],\n ['Laos', 'LA'],\n ['West Bank and Gaza', 'PS'],\n ['Kosovo', 'XK'],\n ['Burma', 'MM']\n ]),\n columns=['c_name', 'c_code']\n )\n country_code_list = []\n for country_name in new_df['Country/Region']:\n try:\n if country_name in completion['c_name'].tolist():\n # print('exception covered: ', country_name)\n country_code = completion['c_code'].loc[completion['c_name'] == country_name].item()\n # identifies the cruise ships in the data set considered as a 'country'\n elif country_name == 'Diamond Princess' or country_name == 'MS Zaandam':\n country_code = 'Cruise Ship'\n else:\n country_code = ct.get(name=country_name).alpha_2\n except KeyError:\n print('no result: ', country_name)\n country_code = 'None'\n pass\n country_code_list.append(country_code)\n # print(country_code_list)\n new_df.insert(0, \"country_code\", country_code_list, True)\n new_df = new_df.drop(columns='Country/Region')\n unknown_index = new_df[new_df['country_code'] == 'Cruise Ship'].index\n new_df.drop(unknown_index, inplace=True) # drop when country_code = 'None', most likely are Cruise ships\n # new_df.set_index(new_df['country_code'])\n return new_df",
"def pandas_address_view(base_directory, filter_to_locality=None):\n\n # Define the paths required\n street_locality_file = os.path.join(\n base_directory, 'Standard', 'SA_STREET_LOCALITY_psv.psv')\n address_detail_file = os.path.join(\n base_directory, 'Standard', 'SA_ADDRESS_DETAIL_psv.psv')\n address_default_geocode_file = os.path.join(\n base_directory, 'Standard', 'SA_ADDRESS_DEFAULT_GEOCODE_psv.psv')\n\n # Load the data\n #\n # Only keep these columns as things like the creation date aren't needed.\n street_locality_columns = [\n \"STREET_LOCALITY_PID\", \"STREET_CLASS_CODE\", \"STREET_NAME\",\n 'STREET_TYPE_CODE', 'STREET_SUFFIX_CODE',\n ]\n\n address_detail_columns_to_ignore = {\n 'DATE_CREATED', 'DATE_LAST_MODIFIED', 'DATE_RETIRED', 'GNAF_PROPERTY_PID',\n }\n\n geocode_columns = [\n 'ADDRESS_DETAIL_PID', 'LONGITUDE', 'LATITUDE',\n # GEOCODE_TYPE_CODE helps identifier where it refers to.\n ]\n\n def should_keep_address_detail_column(column):\n return column not in address_detail_columns_to_ignore\n\n street_locality = pandas.read_csv(street_locality_file,\n sep='|',\n usecols=street_locality_columns)\n address_detail = pandas.read_csv(address_detail_file,\n sep='|',\n dtype={\n 'BUILDING_NAME': str,\n 'NUMBER_FIRST': str,\n 'NUMBER_FIRST_SUFFIX': str,\n },\n keep_default_na=False,\n usecols=should_keep_address_detail_column)\n address_geocode = pandas.read_csv(address_default_geocode_file,\n sep='|',\n usecols=geocode_columns)\n\n if filter_to_locality:\n # Filter address detail down to a specific locality\n address_detail = address_detail.loc[\n address_detail['LOCALITY_PID'] == filter_to_locality]\n\n merged = address_detail.join(\n street_locality.set_index('STREET_LOCALITY_PID'),\n on='STREET_LOCALITY_PID',\n lsuffix='_address', rsuffix='_street')\n\n merged = merged.join(\n address_geocode.set_index('ADDRESS_DETAIL_PID'),\n on='ADDRESS_DETAIL_PID',\n rsuffix='_geocode')\n\n return merged",
"def join_columns(self, other: \"MultiRegionTimeseriesDataset\") -> \"MultiRegionTimeseriesDataset\":\n if not other.latest_data.empty:\n raise NotImplementedError(\"No support for joining other with latest_data\")\n other_df = other.data_with_fips.set_index([CommonFields.LOCATION_ID, CommonFields.DATE])\n self_df = self.data_with_fips.set_index([CommonFields.LOCATION_ID, CommonFields.DATE])\n other_geo_columns = set(other_df.columns) & set(GEO_DATA_COLUMNS)\n other_ts_columns = (\n set(other_df.columns) - set(GEO_DATA_COLUMNS) - set(TimeseriesDataset.INDEX_FIELDS)\n )\n common_ts_columns = other_ts_columns & set(self.data_with_fips.columns)\n if common_ts_columns:\n # columns to be joined need to be disjoint\n raise ValueError(f\"Columns are in both dataset: {common_ts_columns}\")\n common_geo_columns = list(set(self.data_with_fips.columns) & other_geo_columns)\n # TODO(tom): fix geo columns check, no later than when self.data is changed to contain only\n # timeseries\n # self_common_geo_columns = self_df.loc[:, common_geo_columns].fillna(\"\")\n # other_common_geo_columns = other_df.loc[:, common_geo_columns].fillna(\"\")\n # try:\n # if (self_common_geo_columns != other_common_geo_columns).any(axis=None):\n # unequal_rows = (self_common_geo_columns != other_common_geo_columns).any(axis=1)\n # _log.info(\n # \"Geo data unexpectedly varies\",\n # self_rows=self_df.loc[unequal_rows, common_geo_columns],\n # other_rows=other_df.loc[unequal_rows, common_geo_columns],\n # )\n # raise ValueError(\"Geo data unexpectedly varies\")\n # except Exception:\n # _log.exception(f\"Comparing df {self_common_geo_columns} to {other_common_geo_columns}\")\n # raise\n combined_df = pd.concat([self_df, other_df[list(other_ts_columns)]], axis=1)\n return MultiRegionTimeseriesDataset.from_timeseries_df(\n combined_df.reset_index()\n ).append_latest_df(self.latest_data_with_fips.reset_index())",
"def add_extra_column(self, prof_gas, retrieval_date, mod_data, **kwargs):\n pass"
] |
[
"0.6326425",
"0.58691585",
"0.58307797",
"0.5718286",
"0.56968427",
"0.5463691",
"0.5426602",
"0.5325125",
"0.5292596",
"0.518979",
"0.5161211",
"0.51520586",
"0.51448625",
"0.51438683",
"0.5116092",
"0.5104685",
"0.50368077",
"0.5005452",
"0.49896863",
"0.49113354",
"0.48995107",
"0.48560637",
"0.48376036",
"0.4829966",
"0.48256126",
"0.48068666",
"0.4791397",
"0.4779645",
"0.4758644",
"0.47513688"
] |
0.59902793
|
1
|
Cleans the entries and exits columns. Returns a DataFrame grouped by individual turnstile and AM/PM, with the entries and exits columns converted from cumulative totals to the change from the previous value.
|
import json

import pandas as pd


def fixup_entries_exits(df_turnstiles):
    # group data by AM/PM, taking the maximum entries/exits for each date
    ampm_station_group = df_turnstiles.groupby(
        ["C/A", "UNIT", "SCP", "STATION", "DATE", "AMPM", "DAY_NAME"],
        as_index=False,
    )
    df_ampm = ampm_station_group.ENTRIES.max()
    ampm_station_exits = ampm_station_group.EXITS.max()
    df_ampm["EXITS"] = ampm_station_exits["EXITS"]

    # create PREV_DATE, PREV_ENTRIES and PREV_EXITS cols by shifting these columns forward one period;
    # when shifting date and entries, don't group by date
    df_ampm[["PREV_DATE", "PREV_ENTRIES", "PREV_EXITS"]] = df_ampm.groupby(
        ["C/A", "UNIT", "SCP", "STATION"]
    )[["DATE", "ENTRIES", "EXITS"]].apply(lambda grp: grp.shift(1))

    # drop the rows for the earliest date in the df, which are now NaNs in the PREV_* cols
    df_ampm.dropna(subset=["PREV_DATE"], axis=0, inplace=True)

    def add_counts(row, max_counter, column_name):
        """
        Compute the change in entries/exits relative to the previous timeframe.
        max_counter is the maximum difference between entries and prev. entries
        that we will allow.
        """
        counter = row[column_name] - row[f"PREV_{column_name}"]
        if counter < 0:
            # maybe the counter is reversed?
            counter = -counter
        if counter > max_counter:
            # maybe the counter was reset to 0?
            # take the lower value as the counter for this row
            counter = min(row[column_name], row[f"PREV_{column_name}"])
        if counter > max_counter:
            # check it again to make sure we're not still returning a counter that's too big
            return 0
        return counter

    # we will use a 200k cap - anything more seems incorrect.
    df_ampm["TMP_ENTRIES"] = df_ampm.apply(
        add_counts, axis=1, max_counter=200000, column_name="ENTRIES"
    )
    df_ampm["TMP_EXITS"] = df_ampm.apply(
        add_counts, axis=1, max_counter=200000, column_name="EXITS"
    )
    df_ampm["TRAFFIC"] = df_ampm.TMP_ENTRIES + df_ampm.TMP_EXITS

    # add zipcode and AGI to df_ampm
    with open("data/station_zips.json", "r") as f:
        station_zips = json.load(f)
    df_ampm["ZIPCODE"] = df_ampm["STATION"].map(station_zips)
    station_agis = dict(zip(df_turnstiles["STATION"], df_turnstiles["ZIPCODE_AGI"]))
    df_ampm["ZIPCODE_AGI"] = df_ampm["STATION"].map(station_agis)
    return df_ampm
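# Usage sketch (not in the original source): in this project's main() pipeline this runs
# last, on a frame that already has the ZIPCODE and ZIPCODE_AGI columns merged in:
#     df_turnstiles = merge_zipcode_agi(df_turnstiles)
#     df_ampm = fixup_entries_exits(df_turnstiles)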
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def clean_data(df_turnstiles):\n\n # sort values in a such a way that the duplicate values sit directly below the originals, so they will be removed.\n df_turnstiles.sort_values(\n [\"C/A\", \"UNIT\", \"SCP\", \"STATION\", \"DATE_TIME\"],\n inplace=True,\n ascending=False,\n )\n # keeps top row, deletes others\n df_turnstiles.drop_duplicates(\n subset=[\"C/A\", \"UNIT\", \"SCP\", \"STATION\", \"DATE_TIME\"], inplace=True\n )\n\n # remove DESC column\n df_turnstiles = df_turnstiles.drop([\"DESC\"], axis=1, errors=\"ignore\")\n\n # remove the many spaces in the EXITS column name\n df_turnstiles.rename(\n columns={\n \"EXITS \": \"EXITS\"\n },\n inplace=True,\n )\n\n return df_turnstiles",
"def main():\n df_turnstiles = get_mta_data2()\n df_turnstiles = clean_data(df_turnstiles)\n df_turnstiles = add_dt_cols(df_turnstiles)\n df_turnstiles = merge_zipcode_agi(df_turnstiles)\n\n df_ampm = fixup_entries_exits(df_turnstiles)\n return df_turnstiles, df_ampm",
"def clean_time(df):\n df.rename({\"Entry number\": \"entry\", \"Time\": \"time\"},\n axis='columns', inplace=True)\n\n df.time = pd.to_timedelta(df.time, unit='s')\n df.time = df.time - df.time[0]\n df.time = df.time.dt.total_seconds()\n\n df.rename({\"time\": \"time (s)\"},\n axis='columns', inplace=True)\n df = df.groupby(\"time (s)\").sum()\n\n return df",
"def clean(df):\n df['RH'] = df['stand'].apply(lambda x: 1 if x == 'R' else 0)\n\n df['hit'] = df['events'].apply(lambda x: 1 if x in\n ('single', 'double', 'triple'\n 'home_run') else 0)\n\n return df",
"def get_cut_day_dataframe(start, end):\n entries = get_dataframe(start, end)\n data = entries.copy()\n data['datetime'] = data['to'].map(ts2date)\n\n point = arrow.Arrow.range('day', ts2datetime(start), ts2datetime(end))\n point = [x.timestamp for x in point]\n ind = 0\n newRow = 0\n for index, row in entries.iterrows():\n if row['to'] == point[ind]:\n ind += 1\n elif row['from'] < point[ind] < row['to']:\n if index == 0:\n data.loc[0, 'from'] = start\n data.loc[0, 'delta'] = data.loc[0, 'to'] - data.loc[0, 'from']\n elif index == len(entries)-1:\n data.loc[data.index[-1:], 'to'] = end\n data.loc[data.index[-1:], 'delta'] = data.loc[data.index[-1:], 'to'] - data.loc[data.index[-1:], 'from']\n data.loc[data.index[-1:], 'datetime'] = ts2date(data.loc[data.index[-1:], 'from'])\n break\n else:\n row = entries.iloc[[index]].copy()\n row.loc[index, 'delta'] = abs(point[ind]-row.loc[index, 'from'])\n row.loc[index, 'to'] = point[ind]\n row.loc[index, 'datetime'] = ts2date(row.loc[index, 'from'])\n newInd = index + newRow\n data.loc[newInd, 'delta'] = abs(point[ind]-data.loc[newInd, 'to'])\n data.loc[newInd, 'from'] = point[ind]\n data = pd.concat([data[:newInd], row, data[newInd:]])\n data = data.reset_index(drop=True)\n newRow += 1\n ind += 1\n if ind >= len(point):\n break\n data['datetime'] = data['from'].map(ts2datetime)\n return data",
"def clean_ferc714_hourly_demand_matrix(df: pd.DataFrame) -> pd.DataFrame:\n ts = pudl.analysis.timeseries_cleaning.Timeseries(df)\n ts.flag_ruggles()\n return ts.to_dataframe(copy=False)",
"def clean_and_enhance_dataframe(grouped, due_date_cutoff, euctr_url):\n grouped.replace('nan', np.nan, inplace=True)\n grouped['full_title'] = grouped.full_title.str.replace(r'\\r','')\n grouped['full_title'] = grouped.full_title.str.replace(r'\\n','')\n\n grouped.rename(columns={'eudract_number':'trial_id'}, inplace=True)\n grouped['min_end_date'] = pd.to_datetime(grouped['min_end_date'])\n grouped['max_end_date'] = pd.to_datetime(grouped['max_end_date'])\n grouped['has_results'] = (grouped.has_results == grouped.number_of_countries).astype(int)\n grouped['includes_pip'] = (grouped.includes_pip > 0).astype(int)\n grouped['exempt'] = ((grouped.includes_pip == 0) & (grouped.phase_1 == grouped.number_of_countries)).astype(int)\n\n sb_cond = [\n (grouped.single_blind == grouped.number_of_countries),\n (grouped.not_single_blind == grouped.number_of_countries)] \n sb_vals = [1,0]\n grouped['single_blind'] = np.select(sb_cond,sb_vals, default = 2)\n\n rd_cond = [\n (grouped.rare_disease == grouped.number_of_countries),\n (grouped.not_rare_disease == grouped.number_of_countries),\n (grouped.rare_disease_blank == grouped.number_of_countries)]\n rd_vals = [1,0,3]\n grouped['rare_disease'] = np.select(rd_cond,rd_vals, default = 2)\n\n ph_cond = [\n (grouped.phase_1 == grouped.number_of_countries),\n (grouped.phase_2 == grouped.number_of_countries),\n (grouped.phase_3 == grouped.number_of_countries),\n (grouped.phase_4 == grouped.number_of_countries)]\n ph_vals = [1,2,3,4]\n grouped['phase'] = np.select(ph_cond,ph_vals, default = 0)\n\n be_cond = [\n (grouped.bioequivalence == grouped.number_of_countries),\n (grouped.not_bioequivalence == grouped.number_of_countries)]\n be_vals = [1,0]\n grouped['bioequivalence_study'] = np.select(be_cond,be_vals, default = 2)\n\n hv_cond = [\n (grouped.healthy_volunteers == grouped.number_of_countries),\n (grouped.not_healthy_volunteers == grouped.number_of_countries)]\n hv_vals = [1,0]\n grouped['health_volunteers'] = np.select(hv_cond,hv_vals, default = 2)\n\n ts_cond = [\n (grouped.ongoing == grouped.number_of_countries),\n ((grouped.completed) + (grouped.terminated) == grouped.number_of_countries),\n (((grouped.completed) + (grouped.terminated)) > 0) & (((grouped.completed) + (grouped.terminated)) < grouped.number_of_countries),\n (grouped.no_status == grouped.number_of_countries)]\n ts_vals = [0,1,2,4]\n grouped['trial_status'] = np.select(ts_cond,ts_vals, default = 3)\n\n grouped['any_terminated'] = (grouped.terminated > 0).astype(int)\n grouped['all_terminated'] = (grouped.terminated == grouped.number_of_countries).astype(int)\n grouped['results_expected'] = (((grouped.completed) + (grouped.terminated) == grouped.number_of_countries) & \n (grouped.comp_date > 0) &\n (grouped.max_end_date < due_date_cutoff) &\n ~((grouped.includes_pip == 0) & (grouped.phase_1 == grouped.number_of_countries))).astype(int)\n grouped['all_completed_no_comp_date'] = (((grouped.completed) + (grouped.terminated) == grouped.number_of_countries) &\n (grouped.comp_date == 0)).astype(int)\n title_cond = [\n ((pd.isnull(grouped.full_title)) & (pd.notnull(grouped.abbreviated_title))),\n ((pd.isnull(grouped.full_title)) & (pd.isnull(grouped.abbreviated_title))),\n ((pd.notnull(grouped.full_title)) & (grouped.full_title.str.len() > 200))]\n title_vals = [grouped.abbreviated_title, 'No Title', grouped.full_title.str.slice(stop=200) + '...']\n grouped['trial_title'] = np.select(title_cond, title_vals, grouped.full_title)\n\n grouped['trial_url'] = euctr_url + grouped.trial_id\n 
grouped['comp_date_while_ongoing'] = ((grouped.comp_date > 0) & \n (((grouped.completed) + (grouped.terminated)) > 0) & \n (((grouped.completed) + (grouped.terminated)) < grouped.number_of_countries)).astype(int)\n grouped['contains_non_eu'] = (grouped.non_eu > 0).astype(int)\n grouped['only_non_eu'] = (grouped.non_eu == grouped.number_of_countries).astype(int)",
"def get_hourly_entries(df):\n df['ENTRIESn_hourly'] = (df['ENTRIESn'] - df['ENTRIESn'].shift(1)).fillna(1)\n return df",
"def get_cut_level_dataframe(start, end, level):\n entries = get_cut_dataframe(start, end)\n data = entries.copy()\n break_point = break_level(start, end, level)\n if len(break_point) != 0:\n data['datetime'] = data['to'].map(ts2date)\n\n ind = 0\n new_row = 0\n for index, row in entries.iterrows():\n if row['to'] == break_point[ind]:\n ind += 1\n elif row['from'] < break_point[ind] < row['to']:\n row = entries.iloc[[index]].copy()\n row.loc[index, 'delta'] = abs(break_point[ind]-row.loc[index, 'from'])\n row.loc[index, 'to'] = break_point[ind]\n row.loc[index, 'datetime'] = ts2date(row.loc[index, 'from'])\n new_ind = index + new_row\n data.loc[new_ind, 'delta'] = abs(break_point[ind]-data.loc[new_ind, 'to'])\n data.loc[new_ind, 'from'] = break_point[ind]\n data = pd.concat([data[:new_ind], row, data[new_ind:]])\n data = data.reset_index(drop=True)\n new_row += 1\n ind += 1\n if ind >= len(break_point):\n break\n\n data['datetime'] = data['from'].map(ts2datetime)\n data['date_agg'] = data['from'].map(lambda x: ts2str_level(x, level))\n return data",
"def time_of_visit_bins(df):\n mask_8_5 = (df[\"admit_hour_code\"] >= 8) & (df[\"admit_hour_code\"] < 17)\n mask_5_11 = (df[\"admit_hour_code\"] >= 17) & (df[\"admit_hour_code\"] < 23)\n mask_11_8 = (df[\"admit_hour_code\"] >= 23) | (df[\"admit_hour_code\"] < 8)\n\n df[\"hour_category\"] = np.where(mask_8_5, \"Open Hours\", \"Unknown\")\n df[\"hour_category\"] = np.where(mask_5_11, \"Evening\", df[\"hour_category\"])\n df[\"hour_category\"] = np.where(mask_11_8, \"Overnight\", df[\"hour_category\"])\n\n return df",
"def _preprocess(self):\n\n self.df = self.df[(self.df['days_b_screening_arrest'] <= 30)\n & (self.df['days_b_screening_arrest'] >= -30)\n & (self.df['is_recid'] != -1)\n & (self.df['c_charge_degree'] != 'O')\n & (self.df['score_text'] != 'N/A')]\n\n self.df['c_jail_out'] = pd.to_datetime(self.df['c_jail_out'])\n self.df['c_jail_in'] = pd.to_datetime(self.df['c_jail_in'])\n self.df['length_of_stay'] = (self.df['c_jail_out']\n - self.df['c_jail_in'])\n\n self.df['score_factor'] = np.where(self.df['score_text']\n != 'Low',\n 'HighScore', 'LowScore')\n self.df['y_pred'] = (self.df['score_factor'] == 'HighScore')",
"def clean(df):",
"def clean_weather_csv(df: pd.DataFrame) -> pd.DataFrame:\n if 'Min_VisibilitykM' in df.columns:\n df.rename(columns={'Min_VisibilitykM': 'Min_VisibilityKm'},\n inplace=True)\n if 'Min_DewpointC' in df.columns:\n df.rename(columns={'Min_DewpointC': 'MinDew_pointC'}, inplace=True)\n\n cols = map(convert_to_snake_case, df.columns)\n df.columns = cols\n\n for col in ['max_visibility_km', 'min_visibility_km', 'mean_visibility_km',\n 'max_gust_speed_km_h', 'cloud_cover']:\n df[col] = df[col].fillna(df[col].mean())\n\n df['events'] = df.events.fillna('No Events')\n return df",
"def _dataframe_preprocess(self):\n # 1. add baisc feature like date, time in day, ....\n if self.data_type != 'porto':\n self.df['TIMESTAMP'] = self.df.apply(lambda df: df['TIMESTAMPS'][0], axis=1)\n self.df['TIME'] = pd.to_datetime(self.df['TIMESTAMP'], unit='s', utc=True)\n \n self.df.TIME = self.df.TIME.dt.tz_convert(self.timezone)\n # 2. group df for specific driver analysis\n self.grouped_df = self.df.groupby('LABEL')\n if self.count_od_info:\n if 'SD' not in self.df.columns:\n self._add_OD_info()\n self.grouped_od = self.df.groupby('SD')",
"def filter_for_trade_time(df: pd.DataFrame, group_var: str = \"symbol\", time_var: str = \"timestamp\") -> pd.DataFrame:\n # if we only observe cash in the balances, that means the game has only just kicked off or they haven't ordered.\n if set(df[group_var].unique()) == {'Cash'}:\n min_time = df[time_var].min()\n max_time = df[time_var].max()\n trade_days_df = get_trading_calendar(min_time, max_time)\n if trade_days_df.empty:\n return df\n\n # this bit of logic checks whether any trading hours have happened, if if the user hasn't ordered\n trade_days_df = trade_days_df[\n (trade_days_df[\"market_close\"] >= min_time) & (trade_days_df[\"market_open\"] <= max_time)]\n if trade_days_df.empty:\n return df\n\n days = df[time_var].dt.normalize().unique()\n schedule_df = get_trading_calendar(min(days).date(), max(days).date())\n schedule_df['start'] = schedule_df['market_open'].apply(datetime_to_posix)\n schedule_df['end'] = schedule_df['market_close'].apply(datetime_to_posix)\n df['timestamp_utc'] = df['timestamp'].dt.tz_convert(\"UTC\")\n df['timestamp_epoch'] = df['timestamp_utc'].astype('int64') // 1e9\n df[\"mask\"] = False\n for start, end in zip(schedule_df['start'], schedule_df['end']):\n df[\"mask\"] = df[\"mask\"] | mask_time_creator(df, start, end)\n df = df[df[\"mask\"]]\n return df.drop([\"timestamp_utc\", \"timestamp_epoch\", \"mask\"], axis=1)",
"def clean_trace(trace):\n\n df = pd.read_csv(trace, delimiter=' ', header=None)\n \n # Meaningful column names\n df.columns = ['Timestamp', 'Direction', 'dummy1', 'Circuit', \n 'dummy2', 'Stream', 'dummy3', 'Command',\n 'dummy4', 'Length']\n\n # Drop unnecessary columns\n df.drop(['dummy1', 'dummy2', 'dummy3', 'dummy4'], axis=1, inplace=True)\n\n # Dump trash commas and fix data types\n df['Circuit'] = df['Circuit'].apply(lambda x: x.rstrip(',')).astype('int')\n df['Stream'] = df['Stream'].apply(lambda x: x.rstrip(',')).astype('int')\n df['Command'] = df['Command'].apply(lambda x: x.rstrip(','))\n \n # Make an elapsed time in seconds column\n df['Elapsed Time'] = df['Timestamp'] - df['Timestamp'].iloc[0]\n return df",
"def fix_annotation(csv_data, time_offset = 0):\n # step 1: eliminate rows with same starttime and endtime\n csv_data = csv_data[csv_data.STARTTIME != csv_data.ENDTIME]\n\n # step 2: elminate nan in starttime and endtime\n csv_data = csv_data.dropna(axis=0,subset=[st_col,et_col])\n\n # step 3: fill \"blank\" cells\n csv_data = csv_data.reset_index(drop=True)\n csv_data[puff_col] = csv_data[puff_col].fillna(value='no-puff')\n csv_data[activity_col] = csv_data[activity_col].fillna(value='no-activity')\n csv_data[post_col] = csv_data[post_col].fillna(method='backfill')\n csv_data[post_col] = csv_data[post_col].fillna(method='ffill')\n csv_data[smoke_col] = csv_data[smoke_col].fillna(value='not-smoking')\n \n # step 4: fill 'no-activity' cells whose length is less than 3s with backfill\n csv_data = csv_data.reset_index(drop=True)\n filt = csv_data.apply(lambda x: x[et_col] - x[st_col] <= timedelta(seconds=2) and x[activity_col] == 'no-activity', axis=1)\n csv_data.ix[csv_data[filt].index, activity_col] = csv_data.ix[csv_data[filt].index+1, activity_col].values\n csv_data[activity_col] = csv_data[activity_col].fillna(value='no-activity')\n # step 5: change isolated single \"smoking\" cells into proper label\n bshift_smoke = csv_data[smoke_col].shift(1).fillna(method='backfill')\n fshift_smoke = csv_data[smoke_col].shift(-1).fillna(method='ffill')\n filt = np.logical_and(csv_data[smoke_col] != bshift_smoke, csv_data[smoke_col] != fshift_smoke)\n # print csv_data[filt]\n # ind = csv_data[filt].index\n filt1 = np.logical_and(filt, csv_data[smoke_col] == 'smoking')\n csv_data.ix[filt1, smoke_col] = 'not-smoking'\n filt = np.logical_and(csv_data[smoke_col] != bshift_smoke, csv_data[smoke_col] != fshift_smoke)\n filt2 = np.logical_and(np.logical_and(filt, csv_data[smoke_col] == 'not-smoking'), csv_data.apply(lambda x: x[et_col] - x[st_col] < timedelta(minutes=1),axis=1))\n csv_data.ix[filt2, smoke_col] = 'smoking'\n # print csv_data.iloc[ind]\n\n # step 6: turn smoking sequence without puffs into \"not smoking\"\n st_filt = np.logical_and(csv_data[smoke_col] != csv_data[smoke_col].shift(1), csv_data[smoke_col] == 'smoking')\n et_filt = np.logical_and(csv_data[smoke_col] != csv_data[smoke_col].shift(-1), csv_data[smoke_col] == 'smoking')\n cig_st = csv_data[st_filt]\n cig_et = csv_data[et_filt]\n for i in range(0,len(cig_st.index)):\n puff_flag = csv_data[cig_st.index[i]:cig_et.index[i]+1][puff_col] == 'no-puff'\n if puff_flag.all():\n csv_data[cig_st.index[i]:cig_et.index[i]+1][smoke_col] = 'not-smoking'\n\n # step 7: add offset to starttime and endtime\n # print csv_data.head()\n csv_data[et_col] = csv_data[et_col] + timedelta(seconds=time_offset)\n csv_data[st_col] = csv_data[st_col] + timedelta(seconds=time_offset)\n # print csv_data.head()\n\n # step 8: reindex from 0\n csv_data = csv_data.reset_index(drop=True)\n return csv_data",
"def clean_data(df, to_pm=TO_PM, drop_cols=DROP_COLS, min_seasons=5):\n cleaning_pipe = Pipeline(\n [\n (\"to_per_minute\", cp.ToPerMinute(to_pm)),\n (\"get_month\", cp.GetDebutMonth()),\n (\"fill_na\", cp.FillNA()),\n (\"drop_columns\", cp.DropFeatures(drop_cols)),\n (\"create_labels\", cp.CreateLabels(min_seasons)),\n ]\n )\n return cleaning_pipe.fit_transform(df)",
"def process_compustat(fund):\n # dataframe schema\n required = ['at', 'che', 'act', 'lct', 'lt', 'sale']\n defaults = ['dlc', 'dltt', 'ivao', 'ivst', 'oiadp', 'pstk'] # per sloan 2005\n non_zeros = ['at']\n keep = ['dwc', 'dnco', 'dnoa', 'dfin', 'tacc', 'tacc2', 'oa', 'dacy', 'dac1', 'dac2', 'dac3']\n\n total = fund.shape[0]\n print(f'Total rows: {total}')\n print(f'{fund.date.min()} to {fund.date.max()}')\n\n # unique permno, time idx\n fund['time_idx'] = fund.date + MonthEnd(0)\n fund = fund.sort_values(['permno', 'time_idx'])\n fund = fund.groupby(['permno', 'time_idx'], as_index=False).last() # use the latest for each time idx\n\n # handle missing value\n fund.dropna(how='any', subset=required, inplace=True)\n fund = fund.fillna({col: 0 for col in defaults})\n print(f'Handle NAs: {fund.shape[0]} ({fund.shape[0] / total:.2%})')\n\n # force non-zero on specified columns\n print('Check zeros')\n for col in non_zeros:\n zero = (fund[col] == 0).sum()\n print(f' {col} has zeros: {zero}')\n fund = fund[fund[col] != 0]\n print(f' Drop {col} zeros: {fund.shape[0]} ({fund.shape[0] / total:.2%})')\n\n fund = fund[fund.time_idx > '1970-01-01']\n print(f'After 1970: {fund.shape[0]} ({fund.shape[0] / total:.2%})')\n\n # ========== Before Join ==========\n # extended definition of accruals\n fund['coa'] = fund.act - fund.che\n fund['col'] = fund.lct - fund.dlc\n fund['wc'] = fund.coa - fund.col\n fund['ncoa'] = fund['at'] - fund.act - fund.ivao\n fund['ncol'] = fund['lt'] - fund.lct - fund.dltt\n fund['nco'] = fund.ncoa - fund.ncol\n fund['fina'] = fund.ivst + fund.ivao\n fund['finl'] = fund.dltt + fund.dlc + fund.pstk\n fund['fin'] = fund.fina - fund.finl\n\n # ========== Use sentinel ==========\n # to allow monthly record. not the most efficiency way. But trade time for accuracy\n start = time.time()\n sentinel = []\n whole_range = pd.date_range(fund.time_idx.min(), fund.time_idx.max(), freq='m')\n whole_range = pd.DataFrame({'date': whole_range}, index=whole_range)\n\n time_range = fund.groupby('permno').agg({'time_idx': ['min', 'max']})\n for permno, times in time_range['time_idx'].iterrows():\n dates = whole_range.loc[times['min']: times['max']].values.flatten()\n sentinel.append(pd.DataFrame({'time_idx': dates, 'permno': permno}))\n sentinel = pd.concat(sentinel, axis=0)\n print(time.time() - start, 's')\n\n fund = pd.merge(fund, sentinel, on=['time_idx', 'permno'], how='outer')\n fund = fund.set_index(['permno', 'time_idx']).sort_index()\n fund = fund.groupby(level=0).fillna(method='ffill')\n total = fund.shape[0]\n print(f'Expended rows: {total}')\n\n # ========== After Join ==========\n # operating accruals\n fund['dca'] = fund.act - lag(fund, 'act')\n fund['dcash'] = fund.che - lag(fund, 'che')\n fund['dcl'] = fund.lct - lag(fund, 'lct')\n fund['dstd'] = fund.dlc - lag(fund, 'dlc')\n fund['dtp'] = (fund.txp - lag(fund, 'txp')).fillna(0) # set to 0 if missing\n fund['oa'] = ((fund.dca - fund.dcash) - (fund.dcl - fund.dstd - fund.dtp) - fund.dp) / lag(fund, 'at')\n\n # DAC\n fund['avg_at'] = (fund['at'] + lag(fund, 'at')) / 2\n fund['dsale'] = fund.sale - lag(fund, 'sale')\n fund['drec'] = fund.rect - lag(fund, 'rect')\n fund['dacy'] = fund.oa / fund.avg_at\n fund['dac1'] = 1 / fund.avg_at\n fund['dac2'] = (fund.dsale - fund.drec) / fund.avg_at\n fund['dac3'] = fund.ppegt / fund.avg_at\n\n # extended defintion of accruals\n fund['dwc'] = (fund.wc - lag(fund, 'wc')) / fund.avg_at\n fund['dnco'] = (fund.nco - lag(fund, 'nco')) / fund.avg_at\n fund['dnoa'] = fund.dwc + fund.dnco\n fund['dfin'] 
= (fund.fin - lag(fund, 'fin')) / fund.avg_at\n fund['tacc'] = fund.dwc + fund.dnco + fund.dfin\n fund['tacc2'] = fund.dwc + fund.dnco - fund.dfin\n\n fund = fund[keep].dropna().reset_index()\n print(f'Final rows: {fund.shape[0]} ({fund.shape[0] / total:.2%})')\n return fund",
"def prepare_fatal_infection_data(meta: XLMeta) -> pd.DataFrame:\n\n INFECTION_TO_DEATH_DAYS = 28\n\n # get the raw death data #####################################################\n\n filename = DATA_DIR / meta.workbook\n skiprows = compute_skiprows(meta.start_row, meta.end_row)\n\n df = read_ONS_daily_registrations(workbook=filename, skiprows=skiprows)\n total_deaths = df[meta.region].sum()\n\n # compute fatal infection from death #########################################\n\n # extend dates to allow deaths to be shifted forward by INFECTION_TO_DEATH_DAYS\n idx = pd.date_range(\n df.index[0] - pd.Timedelta(days=INFECTION_TO_DEATH_DAYS), df.index[-1]\n )\n df = df.reindex(idx)\n\n # compute fatal infections\n infections = df[meta.region].shift(periods=-INFECTION_TO_DEATH_DAYS)\n\n # smooth: this discards a small number of infections, so scale back up\n infections_smoothed = infections.rolling(window=7, center=True).mean()\n correction_factor = infections.sum() / infections_smoothed.sum()\n infections_smoothed *= correction_factor\n assert abs(infections_smoothed.sum() / total_deaths) > 0.9999999\n\n # log\n infections_log = np.log10(infections_smoothed)\n\n # construct dataframe\n data = {\n \"deaths (raw)\": df[meta.region],\n \"infections\": infections_smoothed,\n \"infections (log)\": infections_log,\n }\n df = pd.concat(data, axis=1)\n\n return df",
"def clean_dataframe(self, df_in , what = ''): \n \n if what == 'era5fb': # cleaning the era5 feedback only \n df = df_in[np.isfinite(df_in['obsvalue@body'])]\n try: \n df = df.loc[ df['vertco_type@body'] != 2 ] \n except:\n pass\n df = df.reindex()\n df = df[np.isfinite(df['vertco_reference_1@body'])]\n #print('check lengths: ' , len(df_in) , len(df) )\n new_ind = np.array ( range(len(df))) \n df['index'] =new_ind\n df = df.set_index('index')\n \n else: \n ### check if can be optimized ???\n df = df_in.loc[ df_in['z_coordinate_type'] != 2 ] # case where the levels are given in terms of geopotential only (pressure not available)\n \n df = df.loc[ (df['observation_value'] != -99999.0) \n & (df['observation_value'] != -999.0) \n & (df['observation_value'] != -9999) \n & (df['observation_value'] != -9999.0) \n & (df['observation_value'] != -999.9) \n & (df['observation_value'] != -8888 )\n & (df['observation_value'] != -8888.0 )\n \n #& (df['z_coordinate_type'] != 2) \n & (df['z_coordinate'] != -99999.0) \n & (df['z_coordinate'] != -9999.0 )\n & (df['z_coordinate'] != 999 )\n & (df['z_coordinate'] != 999.0 )\n \n \n ] #cleaning the values \n #clean = clean.loc[ (clean['z_coordinate_type'] != 2)] #cleaning the values\n #clean = clean.loc[ (clean['z_coordinate'] != -99999.0 )] #cleaning the values\n \n df = df[np.isfinite(df['observation_value'])] # excluding nan values \n df = df[np.isfinite(df['z_coordinate'])]\n \n return df",
"def tidy_val_df(df_val, m_sample_scm):\n \n df_val = df_val.copy()\n # Set values to 0 prior to isothermal\n # look from min_search_start minutes to min_search_end minutes\n min_search_start = 60 \n min_search_end = 600\n idx_min = np.argmin(\n df_val['Power1,W'].values[min_search_start:min_search_end]) + min_search_start\n if idx_min >= 599:\n idx_min = 0\n# idx_min = 0\n df_val = df_val[idx_min:]\n df_val['Heat1,J'] = df_val['Heat1,J'].apply(lambda x: x - df_val['Heat1,J'].values[0])\n \n # create time in decimal days for RG charts 20180111\n # header names require numbers for cc1 data exported with cc2\n df_val['Power/SCM,W/g'] = df_val['Power1,W'].values / m_sample_scm\n df_val['Heat/SCM,J/g'] = df_val['Heat1,J'].values / m_sample_scm\n# df_val['Tmix,s'] = df_val['Tlog,s'].values + time_difference\n df_val['Tmix,days'] = df_val['Tmix1,s'].values / 86400 # 60 * 60 * 24\n# df_val = df_val.drop('Tlog,s', axis=1) # remove for cc1 data exported with cc2\n\n# rearrange columns to place Tmixs first \n cols = df_val.columns.tolist()\n cols = cols[0:1] + cols[-1:] + cols[1:-1]\n# cols = cols[0:1] + cols[-1:] + cols[1:-1] # For cc1 data exported with cc2\n df_val = df_val[cols]\n \n return df_val",
"def create_dataframe():\n df = pd.read_csv(\"data/311-calls.csv\", parse_dates=[\"created\"])\n df[\"created\"] = df[\"created\"].dt.date\n df.drop(columns=[\"incident_zip\"], inplace=True)\n num_complaints = df[\"complaint_type\"].value_counts()\n to_remove = num_complaints[num_complaints <= 30].index\n df.replace(to_remove, np.nan, inplace=True)\n return df",
"def data_preparation(self) -> None:\n self.logger.info('data cleaning')\n self.logger.info('num of secs: {}, num of ipo_dates: {}, num of secs with prices: {}'.format(\n len(self.data),\n len(self.ipo_dates),\n len(self.prices)\n ))\n excluded = []\n excluded = [i.lower() for i in excluded]\n self.logger.info(f'number of excluded: {len(excluded)}')\n for i in excluded:\n self.data.pop(i)\n for s in self.data:\n # columns with empty assets sum (empty columns and other situations)\n self.data[s].dropna(axis='columns', how='any', subset=['A_0'], inplace=True)\n # columns with descriptions (polish and english names of values)\n self.data[s].drop(self.data[s].columns[[0, 1]], inplace=True, axis=1)\n\n self.logger.info(f'number of secs after cleaning: {len(self.data)}')\n data_list = [k for k in self.data.values()]\n self.uber_data = pd.concat(data_list, ignore_index=True, axis=1)\n self.uber_data = self.uber_data.transpose()\n self.uber_data = self.uber_data.loc[:, pd.notnull(self.uber_data.columns)]",
"def errdump_filter(errdump_aggregated_df):\n\n # mask exclude logs that older than 6 months from collection date\n mask_period = errdump_aggregated_df['Message_date'] >= \\\n errdump_aggregated_df['config_collection_date'] - pd.DateOffset(months=6)\n # mask exclude igonored messages\n mask_not_ignored = errdump_aggregated_df['Message_status'] != 'ignored'\n errdump_filtered_df = errdump_aggregated_df.loc[mask_period & mask_not_ignored].copy()\n\n # External_sequence_number is removed to group devices with equal messages behind the same port\n errdump_grp_columns = ['configname', 'chassis_name', 'chassis_wwn', \n 'Message_date', 'Message_ID', \n 'Security audit flag', 'Severity',\n 'switchName', 'switchWwn', 'Fabric_name', 'Fabric_label',\n 'config_collection_date', 'Message_portIndex', 'Message_portType',\n 'slot', 'port', 'Condition', 'Current_value', 'Dashboard_category',\n 'obj', 'Message_status', 'portIndex', 'Index_slot_port', 'portType', 'portState',\n 'speed', 'tx_port', 'rx_port', 'sid', 'did', 'wwn', 'IP_Address',\n 'Connected_portId', 'Connected_portWwn', 'Device_Host_Name_Port', 'alias', 'deviceType']\n\n errdump_filtered_df = errdump_filtered_df.reindex(columns=errdump_grp_columns)\n \n # join multiple Device_Host_Name_Ports behind one port\n # groupby drop rows with nan values\n errdump_filtered_df.fillna('na_cell', inplace=True)\n errdump_filtered_df = errdump_filtered_df.groupby(by=errdump_grp_columns[:-5]).agg(', '.join)\n errdump_filtered_df.reset_index(inplace=True)\n \n \"\"\"remove duplicate values from Connected_portId and Device_Host_Name_Port cells\n thus all identical events for the device behind the single port with idenitical Message_date\n considered to be one event\"\"\"\n for column in ['Connected_portId', 'Connected_portWwn', 'Device_Host_Name_Port', 'alias', 'deviceType']:\n errdump_filtered_df[column] = errdump_filtered_df[column].str.split(', ').apply(set).str.join(', ')\n\n return errdump_filtered_df",
"def get_campus_entry_leave_times(file):\n #Read the data in to data frame\n df = pd.read_csv(file)\n\n\n #consider only university wifi router address records and split the record_time in to date and time\n df_with_UofS_Wifi = df.loc[df.ssid.isin(['uofs-secure','uofs-public','uofs-guest'])]\n df_with_UofS_Wifi['Date'],df_with_UofS_Wifi['Time'] = zip(*df_with_UofS_Wifi['record_time'].map(lambda x:x.split(' ')))\n\n #Group by Id, Date\n grouped= df_with_UofS_Wifi.groupby(['user_id','Date'])\n\n\n #From the aggreagation get the min, max times i.e campues entry, leave times\n lst_campus_entry_leaving_timings = [(key[0],key[1], min(value['Time']), max(value['Time'])) for (key, value) in grouped.__iter__()]\n\n # create data frame out of three features.\n df = pd.DataFrame(lst_campus_entry_leaving_timings, columns=['ID','Date', 'EntryTime','LeavingTime'])\n df['Time_In_School']= df['EntryTime'] - df['LeavingTime']\n\n return df",
"def _finalize_cells(self):\n # Order by time (as path) and then drilldown dimension value (group)\n # The key[0] is a list of paths: time, another_drilldown\n\n order = lambda left, right: cmp(left[0], right[0])\n cells = self.time_cells.items()\n cells.sort(order)\n\n # compute the current datetime, convert to path\n current_time_path = time_to_path(\n pytz.timezone('UTC').localize(datetime.utcnow()).astimezone(self.browser.timezone).strftime(\"%Y-%m-%d %H:00:00\"), \n self.last_time_level, \n self.time_hierarchy)\n\n self.cells = []\n for key, cell in cells:\n # If we are aggregating at finer granularity than \"all\":\n time_key = key[0]\n if time_key:\n # if time_key ahead of current time path, discard\n if time_key > current_time_path:\n continue\n cell.update(zip(self.time_levels, time_key))\n\n # append the drilldown_on attribute ref\n if self.drilldown_on:\n cell[self.drilldown_on] = self.drilldown_on_value_func(key[1])\n\n self.cells.append(cell)",
"def clean_transactions(transactions):\n\n #Preprocessing\n df = create_datetime_col(transactions)\n\n #Dropping transactions at stations and entry point\n to_drop_transactions = {\"02000\" : \"(09) PLATAFORMA 2 DESALIMENTACI\", \"10000\" : \"(02) DESALIME\",\n \"09000\" : \"DESALIME\", \"05100\" : \"(MA) DESALIMENTACION CASTIL\",\n \"10005\" : \"(04) PISO 2 DESALIME\", \"08000\" : \"(05) DESALIME\",\n \"08100\" : None, \"40000\" : None, \"22000\": None} #22000 is \"Estacion virtual\"\n\n# cols = ['fechaclearing','fechatransaccion','horatransaccion','nombrefase', 'nombreemisor','nombrelinea',\n# 'nombreestacion','nombreaccesoestacion', 'nombredispositivo','nombreperfil','numerotarjeta',\n# 'valor','nombretipotarjeta', 'dt', 'diff_seconds']\n# cols = ['dt', 'nombreestacion','nombreaccesoestacion','numerotarjeta',\n# 'valor' , 'diff_seconds']\n\n for key, value in to_drop_transactions.items():\n df = drop_transactions(df, key, value)\n\n df = drop_transafers(df)\n df = drop_duplicates(df)\n return df",
"def preprocess_raw_data(df):\n def convert_date_to_datetime(_df):\n return _df.assign(Date=pd.to_datetime(_df['Date']))\n\n def fill_missing_miles_with_zero(_df):\n return _df.fillna({'Miles': 0})\n\n def filter_dates_prior_to_today(_df):\n return _df[_df['Date'] < datetime.datetime.today()]\n\n def calculate_rolling_averages(_df):\n _df['MA_10day'] = _df['Miles'].rolling(window=10).mean().fillna(0)\n _df['MA_30day'] = _df['Miles'].rolling(window=30).mean().fillna(0)\n return _df.sort_values('Date')\n\n pipeline = [\n convert_date_to_datetime,\n fill_missing_miles_with_zero,\n filter_dates_prior_to_today,\n calculate_rolling_averages,\n ]\n for func in pipeline:\n df = func(df)\n\n df['date_str_label'] = df['Date'].dt.strftime('%b-%d')\n\n return df",
"def open_to_close_move(df):\n df_temp = df.copy()\n df_temp['close_open'] = 1 - df['close'].shift(1) / df['open']\n df_temp['open_close'] = 1 - df['open'] / df['close']\n df_temp['oc_diff'] = df_temp['open_close'] - df_temp['close_open']\n # print pd.cut(df_temp['close_open'], 10)\n df_temp['close_open'] = df_temp['close_open'].replace(-np.inf, 0)\n df_temp['open_close'] = df_temp['open_close'].replace(-np.inf, 0)\n df_temp['close_open'] = df_temp['close_open'].replace(np.inf, 0)\n df_temp['open_close'] = df_temp['open_close'].replace(np.inf, 0)\n\n df_temp['q-cut'] = pd.qcut(df_temp['close_open'], 5)\n df_temp = df_temp.dropna()\n group = df_temp.groupby('q-cut')\n\n oc_stat = []\n for g in group:\n oc_stat.append({\n 'close_open': [float(n) for n in g[0][1:-1].split(', ')],\n 'open_close': sorted([\n [float(n) for n in b[1:-1].split(', ')]\n for b in pd.qcut(g[1]['oc_diff'], 5).unique()\n ], key=lambda x: x[1])\n })\n\n return oc_stat"
] |
[
"0.6312388",
"0.57322955",
"0.55727816",
"0.5407285",
"0.53056943",
"0.5223147",
"0.5200911",
"0.51957583",
"0.5163521",
"0.5142505",
"0.51417726",
"0.51290923",
"0.51139414",
"0.5108265",
"0.50709075",
"0.5058216",
"0.5057824",
"0.50575924",
"0.50515586",
"0.50307417",
"0.50204635",
"0.5007077",
"0.49369803",
"0.49086708",
"0.4907554",
"0.48882982",
"0.48814097",
"0.48575655",
"0.48427996",
"0.4838358"
] |
0.66307956
|
0
|
Proxy function for the tracking pts logger service. Used to start and stop logging.
|
def tracking_pts_logger_proxy(namespace, cmd, filename):
    srv = '{0}/logging_cmd'.format(namespace)
    proxy = rospy.ServiceProxy(srv, LoggingCmd)
    try:
        resp = proxy(cmd, filename)
        flag = resp.flag
    except rospy.ServiceException:
        flag = False
    return flag
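
# Hypothetical usage sketch (the namespace, command strings, and log path are
# assumptions for illustration, not taken from the original service definition):
#
#     tracking_pts_logger_proxy('/tracking_pts', 'start', '/tmp/tracking_pts.log')
#     ...                        # record data while logging is enabled
#     tracking_pts_logger_proxy('/tracking_pts', 'stop', '/tmp/tracking_pts.log')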
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def on_start(self):\r\n self.log()",
"def _start_logging(self):\n raise NotImplementedException()",
"def start():\n log(\"=========== hook: start ===========\")",
"def setup_logger():\n logger = logging.getLogger('tracking_log')\n logger.setLevel(logging.INFO)\n #Where to Store needs to be identified?\n f_handler = logging.FileHandler(PROCESSED_LOGFILE, mode='a', encoding = None, delay = False)\n f_handler.setLevel(logging.INFO)\n f_format = logging.Formatter('%(asctime)s\\t%(message)s\\t%(dataset_id)s\\t%(status)s')\n f_handler.setFormatter(f_format)\n logger.addHandler(f_handler)\n return logger",
"def startLogging (self):\n self.isLogging = True\n self.startCallback ()",
"def log_start():\n\n scriptDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n scriptName = os.path.splitext(os.path.basename(__file__))[0]\n log = logging.getLogger('cam_server')\n hdlr = logging.FileHandler(scriptDir+'/logs/'+scriptName+'.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n log.addHandler(hdlr)\n log.setLevel(logging.INFO)\n return log",
"def _get_logger(self):",
"def logger(self):\n pass",
"def logged(meth):\n def wrapper(*args):\n print(\"LOGGING {meth} {args}\".format(**locals()))\n return meth(*args) #self, ... other args\n return wrapper",
"def start_access_logging(ContainerName=None):\n pass",
"def _log_some_info(self):\n logging.info('info')",
"def start(self):\n log.startLoggingWithObserver(self.emit, setStdout=0)",
"def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)",
"def __init__(self, logger=logging.getLogger(\"dummy\")):\n super(RemoteObserver, self).__init__()\n self.logger = logger",
"def __init__(self):\n\n self.log = logger.getLogger(name=\"directord\")",
"def _log(fn):\n @wraps(fn)\n def wrapper(self, *args, **kwargs):\n self.log.append(fn.__name__ + ' :: args={} kwargs={}'.format(args, kwargs))\n return fn(self, *args, **kwargs)\n return wrapper",
"def enableCLangLogger(self):",
"def __init__(self, api_path=None, log_path=None, log_level=\"DEBUG\"):\n\n # Construct the log path. \n if log_path:\n self.log_path = log_path\n else:\n defaultlog_path = \"~/Spirent/CTA/Logs/\"\n\n now = datetime.datetime.now()\n defaultlog_path += now.strftime(\"%Y-%m-%d-%H-%M-%S\")\n defaultlog_path += \"_PID\"\n defaultlog_path += str(os.getpid())\n defaultlog_path = os.path.expanduser(defaultlog_path)\n \n # The environment variable overwrites the default path. \n self.log_path = os.getenv(\"CTA_LOG_OUTPUT_DIRECTORY\", defaultlog_path) \n\n self.log_path = os.path.abspath(self.log_path)\n self.logfile = os.path.join(self.log_path, \"cta_python.log\") \n\n if not os.path.exists(self.log_path):\n os.makedirs(self.log_path)\n\n # NOTE: Consider limiting the number of log directories that are created.\n # It would mean deleting older directories.\n\n #16/05/18 11:03:53.717 INFO 3078268608 - user.scripting - stc::get automationoptions -suppressTclErrors\n #16/05/18 11:03:53.717 INFO 3078268608 - user.scripting - return false\n #2016-05-19 14:05:56,382 UserID =mjefferson\n #2016-05-19 14:05:56,382 Log Level=INFO\n\n if log_level == \"CRITICAL\":\n log_level = logging.CRITICAL\n elif log_level == \"ERROR\":\n log_level = logging.ERROR\n elif log_level == \"WARNING\":\n log_level = logging.WARNING\n elif log_level == \"INFO\": \n log_level = logging.INFO\n else:\n # DEBUG is the default log level.\n log_level = logging.DEBUG \n \n logging.basicConfig(filename=self.logfile, filemode=\"w\", level=log_level, format=\"%(asctime)s %(levelname)s %(message)s\")\n #logging.Formatter(fmt='%(asctime)s.%(msecs)03d',datefmt='%Y/%m/%d %H:%M:%S')\n # Add timestamps to each log message.\n #logging.basicConfig()\n # The logger is now ready. \n\n logging.info(\"Spirent TestCenter Conformance Application Python API is starting up...\")\n logging.info(\"OS Type = \" + os.name)\n logging.info(\"API Path = \" + api_path)\n logging.info(\"UserID = \" + getpass.getuser())\n logging.info(\"Log Level = \" + logging.getLevelName(log_level)) \n logging.info(\"Current Path = \" + os.path.abspath(os.getcwd())) \n logging.info(\"Log Path = \" + self.log_path)\n\n # Instantiate the Tcl interpreter.\n self.tcl = Tcl()\n\n self.tcl.eval(\"lappend ::auto_path {\" + api_path + \"}\")\n\n logging.info(\"Tcl Version = \" + self.tcl.eval(\"info patchlevel\"))\n logging.info(\"Tcl ::auto_path = \" + self.tcl.eval('set ::auto_path'))\n logging.info(\"Loading the Spirent TestCenter Conformance Application in the Tcl interpreter...\")\n self.Exec(\"package require SpirentTestCenterConformance\")\n\n return",
"def logStarted(build, step, log):",
"def _log(fn):\n @wraps(fn)\n def wrapper(self, *args, **kwargs):\n a = fn(self, *args, **kwargs)\n self.log.append(fn.__name__ + ' :: args={} kwargs={}'.format(args, kwargs))\n return a\n return wrapper",
"def logging(self):\r\n return None",
"def logtool(self, action, **options):\n pass",
"def __call__(self, *args, **kwargs):\n self.logger.info(*args, **kwargs)",
"def _stop_logging(self):\n raise NotImplementedException()",
"def start_track(self):\n \n self.open()\n fcntl.ioctl(self.f_dev, self.MMLOG_SET_LOG_LEVEL, KERN_ALERT)",
"def on_start(self):\n self.logger.debug(\"Starting...\")\n pass",
"def magic_logstart(self,parameter_s=''):\n\n #FIXME. This function should all be moved to the Logger class.\n \n valid_modes = qw('over backup append rotate')\n if self.LOG:\n print 'Logging is already in place. Logfile:',self.LOG\n return\n\n par = parameter_s.strip()\n if not par:\n logname = self.LOGDEF\n logmode = 'rotate' # use rotate for the auto-generated logs\n else:\n try:\n logname,logmode = par.split()\n except:\n try:\n logname = par\n logmode = 'backup'\n except:\n warn('Usage: @log [log_name [log_mode]]')\n return\n if not logmode in valid_modes:\n warn('Logging NOT activated.\\n'\n 'Usage: @log [log_name [log_mode]]\\n'\n 'Valid modes: '+str(valid_modes))\n return\n\n # If we made it this far, I think we're ok:\n print 'Activating auto-logging.'\n print 'Current session state plus future input saved to:',logname\n print 'Logging mode: ',logmode\n # put logname into rc struct as if it had been called on the command line,\n # so it ends up saved in the log header\n old_logfile = self.rc.opts.get('logfile','') # in case we need to restore it\n logname = os.path.expanduser(logname)\n self.rc.opts.logfile = logname\n self.LOGMODE = logmode # FIXME: this should be set through a function.\n try:\n header = str(self.LOGHEAD)\n self.create_log(header,logname)\n self.logstart(header,logname)\n except:\n self.LOG = '' # we are NOT logging, something went wrong\n self.rc.opts.logfile = old_logfile\n warn(\"Couldn't start log: \"+str(sys.exc_info()[1]))\n else: # log input history up to this point\n self.logfile.write(self.shell.user_ns['_ih'][1:])\n self.logfile.flush()",
"def on_L1(self):\r\n self.log()",
"def do_log(self, arg):\n arg = \" %s :custom log\" % (arg)\n log(arg)",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.log = logging.getLogger(logger_name(__name__))"
] |
[
"0.6483767",
"0.6436041",
"0.62074864",
"0.60591334",
"0.6058873",
"0.5985169",
"0.5953372",
"0.594782",
"0.5884326",
"0.5873426",
"0.58346224",
"0.58311236",
"0.5822701",
"0.58113503",
"0.580145",
"0.5779789",
"0.5722892",
"0.56905645",
"0.5668255",
"0.56460786",
"0.56447643",
"0.56334114",
"0.56327194",
"0.56088173",
"0.55988187",
"0.5588597",
"0.55779654",
"0.5576841",
"0.55740404",
"0.5563505"
] |
0.73594636
|
0
|
Path to the small test database.
|
def db_small_path():
return os.path.join(_here, 'fixtures/databases/db-small/database')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_test_database_path() -> str:\n test_suite_dir = os.path.abspath(os.path.dirname(__file__))\n return os.path.join(test_suite_dir, \"testDb.sqlite\")",
"def db_path_with_improper_files():\n return os.path.join(_here, 'fixtures/databases/db-improper/database')",
"def db_python_only():\n return os.path.join(_here, 'fixtures/databases/db-python-only/database')",
"def get_db_path():\n \n return(db_run.db_abs_path)",
"def get_db_path():\n return os.path.join(sys.path[0], \"my_db.db\")",
"def get_database_directory(self):\n pass",
"def db_file():\n return abspath('vmchecker.db')",
"def getDBPath():\n return os.path.join(CONFIG_DIR, CONFIG_DICT['common']['local_db'])",
"def set_db_file():\n\n return os.path.join(db_path, db_file)",
"def get_database_path():\n\treturn _paths[_DATABASE_PATH_KEY]",
"def db_path(self, host: str) -> str:\n app_path = os.path.abspath(os.getcwd())\n folder = 'data'\n path = os.path.join(app_path, folder)\n return os.path.normpath(os.path.join(path, host))",
"def generate_sqlite_db_path():\n tmp_dir = str(tempfile.mkdtemp())\n abspath = os.path.abspath( # noqa: PTH100\n os.path.join( # noqa: PTH118\n tmp_dir,\n \"sqlite_db\"\n + \"\".join(\n [random.choice(string.ascii_letters + string.digits) for _ in range(8)]\n )\n + \".db\",\n )\n )\n return abspath",
"def get_test_data_path():\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"data\") + os.path.sep)",
"def setupTestDbEnv():\n baseDirPath = setupTmpBaseDir()\n baseDirPath = os.path.join(baseDirPath, \"db/bluepea\")\n os.makedirs(baseDirPath)\n return setupDbEnv(baseDirPath=baseDirPath)",
"def test_data_dir(self):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')",
"def test_get_db_filepath(dbh):\n assert dbh.get_db_filepath() == currentdir + \"/test_data/trading_log.json\"\n\n mock_path = '/tmp/test.json'\n assert dbh.write_data(mock_path)\n assert os.path.isfile(mock_path)\n dbh.read_data(mock_path)\n assert dbh.get_db_filepath() == mock_path",
"def test_data_directory(test_directory):\n return os.path.join(test_directory, \"test_data\")",
"def test_database(client, test_db):\n tester = os.path.exists(\"flaskr.db\")\n assert tester",
"def test_open_database(self):\n self.assertTrue(self.ss.open_database(self.test_database))\n self.assertEqual(self.ss.data_file(), self.test_database)\n self.assertTrue(os.path.exists(self.test_database))\n # database should be created if it doesn't exist\n self.assertTrue(self.ss.open_database(self.alt_database))\n self.assertEqual(self.ss.data_file(), self.alt_database)\n self.assertTrue(os.path.exists(self.alt_database))",
"def test_create_database(self):\n self.assertTrue(self.ss.create_database(self.test_database),\n \"Create testing database\")\n self.assertTrue(os.path.exists(self.test_database))\n self.assertNotEqual(self.test_database, self.ss.data_file(),\n \"Should not connect to created database\")",
"def dburl(\n tmp_path_factory: pytest.TempPathFactory,\n person_data: pandas.DataFrame,\n student_data: pandas.DataFrame,\n school_data: pandas.DataFrame,\n ) -> str:\n path = tmp_path_factory.mktemp('alchemy') / 'test.db'\n url = f'sqlite:///{path.absolute()}'\n connection = sqlalchemy.create_engine(url)\n person_data.to_sql('person', connection, index=False)\n student_data.to_sql('student', connection, index=False)\n school_data.to_sql('school', connection, index=False)\n return url",
"def test_load_database_from_path(tmp_path):\n path = tmp_path / \"test.db\"\n database = load_database(path_or_database=path, fast_logging=False)\n assert isinstance(database, DataBase)\n assert database.path is not None\n assert database.fast_logging is False",
"def test_url():\n return TEST_DATABASE_URL",
"def get_sample_data_dir():\n \n return resource_filename('cdat_lite.test.test_cdms', 'sample_data')",
"def data_dir():\n return os.path.join(os.path.dirname(_here), 'test', 'data')",
"def create_test_db(self, verbosity=1, autoclobber=False):\n # Don't import django.core.management if it isn't needed.\n test_database_name = self._get_test_db_name()\n \n if self.connection.settings_dict.get('ENGINE', '').endswith('.sqlite3')\\\n and test_database_name != ':memory:':\n if os.access(test_database_name, os.F_OK):\n print \"sqlite test database found !\"\n \n #self._create_test_db(verbosity, autoclobber)\n \n self.connection.close()\n self.connection.settings_dict[\"NAME\"] = test_database_name\n \n # Confirm the feature set of the test database\n self.connection.features.confirm()\n \n # Get a cursor (even though we don't need one yet). This has\n # the side effect of initializing the test database.\n self.connection.cursor()\n\n return test_database_name",
"def get_test_path():\n path, name = os.path.split(__file__)\n return os.path.join(path,\"..\", 'test-data')",
"def data_dir():\n return os.path.join(os.path.dirname(__file__), 'test', 'data')",
"def test_database_creation(self):\r\n not_existing_wsp = \"doesnotexist.wsp\"\r\n path = os.path.join(settings.SALMON_WHISPER_DB_PATH, not_existing_wsp)\r\n self.assertEqual(os.path.exists(path), False)\r\n graph.WhisperDatabase(path)\r\n self.assertEqual(os.path.exists(path), True)",
"def __get_location(self) -> str:\n\t\treturn os.getenv('SQLITE_DRIVER_LOCATION', 'db.sqlite')"
] |
[
"0.7880446",
"0.7255324",
"0.71025",
"0.7018129",
"0.6852007",
"0.6844908",
"0.6765523",
"0.6755608",
"0.661674",
"0.66091156",
"0.6603943",
"0.6476906",
"0.6379316",
"0.63708115",
"0.63703257",
"0.6369142",
"0.63338923",
"0.6288185",
"0.6270612",
"0.6251157",
"0.6239606",
"0.6216336",
"0.6200053",
"0.61861664",
"0.6153627",
"0.61511064",
"0.6148542",
"0.61426157",
"0.6120977",
"0.6120811"
] |
0.8609781
|
0
|
Path to the test database with improper files.
|
def db_path_with_improper_files():
return os.path.join(_here, 'fixtures/databases/db-improper/database')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def db_python_only():\n return os.path.join(_here, 'fixtures/databases/db-python-only/database')",
"def get_test_database_path() -> str:\n test_suite_dir = os.path.abspath(os.path.dirname(__file__))\n return os.path.join(test_suite_dir, \"testDb.sqlite\")",
"def db_small_path():\n return os.path.join(_here, 'fixtures/databases/db-small/database')",
"def db_file():\n return abspath('vmchecker.db')",
"def get_db_path():\n \n return(db_run.db_abs_path)",
"def test_410_000_non_existant_db(self):\n with TDC() as temp_dir:\n file = Path(temp_dir) / 'database.db'\n self.assertFalse(file.exists(),'Database file exists pre test')\n eng = Engine(file)\n con = eng.connect()\n self.assertTrue(file.exists(), 'Database file does not exists post test')",
"def set_db_file():\n\n return os.path.join(db_path, db_file)",
"def test_get_db_filepath(dbh):\n assert dbh.get_db_filepath() == currentdir + \"/test_data/trading_log.json\"\n\n mock_path = '/tmp/test.json'\n assert dbh.write_data(mock_path)\n assert os.path.isfile(mock_path)\n dbh.read_data(mock_path)\n assert dbh.get_db_filepath() == mock_path",
"def getDBPath():\n return os.path.join(CONFIG_DIR, CONFIG_DICT['common']['local_db'])",
"def generate_sqlite_db_path():\n tmp_dir = str(tempfile.mkdtemp())\n abspath = os.path.abspath( # noqa: PTH100\n os.path.join( # noqa: PTH118\n tmp_dir,\n \"sqlite_db\"\n + \"\".join(\n [random.choice(string.ascii_letters + string.digits) for _ in range(8)]\n )\n + \".db\",\n )\n )\n return abspath",
"def get_db_path():\n return os.path.join(sys.path[0], \"my_db.db\")",
"def get_database_directory(self):\n pass",
"def db_path(self, host: str) -> str:\n app_path = os.path.abspath(os.getcwd())\n folder = 'data'\n path = os.path.join(app_path, folder)\n return os.path.normpath(os.path.join(path, host))",
"def test_get_filepath(self):\r\n filepath = self.profile.get_filepath('testing.db')\r\n self.assertTrue(filepath.startswith(self.profile_path))",
"def invalid_record_path():\n return os.path.join(_here, 'fixtures/records/invalid.yaml')",
"def test_database_creation(self):\r\n not_existing_wsp = \"doesnotexist.wsp\"\r\n path = os.path.join(settings.SALMON_WHISPER_DB_PATH, not_existing_wsp)\r\n self.assertEqual(os.path.exists(path), False)\r\n graph.WhisperDatabase(path)\r\n self.assertEqual(os.path.exists(path), True)",
"def __test_db_dir(self):\n db_dir_path = self.driftwood.config[\"database\"][\"root\"]\n try:\n # Create db directory\n if not os.path.isdir(db_dir_path):\n self.driftwood.log.info(\"Database\", \"creating database directory\", db_dir_path)\n os.mkdir(db_dir_path)\n except:\n self.driftwood.log.msg(\"FATAL\", \"Database\", \"cannot create database directory\", db_dir_path)\n return False\n\n try:\n # Try openning the dir\n files = os.listdir(db_dir_path)\n except:\n self.driftwood.log.msg(\"FATAL\", \"Database\", \"cannot open database directory\", db_dir_path)\n return False\n\n return True",
"def test_invalid_database_file(self):\n with self.assertRaises(Exception):\n app = App(__file__)",
"def get_database_filename() -> str:\n config_dir = get_config_dir()\n return os.path.join(config_dir, DATABASE_FILENAME)",
"def database_file(file):\r\n fpath = path.join('databases', '{0}'.format(file))\r\n db_path = path.join(mod_path, fpath)\r\n return db_path",
"def test_database(client, test_db):\n tester = os.path.exists(\"flaskr.db\")\n assert tester",
"def test_missing_database_file(self):\n # Technically there's a race condition here, but... I'm not\n # particularly fussed about it.\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n app = App(filename)",
"def test_initiate_tested_path_does_not_exists(self):\n\n expected = {self.file_to_test: {}}\n\n self.inactive_db.database = {}\n self.inactive_db.initiate()\n\n self.assertEqual(expected, self.inactive_db.database)",
"def get_path_db():\n\taiqc_config = get_config()\n\tif aiqc_config is None:\n\t\t# get_config() will print a null condition.\n\t\tpass\n\telse:\n\t\tdb_path = aiqc_config['db_path']\n\t\treturn db_path",
"def test_load_file_does_not_exists(self):\n\n self.inactive_db.load()\n expected = {self.file_to_test: {}}\n\n self.assertEqual(expected, self.inactive_db.database)",
"def default_bug_db(configfile=None):\n\n configs=None\n if not configfile:\n configs=default_configs()\n else:\n configs=configparser.ConfigParser()\n configs.read(configfile)\n\n db_file=os.path.normpath(configs.get(\"bug_db\",\"db_file\"))\n return db_file",
"def test_pep8_conformance_test_db_storage(self):\n pep8s = pep8.StyleGuide(quiet=True)\n result = pep8s.check_files(['tests/test_models/test_engine/\\\ntest_db_storage.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warnings).\")",
"def test_pep8_conformance_test_db_storage(self):\n pep8s = pep8.StyleGuide(quiet=True)\n result = pep8s.check_files(['tests/test_models/test_engine/\\\ntest_db_storage.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warnings).\")",
"def test_creation_when_invalid_database_exists_and_overwrite(self):\n database_filename = \"test.db\"\n\n # Delete the test database if it exists.\n test_database = os.path.join(os.getcwd(), database_filename)\n if os.path.exists(test_database):\n os.remove(test_database)\n\n # Create our pre-existing, _invalid_, database.\n database_creation_statement = \"\"\"\n CREATE TABLE data(\n row_ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n ID VARCHAR,\n Time DATETIME\n );\n \"\"\"\n\n with sqlite3.connect(database_filename) as conn:\n cur = conn.cursor()\n cur.execute(database_creation_statement)\n\n # Generate the database.\n database = app.database.Database(database_filename, overwrite=True)\n database.create_database()\n\n # Pull out the table names from the database we've created.\n column_names = extract_column_names(database_filename)\n\n # Assert that they are as expected:\n for column_name in app.database.database_columns:\n self.assertEqual(\n True,\n column_name in column_names,\n \"Database creation process did not yield the column names expected. Missing: {0}\".format(column_name)\n )",
"def get_database_path():\n\treturn _paths[_DATABASE_PATH_KEY]"
] |
[
"0.75037533",
"0.7484556",
"0.7308964",
"0.72868377",
"0.7141627",
"0.70591956",
"0.68684435",
"0.68129104",
"0.65792465",
"0.65683746",
"0.6477624",
"0.64596736",
"0.64382106",
"0.6410621",
"0.63923675",
"0.63674766",
"0.63598526",
"0.635115",
"0.63402563",
"0.63394076",
"0.6323107",
"0.6315367",
"0.63047653",
"0.6266228",
"0.6199779",
"0.6179275",
"0.6155384",
"0.6155384",
"0.6134213",
"0.6131332"
] |
0.8216641
|
0
|
Path to the Python-only test database.
|
def db_python_only():
return os.path.join(_here, 'fixtures/databases/db-python-only/database')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_test_database_path() -> str:\n test_suite_dir = os.path.abspath(os.path.dirname(__file__))\n return os.path.join(test_suite_dir, \"testDb.sqlite\")",
"def db_path_with_improper_files():\n return os.path.join(_here, 'fixtures/databases/db-improper/database')",
"def get_db_path():\n return os.path.join(sys.path[0], \"my_db.db\")",
"def get_db_path():\n \n return(db_run.db_abs_path)",
"def db_small_path():\n return os.path.join(_here, 'fixtures/databases/db-small/database')",
"def db_file():\n return abspath('vmchecker.db')",
"def getDBPath():\n return os.path.join(CONFIG_DIR, CONFIG_DICT['common']['local_db'])",
"def get_database_directory(self):\n pass",
"def test_database(client, test_db):\n tester = os.path.exists(\"flaskr.db\")\n assert tester",
"def set_db_file():\n\n return os.path.join(db_path, db_file)",
"def generate_sqlite_db_path():\n tmp_dir = str(tempfile.mkdtemp())\n abspath = os.path.abspath( # noqa: PTH100\n os.path.join( # noqa: PTH118\n tmp_dir,\n \"sqlite_db\"\n + \"\".join(\n [random.choice(string.ascii_letters + string.digits) for _ in range(8)]\n )\n + \".db\",\n )\n )\n return abspath",
"def get_database_path():\n\treturn _paths[_DATABASE_PATH_KEY]",
"def setupTestDbEnv():\n baseDirPath = setupTmpBaseDir()\n baseDirPath = os.path.join(baseDirPath, \"db/bluepea\")\n os.makedirs(baseDirPath)\n return setupDbEnv(baseDirPath=baseDirPath)",
"def db_path(self, host: str) -> str:\n app_path = os.path.abspath(os.getcwd())\n folder = 'data'\n path = os.path.join(app_path, folder)\n return os.path.normpath(os.path.join(path, host))",
"def test_get_db_filepath(dbh):\n assert dbh.get_db_filepath() == currentdir + \"/test_data/trading_log.json\"\n\n mock_path = '/tmp/test.json'\n assert dbh.write_data(mock_path)\n assert os.path.isfile(mock_path)\n dbh.read_data(mock_path)\n assert dbh.get_db_filepath() == mock_path",
"def get_test_data_path():\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"data\") + os.path.sep)",
"def test_data_dir(self):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')",
"def locate_db(self):\r\n full_path = os.path.join(APP_DATA, \r\n 'Google\\\\Chrome\\\\User Data\\\\Default\\\\Login Data')\r\n temp_path = os.path.join(APP_DATA,'sqlite_file')\r\n if os.path.exists(temp_path): os.remove(temp_path)\r\n shutil.copyfile(full_path, temp_path)\r\n return full_path",
"def getDBPath(readonly=False):\n path=None\n if sys.platform == 'darwin':\n path = '%s/Library/Application Support/Klip/' % os.getenv(\"HOME\")\n else:\n home = os.getenv('HOME')\n if home is None:\n raise Exception(\"Platform %s not support.\" % sys.platform)\n else:\n path = '%s/.config/klip/' % (os.getenv('HOME'))\n\n if not os.path.exists(path):\n os.mkdir(path)\n path += 'klip.db'\n\n if readonly:\n path += '?mode = ro'\n\n PDEBUG('DB_PATH: %s', path)\n return path",
"def test_url():\n return TEST_DATABASE_URL",
"def test_load_database_from_path(tmp_path):\n path = tmp_path / \"test.db\"\n database = load_database(path_or_database=path, fast_logging=False)\n assert isinstance(database, DataBase)\n assert database.path is not None\n assert database.fast_logging is False",
"def database():\n return conf().database",
"def database_file(file):\r\n fpath = path.join('databases', '{0}'.format(file))\r\n db_path = path.join(mod_path, fpath)\r\n return db_path",
"def test_db(app):\n assert app.config['DATABASE'] == 'sqlite:///:memory:'",
"def test_data_directory(test_directory):\n return os.path.join(test_directory, \"test_data\")",
"def dburl(\n tmp_path_factory: pytest.TempPathFactory,\n person_data: pandas.DataFrame,\n student_data: pandas.DataFrame,\n school_data: pandas.DataFrame,\n ) -> str:\n path = tmp_path_factory.mktemp('alchemy') / 'test.db'\n url = f'sqlite:///{path.absolute()}'\n connection = sqlalchemy.create_engine(url)\n person_data.to_sql('person', connection, index=False)\n student_data.to_sql('student', connection, index=False)\n school_data.to_sql('school', connection, index=False)\n return url",
"def __get_location(self) -> str:\n\t\treturn os.getenv('SQLITE_DRIVER_LOCATION', 'db.sqlite')",
"def get_test_path():\n path, name = os.path.split(__file__)\n return os.path.join(path,\"..\", 'test-data')",
"def test_410_000_non_existant_db(self):\n with TDC() as temp_dir:\n file = Path(temp_dir) / 'database.db'\n self.assertFalse(file.exists(),'Database file exists pre test')\n eng = Engine(file)\n con = eng.connect()\n self.assertTrue(file.exists(), 'Database file does not exists post test')",
"def test_datadir(self):\n self.chck_triple('datadir')"
] |
[
"0.83466035",
"0.7774428",
"0.7629329",
"0.761447",
"0.75845534",
"0.7301873",
"0.7227108",
"0.7013593",
"0.69737864",
"0.69364893",
"0.6922749",
"0.6880376",
"0.6847117",
"0.68102425",
"0.67964154",
"0.667084",
"0.66114485",
"0.65440637",
"0.65155536",
"0.6482962",
"0.64818877",
"0.6469846",
"0.6440745",
"0.6430148",
"0.64050436",
"0.6403923",
"0.63997364",
"0.6383168",
"0.637056",
"0.636461"
] |
0.87227327
|
0
|
Path to the Java CVE record.
|
def java_record_path():
return os.path.join(_here, 'fixtures/records/java-2018-10237.yaml')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_eve_path():\n return '{}\\\\CCP\\\\EVE'.format(get_appdata())",
"def python_record_path():\n return os.path.join(_here, 'fixtures/records/python-2016-10516.yaml')",
"def filenameAsPath(self, app):\n return app.recordingsPath.child(self.filename).path",
"def _filepath(self, filename):\n return os.path.join(self.root, self.version, filename)",
"def get_file_path(obj):\n return PATH + obj.strftime('%d-%B-%Y-comments.txt')",
"def FilePath(self) -> str:",
"def getProgramFile(self) -> java.io.File:\n ...",
"def path(self):\n return self.file_path()",
"def filepath(self):\n return self._filepath.path",
"def file_path(self):\n return self._obs_file()",
"def filepath(self):\n return self.file.path",
"def case_path(self, **case_kws):\n\n return self.data_dir",
"def get_vrc_processed_manifest_path(self) -> Path:\n return self.flow_data_paths.vrc_processed_manifest_path",
"def path(self) -> str:\n return self.src + \"/\"",
"def video_path(self):\n return self.file_path",
"def video_path(self):\n return self.file_path",
"def filepath(self):\n return self.filepath_",
"def path(self):\n return self._data_file",
"def file_path(self) -> global___Expression:",
"def dump_file_path(self) -> str:\n return pulumi.get(self, \"dump_file_path\")",
"def _get_filepath(self) -> str:\n return os.path.join(\n os.sep.join(\n [\n self.period.value,\n 'activities',\n f'activities_{self._dt_string}.json'\n ]\n )\n )",
"def path(self) -> str:\n return pulumi.get(self, \"path\")",
"def path(self) -> str:\n return pulumi.get(self, \"path\")",
"def get_own_cert_path(self):\n# _log.debug(\"get_own_cert_path: node_name={}\".format(self.node_name))\n cert_dir = os.path.join(self.runtime_dir, \"mine\")\n return os.path.join(cert_dir, self.node_id+\".pem\")",
"def filepath(self):\n return self._filepath",
"def filepath(self):\n return self._filepath",
"def path(self):\n return os.path.join(FLOWJS_PATH, self.filename)",
"def file_path(self):\n return self.lib.file_path",
"def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)",
"def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)"
] |
[
"0.62098986",
"0.5912817",
"0.5730348",
"0.55741614",
"0.54508793",
"0.5395311",
"0.5369696",
"0.5326624",
"0.5291892",
"0.52387875",
"0.5238363",
"0.51714534",
"0.5161415",
"0.5160295",
"0.5158992",
"0.5158992",
"0.51548153",
"0.51473683",
"0.5144279",
"0.51361686",
"0.51209354",
"0.51204646",
"0.51204646",
"0.51141",
"0.51131046",
"0.51131046",
"0.5110861",
"0.5106614",
"0.5072592",
"0.5072592"
] |
0.67759377
|
0
|
Path to the Python CVE record.
|
def python_record_path():
return os.path.join(_here, 'fixtures/records/python-2016-10516.yaml')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def java_record_path():\n return os.path.join(_here, 'fixtures/records/java-2018-10237.yaml')",
"def get_eve_path():\n return '{}\\\\CCP\\\\EVE'.format(get_appdata())",
"def filenameAsPath(self, app):\n return app.recordingsPath.child(self.filename).path",
"def _filepath(self, filename):\n return os.path.join(self.root, self.version, filename)",
"def get_file_path(obj):\n return PATH + obj.strftime('%d-%B-%Y-comments.txt')",
"def FilePath(self) -> str:",
"def invalid_record_path():\n return os.path.join(_here, 'fixtures/records/invalid.yaml')",
"def file_path(self):\n return self._obs_file()",
"def filepath(self):\n return self._filepath.path",
"def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)",
"def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)",
"def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)",
"def db_file():\n return abspath('vmchecker.db')",
"def path(self):\n return self._data_file",
"def file_path(self):\n return self.lib.file_path",
"def unparseable_record_path():\n return os.path.join(_here, 'fixtures/records/unparseable.yaml')",
"def aovsPath(self):\n\t\treturn fl.File( self._path + '/aovs.data' )",
"def _generate_vcl_file(stackname, content, key, extension='vcl'):\n with _open(stackname, key, extension=extension, mode='w') as fp:\n fp.write(str(content))\n return '${file(\"%s\")}' % os.path.basename(fp.name)",
"def file_path(self) -> global___Expression:",
"def video_path(self):\n return self.file_path",
"def video_path(self):\n return self.file_path",
"def path(self):\n return self.file_path()",
"def BrocCVSPath(self):\n return self._module.broc_cvspath",
"def get_own_cert_path(self):\n# _log.debug(\"get_own_cert_path: node_name={}\".format(self.node_name))\n cert_dir = os.path.join(self.runtime_dir, \"mine\")\n return os.path.join(cert_dir, self.node_id+\".pem\")",
"def filepath(self):\n return self.file.path",
"def filepath(self):\n return self.filepath_",
"def get_vernissagecmd_path():\n return vernissagecmd_path",
"def get_vrc_processed_manifest_path(self) -> Path:\n return self.flow_data_paths.vrc_processed_manifest_path",
"def dump_file_path(self) -> str:\n return pulumi.get(self, \"dump_file_path\")",
"def get_record_fullpath(\n kapture_dirpath: str = '',\n record_filename: Optional[str] = None) -> str:\n feature_filename = record_filename or ''\n return path_secure(path.join(kapture_dirpath, RECORD_DATA_DIRNAME, feature_filename))"
] |
[
"0.64519113",
"0.6400784",
"0.5669879",
"0.56179076",
"0.55499196",
"0.5471142",
"0.5426507",
"0.5385525",
"0.53746593",
"0.5369389",
"0.5369389",
"0.5369389",
"0.536584",
"0.53491783",
"0.5332675",
"0.5330011",
"0.5306925",
"0.53048134",
"0.5290578",
"0.5289849",
"0.5289849",
"0.5287266",
"0.5247484",
"0.524205",
"0.52203894",
"0.5218567",
"0.5215746",
"0.5194613",
"0.51944596",
"0.5181845"
] |
0.6816917
|
0
|
Path to the invalid CVE record.
|
def invalid_record_path():
return os.path.join(_here, 'fixtures/records/invalid.yaml')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def errpath(self):\n return None",
"def get_eve_path():\n return '{}\\\\CCP\\\\EVE'.format(get_appdata())",
"def path_emergency(self) -> Path:\n return self.path_supervisor / EMERGENCY_DATA",
"def keyfunc(invalid: utils.TraceTuple) -> Path:\n traces_path = invalid.traces_path\n assert isinstance(traces_path, Path)\n return traces_path",
"def reason(self) -> InvalidS3Path.Reason:\n return self._reason",
"def get_error_file(self):\n pass",
"def error_file(self) -> 'outputs.FileMetadataResponse':\n return pulumi.get(self, \"error_file\")",
"def unparseable_record_path():\n return os.path.join(_here, 'fixtures/records/unparseable.yaml')",
"def incorrect_card_path() -> str:\n\n with path(\"music_flash_cards.cards\", \"Incorrect.png\") as card_path:\n return str(card_path)",
"def path_extern_emergency(self) -> PurePath:\n return self.path_extern_supervisor / EMERGENCY_DATA",
"def instrument_fail(self, req, where):\n\n if where in req[\"file_details\"][\"backend_filename\"]:\n raise Exception(\"Instrumented Failure: %s\" % where)",
"def error_file(self) -> pulumi.Output['outputs.FileMetadataResponse']:\n return pulumi.get(self, \"error_file\")",
"def yaml_file_must_exist(cls, v: pathlib.Path):\n if not v.exists():\n raise ValueError(f\"Path object not found in filesystem : {v}\")\n return v",
"def validate_path_exists(v: pathlib.Path, dt: pathlib.Path) -> pathlib.Path:\n p = pathlib.Path(dt).parent/v\n if not p.exists():\n raise ValueError(f\"Path object does not exist on disk : {p}\")\n return p",
"def errorpath():\n stdoutfile=pdbid()+\".error.log\"\n stdout = os.path.join(output_dir(), stdoutfile)\n\n return stdout",
"def errfile(self):\n\n return f\"{self.name}.err.out\"",
"async def client_path_error() -> web.Response:\n html = get_client_file_error_template().render()\n return web.Response(body=html, content_type=\"text/html\")",
"def test_vmcp_file_not_found(self):\r\n res = self.app.get('api/vmcp', follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 501, err\r\n assert err['status_code'] == 501, err\r\n assert err['status'] == \"failed\", err\r\n assert err['target'] == \"vmcp\", err\r\n assert err['action'] == \"GET\", err",
"def get_vernissagecmd_path():\n return vernissagecmd_path",
"def stderr_path(self):\n return self.log_path\n # return self.path / 'stderr.txt'",
"def failure_template_path(self) -> ConfigNodePropertyString:\n return self._failure_template_path",
"def errpath(filename, tree, elm):\n if elm is None:\n return \"\"\n path = \"\"\n if 'class' in elm.attrib:\n path += elm.attrib['class']\n oid = elm.attrib.get('id')\n if oid is not None:\n oid = oid.encode('ascii','ignore').decode('ascii')\n path = \"//\" + path + \"[@id='%s']\" % oid\n else:\n if lxml:\n elm = elm.getparent()\n while elm is not None:\n step = step_elm(elm)\n path = step + path\n elm = elm.getparent()\n else:\n path = find_elm(tree.getroot(), elm)[:-1]\n path = filename + ':' + path\n return path",
"def verify_non_existing_path(self) -> None:\n path = \"/some/non/existing/path\"\n with self.assertRaises(NotFoundException):\n verify_file_path(path)",
"def check_path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"check_path\")",
"def _get_vul(self):\n if (self.cve):\n return u\"CVE-%s\" % self.cve \n else:\n return u\"%s\" % self.cert_id",
"def path_validate(path):\n # functionality to be added later\n return path",
"def validation_path(self):\n return self._validation_path",
"def file_corruption(self):\n return self._file_corrupt",
"def recovery_file_name():\n return \".failed_download_batches.json\"",
"def get_vrc_processed_manifest_path(self) -> Path:\n return self.flow_data_paths.vrc_processed_manifest_path"
] |
[
"0.66394675",
"0.5727181",
"0.5622843",
"0.5510485",
"0.54649556",
"0.545422",
"0.54506046",
"0.5437321",
"0.5424419",
"0.53868675",
"0.53067327",
"0.5286599",
"0.5256299",
"0.524939",
"0.51857644",
"0.5179442",
"0.5176023",
"0.5119216",
"0.50469637",
"0.5045826",
"0.5044293",
"0.5012337",
"0.50088036",
"0.4998419",
"0.49779958",
"0.49704224",
"0.49694222",
"0.49503782",
"0.49480292",
"0.49456206"
] |
0.68413717
|
0
|
Path to the unparseable CVE record.
|
def unparseable_record_path():
return os.path.join(_here, 'fixtures/records/unparseable.yaml')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_eve_path():\n return '{}\\\\CCP\\\\EVE'.format(get_appdata())",
"def invalid_record_path():\n return os.path.join(_here, 'fixtures/records/invalid.yaml')",
"def get_vernissagecmd_path():\n return vernissagecmd_path",
"def filenameAsPath(self, app):\n return app.recordingsPath.child(self.filename).path",
"def python_record_path():\n return os.path.join(_here, 'fixtures/records/python-2016-10516.yaml')",
"def filepath(self):\n return self._filepath.path",
"def video_path(self):\n return self.file_path",
"def video_path(self):\n return self.file_path",
"def video_path(self):\n return os.path.splitext(self.file_path)[0]",
"def _filepath(self, filename):\n return os.path.join(self.root, self.version, filename)",
"def path(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:",
"def file_path(self) -> global___Expression:",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")",
"def field_path(self) -> str:\n return pulumi.get(self, \"field_path\")"
] |
[
"0.6018208",
"0.5807608",
"0.55535054",
"0.55206513",
"0.54571867",
"0.5447501",
"0.54112256",
"0.54112256",
"0.5389032",
"0.5367391",
"0.52851504",
"0.5257984",
"0.52340305",
"0.52340305",
"0.52340305",
"0.52340305",
"0.52340305",
"0.52340305",
"0.52340305",
"0.52340305",
"0.52340305",
"0.52340305",
"0.52340305",
"0.52340305",
"0.52340305",
"0.52340305",
"0.52340305",
"0.52340305",
"0.52340305",
"0.52340305"
] |
0.6446508
|
0
|
Git URL with YAML data.
|
def git_url():
return "https://github.com/tisnik/victimsdb-sample-data.git"
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_api_url(self):\n\n url = 'https://api.{}/repos/{}/{}/git/'.format(HOST_GITHUB, \\\n self.repo, self.product)\n return url",
"def format_url(self, data):\n git_url = urlparse(data[\"git_url\"])\n\n url = \"oauth2:{0}@{1}\".format(data[\"token\"], git_url.netloc)\n return git_url._replace(netloc=url).geturl()",
"def set_git_url(context, url):\n context.url = url",
"def build_url(cls, config, namespace, name):\n return \"hxxp://mock.repo.url/\" + namespace + \"/\" + name + \".git\"",
"def git_url(fp: str) -> str:\n return f\"https://github.com/pantsbuild/pants/blob/release_{PANTS_SEMVER}/{fp}\"",
"def getProjectURL():",
"def build_gitlab_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://gitlab.com/{namespace}/{name}\".format(namespace=namespace, name=name)\n\n if version:\n url = \"{url}/-/tree/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url",
"def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))",
"def create_link(repository, project_name):\n beginning_url = \"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return base_url",
"def get_url_tag_commit(self, git_sha):\n\n url = 'https://{}/{}/{}/commit/{}'.format(\n HOST_GITHUB,\n self.repo,\n self.product,\n git_sha\n )\n return url",
"def get_url(self):\n return (\n \"https://raw.githubusercontent.com\"\n \"/benoitbryon/django-downloadview\"\n \"/b7f660c5e3f37d918b106b02c5af7a887acc0111\"\n \"/demo/demoproject/download/fixtures/hello-world.txt\"\n )",
"async def _api_url(self) -> URL:\n return await self._gitlab_api_url(\"\")",
"def get_from_git(project, obj, params={}, verbose=0):\n\n url = \"%s%s/raw/%s\" % (GIT_URL, project, obj)\n return load_yaml(requester(url, params=params,\n headers={'Accept': 'application/json'},\n verbose=verbose).text)",
"def github_url(self):\n return self.github.replace('.git', '')",
"def test_giturl_missing(self):\r\n response = self.client.get(self.test_url)\r\n self.assertEqual(200, response.status_code)\r\n self.assertIn(\r\n ('giturl must be defined in your '\r\n 'course settings before you can export to git.'),\r\n response.content\r\n )\r\n\r\n response = self.client.get('{}?action=push'.format(self.test_url))\r\n self.assertEqual(200, response.status_code)\r\n self.assertIn(\r\n ('giturl must be defined in your '\r\n 'course settings before you can export to git.'),\r\n response.content\r\n )",
"def git():\n pass",
"def _transform_github_url(self):\n self.url = (\n self.url\n .replace('/blob/', '/')\n .replace(self.GITHUB_NETLOC, self.GITHUB_RAW_NETLOC)\n )",
"def getBuildbotURL():",
"def repo_url(self):\n return self._repo_url",
"def __init__(self, url='https://gitlab.com'):\n self._url = url\n self._gitlab = None",
"def repo_link(repo):\n return \"https://github.com/\" + repo",
"def git_remote_url(self):\n return self._git_remote_url",
"def config_url(config):\n if 'url' not in config:\n raise Exception('The config file does not contain \"url\"')\n return config['url']",
"def _get_base_url(self):\n template = config.baseurl_template\n # get distro name and arch\n base_url = template.format(\n host=config.gitbuilder_host,\n proj=self.project,\n pkg_type=self.pkg_type,\n arch=self.arch,\n dist=self.distro,\n flavor=self.flavor,\n uri=self.uri_reference,\n )\n return base_url",
"def get_config_from_remote_git(git_url):\n raise ConfigError('%s is an URL to a git repo but this functionality is '\n 'currently unsupported' % (git_url))",
"def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return f\"{self.fork.github_url}/{test_type}/{test_id}\"",
"def test_fix_repo_url():\n repo_url_git = 'git://github.com/Tinche/bower-cache'\n repo_url_https = 'https://github.com/Tinche/bower-cache'\n fixed_url_https = 'https://:@github.com/Tinche/bower-cache'\n assert repo_url_git == gitwrapper._fix_repo_url(repo_url_git)\n assert fixed_url_https == gitwrapper._fix_repo_url(repo_url_https)",
"def test_repo_field_values_git(self):\n fields = self._get_repository_fields('Git', fields={\n 'beanstalk_account_domain': 'mydomain',\n 'beanstalk_repo_name': 'myrepo',\n })\n self.assertEqual(\n fields['path'],\n '[email protected]:/mydomain/myrepo.git')\n self.assertEqual(\n fields['mirror_path'],\n 'https://mydomain.git.beanstalkapp.com/myrepo.git')",
"def get_repo_url(repo, access_protocol, github_login):\n prop = {\n 'https': repo.clone_url,\n 'ssh': repo.ssh_url\n }[access_protocol]\n if access_protocol == 'https' and github_login:\n # we were provided explicit github login. For ssh access it is\n # impossible to specify different login within ssh RI, but it is\n # possible to do so for https logins\n url = URL(prop)\n assert url.scheme in ('http', 'https')\n url.username = github_login\n prop = url.as_str()\n return prop",
"def build_github_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://github.com/{namespace}/{name}\".format(namespace=namespace, name=name)\n\n if version:\n url = \"{url}/tree/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url"
] |
[
"0.6493995",
"0.6401901",
"0.63599044",
"0.6350393",
"0.623588",
"0.6124202",
"0.5908427",
"0.5831547",
"0.5816461",
"0.57776284",
"0.5769506",
"0.57230633",
"0.5700079",
"0.56966275",
"0.5680488",
"0.5672194",
"0.5633531",
"0.5631661",
"0.56257445",
"0.56098515",
"0.5595114",
"0.5590184",
"0.5588248",
"0.55766106",
"0.55532455",
"0.5523891",
"0.55182683",
"0.5496051",
"0.5495844",
"0.5479309"
] |
0.7103082
|
0
|
Mock hue entry setup.
|
def hue_setup_fixture():
with patch("homeassistant.components.hue.async_setup_entry", return_value=True):
yield
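
# Hypothetical usage sketch (the test name and body are assumptions): any test that
# requests this fixture runs with hue.async_setup_entry patched to return True, so a
# config flow can be exercised without a real Hue bridge, e.g.
#
#     async def test_user_flow_creates_entry(hass, hue_setup_fixture):
#         ...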
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_colormap_single_hue():\n with TestingCanvas(size=size, bgcolor='w') as c:\n idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)\n data = idata.reshape((size[0], size[1]))\n image = Image(cmap=get_colormap('single_hue', 255),\n clim='auto', parent=c.scene)\n image.set_data(data)\n assert_image_approved(c.render(), \"visuals/colormap_hue.png\")",
"async def test_hue_bridge_setup(hass):\n accessories = await setup_accessories_from_file(hass, \"hue_bridge.json\")\n config_entry, pairing = await setup_test_accessories(hass, accessories)\n\n entity_registry = await hass.helpers.entity_registry.async_get_registry()\n\n # Check that the battery is correctly found and set up\n battery_id = \"sensor.hue_dimmer_switch_battery\"\n battery = entity_registry.async_get(battery_id)\n assert battery.unique_id == \"homekit-6623462389072572-644245094400\"\n\n battery_helper = Helper(\n hass, \"sensor.hue_dimmer_switch_battery\", pairing, accessories[0], config_entry\n )\n battery_state = await battery_helper.poll_and_get_state()\n assert battery_state.attributes[\"friendly_name\"] == \"Hue dimmer switch Battery\"\n assert battery_state.attributes[\"icon\"] == \"mdi:battery\"\n assert battery_state.state == \"100\"\n\n device_registry = await hass.helpers.device_registry.async_get_registry()\n\n device = device_registry.async_get(battery.device_id)\n assert device.manufacturer == \"Philips\"\n assert device.name == \"Hue dimmer switch\"\n assert device.model == \"RWL021\"\n assert device.sw_version == \"45.1.17846\"",
"async def init_integration(\n hass: HomeAssistant, mock_config_entry: MockConfigEntry, mock_tailscale: MagicMock\n) -> MockConfigEntry:\n mock_config_entry.add_to_hass(hass)\n\n await hass.config_entries.async_setup(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n return mock_config_entry",
"async def test_setup_entry(\n hass: HomeAssistant, client: MagicMock, integration: MockConfigEntry\n) -> None:\n assert integration.state == ConfigEntryState.LOADED\n await hass.config_entries.async_unload(integration.entry_id)\n assert integration.state == ConfigEntryState.NOT_LOADED",
"def setUp(self):\n self.hass = get_test_home_assistant()",
"def setUp(self):\n self.hass = get_test_home_assistant()",
"async def setup_mocked_integration(hass: HomeAssistant) -> MockConfigEntry:\n\n mock_config_entry = MockConfigEntry(**FIXTURE_CONFIG_ENTRY)\n mock_config_entry.add_to_hass(hass)\n\n assert await hass.config_entries.async_setup(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n return mock_config_entry",
"async def test_async_setup_entry(hass):\n await init_integration(hass)\n\n state = hass.states.get(\"air_quality.home\")\n assert state is not None\n assert state.state != STATE_UNAVAILABLE\n assert state.state == \"14\"",
"def setUp(self):\n self.hass = get_test_home_assistant()\n controller_mock = mock.MagicMock()\n dev_dict = {\"address\": \"a1\", \"name\": \"fake_light\", \"brightness_levels\": 32}\n self.light = mochad.MochadLight(self.hass, controller_mock, dev_dict)",
"def setUp(self):\n self.hass = get_test_home_assistant()\n controller_mock = mock.MagicMock()\n dev_dict = {\"address\": \"a1\", \"name\": \"fake_light\", \"brightness_levels\": 256}\n self.light = mochad.MochadLight(self.hass, controller_mock, dev_dict)",
"def setUp(self):\n self.hass = get_test_home_assistant()\n controller_mock = mock.MagicMock()\n dev_dict = {\"address\": \"a1\", \"name\": \"fake_light\", \"brightness_levels\": 64}\n self.light = mochad.MochadLight(self.hass, controller_mock, dev_dict)",
"def create_entry(hass: HomeAssistant) -> MockConfigEntry:\n entry = MockConfigEntry(\n domain=DOMAIN,\n data={\n CONF_URL: URL,\n CONF_API_KEY: API_KEY,\n CONF_VERIFY_SSL: False,\n },\n )\n\n entry.add_to_hass(hass)\n return entry",
"def mock_config_entry() -> MockConfigEntry:\n return MockConfigEntry(\n title=\"homeassistant.github\",\n domain=DOMAIN,\n data={CONF_TAILNET: \"homeassistant.github\", CONF_API_KEY: \"tskey-MOCK\"},\n unique_id=\"homeassistant.github\",\n )",
"async def test_unload_entry(_, hass: HomeAssistant) -> None:\n mock_entry_data = {\n \"device\": \"/dev/USB0\",\n \"model\": \"LUGCUH50\",\n \"device_number\": \"12345\",\n }\n mock_entry = MockConfigEntry(\n domain=\"landisgyr_heat_meter\",\n title=\"LUGCUH50\",\n entry_id=\"987654321\",\n data=mock_entry_data,\n )\n mock_entry.add_to_hass(hass)\n\n assert await hass.config_entries.async_setup(mock_entry.entry_id)\n await hass.async_block_till_done()\n assert \"landisgyr_heat_meter\" in hass.config.components\n\n assert await hass.config_entries.async_remove(mock_entry.entry_id)",
"def setup_method(self):\n self.hass = get_test_home_assistant()",
"async def test_creating_entry_removes_entries_for_same_host_or_bridge(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"2.2.2.2\", \"id-1234\")])\n orig_entry = MockConfigEntry(\n domain=\"hue\",\n data={\"host\": \"0.0.0.0\", \"api_key\": \"123456789\"},\n unique_id=\"id-1234\",\n )\n orig_entry.add_to_hass(hass)\n\n MockConfigEntry(\n domain=\"hue\",\n data={\"host\": \"1.2.3.4\", \"api_key\": \"123456789\"},\n unique_id=\"id-5678\",\n ).add_to_hass(hass)\n\n assert len(hass.config_entries.async_entries(\"hue\")) == 2\n\n result = await hass.config_entries.flow.async_init(\n \"hue\",\n data={\"host\": \"2.2.2.2\"},\n context={\"source\": config_entries.SOURCE_IMPORT},\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"\n\n with patch(\n \"homeassistant.components.hue.config_flow.create_app_key\",\n return_value=\"123456789\",\n ), patch(\"homeassistant.components.hue.async_unload_entry\", return_value=True):\n result = await hass.config_entries.flow.async_configure(result[\"flow_id\"], {})\n\n assert result[\"type\"] == \"create_entry\"\n assert result[\"title\"] == \"Hue Bridge id-1234\"\n assert result[\"data\"] == {\n \"host\": \"2.2.2.2\",\n \"api_key\": \"123456789\",\n \"api_version\": 1,\n }\n entries = hass.config_entries.async_entries(\"hue\")\n assert len(entries) == 2\n new_entry = entries[-1]\n assert orig_entry.entry_id != new_entry.entry_id\n assert new_entry.unique_id == \"id-1234\"",
"def test_set_up(self):\n # set_up has been called for the test suite\n # (see runtests) so there is not much that\n # can be tested\n with patch('react.utils.shortcuts.set_up', autospec=True) as s:\n utils.set_up()\n s.assert_called_once_with()",
"async def test_setup_entry(hass):\n await setup_axis_integration(hass)\n assert len(hass.data[AXIS_DOMAIN]) == 1\n assert format_mac(MAC) in hass.data[AXIS_DOMAIN]",
"async def setup_bridge(hass, mock_bridge, hostname=None):\n if hostname is None:\n hostname = 'mock-host'\n hass.config.components.add(hue.DOMAIN)\n hass.data[hue.DOMAIN] = {hostname: mock_bridge}\n config_entry = config_entries.ConfigEntry(1, hue.DOMAIN, 'Mock Title', {\n 'host': hostname\n }, 'test', config_entries.CONN_CLASS_LOCAL_POLL)\n await hass.config_entries.async_forward_entry_setup(\n config_entry, 'binary_sensor')\n await hass.config_entries.async_forward_entry_setup(\n config_entry, 'sensor')\n # and make sure it completes before going further\n await hass.async_block_till_done()",
"async def test_migrate_entry(_, hass: HomeAssistant) -> None:\n\n mock_entry_data = {\n \"device\": \"/dev/USB0\",\n \"model\": \"LUGCUH50\",\n \"device_number\": \"12345\",\n }\n mock_entry = MockConfigEntry(\n domain=\"landisgyr_heat_meter\",\n title=\"LUGCUH50\",\n entry_id=\"987654321\",\n data=mock_entry_data,\n )\n assert mock_entry.data == mock_entry_data\n assert mock_entry.version == 1\n\n mock_entry.add_to_hass(hass)\n\n # Create entity entry to migrate to new unique ID\n registry = er.async_get(hass)\n registry.async_get_or_create(\n SENSOR_DOMAIN,\n LANDISGYR_HEAT_METER_DOMAIN,\n \"landisgyr_heat_meter_987654321_measuring_range_m3ph\",\n suggested_object_id=\"heat_meter_measuring_range\",\n config_entry=mock_entry,\n )\n\n assert await hass.config_entries.async_setup(mock_entry.entry_id)\n await hass.async_block_till_done()\n assert \"landisgyr_heat_meter\" in hass.config.components\n\n # Check if entity unique id is migrated successfully\n assert mock_entry.version == 2\n entity = registry.async_get(\"sensor.heat_meter_measuring_range\")\n assert entity.unique_id == \"12345_measuring_range_m3ph\"",
"async def test_configuring_sonos_creates_entry(hass: HomeAssistant) -> None:\n with patch(\n \"homeassistant.components.sonos.async_setup_entry\",\n return_value=True,\n ) as mock_setup:\n await async_setup_component(\n hass,\n sonos.DOMAIN,\n {\"sonos\": {\"media_player\": {\"interface_addr\": \"127.0.0.1\"}}},\n )\n await hass.async_block_till_done()\n\n assert len(mock_setup.mock_calls) == 1",
"def setUp(self):\n self.hass = get_test_home_assistant()\n self.key = \"foo\"\n self.lat = self.hass.config.latitude = 37.8267\n self.lon = self.hass.config.longitude = -122.423\n self.entities = []\n self.addCleanup(self.tear_down_cleanup)",
"async def async_setup_entry(\n hass: HomeAssistant,\n config_entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback,\n) -> None:\n\n @callback\n def async_add_cover(coordinator: SwitcherDataUpdateCoordinator) -> None:\n \"\"\"Add cover from Switcher device.\"\"\"\n if coordinator.data.device_type.category == DeviceCategory.SHUTTER:\n async_add_entities([SwitcherCoverEntity(coordinator)])\n\n config_entry.async_on_unload(\n async_dispatcher_connect(hass, SIGNAL_DEVICE_ADD, async_add_cover)\n )",
"async def test_entry_diagnostics(\n hass: HomeAssistant,\n config_entry,\n hass_client: ClientSessionGenerator,\n setup_config_entry,\n) -> None:\n assert await get_diagnostics_for_config_entry(hass, hass_client, config_entry) == {\n \"entry\": {\n \"entry_id\": config_entry.entry_id,\n \"version\": 2,\n \"domain\": \"ridwell\",\n \"title\": REDACTED,\n \"data\": {\"username\": REDACTED, \"password\": REDACTED},\n \"options\": {},\n \"pref_disable_new_entities\": False,\n \"pref_disable_polling\": False,\n \"source\": \"user\",\n \"unique_id\": REDACTED,\n \"disabled_by\": None,\n },\n \"data\": [\n {\n \"_async_request\": None,\n \"event_id\": \"event_123\",\n \"pickup_date\": {\n \"__type\": \"<class 'datetime.date'>\",\n \"isoformat\": \"2022-01-24\",\n },\n \"pickups\": [\n {\n \"name\": \"Plastic Film\",\n \"offer_id\": \"offer_123\",\n \"priority\": 1,\n \"product_id\": \"product_123\",\n \"quantity\": 1,\n \"category\": {\n \"__type\": \"<enum 'PickupCategory'>\",\n \"repr\": \"<PickupCategory.STANDARD: 'standard'>\",\n },\n }\n ],\n \"state\": {\n \"__type\": \"<enum 'EventState'>\",\n \"repr\": \"<EventState.INITIALIZED: 'initialized'>\",\n },\n }\n ],\n }",
"async def test_manual_flow_works(hass: HomeAssistant) -> None:\n disc_bridge = get_discovered_bridge(bridge_id=\"id-1234\", host=\"2.2.2.2\")\n\n MockConfigEntry(\n domain=\"hue\", source=config_entries.SOURCE_IGNORE, unique_id=\"bla\"\n ).add_to_hass(hass)\n\n with patch(\n \"homeassistant.components.hue.config_flow.discover_nupnp\",\n return_value=[disc_bridge],\n ):\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"init\"\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={\"id\": \"manual\"}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"manual\"\n\n with patch.object(config_flow, \"discover_bridge\", return_value=disc_bridge):\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], {\"host\": \"2.2.2.2\"}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"\n\n with patch.object(config_flow, \"create_app_key\", return_value=\"123456789\"), patch(\n \"homeassistant.components.hue.async_unload_entry\", return_value=True\n ):\n result = await hass.config_entries.flow.async_configure(result[\"flow_id\"], {})\n\n assert result[\"type\"] == \"create_entry\"\n assert result[\"title\"] == f\"Hue Bridge {disc_bridge.id}\"\n assert result[\"data\"] == {\n \"host\": \"2.2.2.2\",\n \"api_key\": \"123456789\",\n \"api_version\": 1,\n }\n entries = hass.config_entries.async_entries(\"hue\")\n assert len(entries) == 2\n entry = entries[-1]\n assert entry.unique_id == \"id-1234\"",
"def mock_setup_entry() -> Generator[AsyncMock, None, None]:\n with patch(\n \"homeassistant.components.tailscale.async_setup_entry\", return_value=True\n ) as mock_setup:\n yield mock_setup",
"async def test_color_light(\n hass: HomeAssistant, bulb: MagicMock, transition: float | None\n) -> None:\n already_migrated_config_entry = MockConfigEntry(\n domain=DOMAIN, data={}, unique_id=MAC_ADDRESS\n )\n already_migrated_config_entry.add_to_hass(hass)\n bulb.color_temp = None\n with _patch_discovery(device=bulb), _patch_single_discovery(device=bulb):\n await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})\n await hass.async_block_till_done()\n\n entity_id = \"light.my_bulb\"\n KASA_TRANSITION_VALUE = transition * 1_000 if transition is not None else None\n\n BASE_PAYLOAD = {ATTR_ENTITY_ID: entity_id}\n if transition:\n BASE_PAYLOAD[ATTR_TRANSITION] = transition\n\n state = hass.states.get(entity_id)\n assert state.state == \"on\"\n attributes = state.attributes\n assert attributes[ATTR_BRIGHTNESS] == 128\n assert attributes[ATTR_COLOR_MODE] == \"hs\"\n assert attributes[ATTR_SUPPORTED_COLOR_MODES] == [\"brightness\", \"color_temp\", \"hs\"]\n assert attributes[ATTR_MIN_MIREDS] == 111\n assert attributes[ATTR_MAX_MIREDS] == 250\n assert attributes[ATTR_HS_COLOR] == (10, 30)\n assert attributes[ATTR_RGB_COLOR] == (255, 191, 178)\n assert attributes[ATTR_XY_COLOR] == (0.42, 0.336)\n\n await hass.services.async_call(\n LIGHT_DOMAIN, \"turn_off\", BASE_PAYLOAD, blocking=True\n )\n bulb.turn_off.assert_called_once_with(transition=KASA_TRANSITION_VALUE)\n\n await hass.services.async_call(LIGHT_DOMAIN, \"turn_on\", BASE_PAYLOAD, blocking=True)\n bulb.turn_on.assert_called_once_with(transition=KASA_TRANSITION_VALUE)\n bulb.turn_on.reset_mock()\n\n await hass.services.async_call(\n LIGHT_DOMAIN,\n \"turn_on\",\n {**BASE_PAYLOAD, ATTR_BRIGHTNESS: 100},\n blocking=True,\n )\n bulb.set_brightness.assert_called_with(39, transition=KASA_TRANSITION_VALUE)\n bulb.set_brightness.reset_mock()\n\n await hass.services.async_call(\n LIGHT_DOMAIN,\n \"turn_on\",\n {**BASE_PAYLOAD, ATTR_COLOR_TEMP_KELVIN: 6666},\n blocking=True,\n )\n bulb.set_color_temp.assert_called_with(\n 6666, brightness=None, transition=KASA_TRANSITION_VALUE\n )\n bulb.set_color_temp.reset_mock()\n\n await hass.services.async_call(\n LIGHT_DOMAIN,\n \"turn_on\",\n {**BASE_PAYLOAD, ATTR_COLOR_TEMP_KELVIN: 6666},\n blocking=True,\n )\n bulb.set_color_temp.assert_called_with(\n 6666, brightness=None, transition=KASA_TRANSITION_VALUE\n )\n bulb.set_color_temp.reset_mock()\n\n await hass.services.async_call(\n LIGHT_DOMAIN,\n \"turn_on\",\n {**BASE_PAYLOAD, ATTR_HS_COLOR: (10, 30)},\n blocking=True,\n )\n bulb.set_hsv.assert_called_with(10, 30, None, transition=KASA_TRANSITION_VALUE)\n bulb.set_hsv.reset_mock()",
"async def init_integration(\n hass: HomeAssistant,\n *,\n data: dict = ENTRY_CONFIG,\n skip_entry_setup: bool = False,\n) -> MockConfigEntry:\n entry = MockConfigEntry(domain=DOMAIN, data=data)\n entry.add_to_hass(hass)\n\n if not skip_entry_setup:\n await hass.config_entries.async_setup(entry.entry_id)\n await hass.async_block_till_done()\n\n return entry",
"def mock_config_entry() -> MockConfigEntry:\n return MockConfigEntry(\n title=\"Home\",\n domain=DOMAIN,\n data={CONF_ZONE: \"zone.home\"},\n unique_id=\"zone.home\",\n )",
"async def test_setup(hass, setup_sensor):\n state = hass.states.get(\"sensor.ethereum\")\n assert state is not None\n\n assert state.name == \"Ethereum\"\n assert state.state == \"493.455\"\n assert state.attributes.get(\"symbol\") == \"ETH\"\n assert state.attributes.get(\"unit_of_measurement\") == \"EUR\""
] |
[
"0.6190617",
"0.59994584",
"0.5958978",
"0.59334356",
"0.585053",
"0.585053",
"0.5840002",
"0.57558155",
"0.57501155",
"0.5749036",
"0.57405436",
"0.57301694",
"0.57248044",
"0.56999147",
"0.5684357",
"0.5675351",
"0.56750894",
"0.5665969",
"0.5647237",
"0.56080663",
"0.55635846",
"0.55428016",
"0.55277133",
"0.5511808",
"0.54837525",
"0.5471993",
"0.5455128",
"0.5432463",
"0.541754",
"0.5413556"
] |
0.70570976
|
0
|
Return a mocked Discovered Bridge.
|
def get_discovered_bridge(bridge_id="aabbccddeeff", host="1.2.3.4", supports_v2=False):
return Mock(host=host, id=bridge_id, supports_v2=supports_v2)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def mock_bridge(hass):\n return create_mock_bridge()",
"def create_mock_bridge():\n bridge = Mock(\n available=True,\n allow_unreachable=False,\n allow_groups=False,\n api=Mock(),\n spec=hue.HueBridge\n )\n bridge.mock_requests = []\n # We're using a deque so we can schedule multiple responses\n # and also means that `popleft()` will blow up if we get more updates\n # than expected.\n bridge.mock_sensor_responses = deque()\n\n async def mock_request(method, path, **kwargs):\n kwargs['method'] = method\n kwargs['path'] = path\n bridge.mock_requests.append(kwargs)\n\n if path == 'sensors':\n return bridge.mock_sensor_responses.popleft()\n return None\n\n bridge.api.config.apiversion = '9.9.9'\n bridge.api.sensors = Sensors({}, mock_request)\n return bridge",
"def create_mock_api_discovery(aioclient_mock, bridges):\n aioclient_mock.get(\n URL_NUPNP,\n json=[{\"internalipaddress\": host, \"id\": id} for (host, id) in bridges],\n )\n for host, bridge_id in bridges:\n aioclient_mock.get(\n f\"http://{host}/api/config\",\n json={\"bridgeid\": bridge_id},\n )\n # mock v2 support if v2 found in id\n aioclient_mock.get(\n f\"https://{host}/clip/v2/resources\",\n status=403 if \"v2\" in bridge_id else 404,\n )",
"def connection():\n return _MockConnection()",
"async def test_flow_discovered_bridges(opp, aioclient_mock):\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[\n {\"id\": BRIDGEID, \"internalipaddress\": \"1.2.3.4\", \"internalport\": 80},\n {\"id\": \"1234E567890A\", \"internalipaddress\": \"5.6.7.8\", \"internalport\": 80},\n ],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={CONF_HOST: \"1.2.3.4\"}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == BRIDGEID\n assert result[\"data\"] == {\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n }",
"async def test_bridge_connection_failed(\n hass: HomeAssistant,\n aioclient_mock: AiohttpClientMocker,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n create_mock_api_discovery(aioclient_mock, [])\n\n with patch(\n \"homeassistant.components.hue.config_flow.discover_bridge\",\n side_effect=ClientError,\n ):\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={\"host\": \"blah\"}\n )\n\n # a warning message should have been logged that the bridge could not be reached\n assert \"Error while attempting to retrieve discovery information\" in caplog.text\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"\n\n # test again with zeroconf discovered wrong bridge IP\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"blah\",\n addresses=[\"1.2.3.4\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5fafffeabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5fafffeabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"\n\n # test again with homekit discovered wrong bridge IP\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_HOMEKIT},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"0.0.0.0\",\n addresses=[\"0.0.0.0\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=None,\n properties={zeroconf.ATTR_PROPERTIES_ID: \"aa:bb:cc:dd:ee:ff\"},\n type=\"mock_type\",\n ),\n )\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"\n\n # repeat test with import flow\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_IMPORT},\n data={\"host\": \"blah\"},\n )\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"",
"async def test_new_sensor_discovered(hass, mock_bridge):\n mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE)\n\n await setup_bridge(hass, mock_bridge)\n assert len(mock_bridge.mock_requests) == 1\n assert len(hass.states.async_all()) == 6\n\n new_sensor_response = dict(SENSOR_RESPONSE)\n new_sensor_response.update({\n \"7\": PRESENCE_SENSOR_3_PRESENT,\n \"8\": LIGHT_LEVEL_SENSOR_3,\n \"9\": TEMPERATURE_SENSOR_3,\n })\n\n mock_bridge.mock_sensor_responses.append(new_sensor_response)\n\n # Force updates to run again\n sm_key = hue_sensor_base.SENSOR_MANAGER_FORMAT.format('mock-host')\n sm = hass.data[hue.DOMAIN][sm_key]\n await sm.async_update_items()\n\n # To flush out the service call to update the group\n await hass.async_block_till_done()\n\n assert len(mock_bridge.mock_requests) == 2\n assert len(hass.states.async_all()) == 9\n\n presence = hass.states.get('binary_sensor.bedroom_sensor_motion')\n assert presence is not None\n assert presence.state == 'on'\n temperature = hass.states.get('sensor.bedroom_sensor_temperature')\n assert temperature is not None\n assert temperature.state == '17.75'",
"def test_get_connection_new(self, connection):\n socket_path = \"little red riding hood\"\n module = MagicMock(spec=[\n 'fail_json',\n ])\n module._socket_path = socket_path\n\n connection().get_capabilities.return_value = '{\"network_api\": \"cliconf\"}'\n returned_connection = slxos.get_connection(module)\n connection.assert_called_with(socket_path)\n self.assertEqual(returned_connection, module.slxos_connection)",
"def test_pluggable_client_creation(self, mocker):\n config = {\n 'labelsEnabled': False,\n 'impressionListener': 123,\n 'storageType': 'pluggable',\n 'storageWrapper': StorageMockAdapter()\n }\n factory = get_factory('some_api_key', config=config)\n assert isinstance(factory._get_storage('splits'), pluggable.PluggableSplitStorage)\n assert isinstance(factory._get_storage('segments'), pluggable.PluggableSegmentStorage)\n assert isinstance(factory._get_storage('impressions'), pluggable.PluggableImpressionsStorage)\n assert isinstance(factory._get_storage('events'), pluggable.PluggableEventsStorage)\n\n adapter = factory._get_storage('splits')._pluggable_adapter\n assert adapter == factory._get_storage('segments')._pluggable_adapter\n assert adapter == factory._get_storage('impressions')._pluggable_adapter\n assert adapter == factory._get_storage('events')._pluggable_adapter\n\n assert factory._labels_enabled is False\n assert isinstance(factory._recorder, StandardRecorder)\n assert isinstance(factory._recorder._impressions_manager, ImpressionsManager)\n assert isinstance(factory._recorder._event_sotrage, pluggable.PluggableEventsStorage)\n assert isinstance(factory._recorder._impression_storage, pluggable.PluggableImpressionsStorage)\n try:\n factory.block_until_ready(1)\n except:\n pass\n assert factory.ready\n factory.destroy()",
"def mock_controller_connect(mock_unique_id):\n with patch(\"homeassistant.components.asuswrt.bridge.AsusWrtLegacy\") as service_mock:\n service_mock.return_value.connection.async_connect = AsyncMock()\n service_mock.return_value.is_connected = True\n service_mock.return_value.connection.disconnect = Mock()\n service_mock.return_value.async_get_nvram = AsyncMock(\n return_value=mock_unique_id\n )\n yield service_mock",
"def mock_sync_hardware_api(decoy: Decoy) -> SyncHardwareAPI:\n return decoy.mock(cls=SyncHardwareAPI)",
"async def test_hue_bridge_setup(hass):\n accessories = await setup_accessories_from_file(hass, \"hue_bridge.json\")\n config_entry, pairing = await setup_test_accessories(hass, accessories)\n\n entity_registry = await hass.helpers.entity_registry.async_get_registry()\n\n # Check that the battery is correctly found and set up\n battery_id = \"sensor.hue_dimmer_switch_battery\"\n battery = entity_registry.async_get(battery_id)\n assert battery.unique_id == \"homekit-6623462389072572-644245094400\"\n\n battery_helper = Helper(\n hass, \"sensor.hue_dimmer_switch_battery\", pairing, accessories[0], config_entry\n )\n battery_state = await battery_helper.poll_and_get_state()\n assert battery_state.attributes[\"friendly_name\"] == \"Hue dimmer switch Battery\"\n assert battery_state.attributes[\"icon\"] == \"mdi:battery\"\n assert battery_state.state == \"100\"\n\n device_registry = await hass.helpers.device_registry.async_get_registry()\n\n device = device_registry.async_get(battery.device_id)\n assert device.manufacturer == \"Philips\"\n assert device.name == \"Hue dimmer switch\"\n assert device.model == \"RWL021\"\n assert device.sw_version == \"45.1.17846\"",
"async def test_homeassistant_bridge_fan_setup(hass):\n accessories = await setup_accessories_from_file(\n hass, \"home_assistant_bridge_fan.json\"\n )\n config_entry, pairing = await setup_test_accessories(hass, accessories)\n\n entity_registry = er.async_get(hass)\n\n # Check that the fan is correctly found and set up\n fan_id = \"fan.living_room_fan\"\n fan = entity_registry.async_get(fan_id)\n assert fan.unique_id == \"homekit-fan.living_room_fan-8\"\n\n fan_helper = Helper(\n hass,\n \"fan.living_room_fan\",\n pairing,\n accessories[0],\n config_entry,\n )\n\n fan_state = await fan_helper.poll_and_get_state()\n assert fan_state.attributes[\"friendly_name\"] == \"Living Room Fan\"\n assert fan_state.state == \"off\"\n assert fan_state.attributes[\"supported_features\"] == (\n SUPPORT_DIRECTION | SUPPORT_SET_SPEED | SUPPORT_OSCILLATE\n )\n\n device_registry = dr.async_get(hass)\n\n device = device_registry.async_get(fan.device_id)\n assert device.manufacturer == \"Home Assistant\"\n assert device.name == \"Living Room Fan\"\n assert device.model == \"Fan\"\n assert device.sw_version == \"0.104.0.dev0\"\n\n bridge = device = device_registry.async_get(device.via_device_id)\n assert bridge.manufacturer == \"Home Assistant\"\n assert bridge.name == \"Home Assistant Bridge\"\n assert bridge.model == \"Bridge\"\n assert bridge.sw_version == \"0.104.0.dev0\"",
"def mock_hub(hass):\n hub = MagicMock(\n hass=hass,\n account=MagicMock(),\n logged_in=True,\n coordinator=MagicMock(spec=DataUpdateCoordinator),\n spec=litterrobot.LitterRobotHub,\n )\n hub.coordinator.last_update_success = True\n hub.account.robots = [create_mock_robot(hass)]\n return hub",
"def test_home_bridge(mock_pre_serv):\n bridge = HomeBridge('TestBridge', 'test.bridge', b'123-45-678')\n\n assert bridge.display_name == 'TestBridge'\n assert bridge.pincode == b'123-45-678'\n assert len(bridge.services) == 2\n\n assert bridge.services[0].display_name == SERV_ACCESSORY_INFO\n assert bridge.services[1].display_name == SERV_BRIDGING_STATE\n\n char_model = bridge.services[0].get_characteristic(CHAR_MODEL)\n assert char_model.get_value() == 'test.bridge'",
"async def setup_bridge(hass, mock_bridge, hostname=None):\n if hostname is None:\n hostname = 'mock-host'\n hass.config.components.add(hue.DOMAIN)\n hass.data[hue.DOMAIN] = {hostname: mock_bridge}\n config_entry = config_entries.ConfigEntry(1, hue.DOMAIN, 'Mock Title', {\n 'host': hostname\n }, 'test', config_entries.CONN_CLASS_LOCAL_POLL)\n await hass.config_entries.async_forward_entry_setup(\n config_entry, 'binary_sensor')\n await hass.config_entries.async_forward_entry_setup(\n config_entry, 'sensor')\n # and make sure it completes before going further\n await hass.async_block_till_done()",
"def conn(mocker):\n from kafka.conn import ConnectionStates\n from kafka.future import Future\n from kafka.protocol.metadata import MetadataResponse\n conn = mocker.patch('kafka.client_async.BrokerConnection')\n conn.return_value = conn\n conn.state = ConnectionStates.CONNECTED\n conn.send.return_value = Future().success(\n MetadataResponse[0](\n [(0, 'foo', 12), (1, 'bar', 34)], # brokers\n [])) # topics\n conn.blacked_out.return_value = False\n def _set_conn_state(state):\n conn.state = state\n return state\n conn._set_conn_state = _set_conn_state\n conn.connect.side_effect = lambda: conn.state\n conn.connect_blocking.return_value = True\n conn.connecting = lambda: conn.state in (ConnectionStates.CONNECTING,\n ConnectionStates.HANDSHAKE)\n conn.connected = lambda: conn.state is ConnectionStates.CONNECTED\n conn.disconnected = lambda: conn.state is ConnectionStates.DISCONNECTED\n return conn",
"async def test_flow_bridges_discovered(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n # Add ignored config entry. Should still show up as option.\n MockConfigEntry(\n domain=\"hue\", source=config_entries.SOURCE_IGNORE, unique_id=\"bla\"\n ).add_to_hass(hass)\n\n create_mock_api_discovery(\n aioclient_mock, [(\"1.2.3.4\", \"bla\"), (\"5.6.7.8\", \"beer_v2\")]\n )\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"init\"\n\n with pytest.raises(vol.Invalid):\n assert result[\"data_schema\"]({\"id\": \"not-discovered\"})\n\n result[\"data_schema\"]({\"id\": \"bla\"})\n result[\"data_schema\"]({\"id\": \"beer_v2\"})\n result[\"data_schema\"]({\"id\": \"manual\"})",
"def test_connect_opens_connection(self):\n\n mock_connector = MagicMock()\n database = Database()\n\n database.connect(connector_impl=mock_connector)\n\n self.assertTrue(mock_connector.connect.called)",
"def test_redisdb_get_client():\n test_redisdb = RedisClient()\n test_redisdb.client = \"mock_client\"\n\n test_redisdb_client = test_redisdb.get_client()\n assert test_redisdb_client == \"mock_client\"",
"async def test_ryse_smart_bridge_setup(hass):\n accessories = await setup_accessories_from_file(hass, \"ryse_smart_bridge.json\")\n config_entry, pairing = await setup_test_accessories(hass, accessories)\n\n entity_registry = er.async_get(hass)\n\n # Check that the cover.master_bath_south is correctly found and set up\n cover_id = \"cover.master_bath_south\"\n cover = entity_registry.async_get(cover_id)\n assert cover.unique_id == \"homekit-00:00:00:00:00:00-2-48\"\n\n cover_helper = Helper(\n hass,\n cover_id,\n pairing,\n accessories[0],\n config_entry,\n )\n\n cover_state = await cover_helper.poll_and_get_state()\n assert cover_state.attributes[\"friendly_name\"] == \"Master Bath South\"\n assert cover_state.state == \"closed\"\n\n device_registry = dr.async_get(hass)\n\n device = device_registry.async_get(cover.device_id)\n assert device.manufacturer == \"RYSE Inc.\"\n assert device.name == \"Master Bath South\"\n assert device.model == \"RYSE Shade\"\n assert device.sw_version == \"3.0.8\"\n\n bridge = device_registry.async_get(device.via_device_id)\n assert bridge.manufacturer == \"RYSE Inc.\"\n assert bridge.name == \"RYSE SmartBridge\"\n assert bridge.model == \"RYSE SmartBridge\"\n assert bridge.sw_version == \"1.3.0\"\n\n # Check that the cover.ryse_smartshade is correctly found and set up\n cover_id = \"cover.ryse_smartshade\"\n cover = entity_registry.async_get(cover_id)\n assert cover.unique_id == \"homekit-00:00:00:00:00:00-3-48\"\n\n cover_helper = Helper(\n hass,\n cover_id,\n pairing,\n accessories[0],\n config_entry,\n )\n\n cover_state = await cover_helper.poll_and_get_state()\n assert cover_state.attributes[\"friendly_name\"] == \"RYSE SmartShade\"\n assert cover_state.state == \"open\"\n\n device_registry = dr.async_get(hass)\n\n device = device_registry.async_get(cover.device_id)\n assert device.manufacturer == \"RYSE Inc.\"\n assert device.name == \"RYSE SmartShade\"\n assert device.model == \"RYSE Shade\"\n assert device.sw_version == \"\"",
"def mock_bus(hass):\n hass.bus.listen = mock.MagicMock()",
"async def test_get_scanner(\n hass: HomeAssistant, mocked_opnsense, mock_device_tracker_conf: list[legacy.Device]\n) -> None:\n interface_client = mock.MagicMock()\n mocked_opnsense.InterfaceClient.return_value = interface_client\n interface_client.get_arp.return_value = [\n {\n \"hostname\": \"\",\n \"intf\": \"igb1\",\n \"intf_description\": \"LAN\",\n \"ip\": \"192.168.0.123\",\n \"mac\": \"ff:ff:ff:ff:ff:ff\",\n \"manufacturer\": \"\",\n },\n {\n \"hostname\": \"Desktop\",\n \"intf\": \"igb1\",\n \"intf_description\": \"LAN\",\n \"ip\": \"192.168.0.167\",\n \"mac\": \"ff:ff:ff:ff:ff:fe\",\n \"manufacturer\": \"OEM\",\n },\n ]\n network_insight_client = mock.MagicMock()\n mocked_opnsense.NetworkInsightClient.return_value = network_insight_client\n network_insight_client.get_interfaces.return_value = {\"igb0\": \"WAN\", \"igb1\": \"LAN\"}\n\n result = await async_setup_component(\n hass,\n DOMAIN,\n {\n DOMAIN: {\n CONF_URL: \"https://fake_host_fun/api\",\n CONF_API_KEY: \"fake_key\",\n CONF_API_SECRET: \"fake_secret\",\n CONF_VERIFY_SSL: False,\n }\n },\n )\n await hass.async_block_till_done()\n assert result\n device_1 = hass.states.get(\"device_tracker.desktop\")\n assert device_1 is not None\n assert device_1.state == \"home\"\n device_2 = hass.states.get(\"device_tracker.ff_ff_ff_ff_ff_ff\")\n assert device_2.state == \"home\"",
"def fetch_description(self, bridge):\n url = self.bridge_description_url(bridge)\n try:\n response = requests.get(url, timeout=5)\n response.raise_for_status()\n return PHueBridge(url, response.text)\n except requests.exceptions.RequestException as err:\n _LOGGER.warning('Could not query server %s: %s',\n url, err)",
"def test_get_device_detects_garagedoor_switch(hass, mock_openzwave):\n node = MockNode()\n value = MockValue(\n data=False, node=node, command_class=const.COMMAND_CLASS_SWITCH_BINARY\n )\n values = MockEntityValues(primary=value, node=node)\n\n device = cover.get_device(hass=hass, node=node, values=values, node_config={})\n assert isinstance(device, cover.ZwaveGarageDoorSwitch)\n assert device.device_class == \"garage\"\n assert device.supported_features == SUPPORT_OPEN | SUPPORT_CLOSE",
"def mock_open_port(monkeypatch):\n mocked_open_port = mock.Mock()\n monkeypatch.setattr(\"libgitlab.hookenv.open_port\", mocked_open_port)\n return mocked_open_port",
"def _get_mock(self):\n return mock.patch(\n \"aea.cli.upgrade.get_latest_version_available_in_registry\",\n side_effect=self.mock_get_latest_version_available_in_registry,\n )",
"def generate_all_mocks(self):\n netconf = netconf_connection.connection()\n netconf.ssh = mock.Mock()\n netconf.ssh.close = mock.MagicMock()\n netconf.chan = mock.Mock()\n netconf.chan.close = mock.MagicMock()\n # 256 - send by 256 bytes\n netconf.chan.send = mock.MagicMock(return_value=256)\n netconf.chan.recv = mock.MagicMock(\n return_value=netconf_connection.NETCONF_1_0_END\n )\n return netconf",
"def generate_all_mocks(self):\n netconf = netconf_connection.NetConfConnection()\n netconf.ssh = mock.Mock()\n netconf.ssh.close = mock.MagicMock()\n netconf.conn = mock.Mock()\n netconf.conn.close = mock.MagicMock()\n # 256 - send by 256 bytes\n netconf.conn.send = mock.MagicMock(return_value=256)\n netconf.conn.recv = mock.MagicMock(\n return_value=netconf_connection.NETCONF_1_0_END\n )\n netconf.conn.closed = True\n return netconf",
"def mock_engine_client(decoy: Decoy) -> EngineClient:\n return decoy.mock(cls=EngineClient)"
] |
[
"0.7372151",
"0.73244363",
"0.6197295",
"0.6095032",
"0.57564074",
"0.56794703",
"0.56739575",
"0.5600137",
"0.5596264",
"0.5588057",
"0.54905665",
"0.54791147",
"0.5407656",
"0.5389388",
"0.53728324",
"0.5369918",
"0.5355989",
"0.5288491",
"0.5288298",
"0.5277364",
"0.5271598",
"0.52432317",
"0.52209854",
"0.52153534",
"0.52131206",
"0.51774895",
"0.5176964",
"0.5137402",
"0.51292294",
"0.5128795"
] |
0.78755337
|
0
|
Test config flow discovers two bridges.
|
async def test_flow_two_bridges_discovered_one_new(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
create_mock_api_discovery(aioclient_mock, [("1.2.3.4", "bla"), ("5.6.7.8", "beer")])
MockConfigEntry(
domain="hue", unique_id="bla", data={"host": "1.2.3.4"}
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "init"
assert result["data_schema"]({"id": "beer"})
assert result["data_schema"]({"id": "manual"})
with pytest.raises(vol.error.MultipleInvalid):
assert not result["data_schema"]({"id": "bla"})
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def test_flow_discovered_bridges(opp, aioclient_mock):\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[\n {\"id\": BRIDGEID, \"internalipaddress\": \"1.2.3.4\", \"internalport\": 80},\n {\"id\": \"1234E567890A\", \"internalipaddress\": \"5.6.7.8\", \"internalport\": 80},\n ],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={CONF_HOST: \"1.2.3.4\"}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == BRIDGEID\n assert result[\"data\"] == {\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n }",
"async def test_flow_bridges_discovered(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n # Add ignored config entry. Should still show up as option.\n MockConfigEntry(\n domain=\"hue\", source=config_entries.SOURCE_IGNORE, unique_id=\"bla\"\n ).add_to_hass(hass)\n\n create_mock_api_discovery(\n aioclient_mock, [(\"1.2.3.4\", \"bla\"), (\"5.6.7.8\", \"beer_v2\")]\n )\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"init\"\n\n with pytest.raises(vol.Invalid):\n assert result[\"data_schema\"]({\"id\": \"not-discovered\"})\n\n result[\"data_schema\"]({\"id\": \"bla\"})\n result[\"data_schema\"]({\"id\": \"beer_v2\"})\n result[\"data_schema\"]({\"id\": \"manual\"})",
"async def test_flow_all_discovered_bridges_exist(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n mock_host = \"1.2.3.4\"\n mock_id = \"bla\"\n create_mock_api_discovery(aioclient_mock, [(mock_host, mock_id)])\n\n MockConfigEntry(\n domain=\"hue\", unique_id=mock_id, data={\"host\": mock_host}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"manual\"",
"async def test_bridge_connection_failed(\n hass: HomeAssistant,\n aioclient_mock: AiohttpClientMocker,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n create_mock_api_discovery(aioclient_mock, [])\n\n with patch(\n \"homeassistant.components.hue.config_flow.discover_bridge\",\n side_effect=ClientError,\n ):\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={\"host\": \"blah\"}\n )\n\n # a warning message should have been logged that the bridge could not be reached\n assert \"Error while attempting to retrieve discovery information\" in caplog.text\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"\n\n # test again with zeroconf discovered wrong bridge IP\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"blah\",\n addresses=[\"1.2.3.4\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5fafffeabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5fafffeabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"\n\n # test again with homekit discovered wrong bridge IP\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_HOMEKIT},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"0.0.0.0\",\n addresses=[\"0.0.0.0\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=None,\n properties={zeroconf.ATTR_PROPERTIES_ID: \"aa:bb:cc:dd:ee:ff\"},\n type=\"mock_type\",\n ),\n )\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"\n\n # repeat test with import flow\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_IMPORT},\n data={\"host\": \"blah\"},\n )\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"",
"async def test_bridge_zeroconf(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"192.168.1.217\", \"ecb5fafffeabcabc\")])\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.217\",\n addresses=[\"192.168.1.217\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5fafffeabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5fafffeabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"",
"async def test_bridge_zeroconf_ipv6(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"fd00::eeb5:faff:fe84:b17d\",\n addresses=[\"fd00::eeb5:faff:fe84:b17d\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5faabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5faabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"invalid_host\"",
"def test_wrong_bridge_config(self):\n stored_target_deploy_status = self.test_config.get(\n 'target_deploy_status', {})\n new_target_deploy_status = stored_target_deploy_status.copy()\n new_target_deploy_status[self.application_name] = {\n 'workload-status': 'blocked',\n 'workload-status-message': 'Wrong format',\n }\n if 'target_deploy_status' in self.test_config:\n self.test_config['target_deploy_status'].update(\n new_target_deploy_status)\n else:\n self.test_config['target_deploy_status'] = new_target_deploy_status\n\n with self.config_change(\n self.config_current(\n application_name=self.application_name,\n keys=['bridge-interface-mappings']),\n {'bridge-interface-mappings': 'incorrect'}):\n logging.info('Charm went into blocked state as expected, restore '\n 'configuration')\n self.test_config[\n 'target_deploy_status'] = stored_target_deploy_status",
"def create_mock_api_discovery(aioclient_mock, bridges):\n aioclient_mock.get(\n URL_NUPNP,\n json=[{\"internalipaddress\": host, \"id\": id} for (host, id) in bridges],\n )\n for host, bridge_id in bridges:\n aioclient_mock.get(\n f\"http://{host}/api/config\",\n json={\"bridgeid\": bridge_id},\n )\n # mock v2 support if v2 found in id\n aioclient_mock.get(\n f\"https://{host}/clip/v2/resources\",\n status=403 if \"v2\" in bridge_id else 404,\n )",
"async def test_flow_manual_configuration_decision(opp, aioclient_mock):\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[{\"id\": BRIDGEID, \"internalipaddress\": \"1.2.3.4\", \"internalport\": 80}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={CONF_HOST: CONF_MANUAL_INPUT}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: \"1.2.3.4\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://1.2.3.4:80/api/{API_KEY}/config\",\n json={\"bridgeid\": BRIDGEID},\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == BRIDGEID\n assert result[\"data\"] == {\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n }",
"async def test_manual_configuration_timeout_get_bridge(opp, aioclient_mock):\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: \"1.2.3.4\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://1.2.3.4:80/api/{API_KEY}/config\", exc=asyncio.TimeoutError\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"no_bridges\"",
"async def test_sensors_with_multiple_bridges(hass, mock_bridge):\n mock_bridge_2 = create_mock_bridge()\n mock_bridge_2.mock_sensor_responses.append({\n \"1\": PRESENCE_SENSOR_3_PRESENT,\n \"2\": LIGHT_LEVEL_SENSOR_3,\n \"3\": TEMPERATURE_SENSOR_3,\n })\n mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE)\n await setup_bridge(hass, mock_bridge)\n await setup_bridge(hass, mock_bridge_2, hostname='mock-bridge-2')\n\n assert len(mock_bridge.mock_requests) == 1\n assert len(mock_bridge_2.mock_requests) == 1\n # 3 \"physical\" sensors with 3 virtual sensors each\n assert len(hass.states.async_all()) == 9",
"async def test_bridge_zeroconf_already_exists(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(\n aioclient_mock, [(\"0.0.0.0\", \"ecb5faabcabc\"), (\"192.168.1.217\", \"ecb5faabcabc\")]\n )\n entry = MockConfigEntry(\n domain=\"hue\",\n source=config_entries.SOURCE_HOMEKIT,\n data={\"host\": \"0.0.0.0\"},\n unique_id=\"ecb5faabcabc\",\n )\n entry.add_to_hass(hass)\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.217\",\n addresses=[\"192.168.1.217\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5faabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5faabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert entry.data[\"host\"] == \"192.168.1.217\"",
"def test_connectivity(self):\n vlan_net1 = helpers.get_not_used_vlan(self.tester.bridge, VLAN_RANGE)\n vlan_net2 = helpers.get_not_used_vlan(self.tester.bridge, VLAN_RANGE)\n trunk_mac = net.get_random_mac('fa:16:3e:00:00:00'.split(':'))\n sub_port_mac = net.get_random_mac('fa:16:3e:00:00:00'.split(':'))\n sub_port_segmentation_id = helpers.get_not_used_vlan(\n self.tester.bridge, VLAN_RANGE)\n LOG.debug(\"Using %(n1)d vlan tag as local vlan ID for net1 and %(n2)d \"\n \"for local vlan ID for net2\", {\n 'n1': vlan_net1, 'n2': vlan_net2})\n self.tester.set_peer_tag(vlan_net1)\n self.trunk_manager.create_trunk(self.trunk.trunk_id,\n self.trunk.port_id,\n trunk_mac)\n\n # tag the patch port, this should be done by the ovs agent but we mock\n # it for this test\n conn_testers.OVSBaseConnectionTester.set_tag(\n self.trunk.patch_port_int_name, self.tester.bridge, vlan_net1)\n\n self.tester.wait_for_connection(self.tester.INGRESS)\n self.tester.wait_for_connection(self.tester.EGRESS)\n\n self.tester.add_vlan_interface_and_peer(sub_port_segmentation_id,\n self.net2_cidr)\n conn_testers.OVSBaseConnectionTester.set_tag(\n self.tester._peer2.port.name, self.tester.bridge, vlan_net2)\n\n sub_port = trunk_manager.SubPort(self.trunk.trunk_id,\n uuidutils.generate_uuid(),\n sub_port_mac,\n sub_port_segmentation_id)\n\n self.trunk_manager.add_sub_port(sub_port.trunk_id,\n sub_port.port_id,\n sub_port.port_mac,\n sub_port.segmentation_id)\n # tag the patch port, this should be done by the ovs agent but we mock\n # it for this test\n conn_testers.OVSBaseConnectionTester.set_tag(\n sub_port.patch_port_int_name, self.tester.bridge, vlan_net2)\n\n self.tester.wait_for_sub_port_connectivity(self.tester.INGRESS)\n self.tester.wait_for_sub_port_connectivity(self.tester.EGRESS)\n\n self.trunk_manager.remove_sub_port(sub_port.trunk_id,\n sub_port.port_id)\n self.tester.wait_for_sub_port_no_connectivity(self.tester.INGRESS)\n self.tester.wait_for_sub_port_no_connectivity(self.tester.EGRESS)\n\n self.trunk_manager.remove_trunk(self.trunk.trunk_id,\n self.trunk.port_id)\n self.tester.wait_for_no_connection(self.tester.INGRESS)",
"async def test_bridge_homekit(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"0.0.0.0\", \"bla\")])\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_HOMEKIT},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"0.0.0.0\",\n addresses=[\"0.0.0.0\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=None,\n properties={zeroconf.ATTR_PROPERTIES_ID: \"aa:bb:cc:dd:ee:ff\"},\n type=\"mock_type\",\n ),\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"\n\n flow = next(\n flow\n for flow in hass.config_entries.flow.async_progress()\n if flow[\"flow_id\"] == result[\"flow_id\"]\n )\n assert flow[\"context\"][\"unique_id\"] == config_entries.DEFAULT_DISCOVERY_UNIQUE_ID",
"def test_get_networks(self):\n pass",
"async def test_bridge_homekit_already_configured(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"0.0.0.0\", \"aabbccddeeff\")])\n MockConfigEntry(\n domain=\"hue\", unique_id=\"aabbccddeeff\", data={\"host\": \"0.0.0.0\"}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_HOMEKIT},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"0.0.0.0\",\n addresses=[\"0.0.0.0\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=None,\n properties={zeroconf.ATTR_PROPERTIES_ID: \"aa:bb:cc:dd:ee:ff\"},\n type=\"mock_type\",\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"",
"async def test_ryse_smart_bridge_setup(hass):\n accessories = await setup_accessories_from_file(hass, \"ryse_smart_bridge.json\")\n config_entry, pairing = await setup_test_accessories(hass, accessories)\n\n entity_registry = er.async_get(hass)\n\n # Check that the cover.master_bath_south is correctly found and set up\n cover_id = \"cover.master_bath_south\"\n cover = entity_registry.async_get(cover_id)\n assert cover.unique_id == \"homekit-00:00:00:00:00:00-2-48\"\n\n cover_helper = Helper(\n hass,\n cover_id,\n pairing,\n accessories[0],\n config_entry,\n )\n\n cover_state = await cover_helper.poll_and_get_state()\n assert cover_state.attributes[\"friendly_name\"] == \"Master Bath South\"\n assert cover_state.state == \"closed\"\n\n device_registry = dr.async_get(hass)\n\n device = device_registry.async_get(cover.device_id)\n assert device.manufacturer == \"RYSE Inc.\"\n assert device.name == \"Master Bath South\"\n assert device.model == \"RYSE Shade\"\n assert device.sw_version == \"3.0.8\"\n\n bridge = device_registry.async_get(device.via_device_id)\n assert bridge.manufacturer == \"RYSE Inc.\"\n assert bridge.name == \"RYSE SmartBridge\"\n assert bridge.model == \"RYSE SmartBridge\"\n assert bridge.sw_version == \"1.3.0\"\n\n # Check that the cover.ryse_smartshade is correctly found and set up\n cover_id = \"cover.ryse_smartshade\"\n cover = entity_registry.async_get(cover_id)\n assert cover.unique_id == \"homekit-00:00:00:00:00:00-3-48\"\n\n cover_helper = Helper(\n hass,\n cover_id,\n pairing,\n accessories[0],\n config_entry,\n )\n\n cover_state = await cover_helper.poll_and_get_state()\n assert cover_state.attributes[\"friendly_name\"] == \"RYSE SmartShade\"\n assert cover_state.state == \"open\"\n\n device_registry = dr.async_get(hass)\n\n device = device_registry.async_get(cover.device_id)\n assert device.manufacturer == \"RYSE Inc.\"\n assert device.name == \"RYSE SmartShade\"\n assert device.model == \"RYSE Shade\"\n assert device.sw_version == \"\"",
"def test_bay_bridge(self):\n # import the experiment variable from the example\n exp = bay_bridge_example(render=False)\n\n # run the experiment for a few time steps to ensure it doesn't fail\n exp.run(1, 5)\n\n # import the experiment variable from the example with inflows\n exp = bay_bridge_example(render=False, use_inflows=True)\n\n # run the experiment for a few time steps to ensure it doesn't fail\n exp.run(1, 5)\n\n # import the experiment variable from the example with traffic lights\n exp = bay_bridge_example(render=False, use_traffic_lights=True)\n\n # run the experiment for a few time steps to ensure it doesn't fail\n exp.run(1, 5)",
"async def test_bridge_import_already_configured(hass: HomeAssistant) -> None:\n MockConfigEntry(\n domain=\"hue\", unique_id=\"aabbccddeeff\", data={\"host\": \"0.0.0.0\"}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_IMPORT},\n data={\"host\": \"0.0.0.0\", \"properties\": {\"id\": \"aa:bb:cc:dd:ee:ff\"}},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"",
"async def test_get_scanner(\n hass: HomeAssistant, mocked_opnsense, mock_device_tracker_conf: list[legacy.Device]\n) -> None:\n interface_client = mock.MagicMock()\n mocked_opnsense.InterfaceClient.return_value = interface_client\n interface_client.get_arp.return_value = [\n {\n \"hostname\": \"\",\n \"intf\": \"igb1\",\n \"intf_description\": \"LAN\",\n \"ip\": \"192.168.0.123\",\n \"mac\": \"ff:ff:ff:ff:ff:ff\",\n \"manufacturer\": \"\",\n },\n {\n \"hostname\": \"Desktop\",\n \"intf\": \"igb1\",\n \"intf_description\": \"LAN\",\n \"ip\": \"192.168.0.167\",\n \"mac\": \"ff:ff:ff:ff:ff:fe\",\n \"manufacturer\": \"OEM\",\n },\n ]\n network_insight_client = mock.MagicMock()\n mocked_opnsense.NetworkInsightClient.return_value = network_insight_client\n network_insight_client.get_interfaces.return_value = {\"igb0\": \"WAN\", \"igb1\": \"LAN\"}\n\n result = await async_setup_component(\n hass,\n DOMAIN,\n {\n DOMAIN: {\n CONF_URL: \"https://fake_host_fun/api\",\n CONF_API_KEY: \"fake_key\",\n CONF_API_SECRET: \"fake_secret\",\n CONF_VERIFY_SSL: False,\n }\n },\n )\n await hass.async_block_till_done()\n assert result\n device_1 = hass.states.get(\"device_tracker.desktop\")\n assert device_1 is not None\n assert device_1.state == \"home\"\n device_2 = hass.states.get(\"device_tracker.ff_ff_ff_ff_ff_ff\")\n assert device_2.state == \"home\"",
"def connection_test():\n connections_result_passed = []\n connections_result_failed = []\n with open(f'endpoints/{dc_number}/endpoints.yaml', 'r') as ep_file:\n try:\n yaml_object = yaml.safe_load(ep_file)\n for components in yaml_object.values():\n for host_info in components.values():\n if host_info is None:\n pass\n else:\n for hostname, port in host_info.items():\n if ',' in str(port):\n port_list = str(port).split(',')\n for items in port_list:\n connections_result_passed, connections_result_failed = connect(hostname, items, connections_result_passed, connections_result_failed)\n else:\n connections_result_passed, connections_result_failed = connect(hostname, port, connections_result_passed, connections_result_failed)\n message, color = message_content(connections_result_passed, connections_result_failed)\n if str2bool(slack_enabled) is True and str2bool(email_enabled) is True:\n send_message_slack(message, color)\n send_email(message)\n elif str2bool(slack_enabled) is True and str2bool(email_enabled) is False:\n send_message_slack(message, color)\n elif str2bool(slack_enabled) is False and str2bool(email_enabled) is True:\n send_email(message)\n else:\n pass\n except yaml.YAMLError as exc:\n print(exc)",
"async def test_discovered_by_dhcp_or_integration_discovery(\n hass: HomeAssistant, source, data, bulb_type, extended_white_range, name\n) -> None:\n with _patch_wizlight(\n device=None, extended_white_range=extended_white_range, bulb_type=bulb_type\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": source}, data=data\n )\n await hass.async_block_till_done()\n\n assert result[\"type\"] == FlowResultType.FORM\n assert result[\"step_id\"] == \"discovery_confirm\"\n\n with _patch_wizlight(\n device=None, extended_white_range=extended_white_range, bulb_type=bulb_type\n ), patch(\n \"homeassistant.components.wiz.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry, patch(\n \"homeassistant.components.wiz.async_setup\", return_value=True\n ) as mock_setup:\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {},\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == \"create_entry\"\n assert result2[\"title\"] == name\n assert result2[\"data\"] == {\n CONF_HOST: \"1.1.1.1\",\n }\n assert len(mock_setup.mock_calls) == 1\n assert len(mock_setup_entry.mock_calls) == 1",
"async def test_flow_works(hass: HomeAssistant) -> None:\n disc_bridge = get_discovered_bridge(supports_v2=True)\n\n with patch(\n \"homeassistant.components.hue.config_flow.discover_nupnp\",\n return_value=[disc_bridge],\n ):\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"init\"\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={\"id\": disc_bridge.id}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"\n\n flow = next(\n flow\n for flow in hass.config_entries.flow.async_progress()\n if flow[\"flow_id\"] == result[\"flow_id\"]\n )\n assert flow[\"context\"][\"unique_id\"] == \"aabbccddeeff\"\n\n with patch.object(config_flow, \"create_app_key\", return_value=\"123456789\"):\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == \"create_entry\"\n assert result[\"title\"] == \"Hue Bridge aabbccddeeff\"\n assert result[\"data\"] == {\n \"host\": \"1.2.3.4\",\n \"api_key\": \"123456789\",\n \"api_version\": 2,\n }",
"async def test_manual_configuration_after_discovery_ResponseError(opp, aioclient_mock):\n aioclient_mock.get(pydeconz.utils.URL_DISCOVER, exc=pydeconz.errors.ResponseError)\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n assert not opp.config_entries.flow._progress[result[\"flow_id\"]].bridges",
"def test_parse_config(self):\n config_file = os.path.join('top', 'conf', 'top.conf')\n\n self._c.set_config_file(config_file)\n self._c.parse_config()\n\n received = self._c.adp_loop\n expected = 30\n msg = 'AdpB2CConfig.adp_loop error'\n self.assertEqual(received, expected, msg)\n\n received = self._c.adp_dirs\n expected = ['/var/ftp/pub/nparcel/adp/in']\n msg = 'AdpB2CConfig.adp_dirs error'\n self.assertListEqual(received, expected, msg)\n\n received = self._c.archive_dir\n expected = '/data/top/archive'\n msg = 'AdpB2CConfig.archive_dir error'\n self.assertEqual(received, expected, msg)\n\n received = self._c.adp_file_formats\n expected = []\n msg = 'AdpB2CConfig.adp_file_formats error'\n self.assertListEqual(received, expected, msg)\n\n # For the default configuration file the [db] section is blank\n received = self._c.db_kwargs()\n msg = 'AdpB2CConfig.db_kwargs error'\n self.assertIsNone(received, msg)\n\n received = self._c.code_header\n expected = 'TP Code'\n msg = 'AdpB2CConfig.code_header error'\n self.assertEqual(received, expected, msg)\n\n received = self._c.adp_headers\n expected = {'agent.code': 'TP Code',\n 'agent.dp_code': 'DP Code',\n 'agent.name': 'ADP Name',\n 'agent.address': 'Address',\n 'agent.suburb': 'Suburb',\n 'agent.state': 'State',\n 'agent.postcode': 'Postcode',\n 'agent.opening_hours': 'Opening Hours',\n 'agent.notes': 'Notes',\n 'agent.parcel_size_code': 'ADP Accepts Parcel Size',\n 'agent.phone_nbr': 'Phone',\n 'agent.contact_name': 'Contact',\n 'agent.email': 'Email',\n 'agent.fax_nbr': 'Fax',\n 'agent.latitude': 'Latitude',\n 'agent.longitude': 'Longitude',\n 'agent.status': 'Active',\n 'delivery_partner.id': 'DP Id',\n 'login_account.username': 'Username'}\n msg = 'AdpB2CConfig.adp.headers error'\n self.assertDictEqual(received, expected, msg)\n\n received = self._c.delivery_partners\n expected = ['Nparcel', 'ParcelPoint', 'Toll', 'National Storage']\n msg = 'AdpB2CConfig.adp.delivery_partners error'\n self.assertListEqual(received, expected, msg)\n\n received = self._c.adp_default_passwords\n expected = {'nparcel': 'aaaa',\n 'parcelpoint': 'bbbb',\n 'toll': 'cccc',\n 'national storage': 'dddd'}\n msg = 'AdpB2CConfig.adp_default_passwords error'\n self.assertDictEqual(received, expected, msg)",
"async def test_manual_flow_bridge_exist(hass: HomeAssistant) -> None:\n MockConfigEntry(\n domain=\"hue\", unique_id=\"id-1234\", data={\"host\": \"2.2.2.2\"}\n ).add_to_hass(hass)\n\n with patch(\n \"homeassistant.components.hue.config_flow.discover_nupnp\",\n return_value=[],\n ):\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"manual\"\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], {\"host\": \"2.2.2.2\"}\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"",
"def test_config(self):\n\n # We start in uninitialized state.\n # In this state there is no driver process.\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.UNINITIALIZED)\n \n # Ping the agent.\n retval = self._ia_client.ping_agent()\n log.info(retval)\n\n # Initialize the agent.\n # The agent is spawned with a driver config, but you can pass one in\n # optinally with the initialize command. This validates the driver\n # config, launches a driver process and connects to it via messaging.\n # If successful, we switch to the inactive state.\n cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.INACTIVE)\n\n # Ping the driver proc.\n retval = self._ia_client.ping_resource()\n log.info(retval)\n\n decoder = IonObjectDeserializer(obj_registry=get_obj_registry())\n\n # Grab the alarms defined in the config.\n retval = decoder.deserialize(self._ia_client.get_agent(['alarms'])['alarms'])\n\n \"\"\"\n {'status': None, 'stream_name': 'parsed', 'name': 'test_sim_warning',\n 'upper_bound': 5.0, 'expr': 'x<5.0', 'upper_rel_op': '<',\n 'lower_rel_op': None, 'type_': 'IntervalAlarmDef', 'value_id': 'temp',\n 'lower_bound': None, 'message': 'Temperature is above test range of 5.0.',\n 'current_val': None, 'type': 1}\n \"\"\"\n self.assertEqual(retval[0].type_, 'IntervalAlarmDef')\n self.assertEqual(retval[0].upper_bound, 5.0)\n self.assertEqual(retval[0].expr, 'x<5.0')\n \n # Reset the agent. This causes the driver messaging to be stopped,\n # the driver process to end and switches us back to uninitialized.\n cmd = AgentCommand(command=ResourceAgentEvent.RESET)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.UNINITIALIZED)",
"def bridge(gwc = 0, brc = bridge_int):\n# bridge interface list\n br_interface = []\n# bridge ip addresses list\n gw_ipaddr = []\n# bridge network list\n gw_network = []\n# gatweway start number list\n gw_number = 0\n\n# fill all lists for bridge\n for i in netifaces.ifaddresses(bridge_int)[netifaces.AF_INET]:\n br_interface.append([gw_number, ' ', i['addr'], i['netmask']])\n gw_ipaddr.append(i['addr'])\n gw_network.append(i['netmask'])\n gw_number = gw_number + 1\n br_interface[0][1] = bridge_int\n\n if gwc == 'check':\n return (br_interface, gw_ipaddr, gw_network)\n\n# print jadm gateways table\n br_menu = [\"Number\", \"Bridge name\", \"Gateway IP Address\", \"Gatewy Network Mask\"]\n print tabulate(br_interface, br_menu)\n\n# return bridge interface name, ip addresses and network mask\n return (br_interface, gw_ipaddr, gw_network)",
"async def test_flow_manual_configuration(opp, aioclient_mock):\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: \"1.2.3.4\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://1.2.3.4:80/api/{API_KEY}/config\",\n json={\"bridgeid\": BRIDGEID},\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == BRIDGEID\n assert result[\"data\"] == {\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n }",
"def test_nics_to_access_configs(neo4j_session):\n _ensure_local_neo4j_has_test_instance_data(neo4j_session)\n ac_query = \"\"\"\n MATCH (nic:GCPNetworkInterface)-[r:RESOURCE]->(ac:GCPNicAccessConfig)\n return nic.nic_id, ac.access_config_id, ac.public_ip\n \"\"\"\n nodes = neo4j_session.run(ac_query)\n\n nic_id1 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0'\n ac_id1 = f\"{nic_id1}/accessconfigs/ONE_TO_ONE_NAT\"\n nic_id2 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1/networkinterfaces/nic0'\n ac_id2 = f\"{nic_id2}/accessconfigs/ONE_TO_ONE_NAT\"\n\n actual_nodes = {(n['nic.nic_id'], n['ac.access_config_id'], n['ac.public_ip']) for n in nodes}\n expected_nodes = {\n (nic_id1, ac_id1, '1.3.4.5'),\n (nic_id2, ac_id2, '1.2.3.4'),\n }\n assert actual_nodes == expected_nodes"
] |
[
"0.7181192",
"0.657215",
"0.6402847",
"0.6280944",
"0.6279817",
"0.5987878",
"0.59858084",
"0.59460175",
"0.59324795",
"0.5932003",
"0.5878794",
"0.5875763",
"0.5835963",
"0.58070344",
"0.5779187",
"0.5733481",
"0.57093203",
"0.5626492",
"0.56254184",
"0.5592134",
"0.5588778",
"0.54865354",
"0.547681",
"0.5470379",
"0.54692006",
"0.54608405",
"0.5443912",
"0.53905404",
"0.53770435",
"0.5369733"
] |
0.672303
|
1
|
Test if an unknown error happened during the linking process.
|
async def test_flow_link_unknown_error(hass: HomeAssistant) -> None:
disc_bridge = get_discovered_bridge()
with patch(
"homeassistant.components.hue.config_flow.discover_nupnp",
return_value=[disc_bridge],
):
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch.object(config_flow, "create_app_key", side_effect=Exception):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"id": disc_bridge.id}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "form"
assert result["step_id"] == "link"
assert result["errors"] == {"base": "linking"}
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _errcheck_link(value, func, args): # pylint: disable=W0613\n # The windows api returns nonzero if the call was successful\n if value != 0:\n return\n\n last_error = ctypes.windll.kernel32.GetLastError()\n # Somehow CreateSymbolicLinkW and CreateHardLinkW retuns zero\n # and the last error is 2 (The system cannot find the file specified)\n # but the link is created successfuly\n # it seems like a bug in the WinAPI\n if last_error == 0 or last_error == 2:\n return\n if last_error == 183:\n raise OSError(errno.EEXIST,\n \"Cannot create a file when that file already exists\",\n args[0])",
"def is_unknown_error(self):\n return self._tag == 'unknown_error'",
"def _is_bad_link(info, base):\r\n # Links are interpreted relative to the directory containing the link\r\n tip = resolved(joinpath(base, dirname(info.name)))\r\n return _is_bad_path(info.linkname, base=tip)",
"def is_broken_link(path):\r\n path = os.readlink(path)\r\n return not os.path.exists(path)",
"def test_invalid_link(self):\r\n\r\n # Setup the peer grading module with no linked locations.\r\n peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location, valid_linked_descriptor=False)\r\n\r\n self.assertFalse(peer_grading.use_for_single_location_local)\r\n self.assertTrue(peer_grading.use_for_single_location)",
"def test_unexpected_error_in_exists(self):\n # TODO\n one_process_workflow = \"\"\"buggy://B <- file://A\n echo A produces B > B\n \"\"\"\n process = run_first_process(one_process_workflow, extra_resource=BuggyExistsResource)\n assert process.success is False, process.error_message\n assert process.error_message.find('An unexpected error have happen in tuttle while checking existence of '\n 'output resources' ) >= 0, process.error_message\n assert process.error_message.find('Traceback (most recent call last):') >= 0, process.error_message\n assert process.error_message.find('raise Exception(\"Unexpected error in exists()\")') >= 0, process.error_message\n assert process.error_message.find('Process cannot be considered complete.') >= 0, process.error_message",
"def is_broken_link(self):\n if not os.path.exists(self.dst):\n if os.path.lexists(self.dst):\n return True\n return False",
"def _nupicHyperSearchHasErrors(hyperSearchJob):\n # TODO flesh me out\n\n # Get search ID for our latest hypersearch\n\n # Query Nupic for experiment failures in the given search\n\n return False",
"def has_error(self):\r\n return self._arm.has_error",
"def unknown_error(self, message):\n print('Status Unknown: ' + message)\n sys.exit(3)",
"def check_linking(self):\n\n # This one checks if the linking command works out of the box or\n # if any specific flag is required. For example if the linker if the\n # Intel FORTRAN compiler, then the \"-nofor_main\" is usually required.\n # This function only checks if linker works but does not automatically\n # detect the required flags\n print 'Checking loader...',\n sys.stdout.flush()\n writefile('tmpf.f',\"\"\"\n subroutine fsub()\n write(*,*)'success'\n stop\n end\\n\"\"\")\n writefile('tmpc.c',\"\"\"\n #if defined ADD_\n #define fsub fsub_\n #elif defined NOCHANGE\n #define fsub fsub\n #elif defined fcIsF2C\n #define fsub fsub_\n #elif defined UPCASE\n #define fsub FSUB\n #endif\n void main(){\n fsub();}\\n\"\"\")\n\n ccomm = self.config.cc+' '+self.config.ccflags+' '+self.mangling+' -c -o tmpc.o tmpc.c'\n fcomm = self.config.fc+' '+self.config.fcflags+' -c -o tmpf.o tmpf.f'\n lcomm = self.config.fc+' '+self.config.ldflags_fc+' '+self.config.ld_fcmain+' -o lnk tmpf.o tmpc.o'\n\n (output, error, retz) = runShellCommand(ccomm)\n if retz:\n print '\\n\\nCOMMON: in check_linking: cannot compile'\n print 'command is: ',ccomm\n print 'error is:\\n','*'*40,'\\n',error,'\\n','*'*40\n sys.exit()\n\n (output, error, retz) = runShellCommand(fcomm)\n if retz:\n print '\\n\\nCOMMON: in check_linking: cannot compile'\n print 'command is: ',fcomm\n print 'error is:\\n','*'*40,'\\n',error,'\\n','*'*40\n sys.exit()\n\n (output, error, retz) = runShellCommand(lcomm)\n if retz:\n print \"\"\"\\n\\nCOMMON: in check_linking: cannot link\n Cannot link a C main program to a Fortran77 subroutine\n Make sure that the appropriate flags are passed to the linker.\"\"\"\n print 'command is: ',lcomm\n print 'error is:\\n','*'*40,'\\n',error,'\\n','*'*40\n sys.exit()\n\n\n killfiles(['lnk', 'tmpf.f', 'tmpf.o', 'tmpc.c', 'tmpc.o'])\n\n print 'works'\n return 1;",
"def has_errors_fatal(self) -> bool:",
"def update_linkage_error(links=None):\n links = links or Linkage.objects.all()\n for idx, link in enumerate(links):\n link.error_check(depth=0)\n update_task_info('PROGRESS', meta={'current': idx, 'total': len(links)})",
"def other_native_crash(self) -> bool:\n return pulumi.get(self, \"other_native_crash\")",
"def other_native_crash(self) -> bool:\n return pulumi.get(self, \"other_native_crash\")",
"def testBrokenLinks(self):\n with h5py.File(self.h5_fname, 'a') as f:\n f[\"/Mars/BrokenSoftLink\"] = h5py.SoftLink(\"/Idontexists\")\n f[\"/Mars/BrokenExternalLink\"] = h5py.ExternalLink(\"notexistingfile.h5\", \"/Idontexists\")\n\n ddict = h5todict(self.h5_fname, path=\"/Mars\", errors='ignore')\n self.assertFalse(ddict)\n\n with LoggingValidator(dictdump_logger, error=2):\n ddict = h5todict(self.h5_fname, path=\"/Mars\", errors='log')\n self.assertFalse(ddict)\n\n with self.assertRaises(KeyError):\n h5todict(self.h5_fname, path=\"/Mars\", errors='raise')",
"def prnt_error():\n print \"Error!\\n\"\n return False",
"def test_unexpected_error_in_processor(self):\n\n one_process_workflow = \"\"\"file://B <- file://A ! buggy_processor\n echo A does not produce B\n \"\"\"\n process = run_first_process(one_process_workflow, BuggyProcessor())\n assert process.success is False, process.error_message\n assert process.error_message.find('An unexpected error have happen in tuttle processor '\n 'buggy_processor :') >= 0, process.error_message\n assert process.error_message.find('Traceback (most recent call last):') >= 0, process.error_message\n assert process.error_message.find('raise Exception(\"Unexpected error in processor\")') >= 0, process.error_message\n assert process.error_message.find('will not complete.') >= 0, process.error_message",
"def have_error(self):\n return (hasattr(self, \"got_error\") and\n self.got_error)",
"def test_with_bad_resolution():\n try:\n site_parser.get_wallpapers_urls(\"07-2020\", \"1920x10\")\n except SystemExit:\n assert True\n else:\n assert False",
"def has_errors(self) -> bool:",
"def check_errors(self) -> None:",
"def error(self):\n error = int(self._dll.JLINKARM_HasError())\n if error == 0:\n return None\n return error",
"def not_found(error):\n pass",
"def path_link_errors(self):\n return self._path_link_errors",
"def _processMissBrokenLinks(self, target):\n\n xrdDocs = self._collsXRDMiss[target].find({'storage.location': 'XRD',\n 'target': target,\n 'issue': 'brokenLink'})\n\n self._collsXRDNoLink[target].insert_many(xrdDocs)\n\n self._collsXRDMiss[target].delete_many({'storage.location': 'XRD',\n 'target': target,\n 'issue': 'brokenLink'})",
"def handle_unrecoverable_failure(self, node):\n if node.is_failed and node.exit_status < 400:\n self.report_error_handled(node, 'unrecoverable error, aborting...')\n return ProcessHandlerReport(True, self.exit_codes.ERROR_UNRECOVERABLE_FAILURE)",
"def is_error_url(self, url):\n self._load_error_urls()\n return url in self.errorurls",
"def conditionally_raise(self, error: ImageNotFound) -> None:",
"def check_error_protocol_exists(self):\n p = self.test_proto.parse()\n if p.messages is not None:\n for k, m in p.messages.items():\n self.assertIsNotNone(m.errors, f\"Message {k} did not have the expected implicit string error protocol.\")"
] |
[
"0.6474779",
"0.64479744",
"0.6337244",
"0.62498707",
"0.6116203",
"0.61068827",
"0.60577756",
"0.60174847",
"0.5844965",
"0.5829207",
"0.5748955",
"0.5735846",
"0.5717332",
"0.5702572",
"0.5702572",
"0.56888384",
"0.56444484",
"0.56255955",
"0.5616288",
"0.5600202",
"0.55956733",
"0.5590074",
"0.55565715",
"0.5552336",
"0.5542992",
"0.554243",
"0.55199474",
"0.55061775",
"0.54960275",
"0.5478092"
] |
0.6570556
|
0
|
Test that we clean up entries for the same host and bridge. An IP can only hold a single bridge, and a single bridge can only be accessible via a single IP. So when we create a new entry, we'll remove all existing entries that have either the same IP or the same bridge_id.
|
async def test_creating_entry_removes_entries_for_same_host_or_bridge(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
create_mock_api_discovery(aioclient_mock, [("2.2.2.2", "id-1234")])
orig_entry = MockConfigEntry(
domain="hue",
data={"host": "0.0.0.0", "api_key": "123456789"},
unique_id="id-1234",
)
orig_entry.add_to_hass(hass)
MockConfigEntry(
domain="hue",
data={"host": "1.2.3.4", "api_key": "123456789"},
unique_id="id-5678",
).add_to_hass(hass)
assert len(hass.config_entries.async_entries("hue")) == 2
result = await hass.config_entries.flow.async_init(
"hue",
data={"host": "2.2.2.2"},
context={"source": config_entries.SOURCE_IMPORT},
)
assert result["type"] == "form"
assert result["step_id"] == "link"
with patch(
"homeassistant.components.hue.config_flow.create_app_key",
return_value="123456789",
), patch("homeassistant.components.hue.async_unload_entry", return_value=True):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == "create_entry"
assert result["title"] == "Hue Bridge id-1234"
assert result["data"] == {
"host": "2.2.2.2",
"api_key": "123456789",
"api_version": 1,
}
entries = hass.config_entries.async_entries("hue")
assert len(entries) == 2
new_entry = entries[-1]
assert orig_entry.entry_id != new_entry.entry_id
assert new_entry.unique_id == "id-1234"
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_delete_collection_host_subnet(self):\n pass",
"async def test_remove_orphaned_entries_service(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n data = {\n \"lights\": {\n \"1\": {\n \"name\": \"Light 1 name\",\n \"state\": {\"reachable\": True},\n \"type\": \"Light\",\n \"uniqueid\": \"00:00:00:00:00:00:00:01-00\",\n }\n },\n \"sensors\": {\n \"1\": {\n \"name\": \"Switch 1\",\n \"type\": \"ZHASwitch\",\n \"state\": {\"buttonevent\": 1000, \"gesture\": 1},\n \"config\": {\"battery\": 100},\n \"uniqueid\": \"00:00:00:00:00:00:00:03-00\",\n },\n },\n }\n with patch.dict(DECONZ_WEB_REQUEST, data):\n config_entry = await setup_deconz_integration(hass, aioclient_mock)\n\n device_registry = dr.async_get(hass)\n device = device_registry.async_get_or_create(\n config_entry_id=config_entry.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"123\")},\n )\n\n assert (\n len(\n [\n entry\n for entry in device_registry.devices.values()\n if config_entry.entry_id in entry.config_entries\n ]\n )\n == 5 # Host, gateway, light, switch and orphan\n )\n\n entity_registry = er.async_get(hass)\n entity_registry.async_get_or_create(\n SENSOR_DOMAIN,\n DECONZ_DOMAIN,\n \"12345\",\n suggested_object_id=\"Orphaned sensor\",\n config_entry=config_entry,\n device_id=device.id,\n )\n\n assert (\n len(async_entries_for_config_entry(entity_registry, config_entry.entry_id))\n == 3 # Light, switch battery and orphan\n )\n\n await hass.services.async_call(\n DECONZ_DOMAIN,\n SERVICE_REMOVE_ORPHANED_ENTRIES,\n service_data={CONF_BRIDGE_ID: BRIDGEID},\n )\n await hass.async_block_till_done()\n\n assert (\n len(\n [\n entry\n for entry in device_registry.devices.values()\n if config_entry.entry_id in entry.config_entries\n ]\n )\n == 4 # Host, gateway, light and switch\n )\n\n assert (\n len(async_entries_for_config_entry(entity_registry, config_entry.entry_id))\n == 2 # Light and switch battery\n )",
"def test_delete_host_subnet(self):\n pass",
"def test_duplicates(self):\n\n for i in xrange(0, 99):\n inventory = get_inventory()\n ips = collections.defaultdict(int)\n hostvars = inventory['_meta']['hostvars']\n\n for host, var_dict in hostvars.items():\n nets = var_dict['container_networks']\n for net, vals in nets.items():\n if 'address' in vals.keys():\n\n addr = vals['address']\n ips[addr] += 1\n\n self.assertEqual(1, ips[addr],\n msg=\"IP %s duplicated.\" % addr)",
"def test_remove(self):\n cons_hash = ConsistentHash(2)\n cons_hash.add('192.168.1.1') \n self.assertEquals(len(cons_hash), 2) \n cons_hash.remove('192.168.1.1') \n self.assertEquals(len(cons_hash), 0) \n \n self.assertTrue(cons_hash._is_consistent())",
"def test_destroy_nas_share_by_pool(self):\n pass",
"def test_duplicate_heartbeats_are_deleted(self):\n self._assert_duplicates_are_deleted(HeartBeat)",
"def test_ipam_ip_addresses_delete(self):\n pass",
"def test_delete_buckets(self):\n pass",
"def test_delete_device_group_by_id1(self):\n pass",
"def test_unique_host(self):\n\n mode = \"unique_host_same_device\"\n with mock.patch.object(utils.jax, \"host_id\", return_value=0):\n host_id_devices = utils.host_id_devices_for_rng(mode)\n specialize_func = jax.pmap(functools.partial(\n utils.specialize_rng_host_device, axis_name=\"i\",\n mode=mode), axis_name=\"i\")\n rng0 = specialize_func(self.rng, host_id_devices)\n with mock.patch.object(utils.jax, \"host_id\", return_value=1):\n host_id_devices = utils.host_id_devices_for_rng(mode)\n specialize_func = jax.pmap(functools.partial(\n utils.specialize_rng_host_device, axis_name=\"i\",\n mode=mode), axis_name=\"i\")\n rng1 = specialize_func(self.rng, host_id_devices)\n\n self.assertEqual(\n np.unique(np.concatenate([rng0, rng1], axis=0), axis=0).shape[0], 2)",
"def check_remove_hosts(self, export_details):\n\n playbook_host_dict = self.create_current_host_dict_playbook()\n remove_host_dict = dict()\n host_type_list = ['no_access_hosts', 'read_only_hosts',\n 'read_write_hosts', 'read_only_root_hosts',\n 'read_write_root_hosts']\n\n for host_type in host_type_list:\n if playbook_host_dict[host_type]:\n hosts_to_remove = list()\n ipv4_hosts, ipv6_hosts, fqdn_hosts = \\\n self.get_export_hosts(export_details[host_type])\n for host in playbook_host_dict[host_type]:\n version = check_ipv4_ipv6_fqdn(host)\n\n # Check if host is FQDN/Netgroup or IP\n if version:\n if version == 4:\n # IPv4 host is provided\n ipv4_host = self.get_ipv4_host(host)\n # Check if given host is member of already added\n # network\n if ipv4_host in ipv4_hosts:\n if str(ipv4_host.with_netmask) not in \\\n hosts_to_remove:\n hosts_to_remove.append(\n str(ipv4_host.with_netmask))\n else:\n # IPv6 host is provided\n ipv6_host = self.get_ipv6_host(host)\n # Check if given host is member of already added\n # network\n if ipv6_host in ipv6_hosts:\n if str(ipv6_host.with_prefixlen) not in \\\n hosts_to_remove:\n hosts_to_remove.append(\n str(ipv6_host.with_prefixlen))\n else:\n # FQDN/Netgroup is provided\n if host in fqdn_hosts:\n if host not in hosts_to_remove:\n hosts_to_remove.append(host)\n\n if hosts_to_remove:\n remove_host_dict['remove_' + host_type] = hosts_to_remove\n\n LOG.info(\"Host list to remove: %s\", remove_host_dict)\n return remove_host_dict",
"async def test_many_groups_same_address_ignored(hass: HomeAssistant) -> None:\n entry = MockConfigEntry(\n domain=DOMAIN,\n )\n entry.add_to_hass(hass)\n\n assert await hass.config_entries.async_setup(entry.entry_id)\n await hass.async_block_till_done()\n\n inject_bluetooth_service_info(hass, BLUECHARM_BEACON_SERVICE_INFO)\n await hass.async_block_till_done()\n\n assert (\n hass.states.get(\"sensor.bluecharm_177999_8105_estimated_distance\") is not None\n )\n\n for i in range(12):\n service_info = BluetoothServiceInfo(\n name=\"BlueCharm_177999\",\n address=\"61DE521B-F0BF-9F44-64D4-75BBE1738105\",\n rssi=-63,\n service_data={},\n manufacturer_data={\n 76: b\"\\x02\\x15BlueCharmBeacons\" + bytearray([i]) + b\"\\xfe\\x13U\\xc5\"\n },\n service_uuids=[],\n source=\"local\",\n )\n inject_bluetooth_service_info(hass, service_info)\n\n await hass.async_block_till_done()\n assert hass.states.get(\"sensor.bluecharm_177999_8105_estimated_distance\") is None",
"def check_post_delete_purge_links_metadata(integrated_ff):\n post_data = {\n 'biosource_type': 'immortalized cell line',\n 'award': '1U01CA200059-01',\n 'lab': '4dn-dcic-lab'\n }\n post_res = ff_utils.post_metadata(post_data, 'biosource', key=integrated_ff['ff_key'])\n post_item = post_res['@graph'][0]\n assert 'uuid' in post_item\n assert post_item['biosource_type'] == post_data['biosource_type']\n # make sure there is a 409 when posting to an existing item\n post_data['uuid'] = post_item['uuid']\n with pytest.raises(Exception) as exec_info:\n ff_utils.post_metadata(post_data, 'biosource', key=integrated_ff['ff_key'])\n assert '409' in str(exec_info.value) # 409 is conflict error\n\n # make a biosample that links to the biosource\n bios_data = {'biosource': [post_data['uuid']], 'status': 'deleted',\n 'lab': '4dn-dcic-lab', 'award': '1U01CA200059-01'}\n bios_res = ff_utils.post_metadata(bios_data, 'biosample', key=integrated_ff['ff_key'])\n bios_item = bios_res['@graph'][0]\n assert 'uuid' in bios_item\n\n # delete the biosource\n del_res = ff_utils.delete_metadata(post_item['uuid'], key=integrated_ff['ff_key'])\n assert del_res['status'] == 'success'\n assert del_res['@graph'][0]['status'] == 'deleted'\n\n # test get_metadata_links function (this will ensure everything is indexed, as well)\n links = []\n while not links or ff_utils.stuff_in_queues(integrated_ff['ff_env'], True):\n time.sleep(5)\n post_links = ff_utils.get_metadata_links(post_item['uuid'], key=integrated_ff['ff_key'])\n links = post_links.get('uuids_linking_to', [])\n assert len(links) == 1\n assert links[0]['uuid'] == bios_item['uuid']\n assert links[0]['field'] == 'biosource[0].uuid'\n\n # purge biosource first, which will failed because biosample is still linked\n purge_res1 = ff_utils.purge_metadata(post_item['uuid'], key=integrated_ff['ff_key'])\n assert purge_res1['status'] == 'error'\n assert bios_item['uuid'] in [purge['uuid'] for purge in purge_res1['comment']]\n\n # purge biosample and then biosource\n purge_res2 = ff_utils.purge_metadata(bios_item['uuid'], key=integrated_ff['ff_key'])\n assert purge_res2['status'] == 'success'\n\n # wait for indexing to catch up\n while len(links) > 0 or ff_utils.stuff_in_queues(integrated_ff['ff_env'], True):\n time.sleep(5)\n post_links = ff_utils.get_metadata_links(post_item['uuid'], key=integrated_ff['ff_key'])\n links = post_links.get('uuids_linking_to', [])\n assert len(links) == 0\n\n purge_res3 = ff_utils.purge_metadata(post_item['uuid'], key=integrated_ff['ff_key'])\n assert purge_res3['status'] == 'success'\n # make sure it is purged\n with pytest.raises(Exception) as exec_info:\n ff_utils.get_metadata(post_item['uuid'], key=integrated_ff['ff_key'],\n add_on='datastore=database')\n assert 'The resource could not be found' in str(exec_info.value)",
"async def test_cleanup_orphaned_devices(\n hass: HomeAssistant, entry, setup_plex_server\n) -> None:\n test_device_id = {(DOMAIN, \"temporary_device_123\")}\n\n device_registry = dr.async_get(hass)\n entity_registry = er.async_get(hass)\n entry.add_to_hass(hass)\n\n test_device = device_registry.async_get_or_create(\n config_entry_id=entry.entry_id,\n identifiers=test_device_id,\n )\n assert test_device is not None\n\n test_entity = entity_registry.async_get_or_create(\n Platform.MEDIA_PLAYER, DOMAIN, \"entity_unique_id_123\", device_id=test_device.id\n )\n assert test_entity is not None\n\n # Ensure device is not removed with an entity\n await setup_plex_server()\n device = device_registry.async_get_device(identifiers=test_device_id)\n assert device is not None\n\n await hass.config_entries.async_unload(entry.entry_id)\n\n # Ensure device is removed without an entity\n entity_registry.async_remove(test_entity.entity_id)\n await setup_plex_server()\n device = device_registry.async_get_device(identifiers=test_device_id)\n assert device is None",
"def test_host_routes_create_two_subnets_then_delete_one(self):\n gateway_ips = ['10.0.1.1', '10.0.2.1']\n cidrs = ['10.0.1.0/24', '10.0.2.0/24']\n net, subnet0, subnet1 = self._create_subnets_segments(gateway_ips,\n cidrs)\n\n sh_req = self.new_show_request('subnets', subnet1['id'])\n raw_res = sh_req.get_response(self.api)\n sub_res = self.deserialize(self.fmt, raw_res)\n self.assertEqual([{'destination': cidrs[0],\n 'nexthop': gateway_ips[1]}],\n sub_res['subnet']['host_routes'])\n\n del_req = self.new_delete_request('subnets', subnet0['id'])\n del_req.get_response(self.api)\n\n sh_req = self.new_show_request('subnets', subnet1['id'])\n raw_res = sh_req.get_response(self.api)\n sub_res = self.deserialize(self.fmt, raw_res)\n\n self.assertEqual([], sub_res['subnet']['host_routes'])",
"def test_destroy_nas_share_by_nas(self):\n pass",
"def test_topo_remove_herbivore():\n cell = topo.Topography()\n testherbi = animals.Herbivores()\n testlist = [animals.Herbivores() for _ in range(10)]\n cell.herbivore_list = testlist\n cell.add_animal(testherbi)\n cell.remove_animal(testherbi)\n assert testherbi not in cell.herbivore_list",
"def test_no_duplicate_ips(bf: Session) -> None:\n dup_ip_owners = bf.q.ipOwners(duplicatesOnly=True).answer().frame()\n dup_ip_groups = dup_ip_owners.groupby(\"IP\")\n assert len(dup_ip_groups) == 0, \"Found duplicate IPs: {}\".format(\n \", \".join([_dup_ip_msg(dup_ip, dup_ip_group) for dup_ip, dup_ip_group in dup_ip_groups]))",
"def test_clear_leave_distinct(self):\n p1 = make_package()\n p2 = make_package(filename=\"another-1.2.tar.gz\")\n self.db.save(p1)\n self.db.save(p2)\n key = self.db.redis_key(p1.filename)\n self.db.clear(p1)\n val = self.redis.get(key)\n self.assertIsNone(val)\n count = self.redis.scard(self.db.redis_set)\n self.assertEqual(count, 1)",
"def delete_host(host):\n\n lines = []\n with open(known_hosts_path, \"r\") as f:\n lines = f.readlines()\n\n with open(known_hosts_path, \"w\") as f:\n\n for line in lines:\n if host != line.split()[0]:\n f.write(line)",
"def test_kyc_delete_legal_share_holder_natural(self):\n pass",
"def test_striping_patch(self):\n self.create_simple_filesystem(synthetic_host(\"myserver\"))\n hosts = [synthetic_host(\"myserver{0:d}\".format(n)) for n in range(4)] * 2\n # keep hosts in alternating order, but supply them grouped\n objects = [\n {\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": synthetic_volume_full(host).id}\n for host in sorted(hosts, key=str)\n ]\n response = self.api_client.patch(\"/api/target/\", data={\"deletions\": [], \"objects\": objects})\n self.assertHttpAccepted(response)\n content = json.loads(response.content)\n self.assertEqual(map(str, hosts), list(self._target_hosts(content[\"targets\"])))",
"def remove_pools(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n entryFound = False\n pool_table_name = 'NAT_POOL'\n binding_table_name = 'NAT_BINDINGS'\n binding_dict = config_db.get_table(binding_table_name)\n pool_dict = config_db.get_table(pool_table_name)\n if pool_dict:\n for pool_key_name in pool_dict:\n entryFound = False\n for binding_name, binding_values in binding_dict.items():\n if binding_values['nat_pool'] == pool_key_name:\n click.echo(\"Pool {} is not removed, as it is mapped to Binding {}, remove the pool binding first !!\".format(pool_key_name,binding_name))\n entryFound = True\n break\n\n if entryFound == False: \n config_db.set_entry(pool_table_name, pool_key_name, None)",
"def test_delete_device_group_by_id(self):\n pass",
"def cleanup(self):\n all_aps_info = self.zd.get_all_ap_info()\n all_aps_ins = self.testbed.components['AP']\n for ap_ins in all_aps_ins:\n for ap_info in all_aps_info:\n if ap_ins.base_mac_addr.upper() == ap_info.get('mac').upper() and ap_info.get('ip_addr') != '':\n ap_ins.ip_addr = ap_info.get('ip_addr')",
"def keepHostNames(networkItems_):\n for i in networkItems_[:]:\n try:\n ip = netaddr.IPAddress(i)\n networkItems_.remove(i)\n except:\n pass\n return networkItems_",
"def test_duplicate_entries(self):",
"def test_05_delete_client(self):\n try:\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n client.host_name = test_str[1]\n client.user_name = test_str[2]\n ClientsUnitTest._client_dao.add(client)\n self.assertTrue(\n ClientsUnitTest._client_dao.get_client(client.user_id))\n\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n self.assertTrue(ClientsUnitTest._client_dao.delete(client))\n\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n self.assertTrue(ClientsUnitTest._client_dao.delete(client))\n\n except ClientAlreadyExistsException as error:\n print(error.get_message())\n\n except ClientNotFoundException as error:\n print(error.get_message())\n\n except DBException as error:\n print(error.get_message())",
"def cleanup_dict_infos(self, list_del_sha1s):\n for sha1 in list_del_sha1s:\n try:\n del self.dict_sha1_infos[str(sha1)]\n except:\n # could happen when cleaning up duplicates or image processed by another process\n pass"
] |
[
"0.58510834",
"0.5813717",
"0.57812136",
"0.5701259",
"0.5596435",
"0.55728924",
"0.55418134",
"0.55372703",
"0.5518171",
"0.54749167",
"0.5468669",
"0.5466749",
"0.5457224",
"0.54543275",
"0.54192454",
"0.53716856",
"0.5364132",
"0.5360512",
"0.5357036",
"0.53381026",
"0.5337002",
"0.5322833",
"0.53210926",
"0.5318037",
"0.5298939",
"0.5298515",
"0.5294439",
"0.5288287",
"0.52780294",
"0.5249991"
] |
0.58459246
|
1
|
Test a bridge being discovered via HomeKit.
|
async def test_bridge_homekit(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
create_mock_api_discovery(aioclient_mock, [("0.0.0.0", "bla")])
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_HOMEKIT},
data=zeroconf.ZeroconfServiceInfo(
host="0.0.0.0",
addresses=["0.0.0.0"],
hostname="mock_hostname",
name="mock_name",
port=None,
properties={zeroconf.ATTR_PROPERTIES_ID: "aa:bb:cc:dd:ee:ff"},
type="mock_type",
),
)
assert result["type"] == "form"
assert result["step_id"] == "link"
flow = next(
flow
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert flow["context"]["unique_id"] == config_entries.DEFAULT_DISCOVERY_UNIQUE_ID
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_home_bridge(mock_pre_serv):\n bridge = HomeBridge('TestBridge', 'test.bridge', b'123-45-678')\n\n assert bridge.display_name == 'TestBridge'\n assert bridge.pincode == b'123-45-678'\n assert len(bridge.services) == 2\n\n assert bridge.services[0].display_name == SERV_ACCESSORY_INFO\n assert bridge.services[1].display_name == SERV_BRIDGING_STATE\n\n char_model = bridge.services[0].get_characteristic(CHAR_MODEL)\n assert char_model.get_value() == 'test.bridge'",
"async def test_homeassistant_bridge_fan_setup(hass):\n accessories = await setup_accessories_from_file(\n hass, \"home_assistant_bridge_fan.json\"\n )\n config_entry, pairing = await setup_test_accessories(hass, accessories)\n\n entity_registry = er.async_get(hass)\n\n # Check that the fan is correctly found and set up\n fan_id = \"fan.living_room_fan\"\n fan = entity_registry.async_get(fan_id)\n assert fan.unique_id == \"homekit-fan.living_room_fan-8\"\n\n fan_helper = Helper(\n hass,\n \"fan.living_room_fan\",\n pairing,\n accessories[0],\n config_entry,\n )\n\n fan_state = await fan_helper.poll_and_get_state()\n assert fan_state.attributes[\"friendly_name\"] == \"Living Room Fan\"\n assert fan_state.state == \"off\"\n assert fan_state.attributes[\"supported_features\"] == (\n SUPPORT_DIRECTION | SUPPORT_SET_SPEED | SUPPORT_OSCILLATE\n )\n\n device_registry = dr.async_get(hass)\n\n device = device_registry.async_get(fan.device_id)\n assert device.manufacturer == \"Home Assistant\"\n assert device.name == \"Living Room Fan\"\n assert device.model == \"Fan\"\n assert device.sw_version == \"0.104.0.dev0\"\n\n bridge = device = device_registry.async_get(device.via_device_id)\n assert bridge.manufacturer == \"Home Assistant\"\n assert bridge.name == \"Home Assistant Bridge\"\n assert bridge.model == \"Bridge\"\n assert bridge.sw_version == \"0.104.0.dev0\"",
"def test_home_bridge_setup_message(hk_driver) -> None:\n bridge = HomeBridge(\"hass\", hk_driver, \"test_name\")\n assert bridge.display_name == \"test_name\"\n assert len(bridge.services) == 2\n # setup_message\n bridge.setup_message()",
"def test_homekit_class(self, mock_acc_driver):\n with patch(PATH_HOMEKIT + '.accessories.HomeBridge') as mock_bridge:\n homekit = HomeKit(self.hass, 51826)\n homekit.setup_bridge(b'123-45-678')\n\n mock_bridge.reset_mock()\n self.hass.states.set('demo.demo1', 'on')\n self.hass.states.set('demo.demo2', 'off')\n\n with patch(PATH_HOMEKIT + '.get_accessory') as mock_get_acc, \\\n patch(PATH_HOMEKIT + '.import_types') as mock_import_types, \\\n patch('homeassistant.util.get_local_ip') as mock_ip:\n mock_get_acc.side_effect = ['TempSensor', 'Window']\n mock_ip.return_value = IP_ADDRESS\n homekit.start_driver(Event(EVENT_HOMEASSISTANT_START))\n\n path = self.hass.config.path(HOMEKIT_FILE)\n\n self.assertEqual(mock_import_types.call_count, 1)\n self.assertEqual(mock_get_acc.call_count, 2)\n self.assertEqual(mock_bridge.mock_calls,\n [call().add_accessory('TempSensor'),\n call().add_accessory('Window')])\n self.assertEqual(mock_acc_driver.mock_calls,\n [call(homekit.bridge, 51826, IP_ADDRESS, path),\n call().start()])\n mock_acc_driver.reset_mock()\n\n self.hass.bus.fire(EVENT_HOMEASSISTANT_STOP)\n self.hass.block_till_done()\n\n self.assertEqual(mock_acc_driver.mock_calls, [call().stop()])",
"async def test_bridge_homekit_already_configured(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"0.0.0.0\", \"aabbccddeeff\")])\n MockConfigEntry(\n domain=\"hue\", unique_id=\"aabbccddeeff\", data={\"host\": \"0.0.0.0\"}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_HOMEKIT},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"0.0.0.0\",\n addresses=[\"0.0.0.0\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=None,\n properties={zeroconf.ATTR_PROPERTIES_ID: \"aa:bb:cc:dd:ee:ff\"},\n type=\"mock_type\",\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"",
"async def test_hue_bridge_setup(hass):\n accessories = await setup_accessories_from_file(hass, \"hue_bridge.json\")\n config_entry, pairing = await setup_test_accessories(hass, accessories)\n\n entity_registry = await hass.helpers.entity_registry.async_get_registry()\n\n # Check that the battery is correctly found and set up\n battery_id = \"sensor.hue_dimmer_switch_battery\"\n battery = entity_registry.async_get(battery_id)\n assert battery.unique_id == \"homekit-6623462389072572-644245094400\"\n\n battery_helper = Helper(\n hass, \"sensor.hue_dimmer_switch_battery\", pairing, accessories[0], config_entry\n )\n battery_state = await battery_helper.poll_and_get_state()\n assert battery_state.attributes[\"friendly_name\"] == \"Hue dimmer switch Battery\"\n assert battery_state.attributes[\"icon\"] == \"mdi:battery\"\n assert battery_state.state == \"100\"\n\n device_registry = await hass.helpers.device_registry.async_get_registry()\n\n device = device_registry.async_get(battery.device_id)\n assert device.manufacturer == \"Philips\"\n assert device.name == \"Hue dimmer switch\"\n assert device.model == \"RWL021\"\n assert device.sw_version == \"45.1.17846\"",
"async def test_bridge_zeroconf_ipv6(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"fd00::eeb5:faff:fe84:b17d\",\n addresses=[\"fd00::eeb5:faff:fe84:b17d\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5faabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5faabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"invalid_host\"",
"async def test_bridge_connection_failed(\n hass: HomeAssistant,\n aioclient_mock: AiohttpClientMocker,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n create_mock_api_discovery(aioclient_mock, [])\n\n with patch(\n \"homeassistant.components.hue.config_flow.discover_bridge\",\n side_effect=ClientError,\n ):\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={\"host\": \"blah\"}\n )\n\n # a warning message should have been logged that the bridge could not be reached\n assert \"Error while attempting to retrieve discovery information\" in caplog.text\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"\n\n # test again with zeroconf discovered wrong bridge IP\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"blah\",\n addresses=[\"1.2.3.4\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5fafffeabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5fafffeabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"\n\n # test again with homekit discovered wrong bridge IP\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_HOMEKIT},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"0.0.0.0\",\n addresses=[\"0.0.0.0\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=None,\n properties={zeroconf.ATTR_PROPERTIES_ID: \"aa:bb:cc:dd:ee:ff\"},\n type=\"mock_type\",\n ),\n )\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"\n\n # repeat test with import flow\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_IMPORT},\n data={\"host\": \"blah\"},\n )\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"",
"def mock_bridge(hass):\n return create_mock_bridge()",
"async def test_bridge_zeroconf(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"192.168.1.217\", \"ecb5fafffeabcabc\")])\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.217\",\n addresses=[\"192.168.1.217\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5fafffeabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5fafffeabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"",
"def test_verify_connection_to_a_device():",
"def test_client_home():\n time.sleep(2.0) # prevent healthcheck + home == double tap home()\n c.home()",
"async def test_flow_all_discovered_bridges_exist(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n mock_host = \"1.2.3.4\"\n mock_id = \"bla\"\n create_mock_api_discovery(aioclient_mock, [(mock_host, mock_id)])\n\n MockConfigEntry(\n domain=\"hue\", unique_id=mock_id, data={\"host\": mock_host}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"manual\"",
"def test_connect(rgd):\n assert rgd.connected is True",
"def test_home_accessory(mock_pre_serv):\n acc = HomeAccessory('TestAccessory', 'test.accessory', 'WINDOW')\n\n assert acc.display_name == 'TestAccessory'\n assert acc.category == 13 # Category.WINDOW\n assert len(acc.services) == 1\n\n serv = acc.services[0]\n assert serv.display_name == SERV_ACCESSORY_INFO\n char_model = serv.get_characteristic(CHAR_MODEL)\n assert char_model.get_value() == 'test.accessory'",
"def test_setup_parameters(self, mock_homekit):\n self.assertTrue(setup.setup_component(\n self.hass, 'homekit', CONFIG))\n\n self.assertEqual(mock_homekit.mock_calls,\n [call(self.hass, 11111),\n call().setup_bridge(b'987-65-432')])",
"async def setup_bridge(hass, mock_bridge, hostname=None):\n if hostname is None:\n hostname = 'mock-host'\n hass.config.components.add(hue.DOMAIN)\n hass.data[hue.DOMAIN] = {hostname: mock_bridge}\n config_entry = config_entries.ConfigEntry(1, hue.DOMAIN, 'Mock Title', {\n 'host': hostname\n }, 'test', config_entries.CONN_CLASS_LOCAL_POLL)\n await hass.config_entries.async_forward_entry_setup(\n config_entry, 'binary_sensor')\n await hass.config_entries.async_forward_entry_setup(\n config_entry, 'sensor')\n # and make sure it completes before going further\n await hass.async_block_till_done()",
"async def test_bridge_zeroconf_already_exists(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(\n aioclient_mock, [(\"0.0.0.0\", \"ecb5faabcabc\"), (\"192.168.1.217\", \"ecb5faabcabc\")]\n )\n entry = MockConfigEntry(\n domain=\"hue\",\n source=config_entries.SOURCE_HOMEKIT,\n data={\"host\": \"0.0.0.0\"},\n unique_id=\"ecb5faabcabc\",\n )\n entry.add_to_hass(hass)\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.217\",\n addresses=[\"192.168.1.217\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5faabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5faabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert entry.data[\"host\"] == \"192.168.1.217\"",
"async def test_get_scanner(\n hass: HomeAssistant, mocked_opnsense, mock_device_tracker_conf: list[legacy.Device]\n) -> None:\n interface_client = mock.MagicMock()\n mocked_opnsense.InterfaceClient.return_value = interface_client\n interface_client.get_arp.return_value = [\n {\n \"hostname\": \"\",\n \"intf\": \"igb1\",\n \"intf_description\": \"LAN\",\n \"ip\": \"192.168.0.123\",\n \"mac\": \"ff:ff:ff:ff:ff:ff\",\n \"manufacturer\": \"\",\n },\n {\n \"hostname\": \"Desktop\",\n \"intf\": \"igb1\",\n \"intf_description\": \"LAN\",\n \"ip\": \"192.168.0.167\",\n \"mac\": \"ff:ff:ff:ff:ff:fe\",\n \"manufacturer\": \"OEM\",\n },\n ]\n network_insight_client = mock.MagicMock()\n mocked_opnsense.NetworkInsightClient.return_value = network_insight_client\n network_insight_client.get_interfaces.return_value = {\"igb0\": \"WAN\", \"igb1\": \"LAN\"}\n\n result = await async_setup_component(\n hass,\n DOMAIN,\n {\n DOMAIN: {\n CONF_URL: \"https://fake_host_fun/api\",\n CONF_API_KEY: \"fake_key\",\n CONF_API_SECRET: \"fake_secret\",\n CONF_VERIFY_SSL: False,\n }\n },\n )\n await hass.async_block_till_done()\n assert result\n device_1 = hass.states.get(\"device_tracker.desktop\")\n assert device_1 is not None\n assert device_1.state == \"home\"\n device_2 = hass.states.get(\"device_tracker.ff_ff_ff_ff_ff_ff\")\n assert device_2.state == \"home\"",
"async def test_discovered_by_dhcp_or_integration_discovery(\n hass: HomeAssistant, source, data, bulb_type, extended_white_range, name\n) -> None:\n with _patch_wizlight(\n device=None, extended_white_range=extended_white_range, bulb_type=bulb_type\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": source}, data=data\n )\n await hass.async_block_till_done()\n\n assert result[\"type\"] == FlowResultType.FORM\n assert result[\"step_id\"] == \"discovery_confirm\"\n\n with _patch_wizlight(\n device=None, extended_white_range=extended_white_range, bulb_type=bulb_type\n ), patch(\n \"homeassistant.components.wiz.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry, patch(\n \"homeassistant.components.wiz.async_setup\", return_value=True\n ) as mock_setup:\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {},\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == \"create_entry\"\n assert result2[\"title\"] == name\n assert result2[\"data\"] == {\n CONF_HOST: \"1.1.1.1\",\n }\n assert len(mock_setup.mock_calls) == 1\n assert len(mock_setup_entry.mock_calls) == 1",
"def test_connect(self, gateway):\n assert not gateway._devs",
"async def test_flow_discovered_bridges(opp, aioclient_mock):\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[\n {\"id\": BRIDGEID, \"internalipaddress\": \"1.2.3.4\", \"internalport\": 80},\n {\"id\": \"1234E567890A\", \"internalipaddress\": \"5.6.7.8\", \"internalport\": 80},\n ],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={CONF_HOST: \"1.2.3.4\"}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == BRIDGEID\n assert result[\"data\"] == {\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n }",
"async def test_discovered_during_onboarding(hass: HomeAssistant, source, data) -> None:\n with _patch_wizlight(), patch(\n \"homeassistant.components.wiz.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry, patch(\n \"homeassistant.components.wiz.async_setup\", return_value=True\n ) as mock_setup, patch(\n \"homeassistant.components.onboarding.async_is_onboarded\", return_value=False\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": source}, data=data\n )\n await hass.async_block_till_done()\n\n assert result[\"type\"] == \"create_entry\"\n assert result[\"title\"] == \"WiZ Dimmable White ABCABC\"\n assert result[\"data\"] == {\n CONF_HOST: \"1.1.1.1\",\n }\n assert len(mock_setup.mock_calls) == 1\n assert len(mock_setup_entry.mock_calls) == 1",
"async def test_ryse_smart_bridge_setup(hass):\n accessories = await setup_accessories_from_file(hass, \"ryse_smart_bridge.json\")\n config_entry, pairing = await setup_test_accessories(hass, accessories)\n\n entity_registry = er.async_get(hass)\n\n # Check that the cover.master_bath_south is correctly found and set up\n cover_id = \"cover.master_bath_south\"\n cover = entity_registry.async_get(cover_id)\n assert cover.unique_id == \"homekit-00:00:00:00:00:00-2-48\"\n\n cover_helper = Helper(\n hass,\n cover_id,\n pairing,\n accessories[0],\n config_entry,\n )\n\n cover_state = await cover_helper.poll_and_get_state()\n assert cover_state.attributes[\"friendly_name\"] == \"Master Bath South\"\n assert cover_state.state == \"closed\"\n\n device_registry = dr.async_get(hass)\n\n device = device_registry.async_get(cover.device_id)\n assert device.manufacturer == \"RYSE Inc.\"\n assert device.name == \"Master Bath South\"\n assert device.model == \"RYSE Shade\"\n assert device.sw_version == \"3.0.8\"\n\n bridge = device_registry.async_get(device.via_device_id)\n assert bridge.manufacturer == \"RYSE Inc.\"\n assert bridge.name == \"RYSE SmartBridge\"\n assert bridge.model == \"RYSE SmartBridge\"\n assert bridge.sw_version == \"1.3.0\"\n\n # Check that the cover.ryse_smartshade is correctly found and set up\n cover_id = \"cover.ryse_smartshade\"\n cover = entity_registry.async_get(cover_id)\n assert cover.unique_id == \"homekit-00:00:00:00:00:00-3-48\"\n\n cover_helper = Helper(\n hass,\n cover_id,\n pairing,\n accessories[0],\n config_entry,\n )\n\n cover_state = await cover_helper.poll_and_get_state()\n assert cover_state.attributes[\"friendly_name\"] == \"RYSE SmartShade\"\n assert cover_state.state == \"open\"\n\n device_registry = dr.async_get(hass)\n\n device = device_registry.async_get(cover.device_id)\n assert device.manufacturer == \"RYSE Inc.\"\n assert device.name == \"RYSE SmartShade\"\n assert device.model == \"RYSE Shade\"\n assert device.sw_version == \"\"",
"async def _setup_bridge(self, websession: ClientSession) -> None:\n self._bridge = aiohue.Bridge(\n self.config.hue.ip,\n websession,\n username=self.config.hue.username,\n )\n LOGGER.info(f\"Connecting to Hue Bridge at {self.config.hue.ip}\")\n await self._bridge.initialize()",
"def test_multi_ap_backhaul_roam_with_bridge(dev, apdev):\n br_ifname = 'sta-br0'\n ifname = 'wlan5'\n try:\n run_multi_ap_backhaul_roam_with_bridge(dev, apdev)\n finally:\n subprocess.call(['ip', 'link', 'set', 'dev', br_ifname, 'down'])\n subprocess.call(['brctl', 'delif', br_ifname, ifname])\n subprocess.call(['brctl', 'delbr', br_ifname])\n subprocess.call(['iw', ifname, 'set', '4addr', 'off'])",
"async def test_sensors_with_multiple_bridges(hass, mock_bridge):\n mock_bridge_2 = create_mock_bridge()\n mock_bridge_2.mock_sensor_responses.append({\n \"1\": PRESENCE_SENSOR_3_PRESENT,\n \"2\": LIGHT_LEVEL_SENSOR_3,\n \"3\": TEMPERATURE_SENSOR_3,\n })\n mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE)\n await setup_bridge(hass, mock_bridge)\n await setup_bridge(hass, mock_bridge_2, hostname='mock-bridge-2')\n\n assert len(mock_bridge.mock_requests) == 1\n assert len(mock_bridge_2.mock_requests) == 1\n # 3 \"physical\" sensors with 3 virtual sensors each\n assert len(hass.states.async_all()) == 9",
"async def test_manual_flow_bridge_exist(hass: HomeAssistant) -> None:\n MockConfigEntry(\n domain=\"hue\", unique_id=\"id-1234\", data={\"host\": \"2.2.2.2\"}\n ).add_to_hass(hass)\n\n with patch(\n \"homeassistant.components.hue.config_flow.discover_nupnp\",\n return_value=[],\n ):\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"manual\"\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], {\"host\": \"2.2.2.2\"}\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"",
"def create_mock_api_discovery(aioclient_mock, bridges):\n aioclient_mock.get(\n URL_NUPNP,\n json=[{\"internalipaddress\": host, \"id\": id} for (host, id) in bridges],\n )\n for host, bridge_id in bridges:\n aioclient_mock.get(\n f\"http://{host}/api/config\",\n json={\"bridgeid\": bridge_id},\n )\n # mock v2 support if v2 found in id\n aioclient_mock.get(\n f\"https://{host}/clip/v2/resources\",\n status=403 if \"v2\" in bridge_id else 404,\n )",
"def test_connectivity(self):\n vlan_net1 = helpers.get_not_used_vlan(self.tester.bridge, VLAN_RANGE)\n vlan_net2 = helpers.get_not_used_vlan(self.tester.bridge, VLAN_RANGE)\n trunk_mac = net.get_random_mac('fa:16:3e:00:00:00'.split(':'))\n sub_port_mac = net.get_random_mac('fa:16:3e:00:00:00'.split(':'))\n sub_port_segmentation_id = helpers.get_not_used_vlan(\n self.tester.bridge, VLAN_RANGE)\n LOG.debug(\"Using %(n1)d vlan tag as local vlan ID for net1 and %(n2)d \"\n \"for local vlan ID for net2\", {\n 'n1': vlan_net1, 'n2': vlan_net2})\n self.tester.set_peer_tag(vlan_net1)\n self.trunk_manager.create_trunk(self.trunk.trunk_id,\n self.trunk.port_id,\n trunk_mac)\n\n # tag the patch port, this should be done by the ovs agent but we mock\n # it for this test\n conn_testers.OVSBaseConnectionTester.set_tag(\n self.trunk.patch_port_int_name, self.tester.bridge, vlan_net1)\n\n self.tester.wait_for_connection(self.tester.INGRESS)\n self.tester.wait_for_connection(self.tester.EGRESS)\n\n self.tester.add_vlan_interface_and_peer(sub_port_segmentation_id,\n self.net2_cidr)\n conn_testers.OVSBaseConnectionTester.set_tag(\n self.tester._peer2.port.name, self.tester.bridge, vlan_net2)\n\n sub_port = trunk_manager.SubPort(self.trunk.trunk_id,\n uuidutils.generate_uuid(),\n sub_port_mac,\n sub_port_segmentation_id)\n\n self.trunk_manager.add_sub_port(sub_port.trunk_id,\n sub_port.port_id,\n sub_port.port_mac,\n sub_port.segmentation_id)\n # tag the patch port, this should be done by the ovs agent but we mock\n # it for this test\n conn_testers.OVSBaseConnectionTester.set_tag(\n sub_port.patch_port_int_name, self.tester.bridge, vlan_net2)\n\n self.tester.wait_for_sub_port_connectivity(self.tester.INGRESS)\n self.tester.wait_for_sub_port_connectivity(self.tester.EGRESS)\n\n self.trunk_manager.remove_sub_port(sub_port.trunk_id,\n sub_port.port_id)\n self.tester.wait_for_sub_port_no_connectivity(self.tester.INGRESS)\n self.tester.wait_for_sub_port_no_connectivity(self.tester.EGRESS)\n\n self.trunk_manager.remove_trunk(self.trunk.trunk_id,\n self.trunk.port_id)\n self.tester.wait_for_no_connection(self.tester.INGRESS)"
] |
[
"0.75312334",
"0.6870456",
"0.68665636",
"0.6690778",
"0.6649442",
"0.6622296",
"0.6615364",
"0.6443438",
"0.6431749",
"0.6419735",
"0.64003897",
"0.63273555",
"0.6083087",
"0.6046865",
"0.60348",
"0.59840834",
"0.59795445",
"0.5970852",
"0.5957151",
"0.5952589",
"0.59462017",
"0.5864992",
"0.58188665",
"0.5787958",
"0.57854766",
"0.5776125",
"0.57656205",
"0.5741562",
"0.57355434",
"0.5731633"
] |
0.70850176
|
1
|
Test if a HomeKit-discovered bridge has already been configured.
|
async def test_bridge_homekit_already_configured(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
create_mock_api_discovery(aioclient_mock, [("0.0.0.0", "aabbccddeeff")])
MockConfigEntry(
domain="hue", unique_id="aabbccddeeff", data={"host": "0.0.0.0"}
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_HOMEKIT},
data=zeroconf.ZeroconfServiceInfo(
host="0.0.0.0",
addresses=["0.0.0.0"],
hostname="mock_hostname",
name="mock_name",
port=None,
properties={zeroconf.ATTR_PROPERTIES_ID: "aa:bb:cc:dd:ee:ff"},
type="mock_type",
),
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_connected(cls, device_config):\n if \"console_port_name\" in device_config[\"persistent\"]:\n address = device_config[\"persistent\"][\"console_port_name\"]\n else:\n address = device_config[\"persistent\"][\"hub_port_name\"]\n return os.path.exists(address)",
"async def is_configured(hass: HomeAssistant) -> bool:\n manager = await async_get_manager(hass)\n if manager.data is None:\n return False\n return bool(manager.data != manager.default_preferences())",
"def homekit_enabled(self) -> bool:\n return bool(self._device_info[\"HomeKit\"])",
"def check_any_light_on(bridge):\n for i,group in bridge.get_group().items():\n if group['state']['any_on']:\n return True\n return False",
"def is_home(self):\n return bool([\n device for device in self.devices\n if device.is_home and device.is_phone\n ])",
"def is_connected():\r\n ipconfig_output = terminal('ipconfig | findstr /i gateway')\r\n if ipconfig_output != None:\r\n return any(i for i in ipconfig_output if i.isdigit())\r\n \r\n # Alternative way if ipconfig has error in some systems\r\n ## Slower than ipconfig workaround\r\n try:\r\n socket().connect(('8.8.8.8', 53))\r\n return True\r\n except:\r\n return False",
"def check_connected(self):\n return\\\n (self.setup is not None) and\\\n (self.design is not None) and\\\n (self.project is not None) and\\\n (self.desktop is not None) and\\\n (self.app is not None)",
"def CheckIfConnectionIsABridge(self, node1_idx, node2_idx):\n\n is_bridge = False\n\n num_of_commmon_components_before_disconecting = len(self.GetCommonComponents())\n\n self.Disconnect(node1_idx, node2_idx)\n\n num_of_commmon_components_after_disconecting = len(self.GetCommonComponents())\n\n if num_of_commmon_components_after_disconecting > num_of_commmon_components_before_disconecting:\n is_bridge = True\n self.Connect(node1_idx, node2_idx)\n\n return is_bridge",
"def is_connected(self) -> bool:\n return self.arduino is not None",
"def is_connected(self):\n try:\n if self.coordinator.data[self._system_id][\"devices\"][self._item_id].get(\n \"connected\"\n ):\n connected_ap = self.coordinator.data[self._system_id][\"devices\"][\n self._item_id\n ].get(\"apId\")\n if connected_ap:\n connected_ap = self.coordinator.data[self._system_id][\n \"access_points\"\n ][connected_ap][\"accessPointSettings\"][\"accessPointOtherSettings\"][\n \"roomData\"\n ][\n \"name\"\n ]\n self._attrs[\"connected_ap\"] = connected_ap\n else:\n self._attrs[\"connected_ap\"] = \"NA\"\n\n self._attrs[\"ip_address\"] = self.coordinator.data[self._system_id][\n \"devices\"\n ][self._item_id].get(\"ipAddress\", \"NA\")\n\n self._mac = self.coordinator.data[self._system_id][\"devices\"][\n self._item_id\n ].get(\"macAddress\")\n\n self._attrs[\"mac\"] = self._mac if self._mac else \"NA\"\n\n self._is_connected = True\n else:\n self._is_connected = False\n except TypeError:\n pass\n except KeyError:\n pass\n # self.hass.async_create_task(\n # self.hass.config_entries.async_reload(self.coordinator.entry.entry_id)\n # )\n\n return self._is_connected",
"def is_connected(self) -> bool:",
"def isconnected(self) -> bool:",
"async def test_bridge_zeroconf_already_exists(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(\n aioclient_mock, [(\"0.0.0.0\", \"ecb5faabcabc\"), (\"192.168.1.217\", \"ecb5faabcabc\")]\n )\n entry = MockConfigEntry(\n domain=\"hue\",\n source=config_entries.SOURCE_HOMEKIT,\n data={\"host\": \"0.0.0.0\"},\n unique_id=\"ecb5faabcabc\",\n )\n entry.add_to_hass(hass)\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.217\",\n addresses=[\"192.168.1.217\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5faabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5faabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert entry.data[\"host\"] == \"192.168.1.217\"",
"def device_is_configured(self):\n\n\t\ttry:\n\t\t\t_ = self._dev\n\t\texcept:\n\t\t\treturn False\n\n\t\treturn True",
"async def connected(self) -> bool:\n args = ['-t', f\"DEVICE INFO,{self.conf['device_address']}\"]\n output = await self.run_vh(args)\n return \"IN USE BY: NO ONE\" not in output",
"def isconnected(self) -> bool:\n ...",
"def is_configured(self):\n pass",
"def test_home_bridge(mock_pre_serv):\n bridge = HomeBridge('TestBridge', 'test.bridge', b'123-45-678')\n\n assert bridge.display_name == 'TestBridge'\n assert bridge.pincode == b'123-45-678'\n assert len(bridge.services) == 2\n\n assert bridge.services[0].display_name == SERV_ACCESSORY_INFO\n assert bridge.services[1].display_name == SERV_BRIDGING_STATE\n\n char_model = bridge.services[0].get_characteristic(CHAR_MODEL)\n assert char_model.get_value() == 'test.bridge'",
"def is_bridge_burning(self):\n # Bridge should be the last room in the list\n last_room = self.room_list[-1]\n if last_room.type == const.BRIDGE:\n return last_room.fire_level == 2\n # iterate through just in case it's not last in the list\n else:\n for room in self.room_list:\n if room.type == const.BRIDGE:\n return room.fire_level == 2\n return False # if this is reached, then the ship doesn't have a bridge",
"def CheckIfConnecting(self):\n if self.CheckIfWiredConnecting() or self.CheckIfWirelessConnecting():\n return True\n else:\n return False",
"def isConnected():",
"async def test_bridge_import_already_configured(hass: HomeAssistant) -> None:\n MockConfigEntry(\n domain=\"hue\", unique_id=\"aabbccddeeff\", data={\"host\": \"0.0.0.0\"}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_IMPORT},\n data={\"host\": \"0.0.0.0\", \"properties\": {\"id\": \"aa:bb:cc:dd:ee:ff\"}},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"",
"def setup_unused_bridge_network(self):\n out = utils.run_script('conjure-up.lxc network show conjureup0')\n if out.returncode == 0:\n return # already configured\n\n out = utils.run_script('conjure-up.lxc network create conjureup0 '\n 'ipv4.address=auto '\n 'ipv4.nat=true '\n 'ipv6.address=none '\n 'ipv6.nat=false')\n\n if out.returncode != 0:\n raise Exception(\n \"Failed to create conjureup0 network bridge: \"\n \"{}\".format(out.stderr.decode()))",
"def is_incall_connected(self) -> bool:",
"def is_connected():\r\n global connection\r\n if connection is None:\r\n return False\r\n else:\r\n return True",
"def is_configured(self):\n return True",
"def is_configured(self):\n return self._session is not None",
"def sees_home_tag(self):\n detections = self.swarmie.get_latest_targets().detections\n\n for detection in detections:\n if detection.id == 256:\n return True\n\n return False",
"def is_connected(self) -> bool:\n return False if self._snitun is None else self._snitun.is_connected",
"def exists(self):\n if self.host.exists(self.remote_path):\n print 'Yes, config exists already.'\n return True\n else:\n print 'Config doesn\\'t exist yet'\n return False"
] |
[
"0.6457371",
"0.61604476",
"0.6104228",
"0.6002925",
"0.5971744",
"0.59514815",
"0.5920931",
"0.59096086",
"0.5874031",
"0.58673847",
"0.5864205",
"0.5858051",
"0.58545786",
"0.58428514",
"0.58357936",
"0.58042425",
"0.5801472",
"0.5788955",
"0.57613575",
"0.5750078",
"0.5734285",
"0.5706146",
"0.5682551",
"0.567734",
"0.56698596",
"0.56539273",
"0.5631892",
"0.5621889",
"0.5618452",
"0.5600415"
] |
0.67370534
|
0
|
Test options config flow for a V1 bridge.
|
async def test_options_flow_v1(hass: HomeAssistant) -> None:
entry = MockConfigEntry(
domain="hue",
unique_id="aabbccddeeff",
data={"host": "0.0.0.0"},
)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
schema = result["data_schema"].schema
assert (
_get_schema_default(schema, const.CONF_ALLOW_HUE_GROUPS)
== const.DEFAULT_ALLOW_HUE_GROUPS
)
assert (
_get_schema_default(schema, const.CONF_ALLOW_UNREACHABLE)
== const.DEFAULT_ALLOW_UNREACHABLE
)
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
const.CONF_ALLOW_HUE_GROUPS: True,
const.CONF_ALLOW_UNREACHABLE: True,
},
)
assert result["type"] == "create_entry"
assert result["data"] == {
const.CONF_ALLOW_HUE_GROUPS: True,
const.CONF_ALLOW_UNREACHABLE: True,
}
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def test_option_flow(opp, aioclient_mock):\n config_entry = await setup_deconz_integration(opp, aioclient_mock)\n\n result = await opp.config_entries.options.async_init(config_entry.entry_id)\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"deconz_devices\"\n\n result = await opp.config_entries.options.async_configure(\n result[\"flow_id\"],\n user_input={\n CONF_ALLOW_CLIP_SENSOR: False,\n CONF_ALLOW_DECONZ_GROUPS: False,\n CONF_ALLOW_NEW_DEVICES: False,\n },\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"data\"] == {\n CONF_ALLOW_CLIP_SENSOR: False,\n CONF_ALLOW_DECONZ_GROUPS: False,\n CONF_ALLOW_NEW_DEVICES: False,\n CONF_MASTER_GATEWAY: True,\n }",
"async def test_options_flow_v2(hass: HomeAssistant) -> None:\n entry = MockConfigEntry(\n domain=\"hue\",\n unique_id=\"aabbccddeeff\",\n data={\"host\": \"0.0.0.0\", \"api_version\": 2},\n )\n entry.add_to_hass(hass)\n\n dev_reg = dr.async_get(hass)\n mock_dev_id = \"aabbccddee\"\n dev_reg.async_get_or_create(\n config_entry_id=entry.entry_id, identifiers={(const.DOMAIN, mock_dev_id)}\n )\n\n result = await hass.config_entries.options.async_init(entry.entry_id)\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"init\"\n schema = result[\"data_schema\"].schema\n assert _get_schema_default(schema, const.CONF_IGNORE_AVAILABILITY) == []\n\n result = await hass.config_entries.options.async_configure(\n result[\"flow_id\"],\n user_input={const.CONF_IGNORE_AVAILABILITY: [mock_dev_id]},\n )\n\n assert result[\"type\"] == \"create_entry\"\n assert result[\"data\"] == {\n const.CONF_IGNORE_AVAILABILITY: [mock_dev_id],\n }",
"async def test_flow_manual_configuration_decision(opp, aioclient_mock):\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[{\"id\": BRIDGEID, \"internalipaddress\": \"1.2.3.4\", \"internalport\": 80}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={CONF_HOST: CONF_MANUAL_INPUT}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: \"1.2.3.4\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://1.2.3.4:80/api/{API_KEY}/config\",\n json={\"bridgeid\": BRIDGEID},\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == BRIDGEID\n assert result[\"data\"] == {\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n }",
"async def test_flow_manual_configuration(opp, aioclient_mock):\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: \"1.2.3.4\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://1.2.3.4:80/api/{API_KEY}/config\",\n json={\"bridgeid\": BRIDGEID},\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == BRIDGEID\n assert result[\"data\"] == {\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n }",
"async def test_options_flow_router(hass: HomeAssistant) -> None:\n config_entry = MockConfigEntry(\n domain=DOMAIN,\n data={**CONFIG_DATA, CONF_MODE: \"router\"},\n )\n config_entry.add_to_hass(hass)\n\n with PATCH_SETUP_ENTRY:\n await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n result = await hass.config_entries.options.async_init(config_entry.entry_id)\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"step_id\"] == \"init\"\n assert CONF_REQUIRE_IP not in result[\"data_schema\"].schema\n\n result = await hass.config_entries.options.async_configure(\n result[\"flow_id\"],\n user_input={\n CONF_CONSIDER_HOME: 20,\n CONF_TRACK_UNKNOWN: True,\n CONF_INTERFACE: \"aaa\",\n CONF_DNSMASQ: \"bbb\",\n },\n )\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n assert config_entry.options[CONF_CONSIDER_HOME] == 20\n assert config_entry.options[CONF_TRACK_UNKNOWN] is True\n assert config_entry.options[CONF_INTERFACE] == \"aaa\"\n assert config_entry.options[CONF_DNSMASQ] == \"bbb\"",
"def test_adapter_opts_set(self):\n conn = self._get_conn()\n\n discovery = {\n \"versions\": {\n \"values\": [\n {\n \"status\": \"stable\",\n \"updated\": \"2019-06-01T00:00:00Z\",\n \"media-types\": [\n {\n \"base\": \"application/json\",\n \"type\": \"application/vnd.openstack.heat-v2+json\", # noqa: E501\n }\n ],\n \"id\": \"v2.0\",\n \"links\": [\n {\n \"href\": \"https://example.org:8888/heat/v2\",\n \"rel\": \"self\",\n }\n ],\n }\n ]\n }\n }\n self.register_uris(\n [\n dict(\n method='GET',\n uri='https://example.org:8888/heat/v2',\n json=discovery,\n ),\n dict(\n method='GET',\n uri='https://example.org:8888/heat/v2/foo',\n json={'foo': {}},\n ),\n ]\n )\n\n adap = conn.orchestration\n self.assertEqual('SpecialRegion', adap.region_name)\n self.assertEqual('orchestration', adap.service_type)\n self.assertEqual('internal', adap.interface)\n self.assertEqual(\n 'https://example.org:8888/heat/v2', adap.endpoint_override\n )\n\n adap.get('/foo')\n self.assert_calls()",
"async def test_option_flow(hass):\n config_entry = MockConfigEntry(\n domain=\"cast\", data={\"known_hosts\": [\"192.168.0.10\", \"192.168.0.11\"]}\n )\n config_entry.add_to_hass(hass)\n await hass.async_block_till_done()\n\n result = await hass.config_entries.options.async_init(config_entry.entry_id)\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"options\"\n data_schema = result[\"data_schema\"].schema\n assert get_suggested(data_schema, \"known_hosts\") == \"192.168.0.10,192.168.0.11\"\n\n result = await hass.config_entries.options.async_configure(\n result[\"flow_id\"],\n user_input={\"known_hosts\": \"192.168.0.1, , 192.168.0.2 \"},\n )\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY\n assert result[\"data\"] is None\n assert config_entry.data == {\"known_hosts\": [\"192.168.0.1\", \"192.168.0.2\"]}",
"async def test_bridge_zeroconf(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"192.168.1.217\", \"ecb5fafffeabcabc\")])\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.217\",\n addresses=[\"192.168.1.217\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5fafffeabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5fafffeabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"",
"async def test_options_flow_ap(hass: HomeAssistant) -> None:\n config_entry = MockConfigEntry(\n domain=DOMAIN,\n data=CONFIG_DATA,\n options={CONF_REQUIRE_IP: True},\n )\n config_entry.add_to_hass(hass)\n\n with PATCH_SETUP_ENTRY:\n await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n result = await hass.config_entries.options.async_init(config_entry.entry_id)\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"step_id\"] == \"init\"\n assert CONF_REQUIRE_IP in result[\"data_schema\"].schema\n\n result = await hass.config_entries.options.async_configure(\n result[\"flow_id\"],\n user_input={\n CONF_CONSIDER_HOME: 20,\n CONF_TRACK_UNKNOWN: True,\n CONF_INTERFACE: \"aaa\",\n CONF_DNSMASQ: \"bbb\",\n CONF_REQUIRE_IP: False,\n },\n )\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n assert config_entry.options[CONF_CONSIDER_HOME] == 20\n assert config_entry.options[CONF_TRACK_UNKNOWN] is True\n assert config_entry.options[CONF_INTERFACE] == \"aaa\"\n assert config_entry.options[CONF_DNSMASQ] == \"bbb\"\n assert config_entry.options[CONF_REQUIRE_IP] is False",
"async def test_manual_configuration_timeout_get_bridge(opp, aioclient_mock):\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: \"1.2.3.4\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://1.2.3.4:80/api/{API_KEY}/config\", exc=asyncio.TimeoutError\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"no_bridges\"",
"async def test_options_flow(hass: HomeAssistant) -> None:\n entry = MockConfigEntry(domain=DOMAIN, unique_id=\"12345\", data={}, version=2)\n entry.add_to_hass(hass)\n\n with patch(\n \"homeassistant.components.verisure.async_setup_entry\",\n return_value=True,\n ):\n assert await hass.config_entries.async_setup(entry.entry_id)\n await hass.async_block_till_done()\n\n result = await hass.config_entries.options.async_init(entry.entry_id)\n\n assert result.get(\"type\") == FlowResultType.FORM\n assert result.get(\"step_id\") == \"init\"\n\n result = await hass.config_entries.options.async_configure(\n result[\"flow_id\"],\n user_input={CONF_LOCK_CODE_DIGITS: 4},\n )\n\n assert result.get(\"type\") == FlowResultType.CREATE_ENTRY\n assert result.get(\"data\") == {CONF_LOCK_CODE_DIGITS: DEFAULT_LOCK_CODE_DIGITS}",
"async def test_flow_bridges_discovered(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n # Add ignored config entry. Should still show up as option.\n MockConfigEntry(\n domain=\"hue\", source=config_entries.SOURCE_IGNORE, unique_id=\"bla\"\n ).add_to_hass(hass)\n\n create_mock_api_discovery(\n aioclient_mock, [(\"1.2.3.4\", \"bla\"), (\"5.6.7.8\", \"beer_v2\")]\n )\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"init\"\n\n with pytest.raises(vol.Invalid):\n assert result[\"data_schema\"]({\"id\": \"not-discovered\"})\n\n result[\"data_schema\"]({\"id\": \"bla\"})\n result[\"data_schema\"]({\"id\": \"beer_v2\"})\n result[\"data_schema\"]({\"id\": \"manual\"})",
"async def test_options_flow(hass):\n # Create a new MockConfigEntry and add to HASS (we're bypassing config\n # flow entirely)\n entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG, entry_id=\"test\")\n entry.add_to_hass(hass)\n\n # Initialize an options flow\n result = await hass.config_entries.options.async_init(entry.entry_id)\n\n # Verify that the first options step is a user form\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n\n # Enter some fake data into the form\n result = await hass.config_entries.options.async_configure(\n result[\"flow_id\"],\n user_input={platform: platform != SENSOR for platform in PLATFORMS},\n )\n\n # Verify that the flow finishes\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == \"test_username\"\n\n # Verify that the options were updated\n assert entry.options == {BINARY_SENSOR: True, SENSOR: False, SWITCH: True}",
"async def test_bridge_homekit(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"0.0.0.0\", \"bla\")])\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_HOMEKIT},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"0.0.0.0\",\n addresses=[\"0.0.0.0\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=None,\n properties={zeroconf.ATTR_PROPERTIES_ID: \"aa:bb:cc:dd:ee:ff\"},\n type=\"mock_type\",\n ),\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"\n\n flow = next(\n flow\n for flow in hass.config_entries.flow.async_progress()\n if flow[\"flow_id\"] == result[\"flow_id\"]\n )\n assert flow[\"context\"][\"unique_id\"] == config_entries.DEFAULT_DISCOVERY_UNIQUE_ID",
"def configure(self, options, conf):",
"async def test_flow_discovered_bridges(opp, aioclient_mock):\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[\n {\"id\": BRIDGEID, \"internalipaddress\": \"1.2.3.4\", \"internalport\": 80},\n {\"id\": \"1234E567890A\", \"internalipaddress\": \"5.6.7.8\", \"internalport\": 80},\n ],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={CONF_HOST: \"1.2.3.4\"}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == BRIDGEID\n assert result[\"data\"] == {\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n }",
"async def test_flow_two_bridges_discovered_one_new(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"1.2.3.4\", \"bla\"), (\"5.6.7.8\", \"beer\")])\n MockConfigEntry(\n domain=\"hue\", unique_id=\"bla\", data={\"host\": \"1.2.3.4\"}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"init\"\n assert result[\"data_schema\"]({\"id\": \"beer\"})\n assert result[\"data_schema\"]({\"id\": \"manual\"})\n with pytest.raises(vol.error.MultipleInvalid):\n assert not result[\"data_schema\"]({\"id\": \"bla\"})",
"def test_valid_settings() -> None:\n SwaggerTesterSettings()",
"async def test_manual_configuration_update_configuration(opp, aioclient_mock):\n config_entry = await setup_deconz_integration(opp, aioclient_mock)\n\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: \"2.3.4.5\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://2.3.4.5:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://2.3.4.5:80/api/{API_KEY}/config\",\n json={\"bridgeid\": BRIDGEID},\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert config_entry.data[CONF_HOST] == \"2.3.4.5\"",
"def test_config_opts(sc):\n assert sc.server_name is not None\n assert sc.deployment == Deployment.stg\n assert sc.admins is not None\n assert sc.command_handler is not None\n assert sc.command_handler_work_dir is not None\n assert sc.command_handler_pvc_env_var is not None\n assert sc.command_handler_image_reference is not None\n assert sc.command_handler_k8s_namespace is not None\n assert sc.fas_password is not None\n assert sc.testing_farm_secret is not None\n assert sc.github_requests_log_path is not None\n assert sc.webhook_secret is not None\n assert sc.validate_webhooks is not None\n assert sc.gitlab_token_secret is not None",
"async def test_manual_configuration_dont_update_configuration(opp, aioclient_mock):\n await setup_deconz_integration(opp, aioclient_mock)\n\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: \"1.2.3.4\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://1.2.3.4:80/api/{API_KEY}/config\",\n json={\"bridgeid\": BRIDGEID},\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"",
"def configure_test(self, test, config_json):\n pass",
"async def test_full_user_flow_advanced_options(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n mock_connection(aioclient_mock)\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={CONF_SOURCE: SOURCE_USER, \"show_advanced_options\": True}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n\n user_input = {\n **MOCK_USER_INPUT,\n CONF_VERIFY_SSL: True,\n }\n\n with _patch_async_setup_entry():\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input=user_input,\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == HOST\n\n assert result[\"data\"]\n assert result[\"data\"][CONF_HOST] == HOST\n assert result[\"data\"][CONF_VERIFY_SSL]",
"def t1_configure(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n t1_id = kwargs[\"tier1_id\"]\n json_data = {\"type\": kwargs[\"t1type\"]}\n status = configure_t1_json(proxy, sessiontoken, t1_id, json_data)\n if status == 200:\n print(f'Tier1 gateway {t1_id} has been configured as {kwargs[\"t1type\"]}')\n else:\n print(\"T1 was not created. Please check your syntax and try again.\")\n sys.exit(1)",
"def t0_switch_config_helper(test_obj: 'T0TestBase'):\n configer = SwitchConfiger(test_obj)\n test_obj.dut.switch_id = configer.start_switch()",
"async def test_flow_all_discovered_bridges_exist(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n mock_host = \"1.2.3.4\"\n mock_id = \"bla\"\n create_mock_api_discovery(aioclient_mock, [(mock_host, mock_id)])\n\n MockConfigEntry(\n domain=\"hue\", unique_id=mock_id, data={\"host\": mock_host}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"manual\"",
"def config_setup(self, config):\n super(PushGatewayApiV1TestCase, self).config_setup(config)\n config[\"apps\"][\"com.example.spqr\"] = {\n \"type\": \"tests.test_pushgateway_api_v1.TestPushkin\"\n }",
"def test_read_config_option(self):\n # set up config\n config.set_config_file(os.path.join(path_to_module, \"test_config.conf\"))\n config.setup()\n # Test that all the parameters loaded from file are correct\n self.assertEqual(config.read_config_option('client_id'), 'uploader')\n self.assertEqual(config.read_config_option('client_secret'), 'secret')\n self.assertEqual(config.read_config_option('username'), 'admin')\n self.assertEqual(config.read_config_option('password'), 'password1')\n self.assertEqual(config.read_config_option('base_url'), 'http://localhost:8080/irida-latest/api/')\n self.assertEqual(config.read_config_option('parser'), 'miseq')\n self.assertEqual(config.read_config_option('readonly', bool), False)",
"def test_cli_with_comparator_config_from_config(runner):\n\n @click.command()\n @common_options\n @options_from_config(ComparatorConfig, ComparatorConfigCli)\n def my_cmd_fun(**kwargs):\n config = ComparatorConfig.from_dict(kwargs)\n print(config)\n\n with tempfile.TemporaryDirectory() as temp_dir:\n config_path = Path(temp_dir) / \"config.yaml\"\n with YamlConfigFile(config_path) as config_file:\n config_file.save_config(COMPARATOR_CONFIG1)\n\n expected_config_str = str(COMPARATOR_CONFIG1)\n result = runner.invoke(my_cmd_fun, [\"--config-path\", config_path.resolve().as_posix()])\n assert result.output.splitlines() == [expected_config_str]\n assert not result.exception\n assert result.exit_code == 0",
"def do_genconfig(args):\n\n print(\"========= DEFAULT ========\")\n debug = utils.get_input(\n \"Enable agent in debug mode [y/N]: \") or 'n'\n retry_interval = utils.get_input(\n \"Type the polling interval in seconds for daemon to manage the nodes: \")\n batch_publishing_interval = utils.get_input(\n \"Type the publishing interval in seconds for daemon to push the metrics: \")\n refresh_interval = utils.get_input(\n \"Type the polling interval in seconds to get health status directly from OneView: \")\n scmb_certificate_dir = utils.get_input(\n \"Type the certificates directory to register in OneView SCMB [/var/run/oneview-monasca]: \")\n auth_retry_limit = utils.get_input(\n \"Type the maximum number of attempts to try authenticate in REST API: \")\n\n debug = 'false' if debug == 'n' else 'true'\n retry_interval = retry_interval if retry_interval else \"300\"\n refresh_interval = refresh_interval if refresh_interval else \"180\"\n batch_publishing_interval = batch_publishing_interval if batch_publishing_interval else \"60\"\n\n auth_retry_limit = auth_retry_limit if auth_retry_limit else \"5\"\n scmb_certificate_dir = scmb_certificate_dir if scmb_certificate_dir else \"/var/run/oneview-monasca\"\n\n scmb_certificate_dir = os.path.realpath(os.path.expanduser(scmb_certificate_dir))\n utils.makedirs(scmb_certificate_dir)\n\n print(\"========= Openstack =========\")\n auth_url = utils.get_input(\"Type the Keystone url for authentication: \")\n auth_user = utils.get_input(\"Type the name of your OpenStack user: \")\n auth_password = getpass.getpass(\"Type the password for your OpenStack user: \")\n auth_tenant_name = utils.get_input(\"Type the tenant name that the OpenStack user will be authenticated: \")\n monasca_api_version = utils.get_input(\"Type a version of Monasca API that you want to use [2_0]: \")\n\n monasca_api_version = monasca_api_version if monasca_api_version else \"2_0\"\n\n print(\"========= OneView =========\")\n oneview_manager_url = utils.get_input(\"Type the manager_url for the OneView services: \")\n oneview_username = utils.get_input(\"Type your OneView username: \")\n oneview_password = getpass.getpass(\"Type your OneView user's password: \")\n oneview_insecure = utils.get_input(\"Would you like to allow insecure connections to OneView? [Y/n]: \") or \"Y\"\n max_polling_attempts = utils.get_input(\"Max polling attempts OneView requests: \")\n tls_cacert_file = utils.get_input(\"Path to your CA OneView certificate file, if any: \")\n\n oneview_host = utils.extract_domain_from_service_url(oneview_manager_url)\n oneview_insecure = \"true\" if oneview_insecure.lower() == 'y' else \"false\"\n max_polling_attempts = max_polling_attempts if max_polling_attempts else \"15\"\n\n fault_tolerance_enable = False\n group_name = coordinator_url = None\n while True:\n create = utils.get_input(\"Would you like to enable fault tolerance in the agent? 
[Y/n] \") or 'y'\n\n if create.lower() == 'y':\n print(\"========= Tooz =========\")\n\n group_name = utils.get_input(\"The group name for tooz configuration: \")\n coordinator_url = utils.get_input(\"The coordinator url for tooz configuration: \")\n fault_tolerance_enable = True\n break\n elif create.lower() == 'n':\n break\n else:\n print(\"Invalid option.\\n\")\n\n config_drivers = {}\n try:\n names = utils.list_names_driver(const.NAMESPACE_DISCOVERY_NODES, log=False)\n except Exception as ex:\n print('\\nCannot load installed drivers - Error caused by %s\\n' % str(ex))\n names = []\n\n for name in names:\n try:\n conf = utils.load_class_by_alias(\n const.NAMESPACE_DISCOVERY_NODES, name, log=False).genconfig()\n\n config_drivers[name.split('_')[-1]] = conf\n except Exception as ex:\n print('\\nCannot generating config file session to driver: %s - Error caused by %s\\n' % (name, str(ex)))\n\n # Write Configuration file #\n config = ConfigParser()\n config.set(\"DEFAULT\", \"debug\", debug)\n config.set(\"DEFAULT\", \"retry_interval\", retry_interval)\n config.set(\"DEFAULT\", \"periodic_refresh_interval\", refresh_interval)\n config.set(\"DEFAULT\", \"batch_publishing_interval\", batch_publishing_interval)\n\n config.set(\"DEFAULT\", \"auth_retry_limit\", auth_retry_limit)\n config.set(\"DEFAULT\", \"scmb_certificate_dir\", scmb_certificate_dir)\n\n if fault_tolerance_enable:\n config.add_section(\"tooz\")\n config.set(\"tooz\", \"group_name\", group_name)\n config.set(\"tooz\", \"coordinator_url\", coordinator_url)\n\n config.add_section(\"openstack\")\n config.set(\"openstack\", \"auth_url\", auth_url)\n config.set(\"openstack\", \"auth_user\", auth_user)\n config.set(\"openstack\", \"auth_password\", auth_password)\n config.set(\"openstack\", \"auth_tenant_name\", auth_tenant_name)\n config.set(\"openstack\", \"monasca_api_version\", monasca_api_version)\n\n config.add_section(\"oneview\")\n config.set(\"oneview\", \"host\", oneview_host)\n config.set(\"oneview\", \"manager_url\", oneview_manager_url)\n config.set(\"oneview\", \"username\", oneview_username)\n config.set(\"oneview\", \"password\", oneview_password)\n config.set(\"oneview\", \"allow_insecure_connections\", oneview_insecure)\n config.set(\"oneview\", \"max_polling_attempts\", max_polling_attempts)\n config.set(\"oneview\", \"tls_cacert_file\", tls_cacert_file)\n\n for driver in config_drivers:\n config.add_section(driver)\n for option, value in config_drivers[driver].items():\n config.set(driver, option, value)\n\n if not args.config_file:\n args.config_file = '~' + os.path.sep + 'oneview_monasca.conf'\n\n filename = utils.get_input(\n \"Type the path of the new configuration file [%s]: \" % args.config_file) or args.config_file\n full_filename = os.path.realpath(os.path.expanduser(filename))\n\n config_dir = os.path.dirname(full_filename)\n utils.makedirs(config_dir)\n\n with open(full_filename, 'w') as configfile:\n config.write(configfile)\n print(\"======\\nFile created successfully on '%s'!\\n======\" % filename)"
] |
[
"0.6737381",
"0.65399843",
"0.6378295",
"0.6286918",
"0.61853635",
"0.6151957",
"0.614278",
"0.6120484",
"0.5999094",
"0.5926487",
"0.58837944",
"0.58702725",
"0.5867645",
"0.5861611",
"0.576889",
"0.5757",
"0.57449085",
"0.5726978",
"0.56929123",
"0.5679204",
"0.5670719",
"0.5665243",
"0.5611832",
"0.55881476",
"0.5572987",
"0.5512681",
"0.5496923",
"0.5481877",
"0.54813254",
"0.5475065"
] |
0.66397613
|
1
|
Test options config flow for a V2 bridge.
|
async def test_options_flow_v2(hass: HomeAssistant) -> None:
entry = MockConfigEntry(
domain="hue",
unique_id="aabbccddeeff",
data={"host": "0.0.0.0", "api_version": 2},
)
entry.add_to_hass(hass)
dev_reg = dr.async_get(hass)
mock_dev_id = "aabbccddee"
dev_reg.async_get_or_create(
config_entry_id=entry.entry_id, identifiers={(const.DOMAIN, mock_dev_id)}
)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
schema = result["data_schema"].schema
assert _get_schema_default(schema, const.CONF_IGNORE_AVAILABILITY) == []
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={const.CONF_IGNORE_AVAILABILITY: [mock_dev_id]},
)
assert result["type"] == "create_entry"
assert result["data"] == {
const.CONF_IGNORE_AVAILABILITY: [mock_dev_id],
}
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def test_option_flow(opp, aioclient_mock):\n config_entry = await setup_deconz_integration(opp, aioclient_mock)\n\n result = await opp.config_entries.options.async_init(config_entry.entry_id)\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"deconz_devices\"\n\n result = await opp.config_entries.options.async_configure(\n result[\"flow_id\"],\n user_input={\n CONF_ALLOW_CLIP_SENSOR: False,\n CONF_ALLOW_DECONZ_GROUPS: False,\n CONF_ALLOW_NEW_DEVICES: False,\n },\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"data\"] == {\n CONF_ALLOW_CLIP_SENSOR: False,\n CONF_ALLOW_DECONZ_GROUPS: False,\n CONF_ALLOW_NEW_DEVICES: False,\n CONF_MASTER_GATEWAY: True,\n }",
"async def test_options_flow_v1(hass: HomeAssistant) -> None:\n entry = MockConfigEntry(\n domain=\"hue\",\n unique_id=\"aabbccddeeff\",\n data={\"host\": \"0.0.0.0\"},\n )\n entry.add_to_hass(hass)\n\n result = await hass.config_entries.options.async_init(entry.entry_id)\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"init\"\n schema = result[\"data_schema\"].schema\n assert (\n _get_schema_default(schema, const.CONF_ALLOW_HUE_GROUPS)\n == const.DEFAULT_ALLOW_HUE_GROUPS\n )\n assert (\n _get_schema_default(schema, const.CONF_ALLOW_UNREACHABLE)\n == const.DEFAULT_ALLOW_UNREACHABLE\n )\n\n result = await hass.config_entries.options.async_configure(\n result[\"flow_id\"],\n user_input={\n const.CONF_ALLOW_HUE_GROUPS: True,\n const.CONF_ALLOW_UNREACHABLE: True,\n },\n )\n\n assert result[\"type\"] == \"create_entry\"\n assert result[\"data\"] == {\n const.CONF_ALLOW_HUE_GROUPS: True,\n const.CONF_ALLOW_UNREACHABLE: True,\n }",
"async def test_option_flow(hass):\n config_entry = MockConfigEntry(\n domain=\"cast\", data={\"known_hosts\": [\"192.168.0.10\", \"192.168.0.11\"]}\n )\n config_entry.add_to_hass(hass)\n await hass.async_block_till_done()\n\n result = await hass.config_entries.options.async_init(config_entry.entry_id)\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"options\"\n data_schema = result[\"data_schema\"].schema\n assert get_suggested(data_schema, \"known_hosts\") == \"192.168.0.10,192.168.0.11\"\n\n result = await hass.config_entries.options.async_configure(\n result[\"flow_id\"],\n user_input={\"known_hosts\": \"192.168.0.1, , 192.168.0.2 \"},\n )\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY\n assert result[\"data\"] is None\n assert config_entry.data == {\"known_hosts\": [\"192.168.0.1\", \"192.168.0.2\"]}",
"async def test_options_flow_router(hass: HomeAssistant) -> None:\n config_entry = MockConfigEntry(\n domain=DOMAIN,\n data={**CONFIG_DATA, CONF_MODE: \"router\"},\n )\n config_entry.add_to_hass(hass)\n\n with PATCH_SETUP_ENTRY:\n await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n result = await hass.config_entries.options.async_init(config_entry.entry_id)\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"step_id\"] == \"init\"\n assert CONF_REQUIRE_IP not in result[\"data_schema\"].schema\n\n result = await hass.config_entries.options.async_configure(\n result[\"flow_id\"],\n user_input={\n CONF_CONSIDER_HOME: 20,\n CONF_TRACK_UNKNOWN: True,\n CONF_INTERFACE: \"aaa\",\n CONF_DNSMASQ: \"bbb\",\n },\n )\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n assert config_entry.options[CONF_CONSIDER_HOME] == 20\n assert config_entry.options[CONF_TRACK_UNKNOWN] is True\n assert config_entry.options[CONF_INTERFACE] == \"aaa\"\n assert config_entry.options[CONF_DNSMASQ] == \"bbb\"",
"async def test_flow_manual_configuration_decision(opp, aioclient_mock):\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[{\"id\": BRIDGEID, \"internalipaddress\": \"1.2.3.4\", \"internalport\": 80}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={CONF_HOST: CONF_MANUAL_INPUT}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: \"1.2.3.4\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://1.2.3.4:80/api/{API_KEY}/config\",\n json={\"bridgeid\": BRIDGEID},\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == BRIDGEID\n assert result[\"data\"] == {\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n }",
"async def test_manual_configuration_timeout_get_bridge(opp, aioclient_mock):\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: \"1.2.3.4\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://1.2.3.4:80/api/{API_KEY}/config\", exc=asyncio.TimeoutError\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"no_bridges\"",
"async def test_flow_manual_configuration(opp, aioclient_mock):\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: \"1.2.3.4\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://1.2.3.4:80/api/{API_KEY}/config\",\n json={\"bridgeid\": BRIDGEID},\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == BRIDGEID\n assert result[\"data\"] == {\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n }",
"async def test_options_flow(hass):\n # Create a new MockConfigEntry and add to HASS (we're bypassing config\n # flow entirely)\n entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG, entry_id=\"test\")\n entry.add_to_hass(hass)\n\n # Initialize an options flow\n result = await hass.config_entries.options.async_init(entry.entry_id)\n\n # Verify that the first options step is a user form\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n\n # Enter some fake data into the form\n result = await hass.config_entries.options.async_configure(\n result[\"flow_id\"],\n user_input={platform: platform != SENSOR for platform in PLATFORMS},\n )\n\n # Verify that the flow finishes\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == \"test_username\"\n\n # Verify that the options were updated\n assert entry.options == {BINARY_SENSOR: True, SENSOR: False, SWITCH: True}",
"async def test_options_flow(hass: HomeAssistant) -> None:\n entry = MockConfigEntry(domain=DOMAIN, unique_id=\"12345\", data={}, version=2)\n entry.add_to_hass(hass)\n\n with patch(\n \"homeassistant.components.verisure.async_setup_entry\",\n return_value=True,\n ):\n assert await hass.config_entries.async_setup(entry.entry_id)\n await hass.async_block_till_done()\n\n result = await hass.config_entries.options.async_init(entry.entry_id)\n\n assert result.get(\"type\") == FlowResultType.FORM\n assert result.get(\"step_id\") == \"init\"\n\n result = await hass.config_entries.options.async_configure(\n result[\"flow_id\"],\n user_input={CONF_LOCK_CODE_DIGITS: 4},\n )\n\n assert result.get(\"type\") == FlowResultType.CREATE_ENTRY\n assert result.get(\"data\") == {CONF_LOCK_CODE_DIGITS: DEFAULT_LOCK_CODE_DIGITS}",
"async def test_options_flow_ap(hass: HomeAssistant) -> None:\n config_entry = MockConfigEntry(\n domain=DOMAIN,\n data=CONFIG_DATA,\n options={CONF_REQUIRE_IP: True},\n )\n config_entry.add_to_hass(hass)\n\n with PATCH_SETUP_ENTRY:\n await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n result = await hass.config_entries.options.async_init(config_entry.entry_id)\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"step_id\"] == \"init\"\n assert CONF_REQUIRE_IP in result[\"data_schema\"].schema\n\n result = await hass.config_entries.options.async_configure(\n result[\"flow_id\"],\n user_input={\n CONF_CONSIDER_HOME: 20,\n CONF_TRACK_UNKNOWN: True,\n CONF_INTERFACE: \"aaa\",\n CONF_DNSMASQ: \"bbb\",\n CONF_REQUIRE_IP: False,\n },\n )\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n assert config_entry.options[CONF_CONSIDER_HOME] == 20\n assert config_entry.options[CONF_TRACK_UNKNOWN] is True\n assert config_entry.options[CONF_INTERFACE] == \"aaa\"\n assert config_entry.options[CONF_DNSMASQ] == \"bbb\"\n assert config_entry.options[CONF_REQUIRE_IP] is False",
"def test_ssh_config2(self):\n self.assertEqual(\n parse(self.f_in['ssh_config2'], quiet=True),\n self.f_json['ssh_config2']\n )",
"def test_adapter_opts_set(self):\n conn = self._get_conn()\n\n discovery = {\n \"versions\": {\n \"values\": [\n {\n \"status\": \"stable\",\n \"updated\": \"2019-06-01T00:00:00Z\",\n \"media-types\": [\n {\n \"base\": \"application/json\",\n \"type\": \"application/vnd.openstack.heat-v2+json\", # noqa: E501\n }\n ],\n \"id\": \"v2.0\",\n \"links\": [\n {\n \"href\": \"https://example.org:8888/heat/v2\",\n \"rel\": \"self\",\n }\n ],\n }\n ]\n }\n }\n self.register_uris(\n [\n dict(\n method='GET',\n uri='https://example.org:8888/heat/v2',\n json=discovery,\n ),\n dict(\n method='GET',\n uri='https://example.org:8888/heat/v2/foo',\n json={'foo': {}},\n ),\n ]\n )\n\n adap = conn.orchestration\n self.assertEqual('SpecialRegion', adap.region_name)\n self.assertEqual('orchestration', adap.service_type)\n self.assertEqual('internal', adap.interface)\n self.assertEqual(\n 'https://example.org:8888/heat/v2', adap.endpoint_override\n )\n\n adap.get('/foo')\n self.assert_calls()",
"async def test_flow_two_bridges_discovered_one_new(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"1.2.3.4\", \"bla\"), (\"5.6.7.8\", \"beer\")])\n MockConfigEntry(\n domain=\"hue\", unique_id=\"bla\", data={\"host\": \"1.2.3.4\"}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"init\"\n assert result[\"data_schema\"]({\"id\": \"beer\"})\n assert result[\"data_schema\"]({\"id\": \"manual\"})\n with pytest.raises(vol.error.MultipleInvalid):\n assert not result[\"data_schema\"]({\"id\": \"bla\"})",
"async def test_full_user_flow_advanced_options(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n mock_connection(aioclient_mock)\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={CONF_SOURCE: SOURCE_USER, \"show_advanced_options\": True}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n\n user_input = {\n **MOCK_USER_INPUT,\n CONF_VERIFY_SSL: True,\n }\n\n with _patch_async_setup_entry():\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input=user_input,\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == HOST\n\n assert result[\"data\"]\n assert result[\"data\"][CONF_HOST] == HOST\n assert result[\"data\"][CONF_VERIFY_SSL]",
"def configure(self, options, conf):",
"def test_read_config_option(self):\n # set up config\n config.set_config_file(os.path.join(path_to_module, \"test_config.conf\"))\n config.setup()\n # Test that all the parameters loaded from file are correct\n self.assertEqual(config.read_config_option('client_id'), 'uploader')\n self.assertEqual(config.read_config_option('client_secret'), 'secret')\n self.assertEqual(config.read_config_option('username'), 'admin')\n self.assertEqual(config.read_config_option('password'), 'password1')\n self.assertEqual(config.read_config_option('base_url'), 'http://localhost:8080/irida-latest/api/')\n self.assertEqual(config.read_config_option('parser'), 'miseq')\n self.assertEqual(config.read_config_option('readonly', bool), False)",
"async def test_bridge_zeroconf(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"192.168.1.217\", \"ecb5fafffeabcabc\")])\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.217\",\n addresses=[\"192.168.1.217\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5fafffeabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5fafffeabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"",
"async def test_flow_discovered_bridges(opp, aioclient_mock):\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[\n {\"id\": BRIDGEID, \"internalipaddress\": \"1.2.3.4\", \"internalport\": 80},\n {\"id\": \"1234E567890A\", \"internalipaddress\": \"5.6.7.8\", \"internalport\": 80},\n ],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={CONF_HOST: \"1.2.3.4\"}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == BRIDGEID\n assert result[\"data\"] == {\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n }",
"async def test_manual_configuration_dont_update_configuration(opp, aioclient_mock):\n await setup_deconz_integration(opp, aioclient_mock)\n\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: \"1.2.3.4\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://1.2.3.4:80/api/{API_KEY}/config\",\n json={\"bridgeid\": BRIDGEID},\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"",
"async def test_flow_bridges_discovered(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n # Add ignored config entry. Should still show up as option.\n MockConfigEntry(\n domain=\"hue\", source=config_entries.SOURCE_IGNORE, unique_id=\"bla\"\n ).add_to_hass(hass)\n\n create_mock_api_discovery(\n aioclient_mock, [(\"1.2.3.4\", \"bla\"), (\"5.6.7.8\", \"beer_v2\")]\n )\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"init\"\n\n with pytest.raises(vol.Invalid):\n assert result[\"data_schema\"]({\"id\": \"not-discovered\"})\n\n result[\"data_schema\"]({\"id\": \"bla\"})\n result[\"data_schema\"]({\"id\": \"beer_v2\"})\n result[\"data_schema\"]({\"id\": \"manual\"})",
"async def test_manual_configuration_update_configuration(opp, aioclient_mock):\n config_entry = await setup_deconz_integration(opp, aioclient_mock)\n\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: \"2.3.4.5\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://2.3.4.5:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://2.3.4.5:80/api/{API_KEY}/config\",\n json={\"bridgeid\": BRIDGEID},\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert config_entry.data[CONF_HOST] == \"2.3.4.5\"",
"def test_config_opts(sc):\n assert sc.server_name is not None\n assert sc.deployment == Deployment.stg\n assert sc.admins is not None\n assert sc.command_handler is not None\n assert sc.command_handler_work_dir is not None\n assert sc.command_handler_pvc_env_var is not None\n assert sc.command_handler_image_reference is not None\n assert sc.command_handler_k8s_namespace is not None\n assert sc.fas_password is not None\n assert sc.testing_farm_secret is not None\n assert sc.github_requests_log_path is not None\n assert sc.webhook_secret is not None\n assert sc.validate_webhooks is not None\n assert sc.gitlab_token_secret is not None",
"def configure_test(self, test, config_json):\n pass",
"async def test_options_flow(hass: HomeAssistant) -> None:\n\n config_entry = MockConfigEntry(\n domain=DOMAIN,\n unique_id=\"abcde12345\",\n data=VALID_CONFIG,\n )\n config_entry.add_to_hass(hass)\n\n with patch(\"homeassistant.components.nut.async_setup_entry\", return_value=True):\n result = await hass.config_entries.options.async_init(config_entry.entry_id)\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"step_id\"] == \"init\"\n\n result = await hass.config_entries.options.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n assert config_entry.options == {\n CONF_SCAN_INTERVAL: 60,\n }\n\n with patch(\"homeassistant.components.nut.async_setup_entry\", return_value=True):\n result2 = await hass.config_entries.options.async_init(config_entry.entry_id)\n\n assert result2[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result2[\"step_id\"] == \"init\"\n\n result2 = await hass.config_entries.options.async_configure(\n result2[\"flow_id\"],\n user_input={CONF_SCAN_INTERVAL: 12},\n )\n\n assert result2[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n assert config_entry.options == {\n CONF_SCAN_INTERVAL: 12,\n }",
"async def test_options_flow_network_failure(hass):\n\n entry = await setup_platform(hass)\n\n with patch(\n \"aussiebb.asyncio.AussieBB.get_services\", side_effect=ClientConnectionError()\n ):\n\n result1 = await hass.config_entries.options.async_init(entry.entry_id)\n assert result1[\"type\"] == RESULT_TYPE_ABORT\n assert result1[\"reason\"] == \"cannot_connect\"",
"async def test_bridge_homekit(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"0.0.0.0\", \"bla\")])\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_HOMEKIT},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"0.0.0.0\",\n addresses=[\"0.0.0.0\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=None,\n properties={zeroconf.ATTR_PROPERTIES_ID: \"aa:bb:cc:dd:ee:ff\"},\n type=\"mock_type\",\n ),\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"\n\n flow = next(\n flow\n for flow in hass.config_entries.flow.async_progress()\n if flow[\"flow_id\"] == result[\"flow_id\"]\n )\n assert flow[\"context\"][\"unique_id\"] == config_entries.DEFAULT_DISCOVERY_UNIQUE_ID",
"def test_parse_config(self):\n config_file = os.path.join('top', 'conf', 'top.conf')\n\n self._c.set_config_file(config_file)\n self._c.parse_config()\n\n received = self._c.adp_loop\n expected = 30\n msg = 'AdpB2CConfig.adp_loop error'\n self.assertEqual(received, expected, msg)\n\n received = self._c.adp_dirs\n expected = ['/var/ftp/pub/nparcel/adp/in']\n msg = 'AdpB2CConfig.adp_dirs error'\n self.assertListEqual(received, expected, msg)\n\n received = self._c.archive_dir\n expected = '/data/top/archive'\n msg = 'AdpB2CConfig.archive_dir error'\n self.assertEqual(received, expected, msg)\n\n received = self._c.adp_file_formats\n expected = []\n msg = 'AdpB2CConfig.adp_file_formats error'\n self.assertListEqual(received, expected, msg)\n\n # For the default configuration file the [db] section is blank\n received = self._c.db_kwargs()\n msg = 'AdpB2CConfig.db_kwargs error'\n self.assertIsNone(received, msg)\n\n received = self._c.code_header\n expected = 'TP Code'\n msg = 'AdpB2CConfig.code_header error'\n self.assertEqual(received, expected, msg)\n\n received = self._c.adp_headers\n expected = {'agent.code': 'TP Code',\n 'agent.dp_code': 'DP Code',\n 'agent.name': 'ADP Name',\n 'agent.address': 'Address',\n 'agent.suburb': 'Suburb',\n 'agent.state': 'State',\n 'agent.postcode': 'Postcode',\n 'agent.opening_hours': 'Opening Hours',\n 'agent.notes': 'Notes',\n 'agent.parcel_size_code': 'ADP Accepts Parcel Size',\n 'agent.phone_nbr': 'Phone',\n 'agent.contact_name': 'Contact',\n 'agent.email': 'Email',\n 'agent.fax_nbr': 'Fax',\n 'agent.latitude': 'Latitude',\n 'agent.longitude': 'Longitude',\n 'agent.status': 'Active',\n 'delivery_partner.id': 'DP Id',\n 'login_account.username': 'Username'}\n msg = 'AdpB2CConfig.adp.headers error'\n self.assertDictEqual(received, expected, msg)\n\n received = self._c.delivery_partners\n expected = ['Nparcel', 'ParcelPoint', 'Toll', 'National Storage']\n msg = 'AdpB2CConfig.adp.delivery_partners error'\n self.assertListEqual(received, expected, msg)\n\n received = self._c.adp_default_passwords\n expected = {'nparcel': 'aaaa',\n 'parcelpoint': 'bbbb',\n 'toll': 'cccc',\n 'national storage': 'dddd'}\n msg = 'AdpB2CConfig.adp_default_passwords error'\n self.assertDictEqual(received, expected, msg)",
"async def test_entity_options(\n hass: HomeAssistant,\n target_domain: Platform,\n) -> None:\n registry = er.async_get(hass)\n\n switch_entity_entry = registry.async_get_or_create(\n \"switch\", \"test\", \"unique\", original_name=\"ABC\"\n )\n registry.async_update_entity(\n switch_entity_entry.entity_id, entity_category=EntityCategory.CONFIG\n )\n\n # Add the config entry\n switch_as_x_config_entry = MockConfigEntry(\n data={},\n domain=DOMAIN,\n options={\n CONF_ENTITY_ID: switch_entity_entry.id,\n CONF_TARGET_DOMAIN: target_domain,\n },\n title=\"ABC\",\n )\n switch_as_x_config_entry.add_to_hass(hass)\n\n assert await hass.config_entries.async_setup(switch_as_x_config_entry.entry_id)\n await hass.async_block_till_done()\n\n entity_entry = registry.async_get(f\"{target_domain}.abc\")\n assert entity_entry\n assert entity_entry.device_id == switch_entity_entry.device_id\n assert entity_entry.options == {\n DOMAIN: {\"entity_id\": switch_entity_entry.entity_id}\n }",
"def configure(self, options, conf):\n pass",
"async def test_flow_works(hass: HomeAssistant) -> None:\n disc_bridge = get_discovered_bridge(supports_v2=True)\n\n with patch(\n \"homeassistant.components.hue.config_flow.discover_nupnp\",\n return_value=[disc_bridge],\n ):\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"init\"\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={\"id\": disc_bridge.id}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"\n\n flow = next(\n flow\n for flow in hass.config_entries.flow.async_progress()\n if flow[\"flow_id\"] == result[\"flow_id\"]\n )\n assert flow[\"context\"][\"unique_id\"] == \"aabbccddeeff\"\n\n with patch.object(config_flow, \"create_app_key\", return_value=\"123456789\"):\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == \"create_entry\"\n assert result[\"title\"] == \"Hue Bridge aabbccddeeff\"\n assert result[\"data\"] == {\n \"host\": \"1.2.3.4\",\n \"api_key\": \"123456789\",\n \"api_version\": 2,\n }"
] |
[
"0.67560863",
"0.6373548",
"0.6286991",
"0.6278226",
"0.62578523",
"0.6233213",
"0.61113554",
"0.60163224",
"0.59915006",
"0.59855986",
"0.5981519",
"0.595006",
"0.58520335",
"0.5813534",
"0.58015335",
"0.5766455",
"0.57646006",
"0.57201713",
"0.5719225",
"0.57179165",
"0.5711263",
"0.56475323",
"0.5580126",
"0.55129415",
"0.5489016",
"0.5487408",
"0.548203",
"0.5453624",
"0.5450397",
"0.54039526"
] |
0.7129589
|
0
|
Test a bridge being discovered by zeroconf already exists.
|
async def test_bridge_zeroconf_already_exists(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
create_mock_api_discovery(
aioclient_mock, [("0.0.0.0", "ecb5faabcabc"), ("192.168.1.217", "ecb5faabcabc")]
)
entry = MockConfigEntry(
domain="hue",
source=config_entries.SOURCE_HOMEKIT,
data={"host": "0.0.0.0"},
unique_id="ecb5faabcabc",
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="192.168.1.217",
addresses=["192.168.1.217"],
port=443,
hostname="Philips-hue.local",
type="_hue._tcp.local.",
name="Philips Hue - ABCABC._hue._tcp.local.",
properties={
"_raw": {"bridgeid": b"ecb5faabcabc", "modelid": b"BSB002"},
"bridgeid": "ecb5faabcabc",
"modelid": "BSB002",
},
),
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data["host"] == "192.168.1.217"
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def test_bridge_homekit_already_configured(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"0.0.0.0\", \"aabbccddeeff\")])\n MockConfigEntry(\n domain=\"hue\", unique_id=\"aabbccddeeff\", data={\"host\": \"0.0.0.0\"}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_HOMEKIT},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"0.0.0.0\",\n addresses=[\"0.0.0.0\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=None,\n properties={zeroconf.ATTR_PROPERTIES_ID: \"aa:bb:cc:dd:ee:ff\"},\n type=\"mock_type\",\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"",
"async def test_bridge_zeroconf_ipv6(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"fd00::eeb5:faff:fe84:b17d\",\n addresses=[\"fd00::eeb5:faff:fe84:b17d\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5faabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5faabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"invalid_host\"",
"def is_connected(cls, device_config):\n if \"console_port_name\" in device_config[\"persistent\"]:\n address = device_config[\"persistent\"][\"console_port_name\"]\n else:\n address = device_config[\"persistent\"][\"hub_port_name\"]\n return os.path.exists(address)",
"def setup_unused_bridge_network(self):\n out = utils.run_script('conjure-up.lxc network show conjureup0')\n if out.returncode == 0:\n return # already configured\n\n out = utils.run_script('conjure-up.lxc network create conjureup0 '\n 'ipv4.address=auto '\n 'ipv4.nat=true '\n 'ipv6.address=none '\n 'ipv6.nat=false')\n\n if out.returncode != 0:\n raise Exception(\n \"Failed to create conjureup0 network bridge: \"\n \"{}\".format(out.stderr.decode()))",
"async def test_bridge_zeroconf(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"192.168.1.217\", \"ecb5fafffeabcabc\")])\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.217\",\n addresses=[\"192.168.1.217\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5fafffeabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5fafffeabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"",
"async def test_bridge_connection_failed(\n hass: HomeAssistant,\n aioclient_mock: AiohttpClientMocker,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n create_mock_api_discovery(aioclient_mock, [])\n\n with patch(\n \"homeassistant.components.hue.config_flow.discover_bridge\",\n side_effect=ClientError,\n ):\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={\"host\": \"blah\"}\n )\n\n # a warning message should have been logged that the bridge could not be reached\n assert \"Error while attempting to retrieve discovery information\" in caplog.text\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"\n\n # test again with zeroconf discovered wrong bridge IP\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"blah\",\n addresses=[\"1.2.3.4\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5fafffeabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5fafffeabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"\n\n # test again with homekit discovered wrong bridge IP\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_HOMEKIT},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"0.0.0.0\",\n addresses=[\"0.0.0.0\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=None,\n properties={zeroconf.ATTR_PROPERTIES_ID: \"aa:bb:cc:dd:ee:ff\"},\n type=\"mock_type\",\n ),\n )\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"\n\n # repeat test with import flow\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_IMPORT},\n data={\"host\": \"blah\"},\n )\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"",
"def bridge_no_such_device(self, event):\n ret = self.get_results_stats(event.results)\n if ret:\n return {'bridge-no-such-device': ret}, 'ovs-vswitchd'",
"def bridge_network_check(ip, bridge_ip, bridge_netmask):\n# convert vars to unicode \n ip = unicode(ip)\n bridge_ip = unicode(bridge_ip)\n bridge_netmask = unicode(bridge_netmask)\n# by default ip is not in bridge network \n brctl = 0\n\n# bridge insterface ip network\n brdige_network = IPv4Interface('%s/%s' % (bridge_ip, bridge_netmask)).network\n\n# check if ip is from bridge network and return bridge control var (brctl) = true\n if IPv4Address(ip) in list(IPv4Network(brdige_network)):\n brctl = 1\n\n# return brctl and bridge ip network \n return brctl, brdige_network",
"def CheckIfConnectionIsABridge(self, node1_idx, node2_idx):\n\n is_bridge = False\n\n num_of_commmon_components_before_disconecting = len(self.GetCommonComponents())\n\n self.Disconnect(node1_idx, node2_idx)\n\n num_of_commmon_components_after_disconecting = len(self.GetCommonComponents())\n\n if num_of_commmon_components_after_disconecting > num_of_commmon_components_before_disconecting:\n is_bridge = True\n self.Connect(node1_idx, node2_idx)\n\n return is_bridge",
"def test_connect(self, gateway):\n assert not gateway._devs",
"def test_resource_not_existing(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"connections.non_existing_connection.name\",\n \"value\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"Resource connections/non_existing_connection does not exist.\"\n )",
"def isConnected():",
"def test_resource_not_existing(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"connections.non_existing_connection.name\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"Resource connections/non_existing_connection does not exist.\"\n )",
"def IsLinkup(nic,timeout):\n nic = nic.strip()\n current = time.time()\n timeout += current\n while current < timeout:\n data = os.popen(\"ipconfig\").read().split(\"Ethernet adapter\")\n for item in data:\n if item.count(nic) and item.count(\"isconnected\") == 0: #Connected\n return 1\n time.sleep(0.5)\n current = time.time()\n return 0",
"async def test_configure_service_with_faulty_bridgeid(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n await setup_deconz_integration(hass, aioclient_mock)\n aioclient_mock.clear_requests()\n\n data = {\n CONF_BRIDGE_ID: \"Bad bridge id\",\n SERVICE_FIELD: \"/lights/1\",\n SERVICE_DATA: {\"on\": True},\n }\n\n await hass.services.async_call(\n DECONZ_DOMAIN, SERVICE_CONFIGURE_DEVICE, service_data=data\n )\n await hass.async_block_till_done()\n\n assert len(aioclient_mock.mock_calls) == 0",
"async def connected(self) -> bool:\n args = ['-t', f\"DEVICE INFO,{self.conf['device_address']}\"]\n output = await self.run_vh(args)\n return \"IN USE BY: NO ONE\" not in output",
"def exists(identifier, network):\n foo = next(load(identifier, network), None)\n return foo is not None",
"def has_descriptor(self, uuid):",
"async def test_bridge_import_already_configured(hass: HomeAssistant) -> None:\n MockConfigEntry(\n domain=\"hue\", unique_id=\"aabbccddeeff\", data={\"host\": \"0.0.0.0\"}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_IMPORT},\n data={\"host\": \"0.0.0.0\", \"properties\": {\"id\": \"aa:bb:cc:dd:ee:ff\"}},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"",
"def is_connected(self) -> bool:",
"async def test_manual_flow_bridge_exist(hass: HomeAssistant) -> None:\n MockConfigEntry(\n domain=\"hue\", unique_id=\"id-1234\", data={\"host\": \"2.2.2.2\"}\n ).add_to_hass(hass)\n\n with patch(\n \"homeassistant.components.hue.config_flow.discover_nupnp\",\n return_value=[],\n ):\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"manual\"\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], {\"host\": \"2.2.2.2\"}\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"",
"def _check(self):\n\t\tif not self._raven:\n\t\t\traise NoDeviceFoundException",
"def isconnected(self) -> bool:",
"async def test_zeroconf_flow_already_configured(hass):\n device = await setup_axis_integration(hass)\n assert device.host == \"1.2.3.4\"\n\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n \"hostname\": \"name\",\n \"properties\": {\"macaddress\": MAC},\n },\n context={\"source\": \"zeroconf\"},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert device.host == \"1.2.3.4\"",
"def test_home_bridge(mock_pre_serv):\n bridge = HomeBridge('TestBridge', 'test.bridge', b'123-45-678')\n\n assert bridge.display_name == 'TestBridge'\n assert bridge.pincode == b'123-45-678'\n assert len(bridge.services) == 2\n\n assert bridge.services[0].display_name == SERV_ACCESSORY_INFO\n assert bridge.services[1].display_name == SERV_BRIDGING_STATE\n\n char_model = bridge.services[0].get_characteristic(CHAR_MODEL)\n assert char_model.get_value() == 'test.bridge'",
"def is_ready(self, addr: int, /) -> bool:",
"def device_exists(device):\n return os.path.exists('/sys/class/net/%s' % device)",
"def test_target_existence(self):\n self.create_ptr(\n ip_str='128.193.0.2', fqdn='nonexistent.oregonstate.edu',\n ip_type='4')",
"async def test_zeroconf_flow_ignore_link_local_address(hass):\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={CONF_HOST: \"169.254.3.4\", \"properties\": {\"macaddress\": MAC}},\n context={\"source\": \"zeroconf\"},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"link_local_address\"",
"def is_connected_drm():\n drm_status = xbee.atcmd(AT_CMD_DI)\n if drm_status is None or drm_status not in drm_status_connected:\n return False\n return True"
] |
[
"0.65536493",
"0.6400834",
"0.63210285",
"0.6163109",
"0.6107604",
"0.59410834",
"0.58979404",
"0.5866441",
"0.58326805",
"0.5749012",
"0.56838477",
"0.56618524",
"0.565966",
"0.5654871",
"0.5614883",
"0.5600673",
"0.5578344",
"0.5554906",
"0.55545014",
"0.5549597",
"0.55493474",
"0.55444527",
"0.5514762",
"0.55119604",
"0.5509033",
"0.5479953",
"0.5471873",
"0.547022",
"0.54613113",
"0.54330915"
] |
0.71763957
|
0
|
Test a bridge being discovered by zeroconf and ipv6 address.
|
async def test_bridge_zeroconf_ipv6(hass: HomeAssistant) -> None:
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="fd00::eeb5:faff:fe84:b17d",
addresses=["fd00::eeb5:faff:fe84:b17d"],
port=443,
hostname="Philips-hue.local",
type="_hue._tcp.local.",
name="Philips Hue - ABCABC._hue._tcp.local.",
properties={
"_raw": {"bridgeid": b"ecb5faabcabc", "modelid": b"BSB002"},
"bridgeid": "ecb5faabcabc",
"modelid": "BSB002",
},
),
)
assert result["type"] == "abort"
assert result["reason"] == "invalid_host"
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def SupportsIPv6(self) -> bool:",
"def test_ipv6_in_net(self):\n test_ip = ip_address.IPAddress(\"2001:0db8:85a3:08d3:1319:8a2e:0370:7344/24\")\n assert test_ip.in_network(\"2001:0d00::/24\")\n assert test_ip.in_network(\"2001:0d00::/29\")",
"def bridge_network_check(ip, bridge_ip, bridge_netmask):\n# convert vars to unicode \n ip = unicode(ip)\n bridge_ip = unicode(bridge_ip)\n bridge_netmask = unicode(bridge_netmask)\n# by default ip is not in bridge network \n brctl = 0\n\n# bridge insterface ip network\n brdige_network = IPv4Interface('%s/%s' % (bridge_ip, bridge_netmask)).network\n\n# check if ip is from bridge network and return bridge control var (brctl) = true\n if IPv4Address(ip) in list(IPv4Network(brdige_network)):\n brctl = 1\n\n# return brctl and bridge ip network \n return brctl, brdige_network",
"def OSSupportsIPv6(self) -> bool:",
"def check_public_ipv6(node):\n\n ok = False\n for ii in node.ifaces:\n# try:\n for ip in ii.addrs:\n ip = ip.split('/')[0]\n if ipaddress.ip_address(ip).version == 6:\n if ip[0] not in ['f',\":\"]:\n ok = True\n\n# except:\n# print(ok)\n\n return ok",
"def test_ipv6_validation_success():\n assert is_ipv6('2001:db8::ff00:42:8329')",
"def find_ipv6():\n\n test_host = '2600::' # Sprint.net\n try:\n with socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) as s:\n s.connect((test_host, 53))\n ipv6 = s.getsockname()[0]\n except:\n if cfg['debug']:\n print(\"Couldn't create a socket to %s\" % test_host)\n print(\"Check that you have a valid IPv6 default route\")\n ipv6 = None\n\n return ipv6",
"async def test_ipv6_configuration(\n ip6config_service: IP6ConfigService, dbus_session_bus: MessageBus\n):\n ip6 = IpConfiguration(\"/org/freedesktop/NetworkManager/IP6Config/1\", ip4=False)\n\n assert ip6.gateway is None\n assert ip6.nameservers is None\n\n await ip6.connect(dbus_session_bus)\n\n assert ip6.gateway == IPv6Address(\"fe80::da58:d7ff:fe00:9c69\")\n assert ip6.nameservers == [\n IPv6Address(\"2001:1620:2777:1::10\"),\n IPv6Address(\"2001:1620:2777:2::20\"),\n ]\n\n ip6config_service.emit_properties_changed({\"Gateway\": \"2001:1620:2777:1::10\"})\n await ip6config_service.ping()\n assert ip6.gateway == IPv6Address(\"2001:1620:2777:1::10\")\n\n ip6config_service.emit_properties_changed({}, [\"Gateway\"])\n await ip6config_service.ping()\n await ip6config_service.ping()\n assert ip6.gateway == IPv6Address(\"fe80::da58:d7ff:fe00:9c69\")",
"def test_empty(self):\n self.assertFalse(isIPv6Address(\"\"))",
"def test_ipv6_validation_failure():\n with pytest.raises(socket.error):\n is_ipv6('2001::0234:C1ab::A0:aabc:003F')",
"def testIPv6(self):\n self.assertEqual([\"http://[2001:a68:104:1337:250:daff:fe72:871c]/toimia\"], grab('foo http://[2001:a68:104:1337:250:daff:fe72:871c]/toimia', self.needScheme))",
"def is_host_ip6(value):\n try:\n return bool(ipaddress.IPv6Address(value))\n\n except:\n pass",
"def test_ipv4_in_net_internal_v6(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\")\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")",
"def test_lb_ip6_gre6(self):\n\n self.cli(0, \"lb vip 2001::/16 encap gre6\")\n for asid in self.ass:\n self.cli(0, \"lb as 2001::/16 2002::%u\" % (asid))\n\n self.pg_add_stream(0, self.generatePackets(0))\n self.pg_enable_capture([0,1])\n self.pg_start()\n\n # Scapy fails parsing IPv6 over GRE and IPv6 over GRE.\n # This check is therefore disabled for now.\n #self.checkCapture(0, 0)\n\n for asid in self.ass:\n self.cli(0, \"lb as 2001::/16 2002::%u del\" % (asid))\n self.cli(0, \"lb vip 2001::/16 encap gre6 del\")",
"def test_01_verify_ipv6_network(self):\n\n self.createIpv6NetworkOffering()\n self.createIpv6NetworkOfferingForUpdate()\n self.createTinyServiceOffering()\n self.deployNetwork()\n self.deployNetworkVm()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.prepareRoutingTestResourcesInBackground()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()",
"def IsLinkup(nic,timeout):\n nic = nic.strip()\n current = time.time()\n timeout += current\n while current < timeout:\n data = os.popen(\"ipconfig\").read().split(\"Ethernet adapter\")\n for item in data:\n if item.count(nic) and item.count(\"isconnected\") == 0: #Connected\n return 1\n time.sleep(0.5)\n current = time.time()\n return 0",
"def test_read_host_subnet(self):\n pass",
"def test_ipv6_from_binary(self):\n ip1 = ip_address.IPAddress(\"2001:0db8:85a3:08d3:1319:8a2e:0370:7344\")\n ip1_2 = ip_address.IPAddress(ip1.bytes, binary=True)\n assert ip1 == ip1_2",
"def check_neighbor_kernel_route(host, asicnum, ipaddr, interface, present=True):\n if \":\" in ipaddr:\n ipver = \"ipv6\"\n prefix = ipaddr + \"/128\"\n else:\n ipver = \"ip\"\n prefix = ipaddr + \"/32\"\n\n check_bgp_kernel_route(host, asicnum, prefix, ipver, interface, present)\n check_host_kernel_route(host, asicnum, ipaddr, ipver, interface, present)",
"def is_net_ip6(value):\n for test in [lambda x: ipaddress.IPv6Network(x)._prefixlen != 128,\n lambda x: ipaddress.IPv6Interface(x)._prefixlen != 128]:\n try:\n return bool(test(value))\n\n except:\n pass\n\n return False",
"def test_replace_host_subnet(self):\n pass",
"def setup_unused_bridge_network(self):\n out = utils.run_script('conjure-up.lxc network show conjureup0')\n if out.returncode == 0:\n return # already configured\n\n out = utils.run_script('conjure-up.lxc network create conjureup0 '\n 'ipv4.address=auto '\n 'ipv4.nat=true '\n 'ipv6.address=none '\n 'ipv6.nat=false')\n\n if out.returncode != 0:\n raise Exception(\n \"Failed to create conjureup0 network bridge: \"\n \"{}\".format(out.stderr.decode()))",
"def ipv6_native(self) -> bool:\n return pulumi.get(self, \"ipv6_native\")",
"def checklan(ipaddr, network):\n return True",
"async def test_bridge_zeroconf(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"192.168.1.217\", \"ecb5fafffeabcabc\")])\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.217\",\n addresses=[\"192.168.1.217\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5fafffeabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5fafffeabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"",
"def testIPv6noscheme(self):\n if self.needScheme: return\n \n self.assertEqual([\"[2001:a68:104:1337:250:daff:fe72:871c]/toimia\"], grab('foo [2001:a68:104:1337:250:daff:fe72:871c]/toimia', self.needScheme))",
"def is_ipv6(addr):\n try:\n socket.inet_pton(socket.AF_INET6, addr)\n return True\n except socket.error:\n return False",
"def ipv6_networks(view):\n return \"ipv6network?\" \\\n \"_return_fields=\" \\\n \"extattrs,\" \\\n \"comment,\" \\\n \"network,\" \\\n \"network_view,\" \\\n \"utilization&\" \\\n \"network_view=\" + view + \\\n \"&_max_results=-25000\"",
"def test_ipv6_in_range(self):\n test_ip = ip_address.IPAddress(\"2001:0db8:85a3:08d3:1319:8a2e:0370:7344\")\n \n assert test_ip.in_range(\"2000:0db8:85a3:08d3:1319:8a2e:0370:7344\",\"2002:0db8:85a3:08d3:1319:8a2e:0370:7344\")\n assert test_ip.in_range(\"2001:0db8:85a3:07d3:1319:8a2e:0370:7344\",\"2001:0db8:85a3:08d3:1319:8a2e:0370:7344\")\n assert test_ip.in_range(\"::ffff:1.1.1.1\",\"2501:0db8:85a3:08d3:1319:8a2e:0370:7344\")",
"def check_ip6_dns(zone, zoneinfo):\n for nsobj in zoneinfo.nameservers:\n if len(nsobj.ip6) == 0:\n zoneinfo.dprint(\"NS: %s has no IPv6 address record\" % nsobj.name)\n return"
] |
[
"0.69296867",
"0.6873178",
"0.6855335",
"0.68408835",
"0.67723507",
"0.6649084",
"0.65144736",
"0.6484365",
"0.6461369",
"0.63507825",
"0.6304988",
"0.6245266",
"0.6233388",
"0.6186134",
"0.61457056",
"0.6034072",
"0.6014273",
"0.6006413",
"0.5968635",
"0.59401035",
"0.59390944",
"0.59299535",
"0.59267104",
"0.5889561",
"0.58853734",
"0.58584285",
"0.5857368",
"0.58199894",
"0.5811419",
"0.5789296"
] |
0.80193996
|
0
|
Return an error for given Y and T. Differs for classification and multiclass.
|
def _error(self, Y, T):
err = np.mean((Y - T)**2)
return err
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def errors(self, y):\r\n\r\n # check if y has same dimension of y_pred\r\n if y.ndim != self.y_pred.ndim:\r\n raise TypeError('y should have the same shape as self.y_pred',\r\n ('y', target.type, 'y_pred', self.y_pred.type))\r\n # check if y is of the correct datatype\r\n if y.dtype.startswith('int'):\r\n # the T.neq operator returns a vector of 0s and 1s, where 1\r\n # represents a mistake in prediction\r\n return T.mean(T.neq(self.y_pred, y))\r\n else:\r\n raise NotImplementedError()",
"def errors(self, y):\r\n\r\n # check if y has same dimension of y_pred\r\n if y.ndim != self.y_pred.ndim:\r\n raise TypeError('y should have the same shape as self.y_pred',\r\n ('y', target.type, 'y_pred', self.y_pred.type))\r\n # check if y is of the correct datatype\r\n if y.dtype.startswith('int'):\r\n # the T.neq operator returns a vector of 0s and 1s, where 1\r\n # represents a mistake in prediction\r\n return T.mean(T.neq(self.y_pred, y))\r\n else:\r\n raise NotImplementedError()",
"def errors(self, y):\n\n # check if y has same dimension of y_pred\n if y.ndim != self.y_out.ndim:\n raise TypeError('y should have the same shape as self.y_out',\n ('y', y.type, 'y_out', self.y_out.type))\n # check if y is of the correct datatype\n if y.dtype.startswith('int'):\n # the T.neq operator returns a vector of 0s and 1s, where 1\n # represents a mistake in prediction\n return T.mean(T.neq(self.y_out, y))\n else:\n raise NotImplementedError()",
"def errors(self, y):\r\n \r\n # check if y has same dimension of y_pred\r\n if y.ndim != self.y_pred.ndim:\r\n raise TypeError(\r\n 'y should have the same shape as self.y_pred',\r\n ('y', y.type, 'y_pred', self.y_pred.type)\r\n )\r\n # check if y is of the correct datatype\r\n if y.dtype.startswith('int'):\r\n # the T.neq operator returns a vector of 0s and 1s, where 1\r\n # represents a mistake in prediction\r\n return T.mean(T.neq(self.y_pred, y))\r\n else:\r\n raise NotImplementedError()",
"def errors(self, y):\r\n\r\n # check if y has same dimension of y_pred\r\n if y.ndim != self.y_pred.ndim:\r\n raise TypeError('y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type))\r\n # check if y is of the correct datatype\r\n if y.dtype.startswith('int'):\r\n return T.mean(T.neq(self.y_pred, y))\r\n else:\r\n raise NotImplementedError()",
"def errors(self, y):\r\n\r\n # check if y has same dimension of y_pred\r\n if y.ndim != self.y_pred.ndim:\r\n raise TypeError( 'y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type) )\r\n\r\n # check if y is of the correct datatype\r\n if y.dtype.startswith('int'):\r\n # the T.neq operator returns a vector of 0s and 1s, where 1\r\n # represents a mistake in prediction\r\n return T.mean(T.neq(self.y_pred, y))\r\n else:\r\n raise NotImplementedError()",
"def errors(self, y):\n\n # Check that y has the same dimension as y_pred\n if y.ndim != self.y_pred.ndim:\n raise TypeError(\n 'y should have the same shape as self.y_pred',\n ('y', y.type, 'y_pred', self.y_pred.type)\n )\n # Check that y is the correct datatype\n if y.dtype.startswith('int'):\n return T.mean(T.neq(self.y_pred, y))\n else:\n raise NotImplementedError()",
"def errors(self, y):\n\n if y.ndim != self.y_pred.ndim:\n raise TypeError('y must be same size of y_pred', 'y', y.type, 'y_pred', y_pred.type)\n if y.dtype.startswith('int'):\n return T.mean(T.neq(self.y_pred, y))\n else:\n raise NotImplementedError",
"def errors(self, y):\n return T.mean(T.neq(self.y_pred, y))",
"def error(clf, X, y, ntrials=100, t_s=0.2) :\n \n ### ========== TODO : START ========== ###\n # compute cross-validation error over ntrials\n # hint: use train_test_split (be careful of the parameters)\n \n train_error = 0\n test_error = 0 \n for i in range(ntrials):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=t_s, random_state=i)\n clf.fit(X_train, y_train)\n y_pred_train = clf.predict(X_train)\n y_pred_test = clf.predict(X_test)\n train_error += (1 - metrics.accuracy_score(y_train, y_pred_train, normalize=True))\n test_error += (1 - metrics.accuracy_score(y_test, y_pred_test, normalize=True))\n train_error /= ntrials\n test_error /= ntrials \n ### ========== TODO : END ========== ###\n \n return train_error, test_error",
"def errors(self, y):\n return T.mean(T.neq(self.y_pred, y),\n dtype=theano.config.floatX,\n acc_dtype=theano.config.floatX)",
"def get_error(self, y_pred, y):\n assert y_pred.shape==y.shape\n y_pred = y_pred.type_as(y)\n err = (y_pred!=y).sum().type(torch.FloatTensor).div_(y.shape[0])\n return err",
"def error(self, X, y):\n predicted = self.predict(X)\n y = self.transformy(y)\n return 1 - (y == predicted).sum() / predicted.size",
"def error(clf, X, y, ntrials=100, test_size=0.2) :\n\n ### ========== TODO : START ========== ###\n # compute cross-validation error using StratifiedShuffleSplit over ntrials\n # hint: use train_test_split (be careful of the parameters)\n train_error = 0\n test_error = 0\n f1_score = 0\n sss = StratifiedShuffleSplit(n_splits = ntrials, test_size = test_size, random_state = 0)\n for train_index, test_index in sss.split(X, y):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n clf.fit(X_train, y_train)\n y_pred_train = clf.predict(X_train)\n y_pred_test = clf.predict(X_test)\n train_error += float(1 - metrics.accuracy_score(y_train, y_pred_train, normalize=True))\n test_error += float(1 - metrics.accuracy_score(y_test, y_pred_test, normalize=True))\n f1_score += metrics.f1_score(y_test, y_pred_test, average = \"micro\")\n\n train_error = train_error/ntrials\n test_error = test_error/ntrials\n f1_score = f1_score/ntrials\n ### ========== TODO : END ========== ###\n\n return train_error, test_error, f1_score",
"def error(self, X, y):\n ans = self.predict(X)\n return np.sum(np.logical_not(np.equal(ans,y))) / len(X)",
"def error(Y, X):\n return (Y - X) ** 2",
"def error(self, x, t):\n predict = self.model.predict(x)\n if t == predict:\n return 0\n else:\n return 1",
"def error(self, X, y):\n y_hat = self.predict(X)\n y = np.array(y)\n return sum(y[i] != y_hat[i] for i in range(len(y))) / len(y)",
"def errors(self, target):\n\n return T.mean(T.neq(self.y_pred, T.argmax(target, axis=1)))",
"def compute_error(self, X, Y):\n\n if self.method != 'knn':\n accuracy = self.classifier.score(X, Y)\n error = 1 - accuracy\n return error\n else:\n distances, indices = self.classifier.kneighbors(X)\n error = 0\n for index, ground_truth in zip(indices, Y):\n classes = [self.train_Y[neigbhor] for neigbhor in index]\n mode, _ = stats.mode(classes)\n if mode != ground_truth:\n error += 1\n\n return error / len(Y)",
"def cross_validation(T, y):\r\n from sklearn.model_selection import LeaveOneOut\r\n y = np.array(y)\r\n judge = list()\r\n for train_index, valid_index in LeaveOneOut().split(T):\r\n T_train = T[train_index]\r\n T_valid = T[valid_index]\r\n y_train = y[train_index]\r\n y_valid = y[valid_index]\r\n\r\n T_train, mean, std = feature.normalize(T_train)\r\n T_principle, T_principle_index, dist, AUC = feature_select(T_train,\r\n y_train, k=3)\r\n ts = threshold(dist, y_train)\r\n C = gen_center(T_principle, y_train)\r\n T_valid = (T_valid - mean) / std\r\n dist_valid = util.distance(T_valid.T[T_principle_index].T, C)\r\n if y_valid[0] == 1:\r\n if dist_valid[0] < ts:\r\n judge.append(1)\r\n else:\r\n judge.append(0)\r\n else:\r\n if dist_valid[0] < ts:\r\n judge.append(0)\r\n else:\r\n judge.append(1)\r\n accuracy = sum(judge) / len(judge)\r\n return accuracy",
"def _error(self, Y_pred, Y):\r\n error=numpy.zeros(len(Y_pred))\r\n for i in range(len(Y_pred)):\r\n if(Y_pred[i]!=Y[i]):\r\n error[i]=1\r\n return numpy.mean(error)",
"def eval_error_metric(predt, dtrain: xgb.DMatrix):\n label = dtrain.get_label()\n r = np.zeros(predt.shape)\n gt = predt > 0.5\n if predt.size == 0:\n return \"CustomErr\", 0\n r[gt] = 1 - label[gt]\n le = predt <= 0.5\n r[le] = label[le]\n return 'CustomErr', np.sum(r)",
"def eval_metrics(y, pred):\n classification_error = np.sum(pred != y) / float(y.shape[0])\n return classification_error",
"def error(self, x, t):\n predict = self.model.predict(x.reshape(1, -1))\n if t == predict:\n return 0\n else:\n return 1",
"def error(clf, X, y, ntrials=100, test_size=0.2) :\n\n train_error = 0\n test_error = 0\n ### ========== TODO : START ========== ###\n # compute cross-validation error over ntrials\n # hint: use train_test_split (be careful of the parameters)\n for i in range(0,ntrials, 1):\n #get the value of the error for each division\n #train on the test data for the clf\n #test also on the data\n #X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33, random_state = 42)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= test_size, random_state=i)\n #now find the error\n #first train the model\n #then predict\n #check the accuracy\n clf.fit(X_train,y_train)\n y_pred = clf.predict(X_train)\n #now find the error for the train_error\n train_err = 1 - metrics.accuracy_score(y_train, y_pred, normalize=True)\n train_error += train_err\n\n y_pred = clf.predict(X_test)\n test_err = 1 - metrics.accuracy_score(y_test, y_pred, normalize=True)\n test_error += test_err\n\n\n #get the average\n train_error = float(train_error)/((1-test_size)*len(X))\n test_error = float(test_error)/((test_size)*len(X))\n ### ========== TODO : END ========== ###\n\n return train_error, test_error",
"def max_error(y_true, y_pred):\n ...",
"def error(self, y_predicted, y):\n errors = []\n for i in range(y.size): \n errors.append((y[i]-y_predicted[i]) ** 2)\n return mean(errors)",
"def error(self, X, y):\n matches=[]\n pred = self.predict(X)\n for i in range(len(X)):\n if y[i]!= pred[i]:\n matches.append(1)\n else:\n matches.append(0)\n matches = np.array(matches)\n return np.sum(matches)*(1/(len(matches)*1.0))",
"def evaluate(X, yt, cls, name='data'):\n yp = cls.predict(X)\n acc = metrics.accuracy_score(yt, yp)\n return acc"
] |
[
"0.705394",
"0.705394",
"0.70179045",
"0.7015995",
"0.70074224",
"0.69555795",
"0.6893458",
"0.68518806",
"0.6773689",
"0.6564925",
"0.653388",
"0.64286697",
"0.63042307",
"0.6298643",
"0.6283704",
"0.6282769",
"0.6233783",
"0.6225041",
"0.6223552",
"0.61783606",
"0.61619973",
"0.61514777",
"0.6151177",
"0.61439854",
"0.6142449",
"0.6118935",
"0.60664916",
"0.6046123",
"0.60041386",
"0.60015756"
] |
0.7826117
|
0
|
Leave only neurons with the given indexes.
|
def _prune(self, idx):
idx = list(idx)
neurons = []
for nold in self.neurons:
k = nold[1] # number of neurons
ix1 = [i for i in idx if i < k] # index for current neuron type
idx = [i-k for i in idx if i >= k]
func = nold[0]
number = len(ix1)
W = nold[2][:, ix1]
bias = nold[3][ix1]
neurons.append((func, number, W, bias))
self.neurons = neurons
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def remove_indexes(self, indexes):\n # Create a set of the rows (as int) to delete\n selected_rows = set()\n for index in indexes:\n selected_rows.add(index.row())\n\n # Delete all of them one by one (easy but maybe not the best performance-wise)\n for index, row in enumerate(sorted(selected_rows)):\n self.removeRow(row - index) # The actual target row to be removed decreases by one when a previous is removed",
"def remove_objects(self, indexes):\n fields = [\n \"object_position\",\n \"object_velocity\",\n \"object_radius\",\n \"object_rotation\",\n \"object_type\",\n \"object_steps\",\n ]\n for field in fields:\n setattr(\n self,\n field,\n [x for i, x in enumerate(getattr(self, field)) if i not in indexes],\n )",
"def prune_connections(net, subsample_indices):\n new_connections = []\n new_subsample_indices = []\n for i in range(len(subsample_indices)):\n if len(subsample_indices[i]) > 0:\n new_connections.append(net.connections[i])\n new_subsample_indices.append(subsample_indices[i])\n\n net.connections = new_connections\n return new_subsample_indices",
"def hide(self, indices):\n traj_ids = set(traj.id for traj in self._trajlist)\n\n for index in indices:\n comp_id = self._ngl_component_ids[index]\n if comp_id in traj_ids:\n traj = self._get_traj_by_id(comp_id)\n traj.shown = False\n self._remote_call(\n \"setVisibility\",\n target='compList',\n args=[\n False,\n ],\n kwargs={'component_index': index})",
"def disconnect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n # `discard` ignores non-existing elements (unlike `remove`)\n app.edges[src_id].discard(trg_id)\n self.mark_as_unsaved()\n self.update()",
"def exclude_nodes(self, nodes):",
"def _delete_command_idxs(indexes, from_cmake):\n for index in sorted(indexes, reverse=True):\n del from_cmake[index]",
"def add_skip_connections(self):\n for term_ in self.inverted_index.keys():\n postingsList=self.inverted_index[term_]\n postingsList.add_skip_connections()",
"def reset_indexes(self) -> None:\n assert self.indexes is not None, 'Cannot reset indexes because they have not been enabled.'\n self.indexes.force_clear_all()\n self.update_best_block_tips_cache(None)\n self._all_tips_cache = None",
"def drop_non_unique_indexes(self):\n for idx in self.droppable_indexes:\n log.info(\"Dropping index '{}' on intermediate table\".format(idx.name))\n self.ddl_guard()\n self.execute_sql(sql.drop_index(idx.name, self.new_table_name))",
"def prune_layers(net, subsample_indices):\n new_layers = []\n\n discarded_names = []\n for layer in net.layers:\n if layer.m > 0:\n new_layers.append(layer)\n else:\n discarded_names.append(layer.name)\n\n new_connections = []\n new_subsample_indices = []\n for i in range(len(net.connections)):\n connection = net.connections[i]\n if connection.pre.name not in discarded_names and connection.post.name not in discarded_names:\n new_connections.append(connection)\n new_subsample_indices.append(subsample_indices[i])\n\n net.layers = new_layers\n net.connections = new_connections\n\n return new_subsample_indices",
"def prune_train_dataset(self, all_labels, train_idxs):\n\n # -- prune samples if necessary to have equal sized splits\n neg_idxs = [idx for idx in train_idxs if all_labels[idx] == self.NEG_LABEL]\n pos_idxs = [idx for idx in train_idxs if all_labels[idx] == self.POS_LABEL]\n n_samples = min(len(neg_idxs), len(pos_idxs))\n\n rstate = np.random.RandomState(7)\n rand_idxs_neg = rstate.permutation(neg_idxs)\n rand_idxs_pos = rstate.permutation(pos_idxs)\n\n neg_idxs = rand_idxs_neg[:n_samples]\n pos_idxs = rand_idxs_pos[:n_samples]\n train_idxs = np.concatenate((pos_idxs, neg_idxs))\n\n return train_idxs",
"def _exclude_indices(self):\n idx = self._next_idx\n exclude = np.arange(idx - 1, idx + self.obs_len) % self._maxsize\n return exclude",
"def neighbours(ar, cur_index, cnt_of_neiboors=3, exclude_from_neibors_index=[]):\n rmax = np.max([0, cur_index + cnt_of_neiboors - len(ar)])\n lmin = np.max([cur_index - (cnt_of_neiboors + rmax), 0])\n\n excl = set(exclude_from_neibors_index) | {cur_index}\n nbs = [i for i in range(lmin, len(ar)) if i not in excl]\n return ar[nbs[:cnt_of_neiboors * 2]]",
"def selectOfSample(self, indexes):\n index_set = set()\n for idx in indexes:\n i = list(self.sample[self.sample['masked'] == False].index)[idx]\n index_set.add(i)\n for ind in list(self.sample[self.sample['masked'] == False].index):\n if ind not in index_set:\n self.sample.at[ind, 'masked'] = True\n return index_set",
"def clear_indexes(self):\n for keypoints in self:\n keypoints.clear_index()",
"def prune(neuron,\n number_of_nodes):\n n = len(neuron.nodes_list)\n for i in range(n - number_of_nodes):\n index = shortest_tips(neuron)\n neuron = remove_node(neuron, index)\n return neuron",
"def deactivate_all(self):\n\t self.active_nodes = [False for i in range(self.nodes)]",
"def delete_rows(self, indexes: Union[Index, List[Index]]):\n indexes = [self.index_location(idx) for idx in to_list(indexes)]\n\n unknown = set(indexes) - set(self.index)\n if unknown:\n names = \", \".join(str(name) for name in unknown)\n raise ValueError(f\"Unable to remove unknown rows: {names}\")\n\n for index in sorted(indexes, reverse=True):\n del self._data[index]",
"def truncate_net(net, indices):\n if len(indices) > 1:\n tmp = list(net.children())[:indices[0]]\n next = list(net.children())[indices[0]]\n return nn.Sequential(*(tmp+[truncate_net(next, indices[1:])]))\n elif len(indices) == 1:\n return nn.Sequential(*list(net.children())[:indices[0]+1])\n else:\n raise ValueError(\"Layer indices must not be empty!\")",
"def remove_clusters(self, indices):\n keep_indices = ind_retain_elements(indices, self.num_comp)\n\n self.num_comp = self.num_comp - len(indices)\n\n self.latent_resp = self.latent_resp[:, keep_indices]\n self.latent_scale = self.latent_scale[:, keep_indices]\n self.latent_log_scale = self.latent_log_scale[:, keep_indices]\n self.latent_scaled_resp = self.latent_scaled_resp[:, keep_indices]\n\n self.log_smm_mixweight = self.log_smm_mixweight[keep_indices]\n self.log_det_precision = self.log_det_precision[keep_indices]\n\n self.gamma_param_alpha = self.gamma_param_alpha[keep_indices]\n self.gamma_param_beta = self.gamma_param_beta[:, keep_indices]",
"def disconnect(nodes, ignores=tuple()):\n for node in nodes:\n channelBox = ChannelBox(node, *ignores)\n channelBox.disconnect()",
"def dropIndices(df, indices):\n df_result = df.copy()\n sorted_indices = list(indices)\n sorted_indices.sort()\n sorted_indices.reverse()\n for idx in sorted_indices:\n df_result = df_result.drop(idx, axis=0)\n return df_result",
"def trim(network, pores=[], throats=[]):\n pores = network._parse_indices(pores)\n throats = network._parse_indices(throats)\n Pkeep = np.copy(network['pore.all'])\n Tkeep = np.copy(network['throat.all'])\n if np.size(pores) > 0:\n Pkeep[pores] = False\n if not np.any(Pkeep):\n raise Exception('Cannot delete ALL pores')\n # # Performing customized find_neighbor_throats which is much faster, but\n # # not general for other types of queries\n # temp = np.in1d(network['throat.conns'].flatten(), pores)\n # temp = np.reshape(temp, (network.Nt, 2))\n # Ts = np.any(temp, axis=1)\n # Ts = network.Ts[Ts]\n Ts = network.find_neighbor_throats(pores=~Pkeep, mode='union')\n if len(Ts) > 0:\n Tkeep[Ts] = False\n if np.size(throats) > 0:\n Tkeep[throats] = False\n # The following IF catches the special case of deleting ALL throats\n # It removes all throat props, adds 'all', and skips rest of function\n if not np.any(Tkeep):\n logger.info('Removing ALL throats from network')\n for item in list(network.keys()):\n if item.split('.')[0] == 'throat':\n del network[item]\n network['throat.all'] = np.array([], ndmin=1)\n return\n\n # Temporarily store throat conns and pore map for processing later\n Np_old = network.Np\n Nt_old = network.Nt\n Pkeep_inds = np.where(Pkeep)[0]\n Tkeep_inds = np.where(Tkeep)[0]\n Pmap = np.ones((network.Np,), dtype=int)*-1\n tpore1 = network['throat.conns'][:, 0]\n tpore2 = network['throat.conns'][:, 1]\n\n # Delete specified pores and throats from all objects\n for obj in network.project[::-1]:\n if (obj.Np == Np_old) and (obj.Nt == Nt_old):\n Ps = Pkeep_inds\n Ts = Tkeep_inds\n else: # If subdomain object then Np/Nt < Np/Nt_old\n Ps = obj.to_local(pores=Pkeep_inds, missing_vals=None)\n Ts = obj.to_local(throats=Tkeep_inds, missing_vals=None)\n for key in list(obj.keys()):\n temp = obj.pop(key)\n if key.split('.')[0] == 'throat':\n obj.update({key: temp[Ts]})\n if key.split('.')[0] == 'pore':\n obj.update({key: temp[Ps]})\n\n # Remap throat connections\n Pmap[Pkeep] = np.arange(0, np.sum(Pkeep))\n Tnew1 = Pmap[tpore1[Tkeep]]\n Tnew2 = Pmap[tpore2[Tkeep]]\n network.update({'throat.conns': np.vstack((Tnew1, Tnew2)).T})\n\n # Clear adjacency and incidence matrices which will be out of date now\n network._am.clear()\n network._im.clear()",
"def breaks_connectivity(level, index, axis=0):\n new_level = remove_index(level, index, axis=axis)\n return not is_solvable(new_level)",
"def unhide_nodes(node_names, network=None, base_url=DEFAULT_BASE_URL):\n res = clear_node_property_bypass(node_names, 'NODE_VISIBLE', network=network, base_url=base_url)\n return res",
"def unhide_edges(edge_names, network=None, base_url=DEFAULT_BASE_URL):\n res = clear_edge_property_bypass(edge_names, 'EDGE_VISIBLE', network=network, base_url=base_url)\n return res",
"def run_removing_edges(self):\n indices = np.where(self.X==1)\n idx=[]\n for i in range(len(indices[0])):\n idx.append((indices[0][i],indices[1][i]))\n idx = np.array(idx)\n return self.node_equivalent(idx)",
"def prune_to(self, names, merge_monotomies=True):\n self.prune_to_nodes(self.get_nodes(names), merge_monotomies)",
"def reset_indexes(self) -> None:\n raise NotImplementedError"
] |
[
"0.63015276",
"0.5982674",
"0.558918",
"0.54869",
"0.54803425",
"0.5414516",
"0.5411668",
"0.5348167",
"0.5242817",
"0.52350545",
"0.5226981",
"0.5212443",
"0.5177332",
"0.5151017",
"0.51317",
"0.5111102",
"0.5095578",
"0.50760925",
"0.5072626",
"0.50687325",
"0.5067967",
"0.5060697",
"0.5049798",
"0.5013019",
"0.5011286",
"0.50072604",
"0.49923584",
"0.4960318",
"0.49522662",
"0.49424496"
] |
0.6700615
|
0
|
Function to transform dataframe to array
|
def df_to_array(datasample):
return np.array(datasample)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def dataframe_to_array(df: pd.DataFrame) -> np.ndarray:\n\n if isinstance(df, pd.Series):\n x_array = df\n elif isinstance(df, pd.DataFrame):\n nb_vars = df.shape[1]\n varlist = df.columns\n x_array = df[varlist[0]]\n if nb_vars > 1:\n for k in range(1, nb_vars):\n x_array = x_array.astype(str) + \"_&_\" + df[varlist[k]].astype(str)\n else:\n raise AssertionError(\"The input data is not a pandas dataframe\")\n\n return np.asarray(x_array.to_numpy())",
"def _df_meta_to_arr(df):\n\n if len(df.columns):\n if isinstance(df.columns[0], str):\n columns = df.columns.values.astype(\"S\")\n else:\n columns = df.columns.values\n else:\n columns = []\n\n if len(df.index):\n if isinstance(df.index[0], str):\n index = df.index.values.astype(\"S\")\n else:\n index = df.index.values\n else:\n index = []\n\n return columns, index",
"def get_data_matrix(df):\n return df[[\"Open\", \"High\", 'Low', \"Close\"]].to_numpy()",
"def convert_dataframe_to_array(df_or_series):\n if isinstance(df_or_series, pd.DataFrame) or isinstance(\n df_or_series, pd.Series\n ):\n dat = np.array(df_or_series)\n if len(dat.shape) == 1:\n return dat[:, np.newaxis]\n else:\n return dat\n if isinstance(df_or_series, np.ndarray):\n return df_or_series\n else:\n raise TypeError(\n f\"InputData error:\\n\"\n f\"type should be of np.ndarray and is currently type: {type(df_or_series)}\"\n )",
"def _to_ndarray(data):\n return np.atleast_1d(getattr(data, 'values', data))",
"def _df_to_ndarray(data_df, dataset_type):\n\tdata = data_df.loc[:, data_df.columns != 'target'].apply(lambda row: row.to_numpy(dtype='float32').reshape(784,1), axis=1).tolist()\n\n\tif dataset_type == 'training':\n\t\ttargets = data_df.loc[:, data_df.columns == 'target'].apply(_digit_to_10array, axis=1).tolist()\n\n\telse:\n\t\ttargets = data_df['target'].tolist()\n\n\tdata_list = list(zip(data, targets))\n\n\treturn data_list",
"def sparse_matrix_to_array(data_frame, sparse_column):\n array = data_frame[[sparse_column]]\n array[sparse_column] = array[sparse_column].apply(lambda x: x.toarray())\n array[sparse_column] = array[sparse_column].apply(lambda x: x[0])\n array = np.stack(array[sparse_column].values, axis=0) # over write array df as an np array\n return array",
"def __array__(self):\n return pa.column(\"dummy\", self.data).to_pandas().values",
"def _dataframe_to_recarray(dataframe, dtypes):\n names = [dt[0] for dt in dtypes]\n events = dataframe.to_records(index=False)\n # Make sure that all the columns are in the correct order\n events = events[names].astype(dtypes)\n events.dtype.names = [str(name) for name in events.dtype.names]\n return events",
"def _as_numpy(_1d_h2o_frame):\n f = _check_is_1d_frame(_1d_h2o_frame)\n \n nm = str(f.columns[0])\n return f[nm].as_data_frame(use_pandas=True)[nm].values",
"def __array__(self, copy=None):\n return self.data.to_pandas().values",
"def transform(self, dataframe: DataFrame) -> DataFrame:",
"def to_records_array(df):\n meta_arrays = []\n dtypes = []\n for c_name, c_data in df.iteritems():\n dtype = get_dtype(c_data)\n\n if np.issubdtype(dtype, np.bytes_):\n data = c_data.astype(str).str.encode('utf-8').values\n else:\n data = c_data.values\n\n arr = np.array(data, dtype=dtype)\n meta_arrays.append(arr)\n dtypes.append((c_name, dtype))\n\n return np.core.records.fromarrays(meta_arrays, dtype=dtypes)",
"def features_to_array(features_table, scaler):\n\n # Check arguments\n X = features_to_unscaled_matrix(features_table)\n return scaler.transform(X)",
"def pd_to_np(df, squeeze):\n # Extract the labels and create a samples vector out of it\n labels = df.iloc[:, df.columns.get_level_values(0) == 'label']\n labels = labels.droplevel('event')\n if squeeze:\n labels = labels[~labels.index.duplicated(keep='first')]\n labels_np = np.squeeze(labels.values)\n else:\n labels_np = labels.values\n\n # Drop the labels from data\n dataframe = df.drop('label', axis=1)\n\n dim_0 = len(dataframe.index.get_level_values(0).unique())\n dim_1 = int(len(dataframe.index.get_level_values(1)) / dim_0)\n dim_2 = dataframe.shape[1]\n\n dataframe_np = dataframe.values.reshape((dim_0, dim_1, dim_2))\n\n return dataframe_np, labels_np",
"def generate(data: pd.DataFrame) -> np.ndarray:\n # TODO tests\n return data.apply(pd.to_numeric, errors='coerce')",
"def prepare_arrays(series: pd.Series) -> np.array:\n\n series = series.map(string_to_array)\n\n # transform the array of array into a 2d-array\n return np.stack(np.array(series.array))",
"def toarray(x):\n if is_SparseDataFrame(x):\n x = x.to_coo().toarray()\n elif is_SparseSeries(x):\n x = x.to_dense().to_numpy()\n elif isinstance(x, (pd.DataFrame, pd.Series, pd.Index)):\n x = x.to_numpy()\n elif isinstance(x, sparse.spmatrix):\n x = x.toarray()\n elif isinstance(x, np.matrix):\n x = x.A\n elif isinstance(x, list):\n x_out = []\n for xi in x:\n try:\n xi = toarray(xi)\n except TypeError:\n # recursed too far\n pass\n x_out.append(xi)\n # convert x_out from list to array\n x = np.array(x_out, dtype=_check_numpy_dtype(x_out))\n elif isinstance(x, (np.ndarray, numbers.Number)):\n pass\n else:\n raise TypeError(\"Expected array-like. Got {}\".format(type(x)))\n return x",
"def nparray(self):\r\n # Convert pandas data frame to numpy record array (a type of structured array)\r\n _nparray = self.df.to_records()\r\n # Change datatypes to ones compatible with ArcPy\r\n # This will also subset and re-order columns according to order in array\r\n dtypes = self.base_dtype + self.veg_dtype\r\n # Create NumPy data types object\r\n td_dtype = np.dtype(dtypes)\r\n # Apply data types to NumPy array\r\n _nparray = _nparray.astype(td_dtype)\r\n return _nparray",
"def _to_arraylike(data):\n _load_objects()\n if data is None:\n raise ValueError('Cannot convert None data.')\n return None\n if not isinstance(data, (ndarray, DataArray, DataFrame, Series, Index)):\n data = np.asarray(data)\n if not np.iterable(data):\n data = np.atleast_1d(data)\n return data",
"def values(self) -> ndarray:\n if len(self._data) == 1:\n kind: str = next(iter(self._data))\n order: List[int] = [self._column_info[col].loc for col in self._columns]\n arr = self._data[kind][:, order]\n if kind == 'b':\n return arr == 1\n else:\n return arr\n\n if {'b', 'S', 'm', 'M'} & self._data.keys():\n arr_dtype: str = 'O'\n else:\n arr_dtype = 'float64'\n\n v: ndarray = np.empty(self.shape, dtype=arr_dtype, order='F')\n\n for col, dtype, loc, order, col_arr in self._col_info_iter(with_order=True, with_arr=True):\n if dtype == 'S':\n cur_list_map = self._str_reverse_map[loc]\n _va.make_object_str_array(cur_list_map, v, col_arr, order)\n elif dtype == 'M':\n unit = col_arr.dtype.name.replace(']', '').split('[')[1]\n # changes array in place\n _va.make_object_datetime_array(v, col_arr.view('uint64'), order, unit)\n elif dtype == 'm':\n unit = col_arr.dtype.name.replace(']', '').split('[')[1]\n _va.make_object_timedelta_array(v, col_arr.view('uint64'), order, unit)\n else:\n v[:, order] = col_arr\n return v",
"def get_array(obj, col=None):\n if isinstance(obj, Series) and (col is None or obj.name == col):\n arr = obj._values\n else:\n assert col is not None\n icol = obj.columns.get_loc(col)\n assert isinstance(icol, int)\n arr = obj._get_column_array(icol)\n if isinstance(arr, BaseMaskedArray):\n return arr._data\n return arr",
"def to_numpy_dataset(df: DataFrame, backend: Optional[\"Backend\"] = None) -> Dict[str, np.ndarray]: # noqa: F821\n dataset = {}\n for col in df.columns:\n res = df[col]\n if backend and is_dask_backend(backend):\n res = res.compute()\n if len(df.index) != 0:\n dataset[col] = np.stack(res.to_numpy())\n else:\n # Dataframe is empty.\n # Use to_list() directly, as np.stack() requires at least one array to stack.\n dataset[col] = res.to_list()\n return dataset",
"def get_transformed_data(self, df):\n temp_df = pd.DataFrame(self.fa.transform(df))\n return temp_df",
"def row_to_array(r):\n a = np.ma.array([i for i in r.as_void()])\n return a",
"def to_numpy(self) -> np.ndarray:\n log_advice(\n \"`to_numpy` loads all data into the driver's memory. \"\n \"It should only be used if the resulting NumPy ndarray is expected to be small.\"\n )\n return cast(np.ndarray, self._to_pandas().values)",
"def dataframe_to_list(df: pandas.DataFrame) -> list:\n return json.loads(df.to_json(orient=\"records\"))",
"def as_numpy_array(self):\n return self.frame",
"def spark_df_to_records(df):\n return [tuple(r) for r in df.collect()]",
"def pandas_df_to_records(df):\n return df.to_records(index=False).tolist()"
] |
[
"0.76691246",
"0.7365685",
"0.70576155",
"0.7006761",
"0.6754656",
"0.6746705",
"0.67400545",
"0.6668439",
"0.6607527",
"0.65974987",
"0.65742385",
"0.6560906",
"0.65563977",
"0.644972",
"0.6419316",
"0.6397516",
"0.638746",
"0.63369447",
"0.6327966",
"0.6318903",
"0.6294463",
"0.62561464",
"0.6228589",
"0.6208194",
"0.6190317",
"0.6165539",
"0.615723",
"0.6152471",
"0.6142312",
"0.6141362"
] |
0.79419214
|
0
|
Generate the full PDS path for a given HRSC data file
|
def generatePdsPath(filePrefix):
# File prefix looks like this: hHXXX_DDDD_SSS
fileType = '.img'
# Extract the run number --> HXXX
runNum = filePrefix[1:5]
filename = filePrefix + fileType
baseUrl = "http://pds-geosciences.wustl.edu/mex/mex-m-hrsc-5-refdr-mapprojected-v2/mexhrsc_1001/data/"
    fullUrl = baseUrl + runNum + "/" + filename
    # print(filePrefix + fileType + ' -> ' + fullUrl)
return fullUrl
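
# Usage sketch added by the editor (illustrative only; the prefix below is a
# hypothetical hHXXX_DDDD_SSS-style name). The run number '0988' becomes the
# subdirectory, so the result is <baseUrl>/0988/h0988_0000_nd3.img.
if __name__ == "__main__":
    print(generatePdsPath("h0988_0000_nd3"))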
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getDataPath():\n\treturn \"..\" + os.sep + \"data\" + os.sep",
"def get_data_path():\n return os.getcwd() + \"/data/\"",
"def get_path(data_path):\n\treturn os.path.dirname(os.path.realpath(__file__)) + os.sep + data_path",
"def pathFromCompetition(competition):\n config = Config()\n ret = Path(config.d['data_path']+'/'+competition+'/')\n if not ret.exists(): raise Exception('Please download the competition data first.')\n return ret",
"def get_data_path(path):\n\n data_path = Path(self.kard.meta.get('data_path', 'data'))\n\n if data_path.is_absolute():\n return str(data_path / path)\n\n return str(self.kard_folder_path / self.kard.name / data_path /\n path)",
"def get_data_file(f):\n if os.path.isfile(f):\n path = f\n\n else:\n p = pkg_resources.resource_filename('PaSDqc', \"db/{}\".format(f))\n \n if os.path.isfile(p):\n path = p\n else:\n raise IOError(\"{} is neither a system file nor a site-package file. Are you sure you have the right file name?\".format(f))\n\n return path",
"def datafilepath(*filename):\r\n import os\r\n return makepath(os.path.join(base_dir, *filename))",
"def sas_file(self):\n\n return os.path.normpath(self.path +'\\\\'+ cfg_dict['format_pgm'])",
"def _get_dataset_path(self, field):\n return '{0}{1}||{2}'.format(self.path_in_hdf5, field, self.uuid)",
"def construct_data_path (self, scene_rpath, category):\n dirname, basename = os.path.split (scene_rpath)\n baseprefix, basesuffix = os.path.splitext (basename)\n data_rpath = os.path.join ('data', category, baseprefix + '.dsf')\n return data_rpath",
"def dataPath(self):\n return ''",
"def get_data_path(file_name=None):\n if file_name is None:\n file_name = \"\"\n return os.path.join(DATA_DIR, file_name)",
"def dataset_path(dataset, work_dir=consts.WORK_DIR):\r\n return join_path(work_dir, consts.DATA_DIR, dataset)",
"def data_path(scope=\"session\"):\n return join(dirname(__file__), pardir, \"new_data\")",
"def get_oc_path(cfg):\n return os.path.join(\n BASE_DATA_DIR,\n \"castp\",\n \"pH\" + str(cfg.pH),\n str(cfg.mut),\n \"oc\" + str(cfg.probe) + \".csv\")",
"def data_path(self):\n raise NotImplementedError",
"def get_data_path():\n\treturn _paths[_DATA_DIRECTORY_KEY]",
"def get_datapath(filename, data_type):\n return (\n Path(__file__)\n .resolve()\n .parent.parent.parent.joinpath(\n \"data\", \"proc_test_data\", data_type.upper(), filename\n )\n )",
"def get_data_folder_path(challenge_short_name):\n return safe_join(settings.MEDIA_ROOT, challenge_short_name)",
"def data_full_filename(filename):\n return os.path.join(os.path.dirname(__file__), 'data', filename)",
"def get_output_path():\n\n path = rs.DocumentPath()\n name = rs.DocumentName()\n \n if gc.operating_system == \"mac\":\n\n path = path[:-len(name)] + \"_system.dat\"\n\n elif gc.operating_system == \"win\":\n\n i = path.rfind(\"\\\\\")\n\n path = path[:i] + \"/_system.dat\" \n\n return path",
"def get_data_path():\n\n # Get pathname absolute or relative.\n path = os.path.join(\n os.path.dirname(__file__), __malstor_data_directory__)\n\n abs_data_path = os.path.abspath(path)\n if not os.path.exists(abs_data_path):\n raise project_path_not_found\n\n return abs_data_path",
"def get_data_file():\n this_directory = os.path.dirname(__file__)\n parent_directory = os.path.dirname(this_directory)\n return os.path.join(parent_directory, '_data/fortunes.txt')",
"def path_helper(location, date, time, slc_dir='slc', data_dir='/media/bup/Data'):\n\n base_folder = data_dir + '/' + location + '/' + date + '/'\n name = date + '_' + time\n def_path = base_folder + slc_dir + '/' + name\n return def_path",
"def data_path(filename):\n data_dir = os.path.join(os.path.dirname(__file__), 'data')\n return os.path.join(data_dir, filename)",
"def get_data_path():\n\n import rospkg\n rospack = rospkg.RosPack()\n return os.path.join(rospack.get_path('testing_tools'), 'data')",
"def get_data_filename(relative_path): #TODO put in utils\n\n import os\n from pkg_resources import resource_filename\n fn = resource_filename('mdfptools', os.path.join('data', relative_path))\n\n if not os.path.exists(fn):\n raise ValueError(\"Sorry! %s does not exist. If you just added it, you'll have to re-install\" % fn)\n\n return fn",
"def data_filename(self) -> str: # type: ignore[return-value]\n return os.path.abspath(self.name) # type: ignore",
"def dataset_path(cls):\n basepath = os.path.dirname(__file__)\n filepath = os.path.abspath(\n os.path.join(basepath, \"..\", \"datasets\", get_env('DATA_FILENAME')))\n return filepath",
"def path(self):\n return self._data_file"
] |
[
"0.6609946",
"0.6476409",
"0.6458018",
"0.6431442",
"0.6423816",
"0.6364348",
"0.63640827",
"0.6339666",
"0.632974",
"0.62803036",
"0.6274267",
"0.62587535",
"0.6227221",
"0.61917484",
"0.6113514",
"0.60960364",
"0.60952705",
"0.6090089",
"0.6077048",
"0.607591",
"0.60563326",
"0.60490274",
"0.60214543",
"0.60039675",
"0.5994459",
"0.5920272",
"0.59117234",
"0.5909962",
"0.5893459",
"0.58887804"
] |
0.71917576
|
0
|
Numpy version of vplane_2_vparam
|
def np_vplane_2_vparam(vplane):
vparam = np.cross(
vplane,
np.stack([-vplane[...,1], vplane[...,0], np.zeros_like(vplane[...,0])], axis=-1),
axis=-1)
return vparam[..., :2] / vparam[..., [2]]
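
# Editor's usage sketch (assumes numpy is already imported as np, as the
# function above requires). Reading the 3-vector [a, b, c] as the implicit form
# a*x + b*y + c = 0, the hypothetical input [0, 1, -2] (the line y = 2) maps to
# [0., 2.], the point of that line closest to the origin.
if __name__ == "__main__":
    print(np_vplane_2_vparam(np.array([0.0, 1.0, -2.0])))  # -> [0. 2.]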
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def np_vparam_2_vplane(vparam):\n d = np.linalg.norm(vparam, ord=2, axis=-1, keepdims=True)\n a = vparam[..., [0]] / d\n b = vparam[..., [1]] / d\n neg_sign = (a < 0)\n a[neg_sign] = -a[neg_sign]\n b[neg_sign] = -b[neg_sign]\n c = -(a * vparam[..., [0]] + b * vparam[..., [1]])\n vplane = np.concatenate([a, b, c], axis=-1)\n vplane[np.isnan(vplane)] = 0\n return vplane",
"def plane_point_side_v3(p: np.ndarray, v: np.ndarray) -> Any:\n return p[:3].dot(v) + p[3]",
"def topo_plane_paramEval(self, param):\n # Create an empty numpy array with the same number as pixels as the real data.\n self.topo_plane_fit_data = np.zeros((self.y_res, self.x_res))\n for y in range(0, self.y_res): # Iterate over the y-axis pixels.\n for x in range(0, self.x_res): # Iterate over the x-axis pixels.\n self.topo_plane_fit_data[y, x] = param[0]*x + param[1]*y + param[2] # Generate plane value.\n return self.topo_plane_fit_data # Return entire array.",
"def plane(self):\n return plane(self.N, self.o)",
"def _prepare_plane(self):\n verticies = [\n # main plane - note that the mainplane is scaled so the mat_plane\n # matrix will it transform to the correct coordinates\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n\n # coord plane\n 0, 0,\n 0, -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], 0,\n 0, 0,\n\n # axes\n 0, -self.o_wh[1], self.o_wh[0], -self.o_wh[1], #x\n 0, 0, 0, -self.o_wh[1], #y\n ]\n\n colors = [\n 1.0, 1.0, 1.0, 1.0, # outer box XXX Remove outer box...\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n .9, .9, .9, 9.0, # plot box\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n 0.0, 0.0, 0.0, 1.0, #lines\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n ]\n\n self._fonts = []\n for u in range(1, self._unit_count[0]+1):\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]+0.02)\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]-0.02)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(u*(self.i_axis[0]/self._unit_count[0])-self.i_origin[0]),\n (self._unit_w[0]*u+self.i_border[0]-0.05)*self._scaling[0],\n (-self.o_wh[1]+(self.i_border[3])*0.5)\n ])\n for u in range(0, self._unit_count[1]):\n verticies.append(0.02)\n verticies.append(-self._unit_w[1]*u)\n verticies.append(-0.02)\n verticies.append(-self._unit_w[1]*u)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(self.i_axis[1]-u*self.i_axis[1]/self._unit_count[1]-self.i_origin[1]),\n (0.025)*self._scaling[0],\n (-(self._unit_w[1])*u-self.i_border[1]+0.01)*self._scaling[1]\n ])\n\n self._draw_plane_indicies = (0, 12)\n self._draw_line_indicies = (12, 4+self._unit_count[0]*2+self._unit_count[1]*2)\n\n # convert data into valid data format\n verticies = numpy.array(verticies, dtype=numpy.float32)\n colors = numpy.array(colors, dtype=numpy.float32)\n\n self._plane_vao = util.VAO()\n self._plane_vbo = util.VBO(2)\n\n with self._plane_vao:\n # plane verticies\n with self._plane_vbo.get(0):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(verticies), verticies, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_position'), 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(0)\n\n # place vertex colors\n with self._plane_vbo.get(1):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(colors), colors, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_color'), 4, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(1)",
"def find_plane_eq(p1, p2, p3):\n\n p1 = np.asarray(p1)\n p2 = np.asarray(p2)\n p3 = np.asarray(p3)\n\n # These two vectors are in the plane\n v1 = p3 - p1\n v2 = p2 - p1\n\n # the cross product is a vector normal to the plane\n cp = np.cross(v1, v2)\n a, b, c = cp\n\n # This evaluates a * x3 + b * y3 + c * z3 which equals d\n d = np.dot(cp, p3)\n\n plane_eq = np.array([a, b, c, d])\n\n return plane_eq",
"def getParamsToVector(self):\n #vectorise and concat weights arrays\n weights = np.concatenate( ( self.w_ih.flatten(), self.w_ho.flatten() ) )\n # concat biases vectors\n biases = np.concatenate( ( self.b_h, self.b_o ) )\n # concat weights and biases into params\n params = np.concatenate( ( weights, biases ) )\n return params",
"def invert_points_on_plane(pnts, plane):\n ptol = Settings.ptol\n vu = plane.vu.ijk\n vv = plane.vv.ijk\n p0 = plane.p0.xyz\n vu2 = dot(vu, vu)\n vv2 = dot(vv, vv)\n\n pnts = array(pnts, dtype=float64)\n npts = pnts.shape[0]\n params = zeros((npts, 2), dtype=float64)\n for i in range(0, npts):\n pi = pnts[i]\n u = dot(pi - p0, vu) / vu2\n v = dot(pi - p0, vv) / vv2\n if abs(u) <= ptol:\n u = 0.\n if abs(v) <= ptol:\n v = 0.\n params[i, :] = [u, v]\n\n return params",
"def diriv(x, params):\n return np.array([x,1])",
"def vector_perp(v):\n assert len(v) == 2\n x, y = v\n return Vector(-y, x)",
"def vectorize_params(g_BM, p_H):\n params = np.zeros((10,))\n params[0:4] = tf.transformations.quaternion_from_matrix(g_BM)\n params[4:7] = g_BM[0:3,3]\n params[7:10] = p_H[0:3]\n return params",
"def two_plane_obj_points(grid_size, dx):\r\n objp_xy = np.zeros((grid_size[0]*grid_size[1], 3), np.float32)\r\n objp_yz = np.zeros((grid_size[1]*grid_size[2], 3), np.float32)\r\n objp_xy[:,:2] = np.mgrid[0:grid_size[0], 0:grid_size[1]].T.reshape(-1, 2)\r\n objp_yz[:,1:3] = np.mgrid[0:grid_size[1], 0:grid_size[2]].T.reshape(-1, 2)\r\n\r\n return objp_xy*dx, objp_yz*dx",
"def get_plane(self, scalar, plane, pval):\n\n if plane == 'yz' or plane == 'zy':\n # z along rows, y along columns\n return scalar[:, pval, :]\n elif plane == 'xz' or plane == 'zx':\n # x along columns, z along rows\n return scalar[:, :, pval]\n elif plane == 'xy' or plane == 'yx':\n # x along rows, y along columns\n return scalar[pval, :, :]",
"def plane(self):\r\n from lsst.analysis import utils\r\n return utils.fitplane(self.points, self.z)",
"def bfplane(x, y, z):\n n = float(len(x))\n A = np.array([[sum(x*x),sum(x*y),sum(x)],[sum(x*y),sum(y*y),sum(y)],[sum(x),sum(y),n]])\n B = np.array([sum(x*z),sum(y*z),sum(z)])\n res = np.linalg.solve(A,B)\n return res",
"def VParameters(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_VParameters(self, *args)",
"def vec2param(self):\n pos_ix = 0\n vec = self.parameters[self.current_ix] # get current vector of params\n self.W_k = vec[:self.d_in * self.d].reshape((self.d_in,self.d))\n pos_ix += self.d_in * self.d\n self.W_q = vec[pos_ix : pos_ix + self.d_in * self.d].reshape((self.d_in,self.d))\n pos_ix += self.d_in * self.d\n\n for p in self.controller.parameters():\n t_shape = p.data.shape\n prod = np.prod(t_shape)\n if len(t_shape) == 1: p.data = torch.from_numpy(vec[pos_ix:pos_ix + prod].flatten())\n else: p.data = torch.from_numpy(vec[pos_ix:pos_ix + prod].reshape(t_shape[0],-1))\n pos_ix += prod",
"def __init__(self, m,r,v):\n self.m = m\n self.r = r\n self.v = v\n self.rv = np.array([r,0,0,v])",
"def get_params_array(self):\n return np.array(self.W), np.array(self.b)",
"def ret2dva(self, x, y):\n raise NotImplementedError",
"def get_v(r_div_R, z_div_L, Pi_div_DLP, k, alpha_ast, Bp, Bm, gp, gm, membrane_geometry):\n return CT.get_v(r_div_R, z_div_L, Pi_div_DLP, k, alpha_ast, Bp, Bm, gp, gm, membrane_geometry)",
"def GetPlane(plane):\r\n pass",
"def project_onto_plane(vect):\n x, y, z = vect\n \n return (x, y, 0.)",
"def get_normal_vector_of_plane(p1, p2, p3):\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n ## print 'norm: '+str(np.linalg.norm(nvec))\n return nvec / np.linalg.norm(nvec)",
"def test_create(self):\n f = azplugins.restrain.plane(group=hoomd.group.all(), point=(0,0,0), normal=(1,0,0), k=2.0)\n\n f.set_params(k=5.0)\n f.set_params(k=8)\n\n f.set_params(point=(0,0,1))\n f.set_params(point=[0,0,1])\n f.set_params(point=np.array([0,0,1]))\n\n f.set_params(normal=(0,0,1))\n f.set_params(normal=[0,0,1])\n f.set_params(normal=np.array([0,0,1]))\n\n f.set_params(point=(0,0,0), normal=(1,0,0), k=10.0)",
"def setParamsFromVector(self, params):\n #starting point of w_ih weights in vectorised params\n w_ih_start_pos = 0\n #end point of w_ih weights in vectorised params\n w_ih_end_pos = self.hiddenLayerSize * self.inputLayerSize\n\n self.w_ih = np.reshape( params[ w_ih_start_pos : w_ih_end_pos ], \\\n ( self.inputLayerSize, self.hiddenLayerSize ) )\n\n #end point of w_ho weights in vectorised params\n w_ho_end_pos = w_ih_end_pos + self.hiddenLayerSize * self.outputLayerSize\n\n self.w_ho = np.reshape( params[ w_ih_end_pos : w_ho_end_pos ], \\\n ( self.hiddenLayerSize, self.outputLayerSize))\n\n #end point of b_h biases in vectorised params\n b_h_end_pos = w_ho_end_pos + self.hiddenLayerSize\n \n self.b_h = params[ w_ho_end_pos : b_h_end_pos ]\n \n #end point of b_o biases in vectorised params\n b_o_end_pos = b_h_end_pos + self.outputLayerSize\n \n self.b_o = params[ b_h_end_pos : b_o_end_pos ]",
"def polyPlane(*args, axis: Union[List[float, float, float], bool]=None, createUVs: Union[int,\n bool]=1, height: Union[float, bool]=1.0, subdivisionsHeight: Union[int, bool]=0,\n subdivisionsWidth: Union[int, bool]=10, subdivisionsX: Union[int, bool]=5,\n subdivisionsY: Union[int, bool]=5, texture: Union[int, bool]=1, width:\n Union[float, bool]=1.0, caching: bool=True, constructionHistory: bool=True, name:\n AnyStr=\"\", nodeState: Union[int, bool]=0, object: bool=True, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def uvmap(self, p):\n # bottom left corner of the plane\n p00 = self.position - (self.sx * self.n0) / 2 - (self.sy * self.n1) / 2\n dif_vector = p - p00\n u = np.dot(dif_vector, self.n0) / self.sx\n v = np.dot(dif_vector, self.n1) / self.sy\n return u, v",
"def p2d(V,x,y):\n def s(a,N):\n \"\"\"Shortcut function to convert array x into a coluumn vector.\"\"\"\n a=np.reshape(a,(1,N**2),order='F').T\n return a\n N=V.shape[1]\n con=np.ones((x.shape[0],x.shape[1])) # constant terms\n xx,yy,xy=x*x,y*y,x*y\n xxx,yyy,xxy,xyy=xx*x,yy*y,xx*y,x*yy\n xxxx,yyyy,xxxy,xxyy,xyyy=xx*xx,yy*yy,xxx*y,xx*yy,x*yyy\n V2=s(V,N) \n lst=[yyyy,xxxy,xxyy,xyyy,xxx,yyy,xxy,xyy,xx,yy,xy,x,y,con]\n Q=s(xxxx,N)\n count = 0\n for elem in lst:\n elem=s(elem,N)\n count+=1\n Q=np.hstack((Q,elem))\n c=np.linalg.lstsq(Q,V2) \n c=c[0]\n theta=-0.5*np.arctan(c[11]/(c[10]-c[9]))\n Af=0.5*(c[9]*(1+1./np.cos(2*theta))+c[10]*(1-1./np.cos(2*theta)))\n Bf=0.5*(c[9]*(1-1./np.cos(2*theta))+c[10]*(1+1./np.cos(2*theta)))\n theta=180.*theta/np.pi\n return (Af, Bf, theta)",
"def plane_equation(point_a, point_b, point_c):\n v1 = np.subtract(point_a, point_c)\n v2 = np.subtract(point_a, point_b)\n normal = np.cross(v1, v2)\n # print 'b4 norm', normal\n unit_normal = norm_vect(normal)\n # print 'unityyy', unit_normal\n return unit_normal"
] |
[
"0.8257363",
"0.63768786",
"0.59504795",
"0.5945344",
"0.5901103",
"0.58194584",
"0.57758623",
"0.5767143",
"0.57617015",
"0.5703245",
"0.56223404",
"0.5606822",
"0.5601651",
"0.5597385",
"0.5542641",
"0.55421805",
"0.55402094",
"0.5523554",
"0.5511729",
"0.55012995",
"0.5484476",
"0.5468831",
"0.5432853",
"0.54259586",
"0.5416366",
"0.54128593",
"0.5408922",
"0.53774744",
"0.5338742",
"0.53323936"
] |
0.8056421
|
1
|
Numpy version of vparam_2_vplane
|
def np_vparam_2_vplane(vparam):
d = np.linalg.norm(vparam, ord=2, axis=-1, keepdims=True)
a = vparam[..., [0]] / d
b = vparam[..., [1]] / d
neg_sign = (a < 0)
a[neg_sign] = -a[neg_sign]
b[neg_sign] = -b[neg_sign]
c = -(a * vparam[..., [0]] + b * vparam[..., [1]])
vplane = np.concatenate([a, b, c], axis=-1)
vplane[np.isnan(vplane)] = 0
return vplane
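
# Editor's round-trip sketch (assumes numpy is already imported as np).
# Converting the hypothetical 2-vector [0., 2.] back yields a unit-normal
# homogeneous form with a non-negative first component: [0., 1., -2.].
if __name__ == "__main__":
    print(np_vparam_2_vplane(np.array([0.0, 2.0])))  # -> [ 0.  1. -2.]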
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def np_vplane_2_vparam(vplane):\n vparam = np.cross(\n vplane,\n np.stack([-vplane[...,1], vplane[...,0], np.zeros_like(vplane[...,0])], axis=-1),\n axis=-1)\n return vparam[..., :2] / vparam[..., [2]]",
"def plane_point_side_v3(p: np.ndarray, v: np.ndarray) -> Any:\n return p[:3].dot(v) + p[3]",
"def _prepare_plane(self):\n verticies = [\n # main plane - note that the mainplane is scaled so the mat_plane\n # matrix will it transform to the correct coordinates\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n\n # coord plane\n 0, 0,\n 0, -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], 0,\n 0, 0,\n\n # axes\n 0, -self.o_wh[1], self.o_wh[0], -self.o_wh[1], #x\n 0, 0, 0, -self.o_wh[1], #y\n ]\n\n colors = [\n 1.0, 1.0, 1.0, 1.0, # outer box XXX Remove outer box...\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n .9, .9, .9, 9.0, # plot box\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n 0.0, 0.0, 0.0, 1.0, #lines\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n ]\n\n self._fonts = []\n for u in range(1, self._unit_count[0]+1):\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]+0.02)\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]-0.02)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(u*(self.i_axis[0]/self._unit_count[0])-self.i_origin[0]),\n (self._unit_w[0]*u+self.i_border[0]-0.05)*self._scaling[0],\n (-self.o_wh[1]+(self.i_border[3])*0.5)\n ])\n for u in range(0, self._unit_count[1]):\n verticies.append(0.02)\n verticies.append(-self._unit_w[1]*u)\n verticies.append(-0.02)\n verticies.append(-self._unit_w[1]*u)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(self.i_axis[1]-u*self.i_axis[1]/self._unit_count[1]-self.i_origin[1]),\n (0.025)*self._scaling[0],\n (-(self._unit_w[1])*u-self.i_border[1]+0.01)*self._scaling[1]\n ])\n\n self._draw_plane_indicies = (0, 12)\n self._draw_line_indicies = (12, 4+self._unit_count[0]*2+self._unit_count[1]*2)\n\n # convert data into valid data format\n verticies = numpy.array(verticies, dtype=numpy.float32)\n colors = numpy.array(colors, dtype=numpy.float32)\n\n self._plane_vao = util.VAO()\n self._plane_vbo = util.VBO(2)\n\n with self._plane_vao:\n # plane verticies\n with self._plane_vbo.get(0):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(verticies), verticies, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_position'), 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(0)\n\n # place vertex colors\n with self._plane_vbo.get(1):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(colors), colors, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_color'), 4, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(1)",
"def vectorize_params(g_BM, p_H):\n params = np.zeros((10,))\n params[0:4] = tf.transformations.quaternion_from_matrix(g_BM)\n params[4:7] = g_BM[0:3,3]\n params[7:10] = p_H[0:3]\n return params",
"def getParamsToVector(self):\n #vectorise and concat weights arrays\n weights = np.concatenate( ( self.w_ih.flatten(), self.w_ho.flatten() ) )\n # concat biases vectors\n biases = np.concatenate( ( self.b_h, self.b_o ) )\n # concat weights and biases into params\n params = np.concatenate( ( weights, biases ) )\n return params",
"def diriv(x, params):\n return np.array([x,1])",
"def vector_perp(v):\n assert len(v) == 2\n x, y = v\n return Vector(-y, x)",
"def topo_plane_paramEval(self, param):\n # Create an empty numpy array with the same number as pixels as the real data.\n self.topo_plane_fit_data = np.zeros((self.y_res, self.x_res))\n for y in range(0, self.y_res): # Iterate over the y-axis pixels.\n for x in range(0, self.x_res): # Iterate over the x-axis pixels.\n self.topo_plane_fit_data[y, x] = param[0]*x + param[1]*y + param[2] # Generate plane value.\n return self.topo_plane_fit_data # Return entire array.",
"def plane(self):\n return plane(self.N, self.o)",
"def find_plane_eq(p1, p2, p3):\n\n p1 = np.asarray(p1)\n p2 = np.asarray(p2)\n p3 = np.asarray(p3)\n\n # These two vectors are in the plane\n v1 = p3 - p1\n v2 = p2 - p1\n\n # the cross product is a vector normal to the plane\n cp = np.cross(v1, v2)\n a, b, c = cp\n\n # This evaluates a * x3 + b * y3 + c * z3 which equals d\n d = np.dot(cp, p3)\n\n plane_eq = np.array([a, b, c, d])\n\n return plane_eq",
"def __init__(self, m,r,v):\n self.m = m\n self.r = r\n self.v = v\n self.rv = np.array([r,0,0,v])",
"def invert_points_on_plane(pnts, plane):\n ptol = Settings.ptol\n vu = plane.vu.ijk\n vv = plane.vv.ijk\n p0 = plane.p0.xyz\n vu2 = dot(vu, vu)\n vv2 = dot(vv, vv)\n\n pnts = array(pnts, dtype=float64)\n npts = pnts.shape[0]\n params = zeros((npts, 2), dtype=float64)\n for i in range(0, npts):\n pi = pnts[i]\n u = dot(pi - p0, vu) / vu2\n v = dot(pi - p0, vv) / vv2\n if abs(u) <= ptol:\n u = 0.\n if abs(v) <= ptol:\n v = 0.\n params[i, :] = [u, v]\n\n return params",
"def get_params_array(self):\n return np.array(self.W), np.array(self.b)",
"def p2d(V,x,y):\n def s(a,N):\n \"\"\"Shortcut function to convert array x into a coluumn vector.\"\"\"\n a=np.reshape(a,(1,N**2),order='F').T\n return a\n N=V.shape[1]\n con=np.ones((x.shape[0],x.shape[1])) # constant terms\n xx,yy,xy=x*x,y*y,x*y\n xxx,yyy,xxy,xyy=xx*x,yy*y,xx*y,x*yy\n xxxx,yyyy,xxxy,xxyy,xyyy=xx*xx,yy*yy,xxx*y,xx*yy,x*yyy\n V2=s(V,N) \n lst=[yyyy,xxxy,xxyy,xyyy,xxx,yyy,xxy,xyy,xx,yy,xy,x,y,con]\n Q=s(xxxx,N)\n count = 0\n for elem in lst:\n elem=s(elem,N)\n count+=1\n Q=np.hstack((Q,elem))\n c=np.linalg.lstsq(Q,V2) \n c=c[0]\n theta=-0.5*np.arctan(c[11]/(c[10]-c[9]))\n Af=0.5*(c[9]*(1+1./np.cos(2*theta))+c[10]*(1-1./np.cos(2*theta)))\n Bf=0.5*(c[9]*(1-1./np.cos(2*theta))+c[10]*(1+1./np.cos(2*theta)))\n theta=180.*theta/np.pi\n return (Af, Bf, theta)",
"def ret2dva(self, x, y):\n raise NotImplementedError",
"def bfplane(x, y, z):\n n = float(len(x))\n A = np.array([[sum(x*x),sum(x*y),sum(x)],[sum(x*y),sum(y*y),sum(y)],[sum(x),sum(y),n]])\n B = np.array([sum(x*z),sum(y*z),sum(z)])\n res = np.linalg.solve(A,B)\n return res",
"def two_plane_obj_points(grid_size, dx):\r\n objp_xy = np.zeros((grid_size[0]*grid_size[1], 3), np.float32)\r\n objp_yz = np.zeros((grid_size[1]*grid_size[2], 3), np.float32)\r\n objp_xy[:,:2] = np.mgrid[0:grid_size[0], 0:grid_size[1]].T.reshape(-1, 2)\r\n objp_yz[:,1:3] = np.mgrid[0:grid_size[1], 0:grid_size[2]].T.reshape(-1, 2)\r\n\r\n return objp_xy*dx, objp_yz*dx",
"def vec2param(self):\n pos_ix = 0\n vec = self.parameters[self.current_ix] # get current vector of params\n self.W_k = vec[:self.d_in * self.d].reshape((self.d_in,self.d))\n pos_ix += self.d_in * self.d\n self.W_q = vec[pos_ix : pos_ix + self.d_in * self.d].reshape((self.d_in,self.d))\n pos_ix += self.d_in * self.d\n\n for p in self.controller.parameters():\n t_shape = p.data.shape\n prod = np.prod(t_shape)\n if len(t_shape) == 1: p.data = torch.from_numpy(vec[pos_ix:pos_ix + prod].flatten())\n else: p.data = torch.from_numpy(vec[pos_ix:pos_ix + prod].reshape(t_shape[0],-1))\n pos_ix += prod",
"def get_v(r_div_R, z_div_L, Pi_div_DLP, k, alpha_ast, Bp, Bm, gp, gm, membrane_geometry):\n return CT.get_v(r_div_R, z_div_L, Pi_div_DLP, k, alpha_ast, Bp, Bm, gp, gm, membrane_geometry)",
"def uvmap(self, p):\n # bottom left corner of the plane\n p00 = self.position - (self.sx * self.n0) / 2 - (self.sy * self.n1) / 2\n dif_vector = p - p00\n u = np.dot(dif_vector, self.n0) / self.sx\n v = np.dot(dif_vector, self.n1) / self.sy\n return u, v",
"def project_onto_plane(vect):\n x, y, z = vect\n \n return (x, y, 0.)",
"def get_plane(self, scalar, plane, pval):\n\n if plane == 'yz' or plane == 'zy':\n # z along rows, y along columns\n return scalar[:, pval, :]\n elif plane == 'xz' or plane == 'zx':\n # x along columns, z along rows\n return scalar[:, :, pval]\n elif plane == 'xy' or plane == 'yx':\n # x along rows, y along columns\n return scalar[pval, :, :]",
"def plane(self):\r\n from lsst.analysis import utils\r\n return utils.fitplane(self.points, self.z)",
"def fit_to_plane(pts):\n # Compute x_mean, y_mean, z_mean\n \n n = len(pts)\n \n x_total = 0\n y_total = 0\n z_total = 0\n\n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_mean = x_total * 1.0 / n\n y_mean = y_total * 1.0 / n\n z_mean = z_total * 1.0 / n\n\n # Compute the p[i] = [x[i]-x_mean,y[i]-y.mean,z[i]-z.mean]\n p = []\n for i in range(n):\n p1 = pts[i][0] - x_mean\n p2 = pts[i][1] - y_mean\n p3 = pts[i][2] - z_mean\n p.append([p1, p2, p3])\n \n # Compute the matrix A\n a1 = 0\n a2 = 0\n a3 = 0\n a4 = 0\n a5 = 0\n a6 = 0\n for i in range(n):\n a1 += p[i][0] * p[i][0]\n a2 += p[i][0] * p[i][1]\n a3 += p[i][0] * p[i][2]\n a4 += p[i][1] * p[i][1]\n a5 += p[i][1] * p[i][2]\n a6 += p[i][2] * p[i][2]\n\n A = np.array([[a1, a2, a3], [a2, a4, a5], [a3, a5, a6]])\n\n # Compute the smallest eigen value and accordingly eigen vector of A\n w, v = np.linalg.eigh(A)\n\n # The minimal eigenvalue is w[0]\n eig = w[0]\n\n # The norm is eigenvector v[:,0]\n norm = v[:,0].tolist()\n d = -norm[0] * x_mean - norm[1] * y_mean - norm[2] * z_mean\n\n return norm, d",
"def fit_to_plane(pts):\n # Compute x_mean, y_mean, z_mean\n \n n = len(pts)\n \n x_total = 0\n y_total = 0\n z_total = 0\n\n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_mean = x_total * 1.0 / n\n y_mean = y_total * 1.0 / n\n z_mean = z_total * 1.0 / n\n\n # Compute the p[i] = [x[i]-x_mean,y[i]-y.mean,z[i]-z.mean]\n p = []\n for i in range(n):\n p1 = pts[i][0] - x_mean\n p2 = pts[i][1] - y_mean\n p3 = pts[i][2] - z_mean\n p.append([p1, p2, p3])\n \n # Compute the matrix A\n a1 = 0\n a2 = 0\n a3 = 0\n a4 = 0\n a5 = 0\n a6 = 0\n for i in range(n):\n a1 += p[i][0] * p[i][0]\n a2 += p[i][0] * p[i][1]\n a3 += p[i][0] * p[i][2]\n a4 += p[i][1] * p[i][1]\n a5 += p[i][1] * p[i][2]\n a6 += p[i][2] * p[i][2]\n\n A = np.array([[a1, a2, a3], [a2, a4, a5], [a3, a5, a6]])\n\n # Compute the smallest eigen value and accordingly eigen vector of A\n w, v = np.linalg.eigh(A)\n\n # The minimal eigenvalue is w[0]\n eig = w[0]\n\n # The norm is eigenvector v[:,0]\n norm = v[:,0].tolist()\n d = -norm[0] * x_mean - norm[1] * y_mean - norm[2] * z_mean\n\n return norm, d",
"def setParamsFromVector(self, params):\n #starting point of w_ih weights in vectorised params\n w_ih_start_pos = 0\n #end point of w_ih weights in vectorised params\n w_ih_end_pos = self.hiddenLayerSize * self.inputLayerSize\n\n self.w_ih = np.reshape( params[ w_ih_start_pos : w_ih_end_pos ], \\\n ( self.inputLayerSize, self.hiddenLayerSize ) )\n\n #end point of w_ho weights in vectorised params\n w_ho_end_pos = w_ih_end_pos + self.hiddenLayerSize * self.outputLayerSize\n\n self.w_ho = np.reshape( params[ w_ih_end_pos : w_ho_end_pos ], \\\n ( self.hiddenLayerSize, self.outputLayerSize))\n\n #end point of b_h biases in vectorised params\n b_h_end_pos = w_ho_end_pos + self.hiddenLayerSize\n \n self.b_h = params[ w_ho_end_pos : b_h_end_pos ]\n \n #end point of b_o biases in vectorised params\n b_o_end_pos = b_h_end_pos + self.outputLayerSize\n \n self.b_o = params[ b_h_end_pos : b_o_end_pos ]",
"def get_normal_vector_of_plane(p1, p2, p3):\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n ## print 'norm: '+str(np.linalg.norm(nvec))\n return nvec / np.linalg.norm(nvec)",
"def v_o(A,vd):\n return A*vd",
"def get_uv(u, v):\n uv = np.zeros((2, 2))\n uv[0][0] = u[0]\n uv[1][0] = u[1]\n uv[0][1] = v[0]\n uv[1][1] = v[1]\n return uv",
"def _VRF(self) -> array:\n pass"
] |
[
"0.81128526",
"0.63427055",
"0.58030635",
"0.57959634",
"0.5778007",
"0.5763449",
"0.5689331",
"0.56818783",
"0.56719047",
"0.56514084",
"0.56460345",
"0.5639332",
"0.5572977",
"0.5532811",
"0.5504314",
"0.5495645",
"0.54833466",
"0.54692966",
"0.54263157",
"0.5412707",
"0.5334838",
"0.5326467",
"0.53222376",
"0.53097796",
"0.53097796",
"0.5304497",
"0.53038085",
"0.5298461",
"0.5296445",
"0.5265891"
] |
0.8370257
|
0
|
Binary Lovasz hinge loss (version with positives and negatives inverted)
|
def neg_lovasz_hinge(logits, labels):
    # Invert the positive and negative classes
logits = -logits
labels = 1 - labels.float()
# Compute lovasz hinge as usual
signs = 2 * labels - 1
errors = (1 - signs * logits)
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), grad)
return loss
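
# Editor's usage sketch (assumes torch, torch.nn.functional as F and a
# lovasz_grad helper are available in this module, as the function above
# requires). Logits are raw scores and labels are binary {0, 1}, both flattened.
if __name__ == "__main__":
    _logits = torch.tensor([0.8, -1.2, 0.3])
    _labels = torch.tensor([1.0, 0.0, 1.0])
    print(neg_lovasz_hinge(_logits, _labels))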
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_hinge_loss_backward():\n from your_code import HingeLoss\n X = np.array([[-1, 2, 1], [-3, 4, 1]])\n w = np.array([1, 2, 3])\n y = np.array([1, -1])\n\n loss = HingeLoss(regularization=None)\n\n _true = np.array([-1.5, 2, 0.5])\n _est = loss.backward(X, w, y)\n print(_est)",
"def lovasz_hinge(self, logits, labels, per_image=True, ignore=None):\n if per_image:\n loss = (self.lovasz_hinge_flat(*self.flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore)) for\n log, lab in zip(logits, labels)).mean()\n else:\n loss = self.lovasz_hinge_flat(*self.flatten_binary_scores(logits, labels, ignore))\n return loss",
"def lovasz_hinge_flat(logits, labels, ignore_index):\n logits = logits.contiguous().view(-1)\n labels = labels.contiguous().view(-1)\n \n errors = hinge(logits, labels)\n errors_sorted, perm = torch.sort(errors, dim=0, descending=True)\n perm = perm.data\n gt_sorted = labels[perm]\n grad = lovasz_grad(gt_sorted)\n loss = torch.dot(F.elu(errors_sorted) + 1, grad)\n return loss",
"def __call__(self, a, y, hinge=1.0):\n\n check_loss_inputs(a, y)\n self.variables = (a,)\n scores = a.data\n correct_labels = (range(len(y)), y)\n correct_class_scores = scores[correct_labels] # Nx1\n\n M = scores - correct_class_scores[:, np.newaxis] + hinge # NxC margins\n not_thresh = np.where(M <= 0)\n Lij = M\n Lij[not_thresh] = 0\n Lij[correct_labels] = 0\n if _tracking.TRACK_GRAPH:\n TMP = np.ones(M.shape, dtype=float)\n TMP[not_thresh] = 0\n TMP[correct_labels] = 0 # NxC; 1 where margin > 0\n TMP[correct_labels] = -1 * TMP.sum(axis=-1)\n self.back = TMP\n self.back /= scores.shape[0]\n return np.sum(Lij) / scores.shape[0]",
"def objective(self):\n hinge_loss = tf.losses.hinge_loss(self.labels, self.predictions)\n\n return hinge_loss",
"def grad_hinge2(w, X, y, **kwargs):\n err = np.maximum(0, 1. - y * (np.dot(X, w)))\n return -1 * np.dot(X.T, err * y) / X.shape[0]",
"def lovasz_hinge(logits, labels, per_image=True, ignore=None):\n if per_image:\n def treat_image(log_lab):\n log, lab = log_lab\n log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0)\n log, lab = flatten_binary_scores(log, lab, ignore)\n return lovasz_hinge_flat(log, lab)\n losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32)\n loss = tf.reduce_mean(losses)\n else:\n loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))\n return loss",
"def hinge_loss(self, y_true=None, y_pred=None, decimal=5, **kwargs):\n y_true, y_pred, binary, representor, decimal = self.get_processed_data2(y_true, y_pred, decimal)\n if binary:\n # replacing 0 = -1\n y_true[y_true == 0] = -1\n y_pred[y_pred == 0] = -1\n res = np.mean([max(0, 1 - x * y) ** 2 for x, y in zip(y_true, y_pred)])\n return np.round(res, decimal)\n else:\n # Convert y_true to one-hot encoded array\n num_classes = len(np.unique(y_true))\n y_true = np.eye(num_classes)[y_true]\n neg = np.max((1 - y_true) * y_pred, axis=1)\n pos = np.sum(y_true * y_pred, axis=1)\n temp = neg - pos + 1\n temp[temp < 0] = 0\n return np.round(np.mean(temp), decimal)",
"def hinge(datax,datay,w):\n #hinge: si bien classifie, -y<w.x><0 sinon >0. On ne compte celui sont mal-classifies\n fx = np.dot(datax, w.T)\n return np.mean(np.where(-datay*fx<0,0,-datay*fx))",
"def binary_svm_loss(theta, X, y, C):\n\n m, d = X.shape\n grad = np.zeros(theta.shape)\n J = 0\n\n ############################################################################\n # TODO #\n # Implement the binary SVM hinge loss function here #\n # 4 - 5 lines of vectorized code expected #\n ############################################################################\n J=0.5*np.sum(theta**2)/m\n J=J+C*np.sum(np.maximum(0,1-np.multiply(y,(np.dot(X,theta)))))/m\n \n grad=theta/m\n temp_1=np.dot(X,theta)\n temp_2=np.multiply(y,temp_1)\n\n temp_3=y[temp_2<1]\n temp_4=X[temp_2<1,:]\n temp_5=np.dot(temp_4.T,temp_3)\n grad=grad-temp_5*C/m\n\n\n# for j in range(d):\n# \tgrad[j]=float(theta[j]/m)\n# \tfor k in range(m):\n#\t \tif (y[k]*(np.dot(theta,X[k,:]))<1):\n#\t \t\tgrad[j]=grad[j]-float(C*y[k]*X[k,j]/m)\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return J, grad",
"def lovasz_hinge_flat(self, logits, labels):\n if len(labels) == 0:\n # only void pixels, the gradients should be 0\n return logits.sum() * 0.\n signs = 2. * labels.float() - 1.\n errors = (1. - logits * torch.tensor(signs, requires_grad=True))\n errors_sorted, perm = torch.sort(errors, dim=0, descending=True)\n perm = perm.data\n gt_sorted = labels[perm]\n grad = _lovasz_grad(gt_sorted)\n loss = torch.dot(F.relu(errors_sorted), grad)\n return loss",
"def hinge(y_true, y_pred):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n y_true = _maybe_convert_labels(y_true)\n return backend.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)",
"def binary_svm_loss(theta, X, y, C):\n\n m, d = X.shape\n grad = np.zeros(theta.shape)\n J = 0\n\n ############################################################################\n # TODO #\n # Implement the binary SVM hinge loss function here #\n # 4 - 5 lines of vectorized code expected #\n ############################################################################\n h = np.dot(X, theta)\n J = 1.0 / 2 / m * np.sum(theta**2) + 1.0 * C / m * np.sum(np.max([np.zeros(m), 1 - y * h], axis = 0))\n\n grad = 1.0 / m * theta + 1.0 * C / m * np.dot(X.T, -y * (y * h < 1))\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return J, grad",
"def obj(beta, lambd, x, y, h=0.5):\n yt = y*x.dot(beta)\n hinge_loss = (1+h-yt)**2/(4*h)*(np.abs(1-yt) <= h) + (1-yt)*(yt < (1-h)) \n\n return np.mean(hinge_loss) + lambd*np.dot(beta, beta)",
"def activation(h):\n\n if(h > 0):\n return 1\n\n else:\n return 0",
"def hinge_g(datax,datay,w):\n #chaque exemplaire donne un gradient\n fx=-datay*np.dot(datax,w.T)\n fx2=-datay.reshape(-1,1)*datax\n res=[]\n for i in range(len(fx)):\n if fx[i]<0:\n res.append(np.zeros(len(w)))\n else:\n res.append(fx2[i])\n return np.mean(np.array(res), axis=0)\n\n\n if len(datax.shape)==1:\n datax = datax.reshape(1,-1)\n n,d=datax.shape\n yx=[-datay[i].reshape(1,-1)*datax[i,:] if -datay[i]*np.dot(datax[i,:],w.T)>0 else np.zeros(d) for i in range(n)]\n\n return np.mean(yx,axis=0)",
"def loss(A, Y):\n return A - Y",
"def lovasz_hinge_flat(logits, labels):\n\n def compute_loss():\n labelsf = tf.cast(labels, logits.dtype)\n signs = 2. * labelsf - 1.\n errors = 1. - logits * tf.stop_gradient(signs)\n errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name=\"descending_sort\")\n gt_sorted = tf.gather(labelsf, perm)\n grad = lovasz_grad(gt_sorted)\n loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name=\"loss_non_void\")\n return loss\n\n # deal with the void prediction case (only void pixels)\n loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),\n lambda: tf.reduce_sum(logits) * 0.,\n compute_loss,\n strict=True,\n name=\"loss\"\n )\n return loss",
"def __loss(self, h, y):\n return (-y*np.log(h)-(1-y)*np.log(1-h)).mean()",
"def get_loss(self, xs, y):\n \"*** YOUR CODE HERE question 4 ***\"\n return nn.SoftmaxLoss(self.run(xs), y)",
"def get_hinge_loss_val(w, C, y, vals):\n \n outlier_error = 0\n for i in range(len(y)):\n outlier_error += max(0, 1 - y[i] * vals[i])\n\n return 0.5 * pow(norm(w), 2) + C * outlier_error",
"def log_lp_hinge(p, v, inplace=False):\n if inplace:\n out = v\n else:\n out = np.zeros_like(v)\n out[:] = -(np.maximum(1.0 - v, 0) ** p)\n return out",
"def squared_hinge(y_true, y_pred):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n y_true = _maybe_convert_labels(y_true)\n return backend.mean(\n math_ops.square(math_ops.maximum(1. - y_true * y_pred, 0.)), axis=-1)",
"def binary_hinge_loss(predictions, targets, delta=1, log_odds=None,\n binary=True):\n if log_odds is None: # pragma: no cover\n raise FutureWarning(\n \"The `log_odds` argument to `binary_hinge_loss` will change \"\n \"its default to `False` in a future version. Explicitly give \"\n \"`log_odds=True` to retain current behavior in your code, \"\n \"but also check the documentation if this is what you want.\")\n log_odds = True\n if not log_odds:\n predictions = theano.tensor.log(predictions / (1 - predictions))\n if binary:\n targets = 2 * targets - 1\n predictions, targets = align_targets(predictions, targets)\n return theano.tensor.nnet.relu(delta - predictions * targets)",
"def inverse_sigmoid(x):\n y = -1 * np.log((1-x)/x)\n return y",
"def svm_loss(x, y):\n x = np.squeeze(x)\n N = x.shape[0]\n yt = y\n yt[y==0]=-1\n tmp = 1-yt*x\n mask = np.ones_like(tmp)\n mask[tmp<=0] = 0\n tmp = tmp*mask\n loss = np.sum(tmp)/N\n \n dx = -yt*mask/N\n # dx = np.reshape(dx,[dx.shape[0],1])\n return loss, dx",
"def forcing_binary_loss(h):\n n_batch = h.data.shape[0]\n n_units = h.data.shape[1]\n\n loss = -1 * F.sum((h - 0.5) ** 2) / (n_units * n_batch)\n return loss",
"def get_lp_hinge(p):\n\n def f(v, inplace=False):\n return Transform.lp_hinge(p, v, inplace)\n\n return f",
"def compute_loss_lasso(y, tx, w, lambda_):\n e = y - tx.dot(w)\n\n return e.dot(e)/(2 * len(e)) + lambda_ * sum(abs(w))",
"def tversky_loss(yhat, ytrue):\n return torch.mean(1 - tversky_index(yhat, ytrue))"
] |
[
"0.7503922",
"0.6930668",
"0.6631124",
"0.6568263",
"0.65180033",
"0.64716583",
"0.6459475",
"0.6447354",
"0.64316416",
"0.64218694",
"0.6414487",
"0.63204765",
"0.6296678",
"0.62932754",
"0.62772226",
"0.61665654",
"0.6147692",
"0.61436665",
"0.6130597",
"0.6096201",
"0.6085601",
"0.60828763",
"0.6064079",
"0.60205317",
"0.60096455",
"0.59962046",
"0.59944",
"0.59811884",
"0.59790426",
"0.59243906"
] |
0.73268783
|
1
|
Override of the private function that sets all the links for the contents to download from the FOMC website, from from_year (effectively min(2015, from_year)) up to the most recent year
|
def _get_links(self, from_year):
self.links = []
self.titles = []
self.speakers = []
self.dates = []
r = requests.get(self.calendar_url)
soup = BeautifulSoup(r.text, "html.parser")
if self.verbose:
print("Getting links for press conference scripts...")
presconfs = soup.find_all(
"a", href=re.compile("^/monetarypolicy/fomcpresconf\d{8}.htm")
)
presconf_urls = [
self.base_url + presconf.attrs["href"] for presconf in presconfs
]
for presconf_url in presconf_urls:
r_presconf = requests.get(presconf_url)
soup_presconf = BeautifulSoup(r_presconf.text, "html.parser")
contents = soup_presconf.find_all(
"a", href=re.compile("^/mediacenter/files/FOMCpresconf\d{8}.pdf")
)
for content in contents:
# print(content)
self.links.append(content.attrs["href"])
self.speakers.append(
self._speaker_from_date(self._date_from_link(content.attrs["href"]))
)
self.titles.append("FOMC Press Conference Transcript")
self.dates.append(
datetime.strptime(
self._date_from_link(content.attrs["href"]), "%Y-%m-%d"
)
)
if self.verbose:
print("{} links found in current page.".format(len(self.links)))
# Archived before 2015
if from_year <= 2014:
print("Getting links from archive pages...")
for year in range(from_year, 2015):
yearly_contents = []
fomc_yearly_url = (
self.base_url
+ "/monetarypolicy/fomchistorical"
+ str(year)
+ ".htm"
)
r_year = requests.get(fomc_yearly_url)
soup_yearly = BeautifulSoup(r_year.text, "html.parser")
presconf_hists = soup_yearly.find_all(
"a", href=re.compile("^/monetarypolicy/fomcpresconf\d{8}.htm")
)
presconf_hist_urls = [
self.base_url + presconf_hist.attrs["href"]
for presconf_hist in presconf_hists
]
for presconf_hist_url in presconf_hist_urls:
# print(presconf_hist_url)
r_presconf_hist = requests.get(presconf_hist_url)
soup_presconf_hist = BeautifulSoup(
r_presconf_hist.text, "html.parser"
)
yearly_contents = soup_presconf_hist.find_all(
"a",
href=re.compile("^/mediacenter/files/FOMCpresconf\d{8}.pdf"),
)
for yearly_content in yearly_contents:
# print(yearly_content)
self.links.append(yearly_content.attrs["href"])
self.speakers.append(
self._speaker_from_date(
self._date_from_link(yearly_content.attrs["href"])
)
)
self.titles.append("FOMC Press Conference Transcript")
self.dates.append(
datetime.strptime(
self._date_from_link(yearly_content.attrs["href"]),
"%Y-%m-%d",
)
)
if self.verbose:
print(
"YEAR: {} - {} links found.".format(
year, len(presconf_hist_urls)
)
)
print("There are total ", len(self.links), " links for ", self.content_type)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _download_backwards(self, date_str):\n self.url = f\"http://example.com/new/url/{date_str}\"\n self.html = self._download()",
"def get_year_with_links():\n response = get_response(MAIN_PAGE)\n if response.ok:\n soup = BeautifulSoup(response.text, 'html.parser')\n years_li = soup.find_all(\n 'md-card-footer'\n )\n years_dict = {}\n # Not including the last <a> tag because that is not relevant.\n for years_html in years_li[:-1]:\n year = [num for num in years_html.text.split() if num.isdigit()][0]\n relative_link = years_html.select('a')[0].get('href')\n full_link = HOME_PAGE + relative_link\n years_dict[year] = full_link\n return years_dict\n else:\n print('Something Went Wrong')\n print(f'Status Code: {response.status_code}')\n sys.exit(1)",
"def date_setup(date, page_offset, url,c):\r\n\r\n if date <= 10:\r\n page_offset = 0\r\n url = \"http://data.terapeak.com/?id=0&search=1&view=item_browse&query=iphone+5s&date=2015-02-1&date_range=1&buyer_country_id=1&condition=rollup_3&type%5Bfixed%5D=1&from_start_price=100&to_start_price=800&from_end_price=100&to_end_price=800&seller_country_id=1&txn_site_id=0&numPages=12&siteID=0&offset={0}\".format(page_offset)\r\n u = list(url)\r\n new = str(date)\r\n u[86] = new #this will update the date from date=2014-09-1 to date=2014-09-2\r\n date_ed_url = \"\".join(u)\r\n #print(edited)\r\n page_offset_update(date, page_offset, date_ed_url, c) # the date has now been updated and the page_offset has been reset to 0\r\n else:\r\n with open(\"5s_Feb_2015_.csv\", \"w\", newline='', encoding='UTF-8') as f:\r\n writer = csv.writer(f)\r\n writer.writerows(listof_listof_lists)\r\n print(\"done\")\r\n quit",
"def update_links(self):\n for a in self.book.xpath(\"//a[@href]\"):\n href = a.xpath(\"@href\")[0]\n index_list = a.xpath(\"@data-index\")\n \n ### If there is no data-index it is assumed link comes from initial book landing page (the index page)\n if index_list == []:\n index = self.manager.get_page_index(\"index.html\")\n else:\n index = index_list[0]\n \n ### Fix people who are bad at links\n if href.startswith(\"www.\"):\n href = \"https://\" + href\n a.set(\"href\", href)\n \n ## Correct for ambiguity (Naive assumption that this error only occours on index page)\n if href == \"./\":\n href = \"index.html\"\n \n if not href:\n return None\n \n href = self.manager.convert_link(href, index)\n a.set(\"href\", href)",
"async def All_orgs():\n\n links_13 = []\n links_14 = []\n valid_url = \"/?archive/?gsoc/\\d+[0-9]/orgs/[a-zA-Z]+\"\n for year in range(2009, 2016):\n year_url = melange + \"/archive/gsoc/{}\".format(year)\n soup = await get_page(year_url)\n\n for url in soup.find_all('a'):\n if re.match(valid_url, url.get(\"href\")):\n if year <= 2013:\n links_13.append(join(melange, url.get(\"href\")[1:]))\n else:\n links_14.append(join(melange, url.get(\"href\")[1:]))\n return links_13, links_14",
"def extract_links():\n br = mechanize.Browser()\n br.open(BASE_URL)\n f = open('data/svodki/alllinks.csv', 'w')\n calurls = []\n # Collect all calendar urls with reports\n for year in range(2005, 2013):\n for month in range(1, 13):\n calurls.append([year, month, CALEND_URLPAT %(year, month)])\n\n # Update for current year (needs fixes later)\n for year in range(2013, 2014):\n for month in range(1, 3):\n calurls.append([year, month, CALEND_URLPAT %(year, month)])\n # Process calendar urls one by one\n for year, month, calurl in calurls:\n print calurl\n u = br.open(calurl)\n data = u.read()\n u.close()\n soup = BeautifulSoup(data)\n slist = soup.find('ul', attrs={'class': 'emergency_list'})\n urls = slist.findAll('a')\n for url in urls:\n s = '%s\\t%s\\t%s\\t%s\\t' % (unicode(year), unicode(month), url.text, urljoin(BASE_URL, url['href']))\n f.write((s + '\\n').encode('utf8'))\n print s\n f.close()",
"def get_track_urls(year):\r\n # assert int(year) >= 2023, f\"only support year >= 2023, but get {year}!!!\"\r\n project_root_folder = os.path.abspath(\r\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\r\n dat_file_pathname = os.path.join(\r\n project_root_folder, 'urls', f'track_archive_url_AAAI_{year}.dat'\r\n )\r\n proceeding_th_dict = {\r\n 1980: 1,\r\n 1902: 2,\r\n 1983: 3,\r\n 1984: 4,\r\n 1986: 5,\r\n 1987: 6,\r\n 1988: 7,\r\n 1990: 8,\r\n 1991: 9,\r\n 1992: 10,\r\n 1993: 11,\r\n 1994: 12,\r\n 1996: 13,\r\n 1997: 14,\r\n 1998: 15,\r\n 1999: 16,\r\n 2000: 17,\r\n 2002: 18,\r\n 2004: 19,\r\n 2005: 20,\r\n 2006: 21,\r\n 2007: 22,\r\n 2008: 23\r\n }\r\n if year >= 2023:\r\n base_url = r'https://ojs.aaai.org/index.php/AAAI/issue/archive'\r\n headers = {\r\n 'User-Agent': user_agents[-1],\r\n 'Host': 'ojs.aaai.org',\r\n 'Referer': \"https://ojs.aaai.org\",\r\n 'GET': base_url\r\n }\r\n if os.path.exists(dat_file_pathname):\r\n with open(dat_file_pathname, 'rb') as f:\r\n content = pickle.load(f)\r\n else:\r\n req = urllib.request.Request(url=base_url, headers=headers)\r\n content = urllib.request.urlopen(req).read()\r\n # content = open(f'..\\\\AAAI_{year}.html', 'rb').read()\r\n with open(dat_file_pathname, 'wb') as f:\r\n pickle.dump(content, f)\r\n soup = BeautifulSoup(content, 'html5lib')\r\n tracks = soup.find('ul', {'class': 'issues_archive'}).find_all('li')\r\n track_urls = dict()\r\n for tr in tracks:\r\n h2 = tr.find('h2')\r\n this_track = slugify(h2.a.text)\r\n if this_track.startswith(f'aaai-{year-2000}'):\r\n this_track += slugify(h2.div.text) + '-' + this_track\r\n this_url = h2.a.get('href')\r\n track_urls[this_track] = this_url\r\n print(f'find track: {this_track}({this_url})')\r\n else:\r\n if year >= 2010:\r\n proceeding_th = year - 1986\r\n elif year in proceeding_th_dict:\r\n proceeding_th = proceeding_th_dict[year]\r\n else:\r\n print(f'ERROR: AAAI proceeding was not held in year {year}!!!')\r\n return\r\n\r\n base_url = f'https://aaai.org/proceeding/aaai-{proceeding_th:02d}-{year}/'\r\n headers = {\r\n 'User-Agent': user_agents[-1],\r\n 'Host': 'aaai.org',\r\n 'Referer': \"https://aaai.org\",\r\n 'GET': base_url\r\n }\r\n if os.path.exists(dat_file_pathname):\r\n with open(dat_file_pathname, 'rb') as f:\r\n content = pickle.load(f)\r\n else:\r\n req = urllib.request.Request(url=base_url, headers=headers)\r\n content = urllib.request.urlopen(req).read()\r\n # content = open(f'..\\\\AAAI_{year}.html', 'rb').read()\r\n with open(dat_file_pathname, 'wb') as f:\r\n pickle.dump(content, f)\r\n soup = BeautifulSoup(content, 'html5lib')\r\n tracks = soup.find('main', {'class': 'content'}).find_all('li')\r\n track_urls = dict()\r\n for tr in tracks:\r\n this_track = slugify(tr.a.text)\r\n this_url = tr.a.get('href')\r\n track_urls[this_track] = this_url\r\n print(f'find track: {this_track}({this_url})')\r\n return track_urls",
"def BLS_QCEW_URL_helper(*, build_url, year, **_):\n urls = []\n\n url = build_url\n url = url.replace('__year__', str(year))\n urls.append(url)\n\n return urls",
"def download_earning_reports(self, form='10-Q', year_range=3, force_update=False):\n if self.components.empty:\n self.get_compo_list()\n\n cik_series = self.components['CIK'].astype(str)\n cik_to_ticker = pd.Series(cik_series.index.values, index=cik_series).to_dict()\n\n sec_archive_base = 'https://www.sec.gov/Archives'\n xbrl_idx_base = sec_archive_base + '/edgar/full-index'\n xbrl_pattern = re.compile(r'([0-9]+)\\|(.*)\\|%s\\|(.*)\\|(.*)'%form)\n link_pattern = re.compile(r'[-\\.txt]')\n #instance_pattern = re.compile(r'instance=[\\'\\\"]*([\\w\\-]+\\.xml)[\\'\\\"]*') # e.g. <Report instance=\"amtd-20170630.xml\">\n instance_pattern = re.compile(r'>([\\w]+-[0-9]+\\.xml)<') # e.g. <File>bebe-20140104.xml</File>\n year_end = dt.datetime.today().year\n year_start = year_end - year_range\n for year in range(year_start, year_end+1):\n for quarter in ['QTR1', 'QTR2', 'QTR3', 'QTR4']:\n xbrl_idx = '%s/%s/%s/xbrl.idx' %(xbrl_idx_base, year, quarter)\n try:\n r = requests.get(xbrl_idx)\n except requests.exceptions.RequestException as e:\n print('Error: xbrl.idx request exception, link %s' %xbrl_idx)\n print(e)\n continue\n if r.status_code != requests.codes.ok:\n print('Error: requests get failure, url %s, status_code %d' %(xbrl_idx, r.status_code))\n continue\n # Parse each line and extract lines with specified form(e.g.10-Q).\n #\n # Example:\n # CIK|Company Name|Form Type|Date Filed|Filename\n # 1173313|American BriVision (Holding) Corp|10-K/A|2017-09-22|edgar/data/1173313/0001213900-17-009907.txt\n # 1173313|American BriVision (Holding) Corp|10-Q|2017-08-21|edgar/data/1173313/0001213900-17-009012.txt\n # 1173313|American BriVision (Holding) Corp|S-1/A|2017-07-17|edgar/data/1173313/0001213900-17-007661.txt\n # 1173313|American BriVision (Holding) Corp|S-1/A|2017-09-22|edgar/data/1173313/0001213900-17-009909.txt\n # 1173431|TD AMERITRADE HOLDING CORP|10-Q|2017-07-24|edgar/data/1173431/0001173431-17-000108.txt\n # 1173431|TD AMERITRADE HOLDING CORP|8-K|2017-07-18|edgar/data/1173431/0001173431-17-000104.txt\n all_edgar_links = dict() # CIK-to-link dict\n for line in r.text.splitlines():\n m = xbrl_pattern.findall(line)\n if len(m) > 0:\n all_edgar_links[m[0][0]] = m[0][-1]\n # Download links\n for cik in all_edgar_links.keys():\n if cik not in cik_to_ticker.keys():\n #print('Skip CIK ' + cik) # FIXME: TEST ONLY\n continue\n link = all_edgar_links[cik] # e.g. 'edgar/data/1173431/0001173431-17-000108.txt'\n link=link.split('/') # e.g. ['edgar', 'data', '1173431', '0001173431-17-000108.txt']\n link[-1] = link_pattern.sub('', link[-1]) # e.g. '000117343117000108'\n link = '/'.join(link) # e.g. 'edgar/data/1173431/000117343117000108'\n url = sec_archive_base+'/'+link+'/FilingSummary.xml'\n try:\n r = requests.get(url)\n except requests.exceptions.RequestException as e:\n print('%s: FilingSummary request failure, link %s' %(cik_to_ticker[cik], url))\n print(e)\n continue\n m = instance_pattern.search(r.text)\n if m and len(m.groups()) > 0:\n xbrl_file = m.groups()[0]\n print('%s => %s => %s' %(cik_to_ticker[cik], cik, xbrl_file)) # FIXME: TEST ONLY\n # download file url = sec_archive_base+'/'+link+'/'+xbrl_file\n ticker = Symbol(cik_to_ticker[cik])\n ticker.download_earning(sec_archive_base+'/'+link, xbrl_file, form, force_update=force_update)\n else:\n print('Error: failed to find XBRL file for %s, url %s, status_code %d' %(cik_to_ticker[cik], url, r.status_code))\n continue",
"def test_no_exception_when_from_year_before_1900(self):\n req = MockRequest(self.env, args={\n 'from': '1899-12-23',\n 'daysback': 7,\n })\n\n TimelineModule(self.env).process_request(req)\n\n self.assertIn('prev', req.chrome['links'])",
"def test_spider_gets_specific_year(self):\n spider = Eia923Spider()\n resp = factories.TestResponseFactory(eia923=True)\n\n result = spider.form_for_year(resp, 2007)\n\n assert result is not None\n assert result.url == \"https://www.eia.gov/electricity/data/eia923/\" \\\n \"archive/xls/f906920_2007.zip\"\n assert result.meta[\"year\"] == 2007\n\n for year in range(2001, 2019):\n result = spider.form_for_year(resp, year)\n assert result is not None",
"def browsepage(year,view,slow=True):\n params={\n \"year_select\":year\n , \"view_select\":view\n }\n r=post(URLS['form-action'],params)\n x=fromstring(r.content)\n orglinks=x.cssselect('a.backtolist')\n d=[]\n for orglink in orglinks:\n if \"#\"==orglink.attrib['href'][0]:\n #Link just brings you to the top of the page\n pass\n else:\n d.append({\"year\":year,\"view\":view,\"name\":orglink.text,\"href\":orglink.attrib['href']})\n\n save(['href'],d,'links')",
"def browsepage(year,view,slow=True):\n params={\n \"year_select\":year\n , \"view_select\":view\n }\n r=post(URLS['form-action'],params)\n x=fromstring(r.content)\n orglinks=x.cssselect('a.backtolist')\n d=[]\n for orglink in orglinks:\n if \"#\"==orglink.attrib['href'][0]:\n #Link just brings you to the top of the page\n pass\n else:\n d.append({\"year\":year,\"view\":view,\"name\":orglink.text,\"href\":orglink.attrib['href']})\n\n save(['href'],d,'links')",
"def pangea_scrape(target, landing_zone=os.path.join('static', 'data')):\n with open(target) as open_download_list:\n for line in open_download_list:\n items = [x.strip() for x in line.split('\\t')]\n if re.match('^\\d{4}-.+', items[0]): # if download list line starts with a date\n link = items[-1]\n download = requests.get(link)\n download_path = os.path.join(landing_zone,\n os.path.basename(link))\n with open(download_path, 'w') as fid:\n print(link, download.status_code)\n fid.write(download.content)",
"def scrape_article_links(year: int) -> List[str]:\n # Take into considerations leap years and days when no articles are published\n pass",
"def getfullURL(date):\n\tbase_url = \"https://www.gpo.gov/fdsys/pkg/CREC-\"+date+\"/pdf/CREC-\"+date+\".pdf\"\n\treturn base_url",
"def _parse_links(self, item, start, links_list):\n result_list = []\n target_str_1 = start.strftime(\"%m-%d-%Y\").replace(\" 0\", \" \")\n target_str_2 = start.strftime(\"%m-%d-%y\").replace(\" 0\", \" \")\n for item in links_list:\n if item[\"date\"] in target_str_1 or item[\"date\"] in target_str_2:\n new_dict = {}\n new_dict[\"href\"] = item[\"href\"]\n new_dict[\"title\"] = item[\"title\"]\n result_list.append(new_dict)\n return result_list",
"def create_urls(years):\n urls = []\n for year in years:\n url = f\"http://billboardtop100of.com/{year}-2/\"\n urls.append(url)\n return urls",
"def start_requests(self):\n initial_year = self.start_date.year\n end_year = datetime.date.today().year\n for year in range(initial_year, end_year + 1):\n yield Request(\n f\"{self.GAZETTE_URL}?dir={year}\",\n meta={\"year\": year},\n callback=self.parse_year,\n )",
"def setAnchorDateYear(self, value):\n return self._set(anchorDateYear=value)",
"def getURL(date):\n\tbase_url = \"https://www.gpo.gov/fdsys/pkg/CREC-\"+date+\"/pdf/CREC-\"+date+\".pdf\"\n\tprint base_url",
"def increment_year(self):",
"def _parse_links(self, response, start):\n links = self.document_date_map[start.date()]\n for link in response.css(\".agenda-min-pres .field a\"):\n link_url = response.urljoin(link.xpath(\"@href\").extract_first())\n title = link.xpath(\"./text()\").extract_first()\n if title.strip().startswith(\"Agenda\"):\n title = \"Agenda\"\n links.append(\n {\"title\": re.sub(r\"\\s+\", \" \", title).strip(), \"href\": link_url}\n )\n return links",
"def download_files(valid_links: list) -> list:\n print('Starting process...')\n print('')\n\n year_month_filepath = []\n\n for link_info in valid_links:\n\n # Get file extension\n extension = link_info[0].split('.')[-1]\n\n # Link to download\n link_to_download = link_info[0]\n\n # Get month\n month = link_info[1]\n\n # Get year\n year = link_info[2]\n\n # Create a standard filename to save\n file_name = f'{year}-{month}.{extension}'\n\n print(f'Downloading... {link_to_download} Saving... {file_name}')\n\n # Create a link to save into ./file directory\n link_to_save = f'./file/{file_name}'\n\n # Download file and save it\n wget.download(link_to_download, out=link_to_save)\n\n\n # Special treatment to zip and xlsx file\n if extension == 'zip':\n\n # Get right link to save (.csv) from zip function\n link_to_save = get_file_into_zip(link_to_save)\n\n elif extension == 'xlsx':\n # Get right link to save (.csv) from xlsx function\n link_to_save = excel2csv(link_to_save)\n\n # Include the tuple into a list\n year_month_filepath.append((year, month, link_to_save))\n\n print('Finishing process...')\n\n return year_month_filepath",
"def get_organizations_list_with_links(year_link):\n response = get_response(year_link)\n if response.ok:\n soup = BeautifulSoup(response.text, 'html.parser')\n orgs_li = soup.find_all(\n 'li', attrs={'class': 'organization-card__container'})\n orgs_dict = {}\n for orgs_html in orgs_li:\n org_name = orgs_html.select('h4')[0].text.replace('\\n', '')\n relative_link = orgs_html.select('a')[0].get('href')\n full_link = HOME_PAGE + relative_link\n orgs_dict[org_name] = full_link\n return orgs_dict\n else:\n print('Something Went Wrong')\n print(f'Status Code: {response.status_code}')\n sys.exit(1)",
"def download_data(self):\n content = requests.get(self.TOP_250_LIST)\n soup = BeautifulSoup(content.content, 'lxml')\n movies = soup.select('tbody.lister-list tr')\n for m in movies:\n title_column = m.select('td.titleColumn')\n link = self.format_link(title_column[0].a['href'])\n title = self.format_title(title_column[0].a.string.encode('utf-8'))\n path = 'pages/{}.html'.format(title)\n if os.path.isfile(path):\n continue\n response = requests.get(link)\n with open(path, 'wr') as f:\n f.write(response.content)",
"def download():\n now_dt = dt.datetime.now()\n return render_template(\n 'resume/home.html',\n age=relativedelta(now_dt, dt.datetime(day=19, month=3, year=1983)).years,\n current_year=now_dt.year,\n )",
"def get_all_links_from_html(website: str) -> list:\n\n # Get website\n r = requests.get(website)\n\n # Parse it into html variable\n html = parser.fromstring(r.text)\n\n # Get all items of internal-link class (This class define each month of each year in website)\n items_from_class = html.xpath(\"//a[@class='internal-link']\")\n\n # Get all links from items into a list of tuples where the first element is the link to download, second element is\n # the month and the third is the year.\n # - href is the link to download\n # - text is the month (as string)\n # - year is taken from the link to download\n all_links = [(item.attrib['href'], item.text, item.attrib['href'].split('/')[-2]) for item in items_from_class]\n\n return all_links",
"def setUp(self):\n self.url = reverse(\"td_biblio:entry_list\")\n self.paginate_by = 20\n self.n_publications_per_year = 3\n self.start_year = 2000\n self.end_year = 2014\n self.n_publications = self.end_year - self.start_year\n self.n_publications *= self.n_publications_per_year\n self.n_authors = self.n_publications * 3\n self.publications_years = []\n self.max_page_num = self.n_publications / self.paginate_by\n if self.n_publications % self.paginate_by:\n self.max_page_num += 1\n\n # Entry (14 * 3 = 42)\n for y in range(self.start_year, self.end_year, 1):\n for i in range(1, 1 + self.n_publications_per_year):\n date = datetime.date(y, i, 1)\n EntryWithAuthorsFactory(publication_date=date)\n self.publications_years.append(y)",
"def build_url_dates(self, pattern, ext, rename=None, date_type=\"Monthly\"):\n\n\n if ext in ('.gdx', '.XML', '.pdf'):\n single_mode = True\n else:\n single_mode = False\n\n if date_type == \"Monthly\":\n self.url_dates = [self.parse_monthly_dates(x['href'], pattern, ext,\n rename=rename)for x in self.pattern_files]\n\n self.url_base = {self.parse_monthly_dates(x['href'], pattern, ext,\n rename=rename): x['href'] for\n x in self.pattern_files}\n\n\n elif date_type == \"Daily\":\n all_dates = [self.parse_daily_dates(x['href'], pattern, ext,\n rename=rename) for\n x in self.pattern_files]\n\n self.url_dates = list(itertools.chain.from_iterable(all_dates))\n\n self.url_base = {self.parse_daily_dates(x['href'], pattern, ext,\n rename=rename,\n single_mode=single_mode): x['href'] for\n x in self.pattern_files}\n\n return self"
] |
[
"0.62690467",
"0.5965184",
"0.58395374",
"0.5766525",
"0.56891763",
"0.5681498",
"0.5655357",
"0.5437689",
"0.5355802",
"0.53454053",
"0.53197294",
"0.531165",
"0.531165",
"0.5307396",
"0.525201",
"0.52506924",
"0.52495944",
"0.5240057",
"0.52300364",
"0.5228289",
"0.52204984",
"0.5214434",
"0.5210521",
"0.52021813",
"0.51995754",
"0.51767516",
"0.51761496",
"0.516431",
"0.51612914",
"0.5140019"
] |
0.6953851
|
0
|
Check if the folder contains the expected number of episodes and if they are complete.
|
def check_folder(env_name, number_episodes):
path = os.path.join(os.environ["SRL_DATASET_PATH"], 'sample_benchmark2', env_name)
    # Count the numerically named episode folders and check the count matches the expected number
environments_count = 0
for filename in os.listdir(path):
try:
int_filename = int(filename)
environments_count += 1
except:
pass
assert environments_count == number_episodes
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def objectsReady(self, n):\n return len(self.files) >= n",
"def in_folder(self):\n return len(os.path.split(self.file_path)) > 1",
"def is_done(self):\n return_val = False\n for name in os.listdir(self.results_dir_path):\n if name.startswith('top_genes_per_phenotype'):\n return_val = True\n return return_val",
"def assert_data_correct(self) -> bool:\n if not self.training_folder.exists():\n return False\n # 27: number of characters\n # 27*2: 27 original font characters and 27 folders with morphed version\n if len(list(self.training_folder.iterdir())) not in [27, 27 * 2]:\n return False\n # assert that each character folder has the expected number of images inside\n # expected number is repetitions + original, or just original if no morphing\n # took place\n for directory in self.training_folder.iterdir():\n img_count = len(list(directory.iterdir()))\n if img_count != self.repetitions + 1 and img_count != 1:\n return False\n return True",
"def check_dataset(number_episode_dics):\n\n for env_name in number_episode_dics.keys():\n\n check_folder(env_name, number_episode_dics[env_name])",
"def check_finish(experiment, ln):\n # Generate the system flag\n flag = False\n # If the library is in the results folder, it has already been formatted\n name = \"library\" + str(ln) \n if name in os.listdir(experiment[\"Folder\"] + \"results/\"):\n return flag\n # Create a shortcut for the folder name\n folder = experiment[\"Folder\"] + name + \"/\"\n # Get the total number of iterations completed\n iteration = SHARING.iteration_counter(folder, False)\n # If the iterations completed is identical to the amount that should have\n # been completed, the library is finished\n if iteration == experiment['IPRO Iterations']:\n flag = True \n return flag",
"def validate(base_path: Path,\n max_size: int) -> None:\n valid = invalid = 0\n for path, is_valid in validate_videos(base_path, max_size):\n print(colorama.Fore.GREEN if is_valid else colorama.Fore.RED,\n \"Processing\", end='', sep='')\n\n if is_valid:\n valid += 1\n print(f\"{get_info(path, short=True)} is valid\".rjust(50, '.'))\n else:\n invalid += 1\n print(f\"{get_info(path, short=True)} is invalid\".rjust(50, '.'))\n\n print(colorama.Fore.GREEN, \"=\" * 60, colorama.Fore.RESET, sep='')\n print(f\"Total files count: {len(os.listdir(base_path))}\")\n if valid == invalid == 0:\n print(f\"No video found\")\n return\n\n print(f\"Total videos count: {valid + invalid}\\n\")\n if invalid == 0:\n print(f\"All {valid} videos are valid\")\n else:\n print(f\"Total valid videos: {valid}\")\n print(f\"Total invalid videos: {invalid}\")",
"def _check_episode_end_condition(self):\n vehicles = self._get_available_vehicles()\n if np.sum(vehicles == 0) < self.episode_threshold:\n return True\n else:\n return False",
"def _isDone(self):\n return self.steps >= self.max_steps or len(self.food_ids) <= 0",
"def check_validation_results():\n with open('prep/datapackage_validation.json') as report_file:\n report = json.load(report_file)\n\n tasks = report['tasks']\n assert len(tasks) == 5\n\n for task in tasks:\n\n errors = task['errors']\n\n # as a first approximation, allow up to 300 errors on the appearances file\n # this is to account for a common foreign key exception caused by the source data\n if task['resource']['name'] == 'appearances':\n errors_threshold = 300\n # for the rest of the files do nor allow errors at all\n else:\n errors_threshold = 0\n\n if len(errors) > errors_threshold:\n print(f\">={len(errors)} rows did not pass validations!\")\n return False\n else:\n return True",
"def get_tv_episodes(self) -> int:\n return len(glob.glob(os.path.join(\n os.path.dirname(self.file),\n f\"*{os.path.splitext(self.file)[-1]}\"\n )))",
"def _check_episode_start_condition(self):\n vehicles = self._get_available_vehicles()\n if np.sum(vehicles == 0) >= self.episode_threshold:\n return True\n else:\n return False",
"def badExitPrevMolecule(self):\n if self.molecules > 0:\n # collect list of any atoms where num departed is not expected num per molecule\n departErrors = [(atom.name, count) for atom, count in self.departed.items() if self.departed[atom] != atom.value]\n if len(departErrors) > 0:\n print(\"too many or too few atoms exited between previous and this molecule creations.\")\n print( \"Exit counts:\", departErrors)\n return False\n return True",
"def is_download_finished(folder):\n firefox_temp_file = sorted(pathlib.Path(folder).glob('*.part'))\n chrome_temp_file = sorted(pathlib.Path(folder).glob('*.crdownload'))\n downloaded_files = sorted(pathlib.Path(folder).glob('*.*'))\n if (len(firefox_temp_file) == 0) and \\\n (len(chrome_temp_file) == 0) and \\\n (len(downloaded_files) >= 1):\n return True\n else:\n return False",
"def episodes_done(self):\n with _MonitorEnv._lock:\n return self._episodes_done",
"def check_footage():\n\n ok = True\n\n for clip in bpy.data.movieclips:\n abspath = bpy.path.abspath(clip.filepath, clip.library)\n if not os.path.exists(abspath):\n print(\"Clip {} is not found\" . format(abspath))\n ok = False\n\n return ok",
"def check_expectations(self):\n self.load_results()\n\n for (benchmark, producer), result in self.results.items():\n if not result.reports:\n print('No results found for ' + benchmark + ' ' + producer)\n result.test_passed = False\n else:\n for report in result.reports:\n if check_benchmark_result(report, result.expectation):\n print('Test passed: ' + result.directory)\n result.test_passed = True\n else:\n print('Test failed: ' + result.directory)\n result.test_passed = False",
"def _check_done(self, env_done):\n self._episode_steps += 1\n if (self._episode_steps >= self._episode_length_step) or env_done:\n self._actuator_comms['UR5'].actuator_buffer.write(self._stopj_packet)\n return True\n else:\n return False",
"def assert_train_augmented(self) -> bool:\n dalet = Path(os.environ[\"DATA_PATH\"]) / \"characters\" / \"train\" / \"Dalet\"\n truth_value = False\n try:\n if len(list(dalet.iterdir())) != 72: # downloaded number of chars\n truth_value = True\n except FileNotFoundError:\n pass # this is ok because we handle the truth_value\n return truth_value",
"def count_directory(gws_folder):\n count_num_gw = 0\n for file in os.listdir(gws_folder):\n if file.startswith('gw'): # eg: '.txt'\n count_num_gw += 1\n print(f'There are {count_num_gw} gameweek files that can be merged')\n if count_num_gw == 47: # Hardcoded solution for \n count_num_gw = delete_directory(gws_folder)\n print(f'After deleting, there are now {count_num_gw} gameweek files that can be merged')\n else:\n print('No issues; continuing to merge gameweek files')\n return count_num_gw",
"def episodes_done_inc(self):\n with _MonitorEnv._lock:\n self._episodes_done += 1\n return self._episodes_done",
"def test_identify_contents_5(self):\n Path(self.base_dir, \"new_dir1\").mkdir()\n Path(self.base_dir, \"new_dir2\").mkdir()\n Path(self.base_dir, \"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n list_of_items = basic.identify_contents(self.base_dir, kind=None)\n exp_num_items = 4\n self.assertEqual(len(list_of_items), exp_num_items)",
"def final_check(self, schedule: Schedule) -> bool:\n for day in range(schedule.n_weekdays):\n for track in range(schedule.n_tracks):\n if schedule.count_courses_on_day(day, track) < 2 and schedule.count_courses_on_day(day, track) != 0: \n return False\n\n return True",
"def episode_done(self):\n if self.get_status() == AssignState.STATUS_DONE:\n return False\n else:\n return True",
"def check_video_paths(self):\n\n exercises_found = [True, True, True]\n self.all_video_paths = [(\"A\", []), (\"B\", []), (\"C\", [])]\n\n def create_exercise_paths(self, ex_label):\n ex_index = ord(ex_label) - ord('A')\n found_videos = True\n path_template = join(self.video_dir, self.video_path_template[ex_index][0])\n max_idx = self.video_path_template[ex_index][1]\n ex_path_created = []\n\n for i in range(1, max_idx + 1):\n full_path = path_template.format(i)\n ex_path_created.append(full_path)\n\n if not exists(full_path):\n found_videos = False\n break\n\n self.all_video_paths[ex_index][1].extend(ex_path_created)\n return found_videos\n\n if self.ex_a_check.isChecked():\n exercises_found[0] = create_exercise_paths(self, \"A\")\n if self.ex_b_check.isChecked():\n exercises_found[1] = create_exercise_paths(self, \"B\")\n if self.ex_c_check.isChecked():\n exercises_found[2] = create_exercise_paths(self, \"C\")\n\n return exercises_found",
"def is_finished(game: List[int]) -> bool :\n return sum(game) == 0",
"def test_check_torrent_status_complete(self):\n\n episode = self._get_episode()\n torrent_filename = self.fetcher.download_specific_episode(episode)\n self.fetcher.check_downloading_torrents()\n\n episode_torrent = (\n self.fetcher.session.query(EpisodeTorrent)\n .filter(EpisodeTorrent.torrent_name == torrent_filename)\n ).first()\n self.assertIsNotNone(episode_torrent)\n self.assertEqual(episode_torrent.complete, True)",
"def _episode_success(self, observations):\n dist = self._env.get_metrics()[\"object_to_goal_distance\"]\n if (\n abs(dist) > self._success_distance\n or observations[\"gripped_object_id\"] != -1\n ):\n return False\n return True",
"def isfinished(self) -> bool:\n for pile in self._piles:\n if len(pile.pile) != 13:\n return False\n return True",
"def is_done(self, agent, world) -> bool:\n if self.steps_from_last_reset / self.num_agents > self.episode_length:\n return True\n return False"
] |
[
"0.612574",
"0.6075443",
"0.60746336",
"0.60218",
"0.60132325",
"0.6002474",
"0.59599173",
"0.5942792",
"0.59202325",
"0.59194344",
"0.58748317",
"0.5805734",
"0.5788102",
"0.576801",
"0.5702322",
"0.56581885",
"0.56397027",
"0.5637409",
"0.5630495",
"0.56222147",
"0.5608263",
"0.5581359",
"0.5580871",
"0.55751157",
"0.55217654",
"0.5508546",
"0.5489788",
"0.5478914",
"0.5474492",
"0.5448984"
] |
0.7175833
|
0
|
Perform all useful analysis and direct the output via the Analysis' formatter. It analyses the scratch memory usage in DM1, DM2 and none, per priority and per task. [in] self Pointer to the current object
|
def run_all(self):
self.formatter.section_start('Scratch Memory Info')
self.formatter.section_start('Per priority')
self.analyse_per_priority()
self.formatter.section_end()
self.formatter.section_start('Per task')
self.analyse_per_task()
self.formatter.section_end()
self.formatter.section_end()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def analyse_per_priority(self):\n dm1_total = 0\n dm2_total = 0\n none_total = 0\n dm1_size = []\n dm2_size = []\n none_size = []\n users = []\n pdd = self.chipdata.get_var_strict('$_per_prio_data')\n num_entries = len(pdd.members)\n\n for current in range(0, num_entries):\n dm1_size.append(\n pdd.members[current].get_member('alloc_info_dm1')\n .get_member('alloc_size').value\n )\n dm2_size.append(\n pdd.members[current].get_member('alloc_info_dm2')\n .get_member('alloc_size').value\n )\n none_size.append(\n pdd.members[current].get_member('alloc_info_none')\n .get_member('alloc_size').value\n )\n users.append(pdd.members[current].get_member('refcount').value)\n dm1_total = dm1_total + dm1_size[current]\n dm2_total = dm2_total + dm2_size[current]\n none_total = none_total + none_size[current]\n\n total_mem = dm1_total + dm2_total + none_total\n self.formatter.output('Total scratch memory used: ' + str(total_mem))\n self.formatter.output('DM1 total: ' + str(dm1_total))\n self.formatter.output('DM2 total: ' + str(dm2_total))\n self.formatter.output('none total: ' + str(none_total))\n\n for current in range(0, num_entries):\n mem = dm1_size[current] + dm2_size[current] + none_size[current]\n self.formatter.output(\n 'For priority ' + str(current) + ' the memory allocated is ' +\n str(mem) + ' and the total no of users is ' +\n str(users[current])\n )\n self.formatter.output('DM1 ' + str(dm1_size[current]))\n self.formatter.output('DM2 ' + str(dm2_size[current]))\n self.formatter.output('none ' + str(none_size[current]))",
"def report_total_usage(self):\n work_time = 0\n if self.type == 'normal':\n work_time = self.fwk.fwk_global_time - self.start_exec_time\n elif self.type == 'sandia_work':\n self.total_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_usage = self.total_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.completed_work += self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.sim.rework_todo += self.fwk.fwk_global_time - self.start_exec_time\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_rework':\n self.total_rework_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.next_ckpt = self.sim.ckpt_interval - (self.fwk.fwk_global_time - self.start_exec_time)\n self.sim.rework_todo -= self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_ckpt':\n self.total_ckpt_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_ckpt_usage = self.total_ckpt_time * self.nproc\n if self.state == \"running\":\n # update last ckpt\n self.sim.last_ckpt = self.sim.completed_work\n elif self.state == \"failed\":\n # add work to rework\n self.sim.rework_todo += self.sim.next_ckpt\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_restart':\n print(\"time spent in rework\", self.fwk.fwk_global_time - self.start_exec_time)\n self.total_restart_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_restart_usage = self.total_restart_time * self.nproc\n #if self.state == \"running\":\n # nothing to do?\n # pass\n if self.state == \"failed\":\n # gotta try again\n self.state = \"ready\"\n self.num_faults += 1\n else:\n print(\"problems updating state in report_total_usage\")\n raise\n if self.type == 'normal':\n if self.sim.state == 'rework':\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else: # sim.state == 'work'\n if self.retry:\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else:\n self.total_time += work_time\n self.total_usage = self.total_time * self.nproc",
"def profiler(self):\r\n\r\n class Task(object):\r\n \"Private class to nicely wrap up the profile data\"\r\n def __init__(self, block, addr):\r\n self.block = block\r\n self.addr = addr\r\n self.name = None\r\n def tidy(self, sym):\r\n self.name = sym.varfind(self.addr).name\r\n self.CPU_FRAC = sym.constfind(\"$profiler.CPU_FRACTION_FIELD\").value\r\n def __repr__(self):\r\n if self.name is None:\r\n raise Exception(\"Need to call the tidy method before using\")\r\n return \"%-50s - %2.1f %%\" % (self.name, self.block[self.CPU_FRAC]/1000)\r\n\r\n\r\n # get the head of the list and a couple of constants\r\n head = self._core.sym.varfind(\"$profiler.last_addr\").addr\r\n NULL = self._core.sym.constfind(\"$profiler.LAST_ENTRY\").value\r\n SIZE = self._core.sym.constfind(\"$profiler.STRUC_SIZE\").value\r\n NEXT = self._core.sym.constfind(\"$profiler.NEXT_ADDR_FIELD\").value\r\n\r\n # get the first address\r\n curr = self._core.dm[head]\r\n\r\n # read all the structures off the chip as fast as we can\r\n tasks = []\r\n while curr != NULL:\r\n block = self._core.dm[curr:(curr+SIZE)]\r\n tasks.append(self.Task(block, curr))\r\n curr = block[NEXT]\r\n\r\n # now fill in the other bits\r\n for t in tasks:\r\n t.tidy(self._core.sym)\r\n\r\n # finally return\r\n return tasks",
"def make_metrics(self):\n num_batches = self.data_loader.number_of_batches()\n dose_score_vec = np.zeros(num_batches)\n\n # Only make calculations if data_loader is not empty\n if not self.data_loader.file_paths_list:\n print('No patient information was given to calculate metrics')\n else:\n # Change batch size to 1\n self.data_loader.batch_size = 1 # Loads data related to ground truth patient information\n if self.dose_loader is not None:\n self.dose_loader.batch_size = 1 # Loads data related to ground truth patient information\n\n for idx in tqdm.tqdm(range(num_batches)):\n # Get roi masks for patient\n self.get_constant_patient_features(idx)\n # Get dose tensors for reference dose and evaluate criteria\n reference_dose = self.get_patient_dose_tensor(self.data_loader)\n if reference_dose is not None:\n self.reference_dose_metric_df = self.calculate_metrics(self.reference_dose_metric_df, reference_dose)\n # If a dose loader was provided, calculate the score\n if self.dose_loader is not None:\n new_dose = self.get_patient_dose_tensor(self.dose_loader)\n # Make metric data frames\n self.new_dose_metric_df = self.calculate_metrics(self.new_dose_metric_df, new_dose)\n # Evaluate mean absolute error of 3D dose\n dose_score_vec[idx] = np.sum(np.abs(reference_dose - new_dose)) / np.sum(self.possible_dose_mask)\n # Save metrics at the patient level (this is a template for how DVH stream participants could save\n # their files\n # self.dose_metric_df.loc[self.patient_list[0]].to_csv('{}.csv'.format(self.patient_list[0]))\n\n if self.dose_loader is not None:\n dvh_score = np.nanmean(np.abs(self.reference_dose_metric_df - self.new_dose_metric_df).values)\n dose_score = dose_score_vec.mean()\n return dvh_score, dose_score\n else:\n print('No new dose provided. Metrics were only calculated for the provided dose.')",
"def __init__(self):\n super().__init__()\n self.printTag = 'POSTPROCESSOR Metrics'\n self.dynamic = False # is it time-dependent?\n self.features = None # list of feature variables\n self.targets = None # list of target variables\n self.metricsDict = {} # dictionary of metrics that are going to be assembled\n self.multiOutput = 'mean'# defines aggregating of multiple outputs for HistorySet\n # currently allow mean, max, min, raw_values\n self.weight = None # 'mean' is provided for self.multiOutput, weights can be used\n # for each individual output when all outputs are averaged\n self.pivotParameter = None\n self.pivotValues = []\n # assembler objects to be requested\n self.addAssemblerObject('Metric', InputData.Quantity.one_to_infinity)",
"def test00(self):\n\n # Obtain memory info (only for Linux 2.6.x)\n for line in Path(\"/proc/self/status\").read_text().splitlines():\n if line.startswith(\"VmSize:\"):\n vmsize = int(line.split()[1])\n elif line.startswith(\"VmRSS:\"):\n vmrss = int(line.split()[1])\n elif line.startswith(\"VmData:\"):\n vmdata = int(line.split()[1])\n elif line.startswith(\"VmStk:\"):\n vmstk = int(line.split()[1])\n elif line.startswith(\"VmExe:\"):\n vmexe = int(line.split()[1])\n elif line.startswith(\"VmLib:\"):\n vmlib = int(line.split()[1])\n print(\"\\nWallClock time:\", clock() - self.tref)\n print(\"Memory usage: ******* %s *******\" % self._getName())\n print(f\"VmSize: {vmsize:>7} kB\\tVmRSS: {vmrss:>7} kB\")\n print(f\"VmData: {vmdata:>7} kB\\tVmStk: {vmstk:>7} kB\")\n print(f\"VmExe: {vmexe:>7} kB\\tVmLib: {vmlib:>7} kB\")",
"def main():\n\n\t# Parse the file\n\tmem_file = advanced_analysis('../data_1/mempages.dat.out')",
"def measure(self):\n pass",
"def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()",
"def full_analysis(self):\n print('FULL ANALYSIS\\n' +\n '----------------------------------\\n')\n #print('Basic Statistics') # Remove this and run 'basic_stats'\n results.append('FULL ANALYSIS\\n' +\n '----------------------------------\\n')\n print('Basic Information\\n' +\n '----------------------------')\n results.append('Basic Information\\n' +\n '----------------------------')\n self.info_density()\n self.calc_total_rows()\n self.show_empty()\n self.calc_null()\n self.calc_col_len()\n self.calc_row_len()\n self.calc_col_info()\n self.regex_info()",
"def gather_info_and_display():\n # Obtain total rss displayed in memory.stat for each group,\n # container and service.\n try:\n output_mem = pipe_command(GREP_CMD, AWK_CMD, cwd=MEMPATH)\n LOG.debug(\n 'command: %s\\n%s',\n \"grep -rs total_rss '/sys/fs/cgroup/memory/' \"\n \"| awk '$2>0{print$0}' \",\n output_mem)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n mem_info = get_meminfo()\n pt_groups = gather_groups_memory(output_mem)\n pt_cont = gather_containers_memory(output_mem)\n pt_serv = sys_service_memory()\n\n # Dump the tables out\n print('\\nPer groups memory usage:')\n\n # Get string to be printed and create list of elements separated by \\n\n list_of_table_lines = pt_groups.get_string().split('\\n')\n\n # Use the first line (+---+-- ...) as horizontal rule to insert later\n horizontal_line = list_of_table_lines[0]\n\n # Print the table, except last two lines ( \"Total\" row + final separator).\n print(\"\\n\".join(list_of_table_lines[:-2]))\n # Print separator, and finally the \"Total\" row.\n print(horizontal_line)\n print(\"\\n\".join(list_of_table_lines[-2:]))\n\n pt_namespc = prettytable.PrettyTable(\n ['Namespace',\n 'Resident Set Size (MiB)',\n ], caching=False)\n pt_namespc.align = 'l'\n pt_namespc.align['Resident Set Size (MiB)'] = 'r'\n\n print('\\nPer namespace memory usage:')\n for n_s in MEMORY['namespaces']:\n pt_namespc.add_row(\n [n_s,\n MEMORY['namespaces'][n_s],\n ])\n print(pt_namespc)\n\n print('\\nPer container memory usage:')\n print(pt_cont)\n\n print('\\nPer service memory usage:')\n print(pt_serv)\n\n base_mebib = 0.0\n k8s_system = 0.0\n k8s_addon = 0.0\n platform_memory_percent = 0.0\n\n # Calculate base memory usage (i.e., normal memory, exclude K8S and VMs)\n # e.g., docker, system.slice, user.slice\n for group in MEMORY['cgroups']:\n if group in BASE_GROUPS:\n base_mebib += float(MEMORY['cgroups'][group])\n\n # K8S platform system usage (essential) and addons usage (non-essential)\n for n_s in MEMORY['namespaces']:\n if n_s in K8S_NAMESPACE_SYSTEM:\n k8s_system += MEMORY['namespaces'][n_s]\n elif n_s in K8S_NAMESPACE_ADDON:\n k8s_addon += MEMORY['namespaces'][n_s]\n\n # Calculate platform memory usage\n platform_mebib = base_mebib + k8s_system\n\n anon_mebib = float(mem_to_mebibytes(\n mem_info['Active(anon)'] + mem_info['Inactive(anon)'])) * KBYTE\n avail_mebib = float(mem_to_mebibytes(\n mem_info['MemAvailable'])) * KBYTE\n total_mebib = float(anon_mebib + avail_mebib)\n\n anon_percent = py2_round(100 * anon_mebib / total_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n reserved_mebib = get_platform_reserved_memory()\n # Calculate platform memory in terms of percent reserved\n if reserved_mebib > 0.0:\n platform_memory_percent = py2_round(\n 100 * platform_mebib / reserved_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n pt_platf = prettytable.PrettyTable(\n ['Reserved',\n 'Platform',\n 'Base',\n 'K8s Platform system',\n 'k8s-addon'\n ], caching=False)\n pt_platf.align = 'l'\n\n pt_platf.add_row(\n [reserved_mebib,\n '{} ({}%)'.format(platform_mebib, platform_memory_percent),\n base_mebib,\n k8s_system,\n k8s_addon\n ])\n print('\\nPlatform memory usage in MiB:')\n print(pt_platf)\n\n pt_4k = prettytable.PrettyTable(\n ['Anon',\n 'Cgroup-rss',\n 'Available',\n 'Total'\n ], caching=False)\n pt_4k.align = 'l'\n\n pt_4k.add_row(\n ['{} ({}%)'.format(anon_mebib, anon_percent),\n MEMORY['cgroups']['total_rss'],\n avail_mebib,\n total_mebib\n ])\n\n print('\\n4K memory 
usage in MiB:')\n print(pt_4k)\n\n return 0",
"def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )",
"def stats_compute(self, *args, **kwargs):\n totalElements = 0\n totalKeys = 0\n totalSize = 0\n l_stats = []\n d_report = {}\n str_report = \"\"\n l_range = []\n\n if int(self.verbosityLevel) and self.toConsole():\n l_range = tqdm(sorted(self.d_inputTreeCallback.items(),\n key = lambda kv: (kv[1]['diskUsage_raw']),\n reverse = self.b_statsReverse),\n desc = ' Processing stats')\n else:\n l_range = sorted(self.d_inputTreeCallback.items(),\n key = lambda kv: (kv[1]['diskUsage_raw']),\n reverse = self.b_statsReverse)\n\n for k, v in l_range:\n try:\n if not self.args['du'] and not self.args['duf']:\n str_report += \"files: %5d│ raw_size: %12d│ human_size: %8s│ dir: %s\\n\" % (\\\n len(self.d_inputTree[k]),\n self.d_inputTreeCallback[k]['diskUsage_raw'],\n self.d_inputTreeCallback[k]['diskUsage_human'],\n k)\n else:\n str_report += '%-10s%s\\n' % (\n self.d_inputTreeCallback[k]['diskUsage_human'], k)\n except:\n pass\n d_report = {\n 'files': len(self.d_inputTree[k]),\n 'diskUsage_raw': self.d_inputTreeCallback[k]['diskUsage_raw'],\n 'diskUsage_human': self.d_inputTreeCallback[k]['diskUsage_human'],\n 'path': k\n }\n l_stats.append(d_report)\n totalElements += len(v)\n totalKeys += 1\n totalSize += self.d_inputTreeCallback[k]['diskUsage_raw']\n str_totalSize_human = self.sizeof_fmt(totalSize)\n return {\n 'status': True,\n 'report': str_report,\n 'dirs': totalKeys,\n 'files': totalElements,\n 'totalSize': totalSize,\n 'totalSize_human': str_totalSize_human,\n 'l_stats': l_stats,\n 'runTime': other.toc()\n }",
"def run(self):\n\n step = self.steps['diagnostics_files']\n step.cores = self.config.getint('make_diagnostics_files', 'cores')\n\n # run the step\n super().run()",
"def measure(self,command_exe, command_args, measure_out):\n pass",
"def __call__(self, output, target, *args, **kwargs):\n _, y_pred = output.topk(1, 1, True, True)\n y_pred = y_pred.t().detach().cpu().numpy()[0]\n y_true = target.detach().cpu().numpy()\n self.pfm = self.metric_func(y_true, y_pred)\n return self.pfm",
"def memory_usage(self):\n\n def multiply_iter(iterable):\n res = 1\n for x in iterable:\n res *= x\n return res\n\n def add_params(parameter):\n res = 0\n for x in parameter:\n res += multiply_iter(x.shape)\n return res\n\n feat = add_params(self.features.parameters())\n clsf = add_params(self.classifier.parameters())\n total = feat + clsf\n\n mb_f = 4 / 1024 ** 2\n\n print(\"Conv : {0}\".format(feat))\n print(\"FC : {0}\".format(clsf))\n print(\"-----------------\")\n print(\"Total : {0}\".format(total))\n print(\"Memory : {0:.2f}MB\".format(total * mb_f))\n print(\"\")",
"def calculate(self):\n start = timeit.default_timer()\n linux_common.set_plugin_members(self)\n\n self._validate_config()\n pidstr = self._config.PID\n\n tasks = []\n for task in linux_pslist.linux_pslist.calculate(self):\n if _is_python_task(task, pidstr):\n tasks.append(task)\n\n alpha = 0.10\n export_path = './volatility_dumps_pytorch/'\n\n for task in tasks:\n find_model(task, [\"MobileNetV2\", \"VGG16\", \"MobileNetV1\"], export_path, alpha)\n dump_heaps(task, export_path, alpha)\n\n stop = timeit.default_timer()\n print(\"\\nRuntime: {0} seconds\".format(stop - start))\n sys.exit(0)",
"def reset_memory_statistics(sender, **kwargs): # pylint: disable=unused-argument\n MemoryUsageData.start_counting()",
"def compute_metrics(self):\n pass",
"def run_all(self):\n\n self.run_mash() ###Run MASH analysis\n self.filter_query() ###Filter fasta sequences out based on p value\n self.build_index(self.filtered_out_path) ###Build index for off-target analysis\n os.remove(self.filtered_out_path) ###Clean up intermediate fasta file\n self.format_gRNA(self.path1) ###Format everything in the right order\n self.run_OTF() ###Run off-target analysis\n self.output_parse() ###Parse output values and update table",
"def summary(self) -> None:\n print(\"Model manager summary:\")\n print(\"Preprocessor:\")\n print(self.preprocessor)\n print(\"Model summary:\")\n self.model.summary()\n print(\"Postprocessor:\")\n print(self.postprocessor)",
"def __call__(self, data_streams):\n # Use worker interval.\n if self.app_state.episode % self.app_state.args.logging_interval == 0:\n\n # Calculate all four statistics.\n confusion_matrix, precisions, recalls, f1scores, supports = self.calculate_statistics(data_streams)\n\n if self.show_confusion_matrix:\n self.logger.info(\"Confusion matrix:\\n{}\".format(confusion_matrix))\n\n # Calculate weighted averages.\n support_sum = sum(supports)\n if support_sum > 0:\n precision_avg = sum([pi*si for (pi,si) in zip(precisions,supports)]) / support_sum \n recall_avg = sum([ri*si for (ri,si) in zip(recalls,supports)]) / support_sum\n f1score_avg = sum([fi*si for (fi,si) in zip(f1scores,supports)]) / support_sum\n else:\n precision_avg = 0\n recall_avg = 0\n f1score_avg = 0\n\n # Log class scores.\n if self.show_class_scores:\n log_str = \"\\n| Precision | Recall | F1Score | Support | Label\\n\"\n log_str+= \"|-----------|--------|---------|---------|-------\\n\"\n for i in range(self.num_classes):\n log_str += \"| {:05.4f} | {:05.4f} | {:05.4f} | {:5d} | {}\\n\".format(\n precisions[i], recalls[i], f1scores[i], supports[i], self.labels[i])\n log_str+= \"|-----------|--------|---------|---------|-------\\n\"\n log_str += \"| {:05.4f} | {:05.4f} | {:05.4f} | {:5d} | Weighted Avg\\n\".format(\n precision_avg, recall_avg, f1score_avg, support_sum)\n self.logger.info(log_str)",
"def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted",
"def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted",
"def run(self):\n\t\terrors = {}\n\t\tvalues = {}\n\t\tcpu = System.CPUUsage()\n\t\tmem = System.MemoryUsage()\n\t\tdisk = System.DiskUsage()\n\t\tif cpu > self.cpu:\n\t\t\terrors[\"cpu\"] = (cpu, self.cpu)\n\t\telse:\n\t\t\tvalues[\"cpu\"] = (cpu, self.cpu)\n\t\tif mem > self.mem:\n\t\t\terrors[\"cpu\"] = (mem, self.mem)\n\t\telse:\n\t\t\tvalues[\"mem\"] = (mem, self.mem)\n\t\tfor mount, usage in disk.items():\n\t\t\tif usage > self.disk:\n\t\t\t\terrors.setdefault(\"disk\", {})\n\t\t\t\terrors[\"disk\"][mount] = (usage, self.disk)\n\t\t\telse:\n\t\t\t\tvalues.setdefault(\"disk\", {})\n\t\t\t\tvalues[\"disk\"][mount] = (usage, self,disk)\n\t\tif errors:\n\t\t\treturn Failure(\"errors with %s\" % (\", \".join(errors.keys())), value=dict(values=values, errors=errors))\n\t\telse:\n\t\t\treturn Success(value=dict(values=values))",
"def summarize(self):\n self.smalltalk += \"\\n Data IDs in this bundle: \\n\"\n self._files = {}\n inv_dict = {}\n # sort IDs to make sure pdfs are printed in same oder as they were\n # taken\n for k, v in self.stage_summaries.items():\n for qc_id in flatten_list(v):\n inv_dict[qc_id] = k\n sorted_ids = list(flatten_list(self.stage_summaries.values()))\n sorted_ids.sort(key=int)\n # for stage, value in self.stage_summaries.items():\n for qc_run_id in sorted_ids:\n # stage = inv_dict[qc_run_id]\n # if stage[0:7] == 'failed_':\n # stage = stage[7:]\n # try:\n # s = self.comments[qc_run_id]\n # except KeyError:\n # s = ''\n # self.comments[qc_run_id] = 'Classified as poor result.\\n' + s\n ds = Dataset(qc_run_id, self.db_name)\n device_name = ds.device_name\n f_folder = os.path.join(self.db_folder, \"tuning_results\", device_name)\n # for qc_run_id in flatten_list(value):\n self.smalltalk += str(qc_run_id) + \", \"\n\n # filename = stage + '_fit_ds'\n # filename += str(qc_run_id) + '.png'\n filename = os.path.join(f_folder, str(ds.ds.guid) + \".png\")\n\n self._files[str(qc_run_id)] = filename",
"def __call__(self):\n #Losses and optimizers\n for epoch in range(self.nb_epochs): # loop over the dataset multiple times\n self.train_loss = 0.0\n self.gan_loss = 0.0\n self.loss_discrim = 0.0\n val_loss = 0.0\n nb_data = 0.\n nb_data_val = 0.\n for i, data in enumerate(self.trainloader, 0):\n # get the batch; data is a list of [inputs, labels]\n inputs, real = data\n inputs, real = inputs.to(device), real.to(device)\n if i%self.discrimTrainPeriod==0:\n self.trainDiscrim(inputs, real)\n else:\n self.trainGen(inputs, real)\n nb_data += 1.\n #occasionnally save an example target/generated\n if i%self.displayPeriod==0:\n self.gen.eval()\n real = self.unNormalize(real[0,:,:,:].detach().cpu())\n self.transformToImage(real).save(self.targetFile)\n fake = self.gen(inputs)\n fake = self.unNormalize(fake[0,:,:,:].detach().cpu())\n self.transformToImage(fake).save(self.generatedFile)\n\n self.gen.eval()\n for i, data in enumerate(self.valloader, 0):\n with torch.no_grad():\n # get the inputs; data is a list of [inputs, labels]\n inputs, real = data\n inputs, real = inputs.to(device), real.to(device)\n #compute L1 loss\n fake = self.gen(inputs)\n lossGenL1 = self.criterionL1(fake, real)\n #statistics\n val_loss += lossGenL1.item()\n nb_data_val += 1.\n self.gan_loss = self.gan_loss / nb_data\n self.train_loss = self.train_loss / nb_data\n self.loss_discrim = self.loss_discrim / nb_data\n val_loss = val_loss / nb_data_val\n self.gan_loss_list.append(self.gan_loss)\n self.train_loss_list.append(self.train_loss)\n self.val_loss_list.append(val_loss)\n print(\"Epoch \", epoch, \"; train loss = \", self.train_loss,\n \"; val loss = \", val_loss, \"; gan loss = \", self.gan_loss,\n \"; loss discrim = \", self.loss_discrim)\n\n plt.plot(range(len(self.train_loss_list)), self.train_loss_list,\n self.val_loss_list, self.gan_loss_list)\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Generator Loss\")\n plt.savefig(self.graphFile)\n #save the weights\n torch.save(self.gen.state_dict(), self.savefileGen)\n torch.save(self.discrim.state_dict(), self.savefileDiscrim)",
"def run(self) -> Dict[str, Union[float, str]]:\n try:\n self.is_run = True\n deque(self, maxlen=0) # feed the entire iterator into a zero-length deque\n info = gather_info(\n self.start_time, self.train_collector, self.test_collector,\n self.best_reward, self.best_reward_std\n )\n finally:\n self.is_run = False\n\n return info",
"def run(self, confidence_level: dict) -> None:\n\n # initialize local variables\n static_n = copy(self.static_n)\n final_result = []\n df = pd.DataFrame(columns=self.OUTPUT_COLUMNS)\n df_metrics = pd.DataFrame(columns=self.OUTPUT_COLUMNS_METRICS)\n df_metrics_critical = pd.DataFrame(columns=self.OUTPUT_COLUMNS_METRICS_CRITICAL)\n\n # apply logic main loop\n while static_n <= self.static_n_maximum:\n self.grouped_data.apply(\n lambda grp: self.get_appropriate_subset(static_n, grp, confidence_level, final_result)\n )\n static_n += 1\n\n # create results\n if len(final_result) > 0:\n for idx, row in enumerate(final_result):\n row[OUTLIER_NO] = idx+1\n\n if self.print_debug:\n for row in final_result:\n self.print_to_console(row, confidence_level)\n\n df = pd.DataFrame(final_result)\n df_metrics = df.groupby([SUBSET_SIZE]).count().reset_index()\n df_metrics_critical = self.format_metrics_critical(df)\n\n # save results to files\n self.save_file.run(df[self.OUTPUT_COLUMNS], confidence_level[KEY] + \"_metrics_details\")\n self.save_file.run(df_metrics[self.OUTPUT_COLUMNS_METRICS], confidence_level[KEY] + \"_metrics_summary\")\n self.save_file.run(\n df_metrics_critical[self.OUTPUT_COLUMNS_METRICS_CRITICAL], confidence_level[KEY] + \"_metrics_critical\"\n )"
] |
[
"0.6257934",
"0.53452003",
"0.5278709",
"0.5274121",
"0.52575785",
"0.5248487",
"0.5191664",
"0.5152627",
"0.5093256",
"0.50777346",
"0.5058658",
"0.5053167",
"0.5052041",
"0.50365144",
"0.5028099",
"0.50278026",
"0.49993584",
"0.49851224",
"0.49738514",
"0.49705818",
"0.49490076",
"0.4948183",
"0.4941268",
"0.49362805",
"0.49362805",
"0.49359724",
"0.49349806",
"0.4920659",
"0.48985434",
"0.48979586"
] |
0.66813105
|
0
|
analyse_per_priority() displays the total scratch memory allocations in DM1, DM2 and none, and their values for each priority level. It also displays the total number of users for each priority level. The function reads and stores the allocation info from per_prio_data in the first for loop in order to display the total values first. In the second for loop it displays the stored values for the scratch memory (total, DM1, DM2, none) for each priority. [in] self Pointer to the current object
|
def analyse_per_priority(self):
dm1_total = 0
dm2_total = 0
none_total = 0
dm1_size = []
dm2_size = []
none_size = []
users = []
pdd = self.chipdata.get_var_strict('$_per_prio_data')
num_entries = len(pdd.members)
for current in range(0, num_entries):
dm1_size.append(
pdd.members[current].get_member('alloc_info_dm1')
.get_member('alloc_size').value
)
dm2_size.append(
pdd.members[current].get_member('alloc_info_dm2')
.get_member('alloc_size').value
)
none_size.append(
pdd.members[current].get_member('alloc_info_none')
.get_member('alloc_size').value
)
users.append(pdd.members[current].get_member('refcount').value)
dm1_total = dm1_total + dm1_size[current]
dm2_total = dm2_total + dm2_size[current]
none_total = none_total + none_size[current]
total_mem = dm1_total + dm2_total + none_total
self.formatter.output('Total scratch memory used: ' + str(total_mem))
self.formatter.output('DM1 total: ' + str(dm1_total))
self.formatter.output('DM2 total: ' + str(dm2_total))
self.formatter.output('none total: ' + str(none_total))
for current in range(0, num_entries):
mem = dm1_size[current] + dm2_size[current] + none_size[current]
self.formatter.output(
'For priority ' + str(current) + ' the memory allocated is ' +
str(mem) + ' and the total no of users is ' +
str(users[current])
)
self.formatter.output('DM1 ' + str(dm1_size[current]))
self.formatter.output('DM2 ' + str(dm2_size[current]))
self.formatter.output('none ' + str(none_size[current]))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def run_all(self):\n self.formatter.section_start('Scratch Memory Info')\n self.formatter.section_start('Per priority')\n self.analyse_per_priority()\n self.formatter.section_end()\n self.formatter.section_start('Per task')\n self.analyse_per_task()\n self.formatter.section_end()\n self.formatter.section_end()",
"def gather_info_and_display():\n # Obtain total rss displayed in memory.stat for each group,\n # container and service.\n try:\n output_mem = pipe_command(GREP_CMD, AWK_CMD, cwd=MEMPATH)\n LOG.debug(\n 'command: %s\\n%s',\n \"grep -rs total_rss '/sys/fs/cgroup/memory/' \"\n \"| awk '$2>0{print$0}' \",\n output_mem)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n mem_info = get_meminfo()\n pt_groups = gather_groups_memory(output_mem)\n pt_cont = gather_containers_memory(output_mem)\n pt_serv = sys_service_memory()\n\n # Dump the tables out\n print('\\nPer groups memory usage:')\n\n # Get string to be printed and create list of elements separated by \\n\n list_of_table_lines = pt_groups.get_string().split('\\n')\n\n # Use the first line (+---+-- ...) as horizontal rule to insert later\n horizontal_line = list_of_table_lines[0]\n\n # Print the table, except last two lines ( \"Total\" row + final separator).\n print(\"\\n\".join(list_of_table_lines[:-2]))\n # Print separator, and finally the \"Total\" row.\n print(horizontal_line)\n print(\"\\n\".join(list_of_table_lines[-2:]))\n\n pt_namespc = prettytable.PrettyTable(\n ['Namespace',\n 'Resident Set Size (MiB)',\n ], caching=False)\n pt_namespc.align = 'l'\n pt_namespc.align['Resident Set Size (MiB)'] = 'r'\n\n print('\\nPer namespace memory usage:')\n for n_s in MEMORY['namespaces']:\n pt_namespc.add_row(\n [n_s,\n MEMORY['namespaces'][n_s],\n ])\n print(pt_namespc)\n\n print('\\nPer container memory usage:')\n print(pt_cont)\n\n print('\\nPer service memory usage:')\n print(pt_serv)\n\n base_mebib = 0.0\n k8s_system = 0.0\n k8s_addon = 0.0\n platform_memory_percent = 0.0\n\n # Calculate base memory usage (i.e., normal memory, exclude K8S and VMs)\n # e.g., docker, system.slice, user.slice\n for group in MEMORY['cgroups']:\n if group in BASE_GROUPS:\n base_mebib += float(MEMORY['cgroups'][group])\n\n # K8S platform system usage (essential) and addons usage (non-essential)\n for n_s in MEMORY['namespaces']:\n if n_s in K8S_NAMESPACE_SYSTEM:\n k8s_system += MEMORY['namespaces'][n_s]\n elif n_s in K8S_NAMESPACE_ADDON:\n k8s_addon += MEMORY['namespaces'][n_s]\n\n # Calculate platform memory usage\n platform_mebib = base_mebib + k8s_system\n\n anon_mebib = float(mem_to_mebibytes(\n mem_info['Active(anon)'] + mem_info['Inactive(anon)'])) * KBYTE\n avail_mebib = float(mem_to_mebibytes(\n mem_info['MemAvailable'])) * KBYTE\n total_mebib = float(anon_mebib + avail_mebib)\n\n anon_percent = py2_round(100 * anon_mebib / total_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n reserved_mebib = get_platform_reserved_memory()\n # Calculate platform memory in terms of percent reserved\n if reserved_mebib > 0.0:\n platform_memory_percent = py2_round(\n 100 * platform_mebib / reserved_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n pt_platf = prettytable.PrettyTable(\n ['Reserved',\n 'Platform',\n 'Base',\n 'K8s Platform system',\n 'k8s-addon'\n ], caching=False)\n pt_platf.align = 'l'\n\n pt_platf.add_row(\n [reserved_mebib,\n '{} ({}%)'.format(platform_mebib, platform_memory_percent),\n base_mebib,\n k8s_system,\n k8s_addon\n ])\n print('\\nPlatform memory usage in MiB:')\n print(pt_platf)\n\n pt_4k = prettytable.PrettyTable(\n ['Anon',\n 'Cgroup-rss',\n 'Available',\n 'Total'\n ], caching=False)\n pt_4k.align = 'l'\n\n pt_4k.add_row(\n ['{} ({}%)'.format(anon_mebib, anon_percent),\n MEMORY['cgroups']['total_rss'],\n avail_mebib,\n total_mebib\n ])\n\n print('\\n4K memory 
usage in MiB:')\n print(pt_4k)\n\n return 0",
"def compute_metrics(self):\n overall_ret = OrderedDict()\n for ap_iou_thresh in self.ap_iou_thresh:\n ret_dict = OrderedDict()\n rec, prec, ap = eval_det_multiprocessing(self.pred_map_cls, self.gt_map_cls, ovthresh=ap_iou_thresh)\n for key in sorted(ap.keys()):\n clsname = self.class2type_map[key] if self.class2type_map else str(key)\n ret_dict[\"%s Average Precision\" % (clsname)] = ap[key]\n ap_vals = np.array(list(ap.values()), dtype=np.float32)\n ap_vals[np.isnan(ap_vals)] = 0\n ret_dict[\"mAP\"] = ap_vals.mean()\n rec_list = []\n for key in sorted(ap.keys()):\n clsname = self.class2type_map[key] if self.class2type_map else str(key)\n try:\n ret_dict[\"%s Recall\" % (clsname)] = rec[key][-1]\n rec_list.append(rec[key][-1])\n except:\n ret_dict[\"%s Recall\" % (clsname)] = 0\n rec_list.append(0)\n ret_dict[\"AR\"] = np.mean(rec_list)\n overall_ret[ap_iou_thresh] = ret_dict\n return overall_ret",
"def update_stats(self):\n self.stats = []\n for index in range(self.priority_list.count()):\n self.stats.append(self.priority_list.item(index).text())",
"def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity remaining: %.1f%%' % (capacity * 100,))",
"def calc_resources(self):\n self.popula = self.energy = self.popula_used = self.energy_used = 0\n self.cnt_public = self.cnt_shop = self.cnt_1 = self.cnt_2 = self.cnt_3 = self.cnt_4 = self.cnt_5 = self.cnt_office = 0\n self.popula += self.extra_pop\n for i in range(20):\n b = self.b[i]\n if b == 'T':\n self.popula += self.f[i] * 2\n self.energy_used += 1\n elif b == 'O':\n self.popula_used += 1\n self.energy_used += 1\n self.cnt_office += self.f[i]\n elif b == 'U':\n self.popula_used += 1\n self.cnt_public += 1\n elif b == 'S':\n self.energy_used += 1\n self.cnt_shop += 1\n elif b == '1':\n self.popula += 1\n self.energy += 1\n self.popula_used += 1\n self.cnt_1 += 1\n elif b == '2':\n self.popula_used += 1\n self.cnt_2 += 1\n elif b == '3':\n self.popula_used += 1\n self.cnt_3 += 1\n elif b == '4':\n self.popula += 2\n self.popula_used += 1\n self.cnt_4 += 1\n elif b == '5':\n self.energy += 2\n self.popula_used += 1\n self.cnt_5 += 1\n elif b == 'A':\n self.energy += 2\n self.popula_used += 1\n elif b == 'F':\n self.energy += 3\n self.popula_used += 1\n elif b == 'G':\n self.popula += 1\n if 'tvst' in args.exp:\n self.popula += self.cnt_shop\n if 'ward' in args.exp:\n self.popula += 3\n if 'elec' in args.exp:\n self.energy += 3\n if 'capi' in args.exp:\n self.popula_used += 2\n if 'fire' in args.exp:\n self.popula_used += 1\n if 'park' in args.exp:\n self.popula_used += 1",
"def stats_compute(self, *args, **kwargs):\n totalElements = 0\n totalKeys = 0\n totalSize = 0\n l_stats = []\n d_report = {}\n str_report = \"\"\n l_range = []\n\n if int(self.verbosityLevel) and self.toConsole():\n l_range = tqdm(sorted(self.d_inputTreeCallback.items(),\n key = lambda kv: (kv[1]['diskUsage_raw']),\n reverse = self.b_statsReverse),\n desc = ' Processing stats')\n else:\n l_range = sorted(self.d_inputTreeCallback.items(),\n key = lambda kv: (kv[1]['diskUsage_raw']),\n reverse = self.b_statsReverse)\n\n for k, v in l_range:\n try:\n if not self.args['du'] and not self.args['duf']:\n str_report += \"files: %5d│ raw_size: %12d│ human_size: %8s│ dir: %s\\n\" % (\\\n len(self.d_inputTree[k]),\n self.d_inputTreeCallback[k]['diskUsage_raw'],\n self.d_inputTreeCallback[k]['diskUsage_human'],\n k)\n else:\n str_report += '%-10s%s\\n' % (\n self.d_inputTreeCallback[k]['diskUsage_human'], k)\n except:\n pass\n d_report = {\n 'files': len(self.d_inputTree[k]),\n 'diskUsage_raw': self.d_inputTreeCallback[k]['diskUsage_raw'],\n 'diskUsage_human': self.d_inputTreeCallback[k]['diskUsage_human'],\n 'path': k\n }\n l_stats.append(d_report)\n totalElements += len(v)\n totalKeys += 1\n totalSize += self.d_inputTreeCallback[k]['diskUsage_raw']\n str_totalSize_human = self.sizeof_fmt(totalSize)\n return {\n 'status': True,\n 'report': str_report,\n 'dirs': totalKeys,\n 'files': totalElements,\n 'totalSize': totalSize,\n 'totalSize_human': str_totalSize_human,\n 'l_stats': l_stats,\n 'runTime': other.toc()\n }",
"def display_data(data):\n\n index = 0\n for details in data:\n index += 1\n print(\"{5:1}{0}. {1:10} in {2:15} priority {3:>3}\".format(index, *details))",
"def monitor(self):\n procdata = self.collect_userprocs_info()\n now = int(time.time())\n #-------------------\n proclist = []\n for name in procdata:\n mem = procdata[name]['rss']\n pcode = self.DB.get_code(name)\n proclist.append((now, pcode, mem))\n self.DB.add_proc_info(proclist)\n #-------------------\n totmem = psutil.virtual_memory()\n self.DB.add_total_mem_info(now, totmem.used, totmem.available, totmem.free)\n #-------------------\n disk = psutil.disk_usage('/')\n dinfo = {\n \"utime\" : now,\n \"total\" : disk.total,\n \"used\" : disk.used,\n \"free\" : disk.free,\n \"percent\" : disk.percent\n }\n self.DB.add_diskuse_info(dinfo)\n #-------------------\n cpu = json.dumps(psutil.cpu_percent(None, True))\n self.DB.add_total_cpu(now, cpu)\n #-------------------\n net = psutil.net_io_counters()\n ninfo = {\n \"utime\" : now,\n \"brecv\" : net.bytes_recv,\n \"bsent\" : net.bytes_sent,\n \"precv\" : net.packets_recv,\n \"psent\" : net.packets_sent,\n \"errin\" : net.errin,\n \"errin\" : net.errout\n }\n self.DB.add_net_info(ninfo)",
"def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )",
"def get_scores(self):\n hist = self.confusion_matrix\n # hist = [TN,FP;FN,TP]\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n # iou = iu.sum() / self.n_classes\n mean_iou = np.nanmean(iu) # if classes = 2: iou = miou\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n cls_iou = dict(zip(range(self.n_classes), iu))\n\n ##############################################\n tn = hist[0, 0]\n tp = np.diag(hist).sum() - tn\n fp = np.triu(hist, 1).sum()\n fn = np.tril(hist, -1).sum()\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f1 = 2 * precision * recall / (precision + recall)\n\n # for medical img, img_seg \\in [0,1]\n dice = 2 * tp / (tp + tp + fn + fp)\n # dice = f1-score\n dsc = 2 * tp / (tp + fn + fp)\n # dsc = jaccard\n # voe = 2 * abs(fp + fn) / (tp + tp + fn + fp)\n # voe = 1 - dsc\n\n k2 = {\n # \"Overall Acc: \\t\": acc,\n 'Mean Acc': float(judge_nan(acc_cls)),\n # \"FreqW Acc : \\t\": fwavacc,\n 'Mean IoU': float(judge_nan(mean_iou)),\n 'F1-score': float(judge_nan(f1)),\n 'DSC': float(judge_nan(dsc)),\n 'Precision': float(judge_nan(precision)),\n 'Recall': float(judge_nan(recall)),\n }\n\n return k2",
"def evaluate_data():\n try:\n # General system related info\n ram = psutil.virtual_memory()\n total_ram = round((ram.total / 1024 / 1024),2)\n free_ram = round((ram.available / 1024 / 1024),2)\n used_ram = round((ram.used / 1024 / 1024),2)\n cpu_total = psutil.cpu_count(logical=True)\n cpu_loadavg = round([x / cpu_total * 100 for x in psutil.getloadavg()][0],2)\n acs_8080 = sp.getoutput(\"netstat -an|grep -c 8080\")\n acs_8181 = sp.getoutput(\"netstat -an|grep -c 8181\")\n acs_8443 = sp.getoutput(\"netstat -an|grep -c 8443\")\n mysql = sp.getoutput(\"netstat -an|grep -c 3306\")\n oracle = sp.getoutput(\"netstat -an|grep -c 1521\")\n logging.info('General system info obtained')\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n # Process specific details\n try:\n iis_pid = SystemInformation.get_pid(\"w3wp.exe\")\n iis_ram = SystemInformation.get_ram_usage(iis_pid)\n iis_cpu = SystemInformation.get_cpu_usage(iis_pid)\n java_pid = SystemInformation.get_pid(\"java.exe\")\n java_ram = SystemInformation.get_ram_usage(java_pid)\n java_cpu = SystemInformation.get_cpu_usage(java_pid)\n mysqld_pid = SystemInformation.get_pid(\"mysqld.exe\")\n mysqld_ram = SystemInformation.get_ram_usage(mysqld_pid) \n mysqld_cpu = SystemInformation.get_cpu_usage(mysqld_pid)\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n\n try:\n dictionary = {}\n now = datetime.datetime.now()\n timestampt = now.strftime(\"%Y-%m-%d-%H:%M:%S\")\n fieldnames = ['timestampt','total_ram','free_ram','used_ram','cpu_total','cpu_loadavg','acs_8080','acs_8181','acs_8443','mysql','oracle','iis_ram','iis_cpu','java_ram','java_cpu','mysqld_ram','mysqld_cpu']\n for var in fieldnames:\n dictionary[var] = eval(var)\n \n logging.info('Data for report generated')\n return dictionary\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)",
"def _calc_desc_and_queues(\n total_numa_nodes, total_ports_per_numa, total_rx_queues, ports_per_numa_value\n ):\n\n # Get the number of rx queues\n rx_queues = max(1, total_rx_queues)\n tx_queues = rx_queues * total_numa_nodes + 1\n\n # Get the descriptor entries\n desc_entries = 1024\n ports_per_numa_value[\"rx_queues\"] = rx_queues\n total_mbufs = (\n (rx_queues * desc_entries) + (tx_queues * desc_entries)\n ) * total_ports_per_numa\n\n return total_mbufs",
"def __calculate_statistics(self, candidates):\n pdf = {}\n for candidate in candidates:\n neighbors = list(self.G.neighbors(candidate))\n capacity = sum([self.G.get_edge_data(candidate, n)[\"satoshis\"] for n in neighbors])\n average = capacity / len(neighbors)\n pdf[candidate] = average\n cumsum = sum(pdf.values())\n pdf = {k:v/cumsum for k,v in pdf.items()}\n w = 0.7\n print(\"percentage smoothed percentage capacity numchannels alias\")\n print(\"----------------------------------------------------------------------\")\n res_pdf = {}\n for k,v in pdf.items():\n neighbors = list(self.G.neighbors(k))\n capacity = sum([self.G.get_edge_data(k, n)[\"satoshis\"] for n in neighbors])\n name = k\n if \"alias\" in self.G.node[k]:\n name = self.G.node[k][\"alias\"]\n print(\"{:12.2f} \".format(100*v), \"{:12.2f} \".format(100*(w * v + (1-w)/len(candidates))) ,\"{:10} {:10} \".format( capacity, len(neighbors)), name)\n res_pdf[k] = (w * v + (1-w)/len(candidates))\n return res_pdf",
"def CalculateProcessingCapacity(self, problemManager, mineDataManager):\n \n self.oreProcessed = np.zeros(len(mineDataManager.theMiningSystem.oreMined)) \n self.processingPower = np.zeros(len(mineDataManager.theMiningSystem.oreMined)) \n self.processingCapacity = mineDataManager.theMiningSystem.mineOreProductionCapacity # ore is processed at a constant rate\n carryOver = 0.0\n for year in range( len(mineDataManager.theMiningSystem.oreMined )-1 ):\n processedOre = carryOver + mineDataManager.theMiningSystem.oreMined[year]\n \n if(processedOre > self.processingCapacity):\n carryOver = processedOre - self.processingCapacity\n processedOre = self.processingCapacity\n else:\n carryOver = 0.0\n self.oreProcessed[year] = processedOre\n \n self.oreProcessed[-1] = carryOver + mineDataManager.theMiningSystem.oreMined[-1] # final year\n \n \n # convert tonnes processed each year to the number of Mwh based on powerlaw fit\n self.processingPower = 3.96*(self.oreProcessed )**0.703 # in Mwh\n \n referenceMetalStr = mineDataManager.theOreBody.type[:2] \n # first two letters of orebody type is assumed to be reference metal for determining processing grade\n # eg AuCu -> gold is reference metal - note that user must select correct method\n \n \n referenceMetalOreConcentration = mineDataManager.theOreBody.metalGrades[referenceMetalStr]\n\n self.concentrateMetalConcentration = 1.0\n \n # lookup concentrateMetalConcentrations based on reference metal type\n \n concentrateConcentrations = {\"Au\":0.75,\"Ag\":0.85,\"Ni\":0.1,\"Cu\":0.25,\"Pb\":0.5}\n \n # find the minimum amount of concentration needed to bring concentrate to market\n minConcentrationFactor = 1e64\n \n for metal,metalOreGrade in mineDataManager.theOreBody.metalGrades.iteritems():\n if metal in concentrateConcentrations:\n concentrateGrade = concentrateConcentrations[metal]\n concFactor = concentrateGrade/(metalOreGrade/(1.0+ mineDataManager.theMiningSystem.dilution) +1e-64)\n if concFactor < 1.0:\n concFactor = 1.0\n #print \"concFactor\", metal, concFactor, metalOreGrade, concentrateGrade\n if(concFactor < minConcentrationFactor ):\n minConcentrationFactor = concFactor\n self.concentrateMetalConcentration = concentrateGrade\n \n # concentrate is calculated based on estimate of mineral content\n self.concentrateProduced = (1.0 - self.processingLoss) \\\n *np.array(mineDataManager.theMiningSystem.oreMined)/minConcentrationFactor \n \n \n return self.processingCapacity",
"def _total_priority(self):\n return self.nodes[0]",
"def analyze(allocs, stackstr, progname, depth, threshold_mallocs, threshold_score):\n if len(allocs) < int(threshold_mallocs):\n # Ignore call sites with too few mallocs\n return []\n analyzed_list = []\n # The set of sizes of allocated objects.\n sizes = set()\n # A histogram of the # of objects allocated of each size.\n size_histogram = defaultdict(int)\n # mallocs - frees (of things allocated in this context)\n actual_footprint = 0\n # max actual_footprint\n peak_footprint = 0\n # index of alloc w/max footprint\n peak_footprint_index = 0\n # sum(mallocs) = the amount of memory used if frees were ignored\n nofree_footprint = 0\n # set of all thread ids used for malloc/free\n tids = set()\n # set of all (currently) allocated objects from this site\n mallocs = set()\n # total number of allocations\n num_allocs = 0\n # was size ever invoked? true iff size was invoked\n size_taken = False\n # true iff all size requests were properly aligned\n all_aligned = True\n # amount of space that would leak if frees were ignored\n would_leak = 0\n for (index, i) in enumerate(allocs):\n # If a size was taken, record this fact and continue.\n if i[\"action\"] == \"S\":\n size_taken = True\n continue\n if len(i[\"stack\"]) < depth:\n continue\n sizes.add(i[\"size\"])\n size_histogram[i[\"size\"]] += 1\n tids.add(i[\"tid\"])\n if i[\"action\"] == \"M\":\n if i[\"reqsize\"] == 0 or i[\"reqsize\"] % 16 != 0:\n # if all_aligned:\n # print(\"FIXME first reqsize not aligned: \" + str(i[\"reqsize\"]))\n all_aligned = False\n num_allocs += 1\n # Compute actual footprint (taking into account mallocs and frees).\n actual_footprint += i[\"size\"]\n if actual_footprint > peak_footprint:\n peak_footprint = actual_footprint\n peak_footprint_index = index\n # Compute total 'no-free' memory footprint (excluding frees) This\n # is how much memory would be consumed if we didn't free anything\n # until the end (as with regions/arenas). We use this to compute a\n # \"region score\" later.\n nofree_footprint += i[\"size\"]\n # Record the malloc so we can check it when freed.\n mallocs.add(i[\"address\"])\n elif i[\"action\"] == \"F\":\n if i[\"address\"] in mallocs:\n # Only reclaim memory that we have already allocated\n # (others are frees to other call sites).\n actual_footprint -= i[\"size\"]\n mallocs.remove(i[\"address\"])\n else:\n would_leak += i[\"size\"]\n # print(mallocs)\n # print(str(i[\"address\"]) + \" not found\")\n # Compute region_score (0 is worst, 1 is best - for region replacement).\n region_score = 0\n if nofree_footprint != 0:\n region_score = peak_footprint / nofree_footprint\n if region_score >= float(threshold_score):\n stk = eval(stackstr)\n output = {\n \"stack\": stk,\n \"allocs\": num_allocs,\n \"region_score\": region_score,\n \"threads\": tids,\n \"sizes\": sizes,\n \"size_histogram\": size_histogram,\n \"peak_footprint\": peak_footprint,\n \"nofree_footprint\": nofree_footprint,\n \"potential_leaks\": would_leak,\n \"size_taken\": size_taken,\n \"all_aligned\": all_aligned,\n }\n analyzed_list.append(output)\n return analyzed_list",
"def _process(self, data: np.ndarray) -> np.ndarray:\n\n # Step 1. Reorder the data.\n memory = self._reorder(data)\n\n # Step 2. Do the restless classification into counts.\n counts = [defaultdict(int) for _ in range(self._n_circuits)]\n prev_shot = \"0\" * self._num_qubits\n header = {\"memory_slots\": self._num_qubits}\n\n for idx, shot in enumerate(memory):\n shot = format_counts_memory(shot, header)\n\n restless_adjusted_shot = RestlessToCounts._restless_classify(shot, prev_shot)\n\n circuit_idx = idx % self._n_circuits\n\n counts[circuit_idx][restless_adjusted_shot] += 1\n\n prev_shot = shot\n\n return np.array([dict(counts_dict) for counts_dict in counts])",
"def detect_nash_balancing_profil(dico_profs_Vis_Perf_t, \r\n arr_pl_M_T_vars_modif, t):\r\n nash_profils = list()\r\n dico_nash_profils = dict()\r\n cpt_nash = 0\r\n for key_modes_prof, dico_Vi_Pref_t in dico_profs_Vis_Perf_t.items():\r\n cpt_players_stables = 0\r\n dico_profils = dict()\r\n for num_pl_i, mode_i in enumerate(key_modes_prof): # 0 <= num_pl_i < m_player \r\n Vi, ben_i, cst_i = dico_Vi_Pref_t[fct_aux.RACINE_PLAYER\\\r\n +\"_\"+str(num_pl_i)]\r\n state_i = arr_pl_M_T_vars_modif[num_pl_i, t, \r\n fct_aux.AUTOMATE_INDEX_ATTRS[\"state_i\"]]\r\n gamma_i = arr_pl_M_T_vars_modif[num_pl_i, t, \r\n fct_aux.AUTOMATE_INDEX_ATTRS[\"gamma_i\"]]\r\n setx = arr_pl_M_T_vars_modif[num_pl_i, t, \r\n fct_aux.AUTOMATE_INDEX_ATTRS[\"set\"]]\r\n prod_i = arr_pl_M_T_vars_modif[num_pl_i, t, \r\n fct_aux.AUTOMATE_INDEX_ATTRS[\"prod_i\"]]\r\n cons_i = arr_pl_M_T_vars_modif[num_pl_i, t, \r\n fct_aux.AUTOMATE_INDEX_ATTRS[\"cons_i\"]]\r\n r_i = arr_pl_M_T_vars_modif[num_pl_i, t, \r\n fct_aux.AUTOMATE_INDEX_ATTRS[\"r_i\"]]\r\n mode_i_bar = None\r\n mode_i_bar = fct_aux.find_out_opposite_mode(state_i, mode_i)\r\n new_key_modes_prof = list(key_modes_prof)\r\n new_key_modes_prof[num_pl_i] = mode_i_bar\r\n new_key_modes_prof = tuple(new_key_modes_prof)\r\n \r\n Vi_bar = None\r\n Vi_bar, ben_i_bar, cst_i_bar \\\r\n = dico_profs_Vis_Perf_t[new_key_modes_prof]\\\r\n [fct_aux.RACINE_PLAYER+\"_\"+str(num_pl_i)]\r\n if Vi >= Vi_bar:\r\n cpt_players_stables += 1\r\n dico_profils[fct_aux.RACINE_PLAYER+\"_\"+str(num_pl_i)+\"_t_\"+str(t)] \\\r\n = {\"set\":setx, \"state\":state_i, \"mode_i\":mode_i, \"Vi\":Vi, \r\n \"gamma_i\":gamma_i, \"prod\":prod_i, \"cons\":cons_i, \"r_i\":r_i,\r\n \"ben\":ben_i, \"cst\":cst_i}\r\n \r\n dico_profils[\"mode_profil\"] = key_modes_prof \r\n \r\n if cpt_players_stables == len(key_modes_prof):\r\n nash_profils.append(key_modes_prof)\r\n Perf_t = dico_profs_Vis_Perf_t[key_modes_prof][\"Perf_t\"]\r\n dico_profils[\"Perf_t\"] = Perf_t\r\n dico_nash_profils[\"NASH_\"+str(cpt_nash)] = (dico_profils)\r\n cpt_nash += 1\r\n \r\n return nash_profils, dico_nash_profils",
"def _per_supercls_summarize(self):\n\n per_super_class_result = {}\n superCats = self.FPParams.catSuperClsName\n\n for superCatId, superCat in enumerate(superCats):\n superCatKey = \"supercls-\" + superCat\n\n resultDet = self._summarize_with_cat(\n f1=False, catSuperIdx=superCatId)\n per_super_class_result[superCatKey] = {\"iou\": resultDet}\n if superCatId != 1:\n results = self._summarize_with_cat(catSuperIdx=superCatId)\n resultF1 = self._summarize_with_cat(\n iou=False, catSuperIdx=superCatId)\n\n per_super_class_result[superCatKey][\"f1\"] = resultF1\n per_super_class_result[superCatKey][\"iou + f1\"] = results\n\n return per_super_class_result",
"def _prepare_score_metrics(self, local_range=5, axis_infos=Axis3D_infos):\n if self.verbose:\n print(f\"- Calculate scoring metrics\")\n self.chr_2_metrics = {}\n if not hasattr(self, 'chr_2_cand_hzxys') or not hasattr(self, 'chr_2_cand_ids'):\n _chr_2_cand_hzxys = {}\n _chr_2_cand_ids = {}\n\n for _chr_name, _chr_centers in self.chr_2_homolog_centers.items():\n if hasattr(self, 'chr_2_cand_hzxys') and hasattr(self, 'chr_2_cand_ids') :\n _chr_hzxys = self.chr_2_cand_hzxys[_chr_name]\n _chr_ids = self.chr_2_cand_ids[_chr_name]\n else:\n # get coordinates\n _chr_coords_df = self.merged_coords.loc[self.merged_coords['chr']==str(_chr_name)]\n _chr_hzxys = _chr_coords_df[['center_intensity']+[f\"center_{_x}\" for _x in axis_infos]].values\n _chr_ids = _chr_coords_df['chr_order'].values\n _chr_2_cand_hzxys[_chr_name] = _chr_hzxys\n _chr_2_cand_ids[_chr_name] = _chr_ids\n # calculate metrics\n if hasattr(self, 'chr_2_homolog_hzxys_list'):\n _ref_hzxys_list = self.chr_2_homolog_hzxys_list.get(_chr_name, None)\n else:\n _ref_hzxys_list = None\n self.chr_2_metrics[_chr_name] = prepare_score_metrics_by_chr(\n _chr_hzxys, _chr_ids, _chr_centers, \n prev_homolog_hzxys=_ref_hzxys_list, \n local_range=local_range)\n # add this attribute if not given previously\n if not hasattr(self, 'chr_2_cand_hzxys') or not hasattr(self, 'chr_2_cand_ids'):\n self.chr_2_cand_hzxys = _chr_2_cand_hzxys\n self.chr_2_cand_ids = _chr_2_cand_ids\n return",
"def __manage_tree(self):\n for pre, fill, node in RenderTree(self.tree):\n if node.name is 'count':\n logger.info(\n \"Tree info %s%s: %s %s p/s attack: %s\",\n pre, node.name, node.value, node.pps, node.attack)\n else:\n logger.info(\"Pre - [%s], Fill - [%s], Node - [%s]\",\n pre, fill, node.name)",
"def summary(self, parent):\n\t\taccuracy_item = QTreeWidgetItem(parent)\n\t\taccuracy_item.setText(0, 'Accuracy')\n\t\taccuracy_item.setText(1, str(self.accuracy()))\n\n\t\tprecision_item = QTreeWidgetItem(parent)\n\t\tprecision_item.setText(0, 'Precision')\n\t\tprecision_item.setText(1, str(self.precision()))\n\t\tclass_precision = self.precision('classwise')\n\t\tself.class_summary(precision_item, class_precision)\n\n\t\trecall_item = QTreeWidgetItem(parent)\n\t\trecall_item.setText(0, 'Recall')\n\t\trecall_item.setText(1, str(self.recall()))\n\t\tclass_recall = self.recall('classwise')\n\t\tself.class_summary(recall_item, class_recall)\n\n\t\tf1_item = QTreeWidgetItem(parent)\n\t\tf1_item.setText(0, 'F1')\n\t\tf1_item.setText(1, str(self.f1()))\n\t\tclass_f1 = self.f1('classwise')\n\t\tself.class_summary(f1_item, class_f1)\n\n\t\tauroc_item = QTreeWidgetItem(parent)\n\t\tauroc_item.setText(0, 'AUROC')\n\t\tauroc_item.setText(1, str(self.auroc()))\n\t\tclass_auroc = self.auroc('classwise')\n\t\tself.class_summary(auroc_item, class_auroc)\n\n\t\tauprc_item = QTreeWidgetItem(parent)\n\t\tauprc_item.setText(0, 'AUPRC')\n\t\tauprc_item.setText(1, str(self.auprc()))\n\t\tclass_auprc = self.auprc('classwise')\n\t\tself.class_summary(auprc_item, class_auprc)",
"def _process(self, data: np.ndarray) -> np.ndarray:\n probabilities = np.empty(data.size, dtype=object)\n\n for idx, counts_dict in enumerate(data):\n shots = sum(counts_dict.values())\n freq = counts_dict.get(self._outcome, 0)\n alpha_posterior = [freq + self._alpha_prior[0], shots - freq + self._alpha_prior[1]]\n alpha_sum = sum(alpha_posterior)\n\n p_mean = alpha_posterior[0] / alpha_sum\n p_var = p_mean * (1 - p_mean) / (alpha_sum + 1)\n\n probabilities[idx] = ufloat(nominal_value=p_mean, std_dev=np.sqrt(p_var))\n\n return probabilities",
"def stats(self):\n\t\t\n\t\tx = [self.authorities[i] for i in self.authorities if self.authorities[i] != 1]\n\t\txx = sorted(x)\n\t\tl = float(len(xx))\n\t\tprint \"-----------\"\n\t\tprint \"Population : \" + str(l)\n\t\tprint \"-----------\"\n\t\tprint \"Q1 = \" + str(xx[int(l/4)])\n\t\tprint \"Q3 = \" + str(xx[int(float(l/4)*3)])\n\t\tprint \"-----------\"\n\t\tprint \"01/08 = \" + str(xx[int(l/8)])\n\t\tprint \"07/08 = \" + str(xx[int(float(l/8)*7)])\n\t\tprint \"-----------\"\n\t\tprint \"01/16 = \" + str(xx[int(l/16)])\n\t\tprint \"15/16 = \" + str(xx[int(float(l/16)*15)])\n\t\tprint \"-----------\"\n\t\tprint \"01/32 = \" + str(xx[int(l/32)])\n\t\tprint \"31/32 = \" + str(xx[int(float(l/32)*31)])\n\t\tprint \"-----------\"\n\t\tprint \"01/64 = \" + str(xx[int(l/64)])\n\t\tprint \"63/64 = \" + str(xx[int(float(l/64)*63)])\n\t\tprint \"-----------\"\n\t\tprint \"01/128 = \" + str(xx[int(l/128)])\n\t\tprint \"127/128 = \" + str(xx[int(float(l/128)*127)])\n\t\tprint \"-----------\"\n\t\tprint \"01/256 = \" + str(xx[int(l/256)])\n\t\tprint \"255/256 = \" + str(xx[int(float(l/256)*255)])\n\t\tprint \"-----------\"\n\t\tprint \"01/512 = \" + str(xx[int(l/512)])\n\t\tprint \"511/512 = \" + str(xx[int(float(l/512)*511)])\n\t\tprint \"-----------\"",
"def scan_attrs(self, prob):\r\n \r\n # Keep track of the names of objects in the current problem\r\n # (useful to determine if attributes are referring to other objects)\r\n object_names = []\r\n for fig in prob['figures'].values():\r\n for object_name in fig.keys():\r\n if not object_name in object_names:\r\n object_names.append(object_name)\r\n \r\n if not 'attributes' in self.kb:\r\n self.kb['attributes'] = {}\r\n \r\n attrs = self.kb['attributes']\r\n \r\n # process the attributes in the current problem\r\n for fig in prob['figures'].values():\r\n for obj in fig.values():\r\n for attr, subvalues in obj.items():\r\n if not attr in attrs:\r\n attrs[attr] = {'values': [],\r\n 'relative': 'unknown',\r\n 'multi': 'unknown',\r\n 'count': 0}\r\n data = attrs[attr]\r\n \r\n data['count'] += 1\r\n \r\n if data['multi'] == 'unknown':\r\n if len(subvalues) > 1:\r\n data['multi'] = 'yes'\r\n else:\r\n data['multi'] = 'no'\r\n else:\r\n if len(subvalues) > 1 and data['multi'] == 'no':\r\n data['multi'] = 'sometimes'\r\n elif len(subvalues) == 1 and data['multi'] == 'yes':\r\n data['multi'] = 'sometimes'\r\n \r\n # process each subvalue\r\n values = data['values']\r\n for subvalue in subvalues:\r\n # check to see if this attr refers to other objects\r\n relative = False\r\n if subvalue in object_names:\r\n relative = True\r\n if data['relative'] == 'unknown':\r\n data['relative'] = 'yes'\r\n elif data['relative' ] == 'no':\r\n data['relative'] = 'sometimes'\r\n else:\r\n if data['relative'] == 'unknown':\r\n data['relative'] = 'no'\r\n elif data['relative'] == 'yes':\r\n data['relative'] = 'sometimes'\r\n \r\n # add this to the seen values if it isn't already\r\n # in there and it isn't a relative value\r\n if not relative and not subvalue in values:\r\n values.append(subvalue)\r\n \r\n # update the kb's attribute priorities based upon frequency of encounters\r\n \r\n sorted_attrs = sorted(attrs.items(), key=lambda attr: attr[1]['count'], reverse=True)\r\n priorities = self.kb['attribute_priorities'] = []\r\n for attr in sorted_attrs:\r\n priorities.append(attr[0])",
"def compute_and_print_eval_metrics(self):\n s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', '[email protected]', '[email protected]:.95')\n precision, recall, f1, mean_precision, mean_recall, map50, map = 0., 0., 0., 0., 0., 0., 0.\n ap = []\n eval_stats = [np.concatenate(x, 0) for x in zip(*self.eval_stats)]\n if len(eval_stats) and eval_stats[0].any():\n precision, recall, ap, f1, ap_class = ap_per_class(*eval_stats)\n precision, recall, ap50, ap = precision[:, 0], recall[:, 0], ap[:, 0], ap.mean(1)\n mean_precision, mean_recall, map50, map = precision.mean(), recall.mean(), ap50.mean(), ap.mean()\n nt = np.bincount(eval_stats[3].astype(np.int64), minlength=len(self.class_names)) # number of targets per class\n else:\n nt = np.zeros(1)\n\n pf = '%20s' + '%12.5g' * 6 # print format\n print(\"\\n EVALUTAION \\n\")\n print(s)\n print(pf % ('all', self.seen, nt.sum(), mean_precision, mean_recall, map50, map))\n if self.cfg.eval.verbose:\n for indx, cls in enumerate(ap_class):\n print(pf % (self.class_names[cls], self.seen, nt[cls], precision[indx], recall[indx], ap50[indx], ap[indx]))",
"def compute_statistics(self):",
"def organise_scans(self):\n self.wh_to_th = {}\n self.th_to_wh = {}\n\n wh_to_th_metrics = []\n th_to_wh_metrics = []\n wh_to_th_params = {}\n th_to_wh_params = {}\n wh_to_th_minim_info = {}\n th_to_wh_minim_info = {}\n wh_to_th_minim_info['time'] = []\n wh_to_th_minim_info['iterations'] = []\n wh_to_th_minim_info['funcevals'] = []\n wh_to_th_minim_info['status'] = []\n th_to_wh_minim_info['time'] = []\n th_to_wh_minim_info['iterations'] = []\n th_to_wh_minim_info['funcevals'] = []\n th_to_wh_minim_info['status'] = []\n\n for injparam in sorted(self.data_sets.keys()):\n injlabels = self.labels[injparam].dict\n for injkey in self.data_sets[injparam].keys():\n h0_metric_val = self.data_sets[injparam][injkey][\n 'h0_fit_to_toy_%s_asimov'\n %(injlabels['data_name'])]['metric_val']\n h1_metric_val = self.data_sets[injparam][injkey][\n 'h1_fit_to_toy_%s_asimov'\n %(injlabels['data_name'])]['metric_val']\n if h1_metric_val > h0_metric_val:\n bestfit = 'h0'\n altfit = 'h1'\n else:\n bestfit = 'h1'\n altfit = 'h0'\n\n wh_to_th_fit = self.data_sets[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)]['fid_asimov']\n th_to_wh_fit = self.data_sets[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)]['fid_asimov']\n\n wh_to_th_metrics.append(wh_to_th_fit['metric_val'])\n th_to_wh_metrics.append(th_to_wh_fit['metric_val'])\n\n for systkey in wh_to_th_fit['params'].keys():\n if systkey not in wh_to_th_params.keys():\n wh_to_th_params[systkey] = []\n wh_to_th_params[systkey].append(\n wh_to_th_fit['params'][systkey]\n )\n for systkey in th_to_wh_fit['params'].keys():\n if systkey not in th_to_wh_params.keys():\n th_to_wh_params[systkey] = []\n th_to_wh_params[systkey].append(\n th_to_wh_fit['params'][systkey]\n )\n\n wh_to_th_minim_info['time'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_time'])\n wh_to_th_minim_info['iterations'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['nit'])\n wh_to_th_minim_info['funcevals'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['nfev'])\n wh_to_th_minim_info['status'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['status'])\n \n th_to_wh_minim_info['time'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_time'])\n th_to_wh_minim_info['iterations'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['nit'])\n th_to_wh_minim_info['funcevals'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['nfev'])\n th_to_wh_minim_info['status'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['status'])\n\n wh_to_th_params['bestfit'] = bestfit\n wh_to_th_params['altfit'] = altfit\n th_to_wh_params['bestfit'] = bestfit\n th_to_wh_params['altfit'] = altfit\n\n self.wh_to_th['metrics'] = wh_to_th_metrics\n self.th_to_wh['metrics'] = th_to_wh_metrics\n self.wh_to_th['params'] = wh_to_th_params\n self.th_to_wh['params'] = th_to_wh_params\n self.wh_to_th['minim_info'] = wh_to_th_minim_info\n self.th_to_wh['minim_info'] = th_to_wh_minim_info",
"def _get_thresholds(self, data):\n all_thre = {}\n all_thre['value_max_length'] = 0\n all_thre['info_max_length'] = 0\n all_thre['max_pos_value'] = 0\n all_thre['min_neg_value'] = 0\n\n if self.max_value is not None:\n all_thre['max_pos_value'] = self.max_value\n\n # Iterate on all the items\n for (info, value, color) in data:\n totalvalue_len = 0\n\n # If we have a list of values for the item\n if isinstance(value, collections.Iterable):\n icount = 0\n maxvalue = 0\n minvalue = 0\n for (ivalue, icolor) in value:\n if ivalue < minvalue:\n minvalue = ivalue\n if ivalue > maxvalue:\n maxvalue = ivalue\n # if we are in multivalued mode, the value string is\n # the concatenation of the values, separeted by a ',',\n # len() must be computed on it\n # if we are not in multivalued mode, len() is just the\n # longer str(value) len ( /!\\, value can be negative,\n # which means that it's not simply len(str(max_value)))\n if self.multivalue:\n totalvalue_len += len(\",\" + self._trans_hr(ivalue))\n else:\n totalvalue_len = max(totalvalue_len, len(self._trans_hr(ivalue)))\n\n if self.multivalue:\n # remove one comma if multivalues\n totalvalue_len = totalvalue_len - 1\n\n # If the item only has one value\n else:\n totalvalue_len = len(self._trans_hr(value))\n maxvalue = value\n minvalue = value\n\n if minvalue < all_thre['min_neg_value']:\n all_thre['min_neg_value'] = minvalue\n\n if maxvalue > all_thre['max_pos_value']:\n all_thre['max_pos_value'] = maxvalue\n\n if self._len_noansi(info) > all_thre['info_max_length']:\n all_thre['info_max_length'] = self._len_noansi(info)\n\n if totalvalue_len > all_thre['value_max_length']:\n all_thre['value_max_length'] = totalvalue_len\n\n return all_thre"
] |
[
"0.53971434",
"0.5170996",
"0.5162139",
"0.5098022",
"0.50932366",
"0.5056469",
"0.5031619",
"0.49695593",
"0.4948778",
"0.48987287",
"0.48891315",
"0.4879162",
"0.486115",
"0.4814659",
"0.47854146",
"0.47718522",
"0.47487068",
"0.47278893",
"0.47230425",
"0.4713487",
"0.47107187",
"0.46939978",
"0.46921515",
"0.46777204",
"0.46756873",
"0.46624762",
"0.46516404",
"0.46345916",
"0.4620434",
"0.46190065"
] |
0.8804243
|
0
|
analyse_per_task() displays the scratch memory info per task. It takes one optional parameter representing the task id and displays the info for that task. The function walks the linked list: if no task id is passed, it displays the complete list; otherwise it displays the info for the task with the given id, or a message if the task id is invalid. [in] self Pointer to the current object [in] task_id = None
|
def analyse_per_task(self, task_id=None):
    # Read the head of the per-task scratch memory linked list.
    per_task = self.chipdata.cast(
        self.chipdata.get_var_strict('$_first_scratch_mem').address,
        'scratch_per_task_data'
    )
    matching_id = False
    # Walk the linked list and print every entry, or only the entry
    # whose task id (first field) matches the requested value.
    # Note: equality (==) is used rather than identity (is) so that
    # task ids are compared by value.
    for sc_table in self.parse_linked_list(per_task.address, 'next'):
        if (task_id is None) or (sc_table.value[0] == task_id):
            self.formatter.output(str(sc_table))
            matching_id = True
    # If a task id was requested but never matched, report it.
    if (task_id is not None) and (not matching_id):
        self.formatter.output(
            'There is no task id = ' + str(cu.hex(task_id)) + '!'
        )
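
A minimal usage sketch of the two call modes described above. This is hedged: the ScratchMem class name and its constructor arguments are assumptions for illustration only, not part of the original source; the method simply expects an object exposing chipdata, parse_linked_list and formatter.

# Hypothetical driver code (names are assumptions, not from the source).
analysis = ScratchMem(chipdata, formatter)   # assumed analysis object
analysis.analyse_per_task()        # dump scratch memory info for every task
analysis.analyse_per_task(0x1F)    # dump info only for task id 0x1F, or report that no such task exists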
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _show_task(task, depth=0):\n indent = \" \"*depth\n # get people associated with this task\n people = query_with_results(\"select person.name from (person inner join task_person_pair on person.id = task_person_pair.person) where task_person_pair.task = ?\", [task[0]])\n people_string = \", \".join(map(lambda person : person[0], people)) if len(people) > 0 else \"no one yet\"\n # output this task line\n print(\"%s[%s] %s (%s)\" % (indent, task[0], task[1], people_string))\n child_tasks = query_with_results(\"select label, description from task where parent = ?\", [task[0]])\n for child_task in child_tasks:\n _show_task(child_task, depth+1)",
"def profiler(self):\r\n\r\n class Task(object):\r\n \"Private class to nicely wrap up the profile data\"\r\n def __init__(self, block, addr):\r\n self.block = block\r\n self.addr = addr\r\n self.name = None\r\n def tidy(self, sym):\r\n self.name = sym.varfind(self.addr).name\r\n self.CPU_FRAC = sym.constfind(\"$profiler.CPU_FRACTION_FIELD\").value\r\n def __repr__(self):\r\n if self.name is None:\r\n raise Exception(\"Need to call the tidy method before using\")\r\n return \"%-50s - %2.1f %%\" % (self.name, self.block[self.CPU_FRAC]/1000)\r\n\r\n\r\n # get the head of the list and a couple of constants\r\n head = self._core.sym.varfind(\"$profiler.last_addr\").addr\r\n NULL = self._core.sym.constfind(\"$profiler.LAST_ENTRY\").value\r\n SIZE = self._core.sym.constfind(\"$profiler.STRUC_SIZE\").value\r\n NEXT = self._core.sym.constfind(\"$profiler.NEXT_ADDR_FIELD\").value\r\n\r\n # get the first address\r\n curr = self._core.dm[head]\r\n\r\n # read all the structures off the chip as fast as we can\r\n tasks = []\r\n while curr != NULL:\r\n block = self._core.dm[curr:(curr+SIZE)]\r\n tasks.append(self.Task(block, curr))\r\n curr = block[NEXT]\r\n\r\n # now fill in the other bits\r\n for t in tasks:\r\n t.tidy(self._core.sym)\r\n\r\n # finally return\r\n return tasks",
"def show_task(self, task_id):\n\n\t\ttask_id = self._validate_task_id(task_id)\n\t\tif task_id:\n\t\t\ttask = self.tasklist.find_task(task_id)\n\t\t\tif task:\n\t\t\t\tif task.priority == 'L':\n\t\t\t\t\tpriority = Fore.YELLOW + Style.BRIGHT + ' ' + task.priority + ' ' + Fore.RESET + Style.NORMAL\n\t\t\t\telif task.priority == 'M':\n\t\t\t\t\tpriority = Fore.BLUE + Style.BRIGHT + ' ' + task.priority + ' ' + Fore.RESET + Style.NORMAL\n\t\t\t\telif task.priority == 'H':\n\t\t\t\t\tpriority = Fore.RED + Style.BRIGHT + ' ' + task.priority + ' ' + Fore.RESET + Style.NORMAL\n\t\t\t\telse:\n\t\t\t\t\tpriority = ''\n\t\t\t\ttemplate = '{0:^3} {1:^3} {2:20} {3:40}'\n\t\t\t\tprint template.format('\\nID', ' Pri', 'Description', 'Note')\n\t\t\t\tprint template.format('---', '---', '--------------------',\n\t\t\t\t '----------------------------------------')\n\t\t\t\tprint template.format(task.id, priority, task.task, task.note)",
"def run_all(self):\n self.formatter.section_start('Scratch Memory Info')\n self.formatter.section_start('Per priority')\n self.analyse_per_priority()\n self.formatter.section_end()\n self.formatter.section_start('Per task')\n self.analyse_per_task()\n self.formatter.section_end()\n self.formatter.section_end()",
"def print_task_history_for_robot(self, robot_id): \n individual_buffer = self.prev_tasks[robot_id]\n print(\"Task history for robot: \" + str(robot_id) + \".:\")\n print(\"Task ids: X, Y goal: Z orientation: Deadline:\")\n if isinstance(individual_buffer, np.float) or individual_buffer == []:\n print(\"Buffer is empty for robot \" + str(robot_id) + \"!\")\n else:\n for buff_row in range(individual_buffer.shape[0]):\n task = individual_buffer[buff_row]\n print(\"%d %f, %f %f %f\" % (int(task[0]), task[1], task[2], task[3], task[4]))",
"def print_info(task, intf):\n\n rprint(\"\\n[green]*** TARGET IDENTIFIED ***[/green]\")\n print(f\"MAC ADDRESS: {target} is present on {task.host}'s {intf}\")\n rprint(\"\\n[cyan]GENERATING DETAILS...[/cyan]\")\n cdp_result = task.run(task=send_command, command=\"show cdp neighbors\")\n task.host[\"cdpinfo\"] = cdp_result.scrapli_response.genie_parse_output()\n dev_id = \"\"\n index = task.host[\"cdpinfo\"][\"cdp\"][\"index\"]\n for num in index:\n local_intf = index[num][\"local_interface\"]\n if local_intf == intf:\n dev_id = index[num][\"device_id\"]\n port_id = index[num][\"port_id\"]\n\n ver_result = task.run(task=send_command, command=\"show version\")\n task.host[\"verinfo\"] = ver_result.scrapli_response.genie_parse_output()\n version = task.host[\"verinfo\"][\"version\"]\n serial_num = version[\"chassis_sn\"]\n oper_sys = version[\"os\"]\n uptime = version[\"uptime\"]\n version_short = version[\"version_short\"]\n print(f\"DEVICE MGMT IP: {task.host.hostname}\")\n print(f\"DEVICE SERIAL NUMBER: {serial_num}\")\n print(f\"DEVICE OPERATION SYSTEM: {oper_sys}\")\n print(f\"DEVICE UPTIME: {uptime}\")\n print(f\"DEVICE VERSION: {version_short}\")\n if dev_id:\n rprint(\"[cyan]REMOTE CONNECTION DETAILS...[/cyan]\")\n print(f\"Connected to {port_id} on {dev_id}\")",
"def command(task_id, tail, wip, limit):\n if task_id:\n task = storage.get_by_id(task_id)\n\n if not task:\n click.echo(f\"Task {task_id} not found.\")\n sys.exit(1)\n\n tasks = [task]\n else:\n tasks = storage.all(limit=limit, reverse=tail, wip=wip)\n\n print_header()\n for task in tasks:\n show_task(task)",
"def run(self, refresh=True):\n\n progress = Progress(\n \"[progress.description]{task.description}\",\n TextColumn(\"[bold green]{task.fields[measures]}\", justify=\"right\"),\n TextColumn(\n \"[dark_goldenrod]Truncated CM {task.fields[conf_matrix]}\",\n justify=\"right\",\n ),\n BarColumn(),\n \"[progress.percentage]{task.percentage:>3.0f}%\",\n TimeRemainingColumn(),\n auto_refresh=False,\n )\n\n logname = self.args.logname\n print(\"Log stored at: \", logname)\n run = wandb.init(\n project=\"information-obfuscation\",\n entity=\"peiyuanl\",\n name=logname,\n config=vars(self.args),\n )\n dirname = os.path.join(\n \"../checkpoints\",\n self.args.experiment,\n self.args.task,\n self.args.model,\n logname,\n )\n Path(dirname).mkdir(parents=True, exist_ok=True)\n\n with progress:\n gender_adv_tasks = []\n age_adv_tasks = []\n occupation_adv_tasks = []\n\n # To ensure layout correctness\n\n gender_task = progress.add_task(\n \"[cyan]Gender Task\",\n total=self.args.num_epochs,\n measures={},\n conf_matrix=[],\n )\n for name in self.get_ordered_adversary_names():\n gender_adv_tasks.append(\n progress.add_task(\n f\"[cyan]Gender {name} Adversary\",\n total=self.args.finetune_epochs,\n measures={},\n conf_matrix=[],\n )\n )\n\n age_task = progress.add_task(\n \"[cyan]Age Task\",\n total=self.args.num_epochs,\n measures={},\n conf_matrix=[],\n )\n for name in self.get_ordered_adversary_names():\n age_adv_tasks.append(\n progress.add_task(\n f\"[cyan]Age {name} Adversary\",\n total=self.args.finetune_epochs,\n measures={},\n conf_matrix=[],\n )\n )\n\n occupation_task = progress.add_task(\n \"[cyan]Occupation Task\",\n total=self.args.num_epochs,\n measures={},\n conf_matrix=[],\n )\n\n for name in self.get_ordered_adversary_names():\n occupation_adv_tasks.append(\n progress.add_task(\n f\"[cyan]Age {name} Adversary\",\n total=self.args.finetune_epochs,\n measures={},\n conf_matrix=[],\n )\n )\n\n self.train_task_with_adversary(\n \"gender\",\n dirname,\n refresh=refresh,\n progress=progress,\n task=gender_task,\n adv_tasks=gender_adv_tasks,\n )\n self.train_task_with_adversary(\n \"age\",\n dirname,\n refresh=refresh,\n progress=progress,\n task=age_task,\n adv_tasks=age_adv_tasks,\n )\n self.train_task_with_adversary(\n \"occupation\",\n dirname,\n refresh=refresh,\n progress=progress,\n task=occupation_task,\n adv_tasks=occupation_adv_tasks,\n )\n\n trained_model_artifact = wandb.Artifact(\n logname + \"_model\", type=\"model\", description=\"Task and adversary models\"\n )\n trained_model_artifact.add_dir(dirname)\n run.log_artifact(trained_model_artifact)\n\n dataset_artifact = wandb.Artifact(\n logname + \"_dataset\",\n type=\"dataset\",\n description=\"Dataset used to train the models\",\n )\n dataset_artifact.add_dir(MOVIELENS_1M_DIR)\n run.log_artifact(dataset_artifact)",
"def GetTaskIPCSummary(task, show_busy = False):\n out_string = ''\n format_string = \"{0: <#020x} {1: <6d} {2: <6d} {3: <10d} {4: <20s}\"\n busy_format = \" {0: <10d} {1: <6d}\"\n proc_name = ''\n if not task.active:\n proc_name = 'terminated: '\n if task.halting:\n proc_name += 'halting: '\n pval = Cast(task.bsd_info, 'proc *')\n if int(pval) != 0:\n proc_name += str(pval.p_comm)\n elif int(task.task_imp_base) != 0 and hasattr(task.task_imp_base, 'iit_procname'):\n proc_name += str(task.task_imp_base.iit_procname)\n table_size = int(task.itk_space.is_table_size)\n out_string += format_string.format(task, pval.p_pid, task.thread_count, table_size, proc_name)\n if show_busy:\n nbusy, nmsgs = GetTaskBusyPortsSummary(task)\n out_string += busy_format.format(nbusy, nmsgs)\n return (out_string, table_size, nbusy, nmsgs)\n return (out_string, table_size)",
"def print_info(task_state, video_state):\n os.system('clear')\n\n # instructions\n blue_bg('\\n Instructions ')\n orange_fg('\\u21e6 / \\u21e8:\\t', '1 frame back/forward')\n orange_fg('\\u21e9 / \\u21e7:\\t', '10 frame back/forward')\n orange_fg('< / >:\\t', '100 frame back/forward')\n orange_fg('[ / ]:\\t', 'Previous/next task/video')\n orange_fg('Esc:\\t', 'Exit')\n orange_fg('0-9:\\t', 'Action ID')\n orange_fg('t / i:\\t', '[User Input] Jump to Task/Image ID')\n orange_fg('Space:\\t', 'Toggle text color')\n orange_fg('Tab:\\t', 'Toggle lookahead mode')\n red_fg('Note:\\t', '(a) Select image as active window (b) Turn off Caps Lock (c) Do not press shift key')\n\n # state information\n blue_bg('\\n State ')\n orange_fg('Video ID: ', '{}\\t'.format(task_state.tasks[task_state.task_idx]), newline=False)\n orange_fg('Frame ID: ', '{}'.format(video_state.get_image_name()))\n orange_fg('Image ID: ', '{}/{}'.format(video_state.image_idx + 1, video_state.num_frames))\n orange_fg('Action ID: ', video_state.get_image_label())\n\n # action dictionary and key mapping\n blue_bg('\\n Actions List ')\n for a, action in enumerate(task_state.actions):\n orange_fg('Action {}: '.format(a + 1), action)\n\n # annotations\n blue_bg('\\n Actions Record ')\n for frame_idx, (f, a) in enumerate(video_state.labels.items()):\n orange_fg('Label {}: '.format(frame_idx + 1), '{} --> {}'.format(f, a))",
"def gather_info_and_display():\n # Obtain total rss displayed in memory.stat for each group,\n # container and service.\n try:\n output_mem = pipe_command(GREP_CMD, AWK_CMD, cwd=MEMPATH)\n LOG.debug(\n 'command: %s\\n%s',\n \"grep -rs total_rss '/sys/fs/cgroup/memory/' \"\n \"| awk '$2>0{print$0}' \",\n output_mem)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n mem_info = get_meminfo()\n pt_groups = gather_groups_memory(output_mem)\n pt_cont = gather_containers_memory(output_mem)\n pt_serv = sys_service_memory()\n\n # Dump the tables out\n print('\\nPer groups memory usage:')\n\n # Get string to be printed and create list of elements separated by \\n\n list_of_table_lines = pt_groups.get_string().split('\\n')\n\n # Use the first line (+---+-- ...) as horizontal rule to insert later\n horizontal_line = list_of_table_lines[0]\n\n # Print the table, except last two lines ( \"Total\" row + final separator).\n print(\"\\n\".join(list_of_table_lines[:-2]))\n # Print separator, and finally the \"Total\" row.\n print(horizontal_line)\n print(\"\\n\".join(list_of_table_lines[-2:]))\n\n pt_namespc = prettytable.PrettyTable(\n ['Namespace',\n 'Resident Set Size (MiB)',\n ], caching=False)\n pt_namespc.align = 'l'\n pt_namespc.align['Resident Set Size (MiB)'] = 'r'\n\n print('\\nPer namespace memory usage:')\n for n_s in MEMORY['namespaces']:\n pt_namespc.add_row(\n [n_s,\n MEMORY['namespaces'][n_s],\n ])\n print(pt_namespc)\n\n print('\\nPer container memory usage:')\n print(pt_cont)\n\n print('\\nPer service memory usage:')\n print(pt_serv)\n\n base_mebib = 0.0\n k8s_system = 0.0\n k8s_addon = 0.0\n platform_memory_percent = 0.0\n\n # Calculate base memory usage (i.e., normal memory, exclude K8S and VMs)\n # e.g., docker, system.slice, user.slice\n for group in MEMORY['cgroups']:\n if group in BASE_GROUPS:\n base_mebib += float(MEMORY['cgroups'][group])\n\n # K8S platform system usage (essential) and addons usage (non-essential)\n for n_s in MEMORY['namespaces']:\n if n_s in K8S_NAMESPACE_SYSTEM:\n k8s_system += MEMORY['namespaces'][n_s]\n elif n_s in K8S_NAMESPACE_ADDON:\n k8s_addon += MEMORY['namespaces'][n_s]\n\n # Calculate platform memory usage\n platform_mebib = base_mebib + k8s_system\n\n anon_mebib = float(mem_to_mebibytes(\n mem_info['Active(anon)'] + mem_info['Inactive(anon)'])) * KBYTE\n avail_mebib = float(mem_to_mebibytes(\n mem_info['MemAvailable'])) * KBYTE\n total_mebib = float(anon_mebib + avail_mebib)\n\n anon_percent = py2_round(100 * anon_mebib / total_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n reserved_mebib = get_platform_reserved_memory()\n # Calculate platform memory in terms of percent reserved\n if reserved_mebib > 0.0:\n platform_memory_percent = py2_round(\n 100 * platform_mebib / reserved_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n pt_platf = prettytable.PrettyTable(\n ['Reserved',\n 'Platform',\n 'Base',\n 'K8s Platform system',\n 'k8s-addon'\n ], caching=False)\n pt_platf.align = 'l'\n\n pt_platf.add_row(\n [reserved_mebib,\n '{} ({}%)'.format(platform_mebib, platform_memory_percent),\n base_mebib,\n k8s_system,\n k8s_addon\n ])\n print('\\nPlatform memory usage in MiB:')\n print(pt_platf)\n\n pt_4k = prettytable.PrettyTable(\n ['Anon',\n 'Cgroup-rss',\n 'Available',\n 'Total'\n ], caching=False)\n pt_4k.align = 'l'\n\n pt_4k.add_row(\n ['{} ({}%)'.format(anon_mebib, anon_percent),\n MEMORY['cgroups']['total_rss'],\n avail_mebib,\n total_mebib\n ])\n\n print('\\n4K memory 
usage in MiB:')\n print(pt_4k)\n\n return 0",
"def task_history_detail(self, task_id):\n\n try:\n task_instance = TaskInstance.objects.get(id=task_id)\n except TaskInstance.DoesNotExist:\n return None\n\n workunits = [workunit.json_safe() for workunit in\n task_instance.workunits.all().order_by('id')]\n task_key = task_instance.task_key\n task = self.registry[task_key, None].tasks[task_key]\n return {\n 'details':task_instance.json_safe(),\n 'name':task.__name__,\n 'description':task.description,\n 'workunits':workunits\n }",
"def task_scanned(now_task):",
"def print_all_task_history(self): \n for robot in range(self.no_robots):\n print(\"Task history for robot: \" + str(robot) + \":\")\n print(\"Task ids: X, Y goal: Z orientation: Deadline:\")\n individual_buffer = self.prev_tasks[robot]\n if isinstance(individual_buffer, np.float) or individual_buffer == []:\n print(\"Buffer is empty for robot \" + str(robot) + \"!\")\n else:\n for buff_row in range(individual_buffer.shape[0]):\n task = individual_buffer[buff_row]\n print(\"%d %f, %f %f %f\" % (int(task[0]), task[1], task[2], task[3], task[4]))",
"def display_task(self, task):\n # Visual check for completed tasks\n checked = \" \"\n if task.done is True:\n checked = \"X\"\n # print a formated task\n print \"[{0}] {1}\\n*Tags* | {2} |\\n\".format(\n checked, task._entry, ' | '.join(task.tags))",
"def show(id, json):\n\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/tasks\"}\n task = estask.Task(kargs)\n try:\n dict_resp= task.show(id)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))\n\n if dict_resp == None:\n click.echo(\"fail to get task list\")\n sys.exit(1)\n\n if json:\n print(jsn.dumps(dict_resp, sort_keys=True, indent=4))\n return\n\n try:\n task.print_show(dict_resp)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))",
"def showtask(id):\n\n tasks = Task.query.filter_by(id=id)\n return render_template('home/taskshowall/dashboard_showtask.html',tasks=tasks,title=\"tasks\")",
"def ShowTaskIPC(cmd_args=None):\n if not cmd_args:\n print \"No arguments passed\"\n print ShowTaskIPC.__doc__\n return False\n tval = kern.GetValueFromAddress(cmd_args[0], 'task *')\n if not tval:\n print \"unknown arguments:\", str(cmd_args)\n return False\n print GetTaskSummary.header + \" \" + GetProcSummary.header\n pval = Cast(tval.bsd_info, 'proc *')\n print GetTaskSummary(tval) + \" \" + GetProcSummary(pval)\n print GetTaskBusyIPCSummary.header\n (summary, table_size, nbusy, nmsgs) = GetTaskBusyIPCSummary(tval)\n print summary",
"def __str__(self): \n \n print 'Task leader: ', self.task_leader\n print 'Subject_ID: ', self.subject_ID\n print 'Number of tasks: ', len(self.tasks)\n for task in self.tasks:\n print ' ', task.name \n print ' ', task.task_number \n \n \n return ''",
"def task_scanned_template(task_id):\n #now_task = self._task_api_service.get_current_task_state(task_id)\n task = _client.tasks.find_by_id(task_id)\n altered_task = self.task_scanned(task)\n\n #diff = self._task_diff_service.calculate_diff_for_tasks(now_task, altered_task)\n #updated_task = self._task_api_service.patch_task(now_task, diff)\n #self._storage_service.store_task(updated_task)",
"def __subtask_classification__(self,task_id,classification_tasks,marking_tasks,raw_classifications,aggregations):\n\n\n # go through the tools which actually have the followup questions\n for tool in classification_tasks[task_id]:\n\n # now go through the individual followup questions\n # range(len()) - since individual values will be either \"single\" or \"multiple\"\n\n for followup_question_index in range(len(classification_tasks[task_id][tool])):\n global_index = str(task_id)+\"_\" +str(tool)+\"_\"+str(followup_question_index)\n\n\n followup_classification = {}\n # this is used for inserting the results back into our running aggregation - which are based\n # on shapes, not tools\n shapes_per_cluster = {}\n\n # go through each cluster and find the corresponding raw classifications\n for subject_id in aggregations:\n if subject_id == \"param\":\n continue\n\n # has anyone done this task for this subject?\n if task_id in aggregations[subject_id]:\n # find the clusters which we have determined to be of the correct type\n # only consider those users who made the correct type marking\n # what shape did this particular tool make?\n shape = marking_tasks[task_id][tool]\n for cluster_index,cluster in aggregations[subject_id][task_id][shape + \" clusters\"].items():\n if cluster_index in [\"param\",\"all_users\"]:\n continue\n\n # what is the most likely tool for this cluster?\n most_likely_tool,_ = max(cluster[\"tool_classification\"][0].items(),key = lambda x:x[1])\n if int(most_likely_tool) != int(tool):\n continue\n\n\n # polygons and rectangles will pass cluster membership back as indices\n # ints => we can't case tuples\n if isinstance(cluster[\"cluster members\"][0],int):\n user_identifiers = zip(cluster[\"cluster members\"],cluster[\"users\"])\n else:\n user_identifiers = zip([tuple(x) for x in cluster[\"cluster members\"]],cluster[\"users\"])\n ballots = []\n\n for user_identifiers,tool_used in zip(user_identifiers,cluster[\"tools\"]):\n # did the user use the relevant tool - doesn't matter if most people\n # used another tool\n if tool_used == tool:\n\n followup_answer = raw_classifications[global_index][subject_id][user_identifiers]\n u = user_identifiers[1]\n ballots.append((u,followup_answer))\n\n followup_classification[(subject_id,cluster_index)] = deepcopy(ballots)\n shapes_per_cluster[(subject_id,cluster_index)] = shape\n\n\n followup_results = self.__task_aggregation__(followup_classification,global_index,{})\n assert isinstance(followup_results,dict)\n\n for subject_id,cluster_index in followup_results:\n shape = shapes_per_cluster[(subject_id,cluster_index)]\n # keyword_list = [subject_id,task_id,shape+ \" clusters\",cluster_index,\"followup_questions\"]\n new_results = followup_results[(subject_id,cluster_index)]\n # if this is the first question - insert\n # otherwise append\n\n if followup_question_index == 0:\n aggregations[subject_id][task_id][shape + \" clusters\"] [cluster_index][\"followup_question\"] = {}\n\n\n aggregations[subject_id][task_id][shape + \" clusters\"] [cluster_index][\"followup_question\"][followup_question_index] = new_results.values()[0]\n\n return aggregations",
"def draw_task(self):\n self.usedTask = True\n return self.target_task",
"def human_friendly_print_running_tasks(one_off, scheduled):\n all_vals = []\n name_pad = 5\n if one_off:\n for name in one_off:\n if len(name) > name_pad:\n name_pad = len(name)\n all_vals += one_off.values()\n\n if scheduled:\n for name in scheduled:\n if len(name) > name_pad:\n name_pad = len(name)\n all_vals += scheduled.values()\n\n name_pad += 1\n\n header = f'{\"Name\":<{name_pad}}| Task type | Status | Start'\n print(header)\n print('-' * (len(header) + 5))\n for task in all_vals:\n print(f'{task[\"name\"]:<{name_pad}}| {task[\"type\"].title():<10}| {task[\"status\"]:<8} | {task[\"start\"]}')",
"def flowdetail_add_task_detail(fd_id, td_id):\n return IMPL.flowdetail_add_task_detail(fd_id, td_id)",
"def processTask(self):\n #Util.set_color(Util.FOREGROUND_YELLOW | Util.FOREGROUND_INTENSITY)\n #logging.info(\"cmd : %s\", self.ExecutionTask.get_cmd())\n #logging.info(\"param : %s\", self.ExecutionTask.get_param())\n #logging.info(\"ret : %s\", str(self.ExecutionTask.get_ret()))\n #logging.info(\"ipport : %s\", self.ExecutionTask.get_ipport())\n #Util.set_color(Util.FOREGROUND_WHITE)\n\n ##############################################################\n # Process for any commands without received messages.....\n ##############################################################\n if self.ExecutionTask.get_cmd() == 'PASS' or self.ExecutionTask.get_cmd() == 'FAIL':\n logging.debug(\"result is %s\", self.ExecutionTask.get_cmd())\n self.setStatus('STOP')\n self.setTestResult(self.ExecutionTask.get_cmd())\n return\n\n if self.ExecutionTask.get_cmd() == 'r_info':\n rinfo_result = self.ExecutionTask.get_param().split('!')\n\n if len(rinfo_result) > 1:\n msg = rinfo_result[1]\n logging.debug(\"%s\", msg)\n\n self.setStatus('STOP')\n self.setTestResult(rinfo_result[0])\n return\n\n if self.ExecutionTask.get_cmd() == 'ResultCheck':\n time.sleep(5)\n self.process_ResultCheck()\n return\n\n if self.ExecutionTask.get_cmd() == 'CheckThroughput':\n time.sleep(5)\n throughputChk = StreamHandler(self.test_mngr_initr)\n chk_result = throughputChk.processStreamResults(self.ExecutionTask.get_param())\n self.setCheckResult(chk_result)\n #if 'FAIL' in chk_result:\n # self.setStatus('STOP')\n return\n\n if self.ExecutionTask.get_cmd() == 'config_multi_subresults':\n self.process_config_multi_subresults()\n return\n\n ##############################################################\n # Process for any commands with received messages......\n ##############################################################\n status = \"\"\n retDict = self.ExecutionTask.get_ret()\n recvStr = \"\"\n if self.ExecutionTask.recv:\n recvStr = self.ExecutionTask.recv.rstrip('\\r\\n')\n #print \"recv : \" + recvStr\n \n if GlobalConfigFiles.curr_prog_name == \"WMMPS\" and \"sniffer_control_subtask\" in self.ExecutionTask.get_cmd():\n logging.debug('In WMMPS, before parsing the recvStr: %s' % recvStr)\n lines = re.split('\\n', recvStr)\n for line in lines:\n if re.search(\"RESULT\", line, re.I):\n if \"FAIL\" in line:\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n logging.debug('set test result to FAIL')\n return\n if \"PASS\" in line:\n self.setTestResult('PASS')\n logging.debug('set test result to Pass')\n return\n return\n \n stitems = recvStr.split(',') \n if len(stitems) < 2:\n #logging.debug(\"Bypassing this cmd..\")\n return\n\n status = stitems[1]\n iDNB = TestScriptSymbolTable.get_value_from_sym_tab(\"iDNB\", TestScriptSymbolTable.test_script_sym_tab)\n iINV = TestScriptSymbolTable.get_value_from_sym_tab(\"iINV\", TestScriptSymbolTable.test_script_sym_tab) \n \n if iINV is None:\n iINV = 0\n \n if 'ERROR' in recvStr or 'INVALID' in recvStr and (iDNB == 0 or iDNB is None) and (iINV == 0 or iINV is None):\n #error case...\n logging.debug(\"Return ERROR or INVALID---> STOP process \")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n elif status != 'COMPLETE' and iDNB == 0 and iINV == 0:\n #incomplete case...(running?)\n logging.debug(\"Command %s not completed\", self.ExecutionTask.get_cmd())\n else:\n displayname = \"\"\n for tbd in self.test_mngr_initr.test_prog_mngr.test_prog.testbed_dev_list:\n if tbd.ctrlipaddr == self.ExecutionTask.get_ipport():\n displayname = tbd.displayname\n break\n \n if \"FAIL\" in recvStr and 
(iINV == 0 or iINV is None):\n if \"SNIFFER\" in displayname or \"sniffer\" in self.ExecutionTask.get_cmd():\n logging.info(\"Test Case Criteria Failure - Command returned FAIL\")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n\n elif self.ExecutionTask.get_cmd() == 'device_get_info':\n try:\n if displayname == '':\n self.tmsPacket.setDutDeviceInfo(recvStr)\n else:\n self.tmsPacket.setTestbedInfo(displayname, recvStr)\n\n #for validation\n self.setValidationInfo(displayname, recvStr)\n\n except OSError:\n logging.debug(\"exception -- device_get_info capi call\")\n elif self.ExecutionTask.get_cmd() == 'ca_get_version':\n self.setValidationInfo(displayname, recvStr)\n\n elif self.ExecutionTask.get_cmd() == 'sniffer_get_info':\n self.setValidationInfo('sniffer', recvStr)\n\n elif self.ExecutionTask.get_cmd() == 'sta_associate':\n time.sleep(10)\n\n if len(stitems) > 2:\n retParam = self.ExecutionTask.get_param().split(',')\n streamFlag = \"\"\n if len(retParam) > 4:\n streamFlag = retParam[3]\n\n if stitems[2] == 'streamID':\n streamHndler = StreamHandler(self.test_mngr_initr)\n logging.debug(\"stream config - streamID : %s\", stitems[3])\n if streamFlag == 'send':\n logging.debug(\"traffic config - send : streamInfo append\")\n streamPacket = streamInfo(\"%s\" % (stitems[3]), self.ExecutionTask.get_ipport(), -1, 'send',\n retParam[15], retParam[17], streamHndler.running_phase, streamHndler.RTPCount)\n streamHndler.add_streamInfo(streamPacket)\n streamHndler.RTPCount = streamHndler.RTPCount + 1\n\n elif streamFlag == 'receive':\n logging.debug(\"traffic config - receive : streamInfo append\")\n streamPacket = streamInfo(\"%s\" % (stitems[3]), self.ExecutionTask.get_ipport(), -1, 'receive',\n -1, -1, streamHndler.running_phase, -1)\n streamHndler.add_streamInfo(streamPacket)\n\n else:\n logging.debug(\"traffic config - else : \")\n\n\n\n if retParam[1] == 'Multicast':\n logging.debug(\"----MULTICAST----\")\n streamHndler.multicast = 1\n\n if self.ExecutionTask.get_cmd() != \"traffic_agent_send\":\n ret_val = \"%s\" %(stitems[3].strip())\n logging.debug(\"traffic config - ret_val : %s\", ret_val)\n setRetVal(getRetKey(retDict), ret_val)\n\n elif stitems[2].lower() == 'interfacetype':\n ret_val = (\"%s\" %(stitems[5]))\n setRetVal(getRetKey(retDict), ret_val)\n\n elif stitems[2].lower() == 'interfaceid':\n ret_val = stitems[3].split('_')[0]\n setRetVal(getRetKey(retDict), ret_val)\n\n elif self.ExecutionTask.get_cmd() == 'traffic_stop_ping':\n\n keyVal = retParam[1]\n #\"%s;%s\"%(retParam[1], self.ExecutionTask.get_ipport())\n setRetVal(keyVal, stitems[5])\n #print(\"%s = %s\" % (retParam[1], stitems[5]))\n pinginternalchk = TestScriptSymbolTable.get_value_from_sym_tab(\"PingInternalChk\", TestScriptSymbolTable.test_script_sym_tab)\n temp_key = getRetKey(self.ExecutionTask.get_ret())\n \n if \"$\" in temp_key:\n sent_reply = temp_key.split(',')\n #print \"SLIM==> ping result save...\"\n #print sent_reply[0]\n #print sent_reply[1]\n setRetVal(sent_reply[0], stitems[3])\n setRetVal(sent_reply[1], stitems[5]) \n\n setRetVal(\"$pingResp\", stitems[5])\n if pinginternalchk == '0':\n logging.debug(\"Ping Internal Check\")\n \n elif stitems[5] == '0':\n logging.debug (\"Test Case Criteria Failure - NO IP Connection -- Aborting the test\")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n else:\n if stitems[5] == '0':\n logging.debug (\"Test Case Criteria Failure - NO IP Connection -- Aborting the test\")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n else:\n if len(retDict) > 
0:\n tempKey = getRetKey(retDict)\n temp_val = tempKey.split(',')\n count = 0\n item_len = len(stitems)\n for i in temp_val:\n if item_len > count + 3:\n setRetVal(i, stitems[3+count])\n count = count + 2\n\n if self.__status == 'STOP':\n logging.debug(\"generate final result if task stops.\")\n #self.generateFinalResult()\n else:\n pass\n #logging.debug(\"Continue---\")\n return",
"def assign(self, task=None):\n if task is None:\n print(\"\\n*** Add Task ***\")\n name = input(\"Name of the task?: \")\n try:\n priority = int(input(\"Priority of the task (1-->5): \"))\n except ValueError:\n priority = 1\n steps = []\n while 1:\n step = input(\"Add step #\" + str(len(steps) + 1) + \" (Enter empty to finish): \")\n if step:\n steps.append(step)\n else:\n break\n self.tasks.append(Task(name, priority, steps))\n self.save()\n self.sort()\n print(\"*\"*16)\n else:\n self.tasks.append(task)\n self.save()\n self.sort()",
"def task_summary_dict(request, tasks, fieldlist=None):\n sumd = {}\n numeric_fields_task = ['reqid', 'corecount', 'taskpriority', 'workqueue_id']\n\n if fieldlist:\n flist = fieldlist\n else:\n flist = copy.deepcopy(const.TASK_FIELDS_STANDARD)\n\n for task in tasks:\n for f in flist:\n if 'tasktype' in request.session['requestParams'] and request.session['requestParams']['tasktype'].startswith('analy'):\n # Remove the noisy useless parameters in analysis listings\n if flist in ('reqid', 'stream', 'tag'):\n continue\n\n if 'taskname' in task and len(task['taskname'].split('.')) == 5:\n if f == 'project':\n try:\n if not f in sumd:\n sumd[f] = {}\n project = task['taskname'].split('.')[0]\n if not project in sumd[f]:\n sumd[f][project] = 0\n sumd[f][project] += 1\n except:\n pass\n if f == 'stream':\n try:\n if not f in sumd:\n sumd[f] = {}\n stream = task['taskname'].split('.')[2]\n if not re.match('[0-9]+', stream):\n if not stream in sumd[f]:\n sumd[f][stream] = 0\n sumd[f][stream] += 1\n except:\n pass\n if f == 'tag':\n try:\n if not f in sumd:\n sumd[f] = {}\n tags = task['taskname'].split('.')[4]\n if not tags.startswith('job_'):\n tagl = tags.split('_')\n tag = tagl[-1]\n if not tag in sumd[f]:\n sumd[f][tag] = 0\n sumd[f][tag] += 1\n except:\n pass\n if f in task:\n val = task[f]\n if val is None or val == '':\n val = 'Not specified'\n if val == 'anal':\n val = 'analy'\n if f not in sumd:\n sumd[f] = {}\n if val not in sumd[f]:\n sumd[f][val] = 0\n sumd[f][val] += 1\n\n # convert to ordered lists\n suml = []\n for f in sumd:\n itemd = {}\n itemd['field'] = f\n iteml = []\n kys = sumd[f].keys()\n if f != 'ramcount':\n for ky in kys:\n iteml.append({'kname': ky, 'kvalue': sumd[f][ky]})\n iteml = sorted(iteml, key=lambda x: str(x['kname']).lower())\n else:\n newvalues = {}\n for ky in kys:\n if ky != 'Not specified':\n roundedval = int(ky / 1000)\n else:\n roundedval = -1\n if roundedval in newvalues:\n newvalues[roundedval] += sumd[f][ky]\n else:\n newvalues[roundedval] = sumd[f][ky]\n for ky in newvalues:\n if ky >= 0:\n iteml.append({'kname': str(ky) + '-' + str(ky + 1) + 'GB', 'kvalue': newvalues[ky]})\n else:\n iteml.append({'kname': 'Not specified', 'kvalue': newvalues[ky]})\n iteml = sorted(iteml, key=lambda x: str(x['kname']).lower())\n itemd['list'] = iteml\n suml.append(itemd)\n suml = sorted(suml, key=lambda x: x['field'])\n return suml",
"async def peek(self, task_id):\n\n args = (task_id,)\n res = await self.conn.call(self.__funcs['peek'], args)\n return self._create_task(res.body)",
"def show_tasks(self, tasks=None, date_format=None):\n\n\t\tif not tasks:\n\t\t\ttasks = self.tasklist.tasks\n\n\t\tif len(tasks) > 0:\n\n\t\t\ttemplate = '{0:^3} {1:20} {2:^3} {3:20} {4:15} {5:20}'\n\t\t\tprint template.format('\\nID', 'Description', ' Pri', 'Due', 'Created', 'Tags')\n\t\t\tprint template.format('---', '--------------------', '---', '--------------------', '---------------',\n\t\t\t '--------------------')\n\t\t\tfor task in tasks:\n\t\t\t\tif task.priority == 'L':\n\t\t\t\t\tpriority = Fore.YELLOW + Style.BRIGHT + task.priority.center(3) + Fore.RESET + Style.NORMAL\n\t\t\t\telif task.priority == 'M':\n\t\t\t\t\tpriority = Fore.BLUE + Style.BRIGHT + task.priority.center(3) + Fore.RESET + Style.NORMAL\n\t\t\t\telif task.priority == 'H':\n\t\t\t\t\tpriority = Fore.RED + Style.BRIGHT + task.priority.center(3) + Fore.RESET + Style.NORMAL\n\t\t\t\telse:\n\t\t\t\t\tpriority = ''\n\n\t\t\t\tif task.due_date is None:\n\t\t\t\t\tdue_date = ''\n\t\t\t\telse:\n\t\t\t\t\tif date_format:\n\t\t\t\t\t\tdue_date = task.due_date.rsplit(' ', 1)[0].ljust(20)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdue_date = (arrow.get(task.due_date, task.due_date_format).humanize()).ljust(20)\n\n\t\t\t\t\tif not task.completed:\n\t\t\t\t\t\ttoday = arrow.now()\n\t\t\t\t\t\tdiff = arrow.get(task.due_date, task.due_date_format) - today\n\t\t\t\t\t\tif diff.days >= 1 and diff.seconds > 0:\n\t\t\t\t\t\t\tdue_date = Fore.CYAN + Style.BRIGHT + due_date + Fore.RESET + Style.NORMAL\n\t\t\t\t\t\telif diff.days >= 0:\n\t\t\t\t\t\t\tdue_date = Fore.BLUE + Style.BRIGHT + due_date + Fore.RESET + Style.NORMAL\n\t\t\t\t\t\telif diff.days <= 0:\n\t\t\t\t\t\t\tdue_date = Fore.RED + Style.BRIGHT + due_date + Fore.RESET + Style.NORMAL\n\n\t\t\t\tif date_format:\n\t\t\t\t\tage = (str(task.creation_date).split()[0]).ljust(15) # drop the time zone\n\t\t\t\telse:\n\t\t\t\t\tage = (arrow.get(task.creation_date, 'MM/DD/YYYY h:mm:ss A ZZ').humanize()).ljust(15)\n\n\t\t\t\tif task.note:\n\t\t\t\t\tdesc = task.task + ' *'\n\t\t\t\telse:\n\t\t\t\t\tdesc = task.task\n\n\t\t\t\tif task.completed:\n\t\t\t\t\tif task.priority:\n\t\t\t\t\t\tpriority = task.priority\n\t\t\t\t\telse:\n\t\t\t\t\t\tpriority = ''\n\t\t\t\t\ttask_id = Fore.WHITE + Style.BRIGHT + Back.WHITE + str(task.id).center(3)\n\t\t\t\t\ttags = str(task.tags) + Fore.RESET + Style.NORMAL + Back.RESET\n\t\t\t\t\tprint template.format(task_id, desc, priority, due_date, age, tags)\n\t\t\t\telse:\n\t\t\t\t\tprint template.format(task.id, desc, priority, due_date, age, task.tags)\n\n\t\t\tprint self.legend\n\t\telse:\n\t\t\tprint('\\nThere are no tasks to display!\\n')",
"def build_taskgraph(tasks):\n global taskgraph\n for task in tasks:\n if not task['id'] in taskgraph:\n taskgraph.add_node(task[\"id\"])\n data = str(task[\"DATA\"])\n tr = TaskResources(ram=int(task['RAM']), cpu=int(\n task['CPU']), hdd=int(task['HDD']))\n taskgraph.node[task[\"id\"]] = [tr, data]\n for dest in task[\"dest\"]:\n taskgraph.add_edge(task[\"id\"], dest[\"id\"])\n\n return taskgraph"
] |
[
"0.5747738",
"0.5657995",
"0.5638674",
"0.53487",
"0.533303",
"0.52720445",
"0.5245136",
"0.5212383",
"0.5205075",
"0.51718265",
"0.5147321",
"0.5135097",
"0.5119915",
"0.5110738",
"0.5069854",
"0.5043964",
"0.5008171",
"0.49889505",
"0.49718043",
"0.49696076",
"0.49006426",
"0.48946267",
"0.48636737",
"0.48529726",
"0.48506385",
"0.4839181",
"0.48245224",
"0.47956252",
"0.4794233",
"0.4789191"
] |
0.78272986
|
0
|
Creates a confusion matrix with true labels along rows and predicted labels along columns. Assumes df contains columns "label"=>True labels and "classification"=>Predicted labels.
|
def confusion_matrix(df):
rows, true_counts = np.unique(df["label"].values, return_counts=True)
    cols, predicted_counts = np.unique(df["classification"].values, return_counts=True)
matrix = np.ndarray(shape=(len(rows), len(cols)), dtype=float)
for ri, row in enumerate(rows):
for ci, col in enumerate(cols):
matrix[ri][ci] = len(df[(df.label == row) & (df.classification == col)])
return matrix, rows, cols
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def Confusion_Matrix(predicted_labels: list, actual_labels: list):\n labels = set(actual_labels)\n\n predicted_labels = list(map(custom_round, predicted_labels))\n\n matrix = pd.DataFrame(index=labels, columns=labels)\n\n matrix = matrix.fillna(0)\n\n for i in range(len(actual_labels)):\n matrix[actual_labels[i]][predicted_labels[i]] += 1\n m = matrix.values\n\n plt.matshow(m, cmap=plt.cm.Blues)\n\n for i in range(2):\n for j in range(2):\n c = m[j, i]\n plt.text(i, j, str(c), va='center', ha='center')\n\n plt.show()",
"def confusion_matrix(self,predictions,labels):\n TP = np.sum((np.round(predictions) == True) * (np.asarray(labels, dtype=bool) == True))\n FP = np.sum((np.round(predictions) == True) * (np.asarray(labels, dtype=bool) == False))\n FN = np.sum((np.round(predictions) == False) * (np.asarray(labels, dtype=bool) == True))\n TN = np.sum((np.round(predictions) == False) * (np.asarray(labels, dtype=bool) == False))\n\n return np.array([[TP,FP],[FN,TN]])",
"def confusion_matrix(\n true_labels,\n predicted_labels\n ) -> np.array:\n n_samples_true, n_samples_predicted = len(true_labels), len(predicted_labels)\n if n_samples_true != n_samples_predicted:\n raise ValueError()\n n_classes = len(set(true_labels))\n matrix = np.zeros((n_classes,n_classes))\n for i in range(len(true_labels)):\n true_label = true_labels[i]\n predicted_label = predicted_labels[i]\n matrix[predicted_label][true_label] += 1\n return matrix",
"def confusion_matrix(classifier_output, true_labels):\n\n # TODO: finish this.\n true_pos = 0.0\n true_neg = 0.0\n false_neg = 0.0\n false_pos = 0.0\n for elem1,elem2 in zip(classifier_output, true_labels):\n if(elem1==elem2) and (elem1==1):\n true_pos += 1\n elif(elem1==elem2) and (elem2!=1):\n true_neg += 1\n elif(elem1 != 1):\n false_neg +=1\n else:\n false_pos +=1\n conf_matrix = np.array([[true_pos, false_neg],[false_pos, true_neg]])\n return conf_matrix",
"def _prep_confusion_matrix(self, y_test, y_pred, labels):\n\n # Calculate confusion matrix and flatten it to a simple array\n if len(y_test.shape) == 1:\n confusion_array = metrics.confusion_matrix(y_test, y_pred).ravel()\n\n # Structure into a DataFrame suitable for Qlik\n result = []\n i = 0\n for t in labels:\n for p in labels:\n result.append([str(t), str(p), confusion_array[i]])\n i = i + 1\n self.model.confusion_matrix = pd.DataFrame(result, columns=[\"true_label\", \"pred_label\", \"count\"])\n self.model.confusion_matrix.insert(0, \"model_name\", self.model.name)\n # Handle confusion matrix format for multi-label classification\n else:\n confusion_array = metrics.multilabel_confusion_matrix(y_test, y_pred)\n result = pd.DataFrame(confusion_array.reshape(-1, 4), columns=[\"true_negative\", \"false_positive\", \"false_negative\", \"true_positive\"])\n self.model.confusion_matrix = pd.DataFrame(np.arange(len(confusion_array)), columns=[\"step\"])\n self.model.confusion_matrix = pd.concat([self.model.confusion_matrix, result], axis=1)\n self.model.confusion_matrix.insert(0, \"model_name\", self.model.name)",
"def confusion_matrix_pd(Y_true, Y_pred):\n Y_true = pd.Series([ACTIVITIES[y] for y in np.argmax(Y_true, axis=1)])\n Y_pred = pd.Series([ACTIVITIES[y] for y in np.argmax(Y_pred, axis=1)])\n return pd.crosstab(Y_true, Y_pred, rownames=['True'], colnames=['Pred'])",
"def get_confusion_matrix(labels_true: np.ndarray, labels_pred: np.ndarray) -> sparse.csr_matrix:\n check_vector_format(labels_true, labels_pred)\n mask = (labels_true >= 0) & (labels_pred >= 0)\n if np.sum(mask):\n n_labels = max(max(labels_true), max(labels_pred)) + 1\n row = labels_true[mask]\n col = labels_pred[mask]\n data = np.ones(np.sum(mask), dtype=int)\n return sparse.csr_matrix((data, (row, col)), shape=(n_labels, n_labels))\n else:\n raise ValueError('No sample with both true non-negative label and predicted non-negative label.')",
"def confusion_matrix(y_true, y_pred, labels):\n\n #Define variables\n matrix = []\n #Creates matrix dimensions\n for i in range(len(labels)):\n matrix.append([])\n for j in range(len(labels)):\n matrix[i].append(0)\n\n for i in range(len(y_true)):\n trueIndex = -1\n predIndex = -1\n #Get indexes of true and predicted values\n for j, label in enumerate(labels):\n if(label == y_true[i]):\n trueIndex = j\n if(label == y_pred[i]):\n predIndex = j\n matrix[trueIndex][predIndex] = matrix[trueIndex][predIndex] + 1\n\n return matrix",
"def confusion_df(y_true, y_pred):\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n \n # Put into pandas dataframe\n confusion = pd.DataFrame({'Predicted Negative': [tn, fn], 'Predicted Positive': [fp, tp]}, \n index=['Actual Negative', 'Actual Positive']) \n \n return confusion",
"def confusion_matrix(predict, labels, num_classes):\n # Compute the count of correct and error samples in each snr.\n conf = np.zeros([num_classes, num_classes])\n for i in range(0, len(labels)):\n j = labels[i]\n k = np.argmax(predict[i])\n conf[j, k] = conf[j, k] + 1\n\n # Compute the count of correct and error ratio in each snr.\n # =====confusion matrix=====.\n conf_norm = np.zeros([num_classes, num_classes])\n for i in range(0, num_classes):\n conf_norm[i, :] = conf[i, :] / np.sum(conf[i, :])\n\n return conf_norm",
"def get_confusion_matrix(gt_label, pred_label, class_num):\r\n index = (gt_label * class_num + pred_label).astype('int32')\r\n label_count = np.bincount(index)\r\n confusion_matrix = np.zeros((class_num, class_num))\r\n\r\n for i_label in range(class_num):\r\n for i_pred_label in range(class_num):\r\n cur_index = i_label * class_num + i_pred_label\r\n if cur_index < len(label_count):\r\n confusion_matrix[i_label, i_pred_label] = label_count[cur_index]\r\n\r\n return confusion_matrix",
"def get_confusion_matrix(gt_label, pred_label, class_num):\n index = (gt_label * class_num + pred_label).astype('int32')\n label_count = np.bincount(index)\n confusion_matrix = np.zeros((class_num, class_num))\n\n for i_label in range(class_num):\n for i_pred_label in range(class_num):\n cur_index = i_label * class_num + i_pred_label\n if cur_index < len(label_count):\n confusion_matrix[i_label, i_pred_label] = label_count[cur_index]\n\n return confusion_matrix",
"def confusionMatrix(actual, predict, truePositiveClass=''):\n classes = list(set(actual + predict))\n if len(truePositiveClass) > 0:\n id0 = classes.index(truePositiveClass)\n classes[id0] = classes[0]\n classes[0] = truePositiveClass\n cMatrix = np.zeros( (len(classes), len(classes)) )\n\n for i in range(0,len(predict)):\n ida = classes.index(actual[i])\n idp = classes.index(predict[i])\n cMatrix[ida][idp] += 1\n return cMatrix",
"def confusion_matrix_(y_true, y_hat, labels=None, df_option=False):\n matrix = []\n if labels == None:\n labels = np.unique(y_hat)\n for cat1 in labels:\n tmp = []\n for cat2 in labels:\n tmp.append(check(y_true, y_hat, cat1, cat2))\n matrix.append(tmp)\n if not df_option:\n return np.array(matrix)\n return pd.DataFrame(matrix, columns=labels, index=labels)",
"def get_confusion_matrix(y_true, y_pred):\r\n\r\n ## 3 classes\r\n TP1, TP2, TP3, FP1, FP2, FP3, TN1, TN2, TN3, FN1, FN2, FN3 = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 0 and y_pred[i] == 0:\r\n TN1 += 1\r\n elif y_true[i] == 0 and y_pred[i] != 0:\r\n FP1 += 1\r\n elif y_true[i] != 0 and y_pred[i] == 0:\r\n FN1 += 1\r\n elif y_true[i] != 0 and y_pred[i] != 0:\r\n TP1 += 1\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 1 and y_pred[i] == 1:\r\n TN2 += 1\r\n elif y_true[i] == 1 and y_pred[i] != 1:\r\n FP2 += 1\r\n elif y_true[i] != 1 and y_pred[i] == 1:\r\n FN2 += 1\r\n elif y_true[i] != 1 and y_pred[i] != 1:\r\n TP2 += 1\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 2 and y_pred[i] == 2:\r\n TN3 += 1\r\n elif y_true[i] == 2 and y_pred[i] != 2:\r\n FP3 += 1\r\n elif y_true[i] != 2 and y_pred[i] == 2:\r\n FN3 += 1\r\n elif y_true[i] != 2 and y_pred[i] != 2:\r\n TP3 += 1\r\n\r\n conf_matrix1 = [\r\n [TP1, FP1],\r\n [FN1, TN1]\r\n ]\r\n conf_matrix2 = [\r\n [TP2, FP2],\r\n [FN2, TN2]\r\n ]\r\n conf_matrix3 = [\r\n [TP3, FP3],\r\n [FN3, TN3]\r\n ]\r\n\r\n return conf_matrix1, conf_matrix2, conf_matrix3",
"def get_confusion_matrix(true_label, predictions, num_index):\n class_matrix = np.zeros(shape=(num_index, num_index))\n false_group = [[] for _ in range(num_index)]\n for idx, true, pred in zip(range(len(predictions)),true_label, predictions):\n class_matrix[true][pred] += 1\n if true != pred:\n false_group[true].append(idx)\n return class_matrix, false_group",
"def confusion_matrix(y_true, y_pred, table_show=True):\n\tFIRST_CLASS = 1\n\tSECOND_CLASS = 0\n\n\tzipped = np.array(list(zip(y_true, y_pred)))\n\ttp, fn, fp, tn = 0, 0, 0, 0\n\n\tfor y_true, y_pred in zipped:\n\t\tif y_true == y_pred and y_true == FIRST_CLASS:\n\t\t\ttp += 1\n\t\telif y_true == y_pred and y_true == SECOND_CLASS:\n\t\t\ttn += 1\n\t\telif y_true != y_pred and y_true == SECOND_CLASS:\n\t\t\tfp += 1\n\t\telse:\n\t\t\tfn += 1\n\n\tif table_show:\n\t\treturn np.array([tp, fn, fp, tn]).reshape([2,2])\n\n\treturn tp, fn, fp, tn",
"def custom_confusion_matrix(predictions, targets):\n tp, fp, fn, tn = [], [], [], []\n\n for pred, targ in zip(predictions, targets):\n for shift_pred, shift_targ in zip(pred, targ):\n if shift_pred == 1 and shift_targ == 1: # True positive\n tp.append(1)\n elif shift_pred == 1 and shift_targ == 0: # False positive\n fp.append(1)\n elif shift_pred == 0 and shift_targ == 1: # False negative\n fn.append(1)\n elif shift_pred == 0 and shift_targ == 0: # True negative:\n tn.append(1)\n\n tp_count = len(tp)\n fp_count = len(fp)\n fn_count = len(fn)\n tn_count = len(tn)\n\n conf_matrix = np.array([\n [tp_count, fp_count],\n [fn_count, tn_count]\n ])\n\n return conf_matrix",
"def confusion_matrix_(y_true, y_pred, labels=None):\r\n tp = 0\r\n tn = 0\r\n fp = 0\r\n fn = 0\r\n if labels == None:\r\n values = list(set(y_true))\r\n else:\r\n values = labels\r\n if (len(values)) != 2:\r\n return None\r\n for i, elem in enumerate(y_true):\r\n if y_pred[i] == values[1] and y_true[i] == y_pred[i]:\r\n tp += 1\r\n elif y_pred[i] == values[1] and y_true[i] != y_pred[i]:\r\n fp += 1\r\n elif y_pred[i] == values[0] and y_true[i] == y_pred[i]:\r\n tn += 1\r\n elif y_pred[i] == values[0] and y_true[i] != y_pred[i]:\r\n fn += 1\r\n matrix = np.array([[tp, fp], [fn, tn]])\r\n return matrix",
"def confusion_matrix(self, y_true=None, y_pred=None, labels=None, normalize=None, **kwargs):\n y_true, y_pred, binary, representor, decimal = self.get_processed_data(y_true, y_pred, decimal=None)\n matrix, imap, imap_count = cu.calculate_confusion_matrix(y_true, y_pred, labels, normalize)\n return matrix, imap, imap_count",
"def get_confusion_matrix(gt_label, pred_label, class_num, ignore_label): #seg_gt, seg_pred, args.num_classes\n\n pred_label = pred_label.flatten()\n if torch.is_tensor(gt_label) == True:\n gt_label = gt_label.cpu().detach().numpy()\n\n gt_label = gt_label.flatten()\n\n valid_flag = gt_label != ignore_label\n valid_inds = np.where(valid_flag)[0]\n\n pred_label = pred_label[valid_flag]\n gt_label = gt_label[valid_flag]\n\n index = (gt_label * class_num + pred_label).astype('int32') #gt_label(array([0, 1]), array([316446, 12684])) pred_label (array([0, 1], dtype=uint8), array([ 77728, 251402]))\n\n label_count = np.bincount(index)\n\n confusion_matrix = np.zeros((class_num, class_num))\n\n for i_label in range(class_num):\n for i_pred_label in range(class_num):\n cur_index = i_label * class_num + i_pred_label\n if cur_index < len(label_count):\n confusion_matrix[i_label, i_pred_label] = label_count[cur_index]\n\n return confusion_matrix",
"def confusion_matrix(y_true, y_pred, labels):\r\n matrix = []\r\n\r\n for i, yt in enumerate(labels):\r\n matrix.append([])\r\n for _, yp in enumerate(labels):\r\n matrix[i].append(0)\r\n\r\n for t, p in zip(y_true, y_pred):\r\n t_num = labels.index(t)\r\n p_num = labels.index(p)\r\n matrix[t_num][p_num] += 1\r\n\r\n return matrix",
"def create_confusion_matrix(prediction: list, true_y: list, save_location: str):\n # Create confusion matrix in pandas\n results = pd.DataFrame({\"prediction\": prediction, \"expected\": true_y})\n results = results.assign(combined=results.loc[:, \"prediction\"] + \"_\" + results.loc[:, \"expected\"])\n confusion_matrix = results.pivot_table(\n values=\"combined\",\n index=\"prediction\",\n columns=\"expected\",\n aggfunc=lambda x: len(x),\n ).fillna(0)\n\n # Create plot\n fig = plt.figure(figsize=(18, 16), dpi=80, edgecolor=\"k\")\n sns.heatmap(confusion_matrix, annot=True, fmt=\"g\")\n\n os.makedirs(save_location, exist_ok=True)\n plt.savefig(os.path.join(save_location, \"confusion_matrix.png\"))\n\n # save pandas confusion_matrix\n confusion_matrix.to_csv(os.path.join(save_location, \"confusion_matrix.csv\"), index=False)",
"def confusion_matrix(links_true, links_pred, total=None):\n\n links_true = _get_multiindex(links_true)\n links_pred = _get_multiindex(links_pred)\n\n tp = true_positives(links_true, links_pred)\n fp = false_positives(links_true, links_pred)\n fn = false_negatives(links_true, links_pred)\n\n if total is None:\n tn = numpy.nan\n else:\n if isinstance(total, pandas.MultiIndex):\n total = len(total)\n tn = true_negatives(links_true, links_pred, total)\n\n return numpy.array([[tp, fn], [fp, tn]])",
"def get_confusion_matrix(label, pred, num_class, ignore=255):\n\toutput = pred.cpu().numpy().transpose(0, 2, 3, 1)\n\t#mask = label.cpu().numpy().transpose(0, 2, 3, 1)\n\tseg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)\n\t#seg_gt = np.asarray(np.argmax(mask, axis=3), dtype=np.int)\n\tseg_gt = label.cpu().numpy()\n\n\tignore_index = seg_gt != ignore\n\tseg_gt = seg_gt[ignore_index]\n\tseg_pred = seg_pred[ignore_index]\n\n\tindex = (seg_gt * num_class + seg_pred).astype('int32')\n\tlabel_count = np.bincount(index)\n\tconfusion_matrix = np.zeros((num_class, num_class))\n\n\tfor i_label in range(num_class):\n\t\tfor i_pred in range(num_class):\n\t\t\tcur_index = i_label * num_class + i_pred\n\t\t\tif cur_index < len(label_count):\n\t\t\t\tconfusion_matrix[i_label,\n\t\t\t\t\t\t\t\t i_pred] = label_count[cur_index]\n\treturn confusion_matrix",
"def getConfusionMatrix(pred, real):\n # print pd.crosstab(pred, real) \n \n total = float(real.shape[0])\n \n tp = 0 # true positive\n tn = 0 # true negitive\n fp = 0 # false positive\n fn = 0 # false negitive\n for predicted, actual in zip(pred, real):\n if predicted == actual:\n if predicted == 1:\n tp += 1\n else:\n tn += 1\n else:\n if predicted == 1:\n fp += 1\n else:\n fn += 1\n \n\n print \"(tp, tn, fp, fn):\" , tp, tn, fp, fn\n print \"accuracy is :\", (tp+tn)/total",
"def plot_confusion_matrix(y_true, y_pred,\n labels=None,\n normalize=False,\n title=None,\n cmap='YlGn'):\n import pandas as pd\n from sklearn.metrics import confusion_matrix\n from sklearn.utils.multiclass import unique_labels\n\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n\n # Only use the labels that appear in the data\n classes = unique_labels(y_true, y_pred)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n if isinstance(labels, pd.DataFrame):\n id = []\n for i in classes:\n test = np.flatnonzero(labels.iloc[:,-1] == i)\n if test.size != 0:\n id.append(int(test))\n labels = list(labels.iloc[id, 0])\n else:\n labels = classes\n\n fig = plt.figure(figsize=(12,9))\n ax = plt.gca()\n im = ax.imshow(cm, interpolation='nearest', aspect='equal', origin='lower', cmap=plt.get_cmap(cmap))\n ax.figure.colorbar(im, ax=ax)\n # ax.xaxis.tick_top()\n\n ax.set(xticks=np.arange(len(labels)),\n yticks=np.arange(len(labels)),\n xticklabels=labels, yticklabels=labels,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax",
"def confusion_matrix(predicted, gt):\n tp = [k for k in predicted if predicted[k] and gt[k]]\n tn = [k for k in predicted if not predicted[k] and not gt[k]]\n fp = [k for k in predicted if predicted[k] and not gt[k]]\n fn = [k for k in predicted if not predicted [k] and gt[k]]\n\n return tp, tn, fp, fn",
"def cnf_matrix(df, model_name, settings):\n\n df_r = pu.reformat_df(df, \"all\", settings)\n cnf_matrix = confusion_matrix(df_r[\"target\"], df_r[\"predicted_target\"])\n\n plt.figure()\n settings.nb_classes = len(df_r[\"target\"].unique())\n class_names = [\n du.sntype_decoded(i, settings, simplify=True) for i in df_r[\"target\"].unique()\n ]\n plot_confusion_matrix(\n settings, cnf_matrix, classes=class_names, normalize=True, nameout=model_name\n )",
"def confusion_matrix(expected, predicted):\n\n retval = numpy.zeros((10,10), dtype=float)\n\n for k in range(10):\n pred_k = predicted[expected==k] # predictions that are supposed to be 'k'\n retval[:,k] = numpy.array([len(pred_k[pred_k==p]) for p in range(10)])\n retval[:,k] /= len(pred_k)\n\n return retval"
] |
[
"0.7578771",
"0.757619",
"0.7365501",
"0.7315624",
"0.7218451",
"0.7216669",
"0.7189216",
"0.71862596",
"0.71079034",
"0.71034706",
"0.7092456",
"0.7072218",
"0.7069952",
"0.70334834",
"0.70166075",
"0.70071",
"0.6982015",
"0.6978647",
"0.69725907",
"0.6929406",
"0.6928698",
"0.692471",
"0.6835931",
"0.6829143",
"0.6807047",
"0.6758385",
"0.67562145",
"0.6680646",
"0.66643244",
"0.6660101"
] |
0.8234337
|
0
|
Check whether the current process already has a CUDA context created. Returns ``False`` if the current process has no CUDA context created; otherwise returns the index of the device for which there is a CUDA context.
|
def has_cuda_context():
init_once()
if not nvmlInitialized:
return False
for index in range(device_get_count()):
handle = pynvml.nvmlDeviceGetHandleByIndex(index)
if hasattr(pynvml, "nvmlDeviceGetComputeRunningProcesses_v2"):
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses_v2(handle)
else:
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
for proc in running_processes:
if os.getpid() == proc.pid:
return index
return False
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_cuda(self):\n return self.share.is_cuda",
"def is_cuda(self):\n return self._tensor.is_cuda",
"def is_cuda(self):\n return next(self.parameters()).is_cuda",
"def is_cuda(self):\n return next(self.parameters()).is_cuda",
"def is_cuda(self):\n return next(self.parameters()).is_cuda",
"def is_cuda(self):\n return next(self.parameters()).is_cuda",
"def _current_device_index(self) -> int:\n device = PArray._get_current_device()\n if device is None: # not called inside current task\n return self._coherence.owner\n elif device.architecture == cpu:\n return CPU_INDEX\n else:\n # assume GPU here, won't check device.architecture == gpu\n # to avoid import `gpu`, which is slow to setup.\n return device.index",
"def check_cuda():\n if OS_VERSION[0] == \"Linux\":\n check_cuda_linux()\n elif OS_VERSION[0] == \"Windows\":\n check_cuda_windows()",
"def is_cuda_device(device):\n\treturn 'cuda' in str(device)",
"def detect_available():\n global _CUDA_AVAILABLE\n if _CUDA_AVAILABLE is not None: return _CUDA_AVAILABLE\n _CUDA_AVAILABLE = shell.run('{} -c \"import torch;print(torch.cuda.is_available())\"'.format(sys.executable)).strip('\\n') == 'True'\n return _CUDA_AVAILABLE",
"def is_cuda(model):\n\treturn next(model.parameters()).is_cuda",
"def checkCUDAisAvailable():\n # some possible lib names \n libnames = ('libcuda.so', 'libcuda.dylib', 'cuda.dll')\n libsOk = True\n for libname in libnames:\n try:\n cuda = ctypes.CDLL(libname)\n except OSError:\n continue\n else:\n break\n else:\n libsOk = False\n return libsOk",
"def _on_gpu(self) -> bool:\n return self._current_device_index != CPU_INDEX",
"def use_cuda():\n return torch.cuda.is_available() and os.getenv('AICROWD_CUDA', True)",
"def is_cuda(self, cuda=None):\n if cuda is not None:\n return cuda\n else:\n cuda = next(self.parameters()).data.is_cuda\n return cuda",
"def get_available_device():\n if torch.cuda.is_available():\n free_mem, device_idx = 0.0, 0\n for d in range(torch.cuda.device_count()):\n mem = torch.cuda.get_device_properties(d).total_memory - torch.cuda.memory_allocated(d)\n if mem > free_mem:\n device_idx = d\n free_mem = mem\n return torch.device(f'cuda:{device_idx}')\n else:\n return torch.device('cpu')",
"def get_gpu_count():\n\n gpu_count = 0\n\n env_cuda_devices = os.environ.get('CUDA_VISIBLE_DEVICES', None)\n if env_cuda_devices is not None:\n assert isinstance(env_cuda_devices, str)\n try:\n if not env_cuda_devices:\n return 0\n gpu_count = len(\n [x for x in env_cuda_devices.split(',') if int(x) >= 0])\n logger.info(\n 'CUDA_VISIBLE_DEVICES found gpu count: {}'.format(gpu_count))\n except:\n logger.info('Cannot find available GPU devices, using CPU now.')\n gpu_count = 0\n else:\n try:\n gpu_count = str(subprocess.check_output([\"nvidia-smi\",\n \"-L\"])).count('UUID')\n logger.info('nvidia-smi -L found gpu count: {}'.format(gpu_count))\n except:\n logger.info('Cannot find available GPU devices, using CPU now.')\n gpu_count = 0\n return gpu_count",
"def get_gpu_count():\n\n gpu_count = 0\n\n env_cuda_devices = os.environ.get('CUDA_VISIBLE_DEVICES', None)\n if env_cuda_devices is not None:\n assert isinstance(env_cuda_devices, str)\n try:\n if not env_cuda_devices:\n return 0\n gpu_count = len(\n [x for x in env_cuda_devices.split(',') if int(x) >= 0])\n logger.info(\n 'CUDA_VISIBLE_DEVICES found gpu count: {}'.format(gpu_count))\n except:\n logger.info(\n 'Cannot find available GPU devices, using CPU or other devices now.'\n )\n gpu_count = 0\n else:\n try:\n gpu_count = str(subprocess.check_output([\"nvidia-smi\",\n \"-L\"])).count('UUID')\n logger.info('nvidia-smi -L found gpu count: {}'.format(gpu_count))\n except:\n logger.info(\n 'Cannot find available GPU devices, using CPU or other devices now. (Please check whether you can execute `nvidia-smi` command.)'\n )\n gpu_count = 0\n return gpu_count",
"def gpu_availability():\n # assume if using tensorflow-gpu, then Nvidia GPU is available\n if is_built_with_cuda():\n return len(tf.config.list_physical_devices(\"GPU\")) > 0\n else:\n return False",
"def is_gpu_available() -> bool:\n return torch.cuda.is_available()",
"def check_cuda_windows():\n global CUDA_VERSION, CUDA_PATH\n cuda_keys = [key\n for key in os.environ.keys()\n if key.lower().startswith(\"cuda\") and key.lower() != \"cuda_path\"]\n if not cuda_keys:\n out_error(\"CUDA not found. See \"\n \"https://github.com/deepfakes/faceswap/blob/master/INSTALL.md#cuda \"\n \"for instructions\")\n return\n\n CUDA_VERSION = cuda_keys[0].replace(\"CUDA_PATH_V\", \"\").replace(\"_\", \".\")\n CUDA_PATH = os.environ[cuda_keys[0]]\n out_info(\"CUDA version: \" + CUDA_VERSION)",
"def device_count() -> int:\n return flow._oneflow_internal.CudaGetDeviceCount()",
"def try_gpu():\n try:\n ctx = mx.gpu()\n _ = nd.array([0], ctx=ctx)\n except:\n ctx = mx.cpu()\n return ctx",
"def try_gpu():\n try:\n ctx = mx.gpu()\n _ = nd.array([0], ctx=ctx)\n except:\n ctx = mx.cpu()\n return ctx",
"def _get_or_create_context_uncached(self, devnum):\n with self._lock:\n # Try to get the active context in the CUDA stack or\n # activate GPU-0 with the primary context\n with driver.get_active_context() as ac:\n if not ac:\n return self._activate_context_for(0)\n else:\n # Get primary context for the active device\n ctx = self.gpus[ac.devnum].get_primary_context()\n # Is active context the primary context?\n if USE_NV_BINDING:\n ctx_handle = int(ctx.handle)\n ac_ctx_handle = int(ac.context_handle)\n else:\n ctx_handle = ctx.handle.value\n ac_ctx_handle = ac.context_handle.value\n if ctx_handle != ac_ctx_handle:\n msg = ('Numba cannot operate on non-primary'\n ' CUDA context {:x}')\n raise RuntimeError(msg.format(ac_ctx_handle))\n # Ensure the context is ready\n ctx.prepare_for_use()\n return ctx",
"def get_device_of(self, tensor):\n if not tensor.is_cuda:\n return -1\n else:\n return tensor.get_device()",
"def try_gpu():\r\n try:\r\n ctx = mx.gpu()\r\n _ = nd.array([0], ctx=ctx)\r\n except mx.base.MXNetError:\r\n ctx = mx.cpu()\r\n return ctx",
"def return_free_GPU():\r\n if torch.cuda.is_available():\r\n gpu_num = torch.cuda.device_count()\r\n device = torch.device('cuda:{}'.format(gpu_num-1))\r\n print('Using GPU:[{}]/[{}] for training...'.format(gpu_num-1,gpu_num-1))\r\n return device\r\n \r\n raise ValueError('GPU not available for training. Check CUDA env with function \"check_cuda_env\"')",
"def try_gpu(x):\n global _GPUS_EXIST\n if _GPUS_EXIST:\n try:\n return x.cuda()\n except (AssertionError, RuntimeError):\n print('No GPUs detected. Sticking with CPUs.')\n _GPUS_EXIST = False\n return x",
"def cuda_test():\n # This flag enable the inbuilt cudnn auto-tuner\n torch.backends.cudnn.benchmark = True\n\n print('\\n__Python VERSION :', sys.version)\n print('__pyTorch VERSION :', torch.__version__)\n print('__CUDA VERSION : ', torch.version.cuda)\n print('__CUDNN VERSION : ', torch.backends.cudnn.version())\n print('__Number CUDA Devices : ', torch.cuda.device_count())\n print('__Devices : ')\n\n call([\"nvidia-smi\", \"--format=csv\", \n \"--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free\"])\n\n print('Active CUDA Device: GPU', torch.cuda.current_device())\n print ('Available devices ', torch.cuda.device_count())\n print ('Current cuda device ', torch.cuda.current_device())\n\n return torch.cuda.is_available()"
] |
[
"0.6835757",
"0.6608968",
"0.6581112",
"0.6581112",
"0.6581112",
"0.6581112",
"0.6325496",
"0.6288792",
"0.6280778",
"0.6250953",
"0.6104318",
"0.60323524",
"0.5995625",
"0.59909964",
"0.5980723",
"0.59791404",
"0.59286463",
"0.58897775",
"0.5851873",
"0.5831781",
"0.5828129",
"0.5800692",
"0.57809967",
"0.57809967",
"0.57554877",
"0.5733312",
"0.57265353",
"0.56860185",
"0.5659291",
"0.56334645"
] |
0.84578913
|
0
|
Tests that NewickTokenizer without arg raises ValueError.
|
def test_no_arg(self):
self.assertRaises(ValueError, NewickTokenizer)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_no_start_open_parens(self):\n self.assertRaises(ValueError, NewickTokenizer, newick='hi')",
"def test_unclosed(self):\n nt = NewickTokenizer(newick='(a,(b,c)')\n self.assertRaises(ValueError, nt.tokens)",
"def test_label(self):\n nt = NewickTokenizer(newick=\"(a\\n'b',(b,c),(d,e));\")\n self.assertRaises(ValueError, nt.tokens)",
"def test_extra_closed(self):\n nt = NewickTokenizer(newick='(a,(b,c)));')\n self.assertRaises(ValueError, nt.tokens)",
"def test_open_closed(self):\n nt = NewickTokenizer(newick='(a,(),(d,e));')\n self.assertRaises(ValueError, nt.tokens)",
"def test_extra_suffix(self):\n nt = NewickTokenizer(newick='(a,(b,c));suffix')\n self.assertRaises(ValueError, nt.tokens)",
"def test_sans_comma(self):\n nt = NewickTokenizer(newick='(a,(b,c)(d,e));')\n self.assertRaises(ValueError, nt.tokens)",
"def test_unexpected_comma(self):\n nt = NewickTokenizer(newick='(a,(b,c),,(d,e));')\n self.assertRaises(ValueError, nt.tokens)",
"def test_unclosed_comment(self):\n nt = NewickTokenizer(newick='(a,(b,c),[(d,e));')\n self.assertRaises(ValueError, nt.tokens)",
"def test_comma_bef_semicolon(self):\n nt = NewickTokenizer(newick='(a,(b,c),(d,e)),;')\n self.assertRaises(ValueError, nt.tokens)",
"def test_takes_a_token_file(self):\n with self.assertRaises(TypeError):\n # TypeError: __init__() missing 1 required\n # positional argument: 'token_filename'\n\n # Intentionally no args; pylint: disable=E1120\n BadgrLite()",
"def test_odd_quotes(self):\n content = \"((h_ ,'p)h p,g()[],:_)hpg;\"\n tok = NewickTokenizer(StringIO(content))\n self.assertRaises(Exception, tok.tokens)\n content = \"((h_ ,'p')h p,'g()[]',:_')hpg;\"\n tok = NewickTokenizer(StringIO(content))\n self.assertRaises(Exception, tok.tokens)",
"def test_invalid_tokens(self):\n self.assertTrue(1 + 1)",
"def test05(self):\n\n s = \"a\"\n with self.assertRaises(ParserException):\n t = parse_newick(s)",
"def test_peek_none(self):\n nt = NewickTokenizer(newick='(a,(b,c));')\n nt.tokens()\n self.assertIsNone(nt._peek())",
"def test_tt_split(self):\n\n bad_arg1 = 5\n bad_arg2 = \"It's a string!\"\n ld = Lambdata(self.df)\n\n ld.tt_split(bad_arg1)\n ld.tt_split(bad_arg2)\n self.assertRaises(ValueError)",
"def test_lexing_error():\n with pytest.raises(SyntaxError):\n lex._lexer(None, None)._load_text(\"TEST\")._throw_lexing_error()",
"def test_tokenize_zero_value(self):\n\t\ttext = ['a', 'brown', 'cat', 'chases', 'mice']\n\t\tmin_words = max_words = 0\n\t\targs = [text, min_words, max_words]\n\t\tfunc = sentiment.tokenize\n\t\tself.assertRaises(sentiment.SentimentException,\n\t\t\tlambda y: list(func(*y)), args)",
"def setUp(self):\n tokenizer = TokenizerWrapper('openai/clip-vit-base-patch32')\n\n # 'Goodbye' in kiswahili\n tokenizer.add_placeholder_token('kwaheri', num_vec_per_token=1)\n # 'how much' in kiswahili\n tokenizer.add_placeholder_token('ngapi', num_vec_per_token=4)\n\n with self.assertRaises(AssertionError):\n tokenizer.add_placeholder_token('hello', num_vec_per_token=1)\n\n self.tokenizer = tokenizer",
"def test_false_lower(self):\n self.assertRaises(ParseException, self.flag.parseString, 'n')",
"def test_nonCallable(self):\n us = WrongTypedOptions()\n argV = \"--barwrong egg\".split()\n self.assertRaises(TypeError, us.parseOptions, argV)",
"def test_lexing_error_evaluate_1():\n with pytest.raises(SyntaxError):\n lex._lexer([lex_premades.float], [])._load_text(\"TEST\").evaluate()",
"def test_no_brackets_in_words():\n raise SkipTest\n assert_raises(ParseError, grammar['word'].parse, ']')",
"def test_call_invalid_input(self):\r\n with self.assertRaises(ValueError):\r\n self.estimator1(42, confidence_level=0)",
"def test_label_with_non_string():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n LINE bigfoot\n Y classification COUNT\n SPLIT BY classification\n X date BY YEAR LABEL 1.2\n \"\"\"\n\n # TODO Make this exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)",
"def test_noun_chunks_is_parsed(fi_tokenizer):\n doc = fi_tokenizer(\"Tämä on testi\")\n with pytest.raises(ValueError):\n list(doc.noun_chunks)",
"def test_lexing_error_evaluate_2():\n with pytest.raises(SyntaxError):\n lex._lexer([lex_premades.float], [], \"[[LINE]] [[TEXT]]\")._load_text(\n \"TEST\"\n ).evaluate()",
"def test_step_with_non_number():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n HISTOGRAM bigfoot STEP \"hello there\" X temperature_mid\n \"\"\"\n\n # TODO make exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)",
"def test_tokenize_no_min_no_max(self):\n\t\tobj_ut = list(sentiment.tokenize(\n\t\t\t['a', 'brown', 'cat', 'chases', 'mice']))\n\t\tself.assertEqual(obj_ut, \n\t\t\t[('a', 0), ('brown', 1), ('cat', 2), ('chases', 3), ('mice', 4)])",
"def test_unused_token_is_valid(self):\n assert self.token.is_valid()"
] |
[
"0.80845463",
"0.8068447",
"0.79711384",
"0.7760871",
"0.77430147",
"0.7444971",
"0.7422035",
"0.73452514",
"0.70215565",
"0.6891094",
"0.6746495",
"0.668125",
"0.6609283",
"0.64874065",
"0.6484285",
"0.63872623",
"0.6362746",
"0.6270539",
"0.62485063",
"0.62016183",
"0.6114688",
"0.6057465",
"0.6057451",
"0.6056509",
"0.60154545",
"0.60078883",
"0.5975853",
"0.5967271",
"0.5967016",
"0.5966016"
] |
0.9196363
|
0
|
Tests that a Newick string with extra close parens generates errors.
|
def test_extra_closed(self):
nt = NewickTokenizer(newick='(a,(b,c)));')
self.assertRaises(ValueError, nt.tokens)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_bad_parens(self):\r\n with self.assertRaisesRegexp(Exception, 'Unknown parenthesis'):\r\n preview.LatexRendered('x^2', parens='not parens')",
"def test_no_start_open_parens(self):\n self.assertRaises(ValueError, NewickTokenizer, newick='hi')",
"def test_parse_newick(self):\r\n # confirm that it works without escaped names\r\n t1 = ('((((tax7:0.1,tax3:0.2):.98,tax8:.3, tax4:.3):.4,'\r\n '((tax1:0.3, tax6:.09):0.43,tax2:0.4):0.5):.2,'\r\n '(tax9:0.3, endbigtaxon:.08));')\r\n expected1 = ['tax7', 'tax3', 'tax8', 'tax4', 'tax1',\r\n 'tax6', 'tax2', 'tax9', 'endbigtaxon']\r\n self.assertEqual(set(parse_newick(t1).getTipNames()), set(expected1))\r\n self.assertEqual(set([tip.Name for tip in parse_newick(t1).tips()]),\r\n set(expected1))\r\n\r\n # throw some screwed up names in\r\n t2 = ('((((tax7:0.1,tax3:0.2):.98,tax8:.3, \\'tax4\\':.3):.4,'\r\n \"(('ta_______ x1':0.3, tax6:.09):0.43,tax2:0.4):0.5):.2,\"\r\n '(tax9:0.3, endbigtaxon:.08));')\r\n expected2 = ['tax7', 'tax3', 'tax8', 'tax4', 'ta_______ x1',\r\n 'tax6', 'tax2', 'tax9', 'endbigtaxon']\r\n self.assertEqual(set(parse_newick(t2).getTipNames()), set(expected2))\r\n self.assertEqual(set([tip.Name for tip in parse_newick(t2).tips()]),\r\n set(expected2))",
"def test_invalid_pseudo_close(self):\n\n with self.assertRaises(SyntaxError):\n sv.compile('div)')\n\n with self.assertRaises(SyntaxError):\n sv.compile(':is(div,)')",
"def test_unclosed(self):\n nt = NewickTokenizer(newick='(a,(b,c)')\n self.assertRaises(ValueError, nt.tokens)",
"def test_open_closed(self):\n nt = NewickTokenizer(newick='(a,(),(d,e));')\n self.assertRaises(ValueError, nt.tokens)",
"def test_with_bad_lines():\n bad_lines = [\n ')(',\n 'has (open paren only',\n 'has closed) paren only',\n '(two (open one closed)',\n 'two (closed one) open)',\n ]\n for line in bad_lines:\n with pytest.raises(ParseError):\n nest_parens(line)",
"def test_invalid_pseudo_open(self):\n\n with self.assertRaises(SyntaxError):\n sv.compile(':is(div')",
"def test_sqpp_paren_close_only_failure(self):\n self.failUnlessRaises(SyntaxError,\n self.parser.parse_query,\"expr)\")",
"def test05(self):\n\n s = \"a\"\n with self.assertRaises(ParserException):\n t = parse_newick(s)",
"def test_func(input, expected):\n from parenthetics import paren\n assert paren(input) == expected",
"def test_balanced(self):\n self.assertTrue(is_closed('(())') == 0)",
"def test_sqpp_paren_open_only_failure(self):\n self.failUnlessRaises(SyntaxError,\n self.parser.parse_query,\"(expr\")",
"def test_unclosed_comment(self):\n nt = NewickTokenizer(newick='(a,(b,c),[(d,e));')\n self.assertRaises(ValueError, nt.tokens)",
"def test_brackets_tall(self):\r\n self._each_parens(r'\\left[x^y\\right]', 'x^y', '[', tall=True)",
"def test04(self):\n\n s = \"a;\"\n t = parse_newick(s);\n self.assertTrue(self.isTree(t) and t.label == \"a\" and t.isLeaf())",
"def test_brackets_success(self):\n found = False\n pyint = Interpreter()\n try:\n pyint.run(code=BF_CORRECT_BRACK)\n except SystemExit: \n found = True\n self.assertFalse(found)",
"def test_simple(self):\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'p'},\n {'edge_info': '1', 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n content = '((h,p)hp:1,g)hpg;'\n self._do_test(content, exp)\n content = '((h,[pretest]p[test][posttest])hp,g)hpg;'\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP,\n 'comments': ['pretest', 'test', 'posttest'], 'label': 'p'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n self._do_test(content, exp)",
"def test_label(self):\n nt = NewickTokenizer(newick=\"(a\\n'b',(b,c),(d,e));\")\n self.assertRaises(ValueError, nt.tokens)",
"def test_parentheses_expr(self):\n self.assertEqual(\"(a(b(c(d))))\", grammar._PARENTHESES_EXPR.parseString(\"(a(b(c(d))))\")[0])",
"def test_parens_tall(self):\r\n self._each_parens(r'\\left(x^y\\right)', 'x^y', '(', tall=True)",
"def test_extra_suffix(self):\n nt = NewickTokenizer(newick='(a,(b,c));suffix')\n self.assertRaises(ValueError, nt.tokens)",
"def test_paren_corner(self):\n text = self.text\n pm = self.get_parenmatch()\n\n text.insert('insert', '# this is a commen)')\n pm.paren_closed_event('event')\n\n text.insert('insert', '\\ndef')\n pm.flash_paren_event('event')\n pm.paren_closed_event('event')\n\n text.insert('insert', ' a, *arg)')\n pm.paren_closed_event('event')",
"def test_parens_disabled():\n assert get_html(PARENS_TEXT) == \"<p>I am a ((parens)) example.</p>\"",
"def test_paren_styles(self):\n text = self.text\n pm = self.get_parenmatch()\n for style, range1, range2 in (\n ('opener', ('1.10', '1.11'), ('1.10', '1.11')),\n ('default',('1.10', '1.11'),('1.10', '1.11')),\n ('parens', ('1.14', '1.15'), ('1.15', '1.16')),\n ('expression', ('1.10', '1.15'), ('1.10', '1.16'))):\n with self.subTest(style=style):\n text.delete('1.0', 'end')\n pm.STYLE = style\n text.insert('insert', 'def foobar(a, b')\n\n pm.flash_paren_event('event')\n self.assertIn('<<parenmatch-check-restore>>', text.event_info())\n if style == 'parens':\n self.assertTupleEqual(text.tag_nextrange('paren', '1.0'),\n ('1.10', '1.11'))\n self.assertTupleEqual(\n text.tag_prevrange('paren', 'end'), range1)\n\n text.insert('insert', ')')\n pm.restore_event()\n self.assertNotIn('<<parenmatch-check-restore>>',\n text.event_info())\n self.assertEqual(text.tag_prevrange('paren', 'end'), ())\n\n pm.paren_closed_event('event')\n self.assertTupleEqual(\n text.tag_prevrange('paren', 'end'), range2)",
"def test_missing_close_paren_vcat():\n svl_string = \"\"\"\n (\n HISTOGRAM bigfoot X temperature_mid\n HISTOGRAM bigfoot Y temperature_high\n \"\"\"\n\n # TODO Add a more specific exception here.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)",
"def test_is_lonely(g):\n assert is_lonely(g)",
"def test_missing_close_paren():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n CONCAT(\n HISTOGRAM bigfoot X temperature_mid\n HISTOGRAM bigfoot X temperature_high\n \"\"\"\n\n with pytest.raises(SvlMissingParen):\n parse_svl(svl_string)",
"def test_unsupported_op(self):\n\n NIE = NotImplementedError\n self.assertRaises(NIE, self.table.where, 'c_complex128 > 0j')\n self.assertRaises(NIE, self.table.where, 'c_string + b\"a\" > b\"abc\"')",
"def test_calculate_three_operations_in_bracket(self):\n result = self.calcuate.calcuate('(2x2+1+7)x3-2')\n expected_result = \"34\"\n self.assertEqual(expected_result, result)"
] |
[
"0.73407525",
"0.6944178",
"0.68909293",
"0.6720148",
"0.6655094",
"0.65632147",
"0.65201473",
"0.6426458",
"0.63206464",
"0.62966466",
"0.6227613",
"0.62115365",
"0.6188436",
"0.6125099",
"0.60763067",
"0.6053083",
"0.6040547",
"0.6035202",
"0.6031782",
"0.6004939",
"0.59827435",
"0.59489757",
"0.5933089",
"0.59145766",
"0.59020543",
"0.58231366",
"0.579635",
"0.5755104",
"0.5717028",
"0.5697987"
] |
0.6950155
|
1
|
Tests behavior of peek when there are no more tokens.
|
def test_peek_none(self):
nt = NewickTokenizer(newick='(a,(b,c));')
nt.tokens()
self.assertIsNone(nt._peek())
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _peek(self):\n return self.token_list[self._current]",
"def test_cant_peek_empty(empty_deque):\n assert empty_deque.peek() is None",
"def peek(self):\n pass",
"def peek(self):\n pass",
"def test_peek_returns_value(full_deque):\n assert full_deque.peek() == 1",
"def peek(self):",
"def peek(self):\n token = self.next()\n self.push(token)\n return token",
"def peek_token(self):\n tok = next(self)\n self.unpop_token(tok)\n return tok",
"def test_peek_return_none_when_empty(new_empty_deque):\n assert new_empty_deque.peek() is None",
"def test_cant_peekleft_empty(empty_deque):\n assert empty_deque.peekleft() is None",
"def test_peek_empty_list():\n from deque import Deque\n dq = Deque()\n assert dq.peek() is None",
"def test_peek_shows_value_of_current_head(dq_3):\n assert dq_3.peek_left() == 'snine'",
"def test_peek_shows_value_of_current_tail(dq_3):\n assert dq_3.peek() == 'ragtime'",
"def test_peekleft_returns_value(full_deque):\n assert full_deque.peekleft() == 3",
"def test_that_peek_returns_tail(filled_deque):\n assert filled_deque.peek() == 1",
"def peek(self):\n if self._peeked is not None:\n return self._peeked\n else:\n # This must echo the implementation in next()\n line = self.proc.stdout.readline()\n self._peeked = line[:-1] if line else None\n return self._peeked",
"def test_peekleft_return_none_when_empty(new_empty_deque):\n assert new_empty_deque.peekleft() is None",
"def test_peek_empty():\n test_stack = stack.Stack()\n\n with pytest.raises(stack.StackEmptyError):\n test_stack.peek()",
"def peek(self):\n raise NotImplementedError",
"def test_peek_left_empty_list():\n from deque import Deque\n dq = Deque()\n assert dq.peek_left() is None",
"def peek(self):\r\n self.stack.peek(0)",
"def _is_at_end(self):\n return self._peek().token_type == scanner.TokenType.EOF",
"def test_peek_on_small_stack(small_stack):\n assert small_stack.peek().val == 3",
"def test_that_peekleft_returns_head(filled_deque):\n assert filled_deque.peekleft() == 4",
"def consume(self):\n if self.next():\n self.tokens.pop(0)",
"def _expect_empty(self):\n\n item = self._lexer.get_token()\n if item:\n line_no, token = item\n raise ParseError(u\"Unexpected token '{0}' on line {1}\"\n .format(common.from_utf8(token.strip()), line_no))",
"def _peek(self):\n if self.current_position + 1 >= len(self.stream):\n return \"\\0\"\n return self.stream[self.current_position + 1]",
"def has_next(self) -> bool:\n return self.peek() != self.sentinel",
"def Peek(self):\n return self._peek",
"def peek(self) -> T:\n pass"
] |
[
"0.6841655",
"0.67698705",
"0.67421055",
"0.67218506",
"0.6719078",
"0.6685085",
"0.6640774",
"0.6601139",
"0.65826523",
"0.6487924",
"0.6462702",
"0.6457941",
"0.6455371",
"0.6413746",
"0.6394823",
"0.6385544",
"0.6372492",
"0.6347741",
"0.6288089",
"0.6286338",
"0.6266971",
"0.6256711",
"0.62223125",
"0.6195957",
"0.61345905",
"0.61318487",
"0.6109076",
"0.60374707",
"0.60037595",
"0.59898245"
] |
0.772483
|
0
|
Tests that unclosed [] comments generate errors.
|
def test_unclosed_comment(self):
nt = NewickTokenizer(newick='(a,(b,c),[(d,e));')
self.assertRaises(ValueError, nt.tokens)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_unicode_comments(self):\n self._do_test(\n ['Hi there!', 'This is an element in a list of strings.'],\n ensure_binary(dedent(u\"\"\"\n [\n 'Hi there!',\n # This is a comment with ‘sneaky‘ unicode characters.\n 'This is an element in a list of strings.',\n # This is a comment with an obvious unicode character ☺.\n ]\n \"\"\").strip()),\n )",
"def test_comments_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.comments(-1)\n\n with self.assertRaises(AssertionError):\n self.resource.comments('foo', 'bar')",
"def test_comments(self):\n\n comment_str = \"# This is a comment\\n# This is another comment\"\n doc = parser.parse(comment_str)\n\n self.assertEqual(len(doc.children()), 2)",
"def test_double_comment(self):\n self.compare_tokens(\n \"## papān libbi[belly] (already in gloss, same spelling)\\n\",\n ['COMMENT', 'ID', 'NEWLINE']\n )",
"def check_comments(self, args):\n\n for submission in args.comments:\n if any(char.isalpha() for char in submission[1]) \\\n or self._illegal_chars.search(submission[1]) != None:\n raise ValueError",
"def test_no_brackets_in_words():\n raise SkipTest\n assert_raises(ParseError, grammar['word'].parse, ']')",
"def test_dislike_a_comment(self):\n self.base_test()",
"def test_comments(self):\n fp = FilePath(self.mktemp())\n fp.setContent('something\\n#commented\\ncool')\n self.assertEqual(list(inventoryReader(fp.path)), ['something', 'cool'])",
"def test_lint_fail_nocomment(self, style):\n with ExpectedException(RuntimeError):\n run_linter_throw(\"path/to/file\",\n \"aabb\\nbbcc\",\n style,\n whitelist=[\"headerblock/filename\"])",
"def test_code_comment_success(self):\n found = False\n pyint = Interpreter()\n try:\n pyint.run(code=BF_CODE_COMMENT)\n except SystemExit: \n found = True\n self.assertFalse(found)",
"def test_does_not_match_block_comments(self):\n\n comment = dedent(\"\"\"\\\n --[[\n Hello, World!\n --]]\"\"\")\n\n script = rbxmx.ScriptElement(source=comment)\n first_comment = script.get_first_comment()\n\n assert first_comment is None",
"def test_like_a_comment(self):\n self.base_test()",
"def test_invalid_pseudo_open(self):\n\n with self.assertRaises(SyntaxError):\n sv.compile(':is(div')",
"def _parse_comment(i, doc):\n\n if doc[i].strip() != \"/**\":\n raise ParseFailure(i, \"Expected beginning of block comment\")\n\n e = i + 1\n while e < len(doc) and doc[e].strip() != \"*/\":\n e += 1\n\n return e + 1, [x.rstrip() for x in doc[i + 1: e]]",
"def test_badXPathNoClosingBracket(self):\n exc = self.assertRaises(SyntaxError, XPathQuery, \"\"\"//bar[@attrib1\"\"\")\n self.assertTrue(exc.msg.startswith(\"Trying to find one of\"),\n (\"SyntaxError message '%s' doesn't start with \"\n \"'Trying to find one of'\") % exc.msg)",
"def test_issue_delete_comment_deprecated(self):\n pass",
"def test_issue_edit_comment_deprecated(self):\n pass",
"def test_no_specification_error():\n try:\n bad_arm = survey.get_spiral_slice()\n except SyntaxError:\n assert True\n else:\n assert False",
"def test_missing_docstring(a, b): # noqa: D213, D407",
"def test_with_bad_lines():\n bad_lines = [\n ')(',\n 'has (open paren only',\n 'has closed) paren only',\n '(two (open one closed)',\n 'two (closed one) open)',\n ]\n for line in bad_lines:\n with pytest.raises(ParseError):\n nest_parens(line)",
"def test_spelling_mistake_inside_comment(self, style):\n with ExpectedException(LinterFailure):\n self._spellcheck_lint(\"{s}\\n{m} splelling mistake{e}\", style)",
"def tests_comment(self):\n\n for domain in self.domains:\n expected = None\n\n data = f\"# {domain}\"\n actual = File(data).get_converted()\n\n self.assertEqual(expected, actual)",
"def test_missing_delim(self):",
"def test_no_spelling_error_comments_starting_inside_string(self, style):\n result = self._spellcheck_lint(\"{s}{e}\\n\\\"{s}\\\" splelling\",\n style)\n self.assertTrue(result)",
"def test_md027_bad_block_quote_fenced_first():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md027\", \"bad_block_quote_fenced_first.md\"\n )\n supplied_arguments = [\n \"--disable-rules\",\n \"md031\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = (\n f\"{source_path}:2:3: \"\n + \"MD027: Multiple spaces after blockquote symbol (no-multiple-space-blockquote)\"\n )\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )",
"def test_comments(self):\n comment_example = os.path.join(here, 'comment-example.ini')\n manifest = ManifestParser(manifests=(comment_example,))\n self.assertEqual(len(manifest.tests), 8)\n names = [i['name'] for i in manifest.tests]\n self.assertFalse('test_0202_app_launch_apply_update_dirlocked.js' in names)",
"def test_md010_bad_configuration_code_blocks():\n\n # Arrange\n scanner = MarkdownScanner()\n supplied_arguments = [\n \"--set\",\n \"plugins.md010.code_blocks=bad\",\n \"--strict-config\",\n \"scan\",\n \"test/resources/rules/md004/good_list_asterisk_single_level.md\",\n ]\n\n expected_return_code = 1\n expected_output = \"\"\n expected_error = (\n \"BadPluginError encountered while configuring plugins:\\n\"\n + \"The value for property 'plugins.md010.code_blocks' must be of type 'bool'.\"\n )\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )",
"def test_issue_get_comments(self):\n pass",
"def test_md027_bad_block_quote_thematic():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md027\", \"bad_block_quote_thematic.md\"\n )\n supplied_arguments = [\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = (\n f\"{source_path}:3:3: \"\n + \"MD027: Multiple spaces after blockquote symbol (no-multiple-space-blockquote)\"\n )\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )",
"def test_md027_bad_block_quote_fenced_last_plus_one():\n\n # Arrange\n scanner = MarkdownScanner()\n supplied_arguments = [\n \"--disable-rules\",\n \"md031\",\n \"scan\",\n \"test/resources/rules/md027/bad_block_quote_fenced_last_plus_one.md\",\n ]\n\n expected_return_code = 1\n expected_output = (\n \"test/resources/rules/md027/bad_block_quote_fenced_last_plus_one.md:4:4: \"\n + \"MD027: Multiple spaces after blockquote symbol (no-multiple-space-blockquote)\"\n )\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )"
] |
[
"0.68163717",
"0.6574816",
"0.650323",
"0.62634087",
"0.62355363",
"0.6234494",
"0.6229091",
"0.6162364",
"0.6082871",
"0.6055238",
"0.60307336",
"0.6022331",
"0.5991022",
"0.59871715",
"0.5957869",
"0.59467596",
"0.59367037",
"0.5929556",
"0.59011656",
"0.58821195",
"0.586643",
"0.58566856",
"0.5811457",
"0.58098024",
"0.58091295",
"0.5792733",
"0.57902235",
"0.57795185",
"0.5768423",
"0.5768193"
] |
0.7163699
|
0
|
Test that terminating ; is not preceded by ,.
|
def test_comma_bef_semicolon(self):
nt = NewickTokenizer(newick='(a,(b,c),(d,e)),;')
self.assertRaises(ValueError, nt.tokens)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _check_semicolon_else_skip(self, symbol):\n if symbol.type == self.scanner.SEMICOLON:\n pass\n else:\n self._display_syntax_error(\"semicolon\")\n # Skip to semicolon at end of line\n self._semicolon_skipper()",
"def test_missing_delim(self):",
"def test_unexpected_comma(self):\n nt = NewickTokenizer(newick='(a,(b,c),,(d,e));')\n self.assertRaises(ValueError, nt.tokens)",
"def validate_csv_seq(sequence):\n if sequence.find(',') != -1 or sequence.find(';') != -1:\n return True\n else:\n return False",
"def _semicolon_skipper(self):\n while (\n not self._is_semicolon(\n self.symbol)) and (\n not self._is_eof(\n self.symbol)):\n self.symbol = self.scanner.get_symbol()\n if self._is_eof(self.symbol):\n # In case file ends prematurely\n pass\n return None",
"def check_for_extra_semicolon(sql_str):\r\n try:\r\n if len(sql_str.split(';')) > 2:\r\n raise sqlErr(\"Extra Semi-Colon Detected!\")\r\n except Exception as e:\r\n raise e",
"def _is_semicolon(self, symbol):\n if symbol.type == self.scanner.SEMICOLON:\n return True\n else:\n return False",
"def test_sans_comma(self):\n nt = NewickTokenizer(newick='(a,(b,c)(d,e));')\n self.assertRaises(ValueError, nt.tokens)",
"def _check_semicolon(line_index, input_line):\n global _total_lines_of_code\n if input_line.endswith(';'):\n _code_lines.append(line_index)\n _total_lines_of_code += 1",
"def is_skippable(line: str) -> bool:\n return len(line) == 0 or line[0] == ';'",
"def _is_comma(self, symbol):\n if symbol.type == self.scanner.COMMA:\n return True\n else:\n return False",
"def test_get_separator_semi():\n # GIVEN a line with commas as delimiter\n line = \"one;two;three\"\n # WHEN getting the separator\n sep = samplesheet.get_separator(line)\n # THEN assert None is returned\n assert sep is None",
"def check_comma(text):\n err = \"style-guide.serial-comma\"\n msg = \"Use serial comma after penultimate item.\"\n regex = \"\\,\\s[a-zA-Z0-9]*\\sand\\s\"\n\n return existence_check(text, [regex], err, msg, require_padding=False)",
"def isseparator(token):\n\n # Token is a comma\n return token == \",\"",
"def comma_detector(self) -> bool:\n curr_pos = self.fileobject.tell()\n line = self.nextline()\n comma = False\n # A bold presumption, perhaps\n if ',' in line:\n comma = True\n self.fileobject.seek(curr_pos)\n return comma",
"def fix_multiallelics(cell):\n\tsplitters = [',', ';']\n\tif any(splitter in str(cell) for splitter in splitters):\n\t\tcell = re.split(';|,', cell)[0]\n\treturn cell",
"def test_onlySeparator(self):\n result = self.parser.parse(\"d\")\n\n self.assertIsNone(result)",
"def test_string_ends_with_sep():\n assert my_splitter(\"aaa,bbb,\", \",\") == [\"aaa\", \"bbb\", \"\"]",
"def test_separators_only():\n assert my_splitter(\",ad,\", \"ad\") == [\",\", \",\"]",
"def checkForNewLineAndSemiColon(string):\r\n\t\tnew_string = \"\"\r\n\t\tfor i in string:\r\n\t\t\tif i != \"\\n\" and i != \";\":\r\n\t\t\t\tnew_string += i\r\n\t\treturn new_string",
"def test_get_separator_unknown():\n # GIVEN a line with commas as delimiter\n line = \"one.two.three\"\n # WHEN getting the separator\n sep = samplesheet.get_separator(line)\n # THEN assert None is returned\n assert sep is None",
"def line_is_valid(line):\n if '-' in map(lambda item: item.strip(), line.strip().split(\";\")):\n return False\n else:\n return True",
"def test_delimiter_empty(self):\n with self.assertRaisesRegexp(Exception, \"delimiter\"):\n self.context.frame.import_csv(self.dataset,\n self.schema, delimiter=\"\")",
"def validate_semicolon(s):\n positions = identify_create_table_view(s)\n validation = {\n \"exit_code\": 0,\n \"total_lines\": count_lines(s)\n }\n if len(positions) > 1:\n validation[\"exit_code\"] = 1\n validation[\"val_lines\"] = positions\n return validation",
"def test_get_separator_csv():\n # GIVEN a line with commas as delimiter\n line = \"one,two,three\"\n # WHEN getting the separator\n sep = samplesheet.get_separator(line)\n # THEN assert comma is returned\n assert sep == \",\"",
"def has_inside(block):\n return comma(block[0]) if block else '#N/A'",
"def test_basic_dummy_no_match(self):\n self.assertLines([\"a\", \";\", \"examples/dummy.csv\"], [\"a,b,c,a_xfind\", \"1,2,3,\",])",
"def require_separator(self):\n return False",
"def test_quoted_edge_info(self):\n exp = ['(', 'a', ',', '(', 'b', ',', 'c', ')', ':', '4', ',',\n '(', 'd', ',', 'e', ')', ')', ';']\n self._do_test(\"(a,(b,c):'4',(d,e));\", exp)",
"def no_or_clauses (self,phrase):\r\n \r\n for x in phrase:\r\n if isinstance(x,list) and x[0] == '@':\r\n return False\r\n return True"
] |
[
"0.67393607",
"0.66642225",
"0.65089947",
"0.6488513",
"0.6395795",
"0.63606805",
"0.63165295",
"0.6296124",
"0.62489814",
"0.6237657",
"0.62362576",
"0.61840713",
"0.60308063",
"0.6019244",
"0.59941745",
"0.5912011",
"0.5773467",
"0.5769748",
"0.57470113",
"0.5730535",
"0.5710548",
"0.5620942",
"0.5617853",
"0.5500743",
"0.5497357",
"0.5494523",
"0.54612607",
"0.5453294",
"0.5447519",
"0.54457957"
] |
0.699548
|
0
|
Test that unquoted labels with a newline character generate errors.
|
def test_label(self):
nt = NewickTokenizer(newick="(a\n'b',(b,c),(d,e));")
self.assertRaises(ValueError, nt.tokens)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def CheckLabel(Line): \n for i in Line:\n if i == '\\t': #can't detect leading tabs, stops at the first \\ \n raise InputError(Line,\"malformed input\") \n elif i != ' ':\n break",
"def test_parse_simple_quote_with_newline(self):\n with self.assertRaisesRegexp(Exception, re.escape(\"the quote included a newline (0x0a) character\")):\n api.parse_quote(\" Quote with \\n character - Author\", simple_format=True)",
"def test_label_with_non_string():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n LINE bigfoot\n Y classification COUNT\n SPLIT BY classification\n X date BY YEAR LABEL 1.2\n \"\"\"\n\n # TODO Make this exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)",
"def test_newlines(self):\n self.assertValue({\n \"foo\": \"something\\nwith\\nnewlines\",\n },\n \"foo: something_with_newlines\\n\")",
"def test_unclosed(self):\n nt = NewickTokenizer(newick='(a,(b,c)')\n self.assertRaises(ValueError, nt.tokens)",
"def test_assert_does_not_contain_newline(self):\n\n with self.assertRaisesRegexp(Exception, re.escape(\"the quote included a newline (0x0a) character\")):\n api._assert_does_not_contain(\"There is a newline (\\n) in this string.\", \"\\n\", \"quote\")",
"def test_extra_closed(self):\n nt = NewickTokenizer(newick='(a,(b,c)));')\n self.assertRaises(ValueError, nt.tokens)",
"def test_no_start_open_parens(self):\n self.assertRaises(ValueError, NewickTokenizer, newick='hi')",
"def test_missing_delim(self):",
"def test_unclosed_comment(self):\n nt = NewickTokenizer(newick='(a,(b,c),[(d,e));')\n self.assertRaises(ValueError, nt.tokens)",
"def test_is_valid_label_value_valid_input():\n # test valid label values\n assert is_valid_label_value(value=None)\n assert is_valid_label_value(value=\"\")\n assert is_valid_label_value(value=\"l0L\")\n assert is_valid_label_value(value=\"L-l\")\n assert is_valid_label_value(value=\"L.L\")\n assert is_valid_label_value(value=\"l_4\")\n assert is_valid_label_value(value=\"4-you\")\n assert is_valid_label_value(value=\"You.2\")",
"def test_is_valid_label_value_invalid_input():\n # test length violations\n assert not is_valid_label_value(value=f\"{'v' * 64}\") # value too long\n # test first character violations (not alphanum)\n assert not is_valid_label_value(value=\"-\")\n assert not is_valid_label_value(value=\"-a\")\n assert not is_valid_label_value(value=\".b\")\n assert not is_valid_label_value(value=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_label_value(value=\"a-\")\n assert not is_valid_label_value(value=\"b.\")\n assert not is_valid_label_value(value=\"c \")\n assert not is_valid_label_value(value=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_label_value(value=\"a$$a\")\n assert not is_valid_label_value(value=\"b b\")",
"def test_nl_separated_values(self, test_input, expected, sc):\n assert sc.add(test_input) == expected",
"def test_is_valid_label_key_invalid_input():\n # test length violations\n assert not is_valid_label_key(key=None) # Too short\n assert not is_valid_label_key(key=\"\") # Too short\n assert not is_valid_label_key(key=f\"{'p' * 254}/n\") # prefix too long\n assert not is_valid_label_key(key=\"/n\") # prefix too short\n assert not is_valid_label_key(key=\"p/\") # name too short\n assert not is_valid_label_key(key=\"a\" * 254) # name too long\n assert not is_valid_label_key(key=f\"d/{'b'*64}\") # name too long\n # test first character violations (not alphanum)\n assert not is_valid_label_key(key=\"-a\")\n assert not is_valid_label_key(key=\".b\")\n assert not is_valid_label_key(key=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_label_key(key=\"a-\")\n assert not is_valid_label_key(key=\"b.\")\n assert not is_valid_label_key(key=\"c \")\n assert not is_valid_label_key(key=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_label_key(key=\"a$$a\")\n assert not is_valid_label_key(key=\"b b\")",
"def test_is_valid_label_key_valid_input():\n # test valid label keys\n assert is_valid_label_key(key=\"l0l\")\n assert is_valid_label_key(key=\"l0L\")\n assert is_valid_label_key(key=\"L-l\")\n assert is_valid_label_key(key=\"L.L\")\n assert is_valid_label_key(key=\"4-you\")\n assert is_valid_label_key(key=\"you.2\")\n assert is_valid_label_key(key=\"p/n\")\n assert is_valid_label_key(key=\"prefix/you.2\")\n assert is_valid_label_key(key=\"how.sad/to-see\")\n assert is_valid_label_key(key=f\"{'d'*253}/{'n'*63}\")",
"def test_with_bad_lines():\n bad_lines = [\n ')(',\n 'has (open paren only',\n 'has closed) paren only',\n '(two (open one closed)',\n 'two (closed one) open)',\n ]\n for line in bad_lines:\n with pytest.raises(ParseError):\n nest_parens(line)",
"def test_open_closed(self):\n nt = NewickTokenizer(newick='(a,(),(d,e));')\n self.assertRaises(ValueError, nt.tokens)",
"def test_false_lower(self):\n self.assertRaises(ParseException, self.flag.parseString, 'n')",
"def test_with_newline(self):\n self.assertEqual(escapespaces('Hi there\\n'),\n 'Hi there<br />')",
"def test_blank_line(self):\n assert yaenv.core.EnvVar('\\n') is None\n assert yaenv.core.EnvVar(' \\t ') is None\n assert yaenv.core.EnvVar('# comment') is None",
"def test_suggest_newline(self, style):\n def get_replacement():\n \"\"\"Get replacement for lack of trailing newline.\"\"\"\n run_linter_throw(\"path/to/file\",\n \"{s}\\n{m} Text{e}\",\n style,\n whitelist=[\"file/newline_last_char\"])\n\n exception = self.assertRaises(LinterFailure, get_replacement)\n self.assertEqual(replacement(exception),\n (2, style_format(\"{m} Text{e}\\n\", style)))",
"def test_missing_multiple_tokens(self):\n self.helper_test_evaluate_raises(\n 'A or (B and (C and not D))',\n expected_exc_type=MissingSymbolError,\n A=0,\n D=1)",
"def test_whitespace(self):\n self.assertRaises(ParseException, self.flag.parseString, ' ')",
"def test_unexpected_comma(self):\n nt = NewickTokenizer(newick='(a,(b,c),,(d,e));')\n self.assertRaises(ValueError, nt.tokens)",
"def test_incompatible_rules():\n\n grammar = \"\"\"\n A: B | C;\n B: 'enumeration';\n C: value=INT;\n \"\"\"\n with pytest.raises(TextXSyntaxError):\n metamodel_from_str(grammar)",
"def test_odd_quotes(self):\n content = \"((h_ ,'p)h p,g()[],:_)hpg;\"\n tok = NewickTokenizer(StringIO(content))\n self.assertRaises(Exception, tok.tokens)\n content = \"((h_ ,'p')h p,'g()[]',:_')hpg;\"\n tok = NewickTokenizer(StringIO(content))\n self.assertRaises(Exception, tok.tokens)",
"def test_extra_suffix(self):\n nt = NewickTokenizer(newick='(a,(b,c));suffix')\n self.assertRaises(ValueError, nt.tokens)",
"def test_label_not_in_config(self):\n with self.assertRaisesRegex(\n ValueError, 'The config \\'Label\\' field should contain the positive'\n ' class label.'):\n self.ci.run_with_metadata(\n indexed_inputs=self.dataset.indexed_examples,\n model=self.model,\n dataset=self.dataset,\n )",
"def test_quoted_empty_values(parallel, read_basic):\n if parallel:\n pytest.xfail(\"Multiprocessing can fail with quoted fields\")\n text = 'a b c\\n1 2 \" \\n \"'\n table = read_basic(text, parallel=parallel)\n assert table[\"c\"][0] == \"\\n\" # empty value masked by default",
"def testBadLine(self):\n\n self.assertRaises(\n ValueError,\n tools._trackInfo,\n 'not a real line'\n )"
] |
[
"0.664799",
"0.6574299",
"0.6504542",
"0.65013915",
"0.6482029",
"0.6461377",
"0.64312947",
"0.64140993",
"0.63935316",
"0.63755876",
"0.6157155",
"0.61145914",
"0.61092746",
"0.6062495",
"0.6057764",
"0.6028911",
"0.59751755",
"0.596203",
"0.58908796",
"0.5872407",
"0.5825744",
"0.58065206",
"0.5801703",
"0.57907397",
"0.57850224",
"0.57801986",
"0.5759965",
"0.57592714",
"0.5757779",
"0.5756906"
] |
0.7631913
|
0
|
Test that quoted edge info generates no error and yields a string edge length.
|
def test_quoted_edge_info(self):
exp = ['(', 'a', ',', '(', 'b', ',', 'c', ')', ':', '4', ',',
'(', 'd', ',', 'e', ')', ')', ';']
self._do_test("(a,(b,c):'4',(d,e));", exp)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_info_whitespace():\n pytest.raises(SaltInvocationError, mac_group.info, \"white space\")",
"def test_general_subset_invalid_space():\n pass",
"def test_handles_empty_string(self):\n result = encode_run_length(\"\")\n self.assertEqual(result, \"\")",
"def test_should_raise_error_if_inconsistent_properties(self):\r\n edge_spec = {\r\n 'type': 'edge',\r\n 'label': 'subscribed_to',\r\n 'primary_key': 'undefined'\r\n }\r\n\r\n results = [self.spec_parser.parse_statement(edge_spec)]\r\n results += [self.spec_parser.parse_statement(self.property_spec)]\r\n with self.assertRaises(ValueError):\r\n self.spec_parser.validate(results)",
"def test_lowquoteSanity(self):\n for s in stringSubjects:\n self.assertEqual(s, irc.lowDequote(irc.lowQuote(s)))",
"def test_generalized_linestring_is_valid():\n road = query_row(db_conf, 'osm_roads', 7201)\n # geometry is not simple, but valid\n # check that geometry 'survives' simplification\n assert not road['geometry'].is_simple, road['geometry'].wkt\n assert road['geometry'].is_valid, road['geometry'].wkt\n assert road['geometry'].length > 1000000\n road = query_row(db_conf, 'osm_roads_gen0', 7201)\n # but simplified geometies are simple\n assert road['geometry'].is_valid, road['geometry'].wkt\n assert road['geometry'].length > 1000000\n road = query_row(db_conf, 'osm_roads_gen1', 7201)\n assert road['geometry'].is_valid, road['geometry'].wkt\n assert road['geometry'].length > 1000000",
"def test_wrong_length():\n with pytest.raises(ValueError):\n SplineTerm(0, lam=[0, 1, 2], penalties=['auto', 'auto'])",
"def test_random_quote(self):\n quote = Quote().print()\n self.assertTrue(type(quote) == str)",
"def bad_underline_length(): # noqa: D416",
"def test_trailing_data(self):",
"def test_random_series_quote(self):\n quote = Quote().print_series_quote()\n self.assertTrue(type(quote) == str)",
"def test_material():\n with expected_protocol(\n DCXS,\n [\n (\"IALongNam\", None),\n (\"n\", \"ALongNam\"),\n ],\n ) as inst:\n inst.material = \"ALongNameWhichGetsTruncated\"\n assert \"ALongNam\" == inst.material",
"def test_buoy_format2():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_2)\n assert str(err_info.value) == 'Input length incorrect, see instructions'",
"def test_edge_driver_errors(self):\n\n with pytest.raises(\n ValueError, match=r\"Encountered invalid entry in 'reward', expected 2-bit bitstrings.\"\n ):\n qaoa.edge_driver(Graph([(0, 1), (1, 2)]), [\"10\", \"11\", 21, \"g\"])\n\n with pytest.raises(\n ValueError,\n match=r\"'reward' cannot contain either '10' or '01', must contain neither or both.\",\n ):\n qaoa.edge_driver(Graph([(0, 1), (1, 2)]), [\"11\", \"00\", \"01\"])\n\n with pytest.raises(ValueError, match=r\"Input graph must be a nx.Graph\"):\n qaoa.edge_driver([(0, 1), (1, 2)], [\"00\", \"11\"])",
"def test_to_knx_too_long(self):\n with self.assertRaises(ConversionError):\n DPTString().to_knx(\"AAAAABBBBBCCCCx\")",
"def test_extra_space(self):\n command_line = [\"filesystem\", \"create\", \"pn\", \"fn\", '--size=\"312 GiB\"']\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)",
"def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')",
"def test_utils_random_string(self, tcex, string_length):\n result = tcex.utils.random_string(string_length=string_length)\n assert (\n len(result) == string_length\n ), f'The length of the string {len(result)} != {string_length}'",
"def test_get_garbage(self):\n tests = {\n '<>': ('<>', 2, ''),\n '<random characters>': ('<random characters>', 19, 'random characters'),\n '<<<>': ('<<<>', 4, '<<'),\n '<!>>': ('<!>>', 4, ''),\n '<!>>>>': ('<!>>', 4, ''),\n '<{!>}>': ('<{!>}>', 6, '{}'),\n '<!!>': ('<!!>', 4, ''),\n '<!!!>>': ('<!!!>>', 6, ''),\n '<{o\"i!a,<{i<a>': ('<{o\"i!a,<{i<a>', 14, '{o\"i,<{i<a'),\n '<<!>>,<!!>': ('<<!>>', 5, '<')\n }\n\n for args, answers in tests.items():\n message = \"Input was '{}'\".format(args)\n garbage_chars = []\n actual_garbage, actual_index = get_garbage(args, 0, garbage_chars)\n\n expected_garbage = answers[0]\n expected_index = answers[1]\n expected_garbage_str = answers[2]\n\n actual_garbage_str = ''\n for c in garbage_chars:\n actual_garbage_str += c\n\n self.assertEqual(actual_garbage, expected_garbage, msg=(\"Garbage: \" + message))\n self.assertEqual(actual_index, expected_index, msg=(\"Index: \" + message))\n self.assertEqual(actual_garbage_str, expected_garbage_str, msg=(\"Garbage str: \" + message))",
"def test_gibberish_magnitude(self):\n command_line = [\"filesystem\", \"create\", \"pn\", \"fn\", '--size=\"carbonGiB\"']\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)",
"def test_arg_astringUnmatchedQuotes(self):\n self.assertRaises(imap4.IllegalClientResponse,\n self.server.arg_astring, b'\"open')",
"def test_missing_delim(self):",
"def test_qual_seq_length(self):\r\n self.assertRaises(KeyError, convert_fastq, self.fasta_file_path,\r\n self.noseq_qual_file_path, output_directory=self.output_dir)",
"def test_random_programming_quote(self):\n quote = Quote().print_programming_quote()\n self.assertTrue(type(quote) == str)",
"def test_case_06_side_too_big(self):\n self.__assert_equals_test_case([(195, 10, 201)], 'InvalidInput')",
"def expected_rubbish(self):",
"def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError",
"def test_exceptionGreaterThan64kEncoded(self) -> None:\n # The exception text itself is not greater than 64k but SNOWMAN\n # encodes to 3 bytes with UTF-8 so the length of the UTF-8 encoding of\n # the string representation of this exception will be greater than 2\n # ** 16.\n raise Exception(\"\\N{SNOWMAN}\" * 2 ** 15)",
"def test_size_returns_length(dq_3):\n assert dq_3.size() == 3",
"def test_mismatched_quote(self):\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('double=\"missing-closing')\n assert 'Mismatched quotes' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('double=missing-opening\"')\n assert 'Mismatched quotes' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar(\"single='missing-closing\")\n assert 'Mismatched quotes' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar(\"single=missing-opening'\")\n assert 'Mismatched quotes' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar(\"both=\\\"mismatched'\")\n assert 'Mismatched quotes' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar(\"both='mismatched\\\"\")\n assert 'Mismatched quotes' in str(err.value)"
] |
[
"0.5995964",
"0.59914005",
"0.586099",
"0.5849451",
"0.58113915",
"0.5799296",
"0.57758814",
"0.5663836",
"0.5662087",
"0.56569314",
"0.5623805",
"0.5614479",
"0.56139755",
"0.5608006",
"0.5597547",
"0.5584426",
"0.5580746",
"0.55754036",
"0.55740744",
"0.5573375",
"0.55626535",
"0.5561368",
"0.5556137",
"0.55275756",
"0.55196327",
"0.5512805",
"0.550662",
"0.5500418",
"0.54804945",
"0.5474504"
] |
0.7028773
|
0
|
Part of the testing harness. Writes `content`, then parses and compares to `expected`.
|
def _do_test(self, content, expected):
self.assertEqual(list(NewickTokenizer(StringIO(content))), expected)
self.assertEqual(list(NewickTokenizer(newick=content)), expected)
fp = path_map.next_unique_scratch_filepath('tok_test')
try:
write_to_filepath(content, fp)
self.assertEqual(list(NewickTokenizer(filepath=fp)), expected)
finally:
try:
os.unlink(fp)
except: # pragma: no cover
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_content():\n # PREPARE\n expected_f = open(\n 'tests/pages/expected/stepanenkoartem-github-io.html',\n 'rb',\n )\n expected_dom = BeautifulSoup(\n expected_f.read(),\n 'html.parser',\n )\n\n actual_f = open(\n os.path.join(TEMP_DIR, path.for_page(URL)),\n )\n actual_dom = BeautifulSoup(actual_f, 'html.parser')\n\n # CHECK\n assert actual_dom.decode() == expected_dom.decode()",
"def test_001(compiler, temp_builds_dir):\n filepath = temp_builds_dir.join(\"compiler_write_001\")\n\n content = \"\"\"Some sample latin text\"\"\"\n\n compiler.write_content(content, filepath.strpath)\n\n # Read file to compare\n with io.open(filepath.strpath, \"r\", encoding=\"utf-8\") as f:\n result = f.read()\n\n assert content == result",
"def write(self, content):\n ...",
"def _do_test(self, content, expected):\n nt = NewickTokenizer(stream=StringIO(content))\n e = [deepcopy(i) for i in NewickEventFactory(tokenizer=nt)]\n self.assertEqual(e, expected)\n new_e = []\n\n def append_to_new_e(event):\n new_e.append(deepcopy(event))\n\n NewickEventFactory(newick=content, event_handler=append_to_new_e)\n self.assertEqual(new_e, expected)",
"def do_test_expected(self):\n self.maxDiff = None\n\n # We currently don't throw any exceptions in Writer, so this\n # this is always false\n if 'error' in test_src:\n self.assertRaises(test_src['error'], yamlish.dumps,\n test_src['in'], options)\n else:\n logging.debug(\"out:\\n%s\", textwrap.dedent(test_src['out']))\n want = yaml.load(textwrap.dedent(test_src['out']))\n logging.debug(\"want:\\n%s\", want)\n with tempfile.NamedTemporaryFile() as test_file:\n tested_function(test_src['in'], test_file)\n test_file.seek(0)\n got_str = test_file.read()\n logging.debug(\"got_str = %s\", got_str)\n got = yaml.load(got_str)\n self.assertEqual(got, want, \"Result matches\")",
"def test_getContent(self):\n self.assertEquals(\n self.successResultOf(self.testObject.getContent()), 'somecontent')",
"def test_write_to_file():\n from scraper import write_to_file\n encoding = 'utf-8'\n write_to_file(TEST_FILE, TEST_CONTENT, encoding)\n assert True",
"def write(self, content):\n pass",
"def test_str(self):\n self.assertEqual(str(self.content), \"Test Content\")",
"def test_write_file():\n filename = 'test'\n content = 'hello!'\n\n write_file(content, filename)\n assert read_file(filename) == 'hello!'",
"def _test(\n self, data, content, expected, _inventory, _get_from_object, _get_source_module_data\n ):\n cache = common.load_cache(os.path.join(_CURRENT_DIRECTORY, \"fake_project\", \"objects.inv\"))\n\n _inventory.return_value = cache\n _get_source_module_data.return_value = data\n _get_from_object.return_value = \"\"\n\n directive = common.make_mock_directive(content)\n nodes = directive.run()\n\n self.assertNotEqual([], nodes)\n self.assertEqual(1, len(nodes))\n self.assertEqual(expected, nodes[0].astext())",
"def test_valid_file(self):\n settings = get_settings(\n PANDOC_EXTENSIONS=PANDOC_EXTENSIONS, PANDOC_ARGS=PANDOC_ARGS\n )\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n output, metadata = pandoc_reader.read(source_path)\n\n self.assertEqual(\n (\n \"<p>This is some valid content that should pass.\"\n \" If it does not pass we\"\n \" will know something is wrong.</p>\\n\"\n ),\n output,\n )\n\n self.assertEqual(\"Valid Content\", str(metadata[\"title\"]))\n self.assertEqual(\"My Author\", str(metadata[\"author\"]))\n self.assertEqual(\"2020-10-16 00:00:00\", str(metadata[\"date\"]))",
"def test_something(self):\n self.assertEqual(\n b\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n<Foo>bar</Foo>\"\"\",\n self.successResultOf(to_xml(tags.Foo(\"bar\"))),\n )",
"def test_given_content(self):\r\n\r\n file_path = os.path.dirname(__file__)\r\n html_content = open(os.path.join(file_path, 'readable_sample.html'))\r\n\r\n read = ReadContent.parse(html_content)\r\n\r\n self.assertTrue(\r\n read.status == 1, \"The status is 1: \" + str(read.status))\r\n self.assertTrue(not read.is_image(), \"The content is not an image\")\r\n self.assertTrue(read.content is not None, \"Content should not be none\")\r\n self.assertTrue(\r\n 'Bookie' in read.content,\r\n u\"The word Bookie is in the content: \" + unicode(read.content))",
"def test_parse(self): \n\n results = self.parser.parse()\n self.assertEqual(results, test_case_data['parse_output'])",
"def test_encoded_to_raw_conversion(self):\n settings = get_settings(\n PANDOC_EXTENSIONS=PANDOC_EXTENSIONS, PANDOC_ARGS=PANDOC_ARGS\n )\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(\n TEST_CONTENT_PATH, \"valid_content_with_raw_paths.md\"\n )\n output, metadata = pandoc_reader.read(source_path)\n\n # Setting this so that assert is able to execute the difference\n self.maxDiff = None # pylint: disable=invalid-name\n\n self.assertEqual(\n (\n \"<p>This is some valid content that should pass.\"\n \" If it does not pass we will know something is wrong.</p>\\n\"\n \"<p>Our fictitious internal files are available\"\n ' <a href=\"{filename}/path/to/file\">at</a>:</p>\\n'\n \"<p>Our fictitious static files are available\"\n ' <a href=\"{static}/path/to/file\">at</a>:</p>\\n'\n \"<p>Our fictitious attachments are available\"\n ' <a href=\"{attach}path/to/file\">at</a>:</p>\\n'\n ),\n output,\n )\n\n self.assertEqual(\n \"Valid Content with Fictitious Raw Paths\", str(metadata[\"title\"])\n )\n self.assertEqual(\"My Author\", str(metadata[\"author\"]))\n self.assertEqual(\"2020-10-16 00:00:00\", str(metadata[\"date\"]))",
"def test_Encoder_encode_decode_content(self):\n c = Content(\"/data/test\", \"HelloWorld\")\n ec = self.encoder1.encode(c)\n dc = self.encoder1.decode(ec)\n self.assertTrue(c == dc)",
"def _test_text(self, url, content, buffering):\n # read(-1), readable(), seekable()\n with wfdb.io._url.openurl(url, \"r\", buffering=buffering) as tf:\n self.assertTrue(tf.readable())\n self.assertTrue(tf.seekable())\n self.assertEqual(tf.read(), content)\n self.assertEqual(tf.read(), \"\")\n\n # read(10)\n with wfdb.io._url.openurl(url, \"r\", buffering=buffering) as tf:\n result = \"\"\n while True:\n chunk = tf.read(10)\n result += chunk\n if len(chunk) < 10:\n break\n self.assertEqual(result, content)\n\n # readline(), seek(), tell()\n with wfdb.io._url.openurl(url, \"r\", buffering=buffering) as tf:\n result = \"\"\n while True:\n rpos = tf.tell()\n tf.seek(0)\n tf.seek(rpos)\n chunk = tf.readline()\n result += chunk\n if len(chunk) == 0:\n break\n self.assertEqual(result, content)",
"def test_training_content(self):\n self.assertIsInstance(self.one_off_training.content, str)\n self.assertEqual(self.one_off_training.content, \"1h d'endurance\")",
"def test_validate_and_write_emit(req):\n handle = StringIO()\n req.get('http://fake/', text=u'This is a sequence file, honest.')\n r = requests.get('http://fake/')\n output = StringIO()\n config = core.Config()\n config.emit = output.write\n core._validate_and_write(r, handle, 'FAKE', config)\n\n assert output.getvalue() == u'.\\n'\n assert handle.getvalue() == u'This is a sequence file, honest.'",
"def expect_output(self, file, parse_json=False):\n contents = self._data_file(file)\n patcher = mock.patch('sys.stdout', new_callable=StringIO)\n output = patcher.start()\n yield\n patcher.stop()\n if parse_json:\n self.assertEqual(json.loads(output.getvalue()),\n json.loads(contents))\n else:\n self.assertEqual(output.getvalue().split('\\n'), contents.split('\\n'))",
"def compare(self, output, expected, ignore_imports=True):\n if ignore_imports:\n output = self.strip_future_imports(output)\n expected = self.strip_future_imports(expected)\n if isinstance(output, bytes) and not isinstance(expected, bytes):\n output = output.decode('utf-8')\n if isinstance(expected, bytes) and not isinstance(output, bytes):\n expected = expected.decode('utf-8')\n self.assertEqual(order_future_lines(output.rstrip()),\n expected.rstrip())",
"def testOk(real, sections):\n log_file.write('testOk entered\\n')\n log_file.flush()\n tested = False\n real = real.strip().replace('\\r\\n', '\\n')\n\n if 'EXPECT' in sections:\n tested = True\n wanted = sections['EXPECT'].strip().replace('\\r\\n', '\\n')\n if real != wanted:\n raise DiffError(real, wanted)\n\n if 'EXPECTF' in sections or 'EXPECTREGEX' in sections:\n tested = True\n if 'EXPECTREGEX' in sections:\n wanted = sections['EXPECTREGEX']\n else:\n wanted = sections['EXPECTF']\n wanted_re = wanted.strip().replace('\\r\\n', '\\n')\n if 'EXPECTF' in sections:\n wanted_re = re.sub(r'\\%s[^ \\n\\r]*\\.php', '%s', wanted_re)\n wanted_re = re.escape(wanted_re).replace('\\\\%', '%').replace('\\\\\\n', '\\n')\n wanted_re = wanted_re.replace(\"%s\", \".+?\")\n wanted_re = wanted_re.replace(\"%i\", \"[+\\-]?[0-9]+\")\n wanted_re = wanted_re.replace(\"%d\", \"[0-9]+\",)\n wanted_re = wanted_re.replace(\"%x\", \"[0-9a-fA-F]+\")\n wanted_re = wanted_re.replace(\"%f\", \"[+\\-]?\\.?[0-9]+\\.?[0-9]*(E-?[0-9]+)?\")\n wanted_re = wanted_re.replace(\"%c\", \".\")\n\n if len(real.split('\\n')) != len(wanted_re.split('\\n')):\n raise DiffError(real, wanted)\n\n err = []\n for line, line_wanted_re, line_wanted in zip(real.split('\\n'), wanted_re.split('\\n'), wanted.split('\\n')):\n if not re.match(r'^'+line_wanted_re+r'$', line):\n err.append('%s\\n%s' % (repr(line_wanted), repr(line)))\n if err:\n# file('out1', 'w').write(real)\n# file('out2', 'w').write(wanted)\n raise DiffError2(err)\n\n# rematcher = re.compile(r'^'+wanted_re+r'\\n*$', re.DOTALL)\n# if not re.match(r'^'+wanted_re+r'\\n*$', real):\n# if not rematcher.match(real):\n# # print wanted_re[-200:]\n# # print real[-200:]\n# # print repr(real[:20])\n# # print repr(wanted_re[:12])\n# # print re.match(wanted_re[:12], real[:20])\n# raise DiffError(real, wanted)\n log_file.write('testOk exit\\n')\n log_file.flush()\n\n if not tested:\n log_file.write('testOk throw No expected value\\n')\n log_file.flush()\n raise RuntimeError(\"No expected value\")",
"def compare_output(self, input, output, expected):\n if type(input) == UnicodeType:\n input = input.encode('raw_unicode_escape')\n if type(output) == UnicodeType:\n output = output.encode('raw_unicode_escape')\n if type(expected) == UnicodeType:\n expected = expected.encode('raw_unicode_escape')\n # Remove \"generated on\" lines.\n output = self.remove_lines(output, ('generated on --',))\n expected = self.remove_lines(expected, ('generated on --',))\n try:\n self.assertEquals('\\n' + output, '\\n' + expected)\n except AssertionError:\n print >>sys.stderr, '\\n%s\\ninput:' % (self,)\n print >>sys.stderr, input\n print >>sys.stderr, '-: expected\\n+: output'\n print >>sys.stderr, ''.join(self.compare(expected.splitlines(1),\n output.splitlines(1)))\n raise",
"def test_002(compiler, temp_builds_dir):\n filepath = temp_builds_dir.join(\"compiler_write_002\")\n\n content = \"\"\"Some sample unicode text: フランス Furansu\"\"\"\n\n compiler.write_content(content, filepath.strpath)\n\n # Read file to compare\n with io.open(filepath.strpath, \"r\", encoding=\"utf-8\") as f:\n result = f.read()\n\n assert content == result",
"def test_dump(file_contents, engine_contents):\n file_name = 'Triangle.java.xml'\n dump = XmlEngine.dump(engine_contents[file_name])\n assert dump == file_contents",
"def testing(text, output):\n extractor = Extractor(text, output)\n extractor.parse()\n extractor.extract()\n extractor.write()",
"def test_write(self):\n\n store = self.get_store(\n uri='http://localhost:9000/index.html', content=\"My message\")\n store.write('http://localhost:9000/index.html', \"New message\")\n\n content = store.read('http://localhost:9000/index.html')\n\n self.assertEquals(content, \"New message\")",
"def test_content(test):\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string",
"def test_write_to_json():\r\n tmp_dir = os.getcwd()\r\n json_content = '{ \"name\":\"John\", \"age\":30}'\r\n directory = os.path.join(tmp_dir, 'inputspec.json')\r\n write_to_json(directory, json_content) \r\n with open(directory) as json_file:\r\n data = json.load(json_file)\r\n json_string = json.dumps(data)\r\n if os.path.exists(directory):\r\n os.remove(directory)\r\n assert json_string.replace(' ', '') == json_content.replace(' ' , '')"
] |
[
"0.64601886",
"0.61881155",
"0.6004434",
"0.59650457",
"0.5940238",
"0.5898892",
"0.5894748",
"0.58445835",
"0.581732",
"0.57843995",
"0.5749043",
"0.5675368",
"0.565861",
"0.5657756",
"0.5625757",
"0.5623709",
"0.5619837",
"0.5596385",
"0.55919063",
"0.5585478",
"0.5575018",
"0.55700266",
"0.5559819",
"0.5555945",
"0.5554759",
"0.55471313",
"0.5521619",
"0.5513376",
"0.5509763",
"0.5502503"
] |
0.68610203
|
0
|
Tests that NewickEventFactory without arg raises ValueError.
|
def test_no_arg(self):
self.assertRaises(ValueError, NewickEventFactory)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_create_event_model_missing_creator(self):\n with self.assertRaises(ValidationError):\n e = Event(title=self.TITLE)\n e.save()",
"def test_atevent_disabled(self):\n from plone.api.exc import InvalidParameterError\n with self.assertRaises(InvalidParameterError):\n self.event = api.content.create(\n type='Event',\n title=u'Invalid event',\n container=self.lc\n )",
"def test_badNumberOfArgumentsToBuildNews(self):\n newsBuilder = NewsBuilder()\n self.assertRaises(SystemExit, newsBuilder.main, [])\n self.assertRaises(SystemExit, newsBuilder.main, [\"hello\", \"world\"])",
"def test_register_source_invalid():\n frame_ingestor = FrameIngestor()\n invalid_arg = 'string'\n with pytest.raises(TypeError):\n frame_ingestor.register_source(invalid_arg)",
"def test_create_next_event_fails_if_not_repeated(self):\n event = Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10, tzinfo=timezone('Europe/Paris')),\n 'Some description')\n\n with self.assertRaises(ValueError):\n event.create_next_event()",
"def test_new_invalid(self) -> None:\n with pytest.raises(TypeError) as excinfo:\n RunwayTestDefinition({}) # type: ignore\n assert str(excinfo.value).startswith(\"expected data of type\")",
"def test_constructor_with_invalid_feed_option(self):\n feed = InfiniteFeed(self.db, feed='longpoll')\n with self.assertRaises(CloudantArgumentError) as cm:\n invalid_feed = [x for x in feed]\n self.assertEqual(\n str(cm.exception),\n 'Invalid infinite feed option: longpoll. Must be set to continuous.'\n )",
"def test_init_TypeError_when_fun_predictors_is_not_a_Callable():\n fun_predictors = 'not_valid_type'\n err_msg = re.escape(f\"Argument `fun_predictors` must be a Callable. Got {type(fun_predictors)}.\")\n with pytest.raises(TypeError, match = err_msg):\n ForecasterAutoregMultiSeriesCustom(\n regressor = LinearRegression(),\n fun_predictors = 'not_valid_type',\n window_size = 5\n )",
"def test_invalid_instantiation(invalid_instance):\n with pytest.raises(ValueError):\n invalid_instance()",
"def test_event_validation():\n\n class EventValidationTest(Atom):\n ev_member = Event(Int())\n\n ev_type = Event(int)\n\n evt = EventValidationTest()\n\n evt.ev_member = 1\n evt.ev_type = 1\n with pytest.raises(TypeError):\n evt.ev_member = 1.0\n with pytest.raises(TypeError):\n evt.ev_type = 1.0",
"def test_init_TypeError_when_window_size_is_not_int():\n window_size = 'not_valid_type'\n err_msg = re.escape(\n f\"Argument `window_size` must be an int. Got {type(window_size)}.\"\n )\n with pytest.raises(TypeError, match = err_msg):\n ForecasterAutoregMultiSeriesCustom(\n regressor = LinearRegression(),\n fun_predictors = create_predictors,\n window_size = window_size\n )",
"def test_weirdCallable(self):\n us = WeirdCallableOptions()\n argV = \"--foowrong blah\".split()\n # ValueError is swallowed as UsageError\n e = self.assertRaises(usage.UsageError, us.parseOptions, argV)\n self.assertEquals(str(e), \"Parameter type enforcement failed: Yay\")\n\n us = WeirdCallableOptions()\n argV = \"--barwrong blah\".split()\n # RuntimeError is not swallowed\n self.assertRaises(RuntimeError, us.parseOptions, argV)",
"def test_init_throws_missing_argument_exception(self):\n with self.assertRaises(Exception) as ex:\n MarkerId() # trying to create MarketId objectand waits for Exception\n\n self.getLogger().warning(\"Exception: %s\", ex.exception)",
"def test_init_throws_excessive_argument_exception(self):\n with self.assertRaises(Exception) as ex:\n # trying to create MarketId objectand waits for Exception\n MarkerId('test-name', 'arg2')\n\n self.getLogger().warning(\"Exception: %s\", ex.exception)",
"def test_creation_fail(self):\n\n # Assert that a RelaxError occurs when the pipe type is invalid.\n self.assertRaises(RelaxError, pipes.create, 'new', 'x')",
"def test_register_item__non_item(self, init_event):\n event = init_event()\n\n with pytest.raises(TypeError):\n event.register_item(None)",
"def test_instantiation_fail_on_naive_datetime(self):\n with self.assertRaises(ValueError):\n Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10), # Missing timezone\n 'Some description')",
"def test_textAsEvent_NoneText(self):\n self.assertRaises(TypeError, textAsEvent, None)",
"def test_create_raises(self):\n self.assertRaisesRegex(\n TypeError,\n \".*\\\\bcreate\\\\(\\\\) takes 1 positional argument\\\\b.*\",\n Rectangle.create, 0\n )",
"def test_invalid_aggfuncs(forecasters, aggfunc):\n y = make_forecasting_problem()\n forecaster = EnsembleForecaster(forecasters=forecasters, aggfunc=aggfunc)\n forecaster.fit(y, fh=[1, 2])\n with pytest.raises(ValueError, match=r\"not recognized\"):\n forecaster.predict()",
"def test_registering_a_callable_as_concrete_is_exception():\n\n container = Container()\n\n with pytest.raises(InvalidRegistrationError):\n container.register(lambda: \"oops\")",
"def test_12_No_args(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle()\n self.assertEqual(\"__init__() missing 2 required positional\\\n arguments: 'width' and 'height'\", str(x.exception))",
"def test_register_route_factory_not_callable():\n\n with pytest.raises(InvalidRouteFactoryTypeError):\n application_services.register_route_factory(1)",
"def test_send_event_raises():\n send_event('pytest-reportportal', '5.0.5')",
"def test_filters_anonymous_with_empty_events():\n event = {}\n with pytest.raises(EventKeyError):\n filters.anonymous(event)",
"def test_unsupported_event(event_manager: EventManager, subscriber: Mock) -> None:\n event_manager.handler(GLOBAL_SCENE_CHANGE)\n subscriber.assert_not_called()",
"def test_require_now_raises_for_unavailable_tests(self, test_generator):\n feature = test_generator()\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n with self.assertRaisesRegex(MissingOptionalLibraryError, \"sentinel message\"):\n feature.require_now(\"sentinel message\")\n check.assert_called_once()",
"def test_registering_an_instance_as_factory_is_exception():\n container = Container()\n writer = MessageWriter()\n\n with pytest.raises(InvalidRegistrationError):\n container.register(MessageWriter, writer)",
"def test_constructor_invalid():\n with pytest.raises(TypeError, match='missing 1 required positional argument'):\n PseudoPotentialData() # pylint: disable=no-value-for-parameter",
"def test_create_instance(self):\n with self.assertRaises(exceptions.NoInitiation):\n Config()"
] |
[
"0.6419645",
"0.6396346",
"0.6311455",
"0.6303136",
"0.628506",
"0.6277325",
"0.61772805",
"0.6125845",
"0.61118144",
"0.6111505",
"0.6089412",
"0.6066073",
"0.6054844",
"0.60529006",
"0.60401094",
"0.60319936",
"0.60121155",
"0.60074675",
"0.5997193",
"0.5990431",
"0.5982735",
"0.5981048",
"0.5956349",
"0.5956228",
"0.5952556",
"0.59524184",
"0.59502226",
"0.59441924",
"0.59425944",
"0.5933925"
] |
0.929328
|
0
|
Find positions that are contained within segments
|
def find_overlapping_segments(pos, seg, columns):
seg = seg.sort_values(['start', 'end'])
if seg.duplicated(['start', 'end']).any():
raise ValueError('duplicate columns')
start_idx = np.searchsorted(seg['start'].values, pos['coord'].values) - 1
end_idx = np.searchsorted(seg['end'].values, pos['coord'].values)
mask = (start_idx == end_idx)
results = pos.copy()
for col in columns:
results[col] = np.nan
results.loc[mask, col] = seg[col].iloc[end_idx[mask]].values
return results
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def contains ( self, pos ):\n \n inds = in_hull(pos[:2,:].T, array(self.edges).reshape(-1,2), \\\n border = self.include_border ,tol = self.abs_tol)\n \n # if none inside, take nearest\n if ~inds.any() and self.default_nearest:\n dr2 = array(self.edges).reshape(-1,2).mean(0)\n inds[argmin(dr2)] = True\n \n return inds",
"def contains ( self, pos ):\n \n poly = Polygon(array(self.edges).reshape(-1,2)[:,0],array(self.edges).reshape(-1,2)[:,1])\n dists = poly.is_inside(pos[0,:],pos[1,:]) \n if self.include_border:\n inds = dists >= -self.abs_tol\n else:\n inds = dists > 0\n \n \n # if none inside, take nearest\n if ~inds.any() and self.default_nearest:\n dr2 = array(self.edges).reshape(-1,2).mean(0)\n inds[argmin(dr2)] = True\n \n return inds",
"def _get_containing_blocks(size, point):\n i, j = point\n block_inds = []\n if i > 0:\n if j > 0:\n block_inds.append((i - 1, j - 1))\n if j < size - 1:\n block_inds.append((i - 1, j))\n if i < size - 1:\n if j > 0:\n block_inds.append((i, j - 1))\n if j < size - 1:\n block_inds.append((i, j))\n \n return block_inds",
"def inters_segment(self, s):\r\n x1 = s.start[0] - self.center[0]\r\n y1 = s.start[1] - self.center[1]\r\n x2 = s.end[0] - self.center[0]\r\n y2 = s.end[1] - self.center[1]\r\n dx = x2 - x1\r\n dy = y2 - y1\r\n dr = math.sqrt(dx * dx + dy * dy)\r\n D = x1 * y2 - x2 * y1\r\n dr2 = dr * dr\r\n d = self.radius * self.radius * dr2 - D * D \r\n \r\n if d < 0:\r\n return []\r\n else: \r\n if dy < 0:\r\n sgndy = -1\r\n else:\r\n sgndy = 1 \r\n \r\n Ddy = D * dy\r\n mDdx = -D * dx\r\n sgndydxsqrtd = sgndy * dx * math.sqrt(d)\r\n absdysqrtd = abs(dy) * math.sqrt(d) \r\n \r\n xa = float(Ddy + sgndydxsqrtd) / dr2 + self.center[0]\r\n ya = float(mDdx + absdysqrtd) / dr2 + self.center[1]\r\n \r\n xb = (Ddy - sgndydxsqrtd) / dr2 + self.center[0]\r\n yb = (mDdx - absdysqrtd) / dr2 + self.center[1]\r\n \r\n if (d == 0) or not s.contains_point(xb, yb):\r\n if s.contains_point(xa, ya):\r\n return [(int(xa), int(ya))]\r\n else:\r\n return []\r\n else:\r\n if s.contains_point(xa, ya):\r\n return [(int(xa), int(ya)), (int(xb), int(yb))]\r\n else:\r\n return [(int(xb), int(yb))]",
"def contains ( self, pos ):\n dr2 = (pos[0, :]-self.x)**2 + (pos[1, :]-self.y)**2\n # which points are in the circle?\n if self.include_border:\n inds = (dr2 - self.r**2) < self.abs_tol\n else:\n inds = (dr2 - self.r**2) < -self.abs_tol\n \n \n # if there's no poit inside\n if ~inds.any() and self.default_nearest: \n inds[argmin(dr2)] = True\n \n return inds",
"def contains ( self, pos ):\n # make sure xmin is minimum etc\n xmin = min(self.x_min,self.x_max)\n xmax = max(self.x_min,self.x_max)\n ymin = min(self.y_min,self.y_max)\n ymax = max(self.y_min,self.y_max)\n \n abs_tol = self.abs_tol\n # get pos indices inside rectangle (* == and)\n if self.include_border:\n inds = (pos[0, :] - xmin > -abs_tol) * \\\n (pos[0, :] - xmax < abs_tol) * \\\n (pos[1, :] - ymin > -abs_tol) * \\\n (pos[1, :] - ymax < abs_tol)\n else:\n inds = (pos[0, :] - xmin > abs_tol) * \\\n (pos[0, :] - xmax < -abs_tol) * \\\n (pos[1, :] - ymin > abs_tol) * \\\n (pos[1, :] - ymax < -abs_tol)\n \n \n # if none inside, take nearest\n if ~inds.any() and self.default_nearest:\n x = (xmin + xmax) / 2.0\n y = (ymin + ymax) / 2.0\n dr2 = (pos[0, :] - x)**2 + (pos[1, :] - y)**2\n inds[argmin(dr2)] = True\n \n return inds.astype(bool)",
"def inters_segment(self, s):\r\n if (self.m == s.m) and (self.n == s.n):\r\n # The segment s is over this segment. Return the middle point\r\n x = (self.start[0] + self.end[0]) / 2\r\n y = (self.start[1] + self.end[1]) / 2\r\n elif self.m == s.m:\r\n # The segments are parallels\r\n return None\r\n elif self.m == None:\r\n x = self.start[0]\r\n y = int(s.m * x + s.n)\r\n elif s.m == None:\r\n x = s.start[0]\r\n y = self.m * x + self.n\r\n else:\r\n x = (s.n - self.n) / (self.m - s.m)\r\n y = self.m * x + self.n \r\n \r\n if self.contains_point(x, y) and s.contains_point(x, y):\r\n return int(x), int(y)\r\n else:\r\n return None",
"def contains ( self, pos ):\n # initialize with only \"False\" entries\n inds = zeros(pos.shape[1], dtype=bool)\n \n # add points contained in each sector\n for sec in self.sectors:\n inds += sec.contains(pos)\n \n return inds.astype(bool)",
"def optimal_points(segments):\n points = []\n segments.sort(key=lambda x: x.end)\n\n while len(segments) != 0:\n s = segments[0]\n points.append(s.end)\n j = 0\n while j < len(segments):\n temp = segments[j]\n if temp.start <= s.end and temp.end >= s.end:\n segments.remove(temp)\n else:\n j += 1\n return points",
"def ordered_by_xcenter(seg):\n objects = [(slice(0,0),slice(0,0))]+find_objects(seg)\n def xc(o): return mean((o[1].start,o[1].stop))\n xs = array([xc(o) for o in objects])\n for i in range(1,len(xs)):\n if xs[i-1]>xs[i]: return 0\n return 1",
"def locate_interesting_segment(binary_matrix, indeces, beats, during_threshold = 5):\n point = np.zeros([1, 4], dtype = int)\n segments = np.empty([0, 4], dtype = int)\n is_segment_bedin = False\n for index in indeces:\n temp = np.diag(binary_matrix, -index)\n for j in range(len(temp)):\n if (temp[j] == 0 and is_segment_bedin == False) or (temp[j] == 1 and is_segment_bedin == True):\n continue\n else:\n if temp[j] == 1:\n point[0, 0] = index + j\n point[0, 1] = j\n is_segment_bedin = True\n else:\n point[0, 2] = index + j\n point[0, 3] = j\n is_segment_bedin = False\n segments = np.append(segments, point, axis = 0)\n\n # using the time during whose default value is 4s to filter segment\n del_indeces = np.array([], dtype = int)\n new_binary_matrix = binary_matrix.copy()\n for i in range(len(segments)):\n\n time_begin = beats[segments[i, 0]]\n time_end = beats[segments[i, 2]]\n if time_end - time_begin < during_threshold:\n del_indeces = np.append(del_indeces, i)\n\n # set the binary matrix\n for row in range(segments[i, 0], segments[i, 2]):\n row_begin = segments[i, 0]\n col_begin = segments[i, 1]\n new_binary_matrix[row, row - row_begin + col_begin] = 0\n\n segments = np.delete(segments, del_indeces, axis=0)\n\n length = len(segments)\n # the matrix which denote if segment is close with each other\n segments_close_matrix = np.zeros([length, length], dtype = int)\n for i in range(length):\n for j in range(length):\n if i == j:\n continue\n x1 = segments[i, :]\n x2 = segments[j, :]\n\n # determine if segment is close with each other\n if x2[0] >= x1[0] - 5 and x2[2] <= x1[2] + 20 and abs(x2[1] - x1[1]) <= 20 and x2[3] <= x1[3] + 5:\n segments_close_matrix[i, j] = 1\n\n # delete some segments with less than 3 closed segment\n del_indeces = np.array([], dtype=int)\n close_count = np.sum(segments_close_matrix, axis = 0)\n for i in range(len(segments)):\n if close_count[i] < 3:\n del_indeces = np.append(del_indeces, i)\n\n # set the binary matrix\n for row in range(segments[i, 0], segments[i, 2]):\n row_begin = segments[i, 0]\n col_begin = segments[i, 1]\n new_binary_matrix[row, row - row_begin + col_begin] = 0\n\n segments = np.delete(segments, del_indeces, axis = 0)\n # plt.matshow(new_binary_matrix, cmap=plt.cm.gray)\n # plt.show()\n\n return segments, new_binary_matrix",
"def find_segment(bv: binaryninja.binaryview.BinaryView, name: str) -> List[Tuple[int, int]]:\n result = []\n for sn in bv.sections:\n sec = bv.get_section_by_name(sn)\n if sec.name == name:\n result.append((sec.start, sec.end))\n return result",
"def findPositions(self, seq):\n child = self.children.get(seq[0], None)\n if child:\n if len(seq) == 1:\n return child.positions\n return child.findPositions(seq[1:])\n else:\n return []",
"def find_segments(array_size, patch_size, index):\n start, stop = index, index + patch_size\n grid_segments = []\n patch_segments = []\n\n # Set a position and compute the number of segments\n # falling in the region (if region larger than data)\n grid_pos = start\n patch_pos = 0\n nsegments = int(ceil(float(patch_size) / array_size))\n\n for _ in range(0, nsegments + 1):\n # Compute next closest cell boundary\n boundary = (int(floor(float(grid_pos) / array_size)) + 1) * array_size\n \n # Reset the boundary if the region ends before it.\n boundary = stop if stop <= boundary else boundary\n\n # Add grid segment\n grid_segments.append((grid_pos, boundary))\n\n # Compute segment size\n segment_size = abs(boundary - grid_pos)\n\n # Add region segment\n patch_segments.append((patch_pos, patch_pos + segment_size))\n\n if boundary == stop:\n break\n \n # Update the grid and region position\n grid_pos = boundary\n patch_pos = patch_pos + segment_size\n \n return grid_segments, patch_segments",
"def segment(self):\n start = self.alignment.matching_function_startpoint(self.idx)\n end = self.alignment.matching_function_endpoint(self.idx)\n return [start, end]",
"def intersects(self):\n match = False\n for i in range(len(self.__points) - 1):\n p1 = self.__points[i]\n p2 = self.__points[i + 1]\n bounds = self.__line_segment(p1, p2)\n if not bounds is None:\n xmin = bounds[0]\n ymin = bounds[1]\n xmax = bounds[0]\n ymax = bounds[1]\n for j in range(len(bounds)):\n if not (j % 2):\n if bounds[j] < xmin:\n xmin = bounds[j]\n elif bounds[j] > xmax:\n xmax = bounds[j]\n else:\n if bounds[j] < ymin:\n ymin = bounds[j]\n elif bounds[j] > ymax:\n ymax = bounds[j]\n x = self.x\n y = self.y\n # TODO: Determine direction, and check two leading edge points; ie. last vector ----> then points are x+width,y+width x+width,y-width\n if x > xmin and x < xmax and y > ymin and y < ymax:\n match = True\n break\n return match",
"def _get_end_points(self, segmented_instances, i, stats, idx):\n\n end_points=[]\n\n # find all points intersecting the bbox\n #(tl_x, th_y, width, height, area)\n label_num=i+1\n leftmost_x = stats['bbox'][i][cv2.CC_STAT_LEFT]\n topmost_y = stats['bbox'][i][cv2.CC_STAT_TOP]\n width = stats['bbox'][i][cv2.CC_STAT_WIDTH]\n height = stats['bbox'][i][cv2.CC_STAT_HEIGHT]\n bottom_most_y = topmost_y + height-1\n right_most_x = leftmost_x + width-1\n\n segmented_instances_copy=segmented_instances.copy()\n edge_points = np.zeros(segmented_instances.shape).astype(np.uint8)\n segs = np.zeros(segmented_instances.shape).astype(np.uint8)\n segs[segmented_instances==label_num]=255\n cv2.rectangle(segmented_instances_copy,(leftmost_x, topmost_y), (right_most_x, bottom_most_y), 150, 2)\n\n #Get all points for the current stem segment\n label_points = np.argwhere(segmented_instances.copy()==label_num)\n\n # upper points from (tl_x,th_y) to (th_x, th_y) that instersect with the upper edge of the bouding box\n upper_points = [i for i in label_points if i[0]==topmost_y and i[1]>=leftmost_x and i[1]<=right_most_x]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(upper_points, edge_points, segs, 1)\n center_upper_pts = sorted(self._get_centeroids(x_pts))\n\n # left side points from (tl_x, tl_y) to (tl_x, th_y) that instersect with the left edge of the bouding box\n left_points = [i for i in label_points if i[1]==leftmost_x and i[0]<=bottom_most_y and i[0]>=topmost_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(left_points, edge_points, segs, 0)\n center_left_pts = sorted(self._get_centeroids(x_pts))\n\n #right side points form (th_x, tl_y) to (th_x, th_y) that instersect with the right edge of the bouding box\n right_points = [i for i in label_points if i[1]==right_most_x and i[0]<=bottom_most_y and i[0]>=topmost_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(right_points, edge_points, segs, 0)\n center_right_pts = sorted(self._get_centeroids(x_pts))\n\n #bottom points from (tl_x, tl_y) to (th_x,tl_y)\n bottom_points = [i for i in label_points if i[1]>=leftmost_x and i[1]<=right_most_x and i[0]==bottom_most_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(bottom_points, edge_points, segs, 1)\n center_bottom_pts = sorted(self._get_centeroids(x_pts))\n\n # If there are corner edges, get the centroid of that\n center_x_lb, center_y_lb, center_left_pts, center_bottom_pts = self._get_corner_centers(center_left_pts, \\\n center_bottom_pts, bottom_most_y, leftmost_x)\n if (center_x_lb != None) and (center_y_lb != None):\n end_points.append([center_x_lb, center_y_lb])\n else:\n if len(center_left_pts)>0:\n for pt_idx in range(0, len(center_left_pts)):\n end_points.append([leftmost_x, center_left_pts[pt_idx]])\n if len(center_bottom_pts)>0:\n for pt_idx in range(0, len(center_bottom_pts)):\n end_points.append([center_bottom_pts[pt_idx], bottom_most_y])\n\n # If there are corner edges, get the centroid of that\n center_x_ur, center_y_ur, center_right_pts, center_upper_pts = self._get_corner_centers(center_right_pts, \\\n center_upper_pts, topmost_y, right_most_x)\n if (center_x_ur != None) and (center_y_ur != None):\n end_points.append([center_x_ur, center_y_ur])\n else:\n if len(center_right_pts)>0:\n for pt_idx in range(0, len(center_right_pts)):\n end_points.append([right_most_x, center_right_pts[pt_idx]])\n if len(center_upper_pts)>0:\n for pt_idx in range(0, len(center_upper_pts)):\n end_points.append([center_upper_pts[pt_idx], topmost_y])\n\n # If there are 
corner edges, get the centroid of that\n center_x_ul, center_y_ul, center_left_pts, center_upper_pts = self._get_corner_centers(center_left_pts, \\\n center_upper_pts, topmost_y, leftmost_x)\n if (center_x_ul != None) and (center_y_ul != None):\n end_points.append([center_x_ul, center_y_ul])\n else:\n if len(center_left_pts)>0:\n for pt_idx in range(0, len(center_left_pts)):\n end_points.append([leftmost_x, center_left_pts[pt_idx]])\n if len(center_upper_pts)>0:\n for pt_idx in range(0, len(center_upper_pts)):\n end_points.append([center_upper_pts[pt_idx], topmost_y])\n\n\n # If there are corner edges, get the centroid of that\n center_x_br, center_y_br, center_right_pts, center_bottom_pts = self._get_corner_centers(center_right_pts, \\\n center_bottom_pts, bottom_most_y, right_most_x)\n if (center_x_br != None) and (center_y_br != None):\n end_points.append([center_x_br, center_y_br])\n else:\n if len(center_right_pts)>0:\n for pt_idx in range(0, len(center_right_pts)):\n end_points.append([right_most_x, center_right_pts[pt_idx]])\n if len(center_bottom_pts)>0:\n for pt_idx in range(0, len(center_bottom_pts)):\n end_points.append([center_bottom_pts[pt_idx], bottom_most_y])\n\n #self.showme(segmented_instances_copy, 'bbox')\n\n return end_points",
"def count_segments_naive(self, starts, ends, points):\r\n count = [0] * len(points)\r\n \r\n for i in range(len(points)):\r\n for j in range(len(starts)):\r\n if starts[j] <= points[i] <= ends[j]:\r\n count[i] += 1\r\n \r\n return count",
"def searchDeadEnd(self):\n boundaries = []\n if not self.red:\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i, j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not (i[0], i[1]) in self.walls:\n validPositions.append(i)\n\n dangerPos = []\n\n toExpand = self.scanmap.twoEntryPoints()\n for (x,y) in toExpand:\n adjacent = self.scanmap.adjacentValidPoints(x, y)\n if not (x,y) in dangerPos:\n for (u, w) in adjacent:\n visited = []\n visited.append((x, y))\n safe = False\n danger = False\n DFS = util.Stack()\n DFS.push((u,w))\n while not safe and not danger:\n (i,j) = DFS.pop()\n visited.append((i,j))\n adjacents = self.scanmap.adjacentValidPoints(i,j)\n for position in adjacents:\n if not position in visited:\n DFS.push(position)\n if DFS.isEmpty():\n danger = True\n dangerPos = list(set(dangerPos) | set(visited))\n\n if (i,j) in validPositions:\n safe = True\n oneEntry = self.scanmap.oneEntryPoints()\n dangerPos = list(set(oneEntry).union(set(dangerPos)))\n dangerPos.sort()\n return dangerPos",
"def segments(self):\n return (self._subset((i,i+1)) for i in range(len(self)-1))",
"def query(self, points):\n voxel_x = np.clip(np.searchsorted(\n self.segments[0], points[:, 0]) - 1, 0, self.x_y_z[0])\n voxel_y = np.clip(np.searchsorted(\n self.segments[1], points[:, 1]) - 1, 0, self.x_y_z[1])\n voxel_z = np.clip(np.searchsorted(\n self.segments[2], points[:, 2]) - 1, 0, self.x_y_z[2])\n voxel_n = np.ravel_multi_index([voxel_x, voxel_y, voxel_z], self.x_y_z)\n\n return voxel_n",
"def break_segments_at_points( intervals, positions, verbose=False ):\n starts, ends = map(list, zip(*intervals))\n for chrom_end_pos in positions:\n assert chrom_end_pos <= ends[-1], \"segment end does not include end of genome\"\n assert chrom_end_pos >= starts[0], \"start of genome to the left of first segment\"\n spos = bisect.bisect_left(starts, chrom_end_pos)\n\n if verbose:\n print \"-\"*80\n print \"position %d\"%chrom_end_pos\n print \"spos %d\"%spos\n if spos < len(starts) and starts[spos] == chrom_end_pos:\n if verbose:\n print \"already a segment boundary\"\n else:\n epos = bisect.bisect_left(ends, chrom_end_pos)\n if verbose:\n print \"epos %d\"%epos\n assert spos == epos+1\n if chrom_end_pos < ends[epos]:\n if verbose:\n print \"Break [%d, %d) into [%d, %d) and [%d, %d)\"%(starts[spos-1], ends[epos], \n starts[spos-1], chrom_end_pos,\n chrom_end_pos, ends[epos])\n print \"Delete %d,%d\"%(starts[epos], ends[epos])\n\n # update epos\n copy_end = ends[epos]\n ends[epos] = chrom_end_pos\n # update epos+1\n starts.insert(epos+1, chrom_end_pos)\n ends.insert(epos+1, copy_end)\n if verbose:\n print \"epos: \", (starts[epos], ends[epos])\n print \"epos+1: \", (starts[epos+1], ends[epos+1])\n elif chrom_end_pos == ends[epos]:\n if verbose:\n print \"already a segment boundary\"\n return zip(starts, ends)",
"def getSegments(points):\n return _identifyStrokes(points)[1]",
"def find_isolated_endpoints(lines):\n \n isolated_endpoints = []\n count = len(lines)\n print(\"Finding isolated end points 2/3\")\n pb = pbar.ProgressBar(count)\n for i, line in enumerate(lines):\n pb += 1\n other_lines = lines[:i] + lines[i+1:]\n for q in [0,-1]:\n endpoint = Point(line.coords[q])\n if any(endpoint.touches(another_line) \n for another_line in other_lines):\n continue\n else:\n isolated_endpoints.append(endpoint)\n del pb\n return isolated_endpoints",
"def find_matching_segments(self):\n hyp_matched_segs = [TIntervalGroup() for i in range(len(self.hyp))]\n for gid_ref, match_ref in enumerate(self.ref):\n bg_ref = match_ref.bbox_group\n max_gid, max_area = -1, 0\n for gid_hyp, bg_hyp in enumerate(self.hyp.get_bbox_groups()):\n rx, ry = bg_ref.page_range(), bg_hyp.page_range()\n if ry[0] > rx[1]:\n break\n area = (bg_ref & bg_hyp)\n if area > max_area:\n max_gid, max_area = gid_hyp, area\n if max_gid != -1:\n hyp_matched_segs[max_gid].extend(match_ref.tinterval_group.copy())\n print('%d -> %d' % (gid_ref, max_gid))\n for seg in hyp_matched_segs:\n seg.reduce()\n return hyp_matched_segs",
"def findPlacements(self,start,size):\n\n possible = []\n isViableX = True\n isViableY = True\n for x in range(1-size,size):\n if ((start[0]+x,start[1]) in self.ships): # Checks for Valid Horizontal Positions\n isViableX = False\n if ((start[0],start[1]+x) in self.ships): # Checks for Valid Vertical Positions\n isViableY = False\n if x == 0 or x== size-1:\n if isViableX:\n possible.append((start[0]-size+1 if x==0 else start[0]+size-1,start[1]))\n if isViableY:\n possible.append((start[0],start[1]-size+1 if x==0 else start[1]+size-1))\n isViableX = True\n isViableY = True\n print(possible)\n return(possible)",
"def neighbors(lines, of):\n return [k for k, line in enumerate(lines) if line.touches(of)]",
"def segmented_intersections(lines):\r\n\r\n intersections = []\r\n for i, group in enumerate(lines[:-1]):\r\n for next_group in lines[i+1:]:\r\n for line1 in group:\r\n for line2 in next_group:\r\n intersections.append(intersection(line1, line2)) \r\n\r\n return intersections",
"def contained(self):\n seen = set()\n return [l.to_segment for l in self.edges_to_contained \\\n if id(l) not in seen and not seen.add(id(l))]",
"def all_segs_matching_fts(self, ft_mask):\n matching_segs = [ipa for (ipa, fts) in self.segments if fts >= ft_mask]\n return sorted(matching_segs, key=lambda x: len(x), reverse=True)"
] |
[
"0.6588952",
"0.6547448",
"0.6513691",
"0.646229",
"0.6430361",
"0.64195794",
"0.6334076",
"0.62850267",
"0.62444854",
"0.6213392",
"0.61859167",
"0.61407787",
"0.6118163",
"0.6112225",
"0.6081516",
"0.60803694",
"0.606772",
"0.6065855",
"0.60600376",
"0.6054242",
"0.6050661",
"0.60436434",
"0.6042265",
"0.6036199",
"0.6004262",
"0.6003389",
"0.59736156",
"0.5961559",
"0.59564483",
"0.59533465"
] |
0.6627635
|
0
|
Simple hierarchical clustering figure for SNVs
|
def snv_hierarchical_clustering_figure(snv_data, clusters):
snv_matrix = snv_data.merge(clusters)
snv_matrix = (
snv_matrix.groupby(
['chrom', 'coord', 'ref', 'alt', 'cluster_id'],
as_index=True, observed=True)[['alt_counts', 'ref_counts']]
.sum().unstack().fillna(0).astype(int).stack().reset_index())
snv_matrix['total_counts'] = snv_matrix['ref_counts'] + snv_matrix['alt_counts']
snv_matrix['vaf'] = snv_matrix['alt_counts'] / snv_matrix['total_counts']
snv_matrix['alt_counts'] = snv_matrix['alt_counts'].clip(upper=10)
snv_matrix['is_present'] = (snv_matrix['alt_counts'] > 0) * 1
snv_matrix['is_absent'] = (snv_matrix['alt_counts'] == 0) * 1
snv_matrix['is_het'] = (snv_matrix['alt_counts'] < 0.99 * snv_matrix['total_counts']) * snv_matrix['is_present']
snv_matrix['is_hom'] = (snv_matrix['alt_counts'] >= 0.99 * snv_matrix['total_counts']) * snv_matrix['is_present']
snv_matrix['state'] = snv_matrix['is_hom'] * 3 + snv_matrix['is_het'] * 2 + snv_matrix['is_absent']
snv_presence_matrix = snv_matrix.set_index(['chrom', 'coord', 'cluster_id'])['is_present'].unstack(fill_value=0)
logging.info(f'snv matrix with shape {snv_presence_matrix.shape}, memory {snv_presence_matrix.memory_usage().sum()}')
# KLUDGE: currently recursion in dendrograms
# breaks with large datasets
import sys
sys.setrecursionlimit(10000)
g = seaborn.clustermap(snv_presence_matrix, rasterized=True, row_cluster=True, figsize=(4, 12))
return g.fig
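
A minimal usage sketch for the function above. The exact schemas of snv_data and clusters are not shown in this record, so the sketch assumes snv_data holds per-cell SNV counts keyed by a 'cell_id' column and clusters maps 'cell_id' to 'cluster_id'; it also supplies the pandas, seaborn and logging imports the function body relies on but which are not part of the extracted snippet.

# Usage sketch under the assumptions stated above; toy values are invented.
import logging          # used by the function above via logging.info
import pandas as pd
import seaborn          # used by the function above via seaborn.clustermap

snv_data = pd.DataFrame({
    'chrom': ['1', '1', '2'],
    'coord': [100, 200, 300],
    'ref': ['A', 'C', 'G'],
    'alt': ['T', 'G', 'A'],
    'cell_id': ['c1', 'c2', 'c1'],       # assumed merge key with `clusters`
    'alt_counts': [3, 0, 5],
    'ref_counts': [7, 10, 5],
})
clusters = pd.DataFrame({'cell_id': ['c1', 'c2'], 'cluster_id': [0, 1]})

# Returns the matplotlib figure wrapped by seaborn's clustermap.
fig = snv_hierarchical_clustering_figure(snv_data, clusters)
fig.savefig('snv_clustermap.png')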
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n 
temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")",
"def hierarchial_clustering(self,k):\r\n\r\n print(colored(\"Performing hierarchial clustering\",color = 'yellow', attrs=['bold']))\r\n self.clustering = AgglomerativeClustering(affinity='euclidean', linkage='ward').fit(self.X)\r\n self.labels = self.clustering.labels_\r\n self.davies_bouldin_score()\r\n print()\r\n print(colored(\"The number of cluster centers formed are %d\\n\" %(self.clustering.n_clusters_),color = 'red', attrs=['bold']))\r\n self.cluster_plot()\r\n return self.labels",
"def clustering_and_visulization(self):\n try:\n centroids, _ = kmeans(self.data_mat, self.k)\n except ValueError:\n print(\"The number of clusters is more than the data points\")\n self.idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[self.idx == i, 0])\n self.plot_list1.append(self.data_mat[self.idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n\n for i in range(self.k):\n self.cluster = self.data_mat[self.idx == i]\n self.clusterlist.append(self.cluster)\n print(self.clusterlist)\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n index_dict ={}\n for i in self.clusterdict:\n index_dict[i] = []\n for i in range(len(self.data_mat)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n index_dict[j].append(i)\n print(\"drugs cluster dict\", index_dict)\n\n self.drugsdict = {}\n for i in index_dict:\n self.drugsdict[i] = []\n drugslist = list(self.df.columns.values)\n print(\"drugs list from dataframe\", drugslist)\n\n for i in index_dict:\n self.drugsdict[i] = [drugslist[index] for index in index_dict[i]]\n\n print(\"drugs cluster dict\", self.drugsdict)\n########################################################################################################################\n clusterdict_from_df_as_drug_frequency = {}\n clusterdict_from_df_as_drug_non_O_frequency = {}\n\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i] = []\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i].append(self.df.iloc[i].to_dict()) #\n print(\"packs in dict form of drugs frequency\", clusterdict_from_df_as_drug_frequency)\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_frequency[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n for i in range(len(self.df)):\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse(\n [list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n robot_for_packs_dict = {}\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = []\n\n # for i in range(len(self.df)):\n for i in range(len(self.df)):\n for j in clusterdict_of_non_repeated_drugs[i]:\n if j in self.drugsdict[0]:\n robot_for_packs_dict[i].append(0)\n elif j in self.drugsdict[1]:\n robot_for_packs_dict[i].append(1)\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = set(robot_for_packs_dict[i])\n\n for i in range(len(self.df)):\n 
robot_for_packs_dict[i] = list(more_itertools.collapse(robot_for_packs_dict[i]))\n print('\\n')\n print(\"clusterdict_of_non_repeated_drugs\", robot_for_packs_dict)",
"def plotClusters(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Identification of Cluster Particles with Voronoi Volumes', fontsize=22)\n ax.set_xlabel('x [m]', fontsize=18)\n ax.set_ylabel('y [m]', fontsize=18)\n ax.set_zlabel('z [m]', fontsize=18)\n\n strength = np.linspace(0, 0.8, len(self.unique_labels))\n np.random.shuffle(strength)\n colors = [plt.cm.nipy_spectral(each) for each in strength]\n np.random.shuffle(strength)\n colorsB = [plt.cm.nipy_spectral(each) for each in strength]\n\n for k, col, colB in zip(self.unique_labels, colors, colorsB):\n a = 1\n s = 3\n if k == -1:\n # Black used for noise.\n col = [1, 0, 0]\n a = 0.3\n s = 1\n\n class_member_mask = (self.labels == k)\n xy = self.data[class_member_mask]\n if len(xy) > 0:\n ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], c=np.reshape(np.array(col), (1, -1)),\n edgecolors=np.reshape(np.array(colB), (1, -1)), alpha=a, s=s, label='Cluster ' + str(k))",
"def hierarical_clustering(p_df, method=\"average\"):\n pdf_values = p_df.values\n np.fill_diagonal(pdf_values, 0)\n pdf_values_1_d = matrix_to_squareform(pdf_values)\n cluster_matrix = linkage(pdf_values_1_d, method)\n return cluster_matrix",
"def clusters_based_on_LoS():\n algorithm = 'LoS'\n tree_filename = '../data/pickle/pneumonia_tree_without_electrolytes_min_supp_0_05.pickle'\n entity_list = read_json('../data/json/pneumonia_entity_list.json')\n mat = prepare_matrix(tree_filename, len(entity_list))\n\n length_of_stays = length_of_stay('../csv/pneumonia_admissions.csv', show_plot=False)\n borders = (7, 15, 30)\n groups = make_groups(length_of_stays, borders)\n\n labels = np.zeros(len(entity_list), dtype='int')\n labels[groups[0]] = 0\n labels[groups[1]] = 1\n labels[groups[2]] = 1\n labels[groups[3]] = 1\n\n visualize_clusters_in_2D(mat, labels, algorithm, None, show_annotations=False)",
"def plot_clusters(self):\n pass",
"def hierarchicalClustering(distanceMatrix, withDendrogram=False):\n\n # convert symmetric distance matrix into upper triangular array\n distArray = ssd.squareform(np.asmatrix(distanceMatrix), checks=False)\n # find \"best\" method\n methods = [\"ward\", \"median\", \"average\", \"single\", \"complete\"]\n bestVal = 0.0\n bestMethod = \" \"\n for mm in methods:\n Z = linkage(distArray, method=mm, optimal_ordering=True)\n\n # test the goodness of cluster with cophenetic correl coefficient\n c, cophDist = cophenet(Z, distArray)\n print(\"[ {0:10s} ] Cophenetic = {1:5.2f}\".format(mm, c))\n if c > bestVal:\n bestVal = c\n bestMethod = mm\n\n # repeat with best method\n Z = linkage(distArray, method=bestMethod, optimal_ordering=True)\n # print(Z)\n # note: The Z gives the distances at which each cluster was merged\n\n # get the cluster for each point\n # maxD = 0.95\n maxD = 0.3\n labels = fcluster(Z, maxD, criterion=\"distance\")\n labels = labels - [1]*len(labels) # start from 0\n\n if withDendrogram:\n plt.figure(figsize=(25, 10))\n plt.title('Hierarchical Clustering Dendrogram')\n plt.xlabel('sample index')\n plt.ylabel('distance')\n dendrogram(\n Z,\n leaf_rotation=90., # rotates the x axis labels\n leaf_font_size=8., # font size for the x axis labels\n show_leaf_counts=True,\n get_leaves=True,\n # truncate_mode=\"level\",\n # p =5,\n )\n plt.axhline(y=maxD, c='k')\n plt.savefig(\"dendrogram.png\")\n print(\"Dendrogram saved on disk ('dendrogam.png')\")\n\n return labels",
"def cluster_hierarchical(vectors, num_cl=None, vis=False, info=''):\n linked = linkage(vectors, method='average', metric=taxi_distance)\n labels = range(vectors.shape[0])\n if vis: \n plt.figure(figsize=(40, 28))\n dendrogram(linked,\n orientation='top',\n labels=labels,\n distance_sort='descending',\n show_leaf_counts=True)\n if vis: \n plt.savefig('./dendrograms/dendro_'+info+'.png')\n \n if num_cl is not None:\n labels = fcluster(linked, num_cl, criterion='maxclust')\n return labels, num_cl",
"def hierarchical(encoder,tsne,true_data,true_labels,save_name =\"hierarchical.png\"):\n enc_output = encoder.predict(true_data)\n # Hierarchical Clustering\n labels = HierarchicalClustering()\n predictions = labels.draw_dendogram(enc_output,title='Hierarchical Clustering Dendrogram',savetitle = \"hierarchical.png\")\n\n # Confusion matrix of hierarchical clustering\n confusion_matrix(true_labels,predictions,save_name = \"confusion_matrix_hierarchical.png\")\n\n # Visualize test predictions from hierarchical\n true_data = np.reshape(true_data,(len(true_data),64,64))\n visualize_class_predictions(true_data,true_labels,predictions)",
"def hcluster(X, metric=\"correlation\", method=\"average\"):\n distance_mat = dist.pdist(X, metric) # condensed matrix form\n linkage_mat = hier.linkage(distance_mat, method=method, metric='euclidean')\n return linkage_mat",
"def find_clusters(self, order=\"Vup-Hup\", plot_step=0):\n self.graph.numbuckets = int((self.graph.size * (self.graph.size // 2 - 1) * 2) * (1 + straightness_par))\n self.graph.buckets = [[] for _ in range(self.graph.numbuckets)]\n self.graph.wastebasket = []\n self.graph.maxbucket = 0\n\n cID, s = 0, self.graph.size\n\n if order == \"Vup-Hup\":\n vertices = self.graph.V.values()\n if order == \"Vdo-Hdo\":\n vertices = [self.graph.V[(t, y, x)]\n for x in reversed(range(s))\n for y in reversed(range(s))\n for t in range(2)\n ]\n elif order == \"Hup-Vdo\":\n vertices = [self.graph.V[(t, y, x)]\n for y in reversed(range(s))\n for x in range(s)\n for t in range(2)\n ]\n elif order == \"Hdo-Vdo\":\n vertices = [self.graph.V[(t, y, x)]\n for y in reversed(range(s))\n for x in reversed(range(s))\n for t in range(2)\n ]\n elif order == \"random\":\n vertices = random.sample(list(self.graph.V.values()), s*s*2)\n\n anyons = [vertex for vertex in vertices if vertex.state]\n\n for vertex in anyons:\n if vertex.cluster is None:\n cluster = self.graph.add_cluster(cID)\n cluster.add_vertex(vertex)\n cluster.rad = [0, 0]\n cluster.med = [vertex.sID[1], vertex.sID[2]]\n self.cluster_new_vertex(cluster, vertex, plot_step)\n cluster_place_bucket(self.graph, cluster, self.vcomb)\n cID += 1\n\n if self.uf_plot is not None and not plot_step:\n self.uf_plot.plot_removed(self.graph, \"Clusters initiated.\")\n elif self.uf_plot is not None:\n self.uf_plot.waitforkeypress(\"Clusters initiated.\")",
"def cluster_hierarchically(active_sites):\n\n\n cls, sc = agglomerative(active_sites)\n\n return cls",
"def hier_spectral(data, base_destination, branch_factor=2,\n max_size=20, _curr_label=[0], _cluster_stats={}, _root=True):\n vec = TfidfVectorizer().fit(get_contexts(data))\n cluster_df = __title_cluster_df(data, branch_factor, vec)\n if not os.path.isdir(base_destination):\n os.mkdir(base_destination)\n if _root:\n vec_path = os.path.join(base_destination, 'vectorizer.pkl')\n with open(vec_path, 'wb') as f:\n pickle.dump(vec, f)\n for i in range(branch_factor):\n titles = cluster_df[cluster_df['cluster']==i]['title']\n cluster_size = titles.shape[0]\n cluster_data = __get_data_with_titles(data, titles)\n if cluster_size <= max_size:\n dest = os.path.join(base_destination, 'cluster_{}.json'.format(_curr_label[0]))\n with open(dest, 'w') as f:\n json.dump(cluster_data, f)\n _cluster_stats[_curr_label[0]] = cluster_size\n _curr_label[0] += 1\n else:\n hier_spectral(cluster_data, base_destination, branch_factor,\n _curr_label, _cluster_stats, _root=False)\n if _root:\n stats_path = os.path.join(base_destination, 'cluster_statistics.txt')\n with open(stats_path, 'w') as f:\n for cluster in _cluster_stats.keys():\n f.write('cluster {}: '.format(cluster))\n f.write(str(_cluster_stats[cluster]) + '\\n')",
"def partition_clusters(clustering_matrix, args, nr_clusters=5, method='complete', metric='euclidean', plotting=False):\n # clustering with linkage\n fig = plt.figure(figsize=(8,8))\n ax1 = fig.add_axes([0.09,0.1,0.2,0.6])\n # gives back linkage matrix after hierarchical clustering\n Y = sch.linkage(clustering_matrix, method=method,metric=metric)\n # creates dendogram for plotting and flattening\n Z = sch.dendrogram(Y, orientation='left')\n ax1.set_xticks([])\n ax1.set_yticks([])\n # calculate cluster membership\n # fcluster flattens out dendograms to the specified nr_clusters\n cluster_memberships = sch.fcluster(Y, t=nr_clusters, criterion='maxclust') # ith element in this array is the cluster for i\n idx = np.array(Z['leaves']) # idx ordered in cluster\n \n ax2 = fig.add_axes([0.3,0.71,0.6,0.2])\n Z2 = sch.dendrogram(Y)\n ax2.set_xticks([])\n ax2.set_yticks([])\n\n axmatrix = fig.add_axes([0.3,0.1,0.6,0.6])\n\n clustering_matrix = clustering_matrix[idx,:]\n clustering_matrix = clustering_matrix[:,idx]\n im = axmatrix.matshow(clustering_matrix, aspect='auto', origin='lower', cmap=plt.cm.YlGnBu)\n axmatrix.set_xticks([])\n axmatrix.set_yticks([])\n\n # Plot colorbar.\n axcolor = fig.add_axes([0.91,0.1,0.02,0.6])\n plt.colorbar(im, cax=axcolor)\n if plotting:\n fig.savefig(f'{args.results_root_dir}/clust_{args.clustering_method}_nr_users-{args.num_users}_nr_of_partition_clusters_{nr_clusters}_method_{method}_reconstructed.png')\n\n # Plot filtered\n canvas = np.zeros_like(clustering_matrix)\n for i in range(1,nr_clusters+1):\n mask = np.ones_like(clustering_matrix)\n mask[cluster_memberships[idx]!=i,:] = 0\n mask[:,cluster_memberships[idx]!=i] = 0\n canvas+=clustering_matrix*mask\n fig = plt.figure()\n plt.matshow(canvas,origin='lower')\n if plotting:\n fig.savefig(f'{args.results_root_dir}/clust_{args.clustering_method}_nr_users-{args.num_users}_nr_of_partition_clusters_{nr_clusters}_method_{method}_filtered.png')\n\n d_error = np.sum(clustering_matrix-canvas)\n print(f'Decompostion error: {d_error}, {d_error/np.sum(clustering_matrix)}')\n\n # build cluster id to client id user dict\n cluster_user_dict = { i : idx[cluster_memberships==i] for i in range(1,nr_clusters+1)}\n\n # Test overlaps within clusters\n collected = []\n for i, cluster_members_a in cluster_user_dict.items():\n for j, cluster_members_b in cluster_user_dict.items():\n assert np.all(cluster_members_a != cluster_members_b) or set(cluster_members_a).intersection(set(cluster_members_b)) != {}, f'clusters {i} and {j} are not disjoint'\n collected.extend(cluster_members_a)\n assert np.all(np.arange(0,len(clustering_matrix),1) == np.sort(np.array(collected)))\n\n return cluster_user_dict",
"def get_cluster_dstructure(self, curs, mcl_id, splat_table, mcl_table):\n\t\tno_of_total_genes = get_no_of_total_genes(curs)\n\t\tcluster = self.get_basic_cluster_dstructure(curs, mcl_id, splat_table, mcl_table)\n\t\tif cluster:\t#not None\n\t\t\tcluster.go_no2association_genes = self.get_go_functions_of_this_gene_set(curs, cluster.vertex_set)\n\t\t\tcluster.go_no2information = self.get_information_of_go_functions(curs, cluster.go_no2association_genes, \\\n\t\t\t\tlen(cluster.vertex_set), no_of_total_genes)\n\t\t\tcluster.edge_cor_2d_list, cluster.edge_sig_2d_list = self.get_cor_sig_2d_list(curs, cluster.edge_set)\n\t\t\t#graph = self.graph_from_node_edge_set(cluster.vertex_set, cluster.edge_set)\n\t\treturn cluster\n\t\t\n\t\t\"\"\"\n\t\tprint \"vertex_set\"\n\t\tprint cluster.vertex_set\n\t\tprint \"edge_set\"\n\t\tprint cluster.edge_set\n\t\trecurrence_list_2d = ['recurrence_array']+cluster.recurrence_array\n\t\trecurrence_list_2d_1 = ['recurrence_array_1']+cluster.recurrence_array\n\t\trecurrence_list_2d = [recurrence_list_2d, recurrence_list_2d_1]\n\t\tself.column_output('/tmp/yh/recurrence_array',recurrence_list_2d)\n\n\t\tprint cluster.splat_connectivity\n\t\tprint \"connectivity\"\n\t\tprint cluster.connectivity\n\t\tprint \"connectivity_original\"\n\t\tprint cluster.connectivity_original\n\t\tcor_list_2d = []\n\t\tsig_list_2d = []\n\t\tfor i in range(len(cluster.edge_set)):\n\t\t\tcor_list_2d.append([repr(cluster.edge_set[i])]+cluster.edge_cor_2d_list[i])\n\t\t\tsig_list_2d.append([repr(cluster.edge_set[i])]+cluster.edge_sig_2d_list[i])\n\t\tself.column_output('/tmp/yh/edge_cor_2d_list', cor_list_2d)\n\t\tself.column_output('/tmp/yh/edge_sig_2d_list', sig_list_2d)\n\n\t\tgo_no_list_2d = []\n\t\tfor go_no,information in cluster.go_no2information.iteritems():\n\t\t\tgo_no_list_2d.append(list(information)+[len(cluster.go_no2association_genes[go_no])])\n\t\t#self.column_output('/tmp/yh/go_no_list_2d', go_no_list_2d)\n\t\t\"\"\"",
"def fit(self):\n self.cluseter_agglomerative(n_clusters=20, linkage='average', iterate=5)\n self.sub_clustering(n_clusters=3, index_cluster=[79], linkage='complete')\n self.merge_clusters([[0,9,53],[1,83],[46,35,67],[88,23],[6,68]])\n self.merge_clusters([[6,33,52],[17,14]])\n self.sub_clustering(n_clusters=2, index_cluster=[0], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[2], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[85], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[14], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[16], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[22], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[24], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[26], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[28], linkage='ward')\n self.merge_clusters([[6,98,99]])\n self.merge_clusters([[35,80]])\n self.sub_clustering(n_clusters=4, index_cluster=[35], linkage='complete')\n self.merge_clusters([[76,98]])\n self.sub_clustering(n_clusters=3, index_cluster=[35], linkage='complete')\n self.merge_clusters([[39,42]])\n self.sub_clustering(n_clusters=3, index_cluster=[47], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='average')\n self.merge_clusters([[70,101]])\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[61], linkage='ward')\n self.merge_clusters()\n return",
"def mySpectralClustering(W, K, normalized):\n\n D = np.diag(np.sum(W,axis=0))\n L = D - W\n if normalized == 1:\n L = lin.inv(D) @ L \n vals, vecs = lin.eig(L)\n idx = vals.argsort()[::-1] \n vals = vals[idx]\n vecs = vecs[:,idx]\n N, _ = W.shape\n Y = np.zeros((K,N))\n for kk in range(K):\n Y[kk,:] = vecs[:,kk]\n kmeans = KMeans(n_clusters=K).fit(Y.T)\n estlabels = kmeans.labels_\n return estlabels, Y",
"def cluster(self, verbose=0, sum_ess=False):\n ## if sum_ess and self.linkage.__name__ != \"ward_link\":\n ## raise ValueError(\n ## \"Summing for method other than Ward makes no sense...\")\n clusters = copy.copy(self._dist_matrix)\n #clusters = self._dist_matrix\n summed_ess = 0.0\n\n while len(clusters) > max(self._num_clusters, 1):\n if verbose >= 1:\n print('k=%s' % len(clusters))\n if verbose == 2:\n print(clusters)\n\n best, i, j = self.smallest_distance(clusters)\n # In Ward (1963) ess is summed at each iteration\n # in R's hclust and Python's hcluster and some text books it is not.\n # Here it is optional...\n if sum_ess:\n summed_ess += best\n else:\n summed_ess = best\n clusters = self.update_distmatrix(i, j, clusters)\n self._dendrogram.merge(i,j)\n self._dendrogram[i].distance = summed_ess\n indices = numpy.arange(clusters.shape[0])\n indices = indices[indices!=j]\n clusters = clusters.take(indices, axis=0).take(indices, axis=1)",
"def cluster_spatial_positioning(data):\n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n if n_clusters <2:\n #Setting cluster angluar features to default\n cdist=[Cluster_Relative_Distances()]\n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n elif n_clusters >=2:\n # Here we implement two approaches for measuring distances between clustes:\n # (1) border-boder distances and (2) centroid-centroid distances. \n # We compute dispersion measures for the distances obtained. \n \n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n\n min_dist_between_clusters=np.row_stack([[np.amin(ss.distance_matrix(np.column_stack([d[i]['X'].array,d[i]['Y'].array]), \n np.column_stack([d[j]['X'].array,d[j]['Y'].array]))) for j in d.keys()] for i in d.keys()])\n min_dist_between_clusters=np.delete(list(set(np.frombuffer(min_dist_between_clusters))) ,0)\n\n cen_dist_between_clusters=ss.distance_matrix(np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]),\n np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]))\n cen_dist_between_clusters=np.delete(list(set(np.frombuffer(cen_dist_between_clusters))) ,0)\n\n (avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster)= distribution_statistics(min_dist_between_clusters)\n\n (avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster)= distribution_statistics(cen_dist_between_clusters)\n\n cdist = [Cluster_Relative_Distances([avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster,\n avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster])]\n \n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n \n return cdist",
"def DBscan_clustering(self,d,s):\r\n print(colored(\"Performing agglomerative clustering\",color = 'yellow', attrs=['bold']))\r\n self.clustering = DBSCAN(eps=d,min_samples=s,metric = 'euclidean').fit(self.X)\r\n self.labels = self.clustering.labels_\r\n self.davies_bouldin_score()\r\n print()\r\n print(colored(\"The number of cluster centers formed are %d\\n\"%len(np.unique(self.labels)),color = 'red', attrs=['bold']))\r\n self.cluster_plot()\r\n return self.labels",
"def __str__(self):\n return \"Clustering\"",
"def demo():\n # declare dummy input vector with two dimensions:\n vectors = numpy.array([[2,4], [0,1], [1,1], [3,2], [4,0], [2,2], [8, 9], [8, 11]])\n\n # compute the distance matrix on the basis of the vectors via sklearn:\n dist_matrix = pairwise_distances(vectors, metric='cityblock')\n\n # plot the distance matrix:\n # dist_matrix.draw() this doesn't work anymore\n\n # initialize a temporal VNC clusterer, here with the Ward linkage method:\n clusterer = VNClusterer(dist_matrix, linkage='ward') # could also be a plain Clusterer()\n\n # start the clustering procedure:\n clusterer.cluster(verbose=1)\n\n labels = ['n'+str(i+1) for i in range(len(vectors))]\n # plot the result as a dendrogram\n clusterer.dendrogram.draw(save=True,\n labels=labels,\n title=\"VNC Analysis (Ward's Linkage)\")",
"def print_clusters(vectors, labels, nclusters, show=False):\n plt.figure(1)\n plt.clf()\n\n vecs2D = TSNE(n_components=2).fit_transform(vectors)\n\n colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')\n for k, col in zip(range(nclusters), colors):\n my_members = labels == k\n\n cluster_vecs2D = vecs2D[my_members, :]\n\n print(cluster_vecs2D)\n print(cluster_vecs2D[:,0])\n print(cluster_vecs2D[:,1])\n\n plt.scatter(cluster_vecs2D[:,0], \n cluster_vecs2D[:,1], \n c=col, \n label='cluster {}'.format(k))\n\n plt.title('Estimated clusters')\n plt.legend()\n\n if show:\n plt.show()\n\n cwd = os.getcwd()\n if not os.path.exists(cwd+\"/plots\"):\n os.makedirs(cwd+\"/plots\")\n plt.savefig(cwd+'/plots/clusters.png')",
"def clusters(self):\n raise NotImplementedError",
"def cluster_linkage_seaborn(features, \n metadata, \n groupby='gene_name', \n saveDir=None, \n method='average', \n metric='euclidean'):\n \n # Normalise data\n featZ = features.apply(zscore, axis=0)\n featZ = dropNaN(featZ) # drop NaN values after normalising\n\n plt.close('all')\n cg = plot_clustermap(featZ, \n metadata,\n group_by=groupby,\n col_linkage=None,\n method=method,\n metric=metric,\n saveto=(saveDir / \"heatmap_{}.pdf\".format(method + '_' + metric) if \n saveDir is not None else None),\n figsize=[20,40],\n sns_colour_palette=\"Pastel1\",\n sub_adj={'top':0.98,'bottom':0.02,'left':0.02,'right':0.9})\n plt.close()\n\n # extract distances from clustermap dendrogram\n Z = cg.dendrogram_row.linkage\n \n # extract mean df (one sample per row)\n mean_featZ = cg.data\n\n # extract row labels from clustermap heatmap\n labels = sorted(metadata[groupby].unique())\n mean_featZ.index = labels # strain names as index \n \n return Z, mean_featZ",
"def metis(W, levels, rid=None):\n # Function written by M. Defferrard, taken verbatim, from \n # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L34\n\n N, N = W.shape\n if rid is None:\n rid = np.random.permutation(range(N))\n parents = []\n degree = W.sum(axis=0) - W.diagonal()\n graphs = []\n graphs.append(W)\n #supernode_size = np.ones(N)\n #nd_sz = [supernode_size]\n #count = 0\n\n #while N > maxsize:\n for _ in range(levels):\n\n #count += 1\n\n # CHOOSE THE WEIGHTS FOR THE PAIRING\n # weights = ones(N,1) # metis weights\n weights = degree # graclus weights\n # weights = supernode_size # other possibility\n weights = np.array(weights).squeeze()\n\n # PAIR THE VERTICES AND CONSTRUCT THE ROOT VECTOR\n idx_row, idx_col, val = scipy.sparse.find(W)\n perm = np.argsort(idx_row)\n rr = idx_row[perm]\n cc = idx_col[perm]\n vv = val[perm]\n cluster_id = metis_one_level(rr,cc,vv,rid,weights) # rr is ordered\n parents.append(cluster_id)\n\n # TO DO\n # COMPUTE THE SIZE OF THE SUPERNODES AND THEIR DEGREE \n #supernode_size = full( sparse(cluster_id, ones(N,1) ,\n #\tsupernode_size ) )\n #print(cluster_id)\n #print(supernode_size)\n #nd_sz{count+1}=supernode_size;\n\n # COMPUTE THE EDGES WEIGHTS FOR THE NEW GRAPH\n nrr = cluster_id[rr]\n ncc = cluster_id[cc]\n nvv = vv\n Nnew = cluster_id.max() + 1\n # CSR is more appropriate: row,val pairs appear multiple times\n W = scipy.sparse.csr_matrix((nvv,(nrr,ncc)), shape=(Nnew,Nnew))\n W.eliminate_zeros()\n # Add new graph to the list of all coarsened graphs\n graphs.append(W)\n N, N = W.shape\n\n # COMPUTE THE DEGREE (OMIT OR NOT SELF LOOPS)\n degree = W.sum(axis=0)\n #degree = W.sum(axis=0) - W.diagonal()\n\n # CHOOSE THE ORDER IN WHICH VERTICES WILL BE VISTED AT THE NEXT PASS\n #[~, rid]=sort(ss); # arthur strategy\n #[~, rid]=sort(supernode_size); # thomas strategy\n #rid=randperm(N); # metis/graclus strategy\n ss = np.array(W.sum(axis=0)).squeeze()\n rid = np.argsort(ss)\n\n return graphs, parents",
"def plotClusterModel(df):\n plt.figure()\n\n for c in df['label'].unique():\n df2 = df[df['label'] == c]\n labelStr = 'grp %d, y=%.2f, num=%d' % (\n c, df2['y'].mean(), df2['y'].shape[0])\n plt.scatter(df2['x'], df2['y'], label=labelStr)\n\n return plt.legend()",
"def plot_MDS():\n lds = {} #lds is a dictionary of dictionaries: {\"slovenian.txt\": {\"abc\":3,\"efg\":4...}, \"macedonian.txt\":{\"abc\":5,\"efg\":6...},...}\n for fn in listdir(\"clustering\"):\n if fn.lower().endswith(\".txt\"):\n with open(join(\"clustering\", fn), encoding=\"utf8\") as f:\n text = f.read()\n nter = terke(text, n=3)\n lds[fn] = nter\n \n distances={} #a dictionary of dictionaries that saves the distances between a language and all other languages\n \n for x in lds.keys():\n distances[x]={}\n for y in lds.keys():\n if x == y: distances[x][y]=0.0\n else: distances[x][y]=cosine_dist(lds[x],lds[y])\n\n dst=np.zeros([len(lds.keys()), len(lds.keys())])\n i=0\n j=0\n for x in lds.keys():\n j=0\n for y in lds.keys():\n dst[i,j]=distances[x][y]\n j+=1\n i+=1\n\n X, languages = prepare_data_matrix()\n\n transformer = MDS(n_components=2, dissimilarity='precomputed')\n transformed = transformer.fit_transform(dst)\n\n plt.scatter(transformed [:,0], transformed [:,1])\n for i in range(len(transformed)):\n plt.text(transformed[i,0], transformed[i,1], languages[i][:3])\n plt.show()",
"def plot_clusters(true_data, preds, cluster_center, cluster_name, savefig=\"\", title=\"\"):\n\n colors = plt.cm.get_cmap('hsv', len(cluster_name)+1) # get colors for each cluster using get_cmap. This will give us len(cluster_name) colors in a object form.\n \n for i, c in enumerate(cluster_name): # iterate through each cluster name\n if c == -1: # -1 is given by DBScan for noise\n clrs = 'grey' # make it grey\n label = 'Noise' # label it 'Noise'\n else:\n clrs = colors(c) # get color for it\n label=f'Cluster {c}' # label it by its name\n df = true_data[preds == c] # get the points from dataset whose prediction was cluster `c`\n x, y = df.iloc[:, 0], df.iloc[:, 1] # x and y axis\n plt.scatter( # plotting the x and y axis\n x, y,\n label=label,\n color=clrs\n )\n if c != -1:\n plt.text(\n cluster_center[i][0] + 0.03, cluster_center[i][1] + 0.1,\n f\"Cluster {i}\",\n weight='bold',\n fontsize=9,\n )\n \n plt.scatter(\n cluster_center[:, 0], cluster_center[:, 1], # plotting the cluster centers\n s=250, marker='*',\n c='red', edgecolor='black',\n label='Centroids'\n )\n \n plt.title(title)\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n plt.tight_layout()\n if savefig != \"\" : plt.savefig(f\"{savefig}.png\")\n plt.show()\n plt.close()"
] |
[
"0.6763111",
"0.6744619",
"0.6698987",
"0.66339046",
"0.6591178",
"0.65695626",
"0.65430254",
"0.64374816",
"0.6399827",
"0.6366124",
"0.6343846",
"0.6322008",
"0.628119",
"0.62615293",
"0.62459433",
"0.6195688",
"0.61666834",
"0.61042285",
"0.60836965",
"0.608267",
"0.6048858",
"0.60420597",
"0.6022237",
"0.6019229",
"0.6017976",
"0.6006029",
"0.60035163",
"0.6002751",
"0.6002365",
"0.59768677"
] |
0.77539515
|
0
|
Compute the ML tree under the Dollo model of SNV evolution
|
def compute_dollo_ml_tree(snv_log_likelihoods, leaf_name_groups=None):
trees = dollo.tasks.create_trees(
snv_log_likelihoods,
sample_col='cluster_id',
leaf_name_groups=leaf_name_groups,
)
results_table = dollo.tasks.compute_tree_log_likelihoods_mp(
snv_log_likelihoods, trees,
sample_col='cluster_id', variant_col='variant_id')
ml_tree_id = results_table.set_index('tree_id')['log_likelihood'].idxmax()
tree = trees[ml_tree_id]
loss_prob = results_table.set_index('tree_id').loc[ml_tree_id, 'loss_prob']
tree_annotations = dollo.run.annotate_posteriors(
snv_log_likelihoods, tree, loss_prob=loss_prob,
sample_col='cluster_id', variant_col='variant_id')
return tree, tree_annotations
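
The ML-tree selection in the middle of this function is a plain pandas reduction over the per-tree results table (the function itself additionally assumes the dollo package is imported at module level). The toy sketch below, with invented numbers and no dependency on dollo, illustrates the idxmax/loc pattern used to pick the best tree and its loss probability.

# Self-contained toy illustration of the selection step above; the values in
# results_table are invented and do not come from the dollo package.
import pandas as pd

results_table = pd.DataFrame({
    'tree_id': [0, 1, 2],
    'log_likelihood': [-120.5, -98.2, -101.7],
    'loss_prob': [0.01, 0.05, 0.02],
})

# Pick the tree id with the highest log likelihood, then look up its loss_prob.
ml_tree_id = results_table.set_index('tree_id')['log_likelihood'].idxmax()
loss_prob = results_table.set_index('tree_id').loc[ml_tree_id, 'loss_prob']
print(ml_tree_id, loss_prob)  # -> 1 0.05: tree 1 maximizes the log likelihood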
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compute_tree(self, tree):\n g_list_val, g_list_h = self._build_graph(tree) # return theano variable of each node\n list_val = self._traversal_tree(tree) #\n f = theano.function(g_list_val, g_list_h, allow_input_downcast=True)\n result = f(*list_val)\n return result",
"def select_model():\r\n from sklearn import tree\r\n import graphviz\r\n\r\n ValidationSetAndLabels = AllSets[1]\r\n ValLabels = ValidationSetAndLabels[:, [-1]] # extract labels (last column)\r\n ValSet = np.delete(ValidationSetAndLabels, -1, axis=1) # delete labels\r\n\r\n TrainingSetAndLabels = AllSets[2]\r\n TrainLabels = TrainingSetAndLabels[:, [-1]] # extract labels (last column)\r\n TrainSet = np.delete(TrainingSetAndLabels, -1, axis=1) # delete labels\r\n\r\n \"\"\"\r\n This is the code to select the best hyperparameter (part b)\r\n\r\n for SplitCriterion in ['entropy', 'gini']:\r\n print \"Criterion: \" + SplitCriterion + '\\n'\r\n\r\n for MaxDepth in [int(depth) for depth in np.linspace(1, np.log2(TrainSet.shape[1]), 5)]:\r\n print \"max_depth: \" + str(MaxDepth) + '\\n'\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion=SplitCriterion, max_depth=MaxDepth)\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n print \"Accuracy for this test is: %f %%\" %Accuracy\r\n print '\\n'\r\n\r\n print '\\n'\r\n \"\"\"\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion='entropy', max_depth=12)\r\n\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n dot_data = tree.export_graphviz(MyTree, out_file=None, max_depth=2,\r\n feature_names=AllSets[3], filled=True, rounded=True, special_characters=True,\r\n class_names=TrainLabels.flatten().astype(str))\r\n graph = graphviz.Source(dot_data)\r\n graph.render(\"output\")",
"def LotkaVolterra_Dynamics(self):\n LV_c = self.toConceptual(self.state) # (nF, nR)\n LV_c = LV_c.mul((1 - LV_c) + self.LV_inhM.mm(LV_c))\n LV_s = self.toNeural(LV_c)\n\n return LV_c, LV_s",
"def main(self):\n\n self.nodelist = []\n\n self.probname = self.probpath.split('/')[-1].rstrip('.mps.lp.gz')\n\n model = Model(\"TreeD\")\n eventhdlr = LPstatEventhdlr()\n eventhdlr.nodelist = self.nodelist\n model.includeEventhdlr(eventhdlr, \"LPstat\", \"generate LP statistics after every LP event\")\n model.readProblem(self.probpath)\n model.setIntParam('presolving/maxrestarts', 0)\n\n for setting in self.scip_settings:\n model.setParam(setting[0], setting[1])\n\n model.optimize()\n\n self.scipversion = 'SCIP '+str(model.version())\n # self.scipversion = self.scipversion[:-1]+'.'+self.scipversion[-1]\n\n if model.getStatus() == 'optimal':\n self.optval = model.getObjVal()\n else:\n self.optval = None\n\n\n # print(\"performing Spatial Analysis on similarity of LP condition numbers\")\n # self.performSpatialAnalysis()\n\n columns = self.nodelist[0].keys()\n self.df = pd.DataFrame(self.nodelist, columns = columns)\n\n # merge solutions from cutting rounds into one node\n if not self.showcuts:\n self.df = self.df[self.df['first'] == False].drop_duplicates(subset='age', keep='last').reset_index()",
"def plot_dollo_ml_tree(tree, nodes):\n leaf_order = []\n for leaf in tree.leaves:\n leaf.plot_id = leaf.name\n leaf_order.append(leaf.name)\n\n origin_counts = nodes.groupby('node')['ml_origin'].sum()\n\n for node in tree.nodes:\n node.origin_count = origin_counts[node.label]\n\n loss_counts = nodes.groupby('node')['ml_loss'].sum()\n\n width = 1 + 0.5 * float(len(list(tree.leaves)))\n fig = plt.figure(figsize=(width/1.5, 6))\n\n ax = fig.add_subplot(111)\n\n def func(x, pos):\n s = '{:0,d}'.format(int(x))\n return s\n ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(func))\n\n wgs_analysis.plots.trees.plot_tree(ax, tree, landscape=False, flip=True, branch_length_attr='origin_count', leaf_name_attr='plot_id')\n\n ax.set_ylabel('SNV count')\n\n plt.tight_layout()",
"def tree_optimize(mvp_tree,coefs=None):\n if not coefs:\n coefs = [1,1,1]\n # TODO",
"def decision_tree(df, variables, test_size):\n from sklearn.model_selection import train_test_split\n from sklearn import tree\n\n # Define input\n X = encoding_df(df, variables)\n\n # Set validation\n y = df['target']\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n\n clf = tree.DecisionTreeRegressor()\n clf = clf.fit(X_train, y_train)\n\n print(compute_rmse(y_test, clf.predict(X_test)))\n return clf.predict(X_test), y_test",
"def mortality_model(train, val, test, model, sel_RFE,logs_file, model_name):\n create_folder(logs_file)\n x_train, y_train, feature_list = read_data(train)\n x_val, y_val, _ = read_data(val)\n x_test, y_test, _ = read_data(test)\n create_folder(logs_file)\n x_train = scaler(x_train)\n x_val = scaler(x_val)\n x_test = scaler(x_test)\n\n print(\"===================== Recursive Feature Elimination ===============================================\")\n if sel_RFE == 1:\n x_train, x_val, x_test,feature_list,num_features = features_selection(x_train, y_train,x_val,x_test,model,feature_list)\n else:\n num_features = x_train.shape[1]\n print(\"num_features\",num_features)\n \"\"\" Imbalanced classes \"\"\"\n sample_weights = class_weight.compute_sample_weight('balanced', y_train)\n print(\"===================== Fine-tuning ==============================================================\")\n if model == 'LR':\n x_train = (x_train-x_train.mean())/(x_train.max()-x_train.min())\n parameters={\"C\":np.logspace(-3,3,7), \"penalty\":[\"elasticnet\"],\"solver\":['saga'], \"l1_ratio\":[0.5],\n \"class_weight\": ['balanced'],}\n estimator = LogisticRegression()\n if model == 'SVM':\n parameters={\"C\":np.logspace(-3,3,7), \"class_weight\": ['balanced'],\"random_state\": [422]}\n estimator = svm.LinearSVC()\n if model == 'SGD':\n parameters={\"loss\": ['hinge', 'perceptron'], \"penalty\": ['l1', 'l2'],\"alpha\":[0.001, 0.0001],\n \"max_iter\": [20, 50], \"class_weight\": ['balanced'], \"n_iter_no_change\" : [3,5]\n ,\"random_state\": [422]}\n estimator = SGDClassifier()\n if model == 'RF':\n parameters={\"n_estimators\":[100,200, 50,10], \"max_features\": ['log2'],\n \"max_depth\" : [2, 4,6],\"criterion\":['gini'], \"min_impurity_decrease\":[1e-4, 1e-7],\n \"class_weight\":['balanced'],\"random_state\": [422]}\n estimator = RandomForestClassifier()\n if model == 'ADA':\n parameters={\"n_estimators\":[50, 100], \"learning_rate\": [1e-4, 1e-7],\"random_state\": [422]}\n estimator = AdaBoostClassifier(DecisionTreeClassifier(max_depth=5, class_weight = 'balanced'))\n if model == 'GBT':\n parameters={\"n_estimators\":[100,200], \"learning_rate\": [1.0, 0.1,0.9], \"max_features\": ['log2'],\n \"max_depth\":[5, 2], \"criterion\": [\"friedman_mse\"], \"loss\": ['exponential'],\n \"min_impurity_split\":[1e-4, 1e-7], \"min_weight_fraction_leaf\": [0], \"random_state\": [422]}\n estimator = GradientBoostingClassifier()\n if model == 'XGBT':\n ratio = float(np.sum(y_train == 0)) / np.sum(y_train==1)\n parameters={\"n_estimators\":[100,120], \"learning_rate\": [0.1,0.05],\"colsample_bytree\" : [0.4, 0.8],\n \"subsample\" : [0.8, 0.4], \"reg_alpha\" : [0.5], \"reg_lambda\": [2],\n \"objective\": ['binary:logistic'], \"max_depth\":[4, 2], \"gamma\":[10],\"rate_drop\": [0.5, 0.3],\n \"seed\": [422], \"eval_metric\": ['auc'],\n \"scale_pos_weight\": [ratio]}\n estimator = xgb.XGBClassifier()\n if model == 'LightGB':\n ratio = float(np.sum(y_train == 0)) / np.sum(y_train==1)\n parameters={\"objective\": ['binary'],\"learning_rate\":[0.1,0.9],\n \"metric\":['auc'], \"max_bin\":[130], \"feature_fraction\":[0.8],\"min_data_in_bin\":[1],\n \"max_depth\":[10],\"min_data_in_leaf\": [10],\"min_sum_hessian_in_leaf\":[1e-10],\"drop_rate\":[0.5],\n \"bagging_fraction\":[0.5,1.0],\"num_leaves\":[31],\"boost_from_average\":['true'],\"lambda_l2\":[0.09, 0.9],\n \"min_gain_to_split\":[10], \"num_iterations\":[200], \"random_state\": [422],\"scale_pos_weight\": [ratio]}\n estimator = lgb.LGBMClassifier()\n 
print(\"-----------GridSearchCV-----------------\")\n grid = GridSearchCV(estimator=estimator, param_grid=parameters, cv = cv, scoring='roc_auc', iid=\"warn\", refit = True)\n grid.fit(x_train,y_train,sample_weight = sample_weights)\n auc_train = grid.best_score_\n best_params = grid.best_params_\n\n print(\"===================== Training again with best parameters =========================================\")\n if model == \"LR\":\n model_ehr = LogisticRegression(**best_params)\n model_ehr = model_ehr.fit(x_train,y_train)\n if model == \"SVM\":\n model_ehr = svm.LinearSVC(**best_params)\n model_ehr = model_ehr.fit(x_train,y_train)\n if model == \"SGD\":\n model_ehr = SGDClassifier(**best_params)\n model_ehr = model_ehr.fit(x_train,y_train)\n if model == \"RF\":\n model_ehr = RandomForestClassifier(**best_params)\n model_ehr = model_ehr.fit(x_train,y_train)\n if model == \"ADA\":\n model_ehr = AdaBoostClassifier(DecisionTreeClassifier(max_depth=5, class_weight = 'balanced'),**best_params)\n model_ehr = model_ehr.fit(x_train,y_train)\n if model == \"GBT\":\n model_ehr = GradientBoostingClassifier(**best_params)\n model_ehr = model_ehr.fit(x_train,y_train)\n if model == \"XGBT\":\n model_ehr = xgb.XGBClassifier(**best_params)\n model_ehr = model_ehr.fit(x_train,y_train)\n if model == \"LightGB\":\n model_ehr = lgb.LGBMClassifier(**best_params)\n model_ehr = model_ehr.set_params(random_state= 422, scale_pos_weight = ratio)\n model_ehr = model_ehr.fit(x_train,y_train)\n\n \"\"\" Saving metrics\"\"\"\n auc_val, sens_val, spec_val, f1_val, acc_val,_,_ = generating_metrics(model, model_ehr, x_val, y_val) #val_set\n auc_test, sens_test, spec_test, f1_test, acc_test,fpr, tpr = generating_metrics(model, model_ehr, x_test, y_test) #test_set\n print(\"auc_train:{}, auc_val:{}, auc_test: {}, sens_test {}, spec_test: {}, f1_test {}, acc_test {}\".format(auc_train,\n auc_val, auc_test, sens_test, spec_test, f1_test, acc_test))\n saving_metrics(model_name, logs_file, num_features, auc_train\n ,auc_val, sens_val, spec_val, f1_val, acc_val\n ,auc_test, sens_test, spec_test, f1_test, acc_test,fpr, tpr)\n saving_parameters(num_features,best_params, auc_train, auc_val, model_name,logs_file)\n print(\"Ready: \", model_name)",
"def e_step(votes_ij, activations_j, mean_j, stdv_j, var_j, spatial_routing_matrix):\n \n with tf.variable_scope(\"e_step\") as scope:\n \n # AG 26/06/2018: changed stdv_j to var_j\n o_p_unit0 = - tf.reduce_sum(\n tf.square(votes_ij - mean_j, name=\"num\") / (2 * var_j), \n axis=-1, \n keepdims=True, \n name=\"o_p_unit0\")\n \n o_p_unit2 = - 0.5 * tf.reduce_sum(\n tf.log(2*np.pi * var_j), \n axis=-1, \n keepdims=True, \n name=\"o_p_unit2\"\n )\n\n # (24, 6, 6, 288, 32, 1)\n o_p = o_p_unit0 + o_p_unit2\n zz = tf.log(activations_j + FLAGS.epsilon) + o_p\n \n # AG 13/11/2018: New implementation of normalising across parents\n #----- Start -----#\n zz_shape = zz.get_shape().as_list()\n batch_size = zz_shape[0]\n parent_space = zz_shape[1]\n kh_kw_i = zz_shape[3]\n parent_caps = zz_shape[4]\n kk = int(np.sum(spatial_routing_matrix[:,0]))\n child_caps = int(kh_kw_i / kk)\n \n zz = tf.reshape(zz, [batch_size, parent_space, parent_space, kk, \n child_caps, parent_caps])\n \n \"\"\"\n # In un-log space\n with tf.variable_scope(\"to_sparse_unlog\") as scope:\n zz_unlog = tf.exp(zz)\n #zz_sparse_unlog = utl.to_sparse(zz_unlog, spatial_routing_matrix, \n # sparse_filler=1e-15)\n zz_sparse_unlog = utl.to_sparse(\n zz_unlog, \n spatial_routing_matrix, \n sparse_filler=0.0)\n # maybe this value should be even lower 1e-15\n zz_sparse_log = tf.log(zz_sparse_unlog + 1e-15) \n zz_sparse = zz_sparse_log\n \"\"\"\n\n \n # In log space\n with tf.variable_scope(\"to_sparse_log\") as scope:\n # Fill the sparse matrix with the smallest value in zz (at least -100)\n sparse_filler = tf.minimum(tf.reduce_min(zz), -100)\n# sparse_filler = -100\n zz_sparse = utl.to_sparse(\n zz, \n spatial_routing_matrix, \n sparse_filler=sparse_filler)\n \n \n with tf.variable_scope(\"softmax_across_parents\") as scope:\n rr_sparse = utl.softmax_across_parents(zz_sparse, spatial_routing_matrix)\n \n with tf.variable_scope(\"to_dense\") as scope:\n rr_dense = utl.to_dense(rr_sparse, spatial_routing_matrix)\n \n rr = tf.reshape(\n rr_dense, \n [batch_size, parent_space, parent_space, kh_kw_i, parent_caps, 1])\n #----- End -----#\n\n # AG 02/11/2018\n # In response to a question on OpenReview, Hinton et al. wrote the \n # following:\n # \"The gradient flows through EM algorithm. We do not use stop gradient. A \n # routing of 3 is like a 3 layer network where the weights of layers are \n # shared.\"\n # https://openreview.net/forum?id=HJWLfGWRb¬eId=S1eo2P1I3Q\n \n return rr",
"def evaluate(params,dataloader):\n MIN_DEPTH = 1e-3\n MAX_DEPTH = 80\n num_gpus = 1\n pred_depth_scale_factor = 1\n checkpoint_path = './log_diretory/mono_depth2-102000/model-97060'#'./log_diretory/kitti_resnet_MS2_nbn_1epoch_pose_fix/model-189107'\n\n gt_path = './utils/gt/eigen_zhou'\n eval_stereo = False\n\n with tf.Graph().as_default(), tf.device('/cpu:0'):\n\n dataloader = MonodepthDataloader(dataloader.data_path, dataloader.filenames_file, params, dataloader.dataset,\n dataloader.mode)\n reference = dataloader.reference_image_batch\n param = dataloader.param_path_batch\n\n\n # split for each gpu\n reference_splits = tf.split(reference, num_gpus,0)\n param_splits = tf.split(param,num_gpus,0)\n\n\n\n reuse_variables = None\n\n with tf.variable_scope(tf.get_variable_scope()):\n for i in range(num_gpus):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('%d' % i) as scope:\n print(i)\n model = MonodepthModel(params, dataloader.mode, reference_splits[i],None,None,None,param_splits[i],\n #param_path=param_path_splits[i],\n reuse_variables=reuse_variables, model_index=i)\n\n\n\n config = tf.ConfigProto(allow_soft_placement=True) # allow_soft_placement는 명시된 device없을 때 자동으로 잡아준다.\n sess = tf.Session(config=config)\n # Saver\n train_saver = tf.train.Saver()\n\n # Init\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n coordinator = tf.train.Coordinator() ## coordinator=조정자, threads 관리해주는 함수\n threads = tf.train.start_queue_runners(sess=sess, coord=coordinator)\n\n # Restore\n print(\"Restore\")\n\n if checkpoint_path != '':\n print('----------------------------------------------')\n print(checkpoint_path)\n print('\\n')\n print(checkpoint_path.split(\".\")[0])\n print('----------------------------------------------')\n train_saver.restore(sess, checkpoint_path)\n print(\"Restore OK\")\n with tf.variable_scope(tf.get_variable_scope()):\n for i in range(num_gpus):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('%d' % i) as scope:\n bn_updates_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)\n num_test_samples = count_text_lines(dataloader.filenames_file)\n pred_disps = []\n print('Start')\n for step in range(num_test_samples):\n pred_disp = sess.run(model.disp_reference_est[0])\n\n pred_disp = pred_disp.squeeze()\n pred_disp,_ = disp_to_depth(pred_disp)\n\n # print(pred_disp.shape)\n # plt.imshow(pred_disp)\n # plt.show()\n pred_disp = np.expand_dims(pred_disp,0)\n\n pred_disps.append(pred_disp)\n\n pred_disps = np.concatenate(pred_disps)\n print(pred_disps.shape)\n gt_path = gt_path+ '/gt_depths.npz'\n gt_depths = np.load(gt_path, fix_imports=True, encoding='latin1')[\"data\"]\n print(gt_depths[0].shape)\n\n print(\"-> Evaluating\")\n disable_median_scaling=False\n if eval_stereo:\n print(\" Stereo evaluation - \"\n \"disabling median scaling, scaling by {}\".format(STEREO_SCALE_FACTOR))\n disable_median_scaling = True\n pred_depth_scale_factor = STEREO_SCALE_FACTOR\n else:\n print(\" Mono evaluation - using median scaling\")\n\n errors = []\n ratios = []\n\n for i in range(pred_disps.shape[0]):\n\n gt_depth = gt_depths[i]\n gt_height, gt_width = gt_depth.shape[:2]\n\n pred_disp = pred_disps[i]\n pred_disp = cv2.resize(pred_disp, (gt_width, gt_height))\n pred_depth = 1 / pred_disp\n print(pred_depth[0,0])\n\n\n\n\n mask = np.logical_and(gt_depth > MIN_DEPTH, gt_depth < MAX_DEPTH)\n\n crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height,\n 0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32)\n\n crop_mask = 
np.zeros(mask.shape)\n crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1\n mask = np.logical_and(mask, crop_mask)\n\n print(mask)\n #if i ==pred_disps.shape[0]-3:\n # plt.imshow(pred_depth / 100) # pred_depth[mask]/100)\n # plt.show()\n # plt.imshow(np.where(mask,pred_depth,np.zeros_like(pred_depth))/100)#pred_depth[mask]/100)\n # plt.show()\n # plt.imshow(np.where(mask,gt_depth,np.zeros_like(gt_depth))/100)\n # plt.show()\n\n print(\"pred_depth[mask]\", pred_depth[mask])\n print(\"gt_depth[mask]\", gt_depth[mask])\n pred_depth = pred_depth[mask]\n gt_depth = gt_depth[mask]\n\n pred_depth *= pred_depth_scale_factor\n if not disable_median_scaling:\n print('?')\n ratio = np.median(gt_depth) / np.median(pred_depth)\n ratios.append(ratio)\n pred_depth *= ratio\n\n pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH\n pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH\n print(\"pred_depth={}\".format(pred_depth))\n print(\"pred_depth < MIN_DEPTH\",pred_depth < MIN_DEPTH)\n print(\" pred_depth[pred_depth < MIN_DEPTH] \", pred_depth[pred_depth < MIN_DEPTH] )\n print(\"pred_depth > MAX_DEPTH\",pred_depth > MAX_DEPTH)\n print(\"pred_depth[pred_depth > MAX_DEPTH]\",pred_depth[pred_depth > MAX_DEPTH])\n print(\"pred_depth_shape={}\".format(pred_depth.shape))\n print(\"gt_depth_shape={}\".format(gt_depth.shape))\n\n errors.append(compute_errors(gt_depth, pred_depth))\n\n if not disable_median_scaling:\n ratios = np.array(ratios)\n med = np.median(ratios)\n print(\" Scaling ratios | med: {:0.3f} | std: {:0.3f}\".format(med, np.std(ratios / med)))\n\n mean_errors = np.array(errors).mean(0)\n\n print(\"\\n \" + (\"{:>8} | \" * 7).format(\"abs_rel\", \"sq_rel\", \"rmse\", \"rmse_log\", \"a1\", \"a2\", \"a3\"))\n print((\"&{: 8.3f} \" * 7).format(*mean_errors.tolist()) + \"\\\\\\\\\")\n print(\"\\n-> Done!\")",
"def validation_dubo(latent_dim, covar_module0, covar_module1, likelihood, train_xt, m, log_v, z, P, T, eps):\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n v = torch.exp(log_v)\n torch_dtype = torch.double\n x_st = torch.reshape(train_xt, [P, T, train_xt.shape[1]]).to(device)\n stacked_x_st = torch.stack([x_st for i in range(latent_dim)], dim=1)\n K0xz = covar_module0(train_xt, z).evaluate().to(device)\n K0zz = (covar_module0(z, z).evaluate() + eps * torch.eye(z.shape[1], dtype=torch_dtype).to(device)).to(device)\n LK0zz = torch.cholesky(K0zz).to(device)\n iK0zz = torch.cholesky_solve(torch.eye(z.shape[1], dtype=torch_dtype).to(device), LK0zz).to(device)\n K0_st = covar_module0(stacked_x_st, stacked_x_st).evaluate().transpose(0,1)\n B_st = (covar_module1(stacked_x_st, stacked_x_st).evaluate() + torch.eye(T, dtype=torch.double).to(device) * likelihood.noise_covar.noise.unsqueeze(dim=2)).transpose(0,1)\n LB_st = torch.cholesky(B_st).to(device)\n iB_st = torch.cholesky_solve(torch.eye(T, dtype=torch_dtype).to(device), LB_st)\n\n dubo_sum = torch.tensor([0.0]).double().to(device)\n for i in range(latent_dim):\n m_st = torch.reshape(m[:, i], [P, T, 1]).to(device)\n v_st = torch.reshape(v[:, i], [P, T]).to(device)\n K0xz_st = torch.reshape(K0xz[i], [P, T, K0xz.shape[2]]).to(device)\n iB_K0xz = torch.matmul(iB_st[i], K0xz_st).to(device)\n K0zx_iB_K0xz = torch.matmul(torch.transpose(K0xz[i], 0, 1), torch.reshape(iB_K0xz, [P*T, K0xz.shape[2]])).to(device)\n W = K0zz[i] + K0zx_iB_K0xz\n W = (W + W.T) / 2\n LW = torch.cholesky(W).to(device)\n logDetK0zz = 2 * torch.sum(torch.log(torch.diagonal(LK0zz[i]))).to(device)\n logDetB = 2 * torch.sum(torch.log(torch.diagonal(LB_st[i], dim1=-2, dim2=-1))).to(device)\n logDetW = 2 * torch.sum(torch.log(torch.diagonal(LW))).to(device)\n logDetSigma = -logDetK0zz + logDetB + logDetW\n iB_m_st = torch.solve(m_st, B_st[i])[0].to(device)\n qF1 = torch.sum(m_st*iB_m_st).to(device)\n p = torch.matmul(K0xz[i].T, torch.reshape(iB_m_st, [P * T])).to(device)\n qF2 = torch.sum(torch.triangular_solve(p[:,None], LW, upper=False)[0] ** 2).to(device)\n qF = qF1 - qF2\n tr = torch.sum(iB_st[i] * K0_st[i]) - torch.sum(K0zx_iB_K0xz * iK0zz[i])\n logDetD = torch.sum(torch.log(v[:, i])).to(device)\n tr_iB_D = torch.sum(torch.diagonal(iB_st[i], dim1=-2, dim2=-1)*v_st).to(device)\n D05_iB_K0xz = torch.reshape(iB_K0xz*torch.sqrt(v_st)[:,:,None], [P*T, K0xz.shape[2]])\n K0zx_iB_D_iB_K0zx = torch.matmul(torch.transpose(D05_iB_K0xz,0,1), D05_iB_K0xz).to(device)\n tr_iB_K0xz_iW_K0zx_iB_D = torch.sum(torch.diagonal(torch.cholesky_solve(K0zx_iB_D_iB_K0zx, LW))).to(device)\n tr_iSigma_D = tr_iB_D - tr_iB_K0xz_iW_K0zx_iB_D\n dubo = 0.5*(tr_iSigma_D + qF - P*T + logDetSigma - logDetD + tr)\n dubo_sum = dubo_sum + dubo\n return dubo_sum",
"def test_lrp_svm(self, syn_genomic_data, syn_fm, syn_idx, rep, tmp_path, syn_true_pvalues): \n rep_to_plot = 0\n ttbrs = [0.5, 1,1.5]\n idx = syn_idx[str(rep_to_plot)]\n fig, axes = plt.subplots(len(ttbrs), 5, figsize=[30,15])\n x_3d = syn_fm(\"3d\")[str(rep_to_plot)][:]\n x_2d = syn_fm(\"2d\")[str(rep_to_plot)][:]\n indices_true= [inds_true for inds_true, x in enumerate(syn_true_pvalues[0].flatten()) if x]\n\n for i, ttbr in enumerate(ttbrs):\n print('Using tbrr={}'.format(ttbr))\n labels = generate_syn_phenotypes(tower_to_base_ratio=ttbr, quantity=rep)\n labels_cat = {}\n for key, l in labels.items():\n labels_cat[key] = tensorflow.keras.utils.to_categorical((l+1)/2)\n \n best_params_montaez['n_snps']= x_3d.shape[1]\n \n l_0b=labels_cat[str(rep_to_plot)]\n\n model = create_montaez_dense_model(best_params_montaez)\n y_integers = np.argmax(l_0b[idx.train], axis=1)\n class_weights = class_weight.compute_class_weight('balanced', np.unique(y_integers), y_integers)\n d_class_weights = dict(enumerate(class_weights))\n\n model.fit(x=x_3d[idx.train], y=l_0b[idx.train], validation_data=(x_3d[idx.test], l_0b[idx.test]), epochs=best_params_montaez['epochs'], class_weight=d_class_weights, callbacks=[ ReduceLROnPlateau(monitor='val_loss', factor=best_params_montaez['factor'], patience=best_params_montaez['patience'], mode='min'),],)\n\n model = iutils.keras.graph.model_wo_softmax(model)\n analyzer = innvestigate.analyzer.LRPAlpha1Beta0(model)\n weights = analyzer.analyze(x_3d).sum(0)\n\n top_indices_sorted, filtered_weights = postprocess_weights(weights, top_k, filter_window_size, p_svm, p_pnorm_filter)\n\n complete_pvalues = chi_square(syn_genomic_data[str(rep_to_plot)][:], labels[str(rep_to_plot)])\n \n pvalues_filled_deep = np.ones(n_total_snps)\n pvalues_filled_deep[top_indices_sorted] = complete_pvalues[top_indices_sorted]\n\n # Plot RPVT\n plot_pvalues(complete_pvalues, indices_true, axes[i][0])\n if i==0:\n axes[i][0].set_title('RPVT $-log_{10}$(p-values)', fontsize=22)\n axes[i][0].set_ylabel('$-log_{10}$(p-value)', fontsize=18)\n plt.setp(axes[i][0].get_yticklabels(), fontsize=16)\n plt.setp(axes[i][0].get_xticklabels(), fontsize=16)\n\n # Plot svm weights \n clf = LinearSVC(penalty='l2', loss='hinge', C=0.0022, dual=True, tol=1e-3, verbose=0, class_weight='balanced')\n idx_now, pvalues, raw_weights = combi_method(clf, syn_genomic_data[str(rep_to_plot)][:],x_2d, labels[str(rep_to_plot)], 35, 2, 2, 30)\n #filtered_svm_weights = postprocess_weights_without_avg(raw_weights, p_svm)\n pvalues_filled_combi = np.ones(len(complete_pvalues))\n pvalues_filled_combi[idx_now] = pvalues\n #svm_weights = toy_classifier.fit(x_2d, labels[str(rep_to_plot)]).coef_\n axes[i][1].scatter(range(len(np.absolute(raw_weights).sum(1))), 1000*np.absolute(raw_weights).sum(1), marker='.', color='darkblue')\n axes[i][1].scatter(indices_true,1000*np.absolute(raw_weights).sum(1)[indices_true], color='fuchsia')\n axes[i][1].set_ylim(0,1000*(np.max(np.absolute(raw_weights).sum(1))+0.001))\n if i==0:\n axes[i][1].set_title('Absolute SVM weights * 1000', fontsize=22)\n plt.setp(axes[i][1].get_yticklabels(), fontsize=16)\n plt.setp(axes[i][1].get_xticklabels(), fontsize=16)\n\t\t\t\n # Plot COMBI\n plot_pvalues(pvalues_filled_combi, indices_true, axes[i][2])\n if i==0:\n axes[i][2].set_title('COMBI $-log_{10}$(p-values)', fontsize=22)\n if i==2:\n axes[i][2].set_xlabel('SNP position', fontsize=18)\n plt.setp(axes[i][2].get_yticklabels(), fontsize=16)\n plt.setp(axes[i][2].get_xticklabels(), fontsize=16)\n\t\t\t\n # Plot LRP 
relevance scores\n axes[i][3].scatter(range(len(np.absolute(weights).reshape(-1, 3).sum(1))), np.absolute(weights).reshape(-1, 3).sum(1), marker='.', color='darkblue')\n axes[i][3].scatter(indices_true,np.absolute(weights).reshape(-1, 3).sum(1)[indices_true], color='fuchsia')\n #axes[i][1].legend()\n axes[i][3].set_ylim(0,np.max(np.absolute(weights).reshape(-1, 3).sum(1))+1)\n if i==0:\n axes[i][3].set_title('LRP relevance scores', fontsize=22)\n plt.setp(axes[i][3].get_yticklabels(), fontsize=16)\n plt.setp(axes[i][3].get_xticklabels(), fontsize=16)\n\t\t\t\n # Plot DeepCOMBI\n plot_pvalues(pvalues_filled_deep, indices_true, axes[i][4])\n if i==0:\n axes[i][4].set_title('DeepCOMBI $-log_{10}$(p-value)', fontsize=22)\n plt.setp(axes[i][4].get_yticklabels(), fontsize=16)\n plt.setp(axes[i][4].get_xticklabels(), fontsize=16)\n\t\t\t\n ## Plot distribution of postprocessed vectors\n #axes[i][2].plot(postprocessed_weights)\n #axes[i][2].set_title('Postprocessed relevance')\n\n fig.savefig(os.path.join(IMG_DIR, 'manhattan-example-toy-NAR.png'), bbox_inches='tight')",
"def dec_model(params):\n\n if (params['random']):\n print(\"Random Decision Tree Parameters.\")\n params['criterion'] = random.choice([\"gini\", \"entropy\"])\n params['splitter'] = random.choice([\"best\", \"random\"])\n params['max_features'] = random.choice(['auto', 'sqrt', 'log2', random.randrange(50, 1000, step=25), None])\n params['max_depth'] = random.choice([None, random.randrange(5, 1000, step=5)])\n params['min_samples_split'] = random.choice([2, random.randrange(1, 50, step=1)])\n params['max_leaf_nodes'] = random.choice([None, random.randrange(2, 50, step=1)])\n params['min_samples_leaf'] = random.choice([1, random.randrange(5, 100, step=5)])\n print(params)\n \n model = tree.DecisionTreeClassifier(\n criterion=params['criterion'],\n splitter=params['splitter'],\n max_features=params['max_features'],\n max_depth=params['max_depth'],\n min_samples_split=params['min_samples_split'],\n max_leaf_nodes=params['max_leaf_nodes'],\n min_samples_leaf=params['min_samples_leaf']\n )\n\n return model",
"def evaluate_node(self):\n # p, v = np.random.random(225).astype(np.float16), np.random.random()\n socket = zmq.Context().socket(zmq.DEALER)\n socket.setsockopt_string(zmq.IDENTITY, self.player_id)\n socket.connect('ipc://./tmp/oracle_%s' % self.tree.model_name)\n print('start to evaluate', self.tree.model_name)\n while True:\n # print(self.tree.to_evaluate.qsize())\n batch = []\n states = []\n colors = []\n size = self.tree.to_evaluate.qsize()\n if size > config.INFERENCE_BATCHSIZE:\n size = config.INFERENCE_BATCHSIZE\n elif size == 0:\n time.sleep(0.001)\n continue\n for _ in range(size):\n t, black, white = self.tree.to_evaluate.get()\n mine, yours = posswap(t, black, white)\n batch.append((str(mine), str(yours), t % 2))\n states.append((black, white))\n colors.append(t % 2)\n socket.send(msgpack.dumps((batch, self.player_id)))\n result = msgpack.loads(socket.recv())\n assert len(states) == len(result[0])\n assert len(states) == len(result[1])\n for ind, state in enumerate(states):\n with self.lock:\n self.tree.nodes[state].p = result[0][ind]\n if colors[ind] == 0:\n self.tree.nodes[state].v = result[1][ind]\n else:\n self.tree.nodes[state].v = -result[1][ind]\n self.tree.nodes[state].updated = True",
"def demo():\n def load_data():\n train = open(\"csv/svd_train.csv\", \"r\")\n r = csv.reader(train)\n next(r)\n\n data = []\n target = []\n\n print \"Prepping data...\"\n for row in r:\n aux = [0 for x in xrange(10)]\n aux[int(row[0])] = 1\n target.append(aux)\n data.append([float(x) for x in row[1:]])\n\n train.close()\n\n data = np.array(data)\n\n target = np.array(target)\n\n #train = [target[:35000],data[:35000]]\n #test = [target[35000:],data[35000:]]\n\n return [target, data]\n\n NN = MLP_NeuralNetwork(101, 75, 35, 10,\n iterations = 200,\n learning_rate = 0.5,\n momentum = 0.05,\n rate_decay = 0.005)\n\n train = load_data()\n\n NN.train(train)\n #NN.test_cross(test)\n #NN.test()\n NN.test_against()",
"def test_rnnslu(**kwargs):\n # process input arguments\n param = {\n 'fold': 3,\n 'lr': 0.1,\n 'verbose': True,\n 'decay': False,\n 'win': 3,\n 'nhidden': 300,\n 'seed': 345,\n 'emb_dimension': 50,\n 'nepochs': 60,\n 'normal': False,\n 'folder':'../result',\n 'longdependence':None,\n 'optimization':'Adagrad'\n }\n param_diff = set(kwargs.keys()) - set(param.keys())\n if param_diff:\n raise KeyError(\"invalid arguments:\" + str(tuple(param_diff)))\n param.update(kwargs)\n\n if param['verbose']:\n for k,v in param.items():\n print(\"%s: %s\" % (k,v))\n\n # create result folder if not exists\n check_dir(param['folder'])\n\n # load the dataset\n print('... loading the dataset')\n train_set, valid_set, test_set, dic = load_data(param['fold'])\n\n # create mapping from index to label, and index to word\n idx2label = dict((k, v) for v, k in dic['labels2idx'].items()) # change label2index - index2label\n idx2word = dict((k, v) for v, k in dic['words2idx'].items()) # change words2index - index2words\n\n # unpack dataset\n train_lex, train_ne, train_y = train_set\n valid_lex, valid_ne, valid_y = valid_set\n test_lex, test_ne, test_y = test_set \n\n train_lex = train_lex + test_lex\n train_y = train_y + test_y\n train_ne = train_ne + test_ne\n\n vocsize = len(dic['words2idx']) # # of words\n nclasses = len(dic['labels2idx']) # # of classes \n nsentences = len(train_lex) # # training sample [a batch is all the words in a sentence]\n\n ## get the label for (input,output) for test and valid set \n groundtruth_valid = [map(lambda x: idx2label[x], y) for y in valid_y]\n words_valid = [map(lambda x: idx2word[x], w) for w in valid_lex]\n\n # instanciate the model\n numpy.random.seed(param['seed'])\n random.seed(param['seed'])\n \n\n print('... building the model')\n lstm = LSTM(\n nh=param['nhidden'],\n nc=nclasses,\n ne=vocsize,\n de=param['emb_dimension'],\n cs=param['win'],\n normal=param['normal'],\n longdependence = param['longdependence'],\n optimization = param['optimization']\n )\n\n ## build the model for mini-batch\n # train with early stopping on validation set\n print('... training')\n best_f1 = -numpy.inf\n param['clr'] = param['lr']\n \n for epoch in range(param['nepochs']):\n\n param['ce'] = epoch\n tic = timeit.default_timer()\n print('epoch %i out of %i' %(epoch,param['nepochs']) )\n \n for i, (x, y) in enumerate(zip(train_lex, train_y)):\n input_length = len(x)\n lstm.train(x, y, param['win'], param['clr'])\n print('[learning] epoch %i >> %2.2f%%' % (\n epoch, (i + 1) * 100. 
/ nsentences), end=' ')\n print('completed in %.2f (sec) <<\\r' % (timeit.default_timer() - tic), end='')\n\n # evaluation // back into the real world : idx -> words\n predictions_valid = [map(lambda x: idx2label[x],\n lstm.classify(numpy.asarray(\n contextwin(x, param['win'])).astype('int32')))\n for x in valid_lex]\n\n # evaluation // compute the accuracy using conlleval.pl\n res_valid = conlleval(predictions_valid,\n groundtruth_valid,\n words_valid,\n param['folder'] + '/current.valid.txt',\n param['folder'])\n\n if res_valid['f1'] > best_f1:\n\n best_f1 = res_valid['f1']\n\n if param['verbose']:\n print('NEW BEST: epoch', epoch,\n 'best test F1', res_valid['f1'])\n\n param['tf1'] = res_valid['f1']\n param['tp'] = res_valid['p']\n param['tr'] = res_valid['r']\n param['be'] = epoch\n else:\n if param['verbose']:\n print('')\n\n # learning rate decay if no improvement in 10 epochs\n if param['decay'] and abs(param['be']-param['ce']) >= 10:\n param['clr'] *= 0.5\n\n if param['clr'] < 1e-5:\n break\n \n\n print('BEST RESULT: epoch', param['be'],\n 'best test F1', param['tf1'],\n 'with the model', param['folder'])\n \n return lstm",
"def fit_decision_tree(model, x_train, y_train):\r\n model.fit(x_train, y_train)\r\n score = model.score(x_train, y_train)\r\n importance = model.feature_importances_\r\n return score, importance",
"def compute_edge_logits(self):",
"def train_decision_tree():\n train_model(DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DECISION_TREE_DEFAULT_DATASET,\n model_file_name=DECISION_TREE_DEFAULT_MODEL)",
"def comprehensiveLOOEvaluation(directory=defaultDirectory(), \n pruneGlobal = True, numLayers = 2, \n numNodesPerLayer = 200, randSeed = 1,\n trainPer = .6, valPer = .25, testPer = 0.15,\n totalPer = 1, batchSize = 64,\n numEpochs = 1000, learningRate = 0.001, \n l2Reg = 0.0001, modelFilePrefix = '',\n useGRU = False,\n dropoutI = 0.2, dropoutH = 0.2, trainMode = 'continue',\n randSeed2 = None, center = False, prependMean = False):\n \n trainModes = ['continue', 'overwrite', 'continue-each']\n \n if trainMode.lower() not in trainModes:\n raise ValueError(\"Parameter 'trainMode' must be either 'continue', 'overwrite', or 'continue-each'.\")\n \n np.random.seed(randSeed) #control permutation of data\n # prune global coordinate data?\n if pruneGlobal:\n pruneRange = range(0, 18)\n else:\n pruneRange = None\n structs = loadDataset(directory=directory, LOUO=True, \n delRange=pruneRange, trainPer=trainPer,\n valPer = valPer, testPer=testPer, totalPer=totalPer,\n preExt = '.left', prune=True)\n\n u=0\n losses = []\n accs = []\n balAccs = []\n finAccs = []\n cmEpochs = []\n outDirectory = nameModelFile('', useGRU, numLayers, numNodesPerLayer, randSeed,\n trainPer, valPer, testPer, totalPer, dropoutI, dropoutH, l2Reg,\n center, prependMean)\n if not os.path.isdir(outDirectory):\n os.mkdir(outDirectory)\n if randSeed2 is not None: #control randomization of training (for Keras at least)\n np.random.seed(randSeed2)\n for struct in structs:\n modelFile = modelFilePrefix + 'LOU-' + str(u)\n modelFile = nameModelFile(modelFile, useGRU, numLayers, numNodesPerLayer, randSeed,\n trainPer, valPer, testPer, totalPer, dropoutI, dropoutH, l2Reg,\n center, prependMean)\n u += 1\n if (os.path.isfile(outDirectory + '\\\\' + 'Keras' + modelFile + '.json') \n and os.path.isfile(outDirectory + '\\\\' + 'Keras' + modelFile + '_Weights.h5')):\n #if we have already trained for leaving out this user\n if trainMode == 'continue': #continue until each user has a model\n trainMode2 = 'skip' \n elif trainMode == 'continue-each': # continue training previous models\n trainMode2 = 'continue'\n else:\n trainMode2 = 'overwrite'\n else:\n trainMode2 = trainMode\n\n if center:\n \"\"\"\n Center the labeled markers on their mean. \n \"\"\"\n from Postures import centerData\n struct = list(struct)\n labeledMarkerData = struct[0][:,:,18:].reshape((-1, 11, 3))\n labeledMarkerData = centerData(labeledMarkerData, True, prependMean).reshape((struct[0].shape[0], struct[0].shape[1], -1))\n struct[0] = np.concatenate([struct[0][:,:,0:18], labeledMarkerData], axis = 2)\n if prependMean:\n struct[8] += 3\n\n\n cmEpoch, loss, acc, balAcc, finAcc = trainGestureRNN(numLayers=numLayers, numNodesPerLayer=numNodesPerLayer,\n useGRU=useGRU, batchSize=batchSize, \n numEpochs = numEpochs, learningRate=learningRate,\n l1Reg=0, l2Reg = l2Reg, dropoutI=dropoutI, dropoutH=dropoutH,\n sequences = struct[0], classes = struct[1],\n trainRange = struct[2], valRange = struct[3],\n testRange = struct[4], numClasses = struct[5],\n numObservations = struct[6], numSequences = struct[7],\n numFeatures = struct[8],\n modelFile=modelFile, \n outDirectory=outDirectory, trainMode=trainMode2,\n callbacks = [EarlyStopping(patience=20)])\n #catch our breath.... 
Really, give the user a chance to insert Ctrl-C\n time.sleep(2)\n losses += [loss]\n accs += [acc]\n balAccs += [balAcc]\n finAccs += [finAcc]\n cmEpochs += [cmEpoch]\n losses = np.asarray(losses)\n accs = np.asarray(accs)*100\n balAccs = np.asarray(balAccs)*100\n finAccs = np.asarray(finAccs)*100\n trainPer, valPer, _, _ = normalizePercentages(trainPer, valPer, 0, 1)\n sys.stdout.write('\\n')\n sys.stdout.write('Leave One User Out Evaluation\\nTest Results for ' + str(numLayers) + '-Layer, ' \n + str(numNodesPerLayer) + ' Nodes-Per-Layer ' + ('GRU' if useGRU else 'LSTM') + ' Networks\\n'\n + 'Trained with ' + (\"%0.2f\" % (dropoutI*100)) + '% Input Dropout, '\n + (\"%0.2f\" % (dropoutH*100)) + '% Hidden Dropout, and ' + str(l2Reg) + ' L2 Regularization\\n'\n + str(numEpochs) + ' Maximum Epochs at ' + (\"%0.2f\" % trainPer) + '/' + (\"%0.2f\" % valPer) + ' Training/Validation Split\\n')\n sys.stdout.write('\\n')\n sys.stdout.write('Loss: ' + str(np.mean(losses)) + ' +/- ' + str(np.std(losses)) +'\\n')\n sys.stdout.write('25%, 50%, 75% Quartile Loss: ' + str(np.percentile(losses, 25))\n + ', ' + str(np.median(losses)) \n + ', ' + str(np.percentile(losses, 75)) +'\\n')\n sys.stdout.write('\\n')\n sys.stdout.write('Accuracy: ' + str(np.mean(accs)) + ' +/- ' + str(np.std(accs)) +'\\n')\n sys.stdout.write('25%, 50%, 75% Quartile Accuracy: ' + str(np.percentile(accs, 25))\n + ', ' + str(np.median(accs)) \n + ', ' + str(np.percentile(accs, 75)) +'\\n')\n sys.stdout.write('\\n')\n sys.stdout.write('Balanced Accuracy: ' + str(np.mean(balAccs)) + ' +/- ' + str(np.std(balAccs)) +'\\n')\n sys.stdout.write('25%, 50%, 75% Quartile Balanced Accuracy: ' + str(np.percentile(balAccs, 25))\n + ', ' + str(np.median(balAccs))\n + ', ' + str(np.percentile(balAccs, 75)) +'\\n')\n sys.stdout.write('\\n')\n sys.stdout.write('Final-Frame Accuracy: ' + str(np.mean(finAccs)) + ' +/- ' + str(np.std(finAccs)) +'\\n')\n sys.stdout.write('25%, 50%, 75% Quartile Final-Frame Accuracy: ' + str(np.percentile(finAccs, 25))\n + ', ' + str(np.median(finAccs))\n + ', ' + str(np.percentile(finAccs, 75)) +'\\n')",
"def _msqrd_v_l_l(s, t, model: SingleRhNeutrinoModel, ml: float):\n mx = model.mx\n u = 0.5 * np.tan(2 * model.theta)\n return (\n -2\n * u**2\n * GF**2\n * (\n 2 * ml**4 * (1 + 4 * SW**2 + 8 * SW**4)\n + 2 * ml**2 * (mx**2 - s - 2 * (1 + 4 * SW**2 + 8 * SW**4) * t)\n + (1 + 4 * SW**2 + 8 * SW**4)\n * (s**2 + 2 * s * t + 2 * t**2 - mx**2 * (s + 2 * t))\n )\n )",
"def test_model_tree_to_tree(model, dset_loader, metric, use_cuda=False):\n model.train(False)\n model.eval()\n\n running_corrects = 0\n accuracies = []\n\n for inputs, labels in dset_loader:\n if use_cuda:\n inputs, labels = inputs.cuda(), labels.cuda()\n \n # forward\n output = model.forward_prediction(inputs)\n accuracies.append(metric(output, labels))\n \n mean = np.mean(accuracies)\n\n return mean",
"def __init__(self, model):\n TreeLikelihoodBase.__init__(self, model)",
"def create_tree(f_train, f_test, l_train, l_test):\n # initialize model\n model = DecisionTreeClassifier(max_depth=2)\n\n # train it on training data\n model.fit(f_train, l_train)\n\n # gather the model's predictions for train\n train_predictions = model.predict(f_train)\n\n # gather the model's predictions for test\n test_predictions = model.predict(f_test)\n\n # calculate accuaracy of train\n print('Tree Train Accuracy: ', accuracy_score(l_train, train_predictions))\n\n # calculate accuracy of test\n print('Tree Test Accuracy: ', accuracy_score(l_test, test_predictions))\n\n return model",
"def rwgraph_analyze2(input=(None)):\r\n\r\n\r\n #set up graph and degree distribution arrays\r\n n=2000\r\n m=4\r\n G=nx.barabasi_albert_graph(n, m, seed=5)\r\n Nt=100\r\n M=20000\r\n maxdeg=0\r\n degree_dist=[]\r\n for i in range(0,n):\r\n degree_dist.append(G.degree[i])\r\n if G.degree[i]>maxdeg:\r\n maxdeg=G.degree[i]\r\n j=i\r\n\r\n #set inital conditions and D\r\n y0=np.zeros(n,dtype=int)\r\n y0[j]=200\r\n D=1\r\n #define time for odi Int\r\n t=np.arange(Nt+1,dtype=int)\r\n #set up operators\r\n A = nx.adjacency_matrix(G)\r\n Q = A.toarray().sum(axis=1)\r\n L=np.diag(Q)-A.toarray()\r\n Q_inv=1/Q\r\n Ls=np.diag(np.ones(n))-np.matmul(np.diag(Q_inv),A.toarray())\r\n Ls_tran=np.transpose(Ls)\r\n\r\n #convert to sparse operators and include diffusion\r\n L_spar = scipy.sparse.csr_matrix(-D*L)\r\n Ls_spar = scipy.sparse.csr_matrix(-D*Ls)\r\n Ls_tran_spar = scipy.sparse.csr_matrix(-D*Ls_tran)\r\n A=nx.adjacency_matrix(G)\r\n L=-D*(scipy.sparse.diags(degree_arr)-A)\r\n Ls=-D*(scipy.sparse.diags(np.ones(N))-scipy.sparse.diags(1/degree_arr).dot(A))\r\n\r\n #define operators\r\n def Lap(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(L_spar,y)\r\n def Lap_Ls(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(Ls_spar,y)\r\n def Lap_Ls_tran(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(Ls_tran_spar,y)\r\n\r\n #solutions of different operators\r\n solL=scipy.integrate.odeint(Lap,y0,t)\r\n solLs=scipy.integrate.odeint(Lap_Ls,y0,t)\r\n solLs_tran=scipy.integrate.odeint(Lap_Ls_tran,y0,t)\r\n\r\n\r\n #finds eigen values and vectors and puts them into order\r\n def eigen(L):\r\n eigen_values,eigen_vectors=scipy.linalg.eig(-L)\r\n idx = eigen_values.argsort()[::-1]\r\n eigen_values = eigen_values[idx]\r\n eigen_vectors = eigen_vectors[:,idx]\r\n return eigen_values,eigen_vectors\r\n\r\n #finds all eigen values and eigen vectors of the different operators. 
can use sparse matrics\r\n eigen_values_LS,eigen_vectors_LS=eigen(Ls)\r\n eigen_values_LS_tran,eigen_vectors_LS_tran=eigen(Ls_tran)\r\n eigen_values_L,eigen_vectors_L=eigen(L)\r\n eigen_values_L2,eigen_vectors_L2=eigen(L*0.36)\r\n\r\n ### could have eigs here as didn't end up using all eigenvalues ####\r\n #eigen values graph\r\n n0=len(eigen_values_L)\r\n eig_nums=np.arange(n0)\r\n plt.figure(figsize=(12, 6))\r\n plt.scatter(eig_nums[0:10],eigen_values_L2[0:10],s=50,marker=\"x\" ,label='L , D=0.36')\r\n plt.scatter(eig_nums[0:10],eigen_values_LS[0:10],s=50, marker=\"|\",label='LS , D=1')\r\n plt.scatter(eig_nums[0:10],eigen_values_LS_tran[0:10],s=50,marker='_',label='LS_tran , D=1')\r\n plt.scatter(eig_nums[0:10],eigen_values_L[0:10],s=50,marker=\"+\" ,label='L , D=1')\r\n plt.legend(loc=\"lower left\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n plt.xlabel('eigen value number')\r\n plt.ylabel('eigenvalue')\r\n plt.title(\"Eigenvlaues of Laplacian Matrixs\")\r\n plt.show()\r\n\r\n print(\"4 biggest eigenvalues for each operater\")\r\n print('L=',eigen_values_L[0:4])\r\n print('Ls=',eigen_values_LS[0:4])\r\n print('Ls_tran=',eigen_values_LS_tran[0:4])\r\n #prints 4 biggest eigen values\r\n #counts node distrubtion by creating dictionary\r\n def result_count(sol,Nt,G):\r\n \"\"\" returns cumlative frequency/probailties for nodes of same degree and returns dictionary\"\"\"\r\n n = G.number_of_nodes()\r\n dict_freq={}\r\n for i in range(n):\r\n k=G.degree(i)\r\n if k not in dict_freq:\r\n dict_freq[k]=sol[Nt,i]\r\n else:\r\n dict_freq[k]+=sol[Nt,i]\r\n return dict_freq\r\n\r\n #frequency count of solutions\r\n dict_freq=result_count(solL,Nt,G)\r\n dict_freq2=result_count(solLs,Nt,G)\r\n dict_freq3=result_count(solLs_tran,Nt,G)\r\n\r\n #random walk data\r\n X=rwgraph(G,j,20000,100)\r\n Listnodes7=[]\r\n for i in range(20000):\r\n Listnodes7.append(G.degree(X[i,100]))\r\n X=rwgraph(G,j,200,100)\r\n Listnodes8=[]\r\n for i in range(200):\r\n Listnodes8.append(G.degree(X[i,100]))\r\n X=rwgraph(G,j,50000,5000)\r\n Listnodes9=[]\r\n for i in range(50000):\r\n Listnodes9.append(G.degree(X[i,5000]))\r\n listfreq7=CountFrequency(Listnodes7)\r\n listfreq8=CountFrequency(Listnodes8)\r\n listfreq9=CountFrequency(Listnodes9)\r\n listfreq_deg=CountFrequency(degree_dist)\r\n z2=[]\r\n z3=[]\r\n z1=[]\r\n z_deg2=[]\r\n z_deg3=[]\r\n z_deg1=[]\r\n for i in listfreq7:\r\n z2.append(listfreq7[i]/(listfreq_deg[i]*20000))\r\n z_deg2.append(i)\r\n for i in listfreq8:\r\n z3.append(listfreq8[i]/(listfreq_deg[i]*200))\r\n z_deg3.append(i)\r\n for i in listfreq8:\r\n z1.append(listfreq9[i]/(listfreq_deg[i]*50000))\r\n z_deg1.append(i)\r\n #operator solutions compared to node degree frequency\r\n z4,z5,z6=[],[],[]\r\n z_deg4,z_deg5,z_deg6=[],[],[]\r\n for i in dict_freq:\r\n z4.append(dict_freq[i]/(listfreq_deg[i]*200))\r\n z_deg4.append(i)\r\n for i in dict_freq2:\r\n z5.append(dict_freq2[i]/(listfreq_deg[i]*200))\r\n z_deg5.append(i)\r\n for i in dict_freq3:\r\n z6.append(dict_freq3[i]/(listfreq_deg[i]*200))\r\n z_deg6.append(i)\r\n\r\n plt.figure(figsize=(15, 10))\r\n plt.scatter(z_deg1, z1,label='Nt=5000, M=50000')\r\n plt.scatter(z_deg2, z2,label='Nt=100, M=20000')\r\n plt.scatter(z_deg3, z3,label='Nt=100, M=200')\r\n plt.scatter(z_deg4, z4,label='L, Nt=100')\r\n plt.scatter(z_deg5, z5,label='Ls, Nt=100')\r\n plt.scatter(z_deg6, z6,label='Ls_tran, Nt=100')\r\n plt.ylim((-0.005,0.020))\r\n plt.xlabel('degree of node')\r\n plt.ylabel('frequency of final position / M*frequency of 
degree')\r\n plt.legend(loc=\"upper left\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n plt.title(\"Frequency of final positions relative to number of nodes of that degree, for changing times Nt and M.\")\r\n plt.show()\r\n\r\n #code to produce final graph\r\n iarray1=LinearModel(G,x=j,i0=1,L1='L',D=1,tf=20,Nt=Nt)\r\n iarray2=LinearModel(G,x=j,i0=1,L1='Ls',D=1,tf=20,Nt=Nt)\r\n iarray3=LinearModel(G,x=j,i0=1,L1='Lst',D=1,tf=20,Nt=Nt)\r\n tarray = np.linspace(0,5,Nt+1)\r\n plt.figure(figsize=(12, 6))\r\n plt.plot(tarray, iarray1[:,7] ,label='rand node L,deg=46',color='b',alpha=0.5)\r\n plt.plot(tarray, iarray2[:,7] ,label='rand node Ls,deg=46',marker='|',color='r')\r\n plt.scatter(tarray, iarray3[:,7] ,label='rand node LST,deg=46',marker='_',color='y')\r\n plt.scatter(tarray, iarray1[:,1801] ,label='rand node L, deg=5',color='m',alpha=0.5,marker='+')\r\n plt.plot(tarray, iarray2[:,1801] ,label='rand node Ls,deg=5',marker='|',color='c')\r\n plt.scatter(tarray, iarray3[:,1801] ,label='rand node LST,deg=5',marker='_',color='g')\r\n plt.xlabel('time')\r\n plt.ylabel('representive frequency')\r\n plt.legend()\r\n plt.title(\"Comparing repestive frequency of a random nodes, for the different linear models,time step=50,D=0.1\")\r\n plt.show()\r\n return None #modify as needed\r",
"def swap_tree_test():\r\n par_rates = [0.01,0.02,0.025,0.028,0.036]\r\n maturities = [1,2,3,4,5]\r\n sig,FV,c = 0.1743,100,0.04\r\n fwd_tree,swap_values,CVA,DVA,EpEs,EnEs = swap_tree(par_rates,maturities,sig,FV,c,0.5,\\\r\n 0.4,0.025,0.01)\r\n print(\"Credit and debit adjusted value of swap\",swap_values[-1][0]+DVA-CVA)\r\n print(\"The values of CVA and DVA respectively are \",CVA,DVA)\r\n print(\"Expected positive exposures\",EpEs)\r\n print(\"Expected negative exposures\",EnEs)\r\n r\"\"\"\r\n Now we check manually that the swap value has been computed correctly, independent \r\n of the model used. \r\n \"\"\"\r\n import numpy.linalg as la\r\n import numpy as np\r\n A = np.array([[1.01,0,0,0,0],[0.02,1.02,0,0,0],[0.025,0.025,1.025,0,0],\\\r\n [0.028,0.028,0.028,1.028,0],[0.036,0.036,0.036,0.036,1.036]])\r\n b = np.array([1,1,1,1,1]).T\r\n discounts = np.dot(la.inv(A),b)\r\n b_fix = np.dot(discounts,[4,4,4,4,104])\r\n v_swap = b_fix - 100\r\n print(\"Model independent value of swap:\",v_swap)\r\n print(\"Binomial value of the swap\",swap_values[-1][0])",
"def run_snv_phylogenetics(snv_count_data, allele_cn, clusters, results_prefix):\n snv_log_likelihoods = scgenome.snvphylo.compute_snv_log_likelihoods(\n snv_count_data, allele_cn, clusters)\n\n ml_tree, tree_annotations = scgenome.snvphylo.compute_dollo_ml_tree(\n snv_log_likelihoods)\n\n return ml_tree, tree_annotations",
"def svm():",
"def __init__(self, dataset: List[SEMData], file_name):\n\n tree = Tree(file_name)\n\n self.nodes = dict()\n self.n_nodes = 0\n\n\n self.nodes['N0'] = SEMTreeNode('node')\n self.nodes['N0'].add_dist('N1', 20.8)\n self.nodes['N0'].add_dist('N3', 20.8)\n\n self.nodes['N1'] = SEMTreeNode('node')\n self.nodes['N1'].add_dist('N0', 20.8)\n self.nodes['N1'].add_dist('N2', 33.7)\n self.nodes['N1'].add_dist('NKL', 112.3)\n self.nodes['N1'].add_dist('B19', 112.3)\n\n self.nodes['NKL'] = SEMTreeNode('leaf')\n self.nodes['NKL'].add_dist('N1', 112.3)\n\n self.nodes['B19'] = SEMTreeNode('leaf')\n self.nodes['B19'].add_dist('N1', 112.3)\n\n self.nodes['N2'] = SEMTreeNode('node')\n self.nodes['N2'].add_dist('N1', 33.7)\n self.nodes['N2'].add_dist('CD4', 78.6)\n self.nodes['N2'].add_dist('CD8', 78.6)\n\n self.nodes['CD4'] = SEMTreeNode('leaf')\n self.nodes['CD4'].add_dist('N2', 78.6)\n\n self.nodes['CD8'] = SEMTreeNode('leaf')\n self.nodes['CD8'].add_dist('N2', 78.6)\n\n self.nodes['N3'] = SEMTreeNode('node')\n self.nodes['N3'].add_dist('N0', 20.8)\n self.nodes['N3'].add_dist('MON', 41.8)\n self.nodes['N3'].add_dist('NEU', 112.3)\n\n self.nodes['NEU'] = SEMTreeNode('leaf')\n self.nodes['NEU'].add_dist('N3', 112.3)\n\n self.nodes['MON'] = SEMTreeNode('leaf')\n self.nodes['MON'].add_dist('N3', 41.8)\n self.nodes['MON'].add_dist('DEN', 70.5)\n self.nodes['MON'].add_dist('MRF', 70.5)\n\n self.nodes['MRF'] = SEMTreeNode('leaf')\n self.nodes['MRF'].add_dist('MON', 70.5)\n\n self.nodes['DEN'] = SEMTreeNode('leaf')\n self.nodes['DEN'].add_dist('MON', 70.5)\n\n\n # self.get_nodes(tree)\n print(tree)\n\n # # Compare names of datasets and leaves of the tree\n # data_names = [data.name for data in dataset]\n # print(data_names)\n # for name in data_names:\n # if name not in self.nodes.keys():\n # raise ValueError('Dataset and Tree do not match')",
"def nll(self, data):\n nll = np.zeros(data.shape[0])\n self.a[0] = self.c\n for i in range(self.Ndim):\n a = self.rhos[i] * self.a[i]\n h = 0.5 * (a + np.abs(a)) # ReLU\n za = np.dot(self.Vs['alpha'][i].T, h) + self.bs['alpha'][i]\n zm = np.dot(self.Vs['mu'][i].T, h) + self.bs['mu'][i]\n zs = np.dot(self.Vs['sigma'][i].T, h) + self.bs['sigma'][i]\n self.alphas[i] = softmax(za)\n self.mus[i] = zm\n self.sigmas[i] = np.exp(zs)\n self.vars = self.sigmas ** 2.\n nll += nll_MOG_1D(data[:, i], self.alphas[i], self.mus[i],\n self.vars[i])[0]\n return nll"
] |
[
"0.59042037",
"0.58983594",
"0.5885023",
"0.5865591",
"0.58178765",
"0.5806134",
"0.5788585",
"0.578253",
"0.5766343",
"0.5740447",
"0.5739229",
"0.5704725",
"0.56913364",
"0.5689851",
"0.56591946",
"0.5614322",
"0.56094",
"0.55793583",
"0.5578998",
"0.5564107",
"0.556346",
"0.55523086",
"0.55430794",
"0.5542845",
"0.55295753",
"0.5528538",
"0.55206585",
"0.5514311",
"0.5507109",
"0.5484337"
] |
0.6712835
|
0
|
Plot a tree with branch lengths as SNV origin counts
|
import matplotlib.ticker
import matplotlib.pyplot as plt
import wgs_analysis.plots.trees


def plot_dollo_ml_tree(tree, nodes):
    # Label leaves by name and record the leaf ordering
    leaf_order = []
    for leaf in tree.leaves:
        leaf.plot_id = leaf.name
        leaf_order.append(leaf.name)
    # The number of SNVs originating on each branch becomes the branch length
    origin_counts = nodes.groupby('node')['ml_origin'].sum()
    for node in tree.nodes:
        node.origin_count = origin_counts[node.label]
    # Loss counts are computed here but not used in the plot
    loss_counts = nodes.groupby('node')['ml_loss'].sum()
    # Scale the figure width with the number of leaves
    width = 1 + 0.5 * float(len(list(tree.leaves)))
    fig = plt.figure(figsize=(width / 1.5, 6))
    ax = fig.add_subplot(111)

    # Format y-axis ticks with thousands separators
    def func(x, pos):
        s = '{:0,d}'.format(int(x))
        return s
    ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(func))

    wgs_analysis.plots.trees.plot_tree(
        ax, tree, landscape=False, flip=True,
        branch_length_attr='origin_count', leaf_name_attr='plot_id')
    ax.set_ylabel('SNV count')
    plt.tight_layout()
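# --- Usage sketch (added for illustration; not part of the original record) ---
# A minimal, hedged example of how this function might be called. The names
# `ml_tree` and `tree_annotations` are assumptions: they are taken to be the
# outputs of scgenome.snvphylo.compute_dollo_ml_tree, and the annotation table
# is assumed to carry 'node', 'ml_origin' and 'ml_loss' columns as used above.
#
#     ml_tree, tree_annotations = scgenome.snvphylo.compute_dollo_ml_tree(snv_log_likelihoods)
#     plot_dollo_ml_tree(ml_tree, tree_annotations)
#     plt.savefig('dollo_ml_tree.pdf', bbox_inches='tight')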
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def plot_tree(tree, min_x, max_x, min_y, max_y, prev_node, branch, depth=0):\r\n \r\n cur_node = tree.location # current tree's node\r\n left_branch = tree.left_child # its left branch\r\n right_branch = tree.right_child # its right branch\r\n \r\n # set line's width depending on tree's depth\r\n if depth > len(line_width)-1:\r\n ln_width = line_width[len(line_width)-1]\r\n else:\r\n ln_width = line_width[depth]\r\n \r\n k = len(cur_node.position) - 1 # k = 2\r\n axis = depth % k\r\n \r\n # draw a vertical splitting line\r\n if axis == 0:\r\n \r\n if branch is not None and prev_node is not None:\r\n \r\n if branch:\r\n max_y = prev_node[1]\r\n else:\r\n min_y = prev_node[1]\r\n \r\n plt.plot([cur_node.position[0],cur_node.position[0]], [min_y,max_y], linestyle='-', color='red', linewidth=ln_width)\r\n \r\n # draw a horizontal splitting line\r\n elif axis == 1:\r\n \r\n if branch is not None and prev_node is not None:\r\n \r\n if branch:\r\n max_x = prev_node[0]\r\n else:\r\n min_x = prev_node[0]\r\n \r\n plt.plot([min_x,max_x], [cur_node.position[1],cur_node.position[1]], linestyle='-', color='blue', linewidth=ln_width)\r\n \r\n # draw the current node\r\n plt.plot(cur_node.position[0], cur_node.position[1], 'ko')\r\n \r\n # draw left and right branches of the current node\r\n if left_branch is not None:\r\n plot_tree(left_branch, min_x, max_x, min_y, max_y, cur_node.position, True, depth+1)\r\n \r\n if right_branch is not None:\r\n plot_tree(right_branch, min_x, max_x, min_y, max_y, cur_node.position, False, depth+1)",
"def plotTree(self):\n t = self.make(self.tree)\n t.draw()",
"def _decorate_tree(t, series):\n for i, n in enumerate(t.postorder()):\n n.size = 30\n if n.is_root():\n n.size = 50\n elif n.name == n.parent.children[0].name:\n n.color = '#00FF00' # left child is green\n else:\n n.color = '#FF0000' # right child is red\n if not n.is_tip():\n t.length = series.loc[n.name]\n return t",
"def draw_tree(self):\n nx.draw(self.diffusion_tree, with_labels=True)",
"def draw_states(self):\n drawing = self.tree.draw(\n width=400,\n height=300,\n layout='d',\n node_labels=(\"idx\", 1, 1),\n node_sizes=15,\n node_style={\"stroke\": \"black\", \"stroke-width\": 2},\n node_colors=[\n toytree.colors[int(round(i[1]))] if isinstance(i, (list, np.ndarray))\n else \"white\" \n for i in self.tree.get_node_values(\"likelihood\", True, True)\n ],\n )\n return drawing",
"def plot_entity_branches(G, w=10, h=10, c=1, font_size=14, filename=None):\n start = list(G.nodes)[0]\n G = nx.bfs_tree(G, start)\n plt.figure(figsize=(w, h))\n pos = hierarchy_pos(G, start, width=float(2 * c) * math.pi, xcenter=0)\n new_pos = {\n u: (r * math.sin(theta), r * math.cos(theta)) for u, (theta, r) in pos.items()\n }\n nx.draw(\n G,\n pos=new_pos,\n alpha=0.8,\n node_size=25,\n with_labels=True,\n font_size=font_size,\n edge_color=\"gray\",\n )\n nx.draw_networkx_nodes(\n G, pos=new_pos, nodelist=[start], node_color=\"blue\", node_size=500\n )\n\n if filename:\n plt.savefig(\"{0}/{1}\".format(\"images\", filename))",
"def drawtree(self):\r\n\r\n Phylo.draw(self.tree)",
"def tree():\n nobv.visual_tree()",
"def visualize_tree(root):\n _visualize_tree(root, [], 0, '-')",
"def plotLines( self ):\n \n ## plot tree in dfs manner\n def plotLines( node_id ):\n\n node = self.mTree.node( node_id )\n\n left = self.mNodeWidthsStart[node_id]\n right = self.mNodeWidthsEnd[node_id]\n height = self.mNodeHeights[node_id] \n\n if right != left and node_id != self.mTree.root:\n self.addElements( self.mDecoratorHorizontalBranches.getElements(\n node_id,\n self.getHeaderWidth() + left,\n self.getHeaderWidth() + right,\n self.getHeaderHeight() + height ))\n \n\n for s in node.succ:\n\n new_height = self.mNodeHeights[s]\n self.addElements( self.mDecoratorVerticalBranches.getElements(\n node_id,\n self.getHeaderWidth() + right,\n self.getHeaderHeight() + height,\n self.getHeaderHeight() + new_height ))\n \n TreeTools.TreeDFS( self.mTree, self.mTree.root,\n pre_function = plotLines )",
"def draw_tree(t, df, size=10, ratio=0.6, precision=0):\n s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,\n special_characters=True, rotate=True, precision=precision)\n IPython.display.display(graphviz.Source(re.sub('Tree {',\n f'Tree {{ size={size}; ratio={ratio}', s)))",
"def draw_tree(t, df, size=10, ratio=0.6, precision=0):\n s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,\n special_characters=True, rotate=True, precision=precision)\n IPython.display.display(graphviz.Source(re.sub('Tree {',\n f'Tree {{ size={size}; ratio={ratio}', s)))",
"def draw_tree(self, agent, color='b'):\n for edge in self.all_edges[agent]:\n parent, child = edge\n for cords in self.xy_cords:\n plt.plot([parent.state[cords[0]], child.state[cords[0]]],\n [parent.state[cords[1]], child.state[cords[1]]], c=color)\n plt.xlim(self.Xi[0])\n plt.ylim(self.Xi[1])\n plt.show()",
"def plot_commits(graph):\n nx.draw_kamada_kawai(graph, alpha=0.5, node_color='blue', node_size = 2)\n figure = plt.gcf() # get current figure\n figure.set_size_inches(12, 8)",
"def plot_depth(self, bins=100):\n sns.set(font_scale=1.5)\n sns.set_style('whitegrid')\n g = sns.distplot(self.variant_df['depth'].astype(float), bins=bins)\n g.set_title('Read Depth Distribution for {}'.format(self.final_id))",
"def tree(branchLen):\n if branchLen > 5:\n t.backward(branchLen)\n t.right(20)\n tree(branchLen-16,t)\n t.left(40)\n tree(branchLen-16,t)\n t.right(20)\n t.forward(branchLen)",
"def Rec_Draw_Tree( self, cur_node, xs, ys ):\r\n yhold = [] #holds the y values of the children\r\n ypos1 = 0 #the yvalue of the current node\r\n ypos = 0\r\n new_xstart = cur_node.data.length * cb.xtick + xs\r\n #for each child of the current node\r\n for i in range( len( cur_node.sub ) ):\r\n #current node is to be drawn before the (cb.order)-th child\r\n if ( i == cb.order ):\r\n ypos1 = self.Draw_Node( cur_node, xs, ys )\r\n if( cb.order == 1 ):\r\n ypos1 = ys\r\n ys = ypos1 + cb.ytick\r\n if( len( cur_node.sub[i].sub ) == 0 ):#Draw a leaf\r\n ypos = self.Draw_Node( cur_node.sub[i], new_xstart, ys )\r\n yhold.append( int(ypos) )\r\n else: #Draw an internal node\r\n ys, ypos = self.Rec_Draw_Tree( cur_node.sub[i], new_xstart, ys )\r\n yhold.append( ypos )\r\n if( i < len( cur_node.sub ) - 1 ):\r\n ys = ys + cb.ytick\r\n if ( cb.order != 1 and cb.order == len( cur_node.sub ) ):\r\n ypos1 = self.Draw_Node( cur_node, xs, ys )\r\n elif( cb.order == 1 and cb.order == len( cur_node.sub) ):\r\n ypos1 = self.Draw_Node( cur_node, xs , ys+cb.ytick )\r\n ypos1 = ypos1 - cb.ytick\r\n\r\n #draw the vertical lines to the children\r\n for item in yhold:\r\n self.canvas_one.create_line( new_xstart, item, new_xstart, ypos1, width = 3, fill=self.branch_color )\r\n #return the farthest vertical position drawn and the position of the line of the current segment\r\n return ys, ypos1",
"def savage(t):\r\n a=t.GetListOfBranches()\r\n n=0\r\n for i in range(len(a)):\r\n if a[i]: n= n+1\r\n else: break\r\n size0=sqrt(n)\r\n size1=int(size0)\r\n if size0==size1: size=size1\r\n else: size=size1+1\r\n print size,n\r\n c=TCanvas()\r\n c.Divide(size,size)\r\n for i in range(n):\r\n c.cd(i+1)\r\n t.Draw(a[i].GetName())\r\n return c",
"def show_tree(self):\n G, vertex_dict = self.tree().graph()\n root = self.tree().root()\n vertical_list = []\n horizontal_list = []\n no_component_list = []\n for i, xi in vertex_dict.items():\n if xi.is_equal(root):\n root_index = i\n if self.is_component(xi):\n if xi.type() == \"II\":\n vertical_list.append(i)\n else:\n horizontal_list.append(i)\n print(i, \": \", xi)\n else:\n no_component_list.append(i)\n vertex_colors = {'red': vertical_list, 'blue': horizontal_list,\n 'grey': no_component_list}\n G.show(vertex_colors=vertex_colors, tree_root=root_index, layout='tree')",
"def newick(dataset):\r\n\r\n #Assign branch len 1 to all branches\r\n #Use Phylo distance function from Bio.Phylo.BaseTree module\r\n\r\n for i, line in dataset:\r\n x, y = line.split()\r\n tree = Phylo.read(io.StringIO(i), 'newick')\r\n clades = tree.find_clades()\r\n\r\n for clade in clades:\r\n clade.branch_length = 1\r\n\r\n sys.stdout.write('%s' % tree.distance(x, y) + ' ')\r\n\r\n sys.stdout.write('\\n')",
"def plot_mcc_tree():\n t = ete2.Tree(\"mcct.nex\")\n ts = ete2.treeview.TreeStyle()\n ts.show_scale = False\n ts.show_leaf_name = False\n ts.show_branch_support = False\n ts.scale = 500\n margin = 10\n ts.margin_top = margin\n ts.margin_bottom = margin\n ts.margin_left = margin\n ts.margin_right = margin\n\n germ_style = ete2.NodeStyle()\n germ_style[\"bgcolor\"] = \"LightSteelBlue\"\n proto_germ = t.get_common_ancestor(\"Danish\", \"Norwegian\",\"Icelandic\",\"Swedish\", \"Dutch\", \"German\", \"English\")\n proto_germ.set_style(germ_style)\n\n bs_style = ete2.NodeStyle()\n bs_style[\"bgcolor\"] = \"Moccasin\"\n proto_bs = t.get_common_ancestor(\"Bulgarian\", \"Czech\",\"Polish\",\"Russian\")\n proto_bs.set_style(bs_style)\n\n ital_style = ete2.NodeStyle()\n ital_style[\"bgcolor\"] = \"DarkSeaGreen\"\n proto_ital = t.get_common_ancestor(\"French\", \"Romanian\", \"Italian\", \"Portuguese\", \"Spanish\")\n proto_ital.set_style(ital_style)\n\n t.render(\"mcct.eps\", style_func, tree_style=ts, dpi=600, units=\"px\", w=2250)",
"def nbr_nodes(tree_depth):\n return 2**(tree_depth+1)-1",
"def tree(xt, yt, t):\n green = (1, 50, 32) # leaf color\n\n rect(screen, (150, 75, 0), (xt, yt, 15 * t, 60 * t), 0) # tree's trunk\n circle(screen, green, (xt + 15 * t / 2, yt - 30 * t), 30 * t) # leaves\n circle(screen, green, (xt + 15 * t / 2 + 30 * t, yt - 30 * t + 15 * t), 30 * t) # leaves\n circle(screen, green, (xt + 15 * t / 2 - 30 * t, yt - 30 * t + 15 * t), 30 * t) # leaves\n circle(screen, green, (xt + 15 * t / 2 + 30 * t, yt - 30 * t - 20 * t), 30 * t) # leaves\n circle(screen, green, (xt + 15 * t / 2 - 30 * t, yt - 30 * t - 20 * t), 30 * t) # leaves\n circle(screen, green, (xt + 15 * t / 2, yt - 30 * t - 50 * t), 30 * t) # leaves",
"def count_trees(matrix, dx, dy):\n\n # We begin in the upper left corner\n x = 0\n y = 0\n count = 0\n\n # We continue until y > [height of matrix]\n while(y < len(matrix)):\n if matrix[y][x] == '#':\n count += 1\n\n # X is special since it needs to be wrapped around\n x = (x + dx) % len(matrix[0])\n y += dy\n\n return count",
"def display_viz(self, width=60, label_max_len=3):\n output = ''\n last_children = [(self, width)] # Nodes to be added next loop\n for i in range(self.depth + 1):\n depth_output = ''\n depth_children = []\n for (node, subtree_width) in last_children:\n label = ' ' if node is None else str(node.label)[:label_max_len]\n this_output = label.center(subtree_width)\n this_children = [] # Children from this item\n cum_width = 0 # Cumulative character-width of all subtrees\n cum_cols = 0 # Cumulative maximum node-width of all subtrees\n # If no children, propogate the empty spaces below terminal\n if not node or not node.children:\n cum_cols += 1\n cum_width += subtree_width\n this_children.append((None, subtree_width))\n # If children, fill-in this_output with '_' to first/last child label\n else:\n children_cols = [c.n_cols for c in node.children]\n total_cols = sum(children_cols)\n for child, child_cols in zip(node.children, children_cols):\n # Convert each child's 'cols' into character spacing\n cum_cols += child_cols\n cum_ratio = cum_cols / total_cols\n target_width = math.ceil(cum_ratio * subtree_width) - cum_width\n remaining_width = subtree_width - cum_width\n child_width = min(target_width, remaining_width)\n # Add record and update tracked values\n this_children.append((child, child_width))\n cum_width += child_width\n # Add lines to the output\n start_padding = this_children[0][1] // 2 - 1 # Midpoint of first child\n end_padding = subtree_width - (this_children[-1][1] // 2) # ..of last child\n with_line = ''\n for i, v in enumerate(this_output):\n with_line += '_' if (i > start_padding and i < end_padding and v == ' ') else v\n this_output = with_line\n depth_output += this_output\n depth_children += this_children\n last_children = depth_children\n if last_children:\n depth_output += '\\n'\n output += depth_output\n return output",
"def visualise_binary_tree(self):\n tree_elements = [i for i in self.breadthfirst()] # saves the nodes of the tree in an array after the breadthfirst transversal is executed\n height = self.height(self.root())\n n = sum([2 ** i for i in range(0, height + 1)]) # total number of possible nodes of a tree\n array_tree = n * [\" \"] # array-based representation of a binary tree implemented by using level-numbering of positions(chapter 8.3.2 of Goodrich book)\n array_tree[0] = tree_elements[0] # assigning the root\n for i in range(0, len(tree_elements)):\n index1 = i\n if tree_elements[i] in array_tree:\n index1 = array_tree.index(tree_elements[i])\n for j in range(i, len(tree_elements)):\n if tree_elements[j] == self.left(tree_elements[i]):\n array_tree[2 * index1 + 1] = tree_elements[j]\n if tree_elements[j] == self.right(tree_elements[i]):\n array_tree[2 * index1 + 2] = tree_elements[j]\n break\n for i in range(0, len(array_tree)):\n if array_tree[i] != \" \": # the empty nodes are represented by \" \"\n array_tree[i] = array_tree[i].element() # changing the array from nodes to elements of the nodes\n height1 = height\n spaces = 2 ** (height + 1) - 2 # initialises the number of spaces that have to be added when displaying the nodes\n height -= 1\n pos = 0 # index of the node that is displayed\n print(spaces * \" \" + array_tree[pos])\n for i in range(0, height1 + 1): #iterates through all the levels of the binary tree\n spaces = 2 ** (height + 1) - 2\n level = spaces * \" \" # initialises each level of the binary tree with the appropiate number of spaces\n height += 1\n spaces = 2 ** (height + 1) - 1\n if 2 * pos + 3 > len(array_tree): # exit the loop if the tree was traversed\n break\n for j in range(0, 2 ** i):\n level += array_tree[2 * pos + 1] + \" \" * spaces + array_tree[2 * pos + 2] + \" \" * spaces # adds the nodes from that level\n pos += 1\n height -= 2\n print(level)",
"def correct_branch_lengths(tree_file, format, d = \"\"):\n tree = dendropy.Tree.get_from_path(tree_file, format)\n depth = tree.seed_node.distance_from_tip()\n mean_branch_length = tree.length()/(2 * len(tree.leaf_nodes()) - 3)\n string_len = len(str(int(mean_branch_length + 0.5)))\n if string_len > 1:\n correction_factor = 10 ** string_len\n else:\n correction_factor = 1\n for edge in tree.preorder_edge_iter():\n if edge.length:\n edge.length /= correction_factor\n pth = os.path.join(d, 'Tree_{0}_{1}.newick'.format(correction_factor, depth))\n tree.write_to_path(pth, 'newick')\n return depth, correction_factor, pth",
"def initializePlot( self ):\n\n self.mNTaxa = len(self.mTree.get_taxa())\n self.mNNodes = max( self.mTree.chain.keys() ) + 1\n\n self.calculateCoordinates()\n \n self.calculateCanvasSize( )",
"def print_tree(self):\n stack = [(self.root, 0, 0)] # (node, child no., tabs)\n ntabs = 0\n while len(stack):\n n, i, tabs = stack.pop()\n if len(n.branch):\n if i>=1 and i==len(n.children)-1:\n print(tabs*'\\t' + 'axis-' + str(n.axis) + ': >' + str(n.branch[i-1]))\n else:\n print(tabs*'\\t' + 'axis-' + str(n.axis) + ': <=' + str(n.branch[i]))\n stack.append((n, i+1, tabs))\n if i<len(n.children):\n stack.append((n.children[i], 0, tabs+1))\n else:\n avg = np.dot(n.probabilities[:,0], n.probabilities[:,1])\n print(tabs*'\\t' + 'Label: ' + str(avg) + '\\n')",
"def binary_tree(length, depth):\n if depth == 0:\n return # base case\n posx = turtle.xcor()\n posy = turtle.ycor()\n left(length, depth)\n turtle.up()\n turtle.goto(posx, posy)\n #turtle.dot()\n turtle.down()\n right(length, depth)"
] |
[
"0.6562327",
"0.6287754",
"0.6251616",
"0.6110724",
"0.60776746",
"0.6010152",
"0.59485954",
"0.5894037",
"0.58730257",
"0.5862622",
"0.581848",
"0.581848",
"0.5794135",
"0.57915914",
"0.5767268",
"0.57500875",
"0.57399803",
"0.5731056",
"0.56966746",
"0.56453246",
"0.5613726",
"0.55489206",
"0.5545914",
"0.55422384",
"0.55386287",
"0.5534032",
"0.5485737",
"0.5474236",
"0.5465179",
"0.54330486"
] |
0.6549905
|
1
|
Run the SNV phylogenetic analysis.
|
import scgenome.snvphylo


def run_snv_phylogenetics(snv_count_data, allele_cn, clusters, results_prefix):
    # Per-cluster SNV log likelihoods given the allele-specific copy number
    snv_log_likelihoods = scgenome.snvphylo.compute_snv_log_likelihoods(
        snv_count_data, allele_cn, clusters)
    # Maximum-likelihood Dollo tree over the SNVs
    ml_tree, tree_annotations = scgenome.snvphylo.compute_dollo_ml_tree(
        snv_log_likelihoods)
    return ml_tree, tree_annotations
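# --- Usage sketch (added for illustration; the variable names are assumptions) ---
# snv_count_data: per-cell SNV read counts; allele_cn: allele-specific copy number
# calls; clusters: cell-to-clone assignments; results_prefix: output path prefix
# (accepted but unused above).
#
#     ml_tree, tree_annotations = run_snv_phylogenetics(
#         snv_count_data, allele_cn, clusters, 'results/snv_phylo')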
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def run(self, verbose=0):\n self.verbose = verbose\n self._preproc()\n self._lda()\n self._evaluate()",
"def main():\n args = parameter_parser()\n tab_printer(args)\n trainer = GPNTrainer(args)\n # trainer.fit()\n \"\"\"\n Scoring on the prediction and learning ability.\n \"\"\"\n trainer.score()\n \"\"\"\n Scoring on the subgraph test set.\n \"\"\"\n # trainer.score2()\n \"\"\"\n Scoring on the generalization ability.\n \"\"\"\n # trainer.score3()\n \"\"\"\n Finetuning for downstream tasks.\n \"\"\"\n # model = finetune_GPN(args, trainer.number_of_labels)\n # model.finetune()",
"def main():\n data = pd.read_csv('./house-votes-84.data', header = None)\n\n class_names = [\"republican\", \"democrat\"]\n\n print(\"\\n-- Train and Test with Winnow --\\n\")\n train_and_test_with_winnow(data, class_names)\n\n print(\"\\n-- Train and Test with Naive Bayes --\\n\")\n train_and_test_with_naive_bayes(data, class_names)",
"def test_ngram():\n #Some examples of functions usage\n trigram_counts, bigram_counts, unigram_counts, token_count = train_ngrams(S_train)\n print \"#trigrams: \" + str(len(trigram_counts))\n print \"#bigrams: \" + str(len(bigram_counts))\n print \"#unigrams: \" + str(len(unigram_counts))\n print \"#tokens: \" + str(token_count)\n perplexity = evaluate_ngrams(S_dev, trigram_counts, bigram_counts, unigram_counts, token_count, 0.5, 0.4)\n print \"#perplexity: \" + str(perplexity)\n ### YOUR CODE HERE\n ### END YOUR CODE",
"def main():\n # Goal is to model the OSSOS resonance detections given a file with parameters for those resonances.\n # e.g. from Crompvoets et al. (2021)\n\n # now run a survey simulation.\n params = sys.argv[1]\n H_max = float(sys.argv[2])\n outfile=f\"{os.path.splitext(params)[0]}_Model.dat\"\n print(f\"Saving results to {outfile}\")\n if not os.access(outfile, os.R_OK):\n run(outfile, params, 123456789, H_max=H_max)\n\n # confirm this looks like the OSSOS detections using rose plot.\n face_down_plot(outfile)",
"def main(args):\n gt_path = args.ground_truth\n djdd_path = args.djdd\n bjdd_path = args.bjdd\n\n mse_fn = th.nn.MSELoss()\n psnr_fn = PSNR()\n\n device = \"cpu\"\n # if th.cuda.is_available():\n # device = \"cuda\"\n\n pdf = pd.DataFrame(columns=[\"filename\",\"imgid\", \"PSNR_for_DJDD\", \"MSE_for_DJDD\", \"PSNR_for_BJDD\", \"MSE_for_BJDD\"])\n\n count = 0\n msedjdd = 0.0\n psnrdjdd = 0.0\n\n msebjdd = 0.0\n psnrbjdd = 0.0\n\n for root, _, files in os.walk(gt_path):\n for idx, name in enumerate(files):\n \n # djdd image\n output_djdd = np.array(imread(os.path.join(djdd_path, name+\"_0_output.png\"))).astype(np.float32) / (2**8-1)\n output_djdd = th.from_numpy(np.transpose(output_djdd, [2,0,1])).to(device).unsqueeze(0)\n\n #bjdd image\n output_bjdd = np.array(imread(os.path.join(bjdd_path, name.split('.')[0]+\"_sigma_0_bayer_PIPNet.png\"))).astype(np.float32) / (2**8-1)\n output_bjdd = th.from_numpy(np.transpose(output_bjdd, [2,0,1])).to(device).unsqueeze(0)\n\n # gt image\n target = np.array(imread(os.path.join(root, name))).astype(np.float32) / (2**8-1)\n target = th.from_numpy(np.transpose(target, [2, 0, 1])).to(device).unsqueeze(0)\n\n\n target_djdd = crop_like(target, output_djdd)\n target_bjdd = crop_like(target, output_bjdd)\n\n psnr_djdd = psnr_fn(output_djdd, target_djdd).item()\n mse_djdd = mse_fn(output_djdd, target_djdd).item()\n\n psnr_bjdd = psnr_fn(output_bjdd, target_bjdd).item()\n mse_bjdd = mse_fn(output_bjdd, target_bjdd).item()\n\n psnrdjdd += psnr_djdd\n msedjdd += mse_djdd\n psnrbjdd += psnr_bjdd\n msebjdd += mse_bjdd\n\n count += 1\n\n LOG.info(f\"imgid: {idx}, PSNR_BJDD: {psnr_bjdd}, MSE_BJDD: {mse_bjdd}, PSNR_DJDD: {psnr_djdd}, MSE_DJDD: {mse_djdd}\")\n pdf = pdf.append({\n \"filename\": name,\n \"imgid\": idx,\n \"PSNR_for_DJDD\": psnr_djdd,\n \"MSE_for_DJDD\": mse_djdd,\n \"PSNR_for_BJDD\": psnr_bjdd,\n \"MSE_for_BJDD\": mse_bjdd\n }, ignore_index=True)\n # pdb.set_trace()\n\n msebjdd /= count\n psnrbjdd /= count\n\n msedjdd /= count\n psnrdjdd /= count\n\n LOG.info(\"--------------BJDD---------------------\")\n LOG.info(\"Average, PSNR = %.1f dB, MSE = %.5f\", psnrbjdd, msebjdd)\n\n LOG.info(\"--------------DJDD---------------------\")\n LOG.info(\"Average, PSNR = %.1f dB, MSE = %.5f\", psnrdjdd, msedjdd)\n pdb.set_trace()\n pdf.to_csv(\"/workspace/presentation_compare.csv\")",
"def main():\n run_nutanix_vm_creation_module()",
"def run(pars, #parameter files\n #directory of scenario files\n scen_dir = r'C:\\LS\\03_TOOLS\\_git\\COVID_01\\scenarios',\n \n #map to scenario files\n scen_d = {\n 'NoNPI':'NPI_Scenario1_None.R',\n 'BI1918':'NPI_Scenario2_Bootsma_1918Influenza.R',\n 'SouthKorea':'NPI_Scenario3_SouthKorea.R',\n 'Reduced':'NPI_Scenario4_ReducedGamma.R', \n }\n ):\n \n \n \n #===========================================================================\n # precheck \n #===========================================================================\n assert len(pars)==4, 'unexpected inputs count'\n print('pars: \\n%s'%pars)\n \n #check the R Environment variables\n assert 'R_USER' in os.environ\n assert 'R_HOME' in os.environ\n \n #print('R_USER=%s \\nR_HOME=%s'%(os.getenv('R_USER'), os.getenv('R_HOME')))\n\n \n \n \n \n #===========================================================================\n # setup\n #===========================================================================\n s = setup.Setup(setup_name = 'mid_utah_'+pars[2],\n spatial_setup = WestCoastSpatialSetup(),\n nsim = int(pars[1]),\n ti = datetime.date(2020, 3, 6),\n tf = datetime.date(2020, 10, 1),\n interactive = False,\n write_csv = True,\n dt = 1/4)\n \n #===========================================================================\n # set the scenario parmaters\n #===========================================================================\n\n \n \n assert pars[2] in scen_d, 'unrecognized scenario: %s'%pars[2]\n \n rfp = os.path.join(scen_dir, scen_d[pars[2]])\n assert os.path.exists(rfp)\n \n s.script_npi = rfp\n \n print('set script_npi=%s'%s.script_npi)\n\n #===========================================================================\n # execute\n #===========================================================================\n\n print()\n print()\n print(f\">>> Starting {s.nsim} model runs on {pars[3]} processes\")\n print(f\">>> Setup *** {s.setup_name} *** from {s.ti} to {s.tf} !\")\n print(f\">>> writing to folder : {s.datadir}{s.setup_name}\")\n print()\n print()\n \n tic = time.time()\n \n res_l = seir.run_parallel(s, int(pars[3]))\n print(f\">>> Runs done in {time.time()-tic} seconds...\")",
"def main(verbose=True):\n if verbose: \n print(\"\\n---------------\")\n printCommonSNPCounts()\n print(\"---------------\")\n \n print(\"Charles River\")\n print(\"---------------\") \n getCommonSNPIndices(\"C\", save=True)\n print(\"---------------\")\n \n print(\"Harlan River\")\n getCommonSNPIndices(\"H\", save=True)\n print(\"---------------\")\n else:\n getCommonSNPIndices(\"C\", save=True)\n getCommonSNPIndices(\"H\", save=True)",
"def setUp(self):\n \n chrom = \"1\"\n pos = \"15000000\"\n snp_id = \"CM00001\"\n ref = \"A\"\n alt = \"G\"\n filt = \"PASS\"\n \n # set up a SNV object, since SNV inherits VcfInfo\n self.var = SNV(chrom, pos, snp_id, ref, alt, filt)\n self.var.debug_chrom = \"1\"\n self.var.debug_pos = \"15000000\"\n \n self.default_info = \"HGNC=ATRX;CQ=missense_variant;random_tag\"\n \n \n # here are the default filtering criteria, as loaded into python\n known_genes = {\"ATRX\": {\"inheritance\": {\"Hemizygous\": \\\n {\"Loss of function\"}}, \"start\": \"10000000\", \"chrom\": \"1\", \\\n \"confirmed_status\": {\"Confirmed DD Gene\"}, \"end\": \"20000000\"}}\n \n SNV.known_genes = known_genes\n \n self.var.add_info(self.default_info)",
"def main(self):\n\n self.nodelist = []\n\n self.probname = self.probpath.split('/')[-1].rstrip('.mps.lp.gz')\n\n model = Model(\"TreeD\")\n eventhdlr = LPstatEventhdlr()\n eventhdlr.nodelist = self.nodelist\n model.includeEventhdlr(eventhdlr, \"LPstat\", \"generate LP statistics after every LP event\")\n model.readProblem(self.probpath)\n model.setIntParam('presolving/maxrestarts', 0)\n\n for setting in self.scip_settings:\n model.setParam(setting[0], setting[1])\n\n model.optimize()\n\n self.scipversion = 'SCIP '+str(model.version())\n # self.scipversion = self.scipversion[:-1]+'.'+self.scipversion[-1]\n\n if model.getStatus() == 'optimal':\n self.optval = model.getObjVal()\n else:\n self.optval = None\n\n\n # print(\"performing Spatial Analysis on similarity of LP condition numbers\")\n # self.performSpatialAnalysis()\n\n columns = self.nodelist[0].keys()\n self.df = pd.DataFrame(self.nodelist, columns = columns)\n\n # merge solutions from cutting rounds into one node\n if not self.showcuts:\n self.df = self.df[self.df['first'] == False].drop_duplicates(subset='age', keep='last').reset_index()",
"def test_ngram():\n # Some examples of functions usage\n trigram_counts, bigram_counts, unigram_counts, token_count = train_ngrams(S_train)\n print \"#trigrams: \" + str(len(trigram_counts))\n print \"#bigrams: \" + str(len(bigram_counts))\n print \"#unigrams: \" + str(len(unigram_counts))\n print \"#tokens: \" + str(token_count)\n perplexity = evaluate_ngrams(S_dev, trigram_counts, bigram_counts, unigram_counts, token_count, 0.5, 0.4)\n print \"#perplexity: \" + str(perplexity)\n ### YOUR CODE HERE\n print(vocabsize)\n ### END YOUR CODE",
"def main():\n num_rows = 500000\n review_df = pd.read_csv(\"s3://msia490project/processed_video_reviews.csv\").dropna().head(num_rows)\n # train and test set split\n X_train, X_test, y_train, y_test = train_test_split(review_df['reviewText'], review_df['score'],\n random_state=115)\n # re-run the model pipeline and generate necessary artifacts for making predictions\n best_svm = LinearSVC(random_state=115)\n ngram_range = (1, 3)\n generate_artifacts_for_best_svm_model(best_svm, ngram_range, X_train, y_train)",
"def main():\r\n args = Parameters().parse()\r\n # #\r\n # args.method = 'student_res18_pre'\r\n args.method = 'student_esp_d'\r\n args.dataset = 'camvid_light'\r\n args.data_list = \"/ssd/yifan/SegNet/CamVid/test.txt\"\r\n args.data_dir = \"/ssd/yifan/\"\r\n args.num_classes = 11\r\n # args.method='psp_dsn_floor'\r\n args.restore_from = \"./checkpoint/Camvid/ESP/base_57.8.pth\"\r\n # args.restore_from=\"/teamscratch/msravcshare/v-yifan/ESPNet/train/0.4results_enc_01_enc_2_8/model_298.pth\"\r\n # args.restore_from = \"/teamscratch/msravcshare/v-yifacd n/sd_pytorch0.5/checkpoint/snapshots_psp_dsn_floor_1e-2_40000_TEACHER864/CS_scenes_40000.pth\"\r\n # args.restore_from = \"/teamscratch/msravcshare/v-yifan/sd_pytorch0.5/checkpoint/snapshots_psp_dsn_floor_1e-2_40000_TEACHER5121024_esp/CS_scenes_40000.pth\"\r\n # args.data_list = '/teamscratch/msravcshare/v-yifan/deeplab_v3/dataset/list/cityscapes/train.lst'\r\n args.batch_size = 1\r\n print(\"Input arguments:\")\r\n for key, val in vars(args).items():\r\n print(\"{:16} {}\".format(key, val))\r\n\r\n h, w = map(int, args.input_size.split(','))\r\n input_size = (h, w)\r\n\r\n print(args)\r\n output_path = args.output_path\r\n if not os.path.exists(output_path):\r\n os.makedirs(output_path)\r\n # args.method='psp_dsn'\r\n deeplab = get_segmentation_model(args.method, num_classes=args.num_classes)\r\n\r\n ignore_label = 255\r\n id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,\r\n 3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,\r\n 7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,\r\n 14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,\r\n 18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,\r\n 28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}\r\n\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\r\n # args.restore_from=\"/teamscratch/msravcshare/v-yifan/sd_pytorch0.3/checkpoint/snapshots_resnet_psp_dsn_1e-4_5e-4_8_20000_DSN_0.4_769light/CS_scenes_20000.pth\"\r\n # if 'dense' in args.method:\r\n #\r\n if args.restore_from is not None:\r\n saved_state_dict = torch.load(args.restore_from)\r\n c_keys = saved_state_dict.keys()\r\n for i in c_keys:\r\n flag = i.split('.')[0]\r\n if 'module' in flag:\r\n deeplab = nn.DataParallel(deeplab)\r\n deeplab.load_state_dict(saved_state_dict)\r\n if 'module' not in flag:\r\n deeplab = nn.DataParallel(deeplab)\r\n # if 'dense' not in args.method:\r\n # deeplab = nn.DataParallel(deeplab)\r\n model = deeplab\r\n model.eval()\r\n model.cuda()\r\n # args.dataset='cityscapes_light'\r\n testloader = data.DataLoader(get_segmentation_dataset(args.dataset, root=args.data_dir, list_path=args.data_list,\r\n crop_size=(360, 480), mean=IMG_MEAN, scale=False,\r\n mirror=False),\r\n batch_size=args.batch_size, shuffle=False, pin_memory=True)\r\n\r\n data_list = []\r\n confusion_matrix = np.zeros((args.num_classes, args.num_classes))\r\n\r\n palette = get_palette(20)\r\n\r\n image_id = 0\r\n for index, batch in enumerate(testloader):\r\n if index % 100 == 0:\r\n print('%d processd' % (index))\r\n if args.side:\r\n image, label, _, size, name = batch\r\n elif 'sd' in args.dataset:\r\n _, image, label, size, name = batch\r\n else:\r\n image, label, size, name = batch\r\n # print('image name: {}'.format(name))\r\n size = size[0].numpy()\r\n output = predict_esp(model, image)\r\n # seg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)\r\n result = np.asarray(np.argmax(output, axis=3), 
dtype=np.uint8)\r\n # result=cv2.resize(result, (1024, 1024), interpolation=cv2.INTER_NEAREST)\r\n m_seg_pred = ma.masked_array(result, mask=torch.eq(label, 255))\r\n ma.set_fill_value(m_seg_pred, 20)\r\n seg_pred = m_seg_pred\r\n\r\n for i in range(image.size(0)):\r\n image_id += 1\r\n print('%d th segmentation map generated ...' % (image_id))\r\n args.store_output = 'True'\r\n output_path = './esp_camvid_base/'\r\n if not os.path.exists(output_path):\r\n os.mkdir(output_path)\r\n if args.store_output == 'True':\r\n # print('a')\r\n output_im = PILImage.fromarray(seg_pred[i])\r\n output_im.putpalette(palette)\r\n output_im.save(output_path + '/' + name[i] + '.png')\r\n\r\n seg_gt = np.asarray(label.numpy()[:, :size[0], :size[1]], dtype=np.int)\r\n ignore_index = seg_gt != 255\r\n seg_gt = seg_gt[ignore_index]\r\n seg_pred = seg_pred[ignore_index]\r\n confusion_matrix += get_confusion_matrix(seg_gt, seg_pred, args.num_classes)\r\n\r\n pos = confusion_matrix.sum(1)\r\n res = confusion_matrix.sum(0)\r\n tp = np.diag(confusion_matrix)\r\n\r\n IU_array = (tp / np.maximum(1.0, pos + res - tp))\r\n mean_IU = IU_array.mean()\r\n\r\n print({'meanIU': mean_IU, 'IU_array': IU_array})\r\n\r\n print(\"confusion matrix\\n\")\r\n print(confusion_matrix)",
"def run(args):\n logger = logging.getLogger(__name__)\n\n try:\n # create output dir\n util.create_dir(args.output)\n\n # download assemblies\n args.clade = \"nostocales\"\n download_output_dir = f\"{args.output}/assemblies\"\n util.create_dir(download_output_dir)\n a = download.query_assemblies(\n args.clade, download_output_dir, quiet=args.quiet)\n\n # create csv report\n output_file = f\"{args.output}/assemblies.csv\"\n download.create_summary(a, output_file)\n except OSError as e:\n logger.error(f\"Directory `{args.output}` already exists. Exiting\")\n sys.exit(1)\n except Exception as e:\n raise\n else:\n logger.info(\"wiz phylogeny finished. Goodbye.\")",
"def main():\n parser = argparse.ArgumentParser(description='Implementation of the Naive Bayes and Perceptron classifiers')\n parser.add_argument('--statsmode', help='whether to gather stats or not', choices=['y','Y','N','n'], default='n')\n parser.add_argument('--classifier', help='classifier to use', choices=['BAYES', 'PERCEPTRON'], required=True)\n parser.add_argument('--mode', help='image class to test', choices=['VALIDATION', 'TEST'], default='TEST')\n parser.add_argument('--type', help='image type to train', choices=['DIGIT', 'FACE', 'MNIST'], required=True)\n parser.add_argument('--range', metavar=('START', 'END_EXCLUSIVE'), nargs=2, type=int, help='Range of data to test', default=[0, 100])\n parser.add_argument('--trainpercent', metavar='PERCENT', type=int, help='the percent of training data to use (int out of 100)', default=100, dest='percentage')\n parser.add_argument('--smoothing', type=int, help='Laplace smoothing constant (Naive Bayes)', default=2)\n parser.add_argument('--iterations', type=int, help='Number of times to iterate over training data (Perceptron)', default=5)\n parser.add_argument('--debug', help='Outputs more detailed information to stdout', action='store_true')\n parser.add_argument('--statloops', type=int, help='Number of times the classifier iterates over test data (Statistics only)', default=5)\n args = parser.parse_args()\n # image_type = ImageType.DIGIT if args.type == 'DIGIT' else ImageType.FACE\n image_type = None\n if args.type == 'DIGIT':\n image_type = ImageType.DIGIT\n elif args.type == 'FACE':\n image_type = ImageType.FACE\n else:\n image_type = ImageType.MNIST\n mode = Mode.TEST if args.mode == 'TEST' else Mode.VALIDATION\n if args.statsmode == 'y' or args.statsmode == 'Y':\n run_percentages_classifier(args.classifier, image_type, args)\n else:\n run = run_classifier_bayes if args.classifier == 'BAYES' else run_classifier_perceptron\n run(mode, image_type, args)",
"def main():\n\n dir_path =r'/Users/dustin/CS/projects/ship_detector/data/ships-in-satellite-imagery/shipsnet/'\n\n data_array, label_array = read_images(dir_path)\n\n array_info(data_array, label_array)\n\n image_info(data_array[0,:], plot_image=False)\n\n split_ratios = [0.8, 0.1, 0.1] #splitting the dataset into 80% train, 10% dev, 10% test\n\n X_train, X_dev, X_test, Y_train, Y_dev, Y_test = dataset_split(data_array, label_array, split_ratios)",
"def main():\n\n args = parse_cmd_line_args()\n\n random_state = check_random_state(args.random_seed)\n\n X, mu, A, phases = generate_data(args.n_features, n_samples=args.n_samples,\n period=args.period, order=args.order,\n noise_variance=args.noise_variance,\n random_state=random_state)\n\n if args.plot_data:\n plot_data_timeseries(X)\n\n best_fit, best_weights = fit_fembv_varx(\n X, n_components=args.n_components,\n max_tv_norm=args.max_tv_norm,\n memory=args.memory, n_init=args.n_init,\n tolerance=args.tolerance,\n max_iterations=args.max_iterations,\n verbose=args.verbose, random_state=random_state)\n\n if args.plot_weights:\n plot_weights_timeseries(best_weights, phases)",
"def main(domain):\n\n filepath_train1 = '../../Non_covid_data_15oct/train_data_batch1_disregard_removed.pkl'\n filepath_test1 = '../../Non_covid_data_15oct/test_data_batch1_disregard_removed.pkl'\n filepath_train2 = '../../Covid_data_11nov/traindata_covidbatch.pkl'\n filepath_test2 = '../../Covid_data_11nov/testdata_covidbatch.pkl'\n\n df_train_nc, df_test_nc = createDataframe(filepath_train1, filepath_test1, domain, 'noncovid')\n df_train_c, df_test_c = createDataframe(filepath_train2, filepath_test2, domain, 'covid')\n #print(df_train)\n sen_reps_tr_nc, labels_tr_nc, sen_reps_te_nc, labels_te_nc = prepro(df_train_nc, df_test_nc)\n sen_reps_tr_c, labels_tr_c, sen_reps_te_c, labels_te_c = prepro(df_train_c, df_test_c)\n #print(labels_te)\n\n #Uncomment to combine training datasets \n #sen_reps_tr_c += sen_reps_tr_nc\n #labels_tr_c += labels_tr_nc\n\n #Uncomment to combine test datasets and test labels if necessary (if you do so, also combine test df's)\n #sen_reps_te_c += sen_reps_te_nc\n #labels_te_c += labels_te_nc\n #df_test = pd.concat([df_test_c, df_test_nc])\n\n #Feed selected train and test data to regression model\n predictions = get_predictions(sen_reps_tr_c, labels_tr_c, sen_reps_te_c)\n\n #Make dataframes of note id's and labels\n df_ann = make_note_df(df_test_c, labels_te_c)\n df_pred = make_note_df(df_test_c, predictions)\n\n #Evaluate on sentence level\n MSE, MAE, RMSE = evaluation(labels_te_c, predictions)\n\n print(\"MSE \"+domain, MSE)\n print(\"MAE \"+domain, MAE)\n print(\"RMSE \"+domain, RMSE)\n\n #Aggregate per note\n means_ann = means(df_ann)\n means_pred = means(df_pred)\n\n #Evaluate on note level\n MSE, MAE, RMSE = evaluation(means_ann, means_pred)\n\n print(\"MSE agg\"+domain, MSE)\n print(\"MAE agg\"+domain, MAE)\n print(\"RMSE agg\"+domain, RMSE)",
"def run(config_file,vehicle_ego, SUMO):\r\n # Load configuration.\r\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\r\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\r\n config_file)\r\n\r\n # Create the population, which is the top-level object for a NEAT run.\r\n p = Population(config)\r\n\r\n # Add a stdout reporter to show progress in the terminal.\r\n p.add_reporter(neat.StdOutReporter(True))\r\n stats = neat.StatisticsReporter()\r\n p.add_reporter(stats)\r\n\r\n # Run for up to 300 generations.\r\n winner = p.run(eval_genomes, trafic, SUMO, number_episodes)\r\n# p.stop()\r\n\r\n # Display the winning genome.\r\n print('\\nBest genome:\\n{!s}'.format(winner))\r\n\r\n # Show output of the most fit genome against training data.\r\n print('\\nOutput:')\r\n# winner_net = neat.nn.FeedForwardNetwork.create(winner, config)\r\n# for xi, xo in zip(xor_inputs, xor_outputs):\r\n# output = winner_net.activate(xi)\r\n# print(\r\n# \"input {!r}, expected output {!r}, got {!r}\".format(xi, xo, output)\r\n# )\r\n\r\n if visualize is not None:\r\n node_names = {-1: 'distance', -2: 'v_ego',-3:'v_prec', -4:'v_allowed', 0: 'a_set'}\r\n visualize.draw_net(config, winner, True, node_names=node_names)\r\n visualize.plot_stats(stats, ylog=False, view=True)\r\n visualize.plot_species(stats, view=True)\r\n return winner",
"def main(args):\n\n\t##############################################################################\n\t######## Pass user command line arguments to setup.py which will #############\n\t############# initialise some parameters for the analysis ###################\n\t##############################################################################\n\tinit_ = setup.initialise_user_input(args)\n\n\t##############################################################################\n\t######## Define system_ which is the object, of class nanoCISC, ##############\n\t######## which contains all relevant information about your nanoparticle ####\n\t##############################################################################\n\tsystem_ = nano_cisc.nanoCISC(init_.nano_particle, init_.anchors, init_.beta, init_.calcrange, \n init_.curves, init_.targetinc, init_.density) \n\t# initialise system_ as nanoCISC class here ^^^\n\n\t# If density is being calculated, define grid from grid class\n\tif args['density']:\n\t\tgrid=grids.grid(system_)\n\n\n\t##############################################################################\n\t################ Process trajectory, frame by frame ##########################\n\t##############################################################################\n\n\tfor ts in init_.u.trajectory: # loop through trajectory frames here \n\t\tprint \"Processing snapshot %d \" % (ts.frame)\n\n\t\t# Array for calculating intrinsic density is initialised to {0}\n\t\tintrinsic_count=np.zeros( ( np.ceil( 3 * system_.calculation_range).astype(np.int) ,len(system_.density) ), dtype = np.float32) \n\n\t\t# Array that stores the instantaneous volume of each spatial interval is initialised to {0}\n\t\tvolume_at_dist=np.zeros( ( np.ceil( 3 * system_.calculation_range).astype(np.int) ,len(system_.density) ), dtype = np.float32) \n\n\t\t# Centre of mass position is updated\n\t\tsystem_.update_com()\n\n\t\t# Vectors describing the anchor points are updated \n\t\tsystem_.update_anchors() \n\n\t\t# Nanoparticle depth values are updated\n\t\tsystem_.update_surface() \t\n\n\t\tif args['XYZsurface']:\n\t\t\tsystem_.write_surface(init_.f_visualise_surface) # write micelle surface to xyz file\n \n \t\tif args['density']: \n \t\t\tgrid.update_volume_estimate(volume_at_dist, system_) # volume estimate is updated for snapshot\n\t\t\tsystem_.calculate_density(intrinsic_count, volume_at_dist) # calculate density here\n\n\t\tsystem_.frames_processed += 1\n\n\t##################################\n\t##### Print results to files #####\n\t##################################\n\tif args['density']:\n\t\tsystem_.print_intrinsic_density(init_.f_intrinsic_density_out)\n\t\tsystem_.print_radial_density()\n\n\n\tprint \"Program finished successfully!!!\\n\"",
"def run_ps(self):\n config = self._estimator.config\n server = self._start_std_server(config)\n server.join()",
"def run(self) -> None:\n ts = time.time()\n startTime = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n\n svm_dataset = \"NLP/SVM/IHE/SVM_dataset_ihe.csv\"\n \n tags = ['IHE {}'.format(i) for i in range(1, 10)] # IHE tags.\n\n # SDG results files.\n model = \"NLP/SVM/IHE/model.pkl\"\n\n self.load_dataset(svm_dataset)\n self.load_tags(tags)\n print(\"Loaded dataset: size =\", len(self.dataset))\n\n print(\"Training...\")\n X_train, X_test, y_train, y_test = self.train()\n\n\n print(\"Saving results...\")\n self.serialize(model)\n\n print(\"Done.\")",
"def run(config_file):\n config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_file)\n\n # Create the population, which is the top-level object for a NEAT run.\n p = neat.Population(config)\n\n # Add a stdout reporter to show progress in the terminal.\n p.add_reporter(neat.StdOutReporter(True))\n stats = neat.StatisticsReporter()\n p.add_reporter(stats)\n #p.add_reporter(neat.Checkpointer(5))\n\n # Run for up to 50 generations.\n winner = p.run(eval_genomes, 50)\n #loop()\n\n # show final stats\n print('\\nBest genome:\\n{!s}'.format(winner))",
"def main():\n\n # Chdir into script directory so to properly resolve relative paths in configuration\n os.chdir(os.path.dirname(os.path.realpath(__file__)) + \"/\")\n\n # Disable proxy as we access localhost, both to avoid overhead and issues with proxy misconfiguration\n os.environ['NO_PROXY'] = '*'\n\n # Stop any GraphDB server that we previously started and is possibly still around due to script interruption/crash\n shell(f\"{cmd_graphdb} stopall\")\n\n # Generate synthetic traces, both for populating the repositories and for the {sf, sp, pf, pp} tests\n prepare_traces()\n \n # Generate central repositories (if needed)\n for size, approach in itertools.product(sizes, approaches):\n prepare_repository(size, approach)\n \n # Run experiments (if needed)\n for size, approach in itertools.product(sizes, approaches):\n run_experiments(size, approach)",
"def runtrials_nomap(name, configs, test, N=20, gui=False, rosbag=True):\n print \"*** %s ***\" % name\n scriptdir = \"%s/%s\" % (os.getcwd(), name)\n if os.path.exists(scriptdir):\n print \"WARNING: directory %s already exists, skipping script\" % scriptdir\n return False\n os.mkdir(scriptdir)\n evman = init(scriptdir)\n for n in range(N):\n psd = \"%s/psd\" % scriptdir\n os.mkdir(psd)\n xypose = None\n for cname, search_kwargs in configs:\n print \"# Evaluating n=%i %s\" % (n, cname)\n logdir = \"%s/%s_%i\" % (scriptdir, cname, n)\n os.mkdir(logdir)\n evman.initBaseSystem(gui)\n evman.initRviz(gui)\n xypose = evman.setCamera(xypose)\n evman.initMapping(logdir)\n evman.initSearchMan(psd=psd, log=logdir, **search_kwargs)\n if rosbag:\n evman.recordRosbag(logdir)\n evman.runTest(**test)\n evman.shutdown()\n # --- CLEANUP ---\n try:\n shutil.rmtree(psd)\n except OSError:\n print \"WARNING: Expected to see persistent_samples, but there were none!\"\n evman.shutdown()\n return True",
"def run():\n\tglobal D\n\tif True:\n\t\treport = D.session.next()\n\t\t# print report\n\t\tgpsMsg = RosGPS()\n\t\tgpsMsg.ros_time = rospy.get_time()\n\t\tif report[\"class\"] == \"TPV\":\n\t\t\tif hasattr(report,\"time\"): \n\t\t\t\tif report.time != None: gpsMsg.gps_time = getUnix(report.time)\n\t\t\tif hasattr(report,\"lon\"): \n\t\t\t\tif report.lon != None: gpsMsg.longitude = report.lon\n\t\t\tif hasattr(report,\"lat\"): \n\t\t\t\tif report.lat != None: gpsMsg.latitude = report.lat\n\t\t\tif hasattr(report,\"climb\"): \n\t\t\t\tif report.climb != None: gpsMsg.climb = report.climb\n\t\t\tif hasattr(report,\"speed\"):\n\t\t\t\tif report.speed != None: gpsMsg.speed = report.speed\n\t\t\tif hasattr(report,\"epx\"):\n\t\t\t\tif report.epx != None: gpsMsg.epx = report.epx\n\t\t\tif hasattr(report,\"epy\"): \n\t\t\t\tif report.epy != None: gpsMsg.epy = report.epy\n\t\t\tif hasattr(report,\"ept\"): \n\t\t\t\tif report.ept != None: gpsMsg.ept = report.ept\n\t\t\tif hasattr(report,\"eps\"): \n\t\t\t\tif report.eps != None: gpsMsg.eps = report.eps\n\t\t\tif hasattr(report,\"epv\"): \n\t\t\t\tif report.epv != None: gpsMsg.epv = report.epv\t\n\t\t# get number of satellites in view. Save to global instead of message\n\t\t# since it doesn't always get published\n\t\telif report[\"class\"] == \"SKY\":\n\t\t\tD.NSatellites = len(report.satellites)\n\t\t# also save satellite data in msg \t\n\t\tgpsMsg.NSatellites = D.NSatellites\n\t\t# publish the message\n \t\tD.gpsPub.publish(gpsMsg)\t\t\n\n\t#except:\n\t\t#rospy.logwarn(\"Failed to read next message\")",
"def main(cls, args):\n #cls.trainOfflineAndTest(100, 0.1, 0.1, 0.9);\n #cls.trainOfflineAndTest(500, 0.1, 0.1, 1.0);\n\n cls.trainer.teachActiveAndSaveStatistics(\"onlineTest\", 10, 0.8, 1.0 ,1.0, 0.0, 0.3, True, True,True);\n cls.trainer.teachActiveAndSaveStatistics(\"path\", 10, 0.0, 0.0, 0.0, 0.0, 0.0, True, False, False)\n\n #trainer.teachActiveAndSaveStatistics(\"onlineTest\", 10000, 0.1f, 0.1f, 1.0f, 0.0f, 0.1f,true, true, true);\n # \t\ttrainer.teachActiveAndSaveStatistics(\"onlineTest\", 10000, 0.1f, 0.1f, 1.0f, 0.0f, 0.1f,\n # \t\t\t\tfalse, true, true);\n # \t\t\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_first.net\", 10000, true);\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_secound.net\", 10000, true);\n #cls.testAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_first.net\", 10000, False)\n #cls.testAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_secound.net\", 10, False)\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1.net\", 10000, false);",
"def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)",
"def main():\n subjectlist = ['hel{}'.format(i) for i in range(1, 20) if i is not 9]\n logfile = setup_log(os.path.join(os.environ['hel'], 'logs',\n 'randomise_setup_fslmerge'))\n logfile.info('Setup for randomise.')\n logfile.info('Making a 4D data set by combining images')\n outdir = os.path.join(os.environ['hel'], 'graph_analyses',\n 'randomise_global_connectivity')\n for subclust_n in range(1, 4):\n outfilename = os.path.join(outdir,\n 'knnward_clst1_subclust{}_4Dfile'.format(\n subclust_n))\n mergefsl(logfile, make_file_list(subjectlist, subclust_n), outfilename)"
] |
[
"0.5969365",
"0.5918953",
"0.5864023",
"0.58417416",
"0.58233285",
"0.57631445",
"0.5671599",
"0.561224",
"0.5609736",
"0.5591403",
"0.55810434",
"0.557726",
"0.55602694",
"0.5552123",
"0.54858965",
"0.54581356",
"0.5456001",
"0.5453447",
"0.54224575",
"0.54124206",
"0.5400985",
"0.5395593",
"0.5394256",
"0.5384866",
"0.5349574",
"0.53451854",
"0.5335386",
"0.53329384",
"0.5331594",
"0.53175086"
] |
0.68774
|
0
|
The function is used to extract faces from face coordinates
|
def extract_faces(self, img, list_face_coord):
    # From the image array, extract the faces
    list_faces = []
    # Go through each set of face coordinates and append the
    # cropped face region to the list
    for coord in list_face_coord:
        left, top, right, bottom = coord
        list_faces.append(img[top:bottom, left:right])
    return list_faces
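# Hedged usage sketch, not part of the original class: a synthetic frame stands in for
# a real image, and the coordinates follow the (left, top, right, bottom) order that
# the slicing img[top:bottom, left:right] above implies.
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)        # dummy 640x480 BGR frame
coords = [(60, 40, 180, 200), (220, 35, 340, 190)]   # two hypothetical face boxes
crops = [img[t:b, l:r] for (l, t, r, b) in coords]   # same slicing as extract_faces
print([c.shape for c in crops])                      # [(160, 120, 3), (155, 120, 3)]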
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_faces(image):\n return (image.crop(face) for face in image.faces)",
"def detect_faces(image):\n\n face_locations = face_recognition.face_locations(image)\n return face_locations",
"def detect_face(self, img):\n # Fetch face location from the frame with 128 encoding of face landmarks\n curr_face_loc, name_list, info_list = load_encode_loc(img, self.kwn_names,\n self.kwn_encoding,\n self.status_list, self.since_list)\n print('Current value is ', curr_face_loc, name_list)\n face_list = []\n face_area = []\n print('face loc', curr_face_loc)\n if len(curr_face_loc):\n\n for (top, right, bottom, left), name in zip(curr_face_loc, name_list):\n print(top, right, bottom, left)\n cv2.rectangle(img, (top, right), (bottom, left), (0, 255, 2), 2)\n\n w = right - left\n h = bottom - top\n cx = left + w // 2\n cy = top + h // 2\n area = w * h\n\n for idx, info in enumerate(info_list):\n cv2.putText(img, info, (bottom, int(left * idx * 0.2)),\n cv2.FONT_HERSHEY_COMPLEX, 1,\n (0, 0, 255), 1)\n\n face_list.append([cx, cy])\n face_area.append(area)\n\n i = face_area.index(max(face_area))\n\n return img, [face_list[i], face_area[i]]\n\n else:\n return img, [[0, 0], 0]",
"def detect_face(self, img, img_file_path=None):\n #use dlib face detector\n #create dlib detector, this is hog with svm\n detector = dlib.get_frontal_face_detector()\n #win = dlib.image_window()\n if img_file_path:\n img = dlib.load_rgb_image(img_file_path)\n #detect number of faces in an image\n dets = detector(img)\n list_face_coord = [] # this will store left, top, right, bottom\n for i, d in enumerate(dets):\n list_face_coord.append((d.left(), d.top(), d.right(), d.bottom()))\n return list_face_coord",
"def faceRecognition(image):\r\n faceLandmarks = [[],[],[]]\r\n face_landmarks_list = face_recognition.face_landmarks(image)\r\n if len(face_landmarks_list)>0:\r\n if len(face_landmarks_list[0]['left_eye'])>0:\r\n leftEyePos = [tuple(map(lambda i: int(i/32),i)) for i in face_landmarks_list[0]['left_eye']]\r\n for i in set(leftEyePos):\r\n if leftEyePos.count(i)>=len(leftEyePos)//len(set(leftEyePos)):\r\n faceLandmarks[0] += [i,]\r\n if len(face_landmarks_list[0]['right_eye'])>0:\r\n rightEyePos = [tuple(map(lambda i: int(i/32),i)) for i in face_landmarks_list[0]['right_eye']]\r\n for i in set(rightEyePos):\r\n if rightEyePos.count(i)>=len(rightEyePos)//len(set(rightEyePos)):\r\n faceLandmarks[1] += [i,]\r\n if len(face_landmarks_list[0]['top_lip'])>0:\r\n mouthPos = [tuple(map(lambda i: int(i/32),i)) for i in (face_landmarks_list[0]['top_lip']+face_landmarks_list[0]['bottom_lip'])]\r\n for i in set(mouthPos):\r\n if mouthPos.count(i)>=len(mouthPos)//len(set(mouthPos)):\r\n faceLandmarks[2] += [i,]\r\n return faceLandmarks",
"def detectFaces():\n faceEngine = VLFaceEngine()\n detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)\n\n imageWithOneFace = VLImage.load(filename=EXAMPLE_O)\n pprint.pprint(detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False).asDict())\n detection = detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False)\n pprint.pprint(detector.redetectOne(image=imageWithOneFace, bBox=detection))\n pprint.pprint(detector.redetectOne(image=imageWithOneFace, bBox=detection.boundingBox.rect))\n\n imageWithSeveralFaces = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)\n severalFaces = detector.detect([imageWithSeveralFaces], detect5Landmarks=False, detect68Landmarks=False)\n\n pprint.pprint(\n detector.redetect(\n images=[\n ImageForRedetection(imageWithSeveralFaces, [face.boundingBox.rect for face in severalFaces[0]]),\n ImageForRedetection(imageWithOneFace, [detection.boundingBox.rect]),\n ImageForRedetection(imageWithOneFace, [Rect(0, 0, 1, 1)]),\n ]\n )\n )",
"def detect_face(self, img):\n #convert the test image to gray image as opencv face detector expects gray images\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n #let's detect multiscale (some images may be closer to camera than others) images\n #result is a list of faces\n faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5);\n\n #if no faces are detected then return None\n if (len(faces) == 0):\n return None, None\n\n #under the assumption that there will be only one face,\n #extract the face area\n (x, y, w, h) = faces[0]\n\n #return only the face part of the image\n return gray[y:y+w, x:x+h], faces[0]",
"def faces(self):\r\n return self._faces",
"def detect_faces(self, image):\n return self.face_detector(image, 1)",
"def face_detection(frame):\n if frame is None :\n return 0,0,0,0\n \n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags=cv2.CASCADE_SCALE_IMAGE\n )\n\n # Draw a rectangle around the faces\n position_x, position_y ,width,height = 0, 0, 0, 0\n for x, y, w, h in faces:\n position_x, position_y ,width,height = x, y, w, h\n\n return position_x, position_y,width,height",
"def _box_faces(image):\n for face in image.faces:\n _box_face(image, face)\n return image",
"def extract_face_detections(self):\n self.detector.setInput(self.image_blob)\n self.detections = self.detector.forward()",
"def face_coords(img, model):\n (h, w) = img.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))\n\n model.setInput(blob)\n detections = model.forward()\n\n box = detections[0, 0, 0, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n return (startX, startY, endX, endY)",
"def detect_faces(self, img):\n with tf.Graph().as_default():\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=.7)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with sess.as_default():\n pnet, rnet, onet = detect_face.create_mtcnn(sess, None)\n\n minsize = 20 # minimum size of face\n threshold = [ 0.6, 0.7, 0.7 ] # three steps's threshold\n factor = 0.709 # scale factor\n\n bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n\n nrof_faces = bounding_boxes.shape[0]\n img_size = np.asarray(img.shape)[0:2]\n\n faces = []\n faces_rects = []\n\n for i in range(nrof_faces):\n det = bounding_boxes[i,0:4]\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0]-5/2, 0)\n bb[1] = np.maximum(det[1]-5/2, 0)\n bb[2] = np.minimum(det[2]+5/2, img_size[1])\n bb[3] = np.minimum(det[3]+5/2, img_size[0])\n faces.append(img[bb[1]:bb[3], bb[0]:bb[2], :])\n faces_rects.append({'name': 'none', 'x': bb[0], 'y': bb[1], 'w': bb[2]-bb[0], 'h': bb[3]-bb[1]})\n\n return [img, faces, faces_rects]",
"def _getface_cascade(self,img,mode):\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n faces = self.model.detectMultiScale(gray, 1.3, 5)\n if len(faces)==0:\n return None\n x,y,w,h = faces[0]\n top, right, bottom, left = self._change_shape(x,y,w,h)\n\n if mode == 1 :\n out = [x,y,w,h]\n elif mode == 2 :\n out = [top, right, bottom, left]\n return out",
"def get_face_features(frame, face):\r\n\r\n import math\r\n\r\n def distance(p1, p2):\r\n \"\"\"\r\n Calculate euclidean distance between two points\r\n \"\"\"\r\n return math.hypot(p1[0] - p2[0], p1[1] - p2[1])\r\n\r\n if f_type == \"LD\":\r\n distances = []\r\n for couple in [(50, 58), (61, 67), (51, 57), (62, 66), (52, 56), (63, 65), (48, 54),\r\n (60, 64), (49, 59), (53, 55)]:\r\n a_indexes = (couple[0], couple[0] + 68)\r\n b_indexes = (couple[1], couple[1] + 68)\r\n\r\n a = (video_features[frame][face][a_indexes[0]], video_features[frame][face][a_indexes[1]])\r\n\r\n b = (video_features[frame][face][b_indexes[0]], video_features[frame][face][b_indexes[1]])\r\n\r\n distances.append(distance(a, b))\r\n return distances\r\n else:\r\n return video_features[frame][face][136:]",
"def get_face(self, image):\n face = self.__detect_face(image)[0]\n x1, y1, x2, y2, _, _ = face.left(), face.top(), \\\n face.right() + 1, face.bottom() + 1, face.width(), face.height()\n return image[y1:y1 + (y2 - y1), x1:x1 + (x2 - x1), :]",
"def face_detector(img):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # faceCascade imports in the previously made classifier\n faceCascade = cv2.CascadeClassifier('src/face_detection/haarcascade_frontalface_default.xml')\n faces = faceCascade.detectMultiScale(\n gray, \n scaleFactor=1.2,\n minNeighbors=1, \n minSize=(100, 100)\n )\n\n return faces",
"def get_faces(self, img):\n results = self.detect_and_draw(img, False)\n (width, height) = cv.GetSize(img)\n for result in results:\n self.add_property('name', 'face')\n self.add_property('pixel_location', result[0])\n self.add_property('relative_location', (float(result[0][0]) / width, float(result[0][1]) / height))\n self.add_property('angle_and_distance', result[1])\n self.add_property('face_area', result[2])\n self.add_property('width', result[4])\n self.add_property('height', result[5])\n self.add_property('relative_face_area', (float(result[2]) / (width * height)))\n self.add_property('confidence', result[3])\n self.add_property('dimensions', (width, height))\n self.add_property('rotation', self.rotation)\n self.store_observation()",
"def detect_face_api(self, img):\n\n curr_face_loc, name_list, info_list = load_encode_loc(img, self.kwn_names,\n self.kwn_encoding,\n self.status_list, self.since_list)\n print('Current value is ', curr_face_loc, name_list)\n face_list = []\n face_area = []\n print('face loc', curr_face_loc)\n if len(curr_face_loc):\n\n for (top, right, bottom, left), name in zip(curr_face_loc, name_list):\n print(top, right, bottom, left)\n cv2.rectangle(img, (top, right), (bottom, left), (0, 255, 2), 2)\n\n w = right - left\n h = bottom - top\n cx = left + w // 2\n cy = top + h // 2\n area = w * h\n\n for idx, info in enumerate(info_list):\n cv2.putText(img, info, (bottom, int(left * idx * 0.2)),\n cv2.FONT_HERSHEY_COMPLEX, 1,\n (0, 0, 255), 1)\n\n face_list.append([cx, cy])\n face_area.append(area)\n\n i = face_area.index(max(face_area))\n\n return img, [face_list[i], face_area[i]]\n\n else:\n return img, [[0, 0], 0]",
"def findFaces(self):\n\t\trects = self.detectAll()\n\t\tif len(rects)==0:\n\t\t\trects = []\n\t\telse:\n\t\t\trects[:, 2:] += rects[:, :2]\n\t\tself.analyzeFrame(rects)",
"def get_faces(self, image):\n\t\t\n\t\t# Convert the image to grayscale and normalise\n\t\tcv.CvtColor(image, self.gray, cv.CV_BGR2GRAY)\n\t\tcv.EqualizeHist(self.gray, self.gray)\n\t\t\n\t\t# Detect faces\n\t\treturn cv.HaarDetectObjects(self.gray, self.cascade, self.storage,\n\t\t scale_factor = 1.3,\n\t\t min_neighbors = 2,\n\t\t flags = cv.CV_HAAR_DO_CANNY_PRUNING,\n\t\t min_size = (40,40))",
"def _raw_face_locations(img, number_of_times_to_upsample=1):\n return face_detector(img, number_of_times_to_upsample)",
"def get_faces(self):\n faces = []\n for j in range(0, self.height - 1):\n for i in range(0, self.width - 1):\n # add the two triangle faces\n tl = (j * self.width) + i\n tr = (j * self.width) + i + 1\n bl = ((j+1) * self.width) + i\n br = ((j+1) * self.width) + i + 1\n\n face = [bl, tr, tl]\n faces.append(face)\n face = [bl, br, tr]\n faces.append(face)\n return faces",
"def get_faceboxes(self,img,threshold):\n faces = self.mark_detector.extract_cnn_facebox(img,threshold)\n \n\n\n faceboxes = []\n for face in faces:\n start = (face[0],face[1])\n end = (face[2],face[3])\n box = Box.setStartEnd(start,end)\n\n faceboxes.append(box)\n \n return faceboxes",
"def _raw_face_locations(img, number_of_times_to_upsample=1, model=\"hog\"):\n if model == \"cnn\":\n cnn_face_detector = dlib.cnn_face_detection_model_v1('mmod_human_face_detector.dat')\n return cnn_face_detector(img, number_of_times_to_upsample)\n else:\n # face_detector = dlib.get_frontal_face_detector()\n return face_detector(img, number_of_times_to_upsample)",
"def detector(videoframe, facedetection, maskdetection):\n (h, w) = videoframe.shape[:2]\n blobimage = cv2.dnn.blobFromImage(videoframe, 1.0, (224, 224), (104.0, 177.0, 123.0))\n\n facedetection.setInput(blobimage)\n ffinding = facedetection.forward()\n\n face_list = []\n locations = []\n predictions = []\n\n for i in range(0, ffinding.shape[2]):\n credence = ffinding[0, 0, i, 2]\n if credence > 0.6:\n case = ffinding[0, 0, i, 3:7] * np.array([w, h, w, h])\n (x_start, y_start, x_end, y_end) = case.astype(\"int\")\n (x_start, y_start) = (max(0, x_start), max(0, y_start))\n (x_end, y_end) = (min(w - 1, x_end), min(h - 1, y_end))\n\n image = videoframe[y_start:y_end, x_start:x_end]\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (224, 224))\n image = img_to_array(image)\n image = preprocess_input(image)\n face_list.append(image)\n locations.append((x_start, y_start, x_end, y_end))\n\n if len(face_list) > 0:\n face_list = np.array(face_list, dtype=\"float32\")\n predictions = maskdetection.predict(face_list, batch_size=32)\n return (locations, predictions)",
"def _getface_hog_cnn(self,img,mode):\n faces = face_locations(img,number_of_times_to_upsample=1,model=self.model_name)\n if len(faces)==0:\n return None\n if mode == 1:\n out = faces[0]\n elif mode ==2 :\n top,right,bottom,left = faces[0]\n x,y,w,h = int(left), int(top), int(right-left+1), int(bottom-top+1)\n out = [x,y,w,h]\n return out",
"def faces(self, image):\n\n response = self._send_request(\"faces\", files=dict(image=image))\n return response['objectdetection']",
"def face_detection(image, xml_path):\n\n face_cascade = cv2.CascadeClassifier(xml_path)\n faces = face_cascade.detectMultiScale(image, 1.3, 5)\n\n images = []\n\n for face in faces:\n x_beginning, y_beginning, face_width, face_height = face\n roi_img = image[y_beginning:y_beginning + face_height, x_beginning:x_beginning + face_width]\n\n images.append(roi_img)\n\n return faces, images"
] |
[
"0.7587838",
"0.7559992",
"0.71235484",
"0.70463777",
"0.703729",
"0.6998884",
"0.6969824",
"0.69670296",
"0.6928266",
"0.6918168",
"0.69046986",
"0.690407",
"0.69037646",
"0.6871529",
"0.68654686",
"0.6846667",
"0.68419194",
"0.68375146",
"0.68206996",
"0.67943585",
"0.67732865",
"0.67554593",
"0.668884",
"0.66594565",
"0.665774",
"0.6650192",
"0.66472304",
"0.66434956",
"0.6637729",
"0.6615887"
] |
0.78909254
|
0
|
Takes in an Image object. Resizes to terminal window size, maps pixel darkness to characters, and matches pixel colors to closest colors available to xterm. Returns CursesFrame.
|
def render_frame(self, image):
arr = np.array(image.resize(self.curses_shape))
characters = self.character_transformer.map_pixels_to_characters(arr)
colors = self.color_transformer.nearest_neighbors(arr)
return CursesFrame(characters, colors)
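# Hedged sketch of the darkness-to-character step only; the renderer's actual
# CharacterTransformer and ColorTransformer are not shown here, so this is an
# independent illustration of the idea the query describes, not their implementation.
import numpy as np

ramp = np.array(list(" .:-=+*#%@"))                   # glyphs from darkest to brightest
rgb = np.random.randint(0, 256, size=(24, 80, 3))     # stand-in for a resized frame
luma = rgb @ np.array([0.299, 0.587, 0.114])          # perceptual brightness per pixel
chars = ramp[(luma / 256 * len(ramp)).astype(int)]    # one character per pixel
print("".join(chars[0]))                              # first terminal row as text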
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def convertDepthFrame(self):\n try:\n \"\"\" \n Convert Depth frame to rudimentary colormap\n \"\"\"\n self.DepthHSV[...,0] = self.currentDepthFrame\n self.DepthHSV[...,1] = 0x9F\n self.DepthHSV[...,2] = 0xFF\n self.DepthCM = cv2.cvtColor(self.DepthHSV,cv2.COLOR_HSV2RGB)\n cv2.drawContours(self.DepthCM,self.block_contours,-1,(0,0,0),3)\n cv2.drawContours(self.DepthCM,self.block_contours_2,-1,(0,0,0),3)\n cv2.drawContours(self.DepthCM,self.block_contours_3,-1,(0,0,0),3)\n\n img = QImage(self.DepthCM,\n self.DepthCM.shape[1],\n self.DepthCM.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None",
"def convertDepthFrame(self):\n try:\n\n \"\"\" \n Convert Depth frame to rudimentary colormap\n \"\"\"\n self.DepthHSV[...,0] = self.currentDepthFrame\n self.DepthHSV[...,1] = 0x9F\n self.DepthHSV[...,2] = 0xFF\n self.DepthCM = cv2.cvtColor(self.DepthHSV,cv2.COLOR_HSV2RGB)\n cv2.drawContours(self.DepthCM,self.block_contours,-1,(0,0,0),3)\n\n img = QImage(self.DepthCM,\n self.DepthCM.shape[1],\n self.DepthCM.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None",
"def get_frame_extracted_image(img):\n\n max_window_size = 0.1\n steps = 25\n offset = 4\n img = re.remove_border(img, steps, max_window_size, offset)\n return img",
"def color_window():\n # Create the image and convert it to HSV\n img = cv2.imread('../game.png', cv2.IMREAD_COLOR)\n img = scale_image(img, 0.5)\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n orig = deepcopy(img)\n cv2.imshow('orig', orig)\n\n # Set up the window\n win_name = 'color_gui'\n cv2.namedWindow(win_name)\n\n # Trackbars (name, min, max, [start value])\n trackbars = [\n ['H', 0, 255, 63],\n ['Tol', 0, 10, 5],\n ['Open', 1, 15, 1],\n ['S Low', 0, 255],\n ['S High', 0, 255, 255],\n ['V Low', 0, 255],\n ['V High', 0, 255, 255]\n ]\n trackbar_names = [trackbar[0] for trackbar in trackbars] # Just the names\n\n # Add the trackbars to the window\n for trackbar in trackbars:\n def nothing(x): pass\n cv2.createTrackbar(trackbar[0], win_name, trackbar[1], trackbar[2], nothing)\n if len(trackbar) > 3: cv2.setTrackbarPos(trackbar[0], win_name, trackbar[3]) #Set an initial value\n\n values = \"don\\'t press p when window starts\".split()\n while 1:\n k = cv2.waitKey(50) & 0xFF\n if k == 27:\n break\n\n #Get the values\n values = {}\n slider_vals = [cv2.getTrackbarPos(name, win_name) for name in trackbar_names]\n for name, value in zip(trackbar_names, slider_vals):\n values[name] = value\n\n #Threshold out the colors\n hue_thresh = values['H']\n tol = values['Tol']\n lower_h = max(hue_thresh - tol, 0)\n upper_h = min(hue_thresh + tol, 255)\n lowerb = (lower_h, values['S Low'], values['V Low'])\n upperb = (upper_h, values['S High'], values['V High'])\n\n #Filter out the color\n mask = cv2.inRange(hsv, lowerb, upperb)\n img = cv2.bitwise_and(orig, orig, mask=mask)\n\n #Morphological transformation\n m = values['Open']\n # img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, np.ones((m,m), np.uint8))\n\n #Draw number of contours onto the screen\n hsv2 = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n cont_mask = cv2.inRange(hsv2, lowerb, upperb)\n contours, _ = cv2.findContours(cont_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n contours = filter_out_contours(contours)\n img = cv2.drawContours(img, contours, -1, 255, 2)\n img = cv2.putText(img, str(len(contours)), (10,30), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 1, 255, 2, cv2.LINE_AA)\n\n cv2.imshow('filtered', img)",
"def _size_term(self):\n curr_dim = self._screen.getmaxyx()\n dims = [50, 150]\n if curr_dim[0] < dims[0] or curr_dim[1] < dims[1]:\n sys.stdout.write(\"\\x1b[8;{rows};{cols}t\".format(rows=dims[0], cols=dims[1]))\n curses.resize_term(dims[0], dims[1])\n return dims\n else:\n return curr_dim",
"def draw_color_bar(frame):\n depth = 35\n cv2.rectangle(frame, (0,0), (72,depth - 1), (0,0,0), 2) # white/eraser\n cv2.rectangle(frame, (72,0), (138,depth), (0,0,0), -1) # black\n cv2.rectangle(frame, (138,0), (204,depth), (122,78,32), -1) # brown\n cv2.rectangle(frame, (204,0), (270,depth), (242,0,255), -1) # violet/purple\n cv2.rectangle(frame, (270,0), (336,depth), (0,0,255), -1) # blue\n cv2.rectangle(frame, (336,0), (402,depth), (63,255,0), -1) # green\n cv2.rectangle(frame, (402,0), (468,depth), (255,250,0), -1) # yellow\n cv2.rectangle(frame, (468,0), (534,depth), (255,174,0), -1) # orange\n cv2.rectangle(frame, (534,0), (600,depth), (255,0,0), -1) # red",
"def get_dark_channel(self,img, *, size):\n #Extract the dark/hazy part from the image\n minch = np.amin(img, axis=2)\n box = cv2.getStructuringElement(cv2.MORPH_RECT, (size // 2, size // 2))\n return cv2.erode(minch, box)",
"def darkText(img):\n kernel = np.ones((30, 30), np.uint8) \n img_orig = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)\n \n TH = 150\n img_orig[(img_orig[:,:,0] < TH) | (img_orig[:,:,1] < TH) | (img_orig[:,:,2] < TH)] = (0,0,0)\n \n img_orig = closing(img_orig, size=(1, int(img.shape[1] / 8)))\n \n return (cv2.cvtColor(img_orig, cv2.COLOR_BGR2GRAY) != 0).astype(np.uint8)",
"def sub_brightbg(self, ansimatch):\n return self.ansi_xterm256_bright_bg_map_dict.get(ansimatch.group(), \"\")",
"def setup_colors(self):\n curses.start_color()\n curses.use_default_colors()\n\n if curses.can_change_color(): # Can't get these to work :(\n #curses.init_color(11, 254, 0, 1000)\n pass\n\n # This only works with: TERM=xterm-256color\n curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)\n curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_BLUE)\n curses.init_pair(3, curses.COLOR_RED, curses.COLOR_BLUE)\n curses.init_pair(4, curses.COLOR_WHITE, -1)\n\n # Higlight colors:\n black = curses.COLOR_BLACK\n curses.init_pair(10, -1, -1) # Default (white on black)\n\n curses.init_pair(11, curses.COLOR_BLUE, black)\n curses.init_pair(12, curses.COLOR_CYAN, black)\n curses.init_pair(13, curses.COLOR_GREEN, black)\n curses.init_pair(14, curses.COLOR_MAGENTA, black)\n curses.init_pair(15, curses.COLOR_RED, black)\n curses.init_pair(17, curses.COLOR_YELLOW, black)\n curses.init_pair(16, curses.COLOR_WHITE, black)\n\n # Better colors\n try:\n # TODO: Define RGB forthese to avoid getting\n # different results in different terminals\n curses.init_pair(11, 69, black) # blue\n curses.init_pair(12, 81, black) # cyan\n curses.init_pair(13, 119, black) # green\n curses.init_pair(14, 171, black) # magenta\n curses.init_pair(15, 197, black) # red\n curses.init_pair(17, 221, black) # yellow\n except:\n self.app.logger.log(\"Enhanced colors failed to load.\")",
"def scan_single_win_size(image, clf, rescale, hyperparams, box_color=(0,0,1.0), heatmap=None):\n # get important hyperparams\n y_start = hyperparams['Y_START']\n y_stop = hyperparams['Y_STOP']\n color_space = hyperparams['COLOR_SPACE']\n spatial_feat = hyperparams['SPATIAL_FEAT']\n spatial_size = hyperparams['SPATIAL_SIZE']\n hist_feat = hyperparams['HIST_FEAT']\n hist_bin = hyperparams['HIST_BIN']\n hist_range = hyperparams['HIST_RANGE']\n hog_orient = hyperparams['HOG_ORIENT']\n hog_cell_per_block = hyperparams['HOG_CELL_PER_BLOCK']\n hog_pix_per_cell = hyperparams['HOG_PIX_PER_CELL']\n hog_feat = hyperparams['HOG_FEAT']\n hog_channel = hyperparams['HOG_CHANNEL']\n # if heatmap was not provided, initialize it\n if heatmap is None:\n heatmap = np.zeros(image.shape[0:2])\n # copy image for visualization purposes\n draw_image = np.zeros_like(image)\n # image to be analyzed is color-converted to target color space and resized to match\n # appropriately rescaled window size\n rescaled_image = convert_color(image[y_start:y_stop, :, :], color_space)\n if rescale != 1:\n imshape = image.shape\n rescaled_image = cv2.resize(rescaled_image, (np.int(imshape[1] / rescale), np.int(imshape[0] / rescale)))\n # Define number of blocks\n n_x_blocks = (rescaled_image.shape[1] // hog_pix_per_cell) - hog_cell_per_block + 1\n n_y_blocks = (rescaled_image.shape[0] // hog_pix_per_cell) - hog_cell_per_block + 1\n window = 64 # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell\n blocks_per_window = (window // hog_pix_per_cell) - hog_cell_per_block + 1\n cells_per_step = 2 # Instead of overlap, define how many cells to step\n n_x_steps = (n_x_blocks - blocks_per_window) // cells_per_step\n n_y_steps = (n_y_blocks - blocks_per_window) // cells_per_step\n # Prepare the hog features\n hogs = []\n if hog_feat:\n for channel in range(0, rescaled_image.shape[2]):\n hogs.append(get_hog_features(rescaled_image[:, :, channel],\n hog_orient, hog_pix_per_cell, hog_cell_per_block, feature_vec=False))\n # Go through blocks of image step by step\n for x_window in range(n_x_steps):\n for y_window in range(n_y_steps):\n y_pos = y_window * cells_per_step\n x_pos = x_window * cells_per_step\n x_left = x_pos * hog_pix_per_cell\n y_top = y_pos * hog_pix_per_cell\n sub_image = rescaled_image[y_top:y_top + window, x_left:x_left + window]\n # Extract HOG for this patch\n features = []\n if spatial_feat:\n features.append(get_spatial_features(sub_image, size=spatial_size))\n if hist_feat:\n features.append(get_hist_features(sub_image, nbins=hist_bin, bins_range=hist_range)[4])\n if hog_feat:\n if hog_channel == \"ALL\":\n for h in hogs:\n features.append(h[y_pos:y_pos+blocks_per_window, x_pos:x_pos+blocks_per_window].ravel())\n else:\n features.append(hogs[hog_channel][y_pos:y_pos+blocks_per_window, x_pos:x_pos+blocks_per_window].ravel())\n # Scale features and make a prediction\n test_features = np.hstack(features).reshape(1, -1)\n test_prediction = clf.predict(test_features)\n if test_prediction == 1:\n # If prediction is true, re-calculate sliding window position\n x_box_left = np.int(x_left * rescale)\n y_box_top = np.int(y_top * rescale)\n box_size = np.int(window * rescale)\n # if box color was set, draw box on image\n if box_color is not None:\n cv2.rectangle(draw_image, (x_box_left, y_box_top + y_start),\n (x_box_left + box_size, y_box_top + box_size + y_start), box_color, 6)\n # add heat to heatmap\n heatmap[y_box_top+y_start:y_box_top+box_size+y_start, x_box_left:x_box_left+box_size] += \\\n 
clf.decision_function(test_features)\n return draw_image, heatmap",
"def get_frame_with_visual(img, action, state, action_pos= False):\n fig = plt.figure(figsize=(1, 1), dpi=64)\n fig.add_subplot(111)\n plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)\n\n axes = plt.gca()\n plt.cla()\n axes.axis('off')\n plt.imshow(img, zorder=0)\n axes.autoscale(False)\n\n if action_pos:\n p = mujoco_to_imagespace(action)\n else:\n p = mujoco_to_imagespace(state + .05 * action)\n\n state = mujoco_to_imagespace(state)\n\n plt.plot(state[1], state[0], zorder=1, marker='o', color='r')\n\n yaction = np.array([state[0], p[0]])\n xaction = np.array([state[1], p[1]])\n plt.plot(xaction, yaction, zorder=1, color='y', linewidth=3)\n\n fig.canvas.draw() # draw the canvas, cache the renderer\n\n data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n\n # plt.show()\n # Image.fromarray(data).show()\n # pdb.set_trace()\n\n return data",
"def dark(s='dark'):\n s = s.strip()[:80] #truncate to 80 char to fit in FITS header\n print camera.SetShutter(2)\n camera.status.imgtype = 'DARK'\n camera.status.object = s\n camera.status.update()",
"def sub_xterm256(self, rgbmatch, use_xterm256=False, color_type=\"fg\"):\n if not rgbmatch:\n return \"\"\n\n # get tag, stripping the initial marker\n # rgbtag = rgbmatch.group()[1:]\n\n background = color_type in (\"bg\", \"gbg\")\n grayscale = color_type in (\"gfg\", \"gbg\")\n\n if not grayscale:\n # 6x6x6 color-cube (xterm indexes 16-231)\n try:\n red, green, blue = [int(val) for val in rgbmatch.groups() if val is not None]\n except (IndexError, ValueError):\n logger.log_trace()\n return rgbmatch.group(0)\n else:\n # grayscale values (xterm indexes 0, 232-255, 15) for full spectrum\n try:\n letter = [val for val in rgbmatch.groups() if val is not None][0]\n except IndexError:\n logger.log_trace()\n return rgbmatch.group(0)\n\n if letter == \"a\":\n colval = 16 # pure black @ index 16 (first color cube entry)\n elif letter == \"z\":\n colval = 231 # pure white @ index 231 (last color cube entry)\n else:\n # letter in range [b..y] (exactly 24 values!)\n colval = 134 + ord(letter)\n\n # ansi fallback logic expects r,g,b values in [0..5] range\n gray = round((ord(letter) - 97) / 5.0)\n red, green, blue = gray, gray, gray\n\n if use_xterm256:\n\n if not grayscale:\n colval = 16 + (red * 36) + (green * 6) + blue\n\n return \"\\033[%s8;5;%sm\" % (3 + int(background), colval)\n # replaced since some clients (like Potato) does not accept codes with leading zeroes,\n # see issue #1024.\n # return \"\\033[%s8;5;%s%s%sm\" % (3 + int(background), colval // 100, (colval % 100) // 10, colval%10) # noqa\n\n else:\n # xterm256 not supported, convert the rgb value to ansi instead\n rgb = (red, green, blue)\n\t\n def _convert_for_ansi(val):\n return int((val+1)//2)\n\n # greys\n if (max(rgb) - min(rgb)) <= 1:\n match rgb:\n case (0,0,0):\n return ANSI_BACK_BLACK if background else ANSI_NORMAL + ANSI_BLACK\n case ((1|2), (1|2), (1|2)):\n return ANSI_BACK_BLACK if background else ANSI_HILITE + ANSI_BLACK\n case ((2|3), (2|3), (2|3)):\n return ANSI_BACK_WHITE if background else ANSI_NORMAL + ANSI_WHITE\n case ((3|4), (3|4), (3|4)):\n return ANSI_BACK_WHITE if background else ANSI_NORMAL + ANSI_WHITE\n case ((4|5), (4|5), (4|5)):\n return ANSI_BACK_WHITE if background else ANSI_HILITE + ANSI_WHITE\n\n match tuple(_convert_for_ansi(c) for c in rgb):\n # red\n case ((2|3), (0|1), (0|1)):\n return ANSI_BACK_RED if background else ANSI_HILITE + ANSI_RED\n case ((1|2), 0, 0):\n return ANSI_BACK_RED if background else ANSI_NORMAL + ANSI_RED\n # green\n case ((0|1), (2|3), (0|1)):\n return ANSI_BACK_GREEN if background else ANSI_HILITE + ANSI_GREEN\n case ((0 | 1), 1, 0) if green > red:\n return ANSI_BACK_GREEN if background else ANSI_NORMAL + ANSI_GREEN\n # blue\n case ((0|1), (0|1), (2|3)):\n return ANSI_BACK_BLUE if background else ANSI_HILITE + ANSI_BLUE\n case (0, 0, 1):\n return ANSI_BACK_BLUE if background else ANSI_NORMAL + ANSI_BLUE\n # cyan\n case ((0|1|2), (2|3), (2|3)) if red == min(rgb):\n return ANSI_BACK_CYAN if background else ANSI_HILITE + ANSI_CYAN\n case (0, (1|2), (1|2)):\n return ANSI_BACK_CYAN if background else ANSI_NORMAL + ANSI_CYAN\n # yellow\n case ((2|3), (2|3), (0|1|2)) if blue == min(rgb):\n return ANSI_BACK_YELLOW if background else ANSI_HILITE + ANSI_YELLOW\n case ((2|1), (2|1), (0|1)):\n return ANSI_BACK_YELLOW if background else ANSI_NORMAL + ANSI_YELLOW\n # magenta\n case ((2|3), (0|1|2), (2|3)) if green == min(rgb):\n return ANSI_BACK_MAGENTA if background else ANSI_HILITE + ANSI_MAGENTA\n case ((1|2), 0, (1|2)):\n return ANSI_BACK_MAGENTA if background else ANSI_NORMAL + 
ANSI_MAGENTA",
"def showColors(self):\n\t\tcolors = ['white', 'red', 'green', 'orange', 'blue', 'purple', 'cyan', 'lightgrey',\n\t\t\t\t 'darkgrey', 'light red', 'light green', 'yellow', 'light blue', 'purple', 'cyan', 'dark white']\n\t\tmax = curses.COLORS if curses.COLORS <= 16 else 16\n\t\tself.screen.clear()\n\t\tfor c in range(0, max):\n\t\t\tself.wts(c + 2, 1, \"color \" + str(c) + ' : ' + colors[c], c)\n\t\tself.wts(18, 1, \"color 16 : red on white\", 16)\n\t\tself.wts(20, 1, 'Color demo, displaying ' + str(max) + ' colors + 1 special')\n\t\tself.screen.refresh()\n\t\tch = False\n\t\twhile not ch:\n\t\t\tch = self.screen.getch()\n\t\tself.exit('Color demo complete')",
"def output_screen_diff(output, screen, current_pos, previous_screen=None, last_char=None, accept_or_abort=False, style=None, grayed=False):\n #: Remember the last printed character.\n last_char = [last_char] # nonlocal\n background_turned_on = [False] # Nonlocal\n\n #: Variable for capturing the output.\n write = output.write\n\n def move_cursor(new):\n current_x, current_y = current_pos.x, current_pos.y\n\n if new.y > current_y:\n # Use newlines instead of CURSOR_DOWN, because this meight add new lines.\n # CURSOR_DOWN will never create new lines at the bottom.\n # Also reset attributes, otherwise the newline could draw a\n # background color.\n output.reset_attributes()\n write('\\r\\n' * (new.y - current_y))\n current_x = 0\n output.cursor_forward(new.x)\n last_char[0] = None # Forget last char after resetting attributes.\n return new\n elif new.y < current_y:\n output.cursor_up(current_y - new.y)\n\n if current_x >= screen.size.columns - 1:\n write('\\r')\n output.cursor_forward(new.x)\n elif new.x < current_x or current_x >= screen.size.columns - 1:\n output.cursor_backward(current_x - new.x)\n elif new.x > current_x:\n output.cursor_forward(new.x - current_x)\n\n return new\n\n def get_style_for_token(token, replace_if_grayed=True):\n \"\"\"\n Get style\n \"\"\"\n # If grayed, replace token\n if grayed and replace_if_grayed:\n token = Token.Aborted\n\n try:\n return style.style_for_token(token)\n except KeyError:\n return None\n\n def chars_are_equal(new_char, old_char):\n \"\"\"\n Test whether two `Char` instances are equal if printed.\n \"\"\"\n new_token = Token.Aborted if grayed else new_char.token\n\n # We ignore z-index, that does not matter if things get painted.\n return new_char.char == old_char.char and new_token == old_char.token\n\n def output_char(char):\n \"\"\"\n Write the output of this charact.r\n \"\"\"\n # If the last printed character has the same token, it also has the\n # same style, so we don't output it.\n if last_char[0] and last_char[0].token == char.token:\n write(char.char)\n else:\n style = get_style_for_token(char.token)\n\n if style:\n output.set_attributes(style['color'], style['bgcolor'],\n bold=style.get('bold', False),\n underline=style.get('underline', False))\n\n # If we print something with a background color, remember that.\n background_turned_on[0] = bool(style['bgcolor'])\n else:\n # Reset previous style and output.\n output.reset_attributes()\n\n write(char.char)\n\n last_char[0] = char\n\n\n # Disable autowrap\n if not previous_screen:\n output.disable_autowrap()\n output.reset_attributes()\n\n # When the previous screen has a different size, redraw everything anyway.\n if not previous_screen or previous_screen.size != screen.size:\n current_pos = move_cursor(Point(0, 0))\n output.reset_attributes()\n output.erase_down()\n\n previous_screen = Screen(screen.size)\n\n # Get height of the screen.\n # (current_height changes as we loop over _buffer, so remember the current value.)\n current_height = screen.current_height\n\n # Loop over the rows.\n row_count = max(screen.current_height, previous_screen.current_height)\n c = 0 # Column counter.\n\n for y, r in enumerate(range(0, row_count)):\n new_row = screen._buffer[r]\n previous_row = previous_screen._buffer[r]\n\n new_max_line_len = max(new_row.keys()) if new_row else 0\n previous_max_line_len = max(previous_row.keys()) if previous_row else 0\n\n # Loop over the columns.\n c = 0\n while c < new_max_line_len + 1:\n char_width = (new_row[c].get_width() or 1)\n\n if not 
chars_are_equal(new_row[c], previous_row[c]):\n current_pos = move_cursor(Point(y=y, x=c))\n output_char(new_row[c])\n current_pos = current_pos._replace(x=current_pos.x + char_width)\n\n c += char_width\n\n # If the new line is shorter, trim it\n if previous_screen and new_max_line_len < previous_max_line_len:\n current_pos = move_cursor(Point(y=y, x=new_max_line_len+1))\n output.reset_attributes()\n output.erase_end_of_line()\n last_char[0] = None # Forget last char after resetting attributes.\n\n # Move cursor:\n if accept_or_abort:\n current_pos = move_cursor(Point(y=current_height, x=0))\n output.erase_down()\n else:\n current_pos = move_cursor(screen.get_cursor_position())\n\n if accept_or_abort:\n output.reset_attributes()\n output.enable_autowrap()\n\n # If the last printed character has a background color, always reset.\n # (Many terminals give weird artifacs on resize events when there is an\n # active background color.)\n if background_turned_on[0]:\n output.reset_attributes()\n last_char[0] = None\n\n return current_pos, last_char[0]",
"def __calc_size(self):\r\n maxyx = self.stdscr.getmaxyx()\r\n self.termwidth = maxyx[1]\r\n self.termheight = maxyx[0]\r\n self.posx = 0\r\n self.posy = 0\r\n self.width = self.termwidth\r\n self.height = self.termheight\r\n self.calc_size()",
"def retrieveColor(image):\n w, h, dim = image.shape\n ret = np.zeros((w, h, dim), dtype=np.uint8)\n for i in range(w):\n for j in range(h):\n ret[i][j] = fakingColors(image[i][j])\n return np.clip(ret, 0, 255)",
"def __init__(self, y, x, height, width):\n\n self.window = curses.newwin(height, width, y, x)\n self.window.keypad(1)\n self.window.scrollok(True)\n self.scrolling = 0\n self.width = width\n self.height = height\n self.y = y\n self.x = x\n self.fg = \"W\"\n self.bg = None",
"def check_image_color(image):\n\n def check_color(i, j, k):\n \"\"\" Function used only for DEBUGGING\"\"\"\n img.show()\n image = Image.new(\"RGB\", (200, 200), (int(Y), int(Y), int(Y)))\n image.show()\n image = Image.new(\"RGB\", (200, 200), (int(i), int(j), int(k)))\n image.show()\n\n if not os.path.isfile(image):\n return \"Image not found\"\n\n def calculate_bgr(data):\n average_color_per_row = numpy.average(data, axis=0)\n average_color = numpy.average(average_color_per_row, axis=0)\n return tuple(average_color)\n\n def calculate_y(r, g, b):\n alpha = 0.299\n betta = 0.587\n gamma = 0.114\n return alpha * r + betta * g + gamma * b\n\n # split the image for four squares calucate averate pixel for them and take higest value\n # blure image and save to /Library/Caches as com.apple.desktop.admin.png\n # in case using blur tool --> blur = cv2.blur(img,(5,5))\n try:\n img_cv_data = cv2.imread(image)\n B, G, R = calculate_bgr(img_cv_data)\n Y = calculate_y(B, G, R)\n height, width = img_cv_data.shape[:2]\n except Exception as err:\n print(f\"[ERROR] {err} with image: {image}\")\n return \"Error parsing image\"\n\n # image detection\n if Y < 72.0:\n _type = \"dark\"\n elif Y >= 73.0 and Y <= 108.0:\n _type = \"evening\"\n else:\n _type = \"light\"\n\n return _type",
"def cut_main_screen(im):\n top = 0\n left = 0\n bottom = 980\n right = 1350\n return im[top:bottom, left:right].copy()",
"def _color(self):\n # get my renderer\n renderer = self.renderer\n # sign on\n yield \"\"\n yield renderer.commentLine(\"color support\")\n\n # sniff the terminal type\n yield renderer.commentLine(\"initialize the TERM environment variable\")\n yield from renderer.setu(name=\"TERM\", value=\"dumb\")\n\n # build a conditional assignment block so we can turn color off on terminals that\n # don't understand ANSI control sequences\n yield from renderer.ifeq(\n op1=renderer.value(var=\"TERM\"),\n op2=renderer.builtin(\n func=\"findstring\",\n args=[renderer.value(var=\"TERM\"), self._ansiTerminals],\n ),\n onTrue=self._ansiCSI(),\n onFalse=self._dumbCSI(),\n )\n\n # render the color database\n # basic colors\n yield \"\"\n yield renderer.commentLine(\"basic colors\")\n yield from renderer.set(\n name=\"palette.normal\", value=renderer.call(func=\"csi3\", args=[\"0\"])\n )\n yield from renderer.set(\n name=\"palette.black\", value=renderer.call(func=\"csi3\", args=[\"0;30\"])\n )\n yield from renderer.set(\n name=\"palette.red\", value=renderer.call(func=\"csi3\", args=[\"0;31\"])\n )\n yield from renderer.set(\n name=\"palette.green\", value=renderer.call(func=\"csi3\", args=[\"0;32\"])\n )\n yield from renderer.set(\n name=\"palette.brown\", value=renderer.call(func=\"csi3\", args=[\"0;33\"])\n )\n yield from renderer.set(\n name=\"palette.blue\", value=renderer.call(func=\"csi3\", args=[\"0;34\"])\n )\n yield from renderer.set(\n name=\"palette.purple\", value=renderer.call(func=\"csi3\", args=[\"0;35\"])\n )\n yield from renderer.set(\n name=\"palette.cyan\", value=renderer.call(func=\"csi3\", args=[\"0;36\"])\n )\n yield from renderer.set(\n name=\"palette.light-gray\", value=renderer.call(func=\"csi3\", args=[\"0;37\"])\n )\n\n # bright colors\n yield \"\"\n yield renderer.commentLine(\"bright colors\")\n yield from renderer.set(\n name=\"palette.dark-gray\", value=renderer.call(func=\"csi3\", args=[\"1;30\"])\n )\n yield from renderer.set(\n name=\"palette.light-red\", value=renderer.call(func=\"csi3\", args=[\"1;31\"])\n )\n yield from renderer.set(\n name=\"palette.light-green\", value=renderer.call(func=\"csi3\", args=[\"1;32\"])\n )\n yield from renderer.set(\n name=\"palette.yellow\", value=renderer.call(func=\"csi3\", args=[\"1;33\"])\n )\n yield from renderer.set(\n name=\"palette.light-blue\", value=renderer.call(func=\"csi3\", args=[\"1;34\"])\n )\n yield from renderer.set(\n name=\"palette.light-purple\", value=renderer.call(func=\"csi3\", args=[\"1;35\"])\n )\n yield from renderer.set(\n name=\"palette.light-cyan\", value=renderer.call(func=\"csi3\", args=[\"1;36\"])\n )\n yield from renderer.set(\n name=\"palette.white\", value=renderer.call(func=\"csi3\", args=[\"1;37\"])\n )\n\n # pretty colors\n yield \"\"\n yield renderer.commentLine(\"pretty colors\")\n yield from renderer.set(\n name=\"palette.amber\",\n value=renderer.call(func=\"csi24\", args=[\"38\", \"255\", \"191\", \"0\"]),\n )\n yield from renderer.set(\n name=\"palette.lavender\",\n value=renderer.call(func=\"csi24\", args=[\"38\", \"192\", \"176\", \"224\"]),\n )\n yield from renderer.set(\n name=\"palette.sage\",\n value=renderer.call(func=\"csi24\", args=[\"38\", \"176\", \"208\", \"176\"]),\n )\n yield from renderer.set(\n name=\"palette.steel-blue\",\n value=renderer.call(func=\"csi24\", args=[\"38\", \"70\", \"130\", \"180\"]),\n )\n\n # diagnostics\n yield \"\"\n yield renderer.commentLine(\"diagnostics\")\n yield from renderer.set(\n name=\"palette.info\", value=renderer.call(func=\"csi8\", args=[\"38\", 
\"28\"])\n )\n yield from renderer.set(\n name=\"palette.warning\", value=renderer.call(func=\"csi8\", args=[\"38\", \"214\"])\n )\n yield from renderer.set(\n name=\"palette.error\", value=renderer.call(func=\"csi8\", args=[\"38\", \"196\"])\n )\n yield from renderer.set(\n name=\"palette.debug\", value=renderer.call(func=\"csi8\", args=[\"38\", \"75\"])\n )\n yield from renderer.set(\n name=\"palette.firewall\", value=renderer.value(var=\"palette.light-red\")\n )\n\n # the default theme\n yield \"\"\n yield renderer.commentLine(\"the default theme\")\n yield from renderer.set(\n name=\"palette.asset\", value=renderer.value(var=\"palette.steel-blue\")\n )\n yield from renderer.set(\n name=\"palette.action\", value=renderer.value(var=\"palette.lavender\")\n )\n yield from renderer.set(\n name=\"palette.attention\", value=renderer.value(var=\"palette.purple\")\n )\n\n # all done\n return",
"def print_tile(tile: Image.Image):\n width, height = tile.size\n\n pixels = tile.getcolors(width * height)\n\n most_frequent_pixel = pixels[0]\n\n for count, color in pixels:\n if count > most_frequent_pixel[0]:\n most_frequent_pixel = (count, color)\n\n r, g, b = most_frequent_pixel[1]\n\n light = r * 299/1000 + g * 587/1000 + b * 114/1000\n\n char = get_char_from_light(light)\n\n color = get_xterm_color(r, g, b)\n\n print(\"\\u001b[38;5;\" + str(color) + \"m\" + char, end=\"\\033[0m\")",
"def draw_on_image(D):\n \n #Set up rectangle's position within window\n lower_left_x = 20 \n lower_left_y = 42\n dx = 5\n dy = 5\n\n #Display border for rectangle\n #Border is a black rectangle under white text rectangle\n bord_upper_left = (lower_left_x-dx-3, lower_left_y-dy-20-3)\n bord_lower_right = (lower_left_x+dx+160+3, lower_left_y+dy+50+3)\n cv.Rectangle(D.image, bord_upper_left, bord_lower_right, D.black, cv.CV_FILLED)\n \n #Display white rectangle under text\n rect_upper_left = (lower_left_x-dx, lower_left_y-dy-20)\n rect_lower_right = (lower_left_x+dx+160, lower_left_y+dy+50)\n cv.Rectangle(D.image, rect_upper_left, rect_lower_right, D.white, cv.CV_FILLED)\n \n ####\n hive = \"hi!\"\n hiveStat = \"hive\"\n \n #Build Strings\n robotAssignment = (\"Assignment #: %.lf\"%R.assignment)\n robotConverged = (\"Converged: %s\"%R.converged)\n robotStatus = (\"Robot Status: \" + R.status)\n hiveCommand = (\"Hive Command: \" + hive)\n hiveStatus = (\"Hive Status: \" + hiveStat)\n \n # Position strings in a box so they won't overlap\n firstLineString = (lower_left_x,lower_left_y) \n secondLineString = (lower_left_x, lower_left_y + 20) \n thirdLineString = (lower_left_x, lower_left_y + 40)\n fourthLineString = (lower_left_x, lower_left_y + 60)\n fifthLineString = (lower_left_x, lower_left_y + 80)\n\n #Display strings in window\n cv.PutText(D.image, robotAssignment, firstLineString, D.font, cv.RGB(0,0,255)) \n cv.PutText(D.image, robotConverged, secondLineString, D.font, cv.RGB(0,0,255))\n cv.PutText(D.image, robotStatus, thirdLineString, D.font, cv.RGB(0,0,255))\n cv.PutText(D.image, hiveCommand, fourthLineString, D.font, cv.RGB(0,0,255))\n cv.PutText(D.image, hiveStatus, fifthLineString, D.font, cv.RGB(0,0,255))",
"def draw_qr_codes(self, frame):\n for qr in self.codes:\n (x, y, w, h) = qr[\"rect\"]\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)\n cv2.putText(frame, qr[\"text\"], (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n\n return frame",
"def createGlyphs(input):\r\n\r\n command_open = subprocess.Popen([\"start\", \"cmd\",\"/k\", 'echo {}'.format(input)], shell = True)\r\n\r\n time.sleep(2) #time for window to appear\r\n\r\n #print win32gui.FindWindow(None, \"C:\\Windows\\system32\\cmd.exe\")\r\n hwnd = win32gui.FindWindow(None, \"C:\\Windows\\system32\\cmd.exe\")\r\n win32gui.SetForegroundWindow(hwnd)\r\n bbox = win32gui.GetWindowRect(hwnd)\r\n img = ImageGrab.grab(bbox)\r\n #img.show()\r\n\r\n length_of_chars = len(input) * 8 # +2 to compensate for the quotation marks\r\n combined_chars = img.crop((10,34,length_of_chars+9,45))\r\n #combined_chars.show()\r\n\r\n #nonetype error was caused by chaining the .show() wiutth the rest of the stuff\r\n\r\n chars = {x:\"\" for x in input}\r\n\r\n for i, each in enumerate(range(8,combined_chars.size[0]+9,8)): #starts from 1, and +8 to compensate for the -8 below VVV\r\n #if i not in bad_indexes: #this is to avoid the first and last double quotation marks\r\n current_char = input[i]\r\n \r\n indiv_char = combined_chars.crop((each-8,0,each,11))\r\n \r\n w, h = indiv_char.size #should be 8 wide by 9 high\r\n\r\n for i2, pixel in enumerate(indiv_char.getdata()): #tuple values can either be (0, 0, 0) or (192,192,192) for the default terminal colours\r\n if pixel == (192,192,192): \r\n chars[current_char] += u\"\\u2588\"\r\n\r\n else:\r\n chars[current_char] += \" \"\r\n\r\n if i2 % w == w-1: # we want it too look decent so overflow is neeeded onto the next lines\r\n # ^^^ before it was i2 % w == 0, but that lead to a trail behind character, so whats before 0? -1! so w-1!!!\r\n chars[current_char] += \"\\n\"\r\n\r\n chars[current_char] = chars[current_char][:-1] #this is to remove the last \"\\n\"\r\n\r\n return chars",
"def render_environment(self):\n board_img = make_img_of_board(*self.hist[-1])\n return board_img",
"def blue_channel(image: Image) -> Image:\n new_image = copy(image)\n # filter the intensities of every component in every pixel.\n for x, y, (r, g, b) in image:\n blue = create_color(0,0,b)\n set_color(new_image, x, y, blue)\n return new_image",
"def choose_colours(self, img):\n def choose_colours_callback(event, x, y, flags, param):\n \"\"\"\n Mouse callback function for choose_colours()\n \"\"\"\n window_name_callback = \"Select colours:\"\n nhood = 10 #neighbourhood to take median\n lim = nhood/2\n \n #if left mouse button clicked, save cursor position\n if event == cv2.EVENT_LBUTTONDOWN:\n #take median colour in nhood*nhood neighbourhood\n l = np.median(img_lab[ y - lim : y + lim+1 , x - lim : x + lim , 0])\n a = np.median(img_lab[ y - lim : y + lim+1 , x - lim : x + lim , 1])\n b = np.median(img_lab[ y - lim : y + lim+1 , x - lim : x + lim , 2])\n selected_cols.append([l, a, b])\n #display 3 largest objects with selected colours in binary image\n tmp, img_bin = self.find_colours(img, [l,a,b], 3)\n cv2.imshow(window_name_callback,img_bin)\n cv2.waitKey(1000) & 0xFF\n cv2.destroyWindow(window_name_callback)\n \n window_name = \"Click on objects to select colours.\"\n selected_cols = []\n \n #convert BGR to LAB\n img_lab = cv2.cvtColor(img,cv2.COLOR_BGR2LAB)\n\n #create window, set callback function\n cv2.namedWindow(window_name)\n cv2.setMouseCallback(window_name,choose_colours_callback)\n \n #while ENTER or BACKSPACE not pressed, wait for mouse clicks\n key = -1\n while key != 13:\n cv2.imshow(window_name,img)\n key = cv2.waitKey(0) & 0xFF\n #if ENTER pressed and colours selected, save colours and exit\n if key==13:\n print(\"Selected colours:\")\n for col in selected_cols:\n print(\" \"+str(col))\n cv2.destroyWindow(window_name)\n if len(selected_cols) == 1:\n return selected_cols[0]\n else:\n return selected_cols\n #if BACKSPACE pressed, delete last entry and continue\n elif key==8:\n del(selected_cols[-1])\n print(\"Removed last entry.\")",
"def process_image(self, img):\n show = False\n # draw_image = np.copy(img)\n # search all scale windows and return cars' windows\n hots = search_scales(img,self.svc, self.X_scaler, self.orient, \n self.pix_per_cell, self.cell_per_block, self.spatial_size, self.hist_bins)\n # update the self boxes\n self.update(hots)\n # detect cars using threshold\n window_image = self.detect(img, 2)\n if show:\n plt.imshow(window_image)\n plt.show()\n return window_image"
] |
[
"0.546428",
"0.5435573",
"0.535255",
"0.53075343",
"0.51861966",
"0.5182019",
"0.50476694",
"0.50233334",
"0.5015882",
"0.49961483",
"0.4940678",
"0.49397632",
"0.49355415",
"0.4922061",
"0.48910037",
"0.48616993",
"0.48419634",
"0.48383147",
"0.48173562",
"0.47996178",
"0.47621155",
"0.47495303",
"0.4731375",
"0.47291687",
"0.47218937",
"0.47119668",
"0.47020477",
"0.46946314",
"0.4684417",
"0.4679841"
] |
0.667852
|
0
|
Use black text in our curses colorpairs
|
def use_black_text(self):
    # Re-initialise the curses color pairs so that every available background
    # color is paired with black text (foreground color 0).
    black_foreground = 0
    # Pair 0 is fixed by curses and cannot be redefined, so start at 1.
    for color in range(1, curses.COLORS):
        curses.init_pair(color, black_foreground, color)
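A minimal sketch of how such an initialiser might be exercised, assuming a bare curses program rather than the class this method belongs to; the standalone init_black_pairs helper and the demo loop are illustrative, not part of the original project:

import curses

def init_black_pairs():
    # Standalone stand-in for use_black_text: black text on every background color.
    black_foreground = 0
    for color in range(1, curses.COLORS):
        curses.init_pair(color, black_foreground, color)

def demo(stdscr):
    curses.start_color()
    init_black_pairs()
    # Show a black-on-<color> swatch for the first few pairs.
    for pair in range(1, min(8, curses.COLOR_PAIRS)):
        stdscr.addstr(pair, 1, f" pair {pair} ", curses.color_pair(pair))
    stdscr.getch()

if __name__ == "__main__":
    curses.wrapper(demo)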
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def format_black(c):\n c.run(\"black .\")",
"def prBlueBG(text):\n print(\"\\033[44m{}\\033[0m\".format(text), sep=\"\")",
"def paint(self):\r\n self.win.bkgd(\" \", COLOR_PAIR[\"con_text\"])",
"def charcolor(message):\n try:\n print(c.clear)\n while True:\n print_colored(c.clear + c.multi + \"Hello\" + \" \" + who + \"!\")\n except KeyboardInterrupt:\n exit()",
"def showColors(self):\n\t\tcolors = ['white', 'red', 'green', 'orange', 'blue', 'purple', 'cyan', 'lightgrey',\n\t\t\t\t 'darkgrey', 'light red', 'light green', 'yellow', 'light blue', 'purple', 'cyan', 'dark white']\n\t\tmax = curses.COLORS if curses.COLORS <= 16 else 16\n\t\tself.screen.clear()\n\t\tfor c in range(0, max):\n\t\t\tself.wts(c + 2, 1, \"color \" + str(c) + ' : ' + colors[c], c)\n\t\tself.wts(18, 1, \"color 16 : red on white\", 16)\n\t\tself.wts(20, 1, 'Color demo, displaying ' + str(max) + ' colors + 1 special')\n\t\tself.screen.refresh()\n\t\tch = False\n\t\twhile not ch:\n\t\t\tch = self.screen.getch()\n\t\tself.exit('Color demo complete')",
"def _no_color(text, *a, **kw):\n return text",
"def colorText(s, c):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n HEAD = \"\\033[\"\n TAIL = \"m\"\n\n color = \"39;49\"\n lastDifference = 800\n\n for i in COLORS:\n diff = abs(i[0] - c[0]) + abs(i[1] - c[1]) + abs(i[2] - c[2]) #calculates difference to stock color\n if diff < lastDifference:\n lastDifference = diff #chooses closest match\n color = i[3]\n\n return HEAD+color+TAIL+s+COLOR_RESET #color code + string + reset code",
"def color(color):\n if sys.platform == \"win32\":\n if color == \"green\":\n set_text_attr(FOREGROUND_GREEN | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"yellow\":\n set_text_attr(FOREGROUND_YELLOW | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"red\":\n set_text_attr(FOREGROUND_RED | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"blue\":\n set_text_attr(FOREGROUND_BLUE | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"reset\":\n set_text_attr(FOREGROUND_GREY | get_text_attr() & 0x0070)\n else :\n if color == \"green\":\n sys.stdout.write('\\033[92m')\n elif color == \"red\":\n sys.stdout.write('\\033[91m')\n elif color == \"blue\":\n sys.stdout.write('\\033[94m')\n elif color == \"reset\":\n sys.stdout.write('\\033[0m')",
"def initCurse():\n #On initialise notre fenêtre\n curses.initscr()\n #Couleur active !\n curses.start_color()\n curses.use_default_colors()\n curses.init_pair(1, curses.COLOR_WHITE, -1)\n curses.init_pair(2, curses.COLOR_GREEN, -1)\n curses.init_pair(3, curses.COLOR_RED, -1)\n curses.curs_set(0)",
"def _colorstr(self, args):",
"def test_colorFormatting(self):\n self.assertEqual(irc.parseFormattedText(\"\\x0301yay\\x03\"), A.fg.black[\"yay\"])\n self.assertEqual(\n irc.parseFormattedText(\"\\x0301,02yay\\x03\"), A.fg.black[A.bg.blue[\"yay\"]]\n )\n self.assertEqual(\n irc.parseFormattedText(\"\\x0301yay\\x0302yipee\\x03\"),\n A.fg.black[\"yay\", A.fg.blue[\"yipee\"]],\n )",
"def redtext(mesg):\n if sys.platform == 'win32':\n import win32console\n handle = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)\n reset = handle.GetConsoleScreenBufferInfo()['Attributes']\n handle.SetConsoleTextAttribute(12)\n sys.stdout.writelines(mesg+'\\n')\n handle.SetConsoleTextAttribute(reset)\n else:\n sys.stdout.write('\\033[91m'+mesg+'\\033[0m\\n')",
"def colorize(text, color):\n\n if not supports_color():\n return text\n\n return color + text + Colors.ENDC",
"def colorize(text, color):\n return COLOR_DICT[color] + str(text) + COLOR_DICT['end']",
"def printcolor(color, text):\r\n pushcolor()\r\n setcolor(color)\r\n print text\r\n popcolor()",
"def textColor(colorNumber):\n return '\\033[%dm' % (30 + colorNumber)",
"def test_assembleColor(self):\n self.assertEqual(\n irc.assembleFormattedText(A.fg.red[A.bg.blue[\"hello\"]]),\n \"\\x0f\\x0305,02hello\",\n )",
"def text(self, str: str, x: int, y: int, colour: int, /) -> None:",
"def test_assembleForegroundColor(self):\n self.assertEqual(\n irc.assembleFormattedText(A.fg.blue[\"hello\"]), \"\\x0f\\x0302hello\"\n )",
"def c_prnt(self, text, color):\n if color == 'pink':\n a = self.pink\n elif color == 'blue':\n a = self.blue\n elif color == 'green':\n a = self.green\n elif color == 'dgrn':\n a = self.dgrn\n elif color == 'yel':\n a = self.yellow\n elif color == 'amber':\n a = self.amber\n else:\n raise Exception('The color you selected is not acceptable')\n print(a + text + self.ENDC)",
"def colorful_text(text, color=Fore.RESET):\n return color + text + Fore.RESET",
"def colorText(text, color):\n\tif text is not None and color is not None:\n\t\tif isinstance(color, types.StringTypes):\n\t\t\tcolor = ColorUtil.keyToType(color)\n\t\tif color >= 0:\n\t\t\treturn CyTranslator().changeTextColor(text, color)\n\treturn text",
"def _color(self, args):",
"def colorize(text='', opts=(), **kwargs):\n code_list = []\n if text == '' and len(opts) == 1 and opts[0] == 'reset':\n return '\\x1b[%sm' % RESET\n for k, value in six.iteritems(kwargs):\n if k == 'fg':\n code_list.append(FOREGROUND[value])\n elif k == 'bg':\n code_list.append(BACKGROUND[value])\n for opt in opts:\n if opt in OPT_DICT:\n code_list.append(OPT_DICT[opt])\n if 'noreset' not in opts:\n text = text + '\\x1b[%sm' % RESET\n return ('\\x1b[%sm' % ';'.join(code_list)) + text",
"def Init_curses():\n curses.noecho()\n curses.cbreak()\n curses.curs_set(False)\n stdscr.keypad(True)\n curses.init_pair(1, curses.COLOR_YELLOW, curses.COLOR_BLACK)\n curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)\n curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)\n curses.init_pair(4, curses.COLOR_BLUE, curses.COLOR_BLACK)",
"def initialize_colors(self) -> None:\n curses.init_pair(ColorPair.black_on_white.value, curses.COLOR_BLACK, curses.COLOR_WHITE)\n curses.init_pair(ColorPair.red_on_black.value, curses.COLOR_RED, curses.COLOR_BLACK)\n curses.init_pair(ColorPair.blue_on_black.value, curses.COLOR_BLUE, curses.COLOR_BLACK)\n curses.init_pair(ColorPair.green_on_black.value, curses.COLOR_GREEN, curses.COLOR_BLACK)\n curses.init_pair(ColorPair.white_on_black.value, curses.COLOR_WHITE, curses.COLOR_BLACK)",
"def setup_colors(self):\n curses.start_color()\n curses.use_default_colors()\n\n if curses.can_change_color(): # Can't get these to work :(\n #curses.init_color(11, 254, 0, 1000)\n pass\n\n # This only works with: TERM=xterm-256color\n curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)\n curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_BLUE)\n curses.init_pair(3, curses.COLOR_RED, curses.COLOR_BLUE)\n curses.init_pair(4, curses.COLOR_WHITE, -1)\n\n # Higlight colors:\n black = curses.COLOR_BLACK\n curses.init_pair(10, -1, -1) # Default (white on black)\n\n curses.init_pair(11, curses.COLOR_BLUE, black)\n curses.init_pair(12, curses.COLOR_CYAN, black)\n curses.init_pair(13, curses.COLOR_GREEN, black)\n curses.init_pair(14, curses.COLOR_MAGENTA, black)\n curses.init_pair(15, curses.COLOR_RED, black)\n curses.init_pair(17, curses.COLOR_YELLOW, black)\n curses.init_pair(16, curses.COLOR_WHITE, black)\n\n # Better colors\n try:\n # TODO: Define RGB forthese to avoid getting\n # different results in different terminals\n curses.init_pair(11, 69, black) # blue\n curses.init_pair(12, 81, black) # cyan\n curses.init_pair(13, 119, black) # green\n curses.init_pair(14, 171, black) # magenta\n curses.init_pair(15, 197, black) # red\n curses.init_pair(17, 221, black) # yellow\n except:\n self.app.logger.log(\"Enhanced colors failed to load.\")",
"def black(cls):\n return cls('black', '000', 'X', 2)",
"def printRed(text):\n print(Fore.RED + text + Fore.WHITE)",
"def color_print(txt, foreground=PALETTE['white'], background=PALETTE['black']):\n print(color_text(txt, foreground, background))"
] |
[
"0.68883044",
"0.65288943",
"0.6435637",
"0.64223605",
"0.6409496",
"0.64047366",
"0.637912",
"0.63355744",
"0.63222665",
"0.6212109",
"0.6198274",
"0.6194393",
"0.618123",
"0.6157293",
"0.6144373",
"0.6131209",
"0.61289185",
"0.6126858",
"0.6119503",
"0.61139125",
"0.6083473",
"0.6073887",
"0.60486454",
"0.60442376",
"0.6030098",
"0.6029015",
"0.6012537",
"0.5992983",
"0.5964639",
"0.59532815"
] |
0.8621288
|
0
|
Takes in a CursesFrame, draws to terminal window
|
def draw(self, curses_frame):
    # self.shape is (width, height); curses addresses cells as (row, col).
    nrows, ncols = self.shape[1], self.shape[0]
    # Paint every cell with its character and the curses color pair recorded
    # for it in the CursesFrame.
    for row in range(nrows):
        for col in range(ncols):
            self.screen.addch(row, col,
                              curses_frame.characters[row][col],
                              curses.color_pair(curses_frame.colors[row][col]))
    # Push the updated buffer to the terminal in one refresh.
    self.screen.refresh()
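A hedged sketch of the surrounding pieces this method assumes: a CursesFrame holding 2-D grids of characters and color-pair indices, and a renderer wrapping a curses window with a (width, height) shape. The class and attribute names below are illustrative, not the original project's API:

import curses
from dataclasses import dataclass

@dataclass
class CursesFrame:
    characters: list  # characters[row][col] -> one-character string
    colors: list      # colors[row][col] -> curses color-pair index

class TerminalRenderer:
    def __init__(self, screen, shape):
        self.screen = screen  # a curses window
        self.shape = shape    # assumed (width, height), matching the method above

    # draw() as in the snippet above
    def draw(self, curses_frame):
        nrows, ncols = self.shape[1], self.shape[0]
        for row in range(nrows):
            for col in range(ncols):
                self.screen.addch(row, col,
                                  curses_frame.characters[row][col],
                                  curses.color_pair(curses_frame.colors[row][col]))
        self.screen.refresh()

def demo(stdscr):
    curses.start_color()
    curses.use_default_colors()
    curses.init_pair(1, curses.COLOR_GREEN, -1)
    # A 3-row by 4-column frame where every cell is '#' drawn with pair 1.
    frame = CursesFrame(characters=[["#"] * 4 for _ in range(3)],
                        colors=[[1] * 4 for _ in range(3)])
    TerminalRenderer(stdscr, shape=(4, 3)).draw(frame)
    stdscr.getch()

if __name__ == "__main__":
    curses.wrapper(demo)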
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def draw(self, win):\n self.rect.draw(win)\n self.text.draw(win)",
"def draw(self, screen):",
"def draw(text: list):\n\n curses.wrapper(wrapper, text)",
"def draw():",
"def draw_window(win, bird, pipes, score):\n\twin.blit(bg, (0,0))\n\n\tfor pipe in pipes:\n\t\tpipe.draw(win)\n\n\tbird.draw(win)\n\n\tpygame.display.update()",
"def update_window(self, window, frame):\n self.draw_eyes()\n self.show(window, frame)\n self.new_frame()",
"def __init__(self, y, x, height, width):\n\n self.window = curses.newwin(height, width, y, x)\n self.window.keypad(1)\n self.window.scrollok(True)\n self.scrolling = 0\n self.width = width\n self.height = height\n self.y = y\n self.x = x\n self.fg = \"W\"\n self.bg = None",
"def draw(screen):\n MY.restart_button.draw(screen)\n MY.display_text.draw(screen)",
"def draw(self) -> None:\n assert self.screen is not None\n self.screen.border()\n self.screen.addstr(2, 2, self.title, curses.A_STANDOUT)\n self.screen.addstr(4, 2, self.subtitle, curses.A_BOLD)\n\n for index, item in enumerate(self.all_items):\n self.draw_item(index, item)\n\n self.refresh_screen()\n if self._debug_screens: # pragma: no cover all\n with _SCREENDUMP_DIR.joinpath(f\"{self.title}-{time.time()}\").open(\n \"wb\",\n ) as f:\n self.screen.putwin(f)\n with _SCREENDUMP_DIR.joinpath(\n f\"stdscr-{self.title}-{time.time()}\",\n ).open(\"wb\") as f:\n self.screen.putwin(f)",
"def draw(self):\n ui.clear()\n ui.draw_board(self)\n ui.output_buffer()",
"def runFrame(self):\n self._drawFrame(self._advanceTime())",
"def display(stdscr, event, config):\n\n initializing = True\n height, width = stdscr.getmaxyx()\n hmax = height - 3\n wmax = width - 5\n spinner = ['\\\\', '|', '/', '-']\n spin_index = 0\n spin_len = 4\n try:\n stdscr.nodelay(True)\n curses.curs_set(0)\n curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)\n curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_WHITE)\n curses.init_pair(3, curses.COLOR_RED, curses.COLOR_BLACK)\n curses.init_pair(4, curses.COLOR_WHITE, curses.COLOR_BLACK)\n curses.init_pair(5, curses.COLOR_GREEN, curses.COLOR_BLACK)\n curses.init_pair(6, curses.COLOR_MAGENTA, curses.COLOR_BLACK)\n curses.init_pair(7, curses.COLOR_YELLOW, curses.COLOR_BLACK)\n curses.init_pair(8, curses.COLOR_BLACK, curses.COLOR_BLACK)\n stdscr.bkgd(curses.color_pair(8))\n\n pad = curses.newpad(hmax, wmax)\n last_y = 0\n while True:\n c = stdscr.getch()\n if c == curses.KEY_RESIZE:\n height, width = stdscr.getmaxyx()\n hmax = height - 3\n wmax = width - 5\n pad.resize(hmax, wmax)\n elif c == ord('w'):\n config['global']['ui'] = False\n pad.clear()\n del pad\n curses.endwin()\n return\n if len(job_sets) == 0:\n sleep(1)\n continue\n pad.clrtobot()\n y = 0\n x = 0\n for year_set in job_sets:\n line = 'Year_set {num}: {start} - {end}'.format(\n num=year_set.set_number,\n start=year_set.set_start_year,\n end=year_set.set_end_year)\n #pad.addstr(y, x, line, curses.color_pair(1))\n write_line(pad, line, x, y, curses.color_pair(1))\n pad.clrtoeol()\n y += 1\n # if xy_check(x, y, hmax, wmax) == -1:\n # sleep(1)\n # break\n color_pair = curses.color_pair(4)\n if year_set.status == SetStatus.COMPLETED:\n color_pair = curses.color_pair(5)\n elif year_set.status == SetStatus.FAILED:\n color_pair = curses.color_pair(3)\n elif year_set.status == SetStatus.RUNNING:\n color_pair = curses.color_pair(6)\n line = 'status: {status}'.format(\n status=year_set.status)\n #pad.addstr(y, x, line, color_pair)\n write_line(pad, line, x, y, color_pair)\n if initializing:\n sleep(0.01)\n pad.refresh(0, 0, 3, 5, hmax, wmax)\n pad.clrtoeol()\n y += 1\n # if xy_check(x, y, hmax, wmax) == -1:\n # sleep(1)\n # break\n # if y >= (hmax/3):\n # last_y = y\n # y = 0\n # x += (wmax/2)\n # if x >= wmax:\n # break\n if year_set.status == SetStatus.COMPLETED \\\n or year_set.status == SetStatus.NO_DATA \\\n or year_set.status == SetStatus.PARTIAL_DATA:\n continue\n for job in year_set.jobs:\n line = ' > {type} -- {id} '.format(\n type=job.get_type(),\n id=job.job_id)\n # pad.addstr(y, x, line, curses.color_pair(4))\n write_line(pad, line, x, y, curses.color_pair(4))\n color_pair = curses.color_pair(4)\n if job.status == JobStatus.COMPLETED:\n color_pair = curses.color_pair(5)\n elif job.status in [JobStatus.FAILED, 'CANCELED', JobStatus.INVALID]:\n color_pair = curses.color_pair(3)\n elif job.status == JobStatus.RUNNING:\n color_pair = curses.color_pair(6)\n elif job.status == JobStatus.SUBMITTED or job.status == JobStatus.PENDING:\n color_pair = curses.color_pair(7)\n line = '{status}'.format(status=job.status)\n pad.addstr(line, color_pair)\n pad.clrtoeol()\n if initializing:\n sleep(0.01)\n pad.refresh(0, 0, 3, 5, hmax, wmax)\n y += 1\n # if y >= (hmax/3):\n # last_y = y\n # y = 0\n # x += (wmax/2)\n # if x >= wmax:\n # break\n\n x = 0\n if last_y:\n y = last_y\n # pad.refresh(0, 0, 3, 5, hmax, wmax)\n pad.clrtobot()\n y += 1\n # if xy_check(x, y, hmax, wmax) == -1:\n # sleep(1)\n # continue\n for line in event_list[-10:]:\n if 'Transfer' in line:\n continue\n if 'hosted' in line:\n continue\n if 'failed' in line or 
'FAILED' in line:\n prefix = '[-] '\n pad.addstr(y, x, prefix, curses.color_pair(3))\n else:\n prefix = '[+] '\n pad.addstr(y, x, prefix, curses.color_pair(5))\n pad.addstr(line, curses.color_pair(4))\n pad.clrtoeol()\n if initializing:\n sleep(0.01)\n pad.refresh(0, 0, 3, 5, hmax, wmax)\n #pad.refresh(0, 0, 3, 5, hmax, wmax)\n y += 1\n if xy_check(x, y, hmax, wmax) == -1:\n sleep(1)\n break\n pad.clrtobot()\n y += 1\n if xy_check(x, y, hmax, wmax) == -1:\n sleep(1)\n continue\n\n file_start_y = y\n file_end_y = y\n file_display_list = []\n current_year = 1\n year_ready = True\n partial_data = False\n # for line in sorted(file_list, cmp=file_list_cmp):\n # index = line.find('-')\n # year = int(line[:index])\n # month = int(line[index + 1:])\n # if month == 1:\n # year_ready = True\n # partial_data = False\n # if file_list[line] != SetStatus.DATA_READY:\n # year_ready = False\n # else:\n # partial_data = True\n # if month == 12:\n # if year_ready:\n # status = SetStatus.DATA_READY\n # else:\n # if partial_data:\n # status = SetStatus.PARTIAL_DATA\n # else:\n # status = SetStatus.NO_DATA\n # file_display_list.append('Year {year} - {status}'.format(\n # year=year,\n # status=status))\n\n # line_length = len(file_display_list[0])\n # num_cols = wmax/line_length\n # for line in file_display_list:\n # if x + len(line) >= wmax:\n # diff = wmax - (x + len(line))\n # line = line[:diff]\n # pad.addstr(y, x, line, curses.color_pair(4))\n # pad.clrtoeol()\n # y += 1\n # if y >= (hmax-10):\n # y = file_start_y\n # x += line_length + 5\n # if x >= wmax:\n # break\n # if y > file_end_y:\n # file_end_y = y\n\n y = file_end_y + 1\n x = 0\n msg = 'Active transfers: {}'.format(active_transfers)\n pad.addstr(y, x, msg, curses.color_pair(4))\n pad.clrtoeol()\n if active_transfers:\n for line in event_list:\n if 'Transfer' in line:\n index = line.find('%')\n if index:\n s_index = line.rfind(' ', 0, index)\n percent = float(line[s_index: index])\n if percent < 100:\n y += 1\n pad.addstr(y, x, line, curses.color_pair(4))\n pad.clrtoeol()\n for line in event_list:\n if 'hosted' in line:\n y += 1\n pad.addstr(y, x, line, curses.color_pair(4))\n spin_line = spinner[spin_index]\n spin_index += 1\n if spin_index == spin_len:\n spin_index = 0\n y += 1\n pad.addstr(y, x, spin_line, curses.color_pair(4))\n pad.clrtoeol()\n pad.clrtobot()\n y += 1\n if event and event.is_set():\n return\n pad.refresh(0, 0, 3, 5, hmax, wmax)\n initializing = False\n sleep(1)\n\n except KeyboardInterrupt as e:\n raise",
"def render_frame(self, image):\n arr = np.array(image.resize(self.curses_shape))\n characters = self.character_transformer.map_pixels_to_characters(arr)\n colors = self.color_transformer.nearest_neighbors(arr)\n return CursesFrame(characters, colors)",
"def display():\n screen.addch(head[0],head[1],'x')",
"def draw() -> None:\n if not WINDOW:\n return\n imgui.new_frame()\n ACTIVE_CALLBACK()\n WINDOW.clear()\n imgui.render()\n cast(PygletRenderer, IMPL).render(imgui.get_draw_data())",
"def loop(self, frame):\n self.root = frame\n self.drawUI()\n cv2.imshow('Fotopasca', self.root)",
"def _redraw(self):\n\n\t\tif not self._has_gui:\n\t\t\treturn\n\n\t\trows, _ = os.popen('stty size', 'r').read().split()\n\t\tself._gui_size = int(rows)\n\n\t\tif self._gui_output is None or len(self._gui_output) != self._gui_size:\n\t\t\tself._gui_output = [[\"\" for _ in range(0, self._gui_size)] for _ in range(0, self._gui_size)]\n\n\t\tself._update_output()\n\n\t\tos.system(\"clear\")\n\n\t\t# pylint: disable-msg=C0103; (Invalid variable names x, y)\n\t\t# We draw from top left (0,49) to bottom right (49,0)\n\t\tfor y in range(self._gui_size - 1, -1, -1):\n\t\t\tline_output = \"\"\n\t\t\tfor x in range(0, self._gui_size):\n\t\t\t\tline_output += self._gui_output[x][y] + \" \"\n\t\t\tprint(line_output)",
"def done_paint(self):\r\n curses.panel.update_panels()\r\n curses.doupdate()",
"def display_frame(self, frame=None):\n if frame is None:\n frame = self.get_frame()\n cv2.namedWindow('frame')\n cv2.imshow('frame', frame)\n cv2.waitKey(0)",
"def curses_loop(stdscr):\r\n\r\n # this function may under no circumstancs raise an exception, so I'm\r\n # wrapping everything into try/except (should actually never happen\r\n # anyways but when it happens during coding or debugging it would\r\n # leave the terminal in an unusable state and this must be avoded).\r\n # We have a list debug_tb[] where we can append tracebacks and\r\n # after curses uninitialized properly and the terminal is restored\r\n # we can print them.\r\n try:\r\n init_colors()\r\n gox = goxapi.Gox(secret, config)\r\n\r\n logwriter = LogWriter(gox)\r\n printhook = PrintHook(gox)\r\n\r\n conwin = WinConsole(stdscr, gox)\r\n bookwin = WinOrderBook(stdscr, gox)\r\n statuswin = WinStatus(stdscr, gox)\r\n chartwin = WinChart(stdscr, gox)\r\n\r\n strategy_manager = StrategyManager(gox, strat_mod_list)\r\n\r\n gox.start()\r\n while True:\r\n key = stdscr.getch()\r\n if key == ord(\"q\"):\r\n break\r\n elif key == curses.KEY_F4:\r\n DlgNewOrderBid(stdscr, gox).modal()\r\n elif key == curses.KEY_F5:\r\n DlgNewOrderAsk(stdscr, gox).modal()\r\n elif key == curses.KEY_F6:\r\n DlgCancelOrders(stdscr, gox).modal()\r\n elif key == curses.KEY_RESIZE:\r\n # pylint: disable=W0212\r\n with goxapi.Signal._lock:\r\n stdscr.erase()\r\n stdscr.refresh()\r\n conwin.resize()\r\n bookwin.resize()\r\n chartwin.resize()\r\n statuswin.resize()\r\n elif key == ord(\"l\"):\r\n strategy_manager.reload()\r\n\r\n # which chart to show on the right side\r\n elif key == ord(\"H\"):\r\n set_ini(gox, \"display_right\", \"history_chart\",\r\n gox.history.signal_changed, gox.history, None)\r\n elif key == ord(\"D\"):\r\n set_ini(gox, \"display_right\", \"depth_chart\",\r\n gox.orderbook.signal_changed, gox.orderbook, None)\r\n\r\n # depth chart step\r\n elif key == ord(\",\"): # zoom out\r\n toggle_depth_group(gox, +1)\r\n elif key == ord(\".\"): # zoom in\r\n toggle_depth_group(gox, -1)\r\n\r\n # orderbook grouping step\r\n elif key == ord(\"-\"): # zoom out (larger step)\r\n toggle_orderbook_group(gox, +1)\r\n elif key == ord(\"+\"): # zoom in (smaller step)\r\n toggle_orderbook_group(gox, -1)\r\n\r\n elif key == ord(\"S\"):\r\n toggle_orderbook_sum(gox)\r\n\r\n elif key == ord(\"T\"):\r\n toggle_depth_sum(gox)\r\n\r\n # lowercase keys go to the strategy module\r\n elif key >= ord(\"a\") and key <= ord(\"z\"):\r\n gox.signal_keypress(gox, (key))\r\n else:\r\n gox.debug(\"key pressed: key=%i\" % key)\r\n\r\n except KeyboardInterrupt:\r\n # Ctrl+C has been pressed\r\n pass\r\n\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n # we are here because shutdown was requested.\r\n #\r\n # Before we do anything we dump stacktraces of all currently running\r\n # threads to a separate logfile because this helps debugging freezes\r\n # and deadlocks that might occur if things went totally wrong.\r\n with open(\"goxtool.stacktrace.log\", \"w\") as stacklog:\r\n stacklog.write(dump_all_stacks())\r\n\r\n # we need the signal lock to be able to shut down. 
And we cannot\r\n # wait for any frozen slot to return, so try really hard to get\r\n # the lock and if that fails then unlock it forcefully.\r\n try_get_lock_or_break_open()\r\n\r\n # Now trying to shutdown everything in an orderly manner.it in the\r\n # Since we are still inside curses but we don't know whether\r\n # the printhook or the logwriter was initialized properly already\r\n # or whether it crashed earlier we cannot print here and we also\r\n # cannot log, so we put all tracebacks into the debug_tb list to\r\n # print them later once the terminal is properly restored again.\r\n try:\r\n strategy_manager.unload()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n try:\r\n gox.stop()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n try:\r\n printhook.close()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n try:\r\n logwriter.close()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n # curses_loop() ends here, we must reach this point under all circumstances.\r\n # Now curses will restore the terminal back to cooked (normal) mode.\r",
"def render(self, frame: Frame):\n\n cv2.imshow(winname=self.title, mat=frame)\n cv2.waitKey(delay=self.delay)\n\n if self.step:\n while cv2.waitKey(delay=0) != self.step_key:\n continue",
"def draw_menu(self, screen: curses.window) -> None:\n screen.clear()\n sh, sw = screen.getmaxyx()\n self.draw_title_window(screen, 3, sw, 0, 0)\n\n bottom_win_height = sh - 2\n output_win_width = sw // 2 + 25\n input_win_width = sw - output_win_width + 1\n\n self.draw_output_window(screen, bottom_win_height - 1, output_win_width, 2, 0)\n self.draw_input_window(screen, bottom_win_height - 1, input_win_width, 2, output_win_width - 1)\n\n self.draw_status_bar(screen)",
"def draw():\n screen.fill((0, 0, 0))\n alien.draw()",
"def outline(self, x, y, width=None, height=None, char=None,\n fg=(255, 255, 255), bg=None):\n self.console.draw_frame(x, y, width, height, char, fg, bg)",
"def __init__(self, stdscr):\r\n self.stdscr = stdscr\r\n self.posx = 0\r\n self.posy = 0\r\n self.width = 10\r\n self.height = 10\r\n self.termwidth = 10\r\n self.termheight = 10\r\n self.win = None\r\n self.panel = None\r\n self.__create_win()",
"def draw_frame(board,board_y,board_x):\n\t# drawing boards frames\n\tfor frame_x in range(0,board_x):\n\t\tboard.addch(0,frame_x,'-')\n\t\tboard.addch(pad_y,frame_x,'-')\n\n\tfor frame_y in range(0,board_y):\n\t\tboard.addch(frame_y,0,'|')\n\t\tboard.addch(frame_y,pad_x,'|')\n\n\t# making the edges prettier\n\tfor edge in [[0,0],[board_y,0],[0,pad_x],[pad_y,pad_x]]:\n\t\tboard.addch(edge[0],edge[1],'+')",
"def init():\n unicornhathd.rotation(270)\n unicornhathd.brightness(1.0)\n\n stdscr = curses.initscr()\n curses.cbreak()\n curses.noecho()\n stdscr.nodelay(1)\n stdscr.keypad(1)\n\n return stdscr",
"def redraw_window(win, board, playtime, strikes):\n win.fill((250, 250, 250))\n\n # Display time\n font = pygame.font.SysFont(\"georgia\", 30)\n txt = font.render(\"Time Elapsed: \" + str(format_time(playtime)), 1, (0, 0, 0))\n win.blit(txt, (540 - 300, 560))\n\n # Display strikes\n txt = font.render(\"X \" * strikes, 1, (255, 0, 0))\n win.blit(txt, (10, 560))\n\n # Draw grid lines and board\n board.draw(win)",
"def draw_output_window(self, screen: curses.window, height: int, width: int, y: int, x: int) -> None:\n output_win = screen.subwin(height, width, y, x)\n output_win.border(0, 0, 0, 0, curses.ACS_SSSB)\n\n if self.comic_results and self.comic_results.comics:\n comic = self.comic_results.comics[self.comic_results_index]\n page_id = comic.page\n title = comic.title\n alt_text = comic.script\n comic_url = comic.comic_url\n image_url = comic.image_url\n comic_url_text = \"Comic URL:\"\n image_url_text = \"Image URL:\"\n output_win.addstr(4, 1, comic_url_text)\n output_win.addstr(5, 1, image_url_text)\n\n if len(image_url) + len(image_url_text) + 1 >= width:\n image_url = image_url[:len(image_url) + len(image_url_text) - 5]\n image_url += \"...\"\n\n if len(comic_url) + len(comic_url_text) + 1 >= width:\n image_url = image_url[:len(comic_url) + len(comic_url_text) - 10]\n image_url += \"...\"\n\n add_str_color(output_win, 4, len(comic_url_text) + 2, comic_url, ColorPair.green_on_black)\n add_str_color(output_win, 5, len(image_url_text) + 2, image_url, ColorPair.green_on_black)\n self.comic_url_button.is_enabled = True\n self.image_url_button.is_enabled = True\n self.comic_url_button.url = comic_url\n self.image_url_button.url = image_url\n self.comic_url_button.set_bounding_box(\n 6,\n len(comic_url_text) + 2,\n 6,\n len(comic_url_text) + len(comic_url) + 2\n )\n self.image_url_button.set_bounding_box(\n 7,\n len(image_url_text) + 2,\n 7,\n len(image_url_text) + len(image_url) + 2\n )\n\n show_image_button_text = self.show_image_button.get_current_option()\n add_str_color(output_win, 6, 1, show_image_button_text, ColorPair.green_on_black)\n self.show_image_button.is_enabled = True\n self.show_image_button.set_bounding_box(\n 8,\n 1,\n 8,\n len(show_image_button_text)\n )\n\n open_folder_text = \"Open Folder\"\n open_folder_y = 1\n open_folder_x = width - len(open_folder_text) - 2\n add_str_color(output_win, open_folder_y, open_folder_x, open_folder_text, ColorPair.green_on_black)\n self.open_folder_button.is_enabled = True\n self.open_folder_button.set_bounding_box(\n open_folder_y + 2,\n open_folder_x,\n open_folder_y + 2,\n open_folder_x + len(open_folder_text)\n )\n\n if show_image_button_text == \"Hide Image\":\n image_path = comic.image_path\n ascii_img = img2text.img_to_ascii(image_path, width=width - 2, reverse=True)\n lines = ascii_img.split('\\n')\n for i, line in enumerate(lines):\n if i > height - 12:\n break\n output_win.addstr(i + 8, 1, line)\n else:\n self.comic_url_button.is_enabled = False\n self.image_url_button.is_enabled = False\n self.show_image_button.is_enabled = False\n self.open_folder_button.is_enabled = False\n page_id = \"N/A\"\n title = \"N/A\"\n alt_text = \"N/A\"\n output_win.addstr(4, 1, \"Comic URL: N/A\")\n output_win.addstr(5, 1, \"Image URL: N/A\")\n output_win.addstr(1, 1, f\"ID: {page_id}\")\n output_win.addstr(2, 1, f\"Title: {title}\")\n if len(alt_text) + len(\"Alt Text:\") + 1 >= width:\n alt_text = alt_text[:width - len(\"Alt Text:\") - 6]\n alt_text += \"...\"\n output_win.addstr(3, 1, f\"Alt Text: {alt_text}\")\n\n if self.comic_results and self.comic_results.comics:\n num_results = len(self.comic_results.comics)\n num_results_text = f\"{self.comic_results_index + 1}/{num_results} Results\"\n else:\n num_results_text = \"0/0 Results\"\n\n output_win.addstr(height - 2, width // 2 - len(num_results_text) // 2, num_results_text)\n\n next_text = \"Next\"\n back_text = \"Back\"\n next_x = width - len(next_text) - 1\n back_x = 1\n add_str_color(output_win, 
height - 2, next_x, next_text, ColorPair.black_on_white)\n add_str_color(output_win, height - 2, back_x, \"Back\", ColorPair.black_on_white)\n\n self.next_button.set_bounding_box(\n height,\n next_x,\n height,\n next_x + len(next_text)\n )\n self.back_button.set_bounding_box(\n height,\n back_x,\n height,\n back_x + len(back_text)\n )",
"def render(self):\n\t\tself._getSize()\n\t\tself.screen.clear()\n\t\tif self.width < 60 or self.height < 20:\n\t\t\tself.wts(1, 1, \"Windows too small to render!\" , 1)\n\t\telse:\n\t\t\t# check if resized\n\t\t\tif curses.is_term_resized(self.height, self.width):\n\t\t\t\tcurses.resizeterm(self.height, self.width)\n\t\t\t\tself._getSize()\n\t\t\t# render border\n\t\t\tif self.screenBorder:\n\t\t\t\tself.drawBorder()\n\t\t\t# render lines\n\t\t\tself.drawLines()\n\t\t\t# render status\n\t\t\tself.wts(self.height - 1, 1, self.status , 1)\n\t\t\t# render objects\n\t\t\tself.drawObjects(self.objects)\n\t\t\t# render menus\n\t\t\tself.drawObjects(self.menus)\n\t\tself.screen.refresh()"
] |
[
"0.6687068",
"0.65805525",
"0.6468488",
"0.6441222",
"0.64168227",
"0.6387608",
"0.63720363",
"0.63700324",
"0.632748",
"0.63065773",
"0.6263605",
"0.61770093",
"0.61756444",
"0.61599934",
"0.61589754",
"0.6118374",
"0.60708827",
"0.6054496",
"0.60516113",
"0.6051077",
"0.6009887",
"0.599775",
"0.5990748",
"0.5987487",
"0.5983493",
"0.59738266",
"0.5970037",
"0.59604084",
"0.5954743",
"0.5943634"
] |
0.78547007
|
0
|
Run 'git up' w/o remotes
|
def test_no_remotes():
    # Work inside the repository fixture that has no remotes configured.
    os.chdir(master_path)
    from PyGitUp.gitup import GitUp
    # 'git up' has nothing to fetch from without a remote, so constructing
    # GitUp is expected to fail with a GitError.
    with pytest.raises(GitError):
        GitUp(testing=True)
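A rough sketch of the fixture such a test relies on: a scratch repository with one commit and no remotes, created before the test runs. The helper below is illustrative only (it uses GitPython's Repo.init for convenience; the real suite may build its fixtures differently):

import os
import tempfile

from git import Repo  # GitPython


def make_repo_without_remotes():
    # Create an empty repository with a single commit and no 'origin'.
    path = tempfile.mkdtemp(prefix="gitup-no-remotes-")
    repo = Repo.init(path)
    with repo.config_writer() as cw:
        cw.set_value("user", "name", "Test User")
        cw.set_value("user", "email", "test@example.com")
    with open(os.path.join(path, "README"), "w") as f:
        f.write("test repo\n")
    repo.index.add(["README"])
    repo.index.commit("initial commit")
    assert not repo.remotes  # nothing for 'git up' to fetch from
    return path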
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def flush_repo():\n server = get_server()\n run(\"rm -rf %(project_name)s\" % env)\n git.clone()\n server.setup()",
"def update():\n call('git -C ~/norminette+ pull', shell=True)",
"def test_out_of_tree():\n os.chdir(work_tree)\n\n from PyGitUp.gitup import GitUp\n gitup = GitUp(testing=True)\n gitup.run()\n\n assert gitup.states == ['fast-forwarding']",
"def up(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"up --build\"\n\n if remote:\n command = f\"{command} --detach\"\n\n run_command_with_services(context, user, remote, instance, stack, command, services)",
"def git_pull():\n\n puts(yellow(\"Pull master from GitHub\"))\n with cd(env.source_dir):\n run('git reset --hard HEAD')\n run('git pull')",
"def git_available():\n null = open(\"/dev/null\", \"w\")\n subprocess.Popen(\"git\", stdout=null, stderr=null)\n null.close()",
"def update_code_from_git():\n if not files.exists(REMOTE_REPO_DIR):\n with cd(HOME_DIR):\n run(\"git clone %s\" % MAIN_GITHUB_REP )\n with cd(REMOTE_REPO_DIR):\n run(\"git pull\")",
"def push_updates():\n check_call(['git', 'push', '--tags', '--force'])",
"def clean(self):\n self.run(['git', 'reset', '--hard', 'HEAD'])\n self.run(['git', 'clean', '-fdx'])\n self.run(['git', 'checkout', 'origin/master'])",
"def git_config_setup():\n\n local('git config user.email $GIT_EMAIL')\n local('git config user.name $GIT_NAME')\n\n local(\n 'git remote set-url --push origin '\n 'https://[email protected]/$TRAVIS_REPO_SLUG.git'\n )",
"def d_ploy():\n\tlocal(\"git push origin --all\")\n\twith cd(LIVE_ROOT):\n\t\trun(\"git pull\")",
"def git():\n pass",
"def git_update(c):\n c.run('git submodule update --recursive --remote')",
"def git_server():\n log('Instalando git', yellow)\n sudo('apt-get -y install git')",
"def pull():\n _with_deploy_env(['git pull'])",
"def dev_up():\n _with_deploy_env(['./bin/develop up'])",
"def d_test():\n\tlocal(\"git push origin --all\")\n\twith cd(TEST_ROOT):\n\t\trun(\"git pull\")",
"def git_push(c):\n c.run(\"git submodule foreach git push \")",
"def setUp(self):\n self.tempdir = tempfile.mkdtemp()\n self.repo = os.path.join(self.tempdir, 'throwaway')\n os.mkdir(self.repo)\n cmd = (\n \"cd {} && git init && \"\n \"git checkout -b master && \"\n \"git commit --allow-empty -m \\\"new repo\\\" \"\n ).format(self.repo)\n os.system(cmd)",
"def prepare_deploy():\n from fabdeploy.django import test as django_test\n django_test()\n git.add_commit_pull()\n git.push()",
"def pull_changes():\n\n check_prompt = (\n not env.prompt or\n console.confirm(\n \"Switch to appropriate branch and pull changes from upstream?\",\n default=True,\n )\n )\n\n if check_prompt:\n with cd(env.repo_path):\n run(\"git checkout %s\" % env.branch)\n run(\"git pull\")",
"def clean_repo(c):\n c.run('git clean -ffdx')\n c.run('git reset --hard')",
"def clone(c):\n\n for p in get_config().get('packages', []):\n try:\n c.run(f\"git clone {p}\")\n except UnexpectedExit as e:\n pass",
"def reset_repository(self):\n # Remove index lock just in case.\n lock_file = f\"{self.repo.working_tree_dir}/.git/index.lock\"\n try:\n os.remove(lock_file)\n logging.info(f\"removed {lock_file}\")\n except FileNotFoundError:\n logging.info(f\"{lock_file} does not exist\")\n logging.info('Syncing local, origin and upstream...')\n if 'upstream' not in self.repo.remotes:\n self.repo.create_remote('upstream', url=LLVM_GITHUB_URL)\n self.repo.remotes.upstream.fetch()\n self.repo.git.clean('-ffxdq')\n self.repo.git.reset('--hard')\n self.repo.git.fetch('--all')\n if self.find_commit('main') is None:\n origin = self.repo.remotes.origin\n self.repo.create_head('main', origin.refs.main)\n self.repo.heads.main.set_tracking_branch(origin.refs.main)\n self.repo.heads.main.checkout()\n self.repo.git.pull('origin', 'main')\n self.repo.git.pull('upstream', 'main')\n if self.push_branch:\n self.repo.git.push('origin', 'main')",
"def git_fix_detached(c, message):\n c.run('git submodule update')\n c.run('git submodule foreach git checkout master')\n c.run('git submodule foreach git pull origin master')",
"def update_code_from_git():\n if not files.exists(CODE_DIR):\n with cd(HOME_DIR):\n run(\"git clone %s\" % MAIN_GITHUB_REP )\n\n with cd(CODE_DIR):\n git_pull()",
"def hard_reset_branches(args):\n checkout_branches(args)\n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n print >>sys.stderr, \"Hard resetting tracking branch in project: %s\" % name\n repo = GitRepo(workdir_for_project(project))\n repo.check_command([\"reset\", \"--hard\", project.remote_refspec])",
"def setup(c):\n files.directory(conn, utils.join(SALT_DEPLOY_PATH, utils.DEPLOY_REPO_DIR))\n files.directory(conn, utils.join(SALT_DEPLOY_PATH, utils.DEPLOY_RELEASES_DIR))\n\n with conn.cd(utils.join(SALT_DEPLOY_PATH, utils.DEPLOY_REPO_DIR)):\n if not files.exists(conn, \"HEAD\"):\n conn.run(f\"git clone --mirror --depth 1 --no-single-branch {SALT_REPO} .\")\n\n conn.run(f\"git remote set-url origin {SALT_REPO}\")\n conn.run(f\"git fetch --depth 1 origin {SALT_BRANCH}\")",
"def push():\n files = []\n for i in sp.check_output([\"git\", \"status\"]).decode().split(\"\\n\"):\n nf = \"#\\tnew file:\"\n mf = \"#\\tmodified:\"\n\t# Should have a deleted-files option here too.\n if i[: len(nf)] == nf or i[: len(mf)] == mf:\n f = i.split(\" \")[-1]\n files.append(f)\n files = list(set(files)) # Remove duplicates\n\n print(\"Committing these files: {}\".format(files))\n\n # Run all py scripts through black for formatting.\n# for f in files:\n# if f[-3:] == \".py\":\n# sp.call([\"black\", f])\n\n [sp.call([\"git\", \"add\", \"{}\".format(i)]) for i in files]\n\n commit_message = str(input(\"Enter commit message:\\n\"))\n commit_message = \"Updated\" if commit_message == \"\" else commit_message\n print(\"Committing with commit message of: {}\\n\\n\".format(commit_message))\n sp.call([\"git\", \"commit\", \"-m\", \"{}\".format(commit_message)])\n sp.call([\"git\", \"push\"])",
"def need_to_install_git(args, git_directory):\n if args.force:\n return True\n git_exe_path = os.path.join(git_directory, 'bin', 'git.exe')\n if not os.path.exists(git_exe_path):\n return True\n if subprocess.call(\n [git_exe_path, '--version'],\n stdout=DEVNULL, stderr=DEVNULL) != 0:\n return True\n for script in ('git.bat', 'gitk.bat', 'ssh.bat', 'ssh-keygen.bat',\n 'git-bash'):\n full_path = os.path.join(ROOT_DIR, script)\n if not os.path.exists(full_path):\n return True\n with open(full_path) as f:\n if os.path.relpath(git_directory, ROOT_DIR) not in f.read():\n return True\n if not os.path.exists(os.path.join(\n git_directory, 'etc', 'profile.d', 'python.sh')):\n return True\n return False"
] |
[
"0.65749276",
"0.631371",
"0.60905033",
"0.60868317",
"0.6072795",
"0.6063428",
"0.6059339",
"0.60275495",
"0.60171896",
"0.59809583",
"0.59625286",
"0.594102",
"0.5918649",
"0.59147465",
"0.5892201",
"0.5878971",
"0.58600026",
"0.58579963",
"0.58295166",
"0.5828949",
"0.5806608",
"0.580582",
"0.5743399",
"0.5728281",
"0.5711246",
"0.570522",
"0.5686919",
"0.5663669",
"0.5654039",
"0.56059086"
] |
0.696725
|
0
|
Python function wrapping the C++ dcpagerank function. Computes PageRank on the double cover of a graph.
|
def dcpagerank_weighted_cpp(n, ai, aj, a, alpha, eps, seedids, maxsteps, simplify=True, xlength=10**7):
# Find the appropriate types and the function to call
float_type, vtype, itype, ctypes_vtype, ctypes_itype, fun = _get_dcpagerank_weighted_cpp_types_fun(ai, aj)
# Set up the parameters for the function call, including making sure their types are correct.
nseedids = len(seedids)
seedids = np.array(seedids, dtype=vtype)
xids_1 = np.zeros(xlength, dtype=vtype)
xids_2 = np.zeros(xlength, dtype=vtype)
values_1 = np.zeros(xlength, dtype=float_type)
values_2 = np.zeros(xlength, dtype=float_type)
# Set the array offset. In python, this is 0.
offset = 0
# Call the c++ function to compute the double cover pagerank.
actual_length = fun(n, ai, aj, a, offset, alpha, eps, seedids, nseedids, maxsteps, xids_1, xids_2, xlength,
values_1, values_2, simplify)
# If the actual output is longer than we expected, we will need to run the algorithm again to ensure we get the
# correct output.
if actual_length > xlength:
warnings.warn("Running pagerank for a second time. The xlength parameter was not long enough.")
# Re-initialise the output vectors
xlength = actual_length
xids_1 = np.zeros(xlength, dtype=vtype)
xids_2 = np.zeros(xlength, dtype=vtype)
values_1 = np.zeros(xlength, dtype=float_type)
values_2 = np.zeros(xlength, dtype=float_type)
# Call the pagerank method again with more memory allocated.
actual_length = fun(n, ai, aj, a, offset, alpha, eps,
seedids, nseedids, maxsteps, xids_1, xids_2, xlength, values_1, values_2, simplify)
actual_values_1 = values_1[0:actual_length]
actual_values_2 = values_2[0:actual_length]
actual_xids_1 = xids_1[0:actual_length]
actual_xids_2 = xids_2[0:actual_length]
    # actual_length is the length of the longer of xids_1 and xids_2, so trim any
    # trailing zero entries from each pair of output vectors.
num_zeros_1 = 0
len_1 = len(actual_values_1)
len_2 = len(actual_values_2)
while len_1 > 0 and actual_values_1[-1] == 0:
num_zeros_1 += 1
actual_values_1 = actual_values_1[:-1]
len_1 -= 1
num_zeros_2 = 0
while len_2 > 0 and actual_values_2[-1] == 0:
num_zeros_2 += 1
actual_values_2 = actual_values_2[:-1]
len_2 -= 1
if num_zeros_1 > 0:
actual_xids_1 = actual_xids_1[:-num_zeros_1]
if num_zeros_2 > 0:
actual_xids_2 = actual_xids_2[:-num_zeros_2]
return actual_xids_1, actual_xids_2, actual_values_1, actual_values_2
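# --- Editorial usage sketch (not part of the original wrapper) ---
# Assumes ai/aj/a are the CSR indptr/indices/data arrays of the weighted
# adjacency matrix, matching the argument layout passed to the wrapped C++
# routine; the graph, seed ids and parameter values below are made up purely
# for illustration.
if __name__ == "__main__":
    import numpy as np
    import scipy.sparse as sp
    A = sp.csr_matrix(np.array([[0.0, 1.0, 0.0],
                                [1.0, 0.0, 2.0],
                                [0.0, 2.0, 0.0]]))
    xids_1, xids_2, vals_1, vals_2 = dcpagerank_weighted_cpp(
        A.shape[0], A.indptr, A.indices, A.data,
        alpha=0.15, eps=1e-6, seedids=[0], maxsteps=1000)
    print(xids_1, vals_1)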
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def pagerank(matrix, bias, d=0.85):\n n = matrix.shape[0]\n rank = 0\n new_rank = np.array([1.0 / n] * n)\n for i in range(0,200):\n print \"iteration: \"+str(i)\n rank = new_rank\n new_rank = np.array([(1.0-d)/n] * n) + d * np.dot(matrix, rank)\n# new_rank = (1.0-d) * bias + d * np.dot(matrix, rank)\n # new_rank = [(((1.0-d) / n) +\n # d * sum((rank[i] * link) for i, link in enumerate(row)))\n # for row in matrix]\n if(has_converged(rank, new_rank)):\n break\n return new_rank",
"def plot_pagerank(net, label, outpath):\n _, pagerank_values = networkit_util.get_pagerank(net, label, outpath)\n unique_value, unique_cnt = np.unique(pagerank_values, return_counts=True)\n unique_cumcnt = np.cumsum(unique_cnt) / sum(unique_cnt)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(unique_value, unique_cumcnt, 'r.')\n # ax.set_title('Cumulative distribution of pagerank of nodes')\n ax.set_xlabel('pagerank value v')\n ax.set_ylabel('p(x <= v)')\n plt.savefig(outpath + label + \"-pagerank-distribution.eps\")",
"def add_pagerank(self):\n\n query = '''\n MATCH (c1:)-[r:INTERACTS]->(c2:)\n RETURN c1.name, c2.name, r.weight AS weight\n '''\n ig = IGraph.TupleList(self.graph.run(query), weights=True)\n\n pg = ig.pagerank()\n pgvs = []\n for p in zip(ig.vs, pg):\n print(p)\n pgvs.append({\"name\": p[0][\"name\"], \"pg\": p[1]})\n\n write_clusters_query = '''\n UNWIND {nodes} AS n\n MATCH (c:) WHERE c.name = n.name\n SET c.pagerank = n.pg\n '''\n\n self.graph.run(write_clusters_query, nodes=pgvs)",
"def pagerank(self):\n\n raise NotImplementedError",
"def iterate_pagerank(corpus, damping_factor):\n pagerank = dict()\n newrank = dict()\n\n for page in corpus:\n pagerank[page] = 1 / len(corpus)\n\n repeat = True\n\n while repeat:\n\n for page in pagerank:\n\n summation = 0\n\n links = get_links(corpus, page)\n\n if not links:\n for p in corpus:\n summation += pagerank[p] / len(corpus)\n\n for link in links:\n summation += pagerank[link] / len(corpus[link])\n\n newrank[page] = (1 - damping_factor) / len(corpus) + damping_factor * summation\n\n repeat = False\n\n for page in pagerank:\n if abs(newrank[page] - pagerank[page]) > 0.001:\n repeat = True\n\n pagerank[page] = newrank[page]\n\n return pagerank",
"def calculate_pagerank(binary_cosine_matrix, d):\n pr = 0\n try:\n A = (binary_cosine_matrix.T / binary_cosine_matrix.sum(axis=1)).T\n B = np.zeros((len(A), len(A))) + 1/len(A)\n M = (1-d)*A + d*B\n v = np.linalg.eig(M.T)[1][:, 0].astype(float) #throws complex warning\n pr = v / sum(v)\n except:\n pass\n return pr",
"def iterate_pagerank(corpus, damping_factor):\n pagerank = dict()\n new_pagerank = dict()\n repeat = True\n\n # Assigning each page a rank of 1 / N, where N is the total number of pages in the corpus.\n for page in corpus:\n pagerank[page] = 1 / len(corpus)\n\n # Repeatedly calculate new rank values based on all of the current rank values\n while repeat:\n for page in corpus:\n\n # Probability that we followed a link from a page i to current page.\n followed = 0.0\n for linked_page in linked_pages(corpus, page):\n followed += pagerank[linked_page] / number_of_links(corpus, linked_page)\n\n new_pagerank[page] = (1 - damping_factor) / len(corpus) + damping_factor * followed\n\n repeat = False\n\n # Repeat the process if new PageRank value changes by more than 0.001\n for page in pagerank:\n if not isclose(pagerank[page], new_pagerank[page], abs_tol=0.001):\n repeat = True\n\n # Assigning new values to the previous ones\n pagerank[page] = new_pagerank[page]\n\n # Sorting pagerank by keys\n pagerank = dict(sorted(pagerank.items()))\n\n return pagerank",
"def dfs(x, p, step):\n disc[x] = low[x] = step\n for xx in graph.get(x, []): \n if disc[xx] == inf: \n step += 1\n dfs(xx, x, step)\n low[x] = min(low[x], low[xx])\n if low[xx] > disc[x]: ans.append([x, xx]) # bridge\n elif xx != p: low[x] = min(low[x], disc[xx])",
"def compute_pagerank(urls, inlinks, outlinks, b=.85, iters=20):\n ###TODO\n pagerank = defaultdict(lambda: 1.0)\n N = len(urls)\n for url in urls:\n pagerank[url]\n for i in range(0, iters):\n for url in urls:\n result_sum = 0.0\n for link in inlinks[url]:\n if len(outlinks[link]) is not 0:\n result_sum += (pagerank[link] / len(outlinks[link]))\n pagerank[url] = (1/N) * (1-b) + (b * result_sum)\n return pagerank\n pass",
"def iterate_pagerank(corpus, damping_factor):\n\n PageRank = dict()\n accuracy = 0.001\n\n # initialise the rank of each page with 1 / N\n\n N = len(corpus)\n\n for page in corpus:\n PageRank[page] = 1 / N\n\n # for each page, use the PageRank formula to calculate the ranks\n\n while True:\n\n count = 0\n\n for page in corpus:\n\n new_rank = (1 - damping_factor) / N\n change = 0\n\n for new_page in corpus:\n\n if page in corpus[new_page]:\n NumLinks = len(corpus[new_page])\n change = change + (PageRank[new_page] / NumLinks)\n\n change = damping_factor * change\n new_rank += change\n\n if abs(PageRank[page] - new_rank) < accuracy:\n count += 1\n\n PageRank[page] = new_rank\n\n if count == N:\n break\n\n return PageRank",
"def pagerank(self, alpha=0.85):\n try:\n self.logger.info('正在计算网络的PageRank值 ...')\n return self.order_dict(nx.pagerank(self.G, alpha=alpha), index=1)\n except Exception as e:\n self.logger.error(\"计算失败,原因:{0}\".format(e))",
"def iterate_pagerank(corpus, damping_factor):\n pagerank = dict()\n\n #Modifying the corpus, to account the fact that,\n #\"A page that has no links at all should be interpreted as having one link for every page in the corpus\"\n modif_corpus = copy.deepcopy(corpus)\n for pg in modif_corpus.keys():\n if len(modif_corpus[pg]) == 0:\n modif_corpus[pg] = list(corpus.keys())\n\n #Assigning each page a rank of 1 / N, where N is the total number of pages in the corpus\n for pg in modif_corpus.keys():\n pagerank[pg] = 1/len(modif_corpus.keys())\n\n convergence_check = False\n while not convergence_check:\n old_pagerank = copy.deepcopy(pagerank)\n\n for page in pagerank.keys():\n sigma = 0\n for pg in pagerank.keys():\n if page in modif_corpus[pg]: #Finding all the pages that link to 'page'\n sigma += pagerank[pg]/len(modif_corpus[pg])\n \n pagerank[page] = (1-damping_factor)/len(modif_corpus.keys()) + damping_factor*sigma\n\n #Making sure the new values differ more than 0.001\n convergence_check = True\n for pg in modif_corpus.keys():\n if abs(pagerank[pg] - old_pagerank[pg]) > 0.001:\n convergence_check = False\n break\n\n return pagerank",
"def _personalized_pagerank(seed, W):\n restart_prob = RESTART\n r = restart_prob * seed\n s_ovr = np.copy(r)\n for i in range(MAX_ITER):\n r_new = (1. - restart_prob) * (W.transpose().dot(r))\n s_ovr = s_ovr + r_new\n delta = abs(r_new.sum())\n if delta < 1e-5: break\n r = r_new\n return np.squeeze(s_ovr)",
"def iterate_pagerank(corpus, damping_factor):\n # Set initial values to choosing a page randomly\n corpus_length = len(corpus)\n prev_iterated_page_rank = defaultdict(lambda: 1/corpus_length)\n max_abs_difference = inf\n while max_abs_difference > 0.001:\n max_iter_diff = -inf\n next_iterated_page_rank = defaultdict(lambda: (1 - damping_factor) / corpus_length)\n for prev_page in corpus:\n if not corpus[prev_page]:\n print(\"hi\")\n for next_page in corpus:\n next_iterated_page_rank[next_page] += prev_iterated_page_rank[prev_page] * 1/len(corpus)\n else:\n print(\"hi2\")\n for next_page in corpus[prev_page]:\n next_iterated_page_rank[next_page] += damping_factor * prev_iterated_page_rank[prev_page]/len(corpus[prev_page])\n\n for prev_prob, next_prob in zip(prev_iterated_page_rank.values(), next_iterated_page_rank.values()):\n max_iter_diff = max(max_iter_diff, abs(next_prob-prev_prob))\n max_abs_difference = min(max_abs_difference, max_iter_diff)\n\n prev_iterated_page_rank = next_iterated_page_rank.copy()\n assert abs(sum(prev_iterated_page_rank.values())-1) < 10**-2\n assert abs(sum(next_iterated_page_rank.values()) - 1) < 10**-2\n return prev_iterated_page_rank",
"def approximate_PageRank_weighted(G,\n ref_nodes,\n iterations: int = 100000,\n alpha: float = 0.15,\n rho: float = 1.0e-6): \n \n #print(\"Uses the weighted Andersen Chung and Lang (ACL) Algorithm.\")\n n = G.adjacency_matrix.shape[0]\n (length,xids,values) = aclpagerank_weighted_cpp(n,G.ai,G.aj,G.adjacency_matrix.data,alpha,rho,\n ref_nodes,iterations)\n #p = np.zeros(n)\n #p[xids] = values\n\n return xids, values",
"def pagerank(dict_prefs, nitems, eps_search=20):\n prefs_mat=np.zeros((nitems,nitems))\n for k,v in dict_prefs.items():\n if v==0:\n continue\n elif v>0:\n prefs_mat[k[1],k[0]]+=v\n else:\n prefs_mat[k[0],k[1]]-=v\n prefs_mat_orig=prefs_mat.copy()\n eps_grid=list(.5**np.logspace(0,1,eps_search))\n best=-10^5\n best_order=None\n \n for eps in eps_grid:\n prefs_mat=prefs_mat_orig.copy()\n for i in range(nitems):\n prefs_mat[:,i]+=eps\n tot=np.sum(prefs_mat[:,i])\n prefs_mat[:,i]=prefs_mat[:,i]/tot\n\n \n pr=np.ones((nitems,1))/nitems\n for i in range(30):\n pr=prefs_mat.dot(pr)\n lst_pagerank=list(np.argsort(pr.reshape(-1)))\n score_this_order=eval_ordering(lst_pagerank,dict_prefs)\n if score_this_order>best:\n best=score_this_order\n best_order=deepcopy(lst_pagerank)\n return best_order",
"def run_pagerank(tag_table, unique_tags, targetNum):\n id2tag = {i: tag for i, tag in enumerate(unique_tags)}\n tag2id = {tag: i for i, tag in id2tag.items()}\n\n co_occurence = dict()\n for tag_list in tag_table:\n indices = [tag2id[tag] for tag in tag_list]\n for pair in combinations(indices, 2):\n co_occurence[pair] = co_occurence.get(pair, 0) + 1\n\n nodes = range(len(unique_tags))\n edges = [(pair[0], pair[1], weight) for pair, weight in co_occurence.items()]\n G = nx.Graph()\n G.add_nodes_from(nodes)\n G.add_weighted_edges_from(edges)\n pr = nx.pagerank(G, weight='weight')\n\n top_indices, top_scores = zip(*sorted(pr.items(), key=operator.itemgetter(1), reverse=True)[:targetNum])\n topTags = [id2tag[i] for i in top_indices]\n return topTags",
"def calculate_PageRank(outlinks):\n\n\t# Damping factor\n\td = 0.85\n\n\t# size of the matrix\n\tsize = outlinks.shape[0]\n\n\t# list to hold page ranks\n\tpage_ranks = [1 for i in range(size)]\n\n\t# Calculating the out degree of each node and storing in a list\n\tout_degrees = []\n\tfor i in range(size):\n\t\tsums = 0\n\t\tfor j in range(size):\n\t\t\tsums += outlinks[i][j]\n\t\tout_degrees.append(sums)\n\n\t#print(out_degrees)\n\n\tprint('Initial page ranks:')\n\tprint(page_ranks)\n\n\tfor _ in range(100):\n\t\tfor j in range(size):\n\t\t\ttemp = 0\n\t\t\tfor i in range(size):\n\t\t\t\tif outlinks[i][j] == 1:\n\t\t\t\t\ttemp += page_ranks[i] / out_degrees[i]\n\t\t\ttemp *= d\n\t\t\ttemp += (1-d)\n\t\t\tpage_ranks[j] = round(temp, 4)\n\n\treturn page_ranks",
"def _setup_dc_pagerank_weighted_args(vtypestr, itypestr, fun):\n float_type, vtype, itype, ctypes_vtype, ctypes_itype, bool_type = standard_types(\n vtypestr, itypestr)\n\n fun.restype = ctypes_vtype\n fun.argtypes = [ctypes_vtype, # n - number of vertices\n ndpointer(ctypes_itype, flags=\"C_CONTIGUOUS\"), # ai - indptr vector\n ndpointer(ctypes_vtype, flags=\"C_CONTIGUOUS\"), # aj - index vector\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"), # a - data vector\n ctypes_vtype, # offset - 0 or 1\n ctypes.c_double, # alpha\n ctypes.c_double, # epsilon\n ndpointer(ctypes_vtype, flags=\"C_CONTIGUOUS\"), # seedids vector\n ctypes_vtype, # nseedids\n ctypes_vtype, # maxsteps\n ndpointer(ctypes_vtype, flags=\"C_CONTIGUOUS\"), # xids_1 - output vector\n ndpointer(ctypes_vtype, flags=\"C_CONTIGUOUS\"), # xids_2 - output vector\n ctypes_vtype, # xlength - length of output vector\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"), # values_1 - first output value vector\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"), # values_2 - second output value vector\n bool_type] # simplify - whether to simplify\n\n return fun",
"def nextDim(leaf, args):\n x = args['xsectionNum'] # number of subregions to partition for the leaf\n lb = leaf.lb # the lower bound of the leaf region\n ub = leaf.ub # the upper bound of the leaf region\n dimDiff = [] # store the diff value (e.g. max-min of dominantion count) for partition direction\n dimX = len(lb) # the number of dimension\n visitedPoints = leaf.visitedPoints() # all the visited points in the tree\n pool = leaf.pool # the visited points in this leaf\n #determine the deminsion of point's objective\n dim = len(leaf.problem.objectives) \n #recorganize all the visited points together into one sorted array\n _visitedPoints = utils.dictToSortedNumpyArray(visitedPoints,dim) \n # calculate the domination count for each point in this pool\n dominantionCount = {} \n for key in pool:\n _p = np.array([pool[key].mean])\n dominantionCount[key] = _cutils.calDominationCount(_p, _visitedPoints, len(_p))[1][0]\n # enumerate all the possible feasible next dimension to partition\n feasibleDim = feasible(leaf, x)\n for dimID in feasibleDim:\n # determine the partition unit distance \n unit = (ub[dimID] - lb[dimID]) / x\n # initialize the promisingIndex for each subregion based on xsection\n promisingIndex = [] \n for i in range(x):\n _lb, _ub = [np.array([]) for _ in range(2)]\n # change the lower and upper bound value at dimID for subRegion x\n for j in range(dimX):\n _lb = np.append(_lb, lb[j] + (unit * i) * (j == dimID))\n _ub = np.append(_ub, ub[j] - (unit * (x - i - 1)) * (j == dimID))\n # calculate the promisingIndex for each subregions\n poolDominantionCount = [np.nan] # in case no points in this subregion\n for key in pool:\n p = pool[key] \n if all(_lb <= p.x) and all(p.x < _ub):\n poolDominantionCount.append(dominantionCount[key])\n # calculate the promising index in this subregion \n promisingIndex.append(np.nanmin(poolDominantionCount))\n # calculate the dimDiff for the dimension dimID \n diff = np.nanmax(promisingIndex) - np.nanmin(promisingIndex)\n dimDiff.append(diff)\n # select the dimension with largest dimDiff value as next dimension to partition\n if dimDiff:\n maxDiff = np.nanmax(dimDiff)\n else:\n maxDiff = np.nan\n if not(np.isnan(maxDiff)):\n candidate = [feasibleDim[i] for i in range(len(feasibleDim)) if dimDiff[i] == maxDiff] \n dim = candidate[np.random.randint(0,len(candidate))]\n elif dimDiff:\n dim = feasibleDim[np.random.randint(0,len(feasibleDim))]\n else:\n dim = np.random.randint(0, dimX)\n #print('Select Dim %d with maxDiff %.2f, range %.2f at level %d' % (dim, maxDiff, ub[dim]-lb[dim],leaf.level))\n return dim",
"def pagerank(self, limit=20):\r\n\t\tfor urlid in self.url_ids:\r\n\t\t\tself.all_scores[urlid] = 1.0\r\n\r\n\t\tfor i in range(limit):\r\n\t\t\tfor urlid in self.url_ids:\r\n\t\t\t\tscore = self.all_scores[urlid]\r\n\t\t\t\tfor fromid in self.from_ids[urlid]:\r\n\t\t\t\t\tscore += self.all_scores[fromid] / \\\r\n\t\t\t\t\t\t\t (len(self.from_ids[fromid])+len(self.to_ids[fromid]))\r\n\t\t\t\tscore *= 0.85\r\n\t\t\t\tscore += 0.15\r\n\t\t\t\tself.all_scores[urlid] = score\r\n\t\tself.save_pr()",
"def getPageRank(elistPath, alpha, maxiter, tolerance):\n\n adjGraph = AdjGraph(elistPath, separator=\" \")\n graph = adjGraph.SNAPGraph\n\n preference_vector = []\n for node in graph.Nodes():\n id = node.GetId()\n if (id % 4) == 0:\n preference_vector.append(id)\n\n pageRank, convIter, time = biasedPageRank(\n adjGraph, preference_vector=preference_vector, alpha=alpha,\n max_iterations=maxiter, tolerance=tolerance)\n\n writeCentrality(\"pagerank.txt\", pageRank)\n return pageRank, convIter, time",
"def griewank(x):\n nopt = np.size(x)\n # if (nopt == 2) | (nopt == 10):\n xx = x\n if nopt == 2:\n d = 200.0\n else:\n d = 4000.0\n\n u1 = 0.0\n u2 = 1.0\n for j in range(nopt):\n u1 = u1 + xx[j]**2 / d\n u2 = u2 * np.cos(xx[j] / np.sqrt(float(j + 1)))\n\n f = u1 - u2 + 1\n return f",
"def dfs(image, rdx, cdx, dim):\n # N, NE, E, SE, S, SW, W, NW\n directions = [(-1, 0), (-1, 1), (0, 1), (1, 1),\n (1, 0), (1, -1), (0, -1), (-1, -1)]\n\n stack = [(rdx, cdx)]\n while stack:\n x, y = stack.pop()\n if image[x][y] == '1':\n image[x][y] = '0'\n for h, v in directions:\n if 0 <= x + h < dim and 0 <= y + v < dim:\n stack.append((x + h, y + v))",
"def page_rank(self):\n print(\"Generating the matrix...\")\n G = self.graph\n p = FOLLOW\n column_sum = np.sum(G, axis=0, dtype=np.float64)\n \n n = self.graph.shape[0]\n\n D = sps.lil_matrix((n, n))\n D.setdiag(np.divide(1.,column_sum, where=column_sum != 0, out=np.zeros_like(column_sum)).reshape(-1, 1))\n self.diagonal = D\n print(\"created diagonal\")\n e = np.ones((n, 1))\n I = sps.eye(n)\n x = sps.linalg.spsolve((I - p*G*D), e)\n x = x/np.sum(x)\n\n self.page_rank = x\n\n return x",
"def dpsearch(points,k):\n\t#M = k\n\tpoints = np.sort(points,axis=0)\n\tL = len(points)\n\tM = k\n\tT = list(np.zeros(M+1,dtype='int'))\n\tT[0] = 0\t#first threshold is by default always set to index 0 in trellis graph.\n\tT[M] = L \t#last threshold is by default always set to last number in input points.\n\ttrellis_value = np.full((M+1,L+1),np.inf)\n\ttrellis_backpointer = np.full((M+1,L+1),np.inf)\n\n\t# Stage 1: m=1\t\n\tfor l in range(1,L-M+2):\n\t\ttrellis_value[1][l] = ((l-0)/float(L))*np.var(points[0:l])\n\t\ttrellis_backpointer[1][l] = 0\n\n\t\n\tif(M>2):\n\t\t# Stage 2: m=2 to m=M-1\n\t\tfor m in range(2,M):\n\t\t\tfor l in range(m,L-M+m+1):\n\t\t\t\t#finding optimal path\n\t\t\t\tJ_min = np.inf\n\t\t\t\tJ_temp = np.inf\n\t\t\t\tfor i in range(m-1,l):\n\t\t\t\t\tJ_temp = trellis_value[m-1][i] + ((l-i)/float(L))*np.var(points[i:l])\n\t\t\t\t\tif J_temp < J_min:\n\t\t\t\t\t\tJ_min = J_temp\n\t\t\t\t\t\tptr = i\n\t\t\t\t\n\t\t\t\ttrellis_value[m][l],trellis_backpointer[m][l] = J_min,ptr\n\t\t\t\t\n\n\t# Stage 3: m=M\n\tm = M\n\tl = L\n\t#finding optimal path\n\tJ_min = np.inf\n\tJ_temp = np.inf\n\tfor i in range(m-1,l):\n\t\tJ_temp = trellis_value[m-1][i] + ((l-i)/float(L))*np.var(points[i:l])\n\t\tif J_temp < J_min:\n\t\t\tJ_min = J_temp\n\t\t\tptr = i\n\n\t\n\ttrellis_value[M][L] = J_min\n\ttrellis_backpointer[M][L] = ptr\n\t\n\t\n\t# Backtracking\n\tl = L\n\tm = M\n\twhile m>=2:\n\t\tT[m-1] = int(trellis_backpointer[m][l])\n\t\tl = int(trellis_backpointer[m][l])\n\t\tm = m - 1\n\n\t#Assign cluster labels\n\tlabels = np.full(len(points),0)\n\tj = T[0]\n\tcounter = 0\n\tfor i in range(1,k+1):\n\t\tlabels[j:T[i]] = counter\n\t\tj = T[i]\n\t\tcounter += 1\n\n\n\treturn labels,T",
"def nDCG_at(prediction, target, k=10):\n pass",
"def DCG_p(results, topic, p):\n rel = lambda label: gold_topic_labels[topic][label]\n top_p = results[:p]\n dcg = 0\n for idx, label in enumerate(top_p):\n rank = idx + 1\n if idx == 0:\n dcg += rel(label)\n continue\n dcg += rel(label)/ math.log(rank,2)\n return dcg",
"def ndcg_at_k(r, k, method=0):\n dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)\n if not dcg_max:\n return 0.\n return dcg_at_k(r, k, method) / dcg_max",
"def ndcg_at_k(r, k, method=0):\n dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)\n if not dcg_max:\n return 0.\n return dcg_at_k(r, k, method) / dcg_max"
] |
[
"0.6244098",
"0.6042641",
"0.59871113",
"0.58468974",
"0.5497095",
"0.545267",
"0.53842545",
"0.53722274",
"0.5371284",
"0.5293458",
"0.5290599",
"0.52026564",
"0.5152519",
"0.51313794",
"0.5127473",
"0.50595695",
"0.5054545",
"0.502223",
"0.50221354",
"0.50027007",
"0.49226266",
"0.49176693",
"0.4892914",
"0.48275387",
"0.48062378",
"0.47721896",
"0.46719447",
"0.46700418",
"0.46305668",
"0.46305668"
] |
0.7260255
|
0
|
Returns the root node.
|
def root(self) -> Node:
return self._root
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_root_node(self):\n return self.root",
"def root_node(self):\n return self.process_tree",
"def get_root(self):\n return self.root",
"def get_root(self):\n return self._root",
"def get_root(self):\n return self.__root",
"def get_root(self):\n return self.__root",
"def get_root(self):\n return self._root",
"def getRoot(self):\n return self.__root",
"def root(self):\n return self._root()",
"def root(self):\n return self._root",
"def root(self):\n return self._root",
"def root(self):\n return self._root",
"def root(self):\n\t\treturn self._root",
"def root(self):\n\t\treturn self._root",
"def root(self):\n return self._root",
"def root(self):\n return self[0]",
"def root(self):\n return self.__root__",
"def root(self):\n return self._make_position(self._root)",
"def root(self):\n return self._make_position(self._root)",
"def root(self):\n return self._make_position(self._root)",
"def root(self):\n return self._make_position(self._root)",
"def root(self):\n return self._make_position(self._root)",
"def root(self):\n return self._make_position(self._root)",
"def root(tree):\n\n return tree[0]",
"def root(self):\n\n return self.parent.root",
"def root(self):\n return self._make_position(self._root)",
"def root(self):\n return self if self.is_root else self.__parent.root",
"def root(self):\n if self.has_multiple_roots:\n raise ValueError(\"More than one root exists. Use tree.roots instead\")\n return self.left_root",
"def getRoot(self):\n n = self\n while n.parent is not None:\n n = n.parent\n return n",
"def get_root(self) -> object:"
] |
[
"0.910351",
"0.8290668",
"0.8217083",
"0.8173495",
"0.8160994",
"0.8160994",
"0.8012295",
"0.79397935",
"0.7906321",
"0.7891344",
"0.7891344",
"0.7891344",
"0.78236896",
"0.78236896",
"0.778467",
"0.76256216",
"0.74740446",
"0.74515945",
"0.74515945",
"0.74515945",
"0.74515945",
"0.74515945",
"0.74515945",
"0.7387487",
"0.7385491",
"0.73288995",
"0.73237556",
"0.7245616",
"0.7230988",
"0.7166136"
] |
0.8593523
|
1
|
Returns whether a given node is the root of the tree.
|
def is_root(self, node: Node) -> bool:
return node == self._root
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_root(self, node: object) -> bool:\n if node == self.root:\n return True\n else:\n return False",
"def is_root(self, node):\n self._validate_node(node)\n return node._index == 0",
"def is_root(self):\n return (self.__type & NODE_ROOT) == NODE_ROOT",
"def is_leaf(self, node: object) -> bool:\n if node.left == None and node.right == None:\n return True\n else:\n return False",
"def is_root(self):\n return self.root in [-1, self]",
"def is_root(self, p):\n return p == self.root()",
"def is_root(self, p):\n return self.root() == 0",
"def is_root(self, n):\n return n == self._root",
"def is_root(self) -> bool:\n return self.parent_id is None",
"def is_root(self):\n return self._parent == None",
"def is_root(self, p):\n return self.root() == p",
"def is_root(self, p):\n return self.root() == p",
"def is_root(self, p):\n return self.root() == p",
"def has_single_root(self):\n root = self.left_root\n if root != NULL and self.right_sib(root) == NULL:\n return True\n return False",
"def is_root(self):\n return True",
"def is_root(self):\n return True",
"def is_root(self,p):\n return self.root() == p",
"def is_root(self):\n return self.parent == None",
"def is_leaf_node(self):\n if self is None:\n return False\n if self.left is None and self.right is None:\n return True\n return False",
"def is_root(self):\n return self.parent_id is None",
"def isRoot(self):\n\n # If I don't have a parent, I am root\n return not self._father",
"def is_root(self, p):\n return self.root() == p",
"def is root(self, p):\n return self.root() == p",
"def is_leaf(self, node):\n self._validate_node(node)\n if self.num_children(node) == 0:\n return True\n return False",
"def isLeaf(self, treeNode):\n if not treeNode:\n return False\n\n if treeNode.left == None and treeNode.right == None:\n return True\n else:\n return False",
"def leaf(self, node: object) -> bool:\n if node.left is None and node.right is None:\n return True\n\n else:\n return False",
"def is_root(self):\n return not self.parent",
"def _check_root(self):\n calc_root = MerkleTree(self._transactions).get_root()\n return calc_root == self.header[\"root\"]",
"def is_root(self):\n return self.unpack_word(0x2) & 0x0004 > 0",
"def is_leaf(node):\n return node.children == {}"
] |
[
"0.87300867",
"0.86150634",
"0.8057707",
"0.773191",
"0.76230824",
"0.76225185",
"0.75887793",
"0.75870013",
"0.7549237",
"0.75374883",
"0.7516718",
"0.7516718",
"0.7516718",
"0.75107855",
"0.7507924",
"0.7507924",
"0.7460199",
"0.74530596",
"0.7420673",
"0.7382724",
"0.73562056",
"0.73398864",
"0.73329747",
"0.73075414",
"0.7215433",
"0.71643436",
"0.7123385",
"0.71092534",
"0.7106734",
"0.70537794"
] |
0.87255406
|
1
|
Loads the SNLI dataset. Returns the train, dev and test sets in a dictionary, each as a tuple containing the trees and the labels.
|
def load_snli(path, terminals_only=True, binary=False):
splits = {}
for split in ["train", "dev", "test"]:
data = list(
read_snli(split, path, terminals_only=terminals_only)
)
premises = [premise for premise, _, _ in data]
hypotheses = [hypothesis for _, hypothesis, _ in data]
labels = [lbl for _, _, lbl in data]
splits[split] = (premises, hypotheses, labels)
return splits
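# Editorial usage note (sketch, not from the original source): the path below is
# hypothetical; each split unpacks into (premises, hypotheses, labels).
if __name__ == "__main__":
    splits = load_snli("path/to/snli")
    train_premises, train_hypotheses, train_labels = splits["train"]
    print(len(train_premises), len(train_labels))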
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def import_datasets(snli_path):\n print('extract data from snli directory..')\n train = dict(); dev = dict(); test = dict()\n gold_labels = {'entailment': 0, 'neutral': 1, 'contradiction': 2}\n\n for file_type in ['train', 'dev', 'test']:\n path = os.path.join(snli_path, 'snli_1.0_{}.jsonl'.format(file_type))\n with open(path) as file:\n data = [json.loads(line) for line in file]\n eval(file_type)['premise'] = [entry['sentence1'] for entry in data if entry['gold_label'] != '-']\n eval(file_type)['hypothesis'] = [entry['sentence2'] for entry in data if entry['gold_label'] != '-']\n g_labels = np.array([gold_labels[entry['gold_label']] for entry in data if entry['gold_label'] != '-'])\n eval(file_type)['label'] = g_labels\n print('extraction process was finished successfully!')\n return train, dev, test",
"def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels",
"def load_mnist (path, kind='train'):\n\n\tdf_dict = {}\n\n\tfor kind in ['train', 'test']:\n\t\tlabels_path = os.path.join(path, \"%s-labels-idx1-ubyte\" % kind)\n\t\tpixels_path = os.path.join(path, \"%s-images-idx3-ubyte\" % kind)\n\n\t\t# read 'magic' number (description of the file protocol) \n\t\t# and number of lines 'n'\n\t\twith open(labels_path, 'rb') as labels_h:\n\t\t\tmagic, n = struct.unpack('>II', labels_h.read(8))\n\t\t\tlabels = np.fromfile(labels_h, dtype=np.uint8)\n\n\t\twith open(pixels_path, 'rb') as pixels_h:\n\t\t\tmagic, num, rows, cols = struct.unpack('>IIII', pixels_h.read(16))\n\t\t\tpixels = np.fromfile(pixels_h, dtype=np.uint8).reshape(len(labels), 784)\n\n\t\tdf = pd.DataFrame(pixels)\n\t\tdf['target'] = labels\n\n\t\tif kind == 'train':\n\t\t\tX = df.iloc[:,:-1]\n\t\t\ty = df['target']\n\t\t\tX_train, X_validation, y_train, y_validation = train_test_split(\n\t\t\t\tX, \n\t\t\t\ty, \n\t\t\t\ttrain_size=50000, \n\t\t\t\ttest_size=10000, \n\t\t\t\trandom_state=0\n\t\t\t\t)\n\t\t\tX_train = X_train.applymap(_normalize_features)\n\t\t\tX_validation = X_validation.applymap(_normalize_features)\n\t\t\t#print(X_train.loc[0].tolist())\n\t\t\tdf_train = pd.concat([X_train, y_train], axis=1, sort=False)\n\t\t\tdf_validation = pd.concat([X_validation, y_validation], axis=1, sort=False)\n\t\t\tdf_dict[\"train\"] = df_train\n\t\t\tdf_dict[\"validation\"] = df_validation\n\t\telse:\n\t\t\tdf.loc[:, df.columns != 'target'].applymap(_normalize_features)\n\t\t\tdf_dict[\"test\"] = df \n\n\treturn df_dict",
"def load_datasets():\n from .dataset import num_classes, image_size\n\n train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)\n\n train_folders = maybe_extract(train_filename)\n test_folders = maybe_extract(test_filename)\n if not (len(train_folders) == len(test_folders) == num_classes):\n raise Exception('Expected %d folders, one per class. Found %d and %d instead.' % (\n num_classes, len(train_folders), len(test_folders)))\n print(\"Dataset folders: %s, %s\" % (train_folders, test_folders))\n\n # load datasets\n train_datasets = maybe_pickle(train_folders, 45000, image_size)\n test_datasets = maybe_pickle(test_folders, 1800, image_size)\n\n return train_datasets, test_datasets",
"def load_dataset():\n temp = gzip.open('mnist.pkl.gz')\n train, val , test = pickle.load(temp,encoding='latin1')\n temp.close()\n train_inp = [np.reshape(x, (784,1)) for x in train[0]]\n train_outp = [one_hot(y) for y in train[1]]\n training_data = zip(train_inp, train_outp)\n validation_inp = [np.reshape(x, (784, 1)) for x in val[0]]\n validation_data = zip(validation_inp, val[1])\n test_inp = [np.reshape(x, (784, 1)) for x in test[0]]\n test_data = zip(test_inp, test[1])\n return (training_data,validation_data,test_data)",
"def load_nist(block=0):\n dataset_directory = os.path.join(root_directory,'NIST19')\n if(block == 2):\n bitmaps = np.load(dataset_directory+'/train_nist19_bitmaps_lim10000.npz')['bitmaps']\n targets = np.load(dataset_directory+'/train_nist19_targets_lim10000.npz')['targets']\n names = np.load(dataset_directory+'/train_nist19_names_lim10000.npz')['names']\n else:\n if(block == 1):\n bitmaps = np.load(dataset_directory+'/train_nist19_bitmaps_1000.npz')['bitmaps']\n targets = np.load(dataset_directory+'/train_nist19_targets_1000.npz')['targets']\n names = np.load(dataset_directory+'/train_nist19_names_1000.npz')['names']\n else:\n directory_class = os.path.join(dataset_directory,'by_class')\n list_of_class = os.listdir(directory_class)\n list_of_class.sort()\n bitmaps = [];\n targets = [];\n names = [];\n id_class = 0;\n\n for a_class in list_of_class:\n directory_subclass = os.path.join(directory_class,a_class)\n list_of_subclass = [fn for fn in os.listdir(directory_subclass) if (\"train\" in fn)]\n sorted(list_of_subclass)\n for folder_of_images in list_of_subclass:\n directory_of_images = os.path.join(directory_subclass,folder_of_images)\n list_of_images = os.listdir(directory_of_images)\n sorted(list_of_images)\n for filename in list_of_images:\n img = np.where(io.imread(os.path.join(directory_of_images,filename),True) > 0, 0, 1)\n bitmaps.append(Bitmap(img.astype(np.int8)))\n targets.append(id_class);\n id_class += 1\n names.append(binascii.unhexlify(a_class).decode('UTF-8'));\n\n\n return {'bitmaps': bitmaps, 'targets': targets, 'names':names}",
"def load_data():\n data = gzip.open(\"mnist.pkl.gz\", \"rb\")\n train_set, valid_set, test_set = cPickle.load(data)\n data.close()\n\n # Combine validation and train folds to recreate the master 60k set.\n new_images = numpy.concatenate((train_set[0], valid_set[0]))\n new_labels = numpy.concatenate((train_set[1], valid_set[1]))\n\n train_set = (new_images, new_labels)\n \n return (train_set, test_set)",
"def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test",
"def fetch_stanford_labels():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, 'stanford_hardi')\n baseurl = 'https://stacks.stanford.edu/file/druid:yx282xq2090/'\n\n files = {}\n files[\"aparc-reduced.nii.gz\"] = (baseurl + \"aparc-reduced.nii.gz\",\n '742de90090d06e687ce486f680f6d71a')\n files[\"label-info.txt\"] = (baseurl + \"label_info.txt\",\n '39db9f0f5e173d7a2c2e51b07d5d711b')\n fetch_data(files, folder)\n return files, folder",
"def _read_train_datas(self):\r\n with open(self.train_label_path, 'r') as fb:\r\n lines = fb.readlines()\r\n return self._parse_raw_labels(lines)",
"def _load_training_and_test_sets(normalize):\n class_labels = []\n test_labels = []\n norm = None\n if normalize == True:\n norm = loading.get_normalize_vector()\n\n for i in range(0, 10):\n [training, test] = loading.load_number_set(i, 0.7, norm_vector=norm)\n labels = [str(i)] * training.shape[0]\n tlabels = [str(i)] * test.shape[0]\n if i == 0:\n train_points = training\n test_points = test\n else:\n train_points = np.concatenate((train_points, training), axis = 0)\n test_points = np.concatenate((test_points, test), axis = 0)\n class_labels.extend(labels)\n test_labels.extend(tlabels)\n\n return train_points, test_points, class_labels, test_labels",
"def load_data(label_name='Species'):\n\n # Create a local copy of the training set.\n train_path = tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1],\n origin=TRAIN_URL)\n # train_path now holds the pathname: (训练集和测试集路径) ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.(解析)\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. Assign the remainder of the DataFrame to train_features\n print(\"-\")\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)",
"def load_data():\n f = gzip.open('../data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = cPickle.load(f)\n f.close()\n return (training_data, validation_data, test_data)",
"def load_MNIST_data():\n mnist = input_data.read_data_sets('data', one_hot=True)\n return {'train': mnist.train.images,\n 'validation': mnist.validation.images,\n 'test': mnist.test.images}",
"def load_data():\n\n base = 'http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/'\n fname = 'BSR_bsds500.tgz'\n\n path = get_file(fname,\n origin = base + fname,\n cache_dir = DEFAULT_CACHE_DIR,\n dset_name = 'bsds500')\n\n f = tarfile.open(path)\n\n train_data = []\n test_data = []\n for name in f.getnames():\n if name.startswith('BSR/BSDS500/data/images/train/'):\n try:\n fp = f.extractfile(name)\n img = imageio.imread(fp)\n train_data.append(img)\n except:\n continue\n elif name.startswith('BSR/BSDS500/data/images/test/'):\n try:\n fp = f.extractfile(name)\n img = skimage.io.imread(fp)\n test_data.append(img)\n except:\n continue\n\n\n return (train_data, test_data)",
"def load_data():\r\n f = gzip.open('mnist.pkl.gz', 'rb')\r\n training_data, validation_data, test_data = pickle.load(f,encoding='bytes')\r\n f.close()\r\n return (training_data, validation_data, test_data)",
"def load_training_set():\n global training_set\n f = gzip.open('mnist.pkl.gz', 'rb')\n train, valid, test = cPickle.load(f)\n [training_set, training_labels] = train\n [validation_set, validation_labels] = valid\n [testing_set, testing_labels] = test\n training_set = np.concatenate((training_set, validation_set))\n f.close()\n np.random.shuffle(training_set)",
"def load_data_pkl(self):\n pkl_name = '{}/data/mini-imagenet-cache-{}.pkl'.format(self.root_dir, self.split)\n print('Loading pkl dataset: {} '.format(pkl_name))\n\n try:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f, encoding='bytes')\n image_data = data[b'image_data']\n class_dict = data[b'class_dict']\n except:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f)\n image_data = data['image_data']\n class_dict = data['class_dict']\n\n print(data.keys(), image_data.shape, class_dict.keys())\n data_classes = sorted(class_dict.keys()) # sorted to keep the order\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n idxs = class_dict[cls] \n np.random.RandomState(self.seed).shuffle(idxs) # fix the seed to keep label,unlabel fixed\n dataset_l[i] = image_data[idxs[0:self.n_label]]\n if self.n_unlabel>0:\n dataset_u[i] = image_data[idxs[self.n_label:]]\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes\n\n del image_data",
"def load_data():\n prefix = 'mnist_data/'\n train_data = np.load(prefix + 'mnist_train_images.npy')\n train_labels = np.load(prefix + 'mnist_train_labels.npy')\n val_data = np.load(prefix + 'mnist_validation_images.npy')\n val_labels = np.load(prefix + 'mnist_validation_labels.npy')\n test_data = np.load(prefix + 'mnist_test_images.npy')\n test_labels = np.load(prefix + 'mnist_test_labels.npy')\n assert train_data.shape == (55000, 784) and train_labels.shape == (55000, 10)\n assert val_data.shape == (5000, 784) and val_labels.shape == (5000, 10)\n assert test_data.shape == (10000, 784) and test_labels.shape == (10000, 10)\n return train_data, train_labels, val_data, val_labels, test_data, test_labels",
"def load_for_sklearn(self):\n\n labels = [] # string labels\n examples = [] # examples as strings\n\n # document number -> label mapping\n doc2label = n2b2.map_patients_to_labels(\n self.xml_dir,\n self.category)\n\n for f in os.listdir(self.cui_dir):\n doc_id = f.split('.')[0]\n file_path = os.path.join(self.cui_dir, f)\n file_as_string = open(file_path).read()\n\n string_label = doc2label[doc_id]\n int_label = LABEL2INT[string_label]\n labels.append(int_label)\n examples.append(file_as_string)\n\n return examples, labels",
"def load_data(limit=None, load_test=False):\n data_snli_dev, data_snli_train = None, None\n data_multinli_train, data_multinli_dev = None, None\n data_mli_train, data_mli_dev = None, None\n data_mli_test = None\n\n if SNLI_TRAIN_FILENAME.exists():\n data_snli_train = read_nli_data(SNLI_TRAIN_FILENAME, set_genre='snli', limit=limit)\n data_snli_dev = read_nli_data(SNLI_DEV_FILENAME, set_genre='snli', limit=limit)\n logging.info('SNLI: train - %s, dev - %s', data_snli_train.shape, data_snli_dev.shape)\n\n if MULTINLI_TRAIN_FILENAME.exists():\n data_multinli_train = read_nli_data(MULTINLI_TRAIN_FILENAME, limit=limit)\n data_multinli_dev = read_nli_data(MULTINLI_DEV_FILENAME, limit=limit)\n logging.info('MultiNLI: train - %s, dev - %s', data_multinli_train.shape, data_multinli_dev.shape)\n\n if MLI_TRAIN_FILENAME.exists():\n data_mli_train = read_nli_data(MLI_TRAIN_FILENAME, set_genre='clinical', limit=limit)\n data_mli_dev = read_nli_data(MLI_DEV_FILENAME, set_genre='clinical', limit=limit)\n logging.info('MLI: train - %s, dev - %s', data_mli_train.shape, data_mli_dev.shape)\n\n if load_test:\n data_mli_test = read_nli_data(MLI_TEST_FILENAME, set_genre='clinical', limit=limit)\n\n # Drop columns that are presented not in all datasets\n columns_to_drop = ['captionID', 'promptID', 'annotator_labels']\n for d in [data_snli_dev, data_snli_train, data_multinli_train, data_multinli_dev, data_mli_train, data_mli_dev,\n data_mli_test]:\n if d is not None:\n d.drop(columns_to_drop, axis=1, inplace=True, errors='ignore')\n\n # concatenate all data together\n data_train = pd.concat([data_snli_train, data_multinli_train, data_mli_train], axis=0)\n data_dev = pd.concat([data_snli_dev, data_multinli_dev, data_mli_dev], axis=0)\n\n data_train.set_index('genre', inplace=True)\n data_dev.set_index('genre', inplace=True)\n\n if data_mli_test is not None:\n data_mli_test.set_index('genre', inplace=True)\n\n if not load_test:\n return data_dev, data_train\n else:\n return data_dev, data_train, data_mli_test",
"def load_mnist(dataset=\"training\", digits=np.arange(10), path=\".\"):\n\n if dataset == \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset == \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n flbl = open(fname_lbl, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n lbl = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(fname_img, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = pyarray(\"B\", fimg.read())\n fimg.close()\n\n ind = [ k for k in range(size) if lbl[k] in digits ]\n N = len(ind)\n\n images = zeros((N, rows, cols), dtype=uint8)\n labels = zeros((N, 1), dtype=int8)\n for i in range(len(ind)):\n images[i] = array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))\n labels[i] = lbl[ind[i]]\n\n return images, labels",
"def load_mnist_data(nr_nodes, nr_classes, allocation, subset, batch_size):\n train_loader_list = []\n test_loader_list = []\n\n train = LoadData('MNIST', True, subset)\n test = LoadData('MNIST', False, False)\n\n train_data, train_targets = train.split(allocation, nr_nodes, class_per_node=nr_classes)\n for data, targets in zip(train_data, train_targets):\n train_dataset = CustomDataset(data, targets)\n train_loader_list.append(DataLoader(train_dataset, batch_size=batch_size, shuffle=True))\n\n test_data, test_targets = test.split('uniform', nr_nodes)\n for data, targets in zip(test_data, test_targets):\n test_dataset = CustomDataset(data, targets)\n test_loader_list.append(DataLoader(test_dataset, batch_size=batch_size, shuffle=False))\n\n return train_loader_list, test_loader_list",
"def load_data():\n f = gzip.open('../data/mnist.pkl.gz', mode='rb')\n\n # NOTE: I get errors when I don't use encoding='latin1' because of Python 2 vs Python 3 compatibility issues\n # training_data, validation_data, test_data = pickle.load(f, encoding='latin1')\n training_data, validation_data, test_data = pickle.load(f)\n\n f.close()\n\n return training_data, validation_data, test_data",
"def load_mnist(data_filename, batch_size):\n\n train_data, valid_data, test_data = unpickle_mnist(data_filename)\n\n train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(valid_data, batch_size=batch_size, shuffle=True)\n test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)\n\n return train_loader, valid_loader, test_loader",
"def load_data(self):\n params = self.params\n catg = params.data_category\n langs = ['en', params.target_lang]\n data = {lang: {splt: {} for splt in (['train', 'valid'] if lang == 'en' else ['test'])} for lang in langs}\n clf_dataset_path = {\n lang: {\n splt: {\n 'x': os.path.join(params.data_path, '%s_%s_%s_x.bpe.pth' % (splt, lang, catg)),\n 'y': os.path.join(params.data_path, '%s_%s_%s_y.txt' % (splt, lang, catg)),\n } for splt in (['train', 'valid'] if lang == 'en' else ['test'])\n } for lang in langs\n }\n for splt in ['train', 'valid', 'test']:\n for lang in langs:\n if lang == 'en' and splt in ['train', 'valid'] or lang != 'en' and splt == 'test':\n # load data and dictionary\n data1 = load_binarized(clf_dataset_path[lang][splt]['x'], params)\n data['dico'] = data.get('dico', data1['dico'])\n # set dictionary parameters\n set_dico_parameters(params, data, data1['dico'])\n # create dataset\n data[lang][splt]['x'] = Dataset(data1['sentences'], data1['positions'], params)\n # load labels\n with open(clf_dataset_path[lang][splt]['y'], 'r') as f:\n labels = [int(l) for l in f]\n data[lang][splt]['y'] = torch.LongTensor(labels)\n assert len(data[lang][splt]['x']) == len(data[lang][splt]['y'])\n\n return data",
"def load_dataset():\n\n\n train_dd_loader = DailyDialogLoader(PATH_TO_TRAIN_DATA, load=False)\n train_dataloader = DataLoader(train_dd_loader, batch_size=16, shuffle=True, num_workers=0,\n collate_fn=PadCollate())\n\n test_dd_loader = DailyDialogLoader(PATH_TO_TEST_DATA, load=True)\n test_dataloader = DataLoader(test_dd_loader, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=PadCollate())\n\n assert train_dd_loader.vocabulary.n_words == test_dd_loader.vocabulary.n_words\n\n return train_dd_loader, train_dataloader, test_dataloader",
"def load_data_and_labels_without_shuffled():\n # Load data from files\n with codecs.open('./data/train_pos.txt', 'r+', 'utf-8') as f:\n train_pos = f.readlines()\n with codecs.open('./data/dev_pos.txt', 'r+', 'utf-8') as f:\n dev_pos = f.readlines()\n with codecs.open('./data/train_neg.txt', 'r+', 'utf-8') as f:\n train_neg = f.readlines()\n with codecs.open('./data/dev_neg.txt', 'r+', 'utf-8') as f:\n dev_neg = f.readlines()\n\n positive_examples1 = []\n positive_examples2 = []\n negative_examples1 = []\n negative_examples2 = []\n\n for i in train_pos:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n positive_examples1.append(item1)\n positive_examples2.append(item2)\n\n for i in train_neg:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n negative_examples1.append(item1)\n negative_examples2.append(item2)\n\n # Split by words\n x_text_train1 = positive_examples1 + negative_examples1\n x_text_train2 = positive_examples2 + negative_examples2\n\n positive_dev1 = []\n positive_dev2 = []\n negative_dev1 = []\n negative_dev2 = []\n\n for i in dev_pos:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n positive_dev1.append(item1)\n positive_dev2.append(item2)\n\n for i in dev_neg:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n negative_dev1.append(item1)\n negative_dev2.append(item2)\n\n x_text_dev1 = positive_dev1 + negative_dev1\n x_text_dev2 = positive_dev2 + negative_dev2\n\n # Generate labels\n train_positive_labels = [[0, 1] for _ in train_pos]\n dev_positive_labels = [[0, 1] for _ in dev_pos]\n train_negative_labels = [[1, 0] for _ in train_neg]\n dev_negative_labels = [[1, 0] for _ in dev_neg]\n y_train = np.concatenate([train_positive_labels, train_negative_labels], 0)\n y_dev = np.concatenate([dev_positive_labels, dev_negative_labels], 0)\n\n return [x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev]",
"def import_scikit_data(sralist):\n scikit_data_dict = {}\n for dataset in sralist:\n with open(TMP_DIR+'scikit_'+dataset+'/ALL_genes_profile_dict.json', 'r') as scikit_data:\n scikit_data_dict[dataset] = [json.load(scikit_data)]\n return scikit_data_dict",
"def load_data(self):\n if self.args.dataset == 'mnist':\n # Transform and store MNIST images for each dataset split\n MNIST_data = load_MNIST_data()\n return {data_split: transform_mnist_data(x=MNIST_data[data_split],\n transform_mode=self.args.transformation,\n max_translation=self.args.max_translation,\n sigma=self.args.sigma)\n for data_split in ['train', 'validation', 'test']}\n\n elif self.args.dataset == 'norb':\n raise NotImplementedError('NORB interface still not implemented.')\n\n else:\n raise ValueError('{} is not a valid dataset.'.format(self.args.dataset))"
] |
[
"0.7550551",
"0.69392824",
"0.6362481",
"0.62841177",
"0.6205859",
"0.6195043",
"0.61733544",
"0.6165103",
"0.6156814",
"0.61507016",
"0.6137038",
"0.61166465",
"0.60398614",
"0.60327417",
"0.602376",
"0.59936607",
"0.59875435",
"0.5966797",
"0.5961108",
"0.5958861",
"0.5932016",
"0.59258527",
"0.59030765",
"0.5867514",
"0.5851886",
"0.58506405",
"0.5834311",
"0.5826885",
"0.5822242",
"0.5819639"
] |
0.70486546
|
1
|
A naive implementation of the numerical gradient of f at x. f should be a function that takes a single argument; x is the point (numpy array) to evaluate the gradient at.
|
def eval_numerical_gradient(f,x):
grad = np.zeros(x.shape)
h = 0.0001
# iterate over all indexes in x
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
old_value = x[ix]
x[ix] = old_value + h
fxh_left = f(x)
x[ix] = old_value - h
fxh_right = f(x)
x[ix] = old_value
        # compute the partial derivative with the centered-difference formula
        grad[ix] = (fxh_left - fxh_right) / (2 * h)
        it.iternext()
return grad
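# Editorial check (sketch): for f(x) = sum(x**2) the analytic gradient is 2*x,
# so the centered-difference estimate above should match it closely.
if __name__ == "__main__":
    import numpy as np
    x0 = np.array([1.0, -2.0, 3.0])
    num_grad = eval_numerical_gradient(lambda v: np.sum(v ** 2), x0)
    assert np.allclose(num_grad, 2 * x0, atol=1e-5)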
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def numerical_gradient(f, x: np.ndarray):\n h = 1e-4\n grad = np.zeros_like(x)\n for i in range(x.size):\n tmp_val = x.flat[i]\n x.flat[i] = tmp_val + h\n fxh1 = f(x)\n\n x.flat[i] = tmp_val - h\n fxh2 = f(x)\n grad.flat[i] = (fxh1 - fxh2) / (2 * h)\n x.flat[i] = tmp_val\n return grad",
"def eval_numerical_gradient(f, x, verbose = True, h = 0.00001):\n fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x) # iterate over all indexese in x\n it = np.nditer(x, flags = ['multi_index'], op_flags = ['readwrite'])\n while not it.finished:\n # evaluate function at x+h\n ix = it.multi_index\n oldval = x[ix]\n x[ix] = oldval + h # increment by h\n fxph = f(x) # evaluate f(x+h)\n x[ix] = oldval - h\n fxmh = f(x) # evaluate f(x-h)\n x[ix] = oldval # restore\n \n #compute the partial derivative with centered fromula.\n grad[ix] = (fxph - fxmh) / (2 * h)\n if verbose:\n print(ix, grad[ix])\n it.iternext()\n return grad",
"def eval_numerical_gradient(f, x, verbose=True, h=0.00001):\n\n fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n\n # evaluate function at x+h\n ix = it.multi_index\n oldval = x[ix]\n x[ix] = oldval + h # increment by h\n fxph = f(x) # evalute f(x + h)\n x[ix] = oldval - h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = oldval # restore\n\n # compute the partial derivative with centered formula\n grad[ix] = (fxph - fxmh) / (2 * h) # the slope\n if verbose:\n print(ix, grad[ix])\n it.iternext() # step to next dimension\n\n return grad",
"def gradient(f, x, s=_DEFAULT_STEP):\n x = np.asarray(x)\n n = len(x)\n e = np.eye(n)\n\n forw = np.zeros(n)\n for i in range(n):\n forw[i] = f(x + s*e[i])\n\n g = (forw - f(x)) / s\n return g",
"def approx_gradient(f, x, epsilon, args=()):\n n = x.shape[0]\n npts = 1\n if len(x.shape) > 1:\n npts = x.shape[1]\n g = np.zeros((n, npts))\n ei = np.zeros(n)\n for i in range(n):\n ei[i] = .5 * epsilon\n g[i, :] = (f((x.T + ei).T, *args) - f((x.T - ei).T, *args)) / epsilon\n ei[i] = 0\n return g.squeeze()",
"def approx_gradient(f, x, epsilon):\n n = len(x)\n g = np.zeros(n)\n ei = np.zeros(n)\n for i in range(n):\n ei[i] = .5 * epsilon\n g[i] = (f(x + ei) - f(x - ei)) / epsilon\n ei[i] = 0\n return g",
"def gradient(self, x):\n pass",
"def check_gradient(f, x, delta=1e-5, tol=1e-4):\n\n assert isinstance(x, np.ndarray)\n assert x.dtype == np.float\n \n orig_x = x.copy()\n #print('check_g, orig_x befor',orig_x)\n #print('check_g, x befor',x)\n #print('befor first pass in grad check')\n fx, analytic_grad = f(x)\n #print('after first pass in grad check')\n #print('check_g, orig_x after',orig_x)\n #print('check_g, x.shape',x.shape)\n #print('func',f(x)[0])\n #print('fx=',fx,'analityc_grad=',analytic_grad)\n \n assert np.all(np.isclose(orig_x, x, tol)), \"Functions shouldn't modify input variables\"\n\n assert analytic_grad.shape == x.shape\n #print('analitical grad.shape',analytic_grad.shape)\n analytic_grad = analytic_grad.copy()\n\n # We will go through every dimension of x and compute numeric\n # derivative for it\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n #print('it.shape=',it.shape)\n while not it.finished:\n ix = it.multi_index\n #print('ix',ix)\n #print('x[ix]',x[ix])\n analytic_grad_at_ix = analytic_grad[ix]\n #print('analitical_grad-at_ix',analytic_grad_at_ix)\n orig_x = x.copy()\n #print('orig_x',orig_x)\n #print('x.shape befor delta',x.shape)\n orig_x[ix]+=delta\n #print('x.shape after delta',x.shape)\n #print('orig_x[ix] delta +',orig_x[ix])\n fx_plus=f(orig_x)[0]\n #fx_plus=fx_plus_full[ix[0]]\n #print('fx__plus',fx_plus)\n orig_x = x.copy()\n orig_x[ix]-=delta\n #print('orig_x[ix] delta -',orig_x[ix])\n fx_minus=f(orig_x)[0]\n #print('fx_minus',fx_minus)\n \n divider=2*delta\n #print('divider',divider)\n #numeric_grad_at_ix = np.divide((fx_plus-fx_minus),divider)\n numeric_grad_at_ix = (fx_plus-fx_minus)/divider\n #print('numeric_grad_at_ix',numeric_grad_at_ix)\n #print('fx(ix)', fx[ix])\n\n # TODO compute value of numeric gradient of f to idx\n \n if not np.isclose(numeric_grad_at_ix, analytic_grad_at_ix, tol):\n print(\"Gradients are different at %s. Analytic: %2.5f, Numeric: %2.5f\" % (ix, analytic_grad_at_ix, numeric_grad_at_ix))\n return False\n\n it.iternext()\n\n print(\"Gradient check passed!\")\n return True",
"def calc_gradient_at(self, x: np.ndarray) -> np.ndarray:\n\n if self.gradient_f:\n # if the problem has knowledge about the gradient, use it directly without approximation\n return self.gradient_f(x)\n\n return gradient_approximation(self.f, x)",
"def eval_numerical_gradient_array(f, x, df, h=1e-5):\n grad = np.zeros_like(x)\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n ix = it.multi_index\n\n oldval = x[ix]\n x[ix] = oldval + h\n pos = f(x).copy()\n x[ix] = oldval - h\n neg = f(x).copy()\n x[ix] = oldval\n\n grad[ix] = np.sum((pos - neg) * df) / (2 * h)\n it.iternext()\n return grad",
"def eval_numerical_gradient_array(f, x, df, h = 1e-5):\n grad = np.zeros_like(x)\n it = np.nditer(x, flags = ['multi_index'], op_flags =['readwrite'])\n while not it.finished:\n ix = it.multi_index\n \n oldval = x[ix]\n x[ix] = oldval + h\n pos = f(x).copy()\n x[ix] = oldval - h\n neg = f(x).copy()\n x[ix] = oldval\n \n grad[ix] = np.sum((pos - neg) * df) / (2 * h)\n it.iternext()\n return grad",
"def gradient(self, x):\n return 0.0",
"def gradient(self, x):\n u = np.asarray([x[0]])\n C = self.C_func(u)\n dC = self.dC_func(u, order=1)\n P = self.P\n numerator = np.sum((C - P) * dC, axis=0)\n denominator = np.sum(np.sum((C - P) ** 2, axis=0) ** (1 / 2))\n if np.abs(denominator) > 0:\n gradient = numerator/denominator\n else:\n gradient = np.asarray(0)[np.newaxis]\n return gradient",
"def gradient(f,h,X):\n # X = list(X)# flip to a list from tuple so we can modify elements\n fx = f(*X) # only need this once\n dX = []\n for i in range(len(X)):\n # Make a vector of Value(X_i, [0 ... 1 ... 0]) with 1 in ith position\n X[i] += h # tweak in dimension i\n y = f(*X)\n X[i] -=h # undo the tweak for next round\n dx = (y - fx)/h\n dX.append(dx)\n return dX",
"def compute_gradient(self, function, arguments):",
"def gradient(cls, x):\n y = Sigmoid.apply(x)\n return np.multiply(y, 1 - y)",
"def gradient(self,i,f):\n\n diff = self.points[f, :] - self.points[i, :]\n gradient = diff[1]/diff[0]\n\n return gradient",
"def check_gradient(f, g, x):\n x = np.asarray(x)\n return np.max(g(x) - gradient(f, x))",
"def dalf(x):\n return grad(alf)(x)",
"def gradFun(self, x):\n tmp = x.reshape(self.inp_shape)\n g = np.ravel(np.asarray(self.calcGrad(np.asarray(tmp,dtype=np.float32)),dtype=np.float64)) + 2*self.alpha*x\n return g",
"def gradient(cls, x):\n return 1 - TanH.apply(x) ** 2",
"def gradientDescent(f, df, x, niter=10):\n\n points = []\n\n for i in xrange(niter):\n point = -dfx\n slope = np.dot(point,-point)\n \n #calculate a\n a = backtracking(f,slope,x,point)\n \n\n #update the search point\n x_k = x + a*p\n points.append(x_k)\n x = x_k\n\n return points",
"def grad(x):\n\n return x - np.arange(1, cfg.n + 1, dtype=np.float_)",
"def gradFun(self, S, x):",
"def gradient(self, x):\n g = self._grad(\n time_series=self.observed_data,\n a=x[0],\n b=x[1],\n c=x[2],\n sigma=self.sigma\n )\n return g",
"def gradientFunction(theta, X, y):\n y = y[:, 0]\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad /= m\n return grad",
"def gradient_descent(f, df, x, sigma=0.5, epsilon=1e-8):\n pass",
"def gradient(cls, x):\n return np.multiply(1, x > 0)",
"def gradient(cls, x):\n return np.minimum(-1. < x, x < 1.) * 1.",
"def gradientFunctionReg(theta, X, y, Lambda):\n m = len(y) # number of training examples\n grad = np.zeros(theta.shape[0])\n theta = np.transpose(theta)\n sum_1 = 0\n X = X.values\n y = y.values\n #calcuate the theta_0 \n# ====================== YOUR CODE HERE ======================\n# Instructions: Compute the gradient of a particular choice of theta.\n# Compute the partial derivatives and set grad to the partial\n# derivatives of the cost w.r.t. each parameter in theta\n for i in range(theta.shape[0]):\n if i == 0:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i]\n else:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i] + Lambda*theta[i]\n grad[i] = sum_1/m\n sum_1 = 0\n\n# =============================================================\n\n return grad"
] |
[
"0.84269315",
"0.81413966",
"0.80848795",
"0.79042566",
"0.78174055",
"0.7724996",
"0.77106667",
"0.7672559",
"0.7582516",
"0.75697535",
"0.7540686",
"0.75193363",
"0.7438803",
"0.7428454",
"0.73855096",
"0.72808963",
"0.7259516",
"0.7252474",
"0.7248751",
"0.7218508",
"0.7183805",
"0.7093859",
"0.7083382",
"0.70773673",
"0.70527226",
"0.7051656",
"0.7049455",
"0.7045744",
"0.6990543",
"0.6952391"
] |
0.8318079
|
1
|
Checks that all the user inputs were valid
|
def check_all_user_inputs_valid(self):
self.check_RNN_layers_valid()
self.check_activations_valid()
self.check_embedding_dimensions_valid()
self.check_initialiser_valid()
self.check_y_range_values_valid()
self.check_return_final_seq_only_valid()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def validate(self):\n return (self.check_input_digits_count()\n and self.check_if_input_is_int()\n and self.check_if_input_digits_are_unique())",
"def validate_inputs(self, extra_inputs=[]):\n pass",
"def _check_inputs(self):\n\n self._check_resident_prefs()\n self._check_hospital_prefs()",
"def isInputValid(self, input):\r\n pass",
"def validate():",
"def validate_input(self, *args):\n return",
"def validate(self):\n try:\n self.values.clear()\n self.values.append(int(self.e1.get()))\n except ValueError:\n messagebox.showwarning(\n \"Bad input\",\n \"Illegal values, please try again.\")\n return False\n\n return True",
"def test_check_inputs(\n student_names, project_names, supervisor_names, capacities, seed, clean\n):\n\n *_, game = make_game(\n student_names, project_names, supervisor_names, capacities, seed, clean\n )\n\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"error\")\n game.check_inputs()\n\n assert game.students == game._all_students\n assert game.projects == game._all_projects\n assert game.supervisors == game._all_supervisors",
"def check_inputs(self, inputs):\n if self.debug:\n print(\"Checking inputs\")\n result = True\n for _input in inputs:\n if \"word_\" in _input and inputs[_input] == \"\":\n result = False\n elif \"idiom_\" in _input and inputs[_input] == \"\":\n if \"list\" not in _input:\n result = False\n return result",
"def check_inputs(self, item_data):\n if not item_data[0] in self.data['pizza']:\n print('Error: ' + item_data[0] + ' pizza does not exist.')\n return False\n\n if not item_data[1] in self.data['pizza'][item_data[0]]:\n print('Error: ' + item_data[1] + ' size does not exist for '\n + item_data[0] + ' pizza.')\n return False\n\n for topping in item_data[2]:\n if not topping in self.data['topping']:\n print('Error: Pizza topping ' + topping + ' does not exist.')\n return False\n return True",
"def check_validity(self):",
"def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])",
"def validate_all_fields(self):\n\n if self.validate_byr() and \\\n self.validate_iyr() and \\\n self.validate_eyr() and \\\n self.validate_hgt() and \\\n self.validate_hcl() and \\\n self.validate_ecl() and \\\n self.validate_pid() and \\\n self.validate_cid():\n return True\n return False",
"def __check_validation(input_string):\n if not input_string:\n raise NullInputException(\"Input string should be not empty\")\n if type(input_string) != str:\n raise NonStringInputException(\"Input value should be a string\")\n if len(input_string) >= 200:\n raise TooLongInputException(\"Input string should be less than 200 characters\")\n for i in input_string:\n if not i.isalpha():\n raise NonStringInputException(\"All input value characters should be an alpha\")",
"def validate_input(self):\n self._validate_limits_cols_prefixed()\n self._validate_fillna_cols_prefixed()\n self._validate_ratio_input()",
"def validate_data(values):\n try:\n [int(value) for value in values]\n if len(values) != 6:\n raise ValueError(\n f'Exactly 6 values are required - you provided {len(values)}'\n )\n except ValueError as e:\n print(f'Invalid data entered: {e}, please try again!\\n')\n return False\n\n return True",
"def validate(self):\n\n\tmissing = []\n\tbadcheck = []\n\tfor name, checkfunc, params in self._required:\n\t try:\n\t\targ = self.make_required(name)\n\t\tif checkfunc is not None:\n\t\t if params is not None:\n\t\t\tparams = (self.param_map[name], arg) + params\n\t\t else:\n\t\t\tparams = (self.param_map[name], arg)\n\t\t try:\n\t\t\tapply(checkfunc, params)\n\t\t except ValidationError, msg:\n\t\t\tbadcheck.append(msg)\n\t except ValidationError, args:\n\t\tmissing.append(args)\n\n\tfor (name, checkfunc, params) in self._optional:\n\t tup = self.make_optional(name)\n\t if tup and checkfunc is not None:\n\t\tif params is not None:\n\t\t params = (self.param_map[name], tup) + params\n\t\telse:\n\t\t params = (self.param_map[name], tup)\n\t\ttry:\n\t\t apply(checkfunc, params)\n\t\texcept ValidationError, msg:\n\t\t badcheck.append(msg)\n\n\tif (missing or badcheck) and self.log_errors:\n\t self.log_error(missing, badcheck)\n\n\tif (missing or badcheck) and self.generate_error_page:\n\t self.generate_HTML(missing, badcheck)\n\n\tself.missing = missing\n\tself.badcheck = badcheck\n\n\treturn not (missing or badcheck)",
"def checkUserInput(self):\n prm = []\n err = \"\"\n guess = self.text.text()\n items = str(guess).split(',')\n if len(items) != 2:\n err = \"Two parameters must be given\"\n else:\n for i in range(0, len(items)):\n val = items[i].strip()\n if not isNumber(val):\n err = \"Parameter {0} is not numeric\".format(i + 1)\n break\n if float(val) < 0.0:\n err = \"Parameter {0} is negative\".format(i + 1)\n break\n val = float(val)\n if i == 0 and val > self.yspan:\n err = \"minHeight is too large\"\n break\n if i == 1:\n if val < self.xspan/self.npt or val > self.xspan/2:\n err = \"minWidth is too large\"\n break\n prm.append(val)\n if err:\n errmsg = \"Incorrect input:\\n{0}\".format(err)\n QtWidgets.QMessageBox.warning(self, self.title, errmsg)\n return False\n\n # Store parameters values in global variables for the next call\n global lastfilename, lastmph, lastmpw\n lastfilename = self.pltw.filename\n self.mph = lastmph = prm[0]\n self.mpw = lastmpw = prm[1]\n return True",
"def validate_command_line_input(args):\n valid = False\n if 0 < len(args) <= 4:\n valid = True\n for arg in args:\n if int(arg) > 4:\n valid = False\n break\n else:\n pass\n if valid:\n CRUDStarter.load_operations(args)\n pass\n else:\n CRUDStarter.logger.info(\"Argument maximum acceptable value is 4\")\n else:\n CRUDStarter.logger.info(\"at least One at most Four argument(s) required\")",
"def _is_valid(self):\n self._is_allows_valid()\n self._is_denies_valid()",
"def validate_inputs(name, country, catches):\n while not name:\n name = input('Player name cannot be empty: ')\n\n while not country:\n country = input('Enter a valid country name: ')\n\n while not catches:\n catches = input('Now enter number of catches record: ')\n try: # Once user has input data, try to cast it to integer to verify is not string\n int(catches)\n except ValueError: # if input data is not an integer, print message and clear catches value to keep asking user to enter data\n print('Data given is not a number')\n catches = ''\n\n return name, country, catches",
"def test_with_valid_input(self):\n for dataset_type in ['regular', 'raw', 'REGULAR', 'RAW']:\n try:\n check_dataset_type(dataset_type)\n except ValueError:\n self.fail(\"Dataset {0} should be valid\".format(dataset_type))",
"def check_data(self):\n\n for i in range(len(self.full_ed_lines)):\n if self.full_ed_lines[i].text() != \"\":\n if self.full_ed_lines[i].hasAcceptableInput():\n continue\n else:\n if i == 1:\n self.msg2Statusbar.emit('Неправильный формат версии! Исправьте и повторите действие!')\n elif i == 5:\n self.msg2Statusbar.emit('Неправильная почта! Исправьте и повторите действие!')\n return False\n else:\n self.msg2Statusbar.emit('Не все поля заполнены! Исправьте и повторите действие!')\n return False\n return True",
"def _validateInputs(self):\n if self.args[\"Counties\"] == [] and self.args[\"BBox\"] == None:\n raise Exception(\"Invalid arguments provided. Must provide either a geographical bounding box or a list of counties.\")\n\n if self.args[\"StartDateTime\"] > self.args[\"EndDateTime\"]:\n raise Exception(\"Invalid arguments provided. StartDateTime cannot be after EndDateTime\")",
"def _check_inputs(self):\n\n # Check if attributes exists\n if self.attributes is None:\n print(\"attributes is missing; call set_attributes(new_attributes) to fix this! new_attributes should be a\",\n \"populated dataset of independent variables.\")\n return False\n\n # Check if labels exists\n if self.labels is None:\n print(\"labels is missing; call set_labels(new_labels) to fix this! new_labels should be a populated dataset\",\n \"of dependent variables.\")\n return False\n\n # Check if attributes and labels have same number of rows (samples)\n if self.attributes.shape[0] != self.labels.shape[0]:\n print(\"attributes and labels don't have the same number of rows. Make sure the number of samples in each\",\n \"dataset matches!\")\n return False\n\n # Type-checking for fit_intercept, normalize, and copy_X isn't needed; these can accept truthy/falsy values\n\n # Check if n_jobs is an integer or None\n if self.n_jobs is not None and not isinstance(self.n_jobs, int):\n print(\"n_jobs must be None or an integer; call set_n_jobs(new_n_jobs) to fix this!\")\n return False\n\n # Check if test_size is a float or None\n if self.test_size is not None and not isinstance(self.test_size, (int, float)):\n print(\"test_size must be None or a number; call set_test_size(new_test_size) to fix this!\")\n return False\n\n return True",
"def _check_validity(self):\n pass",
"def validate_args(*args: Any) -> bool:\n\n return len(args) == 4 and Item.validate_price(args[2]) and Entity.validate_discount(args[3])",
"def _check_inputlengths(self):\n # Check x and y have more than 1 item, and x and y are equal length\n if not len(self.x) > 1:\n raise ValueError(\"Route input 'x' must contain more than 1 item\")\n\n if not (len(self.y) > 1):\n raise ValueError(\"Route input 'y' must contain more than 1 item\")\n\n if not (len(self.x) == len(self.y)):\n raise ValueError(\"Route inputs 'x' and 'y' must be of equal length\")\n\n # Performs checks on z if not empty\n if self.z is not None:\n for v in self.z.values():\n if not (len(v) == len(self.x)):\n raise ValueError(\"Route input 'z' must be of equal length to 'x' and 'y'\")",
"def test_check_inputs(game):\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\")\n game.check_inputs()\n\n assert game.residents == game._all_residents\n assert game.hospitals == game._all_hospitals",
"def validate_input(**kwargs):\n for name, value in kwargs.items():\n if name == \"x\" or name == \"y\":\n if type(value) != int:\n raise TypeError(\"{} must be an integer\".format(name))\n elif value < 0:\n raise ValueError(\"{} must be >= 0\".format(name))\n else:\n if type(value) != int:\n raise TypeError(\"{} must be an integer\".format(name))\n elif value <= 0:\n raise ValueError(\"{} must be > 0\".format(name))"
] |
[
"0.7220598",
"0.7108561",
"0.70876473",
"0.70693684",
"0.70338637",
"0.70334446",
"0.70019114",
"0.6869092",
"0.68278015",
"0.682422",
"0.6806817",
"0.67754287",
"0.6775213",
"0.67563385",
"0.67295283",
"0.672505",
"0.66668016",
"0.6663835",
"0.6622917",
"0.66160303",
"0.6612179",
"0.65962905",
"0.6581959",
"0.657739",
"0.65623057",
"0.65370363",
"0.6528479",
"0.65274495",
"0.64695585",
"0.6459639"
] |
0.8166545
|
0
|
Checks that layers provided by user are valid
|
def check_RNN_layers_valid(self):
error_msg_layer_type = "First element in a layer specification must be one of {}".format(self.valid_RNN_hidden_layer_types)
error_msg_layer_form = "Layer must be of form [layer_name, hidden_units]"
error_msg_layer_list = "Layers must be provided as a list"
error_msg_output_heads = "Number of output activations must equal number of output heads"
assert isinstance(self.layers_info, list), error_msg_layer_list
all_layers = self.layers_info[:-1]
output_layer = self.layers_info[-1]
assert isinstance(output_layer, list), error_msg_layer_list
if isinstance(output_layer[0], list):
assert len(output_layer) == len(
self.output_activation), error_msg_output_heads
for layer in output_layer:
all_layers.append(layer)
else:
assert not isinstance(self.output_activation, list) or len(self.output_activation) == 1, error_msg_output_heads
all_layers.append(output_layer)
rest_must_be_linear = False
for layer in all_layers:
assert isinstance(layer, list), "Each layer must be a list"
assert isinstance(layer[0], str), error_msg_layer_type
layer_type_name = layer[0].lower()
assert layer_type_name in self.valid_RNN_hidden_layer_types, "Layer name {} not valid, use one of {}".format(
layer_type_name, self.valid_RNN_hidden_layer_types)
assert isinstance(layer[1], int), error_msg_layer_form
assert layer[1] > 0, "Must have hidden_units >= 1"
assert len(layer) == 2, error_msg_layer_form
if rest_must_be_linear: assert layer[0].lower() == "linear", "If have linear layers then they must come at end"
if layer_type_name == "linear": rest_must_be_linear = True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_layers(self, layer_param, params, permitted_layers, mandatory):\n exception = None\n\n requested_layers = params.get(layer_param)\n if requested_layers:\n requested_layers = requested_layers.split(',')\n for layer in requested_layers:\n # allow only permitted layers\n if layer and not layer.startswith('EXTERNAL_WMS:') and layer not in permitted_layers:\n exception = {\n 'code': \"LayerNotDefined\",\n 'message': (\n 'Layer \"%s\" does not exist or is not permitted'\n % layer\n )\n }\n break\n elif mandatory:\n # mandatory layers param is missing or blank\n exception = {\n 'code': \"MissingParameterValue\",\n 'message': (\n '%s is mandatory for %s operation'\n % (layer_param, params.get('REQUEST'))\n )\n }\n\n return exception",
"def check_layer(layer1, layer2, values=False):\n def check(name):\n assert check_shape(layer1, layer2, name)\n if values:\n assert check_values(layer1, layer2, name)\n\n assert type(layer1) is type(layer2)\n if hasattr(layer1, 'input_shape'):\n assert layer1.input_shape == layer2.input_shape\n if hasattr(layer2, 'output_shape'):\n assert layer1.output_shape == layer2.output_shape\n if isinstance(layer1, (Conv2DLayer, DenseLayer)):\n assert check_shape(layer1, layer2, 'W')\n check('b')\n assert layer1.nonlinearity == layer2.nonlinearity\n if isinstance(layer1, NonlinearityLayer):\n assert layer1.nonlinearity == layer2.nonlinearity\n if isinstance(layer1, BatchNormLayer):\n check('mean')\n check('inv_std')\n check('gamma')\n check('beta')\n if isinstance(layer1, DropoutLayer):\n assert layer1.p == layer2.p\n assert layer1.rescale == layer2.rescale\n assert layer1.shared_axes == layer2.shared_axes\n if isinstance(layer1, ScaleLayer):\n check('scales')\n if isinstance(layer1, BiasLayer):\n check('b')\n if isinstance(layer1, GlobalPoolLayer):\n assert layer1.pool_function is layer2.pool_function\n if isinstance(layer1, Pool2DLayer):\n assert layer1.ignore_border == layer2.ignore_border\n assert layer1.mode == layer2.mode\n assert layer1.pad == layer2.pad\n assert layer1.pool_size == layer2.pool_size\n assert layer1.stride == layer2.stride\n return True",
"def check_model(self):\n layers_map = self.core.query_network(network=self.network,\n device_name=self.device)\n\n unsupported_layers = [\n l for l in self.network.layers.keys() if l not in layers_map\n ]\n\n if (unsupported_layers != []):\n sys.exit(\"Those mention layers in your model are not supported by OpenVino Inference Engine:\" \\\n \" \\n\\t\" + \"\\n\\t\".join(unsupported_layers))",
"def _check_layer_exists(self) -> None:\n layer_exists = (\n self.viewer.layer_dict[self.layer_type][self.layer_name][\n self.layer_subtype\n ][\"layer\"]\n is not None\n )\n # hide button if layer doesn't exist\n if layer_exists:\n self.layout.display = \"block\"\n else:\n self.layout.display = \"none\"\n self.logger.debug(\n (\n \"LayerButtonWidget hidden for %s of %s. \"\n \"(type: %s). Layer doesn't exist.\"\n ),\n self.layer_subtype,\n self.layer_name,\n self.layer_type,\n )",
"def check_data_integrity(layer_objects):\n\n # Link to documentation\n manpage = ('http://risiko_dev.readthedocs.org/en/latest/usage/'\n 'plugins/development.html')\n instructions = ('Please add keywords as <keyword>:<value> pairs '\n ' in the .keywords file. For more information '\n 'please read the sections on impact functions '\n 'and keywords in the manual: %s' % manpage)\n\n # Set default values for projection and geotransform.\n # Enforce DEFAULT (WGS84).\n # Choosing 'None' will use value of first layer.\n reference_projection = Projection(DEFAULT_PROJECTION)\n geotransform = None\n coordinates = None\n\n for layer in layer_objects:\n\n # Check that critical keywords exist and are non empty\n keywords = layer.get_keywords()\n for kw in REQUIRED_KEYWORDS:\n msg = ('Layer %s did not have required keyword \"%s\". '\n '%s' % (layer.name, kw, instructions))\n verify(kw in keywords, msg)\n\n val = keywords[kw]\n msg = ('No value found for keyword \"%s\" in layer %s. '\n '%s' % (kw, layer.name, instructions))\n verify(val, msg)\n\n # Ensure that projection is consistent across all layers\n if reference_projection is None:\n reference_projection = layer.projection\n else:\n msg = ('Projections in input layer %s is not as expected:\\n'\n 'projection: %s\\n'\n 'default: %s'\n '' % (layer, layer.projection, reference_projection))\n verify(reference_projection == layer.projection, msg)\n\n # Ensure that geotransform and dimensions is consistent across\n # all *raster* layers\n if layer.is_raster:\n if geotransform is None:\n geotransform = layer.get_geotransform()\n else:\n msg = ('Geotransforms in input raster layers are different: '\n '%s %s' % (geotransform, layer.get_geotransform()))\n verify(numpy.allclose(geotransform,\n layer.get_geotransform(),\n rtol=1.0e-12), msg)\n\n # In case of vector layers, we just check that they are non-empty\n # FIXME (Ole): Not good as nasty error is raised in cases where\n # there are no buildings in the hazard area. Need to be more graceful\n # See e.g. shakemap dated 20120227190230\n if layer.is_vector:\n msg = ('There are no vector data features. '\n 'Perhaps zoom out or pan to the study area '\n 'and try again')\n verify(len(layer) > 0, msg)\n\n # Check that arrays are aligned.\n\n refname = None\n for layer in layer_objects:\n if layer.is_raster:\n\n if refname is None:\n refname = layer.get_name()\n M = layer.rows\n N = layer.columns\n\n msg = ('Rasters are not aligned!\\n'\n 'Raster %s has %i rows but raster %s has %i rows\\n'\n 'Refer to issue #102' % (layer.get_name(),\n layer.rows,\n refname, M))\n verify(layer.rows == M, msg)\n\n msg = ('Rasters are not aligned!\\n'\n 'Raster %s has %i columns but raster %s has %i columns\\n'\n 'Refer to issue #102' % (layer.get_name(),\n layer.columns,\n refname, N))\n verify(layer.columns == N, msg)",
"def _sanity_check_datasource(ds):\n if len(ds) != 1:\n raise SanityCheckError('GeoJSON should have only 1 layer.')\n # TODO: add more checks",
"def check_layers_count(context, count):\n history = DOCKER_CLIENT.history(context.config.userdata['IMAGE'])\n if len(history) == int(count):\n return True\n\n raise Exception(\"Image does not contain %s layers, current number of layers: %s\" % (count, len(history)), history)",
"def _check(self):\n if not isinstance(self.fc_layers, tuple):\n raise TypeError(f'fc_layers require tuple, get {type(self.fc_layers)}')\n if not isinstance(self.use_dropout, tuple):\n raise TypeError(f'use_dropout require tuple, get {type(self.use_dropout)}')\n if not isinstance(self.drop_prob, tuple):\n raise TypeError(f'drop_prob require tuple, get {type(self.drop_prob)}')\n if not isinstance(self.use_activation, tuple):\n raise TypeError(f'use_activation require tuple, get {type(self.use_activation)}')\n l_fc_layer = len(self.fc_layers)\n l_use_drop = len(self.use_dropout)\n l_drop_prob = len(self.drop_prob)\n l_use_activation = len(self.use_activation)\n pass_check = l_fc_layer >= 2 and l_use_drop < l_fc_layer and l_drop_prob < l_fc_layer and l_use_activation < l_fc_layer and l_drop_prob == l_use_drop\n if not pass_check:\n msg = 'Wrong BaseDiscriminator parameters!'\n raise ValueError(msg)",
"def _validate_cfg(self, ephase, cfg):\n super()._validate_cfg(ephase, cfg)\n\n if (ephase != NNModelPhase.TRAIN): return\n\n if (len(cfg.arch) != len(cfg.lp)):\n raise Exception('Layer purpose string for each layers is not' + \n ' specified. (length of `cfg.arch` != length of `cfg.lp`).')",
"def check_all_user_inputs_valid(self):\n self.check_RNN_layers_valid()\n self.check_activations_valid()\n self.check_embedding_dimensions_valid()\n self.check_initialiser_valid()\n self.check_y_range_values_valid()\n self.check_return_final_seq_only_valid()",
"def test_addon_layer(self):\n layers = [l.getName() for l in registered_layers()]\n self.assertIn('IBriefyPloneLayer', layers)",
"def test_layer_ok(self):\n self.assertTrue(self.vector)",
"def checkRequirements(self):\n # Checkin requirements\n self.runButton.setEnabled(False)\n tbl = self.toBendLayer()\n pl = self.pairsLayer()\n if tbl is None:\n self.displayMsg(\"You must select a vector layer to bend !\", True)\n return\n if pl is None:\n self.displayMsg(\n (\n \"You must select a vector (line) layer \"\n \"which defines the points pairs !\"\n ),\n True,\n )\n return\n if pl is tbl:\n self.displayMsg(\n (\"The layer to bend must be \" \"different from the pairs layer !\"), True\n )\n return\n if not tbl.isEditable():\n self.displayMsg(\"The layer to bend must be in edit mode !\", True)\n return\n if not pl.isEditable() and self.pairsToPinsCheckBox.isChecked():\n self.displayMsg(\n (\n \"The pairs layer must be in edit mode if you want \"\n \"to change pairs to pins !\"\n ),\n True,\n )\n return\n if self.stackedWidget.currentIndex() == 0:\n self.displayMsg(\n \"Impossible to run with an invalid transformation type.\", True\n )\n return\n self.displayMsg(\"Ready to go...\")\n self.runButton.setEnabled(True)",
"def is_valid(self):\n if len(self.exterior) < 3:\n return False\n return self.to_shapely_polygon().is_valid",
"def is_valid_input(geometry, **kwargs):\n return lib.is_valid_input(geometry, **kwargs)",
"def check_shape(layer1, layer2, attr):\n attr1 = getattr(layer1, attr, None)\n attr2 = getattr(layer2, attr, None)\n if not attr1:\n return not attr2\n return all(attr1.shape.eval() == attr2.shape.eval())",
"def validate_mesh(self):\n pass",
"def check_layer_name(field):\n \n hygienize = field.replace(\"\\\"\", \"\")\n layer_name = (hygienize.split(\".\"))[0]\n \n if layer_name in layer_names:\n return True\n return False",
"def check_init(self):\n if self.Nlayer > 1:\n raise Exception(\"Nlayer == 1 currently\")",
"def checkLayersOverride(shape):\n required = []\n connected = [] \n\n # find the shaders / displacement that are required\n layersOverride = cmds.getAttr(\"%s.layersOverride\" % shape)\n if layersOverride:\n layersOverride = json.loads(layersOverride)\n for layer in layersOverride:\n if layersOverride[layer].has_key('shaders'):\n for k in layersOverride[layer]['shaders'].keys():\n if not k in required:\n required.append(k)\n\n shape_connections = cmds.listAttr(\"%s.shaders\" % shape, multi=True)\n\n # go find the connected shaders\n if shape_connections:\n for con in shape_connections:\n connected_shader = cmds.listConnections(\"%s.%s\" % (shape, con))[0]\n connected.append(connected_shader)\n \n port = len(connected)\n for req in required:\n if req not in connected:\n if cmds.objExists(req):\n cmds.connectAttr( req + \".message\", shape + \".shaders[%i]\" % port)\n port += 1\n message = 'Connected %s to %s' % (req, shape)\n MGlobal.displayInfo(message)\n else:\n message = \"Missing shader : %s\" % req\n MGlobal.displayWarning(message)",
"def test_all_layer_types(self):\n\n\t\tdetails = self.watcher.describe()\n\t\t\n\t\tdenseLayers = details[details.layer_type==str(LAYER_TYPE.DENSE)]\n\t\tdenseCount = len(denseLayers)\n\t\tself.assertEqual(denseCount, 3, \"3 dense layers, but {} found\".format(denseCount))\t\t\n\t\n\t\tconv2DLayers = details[details.layer_type==str(LAYER_TYPE.CONV2D)]\n\t\tconv2DCount = len(conv2DLayers)\n\t\tself.assertEqual(conv2DCount, 8, \"8 conv2D layers, but {} found\".format(denseCount))",
"def test_addon_layer_removed(self):\n layers = [l.getName() for l in registered_layers()]\n self.assertNotIn('IBriefyPloneLayer', layers)",
"def test_invalid_filter_shape(self):\r\n self.assertRaises(AssertionError, self.validate,\r\n (3, 2, 8, 8), (4, 3, 5, 5),\r\n 'valid')",
"def check(cls, layer_param, is_check_verbose=False, **kw):\n\t\t# setup\n\t\tif type(layer_param) == ListWrapper or type(layer_param) == tuple: layer_param = list(layer_param)\n\n\t\t# check parameters\n\t\tif not type(layer_param) == list: \n\t\t\tif is_check_verbose: print(\"layer_param must be converatble to list but is type %s\"%type(layer_param))\n\t\t\treturn False\n\n\t\tif not cls._check(layer_param=layer_param, is_check_verbose=is_check_verbose, **kw): \n\t\t\tif is_check_verbose: print(\"checks failed\")\n\t\t\treturn False\n\n\t\t# additional checks\n\t\tif cls.additional_check(layer_param=layer_param, is_check_verbose=is_check_verbose, **kw) is False: \n\t\t\tif is_check_verbose: print(\"additional checks failed\")\n\t\t\treturn False\n\n\t\treturn True",
"def _check_param(grads, images, kernel_name, align_corners, half_pixel_centers):\n if half_pixel_centers:\n if align_corners:\n raise RuntimeError(\"If half_pixel_centers is True, \"\n \"align_corners must be False.\")\n grads_shape = grads.get(\"shape\")\n grads_dtype = grads.get(\"dtype\")\n images_shape = images.get(\"shape\")\n images_dtype = images.get(\"dtype\")\n data_limit = ((1 << 31) - 1) // (4 if images_dtype == \"float32\" else 2)\n util.check_kernel_name(kernel_name)\n util.check_shape_rule(grads_shape)\n util.check_shape_rule(images_shape)\n util.check_shape_size(grads_shape, data_limit)\n util.check_shape_size(images_shape, data_limit)\n check_list_grads = (\"float32\")\n check_list_images = (\"float32\")\n util.check_dtype_rule(grads_dtype.lower(), check_list_grads)\n util.check_dtype_rule(images_dtype.lower(), check_list_images)",
"def test_basic1(self):\r\n self.validate((2, 2, 3, 3), (2, 2, 2, 2), 'valid', verify_grad=False)",
"def validate_image(path):\n problems = False\n # Rasterio env is required to make sure that the gdal bindings are setup correctly.\n with rasterio.Env():\n try:\n dataset = rasterio.open(path)\n except Exception as e:\n logging.error(\"Could not open dataset\", e)\n return False\n\n # Check the bands have sort of sensible values\n if dataset.count != args.bands:\n logging.error(f\"There is not the required number of bands. Expected {args.bands} found {dataset.count}\")\n problems = True\n\n if not data_validation.check_data(dataset):\n problems = True\n\n # Validate coordinate box doesn't cover the origin.\n # Also make sure that it has valid coordinates.\n if dataset.transform:\n top_left = dataset.transform * (0, 0)\n bottom_right = dataset.transform * (dataset.width, dataset.height)\n if np.sign(bottom_right[0]) != np.sign(top_left[0]) and np.sign(bottom_right[1]) != np.sign(top_left[1]):\n logging.error(f\"Data set appears to be over the origin of the coordinate space.\")\n problems = True\n else:\n logging.error(f\"Dataset transform is missing.\")\n problems = True\n return not problems # return true if the image is valid",
"def _validate(self) -> None:\n for box in self.boxes:\n if any(box[0] == s[0] and box[1] == s[1] for s in self.wall_squares):\n raise RuntimeError('In illegal state. Box should not be inside wall.')\n if box[0] == self.current_location[0] and box[1] == self.current_location[1]:\n raise RuntimeError('In illegal state. Box should not be inside player.')\n if any(self.current_location[0] == s[0] and self.current_location[1] == s[1] for s in self.wall_squares):\n raise RuntimeError('In illegal state. Player should not be inside wall.')",
"def test_prism_layer_invalid_surface_reference(\n dummy_layer,\n): # pylint: disable=redefined-outer-name\n coordinates, surface, reference, _ = dummy_layer\n # Surface with wrong shape\n surface_invalid = np.arange(20, dtype=float)\n with pytest.raises(ValueError):\n prism_layer(coordinates, surface_invalid, reference)\n # Reference with wrong shape\n reference_invalid = np.zeros(20)\n surface = np.arange(20, dtype=float).reshape(4, 5)\n with pytest.raises(ValueError):\n prism_layer(coordinates, surface, reference_invalid)",
"def check_models(models):\n layers_list = [get_all_layers(m) for m in models]\n n = len(layers_list[0])\n assert all(n == len(l) for l in layers_list)\n for layers in zip(*layers_list):\n first, *rest = layers\n assert all(check_layer(first, c) for c in rest)"
] |
[
"0.755296",
"0.6597595",
"0.64689934",
"0.6319897",
"0.6265187",
"0.62488556",
"0.6099883",
"0.6094844",
"0.6083564",
"0.60594773",
"0.6057919",
"0.6048783",
"0.60151726",
"0.5988586",
"0.59779537",
"0.59748954",
"0.5968838",
"0.59571236",
"0.59380984",
"0.5897566",
"0.5858149",
"0.58509547",
"0.5845388",
"0.58176965",
"0.5805423",
"0.57956165",
"0.57768124",
"0.575832",
"0.5747491",
"0.57448864"
] |
0.6611211
|
1
|
Puts relevant data through embedding layers and then concatenates the result with the rest of the data ready to then be put through the hidden layers
|
def incorporate_embeddings(self, x):
all_embedded_data = []
for embedding_layer_ix, embedding_var in enumerate(self.columns_of_data_to_be_embedded):
data = x[:, :, embedding_var]
embedded_data = self.embedding_layers[embedding_layer_ix](data)
all_embedded_data.append(embedded_data)
if len(all_embedded_data) > 1: all_embedded_data = Concatenate(axis=2)(all_embedded_data)
else: all_embedded_data = all_embedded_data[0]
non_embedded_columns = [col for col in range(x.shape[2]) if col not in self.columns_of_data_to_be_embedded]
if len(non_embedded_columns) > 0:
x = tf.gather(x, non_embedded_columns, axis=2)
x = Concatenate(axis=2)([tf.dtypes.cast(x, float), all_embedded_data])
else: x = all_embedded_data
return x
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def encode_data(opt, model, data_loader, log_step=10, logging=print, contextual_model=True):\n batch_time = AverageMeter()\n val_logger = LogCollector()\n\n # switch to evaluate mode\n model.val_start(opt)\n\n end = time.time()\n\n # numpy array to keep all the embeddings\n clip_embs, cap_embs = [], []\n vid_embs, para_embs = [], []\n vid_contexts, para_contexts = [], []\n num_clips_total = []\n cur_vid_total = []\n for i, (clips, captions, videos, paragraphs, lengths_clip, lengths_cap, lengths_video, lengths_paragraph, num_clips, num_caps, ind, cur_vid) in enumerate(data_loader):\n # make sure val logger is used\n model.logger = val_logger\n num_clips_total.extend(num_clips)\n\n # compute the embeddings\n clip_emb, cap_emb = model.forward_emb(clips, captions, lengths_clip, lengths_cap)\n vid_context, para_context = model.forward_emb(videos, paragraphs, lengths_video, lengths_paragraph)\n if contextual_model:\n vid_emb, para_emb = model.structure_emb(clip_emb, cap_emb, num_clips, num_caps, vid_context, para_context)\n else:\n vid_emb, para_emb = model.structure_emb(clip_emb, cap_emb, num_clips, num_caps)\n\n\n clip_emb = F.normalize(clip_emb)\n cap_emb = F.normalize(cap_emb)\n vid_emb = F.normalize(vid_emb)\n para_emb = F.normalize(para_emb)\n vid_context = F.normalize(vid_context)\n para_context = F.normalize(para_context)\n\n\n # initialize the numpy arrays given the size of the embeddings\n clip_embs.extend(clip_emb.data.cpu())\n cap_embs.extend(cap_emb.data.cpu())\n vid_embs.extend(vid_emb.data.cpu())\n para_embs.extend(para_emb.data.cpu())\n vid_contexts.extend(vid_context.data.cpu())\n para_contexts.extend(para_context.data.cpu())\n cur_vid_total.extend(cur_vid)\n\n # measure accuracy and record loss\n model.forward_loss(vid_emb, para_emb, 'test')\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % log_step == 0:\n logging('Test: [{0}/{1}]\\t'\n '{e_log}\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n .format(\n i, len(data_loader), batch_time=batch_time,\n e_log=str(model.logger)))\n\n vid_embs = torch.stack(vid_embs, 0)\n para_embs = torch.stack(para_embs, 0)\n vid_embs = vid_embs.numpy()\n para_embs = para_embs.numpy()\n\n clip_embs = torch.stack(clip_embs, 0)\n cap_embs = torch.stack(cap_embs, 0)\n clip_embs = clip_embs.numpy()\n cap_embs = cap_embs.numpy()\n\n vid_contexts = torch.stack(vid_contexts, 0)\n para_contexts = torch.stack(para_contexts, 0)\n vid_contexts = vid_contexts.numpy()\n para_contexts = para_contexts.numpy()\n\n return vid_embs, para_embs, clip_embs, cap_embs, vid_contexts, para_contexts, num_clips_total, cur_vid_total",
"def add_embedding(self):\n ### YOUR CODE HERE (~4-6 lines)\n embeddingTensor = tf.Variable(self.pretrained_embeddings)\n embeddings = tf.nn.embedding_lookup(embeddingTensor, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [-1, self.max_length, Config.n_features * Config.embed_size])\n ### END YOUR CODE\n return embeddings",
"def add_embeddings(self):\n\n with tf.device('/cpu:0'):\n with tf.variable_scope('Embedding_Layer'):\n embeddings = tf.Variable(self.initial_embeddings,name = 'Embeddings')\n self.input_embeddings = tf.nn.embedding_lookup(embeddings, self.inputs_placeholder) #(N,S,D)\n self.question_embeddings = tf.nn.embedding_lookup(embeddings, self.questions_placeholder) #(N,S,D)",
"def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.word_embeddings = tf.get_variable(\n 'word_embeddings',\n shape=(self.term_vocab.size(), self.term_vocab.embed_dim),\n initializer=tf.constant_initializer(self.term_vocab.embeddings),\n trainable=True\n )\n self.p_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.p)\n self.q_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.q)\n\n with tf.variable_scope('char_embedding'):\n self.char_embeddings = tf.get_variable(\n 'char_embeddings',\n shape=(self.char_vocab.size(), self.char_vocab.embed_dim),\n initializer=tf.constant_initializer(self.char_vocab.embeddings),\n trainable=True\n )\n self.p_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.p_char) # [batch, seqlen, max_char_num, embedding_size]\n self.q_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.q_char)\n\n self.p_char_emb = self.cnn_emb(self.p_char_emb, \"p_emb\")\n self.q_char_emb = self.cnn_emb(self.q_char_emb, \"q_emb\")\n '''\n self.p_char_emb = tf.reshape(self.p_char_emb, [-1, self.max_char_num, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [-1, self.max_char_num, self.emb_size])\n\n self.p_char_emb = cnn_layer.conv(self.p_char_emb, self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=None)\n self.q_char_emb = cnn_layer.conv(self.q_char_emb, self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=True)\n\n self.p_char_emb = tf.reduce_max(self.p_char_emb, axis=1) # [batch*seqlen, 1, emb_size]\n self.q_char_emb = tf.reduce_max(self.q_char_emb, axis=1)\n\n batch_size = tf.shape(self.p_word_emb)[0]\n self.p_char_emb = tf.reshape(self.p_char_emb, [batch_size, -1, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [batch_size, -1, self.emb_size])\n\n self.p_char_emb = tf.nn.dropout(self.p_char_emb, 0.95)\n self.q_char_emb = tf.nn.dropout(self.q_char_emb, 0.95)\n '''\n self.p_emb = tf.concat([self.p_word_emb, self.p_char_emb], -1)\n self.q_emb = tf.concat([self.q_word_emb, self.q_char_emb], -1)",
"def _get_embedding(self, data):\n embedding_list = [super()._get_embedding(data)]\n context = data['context']\n for i in range(context.shape[1]):\n embedding_list.append(getattr(self, f'context{i}')(context[:, i:i+1]))\n return torch.cat(embedding_list, dim=1)",
"def encode_data(model, data_loader, opt, log_step=10, logging=print):\n batch_time = AverageMeter()\n val_logger = LogCollector()\n\n # switch to evaluate mode\n model.val_start()\n\n end = time.time()\n\n # np array to keep all the embeddings\n img_embs = None\n cap_embs = None\n cap_lens = None\n\n max_n_word = 0\n for i, (images, input_ids, attention_mask, token_type_ids, lengths, ids) in enumerate(data_loader):\n max_n_word = max(max_n_word, max(lengths))\n\n for i, batch_data in enumerate(data_loader):\n # make sure val logger is used\n model.logger = val_logger\n\n # compute the embeddings\n img_emb, cap_emb, cap_len, ids = model.forward_emb(20, batch_data, volatile=True)\n #print(img_emb)\n if img_embs is None:\n if img_emb.dim() == 3:\n img_embs = np.zeros((len(data_loader.dataset), img_emb.size(1), img_emb.size(2)))\n else:\n img_embs = np.zeros((len(data_loader.dataset), img_emb.size(1)))\n cap_embs = np.zeros((len(data_loader.dataset), cap_emb.size(1)))\n cap_lens = [0] * len(data_loader.dataset)\n # cache embeddings\n ids = batch_data[-1]\n img_embs[ids] = img_emb.data.cpu().numpy().copy()\n cap_embs[ids] = cap_emb.data.cpu().numpy().copy()\n # for j, nid in enumerate(ids):\n # cap_lens[nid] = cap_len[j]\n\n # measure accuracy and record loss\n model.forward_loss(0, img_emb, cap_emb, cap_len, ids)\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % log_step == 0:\n logging('Test: [{0}/{1}]\\t'\n '{e_log}\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n .format(\n i, len(data_loader), batch_time=batch_time,\n e_log=str(model.logger)))\n # del images, input_ids, attention_mask, token_type_ids\n return img_embs, cap_embs, cap_lens",
"def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.pretrained_word_mat = tf.get_variable(\"word_emb_mat\",\n [self.vocab.word_size() - 2, self.vocab.word_embed_dim],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[2:],\n dtype=tf.float32),\n trainable=False)\n self.word_pad_unk_mat = tf.get_variable(\"word_unk_pad\",\n [2, self.pretrained_word_mat.get_shape()[1]],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[:2],\n dtype=tf.float32),\n trainable=True)\n\n self.word_mat = tf.concat([self.word_pad_unk_mat, self.pretrained_word_mat], axis=0)\n self.p_emb = tf.nn.embedding_lookup(self.word_mat, self.p)\n self.q_emb = tf.nn.embedding_lookup(self.word_mat, self.q)",
"def bert_embed(data, bert_model, BATCH_SIZE = 16, MAX_LEN = 128):\n \n dataset = TensorDataset(\n data['input_ids'], data['attention_masks'], data['indices']\n )\n sampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=sampler, batch_size=BATCH_SIZE)\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print('Running on ' + device.type)\n if device.type == 'cuda':\n bert_model.cuda() # put bert in training mode\n \n N = data['indices'].shape[0]\n X = np.zeros((N, 768))\n pos = 0\n for batch in dataloader:\n batch = tuple(t.to(device) for t in batch)\n b_input_ids, b_input_masks, b_indices = batch\n \n with torch.no_grad():\n embeddings = bert_model(\n b_input_ids.view(-1, MAX_LEN),\n b_input_masks.view(-1, MAX_LEN)\n )[2]\n # Take the mean of the last 4 hidden states\n embeddings = (embeddings[-4] + embeddings[-3] + embeddings[-2] + embeddings[-1])/4\n for j, label_ind in enumerate(b_indices.cpu().detach().numpy()):\n X[pos,:] = embeddings[j, int(label_ind), :].cpu().detach().numpy()\n pos+=1\n return X",
"def compute_embeddings(model, opts, data):\n node_embeddings = []\n node_scores = []\n # batch size is 1 for computing embeddings\n dataloader = DataLoader(dataset=data, batch_size=1, shuffle=True, num_workers=16)\n model.eval()\n print(\"computing embeddings...\")\n with torch.no_grad():\n for batch in tqdm(dataloader):\n batch.to(opts.device)\n batch_scores, batch_embeddings = model(batch, compute_embeddings=False)\n node_embeddings.append(batch_embeddings)\n node_scores.append(batch_scores)\n # input('enter for embeddings')\n # print(node_embeddings)\n\n return torch.stack(node_scores), torch.stack(node_embeddings)",
"def forward(self, tgt, m, enc_embed, mask):\n bs = tgt.shape[0]\n enc_embed = enc_embed.permute(2, 0, 1)\n m = m.permute(2, 0, 1)\n tgt = tgt.permute(2, 0, 1)\n dec_embed = self.dec_embed.weight.unsqueeze(1).repeat(1, bs, 1)\n\n out = tgt\n for layer in self.decoder_layers:\n out = layer(out, m, \n pos=enc_embed,\n query_pos=dec_embed\n )\n \n return self.decoder_norm(out).permute(1, 2, 0), dec_embed.permute(1, 2, 0)",
"def add_embedding(self):\n #with tf.variable_scope(\"RNN\", reuse = tf.AUTO_REUSE):\n embeddings = tf.get_variable(\"embeddings\", initializer = self.pretrained_embeddings,trainable=True)\n inputs = self.input_placeholder\n inputs = tf.reshape(inputs, [self.config.batch_size, -1 , self.config.n_features])\n embeddings = tf.nn.embedding_lookup(embeddings, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [self.config.batch_size, -1, self.config.n_features* self.config.embed_size])\n embeddings = tf.cast(embeddings, tf.float32)\n return embeddings",
"def embedding_layer(self):\n with tf.name_scope(\"Embedding_Layer\"):\n V_size = len(self.vocab)\n embed_dim = len(self.embed[0]) \n W_embed_ = tf.get_variable(\"W_embed\",shape=[V_size, embed_dim],trainable=False).assign(np.asarray(self.embed))\n W_analogy_embed_ = tf.get_variable(\"W_analogy_embed\",shape=[V_size, embed_dim],trainable=True,initializer=tf.random_uniform_initializer(minval=-1,maxval=1))\n return W_embed_, W_analogy_embed_",
"def _get_embedding_layer(self, input_data, doc_input_data):\n opts = self._options\n word_embedding = tf.Variable(tf.random_uniform((self.vocab_size, opts.embed_dim), -1.0, 1.0))\n embed = []\n\n temp = tf.zeros([opts.batch_size, opts.embed_dim])\n embed_d = []\n for n in range(opts.sentence_sample):\n temp = tf.add(temp, tf.nn.embedding_lookup(word_embedding, doc_input_data[:, n]))\n embed_d.append(temp)\n\n if opts.concat == 'True':\n combined_embed_vector_length = opts.embed_dim * opts.window_size + opts.embed_dim\n for j in range(opts.window_size):\n embed_w = tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed.append(embed_w)\n embed.append(embed_d)\n else:\n combined_embed_vector_length = opts.embed_dim\n embed_w = tf.zeros([opts.batch_size, opts.embed_dim])\n for j in range(opts.window_size):\n embed_w += tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed_w += embed_d\n embed.append(embed_w)\n\n return tf.concat(embed, 1), word_embedding, combined_embed_vector_length",
"def pretrained_embedding_layer(model,model2,model3, word_to_index,emb_dim_max):\n words_ignored = []\n vocab_len = len(word_to_index) + 1 \n emb_matrix = np.zeros([vocab_len,emb_dim_max])\n \n print(' Total words would be processed : '+str(vocab_len))\n for word, idx in word_to_index.items():\n if word in model:\n emb_matrix[idx,:200] = model[word]\n emb_matrix[idx,200:] = 0\n if word in model2:\n emb_matrix[idx, :100] = model2[word]\n emb_matrix[idx, 100:] = 0\n if word in model3.keys():\n emb_matrix[idx,:] = model3[word]\n else:\n words_ignored.append(word)\n print(str(len(words_ignored))+\" words ignored\")\n print(emb_matrix.shape) \n \n \n embedding_layer = Embedding(vocab_len,emb_dim_max,trainable = True)\n \n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n # Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer,words_ignored",
"def build_image_embeddings(self):\n inception_output = image_embedding.inception_v3(\n self.images,\n trainable=self.train_inception,\n is_training=self.is_training())\n\n # Map inception output onto embedding space.\n with tf.variable_scope(\"image_embedding\") as scope:\n image_embeddings = tf.contrib.layers.fully_connected(\n inputs=inception_output,\n num_outputs=self.config.sentence_embedding_size,\n activation_fn=None,\n weights_initializer=self.initializer,\n biases_initializer=None,\n scope=scope)\n \n if self.mode == \"train\":\n # to avoid overfitting we use dropout for all fully connected layers\n image_embeddings = tf.nn.dropout(image_embeddings, self.config.dropout_keep_prob_encoder)\n\n # Save the embedding size in the graph.\n tf.constant(self.config.sentence_embedding_size, name=\"image_embedding_size\")\n\n self.image_embeddings = image_embeddings",
"def _embed(self):\n batch_size = tf.shape(self.p)[0]\n with tf.variable_scope(\"emb\"):\n with tf.variable_scope(\"char\"):\n pc_emb = tf.reshape(tf.nn.embedding_lookup(\n self.char_embed, self.pc), \n [batch_size * self.max_p_len, self.max_w_len, self.vocab.char_embed_dim])\n qc_emb = tf.reshape(tf.nn.embedding_lookup(\n self.char_embed, self.qc), \n [batch_size * self.max_q_len, self.max_w_len, self.vocab.char_embed_dim])\n cell_fw = tf.contrib.rnn.GRUCell(self.char_hidden_size)\n cell_bw = tf.contrib.rnn.GRUCell(self.char_hidden_size)\n _, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, pc_emb, self.pc_length, dtype=tf.float32)\n pc_emb = tf.concat([state_fw, state_bw], axis=1)\n _, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, qc_emb, self.qc_length, dtype=tf.float32)\n qc_emb = tf.concat([state_fw, state_bw], axis=1)\n pc_emb = tf.reshape(pc_emb, [batch_size, self.max_p_len, 2 * self.char_hidden_size])\n qc_emb = tf.reshape(qc_emb, [batch_size, self.max_q_len, 2 * self.char_hidden_size])\n\n with tf.name_scope(\"word\"):\n p_emb = tf.nn.embedding_lookup(self.word_embed, self.p)\n q_emb = tf.nn.embedding_lookup(self.word_embed, self.q)\n\n with tf.name_scope(\"pos\"):\n p_pos_emb = tf.nn.embedding_lookup(self.pos_embed, self.p_pos)\n q_pos_emb = tf.nn.embedding_lookup(self.pos_embed, self.q_pos)\n \n with tf.name_scope(\"em\"):\n sh = tf.shape(self.p_em)\n resh = [sh[0], sh[1], 1]\n p_em_feat = tf.reshape(tf.cast(self.p_em, dtype=tf.float32), shape=resh)\n\n self.p_emb = tf.concat([p_emb, pc_emb, p_pos_emb, p_em_feat], axis=2)\n self.q_emb = tf.concat([q_emb, qc_emb, q_pos_emb], axis=2)",
"def produce_outputs(self):\n # if self.loaded_aggregated:\n # debug(\"Skippping {} mapping due to preloading\".format(self.base_name))\n # return\n # need to calc term numeric index for aggregation\n\n\n # if self.loaded_preprocessed:\n # debug(\"Skippping {} mapping due to preloading\".format(self.base_name))\n # return\n\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range)\n\n self.embeddings = np.ndarray((0, len(self.term_list)), dtype=np.int32)\n for idx in self.indices.get_train_test():\n texts = Text.get_strings(self.text.data.get_slice(idx))\n vecs = bagger.map_collection(texts, fit=False, transform=True)\n self.embeddings = np.append(self.embeddings, vecs, axis=0)\n del texts\n\n # texts = Text.get_strings(self.text.data.get_slice(test_idx))\n # vec_test = bagger.map_collection(texts, fit=do_fit)\n # del texts\n\n # self.embeddings = np.vstack((vec_train, vec_test))\n\n # self.embeddings = np.append(vec_train, vec_test)\n # self.vector_indices = (np.arange(len(train)), np.arange(len(test)))\n\n # set misc required variables\n self.set_constant_elements_per_instance()",
"def embed_data(X, model, batch_size):\n n_batch = int(np.ceil(len(X) / batch_size))\n return np.vstack(\n [\n model.embedder(\n model.encoder(np.array(X[(i) * batch_size : (i + 1) * batch_size, :]))\n )\n for i in range(n_batch)\n ]\n )",
"def embed_data(\n self,\n data: Dict[str, tf.SparseTensor]\n ) -> Tuple[tf.Tensor, tf.Tensor]:\n\n batch_shape = tf.shape(data[\"t\"])[:-1]\n flat_data = nest.map_structure(batches.flatten_batch, data)\n flat_data = nest.map_structure(batches.sparse_fill_empty_rows, flat_data)\n\n context_embeddings = (\n self.embedding.provide_embeddings_to_forward_fn(\n flat_data, feature_types=self._config.context_features))\n context_embeddings = nest.map_structure(\n batches.get_unflatten_batch_fn(batch_shape), context_embeddings)\n\n sequential_embeddings = (\n self.embedding.provide_embeddings_to_forward_fn(\n flat_data, feature_types=self._config.sequential_features))\n sequential_embeddings = nest.map_structure(\n batches.get_unflatten_batch_fn(batch_shape), sequential_embeddings)\n\n dt = tf.divide(tf.cast(data[\"dt\"], dtype=tf.float32), 5400.)\n t = tf.divide(tf.cast(data[\"t\"], dtype=tf.float32), 5400.)\n dt_log = tf.log(dt + 1.)\n\n embedding_dict = sequential_embeddings.copy()\n embedding_dict.update(context_embeddings)\n embedding_dict[\"dt_s\"] = tf.matmul(dt_log, self.w_dt)\n combined_embedding = self._combine_embeddings_for_input(embedding_dict)\n inputs = combined_embedding\n if self._config.get(\"apply_bias\", False):\n inputs = inputs + tf.get_variable(\n \"_\".join([self._config.embedding_type, \"final_bias\"]),\n shape=[self.get_total_embedding_size()],\n initializer=tf.zeros_initializer)\n time_vect = t\n\n return inputs, time_vect",
"def main():\r\n # Prepare the data and the pretrained embedding matrix\r\n if FRESH_START:\r\n print(\"Preprocessing all data from scratch....\")\r\n train, dev, test = utils.get_data(DATA_FN)\r\n # train_data includes .word2idx and .label_enc as fields if you would like to use them at any time\r\n train_generator, dev_generator, test_generator, embeddings, train_data = utils.vectorize_data(train, dev, test, BATCH_SIZE, EMBEDDING_DIM)\r\n print(\"Saving DataLoaders and embeddings so you don't need to create them again; you can set FRESH_START to \"\r\n \"False to load them from file....\")\r\n with open(TEMP_FILE, \"wb+\") as f:\r\n pickle.dump((train_generator, dev_generator, test_generator, embeddings, train_data), f)\r\n else:\r\n try:\r\n with open(TEMP_FILE, \"rb\") as f:\r\n print(\"Loading DataLoaders and embeddings from file....\")\r\n train_generator, dev_generator, test_generator, embeddings, train_data = pickle.load(f)\r\n except FileNotFoundError:\r\n raise FileNotFoundError(\"You need to have saved your data with FRESH_START=True once in order to load it!\")\r\n \r\n\r\n # Use this loss function in your train_model() and test_model()\r\n loss_fn = nn.CrossEntropyLoss()\r\n\r\n ########## YOUR CODE HERE ##########\r\n HIDDEN_DIM = 64\r\n ########## Base DNN ################\r\n # # TODO: for each of the two models, you should 1) create it,\r\n print(\"train and test on DNN!\")\r\n dnn = models.DenseNetwork(EMBEDDING_DIM, NUM_CLASSES, HIDDEN_DIM, embeddings)\r\n optimizer = optim.Adam(dnn.parameters())\r\n # TODO 2) run train_model() to train it, and\r\n #trained_dnn = train_model(dnn, loss_fn, optimizer, train_generator, dev_generator)\r\n DNN_PATH = 'dense.pth'\r\n #torch.save(trained_dnn, DNN_PATH)\r\n # TODO: 3) run test_model() on the result\r\n print(\"Test on the saved Dense Network\")\r\n dnn_test = torch.load(DNN_PATH)\r\n test_model(dnn_test, loss_fn, test_generator)\r\n \"\"\"\r\n Output:\r\n Test loss: tensor([25.7230])\r\n F-score: 0.4399188910197242\r\n \"\"\"\r\n\r\n ########## Base RNN ################\r\n # TODO: for each of the two models, you should 1) create it,\r\n print(\"train and test on RNN!\")\r\n SENTENCE_LEN = 91\r\n rnn = models.RecurrentNetwork(SENTENCE_LEN, NUM_CLASSES, HIDDEN_DIM, embeddings)\r\n optimizer = optim.Adam(rnn.parameters())\r\n # TODO 2) run train_model() to train it, and\r\n #trained_rnn = train_model(rnn, loss_fn, optimizer, train_generator, dev_generator)\r\n RNN_PATH = 'recurrent.pth'\r\n #torch.save(trained_rnn, RNN_PATH)\r\n # TODO: 3) run test_model() on the result\r\n print(\"Test on the saved Recurrent Network\")\r\n rnn_test = torch.load(RNN_PATH)\r\n test_model(rnn_test, loss_fn, test_generator)\r\n \"\"\"\r\n Output:\r\n Test loss: tensor([25.7136])\r\n F-score: 0.42172967869116373\r\n \"\"\"\r\n\r\n # extension-grading: Extension 1, changes to the preprocessing of the data - Tweets tokenizers.\r\n # Major changes are in the utils.py labeled by \"extension-grading\"\r\n Extension1 = False\r\n if Extension1:\r\n print(\"Train and test dnn with Extension 1: Tweets tokenizers\")\r\n train, dev, test = utils.get_data(DATA_FN)\r\n train_generator, dev_generator, test_generator, embeddings,train_data = utils.vectorize_data(train, dev, test, BATCH_SIZE, EMBEDDING_DIM, extension=True)\r\n # try on DNN\r\n dnn = models.DenseNetwork(EMBEDDING_DIM, NUM_CLASSES, HIDDEN_DIM, embeddings)\r\n optimizer = optim.Adam(dnn.parameters())\r\n trained_dnn = train_model(dnn, loss_fn, optimizer, train_generator, dev_generator)\r\n 
test_model(trained_dnn, loss_fn, test_generator)\r\n \"\"\"\r\n Output:\r\n Test loss: tensor([25.5987])\r\n F-score: 0.4465511728425936\r\n # Compared with original tokenizer, F-score increased by 1.6%.\r\n \"\"\"\r\n\r\n # extension-grading: Extension 2, architecture changes - flattening embeddings using the average of unpadded sentence words other than sum. \r\n # Major changes are in the models.py labeled by \"extension-grading\"\r\n Extension2 = False\r\n if Extension2:\r\n print(\"Train and test dnn with Extension 2: Architecture changes - flattening embeddings\")\r\n # initialize the experimental model\r\n exp = models.ExperimentalNetwork(EMBEDDING_DIM, NUM_CLASSES, HIDDEN_DIM, embeddings)\r\n optimizer = optim.Adam(exp.parameters())\r\n # run train_model() to train it\r\n trained_exp = train_model(exp, loss_fn, optimizer, train_generator, dev_generator)\r\n # run test_model() on the result\r\n test_model(trained_exp, loss_fn, test_generator)\r\n \"\"\"\r\n Output:\r\n Test loss: tensor([29.4298])\r\n F-score: 0.22199231332724553\r\n # Compared with original architecture, F-score decreased by half.\r\n \"\"\"",
"def __init__(self, directory, dataset, B_SIZE = 32):\n \n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n \n self.directory = directory\n self.dataset = dataset\n self.batch_size = B_SIZE\n self.hidden_dim = 64\n self.embedding_dim = 300\n \n all_data = pickle.load(open(directory + dataset + \"/data.p\", \"rb\"))\n \n self.w2ix = all_data.w2ix\n self.vocab_size = len(self.w2ix) \n \n self.mask_list = []\n self.mask_tokens = [\"<PAD>\", \"<SOS>\", \"<EOS>\", \".\"]\n \n for item in self.mask_tokens:\n \n if item in self.w2ix:\n \n self.mask_list.append(self.w2ix[item])\n \n self.pretrained_embeds = all_data.pretrained_embeds\n \n \n # In[4]:\n \n \n x_train, y_train = zip(*all_data.train)\n x_dev, y_dev = zip(*all_data.dev)\n x_test, y_test = zip(*all_data.test)\n \n print(\"\\nVocab size:\", len(self.w2ix),\n \"\\nTraining size:\", len(y_train),\n \"\\nDev size:\", len(y_dev),\n \"\\nTest size:\", len(y_test))\n \n # In[5]:\n \n self.output_size= len(np.unique(y_train))\n \n print(\"\\nOutput dimension: \", self.output_size, \"\\n\")\n \n \n self.sequence_length = all_data.sequence_length()\n \n if dataset == \"mimicanemia\":\n \n \tself.sequence_length = 2200\n \n print(\"--Sequence length :\", self.sequence_length, \"\\n\")\n \n # In[10]:\n \n from modules.utils import padder\n \n x_train_pad, train_lengths = padder(x_train, pad_len = self.sequence_length)\n x_dev_pad, dev_lengths = padder(x_dev, pad_len = self.sequence_length)\n x_test_pad, test_lengths = padder(x_test, pad_len = self.sequence_length)\n \n \n # In[11]:\n \n x_train_pad = torch.LongTensor(x_train_pad)#.to(device)\n x_dev_pad = torch.LongTensor(x_dev_pad)#.to(device)\n x_test_pad = torch.LongTensor(x_test_pad)#.to(device)\n train_lengths = torch.LongTensor(train_lengths)#.to(device)\n dev_lengths = torch.LongTensor(dev_lengths)#.to(device)\n test_lengths = torch.LongTensor(test_lengths)#.to(device)\n y_train = torch.LongTensor(y_train)#.to(device)\n y_dev = torch.LongTensor(y_dev)#.to(device)\n y_test = torch.LongTensor(y_test)#.to(device)\n \n \n # In[12]:\n \n \n training_prebatch = list(zip(x_train_pad, train_lengths, y_train))\n dev_prebatch = list(zip(x_dev_pad, dev_lengths, y_dev))\n testing_prebatch = list(zip(x_test_pad, test_lengths, y_test))\n \n \n training_prebatch = sorted(training_prebatch, key = lambda x : x[1], reverse = False)\n dev_prebatch = sorted(dev_prebatch, key = lambda x : x[1], reverse = False)\n testing_prebatch = sorted(testing_prebatch, key = lambda x : x[1], reverse = False)\n \n # In[13]:\n \n ### removing sos and eos only sentences\n \n train_prebatch = [x for x in training_prebatch if x[1] > 2]\n dev_prebatch = [x for x in dev_prebatch if x[1] > 2]\n test_prebatch = [x for x in testing_prebatch if x[1] > 2]\n \n \n self.training = DataLoader(train_prebatch, batch_size = self.batch_size, \n shuffle = True, pin_memory = False)\n \n self.development = DataLoader(dev_prebatch, batch_size = self.batch_size, \n shuffle = False, pin_memory = False)\n \n \n self.testing = DataLoader(test_prebatch, batch_size = self.batch_size, \n shuffle = False, pin_memory = False)",
"def forward(self,input,mask,length):\r\n max_length = length.max()\r\n batch_size = input.shape[0]\r\n h,c = self.init_hidden_state(batch_size)\r\n embeddings = torch.zeros(batch_size,max_length,self.hidden_size).cuda()\r\n end_indx = length.unsqueeze(-1).long().cuda() - 1\r\n mask[:,0] = 0\r\n mask = mask.scatter_(1,end_indx,1)\r\n for t in range(max_length):\r\n pad_mask = mask[:,t].unsqueeze(-1).repeat(1,self.hidden_size)\r\n h = h*pad_mask[:h.shape[0],:]\r\n c = c*pad_mask[:h.shape[0],:]\r\n batch_size_t = sum([l >t for l in length])\r\n h,c = self.rnn(input[:batch_size_t,t,:], (h[:batch_size_t],c[:batch_size_t])\r\n )\r\n embeddings[:batch_size_t,t,:] = h\r\n \r\n word_nums = mask.sum(1)\r\n max_word_num = word_nums.max().int()\r\n i = 0\r\n\r\n # take the\r\n for num in word_nums:\r\n p = (num - word_nums.max()).int()\r\n # if mask[i,-1] != 0:\r\n # p = p-1\r\n # mask[i,p:] = 1\r\n if p != 0:\r\n mask[i,p:] = 1\r\n while mask[i].sum()<word_nums.max():\r\n p -= 1\r\n mask[i,p:] = 1\r\n i += 1\r\n index = mask.nonzero() # index[0,:] batch position, index[:,1] frame position\r\n index = index[:,0] * mask.shape[1] + index[:,1] - 1 # we take the frame of the boundary as the begaining of next word\r\n flat = embeddings.view(-1,embeddings.shape[-1])\r\n word_embeddings = flat[index.long()]\r\n output = word_embeddings.view(batch_size,max_word_num,-1)\r\n output = output.transpose(2,1)\r\n\r\n\r\n\r\n return output, word_nums",
"def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n \n vocab_size = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n any_word = list(word_to_vec_map.keys())[0]\n emb_dim = word_to_vec_map[any_word].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n \n ### START CODE HERE ###\n # Step 1\n # Initialize the embedding matrix as a numpy array of zeros.\n # See instructions above to choose the correct shape.\n emb_matrix = np.zeros((vocab_size, emb_dim))\n \n # Step 2\n # Set each row \"idx\" of the embedding matrix to be \n # the word vector representation of the idx'th word of the vocabulary\n for word, idx in word_to_index.items():\n emb_matrix[idx, :] = word_to_vec_map[word]\n\n # Step 3\n # Define Keras embedding layer with the correct input and output sizes\n # Make it non-trainable.\n embedding_layer = tensorflow.keras.layers.Embedding(input_dim = vocab_size, output_dim = emb_dim, trainable = False)\n ### END CODE HERE ###\n\n # Step 4 (already done for you; please do not modify)\n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n \n # Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer",
"def _get_embedding(self, data):\n # Tensor(n, c)\n cat = data['cat']\n return self.one_hot_embed(cat)",
"def _encode(self):\n with tf.variable_scope('encoding'):\n self.sep_p_encodes, _ = bilstm_layer(self.p_emb, self.p_length, self.hidden_size)\n tf.get_variable_scope().reuse_variables()\n self.sep_q_encodes, _ = bilstm_layer(self.q_emb, self.q_length, self.hidden_size)\n if self.use_dropout:\n self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, 1-self.dropout)\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, 1-self.dropout)",
"def forward(self, data):\r\n x = F.celu(self.lin0(data.x)) # data.x [#,#node features] -lin0-> [#, hidden_dim]\r\n for conv in self.convs:\r\n # conv <- Block [#, hidden_dim]\r\n x = x + F.dropout(conv(x, data.edge_index, data.edge_attr), p=self.dropout, training=self.training)\r\n x = self.set2set(x, data.batch) # [batch_size, 2*hidden_dim]\r\n \"\"\" data.batch marks the atoms that belongs to each one of the 128 molecules of a batch\"\"\"\r\n x = self.out(F.dropout(x, p=self.dropout, training=self.training)) #[batch_size, 2]\r\n\r\n return x",
"def forward_step(self, prev_embed, encoder_hidden, src_mask, proj_key, hidden):\n\n # compute context vector using attention mechanism\n #we only want the hidden, not the cell state of the lstm CZW, hence the hidden[0]\n query = hidden[0][-1].unsqueeze(1) # [#layers, B, D] -> [B, 1, D]\n context, attn_probs = self.attention(\n query=query, proj_key=proj_key,\n value=encoder_hidden, mask=src_mask)\n\n # update rnn hidden state\n rnn_input = torch.cat([prev_embed, context], dim=2)\n output, hidden = self.rnn(rnn_input, hidden)\n \n pre_output = torch.cat([prev_embed, output, context], dim=2)\n pre_output = self.dropout_layer(pre_output)\n pre_output = self.pre_output_layer(pre_output)\n\n return output, hidden, pre_output",
"def __init__(self, num_vars, num_categs, hidden_dim, input_mask, sparse_embeds=False):\n super().__init__()\n self.num_vars = num_vars\n self.hidden_dim = hidden_dim\n self.input_mask = input_mask\n self.sparse_embeds = sparse_embeds\n self.num_categs = num_categs\n # For each of the N networks, we have num_vars*num_categs possible embeddings to model.\n # Sharing embeddings across all N networks can limit the expressiveness of the networks.\n # Instead, we share them across 10-20 variables for large graphs to reduce memory.\n self.num_embeds = self.num_vars*self.num_vars*self.num_categs\n if self.num_embeds > 1e7:\n self.num_embeds = int(math.ceil(self.num_embeds / 20.0))\n self.shortend = True\n elif self.num_embeds > 1e6:\n for s in range(11, -1, -1):\n if self.num_vars % s == 0:\n self.num_embeds = self.num_embeds // s\n break\n self.shortend = True\n else:\n self.shortend = False\n self.embedding = nn.Embedding(num_embeddings=self.num_embeds,\n embedding_dim=hidden_dim)\n self.embedding.weight.data.mul_(2./math.sqrt(self.num_vars))\n self.bias = nn.Parameter(torch.zeros(num_vars, self.hidden_dim))\n # Tensor for mapping each input to its corresponding embedding range in self.embedding\n pos_trans = torch.arange(self.num_vars**2, dtype=torch.long) * self.num_categs\n self.register_buffer(\"pos_trans\", pos_trans, persistent=False)",
"def _add_embedding_layer(model_1, model_2):\n result_layer = torch.nn.Embedding(\n model_1.num_embeddings, model_1.embedding_dim + model_2.embedding_dim\n )\n result_layer.weight = torch.nn.Parameter(\n torch.cat((model_1.weight.data, model_2.weight.data), dim=1)\n )\n return result_layer",
"def process_batch(self, data):\n [embedding_batch] = self._sess.run([self._embedding_tensor],\n feed_dict={self._features_tensor: data})\n return embedding_batch"
] |
[
"0.6751634",
"0.66188323",
"0.6565778",
"0.6504802",
"0.6489491",
"0.6482955",
"0.64141357",
"0.64005345",
"0.63975483",
"0.6358861",
"0.6324988",
"0.6312998",
"0.6306232",
"0.62920576",
"0.6267797",
"0.62240666",
"0.6208343",
"0.61861455",
"0.6166707",
"0.6162105",
"0.6103803",
"0.6095411",
"0.6085588",
"0.6063868",
"0.6060395",
"0.6052751",
"0.6046635",
"0.6006692",
"0.60014147",
"0.59882075"
] |
0.6679302
|
1
|
Method to create a dictionary of state blocks representing stream states. This takes a dict with stream name keys and stream values.
|
def stream_states_dict(streams, time_point=0):
stream_dict = OrderedDict()
def _stream_dict_add(sb, n, i=None):
"""add a line to the stream table"""
if i is None:
key = n
else:
key = "{}[{}]".format(n, i)
stream_dict[key] = sb
for n in streams.keys():
if isinstance(streams[n], Arc):
for i, a in streams[n].items():
try:
# if getting the StateBlock from the destination port
# fails for any reason try the source port. This could
# happen if a port does not have an associated
# StateBlock. For example a surrogate model may not
# use state blocks, unit models may handle physical
# properties without state blocks, or the port could
# be used to serve the purpose of a translator block.
sb = _get_state_from_port(a.ports[1], time_point)
except: # pylint: disable=W0702
sb = _get_state_from_port(a.ports[0], time_point)
_stream_dict_add(sb, n, i)
elif isinstance(streams[n], Port):
sb = _get_state_from_port(streams[n], time_point)
_stream_dict_add(sb, n)
else:
# _IndexedStateBlock is a private class, so cannot directly test
# whether streams[n] is one or not.
try:
sb = streams[n][time_point]
except KeyError as err:
raise TypeError(
f"Either component type of stream argument {streams[n]} "
f"is unindexed or {time_point} is not a member of its "
f"indexing set."
) from err
_stream_dict_add(sb, n)
return stream_dict
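# Usage sketch (added for illustration; not part of the original function).
# It assumes an already-constructed IDAES/Pyomo flowsheet `m.fs` that exposes
# an Arc named `feed_arc` and a unit model `product` with an `outlet` Port;
# those names are hypothetical.
example_streams = {
    "Feed": m.fs.feed_arc,           # Arc between two unit models (assumed name)
    "Product": m.fs.product.outlet,  # Port on a unit model (assumed name)
}
state_blocks = stream_states_dict(example_streams, time_point=0)
for stream_name, state_block in state_blocks.items():
    print(stream_name, type(state_block).__name__)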
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, owner, stateNumber):\n self.streamInfoDict = {}\n self.stateNumber = stateNumber\n self.owner = owner",
"def make_state_dict(self):\r\n state_dict = {c.TRANSITION_IN: self.transition_in,\r\n c.TRANSITION_OUT: self.transition_out,\r\n c.NORMAL: self.normal_update}\r\n\r\n return state_dict",
"def make_state_dict(self):\n state_dict = {c.TRANSITION_IN: self.transition_in,\n c.TRANSITION_OUT: self.transition_out,\n c.NORMAL: self.normal_update}\n\n return state_dict",
"def load_stream(self, stream):\n\n import pyarrow as pa\n import json\n from itertools import groupby\n from pyspark.sql.streaming.state import GroupState\n\n def construct_state(state_info_col):\n \"\"\"\n Construct state instance from the value of state information column.\n \"\"\"\n\n state_info_col_properties = state_info_col[\"properties\"]\n state_info_col_key_row = state_info_col[\"keyRowAsUnsafe\"]\n state_info_col_object = state_info_col[\"object\"]\n\n state_properties = json.loads(state_info_col_properties)\n if state_info_col_object:\n state_object = self.pickleSer.loads(state_info_col_object)\n else:\n state_object = None\n state_properties[\"optionalValue\"] = state_object\n\n return GroupState(\n keyAsUnsafe=state_info_col_key_row,\n valueSchema=self.state_object_schema,\n **state_properties,\n )\n\n def gen_data_and_state(batches):\n \"\"\"\n Deserialize ArrowRecordBatches and return a generator of\n `(a list of pandas.Series, state)`.\n\n The logic on deserialization is following:\n\n 1. Read the entire data part from Arrow RecordBatch.\n 2. Read the entire state information part from Arrow RecordBatch.\n 3. Loop through each state information:\n 3.A. Extract the data out from entire data via the information of data range.\n 3.B. Construct a new state instance if the state information is the first occurrence\n for the current grouping key.\n 3.C. Leverage the existing state instance if it is already available for the current\n grouping key. (Meaning it's not the first occurrence.)\n 3.D. Remove the cache of state instance if the state information denotes the data is\n the last chunk for current grouping key.\n\n This deserialization logic assumes that Arrow RecordBatches contain the data with the\n ordering that data chunks for same grouping key will appear sequentially.\n\n This function must avoid materializing multiple Arrow RecordBatches into memory at the\n same time. 
And data chunks from the same grouping key should appear sequentially, to\n further group them based on state instance (same state instance will be produced for\n same grouping key).\n \"\"\"\n\n state_for_current_group = None\n\n for batch in batches:\n batch_schema = batch.schema\n data_schema = pa.schema([batch_schema[i] for i in range(0, len(batch_schema) - 1)])\n state_schema = pa.schema(\n [\n batch_schema[-1],\n ]\n )\n\n batch_columns = batch.columns\n data_columns = batch_columns[0:-1]\n state_column = batch_columns[-1]\n\n data_batch = pa.RecordBatch.from_arrays(data_columns, schema=data_schema)\n state_batch = pa.RecordBatch.from_arrays(\n [\n state_column,\n ],\n schema=state_schema,\n )\n\n state_arrow = pa.Table.from_batches([state_batch]).itercolumns()\n state_pandas = [self.arrow_to_pandas(c) for c in state_arrow][0]\n\n for state_idx in range(0, len(state_pandas)):\n state_info_col = state_pandas.iloc[state_idx]\n\n if not state_info_col:\n # no more data with grouping key + state\n break\n\n data_start_offset = state_info_col[\"startOffset\"]\n num_data_rows = state_info_col[\"numRows\"]\n is_last_chunk = state_info_col[\"isLastChunk\"]\n\n if state_for_current_group:\n # use the state, we already have state for same group and there should be\n # some data in same group being processed earlier\n state = state_for_current_group\n else:\n # there is no state being stored for same group, construct one\n state = construct_state(state_info_col)\n\n if is_last_chunk:\n # discard the state being cached for same group\n state_for_current_group = None\n elif not state_for_current_group:\n # there's no cached state but expected to have additional data in same group\n # cache the current state\n state_for_current_group = state\n\n data_batch_for_group = data_batch.slice(data_start_offset, num_data_rows)\n data_arrow = pa.Table.from_batches([data_batch_for_group]).itercolumns()\n\n data_pandas = [self.arrow_to_pandas(c) for c in data_arrow]\n\n # state info\n yield (\n data_pandas,\n state,\n )\n\n _batches = super(ArrowStreamPandasSerializer, self).load_stream(stream)\n\n data_state_generator = gen_data_and_state(_batches)\n\n # state will be same object for same grouping key\n for _state, _data in groupby(data_state_generator, key=lambda x: x[1]):\n yield (\n _data,\n _state,\n )",
"def stream_from_dict(desc):\r\n stream = Stream()\r\n stream.update(desc)\r\n return stream",
"def create_state_dict(self):\n return {\n 'resting': self.resting,\n 'moving': self.moving,\n 'animated resting': self.animated_resting,\n 'autoresting': self.auto_resting,\n 'automoving': self.auto_moving,\n 'battle resting': self.battle_resting,\n 'attack': self.attack,\n 'enemy attack': self.enemy_attack,\n c.RUN_AWAY: self.run_away,\n c.VICTORY_DANCE: self.victory_dance,\n c.KNOCK_BACK: self.knock_back,\n c.FADE_DEATH: self.fade_death\n }",
"def _stateDict(self):\n\n data = {}\n # if self.currentState[4]:\n # data['action'] = 'BRAK'\n # else:\n data['action'] = 'MCTL'\n data['speed'] = float(self.speed)\n data['steerAngle'] = float(self.steering_angle)\n\n return data",
"def __init__(self, streams):\r\n streams = list(streams)\r\n self._labels = dict(streams)\r\n self._refresh = set(stream for (stream, _) in streams)\r\n self._heads = set()",
"def __getstate__(self):\n state = {\n 'connector_keys' : self.connector_keys,\n 'metric_key' : self.metric_key,\n 'location_key' : self.location_key,\n 'parameters' : self.parameters,\n 'mrsm_instance' : self.instance_keys,\n }\n return state",
"def _sharded_state_dict(self, *args: Any, **kwargs: Any) -> Any:\n with self.set_state_dict_type(StateDictType.SHARDED_STATE_DICT):\n return self.state_dict(self, *args, **kwargs)",
"def state_dict(self, *args, **kwargs):\n return self.module.state_dict(*args, **kwargs)",
"def state_dict(self):\n return {\n 'epoch': self.epoch,\n 'iterations_in_epoch': self.iterations_in_epoch,\n }",
"def make_stream(\n d2h_streams: Dict[str, str], tensors: Dict[str, Variable], label: str\n) -> Dict[str, str]:\n d2h_streams[label] = popxl.d2h_stream(\n tensors[label]._pb_tensor.info.shape(),\n dtype.as_dtype(tensors[label]._pb_tensor.info.data_type_lcase()),\n name=f\"{label}_d2h_stream\",\n )\n ops.host_store(d2h_streams[label], tensors[label])\n return d2h_streams",
"def get_stream_props(stream=None):\n stream_props = {}\n if stream not in STREAMS.keys():\n raise Exception(\n \"Stream not defined as Pendo Stream\"\n )\n elif stream in STREAMS.keys():\n for stream_name, stream_props in STREAMS.items():\n stream_props[stream_name] = {\n \"stream_name\": stream_props.get(\"stream_name\"),\n \"key_properties\": stream_props.get(\"key_properties\"),\n \"replication_method\": stream_props.get(\"replication_method\"),\n \"replication_key\": stream_props.get(\"replication_key\"),\n \"primary_key\": stream_props.get(\"primary_key\"),\n \"field_mappings\": stream_props.get(\"field_mappings\", True)\n }\n return stream_props",
"def state_dict(self) -> dict:\n _state_dict: dict[str, Any] = super().state_dict\n _state_dict[\"rng_state\"] = self.rng.get_state()\n _state_dict[\"seed\"] = self.seed\n _state_dict[\"strategy\"] = self.strategy.state_dict\n return _state_dict",
"def states(self):\n from geoid.core import names\n from geoid.censusnames import geo_names, stusab\n\n states = {}\n\n for state_no, stusab in stusab.items():\n states[stusab] = {\n 'name': geo_names[(state_no,0)],\n 'stusab': stusab,\n 'number' : state_no\n }\n\n states['US'] = {\n 'name': 'United States',\n 'stusab': 'US',\n 'number' : 0\n }\n\n return states",
"def get_state_walk(self, state_description, class_=State):\n states = {}\n names = state_description.keys()\n for dt in self.data.index:\n name_value_mapping = {name: self.get(name, dt) for name in names}\n states[dt] = state_description.to_state(class_,\n **name_value_mapping)\n\n return states",
"def _get_state_dict(self) -> dict:\n return {\n 'optimizers': [o.state_dict() for o in self.optimizers],\n 'schedulers': [s.state_dict() for s in self.schedulers],\n }",
"def _get_state_dict(self) -> dict:\n return {\n 'optimizers': [o.state_dict() for o in self.optimizers],\n 'schedulers': [s.state_dict() for s in self.schedulers],\n }",
"def __getstate__(self):\n\n state = {}\n for key in self.__slots__:\n state[key] = getattr(self, key)\n\n return state",
"def states(self):\n states = _StatesDict({})\n for k, v in self.coordinates.items():\n states.__setitem__(k, v, allow=True)\n for k, v in self.speeds.items():\n states.__setitem__(k, v, allow=True)\n return states",
"def filter_by_state(self, stream_state: Mapping[str, Any] = None, record: Mapping[str, Any] = None) -> Iterable:\n start_date = \"1900-01-01T00:00:00.0Z\"\n if pendulum.parse(record[self.cursor_field]) > pendulum.parse((stream_state or {}).get(self.cursor_field, start_date)):\n # Persist state.\n # There is a bug in state setter: because of self._cursor_value is not defined it raises Attribute error\n # which is ignored in airbyte_cdk/sources/abstract_source.py:320 and we have an empty state in return\n # See: https://github.com/airbytehq/oncall/issues/1317\n self.state = record\n yield record",
"def make(max_states=10):\n states = range(max_states)\n keys = range(max_states)\n shuffle(keys)\n return {\n 'states': states,\n 'transitions': {\n keys[index]: DummyProgramGenerator._transition(state)\n for index, state in enumerate(states)}}",
"def __init__(self, n_states: int, n_actions: int):\n self._p = {s: {a: [] for a in range(n_actions)} for s in range(n_states)}",
"def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:\n\n if self.first_run:\n self.first_run = False\n return {self.cursor_field: self.initial_state}\n else:\n self.max_state = max(self.max_state, latest_record[self.cursor_field])\n return {self.cursor_field: self.max_state if self.current_slice == self.last_slice else self.initial_state}",
"def get_states_dict():\n\n with open(\"../data/states_data.txt\", \"r\") as states:\n us_states = []\n for state in states:\n us_states.append(state.strip())\n\n with open(\"../data/states_abv.txt\", \"r\") as states_abv:\n us_states_abv = []\n for state in states_abv:\n us_states_abv.append(state.strip())\n state_dict = dict(zip(us_states, us_states_abv))\n\n return state_dict",
"def state(self):\n return {self._reverse_mapping[k]: v for k, v in enumerate(self._state)}",
"def get_state(self):\n\n # TODO: Assemble a dictionary containing the tracker state\n\n return {}",
"def serialize_state(state):\n doc = {\n LABEL_STATE_TYPE: state.type_id,\n LABEL_CREATED_AT: state.created_at\n }\n if state.is_running():\n doc[LABEL_STARTED_AT] = state.started_at\n elif state.is_canceled() or state.is_error():\n doc[LABEL_STARTED_AT] = state.started_at\n doc[LABEL_FINISHED_AT] = state.stopped_at\n doc[LABEL_MESSAGES] = state.messages\n elif state.is_success():\n doc[LABEL_STARTED_AT] = state.started_at\n doc[LABEL_FINISHED_AT] = state.finished_at\n doc[LABEL_FILES] = state.files\n return doc",
"def __init__(self,ParamFunctionStateTuples):\n self.mDict = dict()\n for stateInit,param,func,stateFinal in ParamFunctionStateTuples:\n assert param not in stateInit\n self.mDict[param] = StateDict.EmitObj(stateInit,func,stateFinal)"
] |
[
"0.6171527",
"0.6134761",
"0.6117695",
"0.6105486",
"0.5810494",
"0.5645507",
"0.56314987",
"0.5620535",
"0.5580415",
"0.55707365",
"0.5560745",
"0.5542073",
"0.55274445",
"0.5510747",
"0.5489986",
"0.54460365",
"0.5440837",
"0.5429758",
"0.5429758",
"0.54054964",
"0.5401729",
"0.5398308",
"0.53893346",
"0.5382872",
"0.53534824",
"0.5352066",
"0.53460115",
"0.5337874",
"0.5303597",
"0.5296299"
] |
0.75801265
|
0
|
Method to create a stream table in the form of a pandas dataframe. Method takes a dict with name keys and stream values. Use an OrderedDict to list the streams in a specific order, otherwise the dataframe can be sorted later.
|
def create_stream_table_dataframe(
streams, true_state=False, time_point=0, orient="columns"
):
stream_attributes = OrderedDict()
stream_states = stream_states_dict(streams=streams, time_point=time_point)
full_keys = [] # List of all rows in dataframe to fill in missing data
stream_attributes["Units"] = {}
for key, sb in stream_states.items():
stream_attributes[key] = {}
if true_state:
disp_dict = sb.define_state_vars()
else:
disp_dict = sb.define_display_vars()
for k in disp_dict:
for row, i in enumerate(disp_dict[k]):
stream_key = k if i is None else f"{k} {i}"
quant = report_quantity(disp_dict[k][i])
stream_attributes[key][stream_key] = quant.m
if row == 0 or stream_key not in stream_attributes["Units"]:
stream_attributes["Units"][stream_key] = quant.u
if stream_key not in full_keys:
full_keys.append(stream_key)
# Check for missing rows in any stream, and fill with "-" if needed
for k, v in stream_attributes.items():
for r in full_keys:
if r not in v.keys():
# Missing row, fill with placeholder
v[r] = "-"
return DataFrame.from_dict(stream_attributes, orient=orient)
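# Usage sketch (added for illustration; not part of the original function).
# Reuses the hypothetical flowsheet `m.fs` and stream names from the sketch
# above; the OrderedDict fixes the column order of the resulting DataFrame.
from collections import OrderedDict
ordered_streams = OrderedDict(
    [("Feed", m.fs.feed_arc), ("Product", m.fs.product.outlet)]
)
stream_table = create_stream_table_dataframe(
    ordered_streams, true_state=False, time_point=0, orient="columns"
)
print(stream_table)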
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_stream_table_ui(\n streams, true_state=False, time_point=0, orient=\"columns\", precision=5\n):\n\n # Variable Types:\n class VariableTypes:\n UNFIXED = \"unfixed\"\n FIXED = \"fixed\"\n PARAMETER = \"parameter\"\n EXPRESSION = \"expression\"\n\n stream_attributes = OrderedDict()\n stream_states = stream_states_dict(streams=streams, time_point=time_point)\n full_keys = [] # List of all rows in dataframe to fill in missing data\n\n stream_attributes[\"Units\"] = {}\n\n for key, sb in stream_states.items():\n stream_attributes[key] = {}\n if true_state:\n disp_dict = sb.define_state_vars()\n else:\n disp_dict = sb.define_display_vars()\n for k in disp_dict:\n for row, i in enumerate(disp_dict[k]):\n stream_key = k if i is None else f\"{k} {i}\"\n\n # Identifying value's variable type\n var_type = None\n if isinstance(disp_dict[k][i], (_GeneralVarData, Var)):\n if disp_dict[k][i].fixed:\n var_type = VariableTypes.FIXED\n else:\n var_type = VariableTypes.UNFIXED\n elif isinstance(disp_dict[k][i], Param):\n var_type = VariableTypes.PARAMETER\n elif isinstance(disp_dict[k][i], Expression):\n var_type = VariableTypes.EXPRESSION\n\n quant = report_quantity(disp_dict[k][i])\n stream_attributes[key][stream_key] = (\n round(quant.m, precision),\n var_type,\n )\n if row == 0 or stream_key not in stream_attributes[\"Units\"]:\n stream_attributes[\"Units\"][stream_key] = quant.u\n\n if stream_key not in full_keys:\n full_keys.append(stream_key)\n\n # Check for missing rows in any stream, and fill with \"-\" if needed\n for k, v in stream_attributes.items():\n for r in full_keys:\n if r not in v.keys():\n # Missing row, fill with placeholder\n v[r] = \"-\"\n\n return DataFrame.from_dict(stream_attributes, orient=orient)",
"def stream_table_dataframe_to_string(stream_table, **kwargs):\n # Set some default values for keyword arguments\n na_rep = kwargs.pop(\"na_rep\", \"-\")\n justify = kwargs.pop(\"justify\", \"center\")\n float_format = kwargs.pop(\"float_format\", lambda x: \"{:#.5g}\".format(x))\n\n # Print stream table\n return stream_table.to_string(\n na_rep=na_rep, justify=justify, float_format=float_format, **kwargs\n )",
"def streams():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('Streams', level=1)\r\n streams = get_qlik_sense.get_streams()\r\n num_of_streams = len(streams)\r\n table = document.add_table(rows=num_of_streams+1, cols=1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'Stream name'\r\n for stream in range(num_of_streams):\r\n row = table.rows[stream+1]\r\n row.cells[0].text = str(streams[stream]['name'])\r\n document.add_page_break()",
"def dataframe(self):\n dictionary = OrderedDict(zip(self.keys, [[value] for value in self.values]))\n dataframe = pd.DataFrame(dictionary)\n return dataframe",
"def get_dataframe(self):\n # Using a list here appears faster than using a generator expression\n df = pd.DataFrame.from_records(\n [{'event_id' : x.event_id,\n 'time_delta' : x.time_delta,\n 'src_id' : x.src_id,\n 't' : x.cur_time,\n 'sink_id' : y}\n for x in self.events\n for y in x.sink_ids]\n )\n return df",
"def make_stream(\n d2h_streams: Dict[str, str], tensors: Dict[str, Variable], label: str\n) -> Dict[str, str]:\n d2h_streams[label] = popxl.d2h_stream(\n tensors[label]._pb_tensor.info.shape(),\n dtype.as_dtype(tensors[label]._pb_tensor.info.data_type_lcase()),\n name=f\"{label}_d2h_stream\",\n )\n ops.host_store(d2h_streams[label], tensors[label])\n return d2h_streams",
"def load_stream(self, stream):\n batches = super(ArrowStreamPandasSerializer, self).load_stream(stream)\n import pyarrow as pa\n\n for batch in batches:\n yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]",
"def to_df(thisdict, name=None, index=None) -> pd.DataFrame:\n df = pd.DataFrame.from_dict(thisdict, orient='index')\n if index:\n df = df.set_index(index)\n if name:\n df.index.name=name\n\n if df.size>0:\n df.sort(inplace=True, ascending=False)\n return df",
"def to_dataframe(self):\n \n #Try importing pandas\n try:\n import pandas as pd\n except ImportError as e:\n raise RuntimeError(\"Error: pandas is not available. Install pandas in order to use this function.\") from e\n \n #Get season info\n season_info = self.annual_summary()\n season_info_keys = season_info['id']\n \n #Set up empty dict for dataframe\n ds = {'id':[],'name':[],'vmax':[],'mslp':[],'category':[],'ace':[],'start_time':[],'end_time':[]}\n \n #Add every key containing a list into the dict\n keys = [k for k in self.dict.keys()]\n for key in keys:\n if key in season_info_keys:\n sidx = season_info_keys.index(key)\n ds['id'].append(key)\n ds['name'].append(self.dict[key]['name'])\n ds['vmax'].append(season_info['max_wspd'][sidx])\n ds['mslp'].append(season_info['min_mslp'][sidx])\n ds['category'].append(season_info['category'][sidx])\n ds['start_time'].append(self.dict[key]['date'][0])\n ds['end_time'].append(self.dict[key]['date'][-1])\n ds['ace'].append(np.round(season_info['ace'][sidx],1))\n \n #Convert entire dict to a DataFrame\n ds = pd.DataFrame(ds)\n\n #Return dataset\n return ds",
"def data_frame(dict_cycle, number):\n list1, list2 = (list(dict_cycle.get('cycle_'+str(number)).items()))\n zipped_list = list(zip(list1[1], list2[1]))\n data = pd.DataFrame(zipped_list, columns=['Potential', 'Current'])\n return data",
"def create_dataframe(self):\n sessions = pandas.DataFrame().from_dict(self.values)\n sessions_lists = pandas.DataFrame().from_dict(self.lists)\n return sessions, sessions_lists",
"def dump_stream(self, iterator, stream):\n\n import pandas as pd\n import pyarrow as pa\n\n def construct_state_pdf(state):\n \"\"\"\n Construct a pandas DataFrame from the state instance.\n \"\"\"\n\n state_properties = state.json().encode(\"utf-8\")\n state_key_row_as_binary = state._keyAsUnsafe\n if state.exists:\n state_object = self.pickleSer.dumps(state._value_schema.toInternal(state._value))\n else:\n state_object = None\n state_old_timeout_timestamp = state.oldTimeoutTimestamp\n\n state_dict = {\n \"properties\": [\n state_properties,\n ],\n \"keyRowAsUnsafe\": [\n state_key_row_as_binary,\n ],\n \"object\": [\n state_object,\n ],\n \"oldTimeoutTimestamp\": [\n state_old_timeout_timestamp,\n ],\n }\n\n return pd.DataFrame.from_dict(state_dict)\n\n def construct_record_batch(pdfs, pdf_data_cnt, pdf_schema, state_pdfs, state_data_cnt):\n \"\"\"\n Construct a new Arrow RecordBatch based on output pandas DataFrames and states. Each\n one matches to the single struct field for Arrow schema. We also need an extra one to\n indicate array length for data and state, so the return value of Arrow RecordBatch will\n have schema with three fields, in `count`, `data`, `state` order.\n (Readers are expected to access the field via position rather than the name. We do\n not guarantee the name of the field.)\n\n Note that Arrow RecordBatch requires all columns to have all same number of rows,\n hence this function inserts empty data for count/state/data with less elements to\n compensate.\n \"\"\"\n\n max_data_cnt = max(1, max(pdf_data_cnt, state_data_cnt))\n\n # We only use the first row in the count column, and fill other rows to be the same\n # value, hoping it is more friendly for compression, in case it is needed.\n count_dict = {\n \"dataCount\": [pdf_data_cnt] * max_data_cnt,\n \"stateCount\": [state_data_cnt] * max_data_cnt,\n }\n count_pdf = pd.DataFrame.from_dict(count_dict)\n\n empty_row_cnt_in_data = max_data_cnt - pdf_data_cnt\n empty_row_cnt_in_state = max_data_cnt - state_data_cnt\n\n empty_rows_pdf = pd.DataFrame(\n dict.fromkeys(pa.schema(pdf_schema).names),\n index=[x for x in range(0, empty_row_cnt_in_data)],\n )\n empty_rows_state = pd.DataFrame(\n columns=[\"properties\", \"keyRowAsUnsafe\", \"object\", \"oldTimeoutTimestamp\"],\n index=[x for x in range(0, empty_row_cnt_in_state)],\n )\n\n pdfs.append(empty_rows_pdf)\n state_pdfs.append(empty_rows_state)\n\n merged_pdf = pd.concat(pdfs, ignore_index=True)\n merged_state_pdf = pd.concat(state_pdfs, ignore_index=True)\n\n return self._create_batch(\n [\n (count_pdf, self.result_count_pdf_arrow_type),\n (merged_pdf, pdf_schema),\n (merged_state_pdf, self.result_state_pdf_arrow_type),\n ]\n )\n\n def serialize_batches():\n \"\"\"\n Read through an iterator of (iterator of pandas DataFrame, state), and serialize them\n to Arrow RecordBatches.\n\n This function does batching on constructing the Arrow RecordBatch; a batch will be\n serialized to the Arrow RecordBatch when the total number of records exceeds the\n configured threshold.\n \"\"\"\n # a set of variables for the state of current batch which will be converted to Arrow\n # RecordBatch.\n pdfs = []\n state_pdfs = []\n pdf_data_cnt = 0\n state_data_cnt = 0\n\n return_schema = None\n\n for data in iterator:\n # data represents the result of each call of user function\n packaged_result = data[0]\n\n # There are two results from the call of user function:\n # 1) iterator of pandas DataFrame (output)\n # 2) updated state instance\n pdf_iter = packaged_result[0][0]\n state = 
packaged_result[0][1]\n\n # This is static and won't change across batches.\n return_schema = packaged_result[1]\n\n for pdf in pdf_iter:\n # We ignore empty pandas DataFrame.\n if len(pdf) > 0:\n pdf_data_cnt += len(pdf)\n pdfs.append(pdf)\n\n # If the total number of records in current batch exceeds the configured\n # threshold, time to construct the Arrow RecordBatch from the batch.\n if pdf_data_cnt > self.arrow_max_records_per_batch:\n batch = construct_record_batch(\n pdfs, pdf_data_cnt, return_schema, state_pdfs, state_data_cnt\n )\n\n # Reset the variables to start with new batch for further data.\n pdfs = []\n state_pdfs = []\n pdf_data_cnt = 0\n state_data_cnt = 0\n\n yield batch\n\n # This has to be performed 'after' evaluating all elements in iterator, so that\n # the user function has been completed and the state is guaranteed to be updated.\n state_pdf = construct_state_pdf(state)\n\n state_pdfs.append(state_pdf)\n state_data_cnt += 1\n\n # processed all output, but current batch may not be flushed yet.\n if pdf_data_cnt > 0 or state_data_cnt > 0:\n batch = construct_record_batch(\n pdfs, pdf_data_cnt, return_schema, state_pdfs, state_data_cnt\n )\n\n yield batch\n\n def init_stream_yield_batches(batches):\n \"\"\"\n This function helps to ensure the requirement for Pandas UDFs - Pandas UDFs require a\n START_ARROW_STREAM before the Arrow stream is sent.\n\n START_ARROW_STREAM should be sent after creating the first record batch so in case of\n an error, it can be sent back to the JVM before the Arrow stream starts.\n \"\"\"\n should_write_start_length = True\n\n for batch in batches:\n if should_write_start_length:\n write_int(SpecialLengths.START_ARROW_STREAM, stream)\n should_write_start_length = False\n\n yield batch\n\n batches_to_write = init_stream_yield_batches(serialize_batches())\n\n return ArrowStreamSerializer.dump_stream(self, batches_to_write, stream)",
"def format_streamflows(data_dir):\n\n search_expr = data_dir + \"/*.json\"\n\n df = pd.DataFrame()\n\n for json_file in glob.glob(search_expr):\n\n with open(json_file, 'r') as fn:\n data = json.load(fn)\n\n try:\n data = json2dataframe(data)\n\n new_df = data[0]\n new_df = new_df.drop(columns=['dateTime', 'qualifiers'])\n new_df = new_df.rename(columns={'value': data[1]})\n df = pd.concat([df, new_df], axis=1)\n\n except IndexError, e:\n print 'Error:', e\n continue\n\n return df",
"def create_tables(*args):\n import pandas\n tables = {}\n for locus in args[0].index:\n tables[locus] = get_table(*args, locus)\n return tables",
"def transitions_table(transitions, states, alphabet):\n transitions = sanitize_transitions(transitions)\n\n check_transitions(transitions, states, alphabet)\n\n table = []\n for current in states:\n for read in alphabet:\n # DEBUG: print(state, read)\n next, write, move = transitions(current, read)\n table.append([current, read, next, write, move])\n\n df = pd.DataFrame(table, columns = ['current', 'read', 'next', 'write', 'move'])\n return df",
"def construct_df():\n iterable = [['approach', 'contact', 'retract', 'pause'], ['force', 'height']]\n index = pd.MultiIndex.from_product(iterable, names=['segment', 'channel'])\n return pd.DataFrame(columns=index)",
"def to_Table(self, **kwargs):\n mean_transmit, transmit_ = self.transmit\n data_ = {'WAVELENGTH': self._wavelength,\n 'THROUGHPUT': mean_transmit}\n for num, filterk in enumerate(transmit_, 1):\n data_['THROUGHPUT_{0:d}'.format(num)] = filterk\n data = SimpleTable(data_)\n\n if self.wavelength_unit is not None:\n data.header['WAVELENGTH_UNIT'] = self.wavelength_unit\n data.header['DETECTOR'] = self.dtype\n data.header['COMPNAME'] = self.name\n data.header['NAME'] = self.name\n data.set_comment('THROUGHPUT', 'filter throughput definition')\n data.set_comment('WAVELENGTH', 'filter wavelength definition')\n for num in range(1, len(transmit_) + 1):\n data.set_comment('THROUGHPUT_{0:d}'.format(num),\n 'filter throughput sample')\n data.set_comment('WAVELENGTH', self.wavelength_unit or 'AA')\n return data",
"def _listofdict_to_df(self, osw_dict=None): \n if type(osw_dict) is not dict:\n raise ValueError(\"The 'osw_dict' arg is invalid!\")\n \n frame = pd.DataFrame.from_dict(osw_dict, orient='columns')\n \n return frame",
"def tabular(sessions: List[Session], *, columns: Optional[str] = None):\n if not columns:\n columns = \"id,start_time,status,provider,url\"\n\n if any(s.ssh_enabled for s in sessions):\n columns += \",ssh\"\n\n return tabulate(collection=sessions, columns=columns, columns_mapping=SESSION_COLUMNS)",
"def generate_df(js_dict, naming, value=\"value\"):\n\n values = []\n dimensions, dim_names = get_dimensions(js_dict, naming)\n values = get_values(js_dict, value=value)\n output = pd.DataFrame([category + [values[i]]\n for i, category in\n enumerate(get_df_row(dimensions, naming))])\n output.columns = dim_names + [value]\n output.index = range(0, len(values))\n return output",
"def stream_from_dict(desc):\r\n stream = Stream()\r\n stream.update(desc)\r\n return stream",
"def generate_log_df(log_columns, log_values):\n return pd.DataFrame(dict(zip(log_columns, log_values)), index=[0])",
"def session_table(session, global_params=False, source_info=True, path_info=True):\n from ecogdata.devices.data_util import find_loadable_files\n conf = session_conf(session)\n top_level_info = conf.pop('session')\n named_recordings = sorted([s for s in conf.sections if s != 'session'])\n keys = set(top_level_info.keys())\n for r in named_recordings:\n keys.update(conf[r].keys())\n if not global_params:\n gp = set(load_params().keys())\n keys.difference_update(gp)\n if not path_info:\n path_keys = {'nwk_path', 'exp_path', 'network_exp', 'local_exp', 'store_path'}\n keys.difference_update(path_keys)\n required_keys = ['headstage', 'electrode', 'exp_type']\n other_keys = list(keys.difference(required_keys))\n if source_info:\n columns = list(required_keys) + ['primary_file', 'downsampled_file'] + list(other_keys)\n else:\n columns = list(required_keys) + list(other_keys)\n tab = pd.DataFrame(columns=columns)\n for r in named_recordings:\n rec = top_level_info.copy()\n rec.update(conf[r])\n tab_row = dict([(k, rec.get(k, 'unknown')) for k in required_keys + other_keys])\n tab_row = filter_values(tab_row)\n if source_info:\n tab_row['primary_file'] = find_loadable_files(session, r, downsampled=False)\n tab_row['downsampled_file'] = find_loadable_files(session, r, downsampled=True)\n tab = tab.append(pd.DataFrame(index=[r], data=tab_row))\n return tab",
"def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):\n super(Stream, self).__init__(\"stream\")\n\n # Validate arg\n # ------------\n if arg is None:\n arg = {}\n elif isinstance(arg, self.__class__):\n arg = arg.to_plotly_json()\n elif isinstance(arg, dict):\n arg = _copy.copy(arg)\n else:\n raise ValueError(\n \"\"\"\\\nThe first argument to the plotly.graph_objs.table.Stream \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.table.Stream`\"\"\"\n )\n\n # Handle skip_invalid\n # -------------------\n self._skip_invalid = kwargs.pop(\"skip_invalid\", False)\n\n # Import validators\n # -----------------\n from plotly.validators.table import stream as v_stream\n\n # Initialize validators\n # ---------------------\n self._validators[\"maxpoints\"] = v_stream.MaxpointsValidator()\n self._validators[\"token\"] = v_stream.TokenValidator()\n\n # Populate data dict with properties\n # ----------------------------------\n _v = arg.pop(\"maxpoints\", None)\n self[\"maxpoints\"] = maxpoints if maxpoints is not None else _v\n _v = arg.pop(\"token\", None)\n self[\"token\"] = token if token is not None else _v\n\n # Process unknown kwargs\n # ----------------------\n self._process_kwargs(**dict(arg, **kwargs))\n\n # Reset skip_invalid\n # ------------------\n self._skip_invalid = False",
"def get_ts_df(self):\n df = pd.DataFrame(self.ts_list)\n df.columns = self.col_names\n df.sort_values(by=self.col_names[0], inplace=True)\n df.reset_index(drop=True, inplace=True)\n\n return df",
"def to_df(self):\n if self.shape > 1:\n range_str = [s for s in range(self.shape)]\n iterables = [self.columns, range_str]\n multiindex = pd.MultiIndex.from_product(iterables, names=['song', 'frame'])\n # multiindex = [i for i in itertools.product(self.columns, range_str, repeat=1)]\n df = pd.DataFrame(columns=multiindex, index=self.columns, dtype=np.float64)\n\n for c_1 in self.columns:\n for c_2 in self.columns:\n for s in range_str:\n df.loc[c_1][c_2, s] = self.dict_[c_1][c_2][s]\n df = df.T\n else:\n df = pd.DataFrame(columns=self.columns + ['song'], dtype=np.float64)\n df['song'] = self.columns\n df = df.set_index('song')\n\n for c_1 in self.columns:\n for c_2 in self.columns:\n df.loc[c_1, c_2] = self.max_diff(c_1, c_2)\n\n return df",
"def make_stats_df(self):\n columns = ['DATE', 'TEAM', 'teamId', 'R', 'HR', 'RBI', 'SBN', 'OBP', \n 'K', 'QS', 'SV', 'ERA', 'WHIP', 'MOVES', 'CHANGE']\n trimmed_table = self.parse_soup(self.stats)\n self.df_stats = pd.DataFrame(trimmed_table, columns=columns) \n # load season standings csv from file\n try: # if it already exists\n df = pd.read_csv('2016_stats.csv', index_col=0)\n except OSError:\n df = pd.DataFrame(columns=columns) # if it doesn't already exist\n df = df.append(self.df_stats)\n df.to_csv('2016_stats.csv')",
"def to_dataframe(data, names, **kwargs):\n return Component(\n \"ToDataframe\",\n arguments={\n 'data': Component.of(data),\n 'names': Component.of(names)\n },\n options={\n \n },\n constraints=kwargs)",
"def create_dataframe(ids, names, p_links, c_links, cl_links):\n try:\n dict = {'ID':ids, 'Name': names, 'Photo':p_links, 'Flag':c_links, 'Club Logo':cl_links}\n df = pd.DataFrame(dict)\n return df\n except Exception as e:\n print(\"Exception creating or storing the dataframe: \" + str(e))",
"def dataFrame(self):\n\n memory_file = StringIO(initial_value=self.sparql_result.decode('utf-8'), newline='\\n')\n reader = DictReader(memory_file)\n\n schema = StructType(\n list(map(lambda f: StructField(f, StringType()), reader.fieldnames))\n )\n\n data = list(map(lambda d: [d[f] for f in reader.fieldnames], list(reader)))\n\n return self.spark.createDataFrame(data, schema)"
] |
[
"0.7057761",
"0.627377",
"0.611543",
"0.5960522",
"0.58987206",
"0.5864669",
"0.57906604",
"0.5757231",
"0.5680212",
"0.56271696",
"0.56033725",
"0.5602798",
"0.5569012",
"0.555265",
"0.5547795",
"0.5495419",
"0.54677314",
"0.54455346",
"0.54358",
"0.5394573",
"0.53550637",
"0.534457",
"0.5342853",
"0.5342839",
"0.53209853",
"0.5319858",
"0.5314052",
"0.5295439",
"0.52802336",
"0.5279372"
] |
0.7505053
|
0
|
Method to create a stream table in the form of a pandas dataframe. Method takes a dict with name keys and stream values. Use an OrderedDict to list the streams in a specific order, otherwise the dataframe can be sorted later.
|
def create_stream_table_ui(
streams, true_state=False, time_point=0, orient="columns", precision=5
):
# Variable Types:
class VariableTypes:
UNFIXED = "unfixed"
FIXED = "fixed"
PARAMETER = "parameter"
EXPRESSION = "expression"
stream_attributes = OrderedDict()
stream_states = stream_states_dict(streams=streams, time_point=time_point)
full_keys = [] # List of all rows in dataframe to fill in missing data
stream_attributes["Units"] = {}
for key, sb in stream_states.items():
stream_attributes[key] = {}
if true_state:
disp_dict = sb.define_state_vars()
else:
disp_dict = sb.define_display_vars()
for k in disp_dict:
for row, i in enumerate(disp_dict[k]):
stream_key = k if i is None else f"{k} {i}"
# Identifying value's variable type
var_type = None
if isinstance(disp_dict[k][i], (_GeneralVarData, Var)):
if disp_dict[k][i].fixed:
var_type = VariableTypes.FIXED
else:
var_type = VariableTypes.UNFIXED
elif isinstance(disp_dict[k][i], Param):
var_type = VariableTypes.PARAMETER
elif isinstance(disp_dict[k][i], Expression):
var_type = VariableTypes.EXPRESSION
quant = report_quantity(disp_dict[k][i])
stream_attributes[key][stream_key] = (
round(quant.m, precision),
var_type,
)
if row == 0 or stream_key not in stream_attributes["Units"]:
stream_attributes["Units"][stream_key] = quant.u
if stream_key not in full_keys:
full_keys.append(stream_key)
# Check for missing rows in any stream, and fill with "-" if needed
for k, v in stream_attributes.items():
for r in full_keys:
if r not in v.keys():
# Missing row, fill with placeholder
v[r] = "-"
return DataFrame.from_dict(stream_attributes, orient=orient)
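# Usage sketch (added for illustration; not part of the original function).
# Same assumed flowsheet and stream names as in the sketches above. Each cell
# of the returned DataFrame (apart from the "Units" column) is a tuple of
# (rounded value, variable type), e.g. (101325.0, "fixed").
ui_table = create_stream_table_ui(
    OrderedDict([("Feed", m.fs.feed_arc)]), true_state=False, precision=3
)
print(ui_table)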
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_stream_table_dataframe(\n streams, true_state=False, time_point=0, orient=\"columns\"\n):\n\n stream_attributes = OrderedDict()\n stream_states = stream_states_dict(streams=streams, time_point=time_point)\n full_keys = [] # List of all rows in dataframe to fill in missing data\n\n stream_attributes[\"Units\"] = {}\n\n for key, sb in stream_states.items():\n stream_attributes[key] = {}\n if true_state:\n disp_dict = sb.define_state_vars()\n else:\n disp_dict = sb.define_display_vars()\n for k in disp_dict:\n for row, i in enumerate(disp_dict[k]):\n stream_key = k if i is None else f\"{k} {i}\"\n quant = report_quantity(disp_dict[k][i])\n stream_attributes[key][stream_key] = quant.m\n if row == 0 or stream_key not in stream_attributes[\"Units\"]:\n stream_attributes[\"Units\"][stream_key] = quant.u\n if stream_key not in full_keys:\n full_keys.append(stream_key)\n\n # Check for missing rows in any stream, and fill with \"-\" if needed\n for k, v in stream_attributes.items():\n for r in full_keys:\n if r not in v.keys():\n # Missing row, fill with placeholder\n v[r] = \"-\"\n\n return DataFrame.from_dict(stream_attributes, orient=orient)",
"def stream_table_dataframe_to_string(stream_table, **kwargs):\n # Set some default values for keyword arguments\n na_rep = kwargs.pop(\"na_rep\", \"-\")\n justify = kwargs.pop(\"justify\", \"center\")\n float_format = kwargs.pop(\"float_format\", lambda x: \"{:#.5g}\".format(x))\n\n # Print stream table\n return stream_table.to_string(\n na_rep=na_rep, justify=justify, float_format=float_format, **kwargs\n )",
"def streams():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('Streams', level=1)\r\n streams = get_qlik_sense.get_streams()\r\n num_of_streams = len(streams)\r\n table = document.add_table(rows=num_of_streams+1, cols=1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'Stream name'\r\n for stream in range(num_of_streams):\r\n row = table.rows[stream+1]\r\n row.cells[0].text = str(streams[stream]['name'])\r\n document.add_page_break()",
"def make_stream(\n d2h_streams: Dict[str, str], tensors: Dict[str, Variable], label: str\n) -> Dict[str, str]:\n d2h_streams[label] = popxl.d2h_stream(\n tensors[label]._pb_tensor.info.shape(),\n dtype.as_dtype(tensors[label]._pb_tensor.info.data_type_lcase()),\n name=f\"{label}_d2h_stream\",\n )\n ops.host_store(d2h_streams[label], tensors[label])\n return d2h_streams",
"def dataframe(self):\n dictionary = OrderedDict(zip(self.keys, [[value] for value in self.values]))\n dataframe = pd.DataFrame(dictionary)\n return dataframe",
"def load_stream(self, stream):\n batches = super(ArrowStreamPandasSerializer, self).load_stream(stream)\n import pyarrow as pa\n\n for batch in batches:\n yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]",
"def get_dataframe(self):\n # Using a list here appears faster than using a generator expression\n df = pd.DataFrame.from_records(\n [{'event_id' : x.event_id,\n 'time_delta' : x.time_delta,\n 'src_id' : x.src_id,\n 't' : x.cur_time,\n 'sink_id' : y}\n for x in self.events\n for y in x.sink_ids]\n )\n return df",
"def to_df(thisdict, name=None, index=None) -> pd.DataFrame:\n df = pd.DataFrame.from_dict(thisdict, orient='index')\n if index:\n df = df.set_index(index)\n if name:\n df.index.name=name\n\n if df.size>0:\n df.sort(inplace=True, ascending=False)\n return df",
"def format_streamflows(data_dir):\n\n search_expr = data_dir + \"/*.json\"\n\n df = pd.DataFrame()\n\n for json_file in glob.glob(search_expr):\n\n with open(json_file, 'r') as fn:\n data = json.load(fn)\n\n try:\n data = json2dataframe(data)\n\n new_df = data[0]\n new_df = new_df.drop(columns=['dateTime', 'qualifiers'])\n new_df = new_df.rename(columns={'value': data[1]})\n df = pd.concat([df, new_df], axis=1)\n\n except IndexError, e:\n print 'Error:', e\n continue\n\n return df",
"def dump_stream(self, iterator, stream):\n\n import pandas as pd\n import pyarrow as pa\n\n def construct_state_pdf(state):\n \"\"\"\n Construct a pandas DataFrame from the state instance.\n \"\"\"\n\n state_properties = state.json().encode(\"utf-8\")\n state_key_row_as_binary = state._keyAsUnsafe\n if state.exists:\n state_object = self.pickleSer.dumps(state._value_schema.toInternal(state._value))\n else:\n state_object = None\n state_old_timeout_timestamp = state.oldTimeoutTimestamp\n\n state_dict = {\n \"properties\": [\n state_properties,\n ],\n \"keyRowAsUnsafe\": [\n state_key_row_as_binary,\n ],\n \"object\": [\n state_object,\n ],\n \"oldTimeoutTimestamp\": [\n state_old_timeout_timestamp,\n ],\n }\n\n return pd.DataFrame.from_dict(state_dict)\n\n def construct_record_batch(pdfs, pdf_data_cnt, pdf_schema, state_pdfs, state_data_cnt):\n \"\"\"\n Construct a new Arrow RecordBatch based on output pandas DataFrames and states. Each\n one matches to the single struct field for Arrow schema. We also need an extra one to\n indicate array length for data and state, so the return value of Arrow RecordBatch will\n have schema with three fields, in `count`, `data`, `state` order.\n (Readers are expected to access the field via position rather than the name. We do\n not guarantee the name of the field.)\n\n Note that Arrow RecordBatch requires all columns to have all same number of rows,\n hence this function inserts empty data for count/state/data with less elements to\n compensate.\n \"\"\"\n\n max_data_cnt = max(1, max(pdf_data_cnt, state_data_cnt))\n\n # We only use the first row in the count column, and fill other rows to be the same\n # value, hoping it is more friendly for compression, in case it is needed.\n count_dict = {\n \"dataCount\": [pdf_data_cnt] * max_data_cnt,\n \"stateCount\": [state_data_cnt] * max_data_cnt,\n }\n count_pdf = pd.DataFrame.from_dict(count_dict)\n\n empty_row_cnt_in_data = max_data_cnt - pdf_data_cnt\n empty_row_cnt_in_state = max_data_cnt - state_data_cnt\n\n empty_rows_pdf = pd.DataFrame(\n dict.fromkeys(pa.schema(pdf_schema).names),\n index=[x for x in range(0, empty_row_cnt_in_data)],\n )\n empty_rows_state = pd.DataFrame(\n columns=[\"properties\", \"keyRowAsUnsafe\", \"object\", \"oldTimeoutTimestamp\"],\n index=[x for x in range(0, empty_row_cnt_in_state)],\n )\n\n pdfs.append(empty_rows_pdf)\n state_pdfs.append(empty_rows_state)\n\n merged_pdf = pd.concat(pdfs, ignore_index=True)\n merged_state_pdf = pd.concat(state_pdfs, ignore_index=True)\n\n return self._create_batch(\n [\n (count_pdf, self.result_count_pdf_arrow_type),\n (merged_pdf, pdf_schema),\n (merged_state_pdf, self.result_state_pdf_arrow_type),\n ]\n )\n\n def serialize_batches():\n \"\"\"\n Read through an iterator of (iterator of pandas DataFrame, state), and serialize them\n to Arrow RecordBatches.\n\n This function does batching on constructing the Arrow RecordBatch; a batch will be\n serialized to the Arrow RecordBatch when the total number of records exceeds the\n configured threshold.\n \"\"\"\n # a set of variables for the state of current batch which will be converted to Arrow\n # RecordBatch.\n pdfs = []\n state_pdfs = []\n pdf_data_cnt = 0\n state_data_cnt = 0\n\n return_schema = None\n\n for data in iterator:\n # data represents the result of each call of user function\n packaged_result = data[0]\n\n # There are two results from the call of user function:\n # 1) iterator of pandas DataFrame (output)\n # 2) updated state instance\n pdf_iter = packaged_result[0][0]\n state = 
packaged_result[0][1]\n\n # This is static and won't change across batches.\n return_schema = packaged_result[1]\n\n for pdf in pdf_iter:\n # We ignore empty pandas DataFrame.\n if len(pdf) > 0:\n pdf_data_cnt += len(pdf)\n pdfs.append(pdf)\n\n # If the total number of records in current batch exceeds the configured\n # threshold, time to construct the Arrow RecordBatch from the batch.\n if pdf_data_cnt > self.arrow_max_records_per_batch:\n batch = construct_record_batch(\n pdfs, pdf_data_cnt, return_schema, state_pdfs, state_data_cnt\n )\n\n # Reset the variables to start with new batch for further data.\n pdfs = []\n state_pdfs = []\n pdf_data_cnt = 0\n state_data_cnt = 0\n\n yield batch\n\n # This has to be performed 'after' evaluating all elements in iterator, so that\n # the user function has been completed and the state is guaranteed to be updated.\n state_pdf = construct_state_pdf(state)\n\n state_pdfs.append(state_pdf)\n state_data_cnt += 1\n\n # processed all output, but current batch may not be flushed yet.\n if pdf_data_cnt > 0 or state_data_cnt > 0:\n batch = construct_record_batch(\n pdfs, pdf_data_cnt, return_schema, state_pdfs, state_data_cnt\n )\n\n yield batch\n\n def init_stream_yield_batches(batches):\n \"\"\"\n This function helps to ensure the requirement for Pandas UDFs - Pandas UDFs require a\n START_ARROW_STREAM before the Arrow stream is sent.\n\n START_ARROW_STREAM should be sent after creating the first record batch so in case of\n an error, it can be sent back to the JVM before the Arrow stream starts.\n \"\"\"\n should_write_start_length = True\n\n for batch in batches:\n if should_write_start_length:\n write_int(SpecialLengths.START_ARROW_STREAM, stream)\n should_write_start_length = False\n\n yield batch\n\n batches_to_write = init_stream_yield_batches(serialize_batches())\n\n return ArrowStreamSerializer.dump_stream(self, batches_to_write, stream)",
"def data_frame(dict_cycle, number):\n list1, list2 = (list(dict_cycle.get('cycle_'+str(number)).items()))\n zipped_list = list(zip(list1[1], list2[1]))\n data = pd.DataFrame(zipped_list, columns=['Potential', 'Current'])\n return data",
"def to_dataframe(self):\n \n #Try importing pandas\n try:\n import pandas as pd\n except ImportError as e:\n raise RuntimeError(\"Error: pandas is not available. Install pandas in order to use this function.\") from e\n \n #Get season info\n season_info = self.annual_summary()\n season_info_keys = season_info['id']\n \n #Set up empty dict for dataframe\n ds = {'id':[],'name':[],'vmax':[],'mslp':[],'category':[],'ace':[],'start_time':[],'end_time':[]}\n \n #Add every key containing a list into the dict\n keys = [k for k in self.dict.keys()]\n for key in keys:\n if key in season_info_keys:\n sidx = season_info_keys.index(key)\n ds['id'].append(key)\n ds['name'].append(self.dict[key]['name'])\n ds['vmax'].append(season_info['max_wspd'][sidx])\n ds['mslp'].append(season_info['min_mslp'][sidx])\n ds['category'].append(season_info['category'][sidx])\n ds['start_time'].append(self.dict[key]['date'][0])\n ds['end_time'].append(self.dict[key]['date'][-1])\n ds['ace'].append(np.round(season_info['ace'][sidx],1))\n \n #Convert entire dict to a DataFrame\n ds = pd.DataFrame(ds)\n\n #Return dataset\n return ds",
"def transitions_table(transitions, states, alphabet):\n transitions = sanitize_transitions(transitions)\n\n check_transitions(transitions, states, alphabet)\n\n table = []\n for current in states:\n for read in alphabet:\n # DEBUG: print(state, read)\n next, write, move = transitions(current, read)\n table.append([current, read, next, write, move])\n\n df = pd.DataFrame(table, columns = ['current', 'read', 'next', 'write', 'move'])\n return df",
"def construct_df():\n iterable = [['approach', 'contact', 'retract', 'pause'], ['force', 'height']]\n index = pd.MultiIndex.from_product(iterable, names=['segment', 'channel'])\n return pd.DataFrame(columns=index)",
"def create_tables(*args):\n import pandas\n tables = {}\n for locus in args[0].index:\n tables[locus] = get_table(*args, locus)\n return tables",
"def create_dataframe(self):\n sessions = pandas.DataFrame().from_dict(self.values)\n sessions_lists = pandas.DataFrame().from_dict(self.lists)\n return sessions, sessions_lists",
"def tabular(sessions: List[Session], *, columns: Optional[str] = None):\n if not columns:\n columns = \"id,start_time,status,provider,url\"\n\n if any(s.ssh_enabled for s in sessions):\n columns += \",ssh\"\n\n return tabulate(collection=sessions, columns=columns, columns_mapping=SESSION_COLUMNS)",
"def to_Table(self, **kwargs):\n mean_transmit, transmit_ = self.transmit\n data_ = {'WAVELENGTH': self._wavelength,\n 'THROUGHPUT': mean_transmit}\n for num, filterk in enumerate(transmit_, 1):\n data_['THROUGHPUT_{0:d}'.format(num)] = filterk\n data = SimpleTable(data_)\n\n if self.wavelength_unit is not None:\n data.header['WAVELENGTH_UNIT'] = self.wavelength_unit\n data.header['DETECTOR'] = self.dtype\n data.header['COMPNAME'] = self.name\n data.header['NAME'] = self.name\n data.set_comment('THROUGHPUT', 'filter throughput definition')\n data.set_comment('WAVELENGTH', 'filter wavelength definition')\n for num in range(1, len(transmit_) + 1):\n data.set_comment('THROUGHPUT_{0:d}'.format(num),\n 'filter throughput sample')\n data.set_comment('WAVELENGTH', self.wavelength_unit or 'AA')\n return data",
"def stream_from_dict(desc):\r\n stream = Stream()\r\n stream.update(desc)\r\n return stream",
"def session_table(session, global_params=False, source_info=True, path_info=True):\n from ecogdata.devices.data_util import find_loadable_files\n conf = session_conf(session)\n top_level_info = conf.pop('session')\n named_recordings = sorted([s for s in conf.sections if s != 'session'])\n keys = set(top_level_info.keys())\n for r in named_recordings:\n keys.update(conf[r].keys())\n if not global_params:\n gp = set(load_params().keys())\n keys.difference_update(gp)\n if not path_info:\n path_keys = {'nwk_path', 'exp_path', 'network_exp', 'local_exp', 'store_path'}\n keys.difference_update(path_keys)\n required_keys = ['headstage', 'electrode', 'exp_type']\n other_keys = list(keys.difference(required_keys))\n if source_info:\n columns = list(required_keys) + ['primary_file', 'downsampled_file'] + list(other_keys)\n else:\n columns = list(required_keys) + list(other_keys)\n tab = pd.DataFrame(columns=columns)\n for r in named_recordings:\n rec = top_level_info.copy()\n rec.update(conf[r])\n tab_row = dict([(k, rec.get(k, 'unknown')) for k in required_keys + other_keys])\n tab_row = filter_values(tab_row)\n if source_info:\n tab_row['primary_file'] = find_loadable_files(session, r, downsampled=False)\n tab_row['downsampled_file'] = find_loadable_files(session, r, downsampled=True)\n tab = tab.append(pd.DataFrame(index=[r], data=tab_row))\n return tab",
"def generate_df(js_dict, naming, value=\"value\"):\n\n values = []\n dimensions, dim_names = get_dimensions(js_dict, naming)\n values = get_values(js_dict, value=value)\n output = pd.DataFrame([category + [values[i]]\n for i, category in\n enumerate(get_df_row(dimensions, naming))])\n output.columns = dim_names + [value]\n output.index = range(0, len(values))\n return output",
"def _listofdict_to_df(self, osw_dict=None): \n if type(osw_dict) is not dict:\n raise ValueError(\"The 'osw_dict' arg is invalid!\")\n \n frame = pd.DataFrame.from_dict(osw_dict, orient='columns')\n \n return frame",
"def test_stream_sorted():\n archive = Archive()\n archive.commit(doc=DataFrameDocument(df=DF1))\n names = list(archive.open(version=0).sorted(keys=[0]).to_df()['Name'])\n assert names == ['Alice', 'Alice', 'Bob', 'Claire']",
"def generate_log_df(log_columns, log_values):\n return pd.DataFrame(dict(zip(log_columns, log_values)), index=[0])",
"def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):\n super(Stream, self).__init__(\"stream\")\n\n # Validate arg\n # ------------\n if arg is None:\n arg = {}\n elif isinstance(arg, self.__class__):\n arg = arg.to_plotly_json()\n elif isinstance(arg, dict):\n arg = _copy.copy(arg)\n else:\n raise ValueError(\n \"\"\"\\\nThe first argument to the plotly.graph_objs.table.Stream \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.table.Stream`\"\"\"\n )\n\n # Handle skip_invalid\n # -------------------\n self._skip_invalid = kwargs.pop(\"skip_invalid\", False)\n\n # Import validators\n # -----------------\n from plotly.validators.table import stream as v_stream\n\n # Initialize validators\n # ---------------------\n self._validators[\"maxpoints\"] = v_stream.MaxpointsValidator()\n self._validators[\"token\"] = v_stream.TokenValidator()\n\n # Populate data dict with properties\n # ----------------------------------\n _v = arg.pop(\"maxpoints\", None)\n self[\"maxpoints\"] = maxpoints if maxpoints is not None else _v\n _v = arg.pop(\"token\", None)\n self[\"token\"] = token if token is not None else _v\n\n # Process unknown kwargs\n # ----------------------\n self._process_kwargs(**dict(arg, **kwargs))\n\n # Reset skip_invalid\n # ------------------\n self._skip_invalid = False",
"def to_df(self):\n if self.shape > 1:\n range_str = [s for s in range(self.shape)]\n iterables = [self.columns, range_str]\n multiindex = pd.MultiIndex.from_product(iterables, names=['song', 'frame'])\n # multiindex = [i for i in itertools.product(self.columns, range_str, repeat=1)]\n df = pd.DataFrame(columns=multiindex, index=self.columns, dtype=np.float64)\n\n for c_1 in self.columns:\n for c_2 in self.columns:\n for s in range_str:\n df.loc[c_1][c_2, s] = self.dict_[c_1][c_2][s]\n df = df.T\n else:\n df = pd.DataFrame(columns=self.columns + ['song'], dtype=np.float64)\n df['song'] = self.columns\n df = df.set_index('song')\n\n for c_1 in self.columns:\n for c_2 in self.columns:\n df.loc[c_1, c_2] = self.max_diff(c_1, c_2)\n\n return df",
"def streaming_weather_data(**kwargs):\n df = weather_data(['San Francisco'])\n df['time'] = [pd.Timestamp.now()]\n return df.set_index('time')",
"def load_stream(self, stream):\n import pyarrow as pa\n\n dataframes_in_group = None\n\n while dataframes_in_group is None or dataframes_in_group > 0:\n dataframes_in_group = read_int(stream)\n\n if dataframes_in_group == 2:\n batch1 = [batch for batch in ArrowStreamSerializer.load_stream(self, stream)]\n batch2 = [batch for batch in ArrowStreamSerializer.load_stream(self, stream)]\n yield (\n [self.arrow_to_pandas(c) for c in pa.Table.from_batches(batch1).itercolumns()],\n [self.arrow_to_pandas(c) for c in pa.Table.from_batches(batch2).itercolumns()],\n )\n\n elif dataframes_in_group != 0:\n raise ValueError(\n \"Invalid number of pandas.DataFrames in group {0}\".format(dataframes_in_group)\n )",
"def get_ts_df(self):\n df = pd.DataFrame(self.ts_list)\n df.columns = self.col_names\n df.sort_values(by=self.col_names[0], inplace=True)\n df.reset_index(drop=True, inplace=True)\n\n return df",
"def adapter(data, headers, **kwargs):\n keys = (\"sep_title\", \"sep_character\", \"sep_length\")\n return vertical_table(data, headers, **filter_dict_by_key(kwargs, keys))"
] |
[
"0.7518011",
"0.62724274",
"0.6179858",
"0.588675",
"0.5853962",
"0.58128667",
"0.57937926",
"0.5699301",
"0.56680727",
"0.5644031",
"0.5609429",
"0.55866784",
"0.55698466",
"0.5561937",
"0.55287117",
"0.5496556",
"0.545095",
"0.5440981",
"0.5406918",
"0.5390033",
"0.5360076",
"0.5336658",
"0.5329586",
"0.53174394",
"0.5291928",
"0.52620155",
"0.5253498",
"0.5251155",
"0.5240381",
"0.5220377"
] |
0.71313876
|
1
|
Method to print a stream table from a DataFrame. The method takes any argument understood by DataFrame.to_string.
|
def stream_table_dataframe_to_string(stream_table, **kwargs):
# Set some default values for keyword arguments
na_rep = kwargs.pop("na_rep", "-")
justify = kwargs.pop("justify", "center")
float_format = kwargs.pop("float_format", lambda x: "{:#.5g}".format(x))
# Print stream table
return stream_table.to_string(
na_rep=na_rep, justify=justify, float_format=float_format, **kwargs
)
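
A minimal usage sketch (not part of the original record): the stream-table DataFrame below is invented for illustration, and any keyword accepted by DataFrame.to_string can be forwarded.

import pandas as pd

# Hypothetical stream table; the row/column labels are made up for illustration.
stream_table = pd.DataFrame(
    {"Inlet": [298.15, 101325.0, None], "Outlet": [350.0, 101325.0, 0.5]},
    index=["Temperature [K]", "Pressure [Pa]", "Vapor Fraction"],
)

# Defaults: missing entries render as "-", headers are centered, floats use 5 significant figures.
print(stream_table_dataframe_to_string(stream_table))

# Extra keywords are passed straight through to DataFrame.to_string.
print(stream_table_dataframe_to_string(stream_table, columns=["Outlet", "Inlet"]))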
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def print_dataframe_content(dataframe):\n\n dataframe_len = len(dataframe.columns)\n dataframe_columns_index = list(range(0, dataframe_len))\n dataframe_column_index_len = len(dataframe_columns_index)\n\n if dataframe.empty:\n print('Empty dataframe.')\n else:\n should_continue_process = True\n current_process_batch_size = 0\n\n while should_continue_process:\n slide_object = slice(current_process_batch_size,\n current_process_batch_size+MAX_ROWS_TO_PRINT)\n dataframe_index_columns = dataframe_columns_index[slide_object]\n\n current_process_batch_size = current_process_batch_size+MAX_ROWS_TO_PRINT\n\n dataframe_buffer = dataframe.iloc[:,\n dataframe_index_columns].copy()\n print(tabulate(dataframe_buffer, headers='keys', tablefmt='psql'))\n\n if current_process_batch_size >= dataframe_column_index_len:\n should_continue_process = False",
"def print_dataframe(self, df):\n header = [\n '일련번호',\n '학생 id',\n '이름',\n '생년월일',\n '중간고사',\n '기말고사',\n '평균',\n 'Grade'\n ]\n\n header_str = '{:10s}' * len(header)\n print(header_str.format(*header))\n print(df.to_string(header=False, col_space=10))",
"def print_df(cluster_df):\n import sys\n\n cluster_df.to_string(sys.stdout, index=False, header=True)\n # Print an empty line to finish\n print()",
"def print_table(self, table):\n raise NotImplementedError('print_table method not defined!')",
"def print_table(hdrs, flag=False, data=[],fmt='psql'):\n\tres = cur.fetchall()\n\tif flag:\n\t\tres = data\n\tprint(tabulate(res, headers=hdrs, tablefmt=fmt))",
"def df_print(df):\n with pd.option_context('display.max_rows', None, 'display.max_columns', 3):\n print(df)",
"def dataframe_displayer(df):\n\n #On paramètre les options d'affichage du module pandas\n pd.set_option('display.max_rows', None)\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', None)\n pd.set_option('display.max_colwidth', -1)\n\n print(df)",
"def show_dataframe(self, df, **kwargs):\n show_index = False\n if 'show_index' in kwargs:\n show_index = kwargs['show_index']\n\n exceed_limit = len(df) > self.max_result\n header_buf = StringIO(\"\")\n if show_index:\n idx_name = str(df.index.name) if df.index.name is not None else \"\"\n header_buf.write(self.normalizeColumn(idx_name) + \"\\t\")\n header_buf.write(self.normalizeColumn(str(df.columns[0])))\n for col in df.columns[1:]:\n header_buf.write(\"\\t\")\n header_buf.write(self.normalizeColumn(str(col)))\n header_buf.write(\"\\n\")\n\n body_buf = StringIO(\"\")\n rows = df.head(self.max_result).values if exceed_limit else df.values\n rowNumber = len(rows)\n index = df.index.values\n for idx, row in zip(index, rows):\n if show_index:\n body_buf.write(\"%html <strong>{}</strong>\".format(idx))\n body_buf.write(\"\\t\")\n body_buf.write(self.normalizeColumn(str(row[0])))\n for cell in row[1:]:\n body_buf.write(\"\\t\")\n body_buf.write(self.normalizeColumn(str(cell)))\n # don't print '\\n' after the last row\n rowNumber -=1\n if rowNumber != 0:\n body_buf.write(\"\\n\")\n body_buf.seek(0)\n header_buf.seek(0)\n print(\"%table \" + header_buf.read() + body_buf.read())\n body_buf.close()\n header_buf.close()\n if exceed_limit:\n print(\"\\n%html <font color=red>Results are limited by {}.</font>\".format(self.max_result))",
"def prettyprint(self):\n\n import pandas.io.sql as psql\n df = psql.read_sql(\"SELECT * FROM ATOM\",self.conn)\n print(df)",
"def show_data(df):\n printmd(str(\"The Data contains **\" + str(df.shape[0])+ '** rows.'))\n printmd(\"*__Sample of the data :__*\")\n display(df.head(n=5))\n print(\"\")\n print(\"\")",
"def print_full(df):\n pandas.set_option('display.max_rows', len(df))\n print df\n pandas.reset_option('display.max_rows')",
"def print_dataframe(df):\n print (\"\")\n if df.shape[0] > 20:\n print (df.head())\n print (df.tail())\n else: \n print (df)",
"def print_table(self, table, connection=None):\n\n connection = connection or self.engine.connect()\n result = connection.execute(select([table]))\n print(\n '-----------------------------------------------------------'\n '\\nColumns:\\n\\t{}\\nData:\\n\\t{}\\n'\n '-----------------------------------------------------------'.format(\n table.columns, '\\n\\t'.join(str(row) for row in result)\n )\n )\n\n result.close()",
"def print_table(table):\n for row in table:\n print(row)",
"def display_df(df):\n with pd.option_context(\"display.max_rows\", 1000, \"display.max_columns\", 100):\n display(df.head(10))",
"def print_table(table):\n for row in table:\n print(row)",
"def print_table(table):\n for row in table:\n print(row)",
"def debug_print_dataframe(data, num_rows=2, debug=False):\n if debug:\n with pandas.option_context('display.max_rows', None, 'display.max_columns',\n None):\n print(data[:num_rows])",
"def echo(df: pd.DataFrame) -> str:\n\n tmp = df.name + \" < \" + df.message\n values = tmp.to_csv(index=False).replace(\"0\\n\", \"\")\n return values",
"def sql_display(line, cell=None):\n val = cell if cell is not None else line \n return spark.sql(val).limit(max_show_lines).toPandas()",
"def df_to_string(df):\n with pd.option_context('display.max_rows', None, 'display.max_columns', 3):\n return df.to_string()",
"def disp(df):\n display(HTML(df.to_html(index=False)))",
"def cprint(df, nrows=None, sample=False):\n if not isinstance(df, (pd.DataFrame, dask.dataframe.DataFrame)):\n try:\n df = df.to_frame()\n except:\n raise ValueError('object cannot be coerced to df')\n\n if not nrows: nrows = 5\n print('-' * 79)\n print('dataframe information')\n print('-' * 79)\n if sample:\n print(df.sample(nrows))\n else:\n print(df.tail(nrows))\n print('-' * 50)\n print(df.info())\n print('-' * 79)\n print()",
"def __str__(self) -> str:\n if self.data is not None:\n list_of_params = []\n for key, data_dict in self.data.to_dict(orient=\"index\").items():\n data_dict[\"index\"] = key\n list_of_params.append(data_dict)\n formated_list_of_params = self.format_params(list_of_params)\n return f\"\\n{tabulate(formated_list_of_params, headers='keys', tablefmt='fancy_grid')}\"\n else:\n return \"Empty DataFrame\"",
"def raw_data(df):\n\n print('\\nDisplaying individual trip sample...\\n')\n start_time = time.time()\n\n cnt = 0\n rows = 5\n while True:\n if cnt+rows < df.shape[0]:\n aux = df.iloc[cnt:cnt+rows]\n print(\"\\n\" + aux.to_string(index=False))\n cnt += rows\n answer = input(\"Do you want to see more individual trips? (yes or no)\")\n if answer != \"yes\":\n break\n else:\n break\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def print_full(x):\r\n pd.set_option('display.max_rows', len(x))\r\n pd.set_option('display.max_columns', 1000)\r\n pd.set_option('display.expand_frame_repr', False)\r\n print(x)\r\n pd.reset_option('display.max_rows')\r\n pd.reset_option('display.max_columns')\r\n pd.reset_option('display.expand_frame_repr')",
"def print_table(table):\n for i in range(len(table)):\n print \"Row \", i, \"\\t\",\n for j in range(len(table[i])):\n print table[i][j],\n print \"\\n\"",
"def print_data_table_length(document_label, data_frame, debug=False):\n print('{}\\n'.format(document_label), len(data_frame))\n debug_print_dataframe(data_frame, debug=debug)",
"def table(\n columns: typing.Iterable[str],\n rows: typing.Iterable[typing.Iterable[object]],\n *,\n title: str = None,\n buffer: int = 2\n):\n\n # Determine the width of the window\n _, terminalWidth = os.popen('stty size', 'r').read().split()\n terminalWidth = int(terminalWidth)\n tprint = lambda x: print(x) if len(x) < terminalWidth else print(x[:terminalWidth - 4] + '...')\n\n # Determine the columns widths\n columnWidths = [0]*len(columns)\n for row in [columns] + rows:\n for i in range(len(columns)):\n columnWidths[i] = max(columnWidths[i], len(str(row[i])))\n columnWidths = [x + buffer for x in columnWidths]\n\n # define the row formats\n rowTemplate = '|'.join(['{'+str(i)+':^{'+str(i + len(columns))+'}}' for i in range(len(columns))])\n\n header = rowTemplate.format(*columns, *columnWidths)\n print()\n\n if title is not None:\n width = min(terminalWidth, len(header))\n print(\"{0:^{1}}\".format(title, width))\n print('='*width)\n\n tprint(header)\n tprint('='*len(header))\n for row in rows:\n tprint(rowTemplate.format(*[str(x) for x in row], *columnWidths))\n print()",
"def print_table(table):\n # transpose the table:\n table = map(list, zip(*table))\n # get the column width:\n col_width = [max(len(str(x)) for x in col) for col in zip(*table)]\n # print it to screen:\n print\n for line in table:\n print \"| \" + \" | \".join(\"{:{}}\".format(x, col_width[i]) for i, x in enumerate(line)) + \" |\"\n print"
] |
[
"0.71346676",
"0.70650566",
"0.67613006",
"0.6756046",
"0.66866994",
"0.6665646",
"0.6648919",
"0.6601342",
"0.6567974",
"0.6533634",
"0.647307",
"0.6460402",
"0.63788",
"0.63754827",
"0.6371477",
"0.6357922",
"0.6357922",
"0.63577956",
"0.6285462",
"0.6260336",
"0.6228227",
"0.6205464",
"0.6148797",
"0.6141786",
"0.6135759",
"0.6060919",
"0.60537857",
"0.60512465",
"0.6025124",
"0.60021955"
] |
0.7853061
|
0
|
Attempt to find a StateBlock-like object connected to a Port. If the object is indexed both in space and time, assume that the time index comes first. If no components are assigned to the Port, raise a ValueError. If the first component's parent block has no index, raise an AttributeError. If different variables on the port appear to be connected to different state blocks, raise a RuntimeError.
|
def _get_state_from_port(port, time_point):
vlist = list(port.iter_vars())
states = [v.parent_block().parent_component() for v in vlist]
if len(vlist) == 0:
raise ValueError(
f"No block could be retrieved from Port {port.name} "
f"because it contains no components."
)
    # Check the number of indices of the parent property block. If it's indexed
# both in space and time, keep the second, spatial index and throw out the
# first, temporal index. If that ordering is changed, this method will
# need to be changed as well.
try:
idx = vlist[0].parent_block().index()
except AttributeError as err:
raise AttributeError(
f"No block could be retrieved from Port {port.name} "
f"because block {vlist[0].parent_block().name} has no index."
) from err
# Assuming the time index is always first and the spatial indices are all
# the same
if isinstance(idx, tuple):
idx = (time_point, vlist[0].parent_block().index()[1:])
else:
idx = (time_point,)
# This method also assumes that ports with different spatial indices won't
# end up at the same port. Otherwise this check is insufficient.
if all(states[0] is s for s in states):
return states[0][idx]
raise RuntimeError(
f"No block could be retrieved from Port {port.name} "
f"because components are derived from multiple blocks."
)
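
A self-contained illustration of the index handling, using throwaway stand-ins for the port/block objects; every class below is a hypothetical mock invented for this sketch, not Pyomo or IDAES API.

class _FakeBlock:
    def __init__(self, name, idx, parent):
        self.name, self._idx, self._parent = name, idx, parent
    def index(self):
        return self._idx
    def parent_component(self):
        return self._parent

class _FakeVar:
    def __init__(self, block):
        self._block = block
    def parent_block(self):
        return self._block

class _FakePort:
    def __init__(self, name, variables):
        self.name, self._variables = name, variables
    def iter_vars(self):
        return iter(self._variables)

# The "parent component" is just a dict keyed by the (time,) index here.
states_by_time = {(0.0,): "state data at t=0", (1.0,): "state data at t=1"}
block = _FakeBlock("properties[1.0]", 1.0, states_by_time)  # time-only (scalar) index
port = _FakePort("inlet", [_FakeVar(block)])
print(_get_state_from_port(port, 0.0))  # -> "state data at t=0"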
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def find(self,port):\n\tif self.portlist == []:\n\t\treturn -1\n\tif isinstance(port,Port):\n\t\tport = (int(port.GetPortNbr()), port.GetPortWay())\n\tmatch = ( (int(self.portlist[0].GetPortNbr()), self.portlist[0].GetPortWay()) == port )\n\ti = 0\n\twhile ( not match and i<len(self.portlist)-1 ):\n\t\ti += 1\n\t\tmatch = ( (int(self.portlist[i].GetPortNbr()), self.portlist[i].GetPortWay()) == port )\n\tif match:\n\t\treturn i\n\telse:\n\t\treturn -1",
"def find(self, find_state):\n\t\tpointer = self.head\n\t\tif pointer.state == find_state:\n\t\t\treturn pointer.state\n\t\telse:\n\t\t\twhile pointer.next != None:\n\t\t\t\tpointer = pointer.next\n\t\t\t\tif pointer.state == find_state:\n\t\t\t\t\treturn pointer.state\n\t\tprint \"Sorry, your item was not found\"",
"def __detect_connected_sap (port):\n connected_port = [l.dst for u, v, l in\n nffg.real_out_edges_iter(port.node.id)\n if str(l.src.id) == str(port.id)]\n # If the number of detected nodes is unexpected continue to the next req\n if len(connected_port) < 1:\n log.warning(\"Skip edge rebinding: No connected node is detected for \"\n \"SAP port: %s\" % port)\n return None\n elif len(connected_port) > 1:\n log.warning(\"Skip edge rebinding: Multiple connected nodes are \"\n \"detected for SAP port: %s: %s!\" % (port, connected_port))\n return None\n elif connected_port[0].node.type == NFFG.TYPE_SAP:\n return connected_port[0]\n else:\n return None",
"def xtest_find_port(self):\n line, head = self._get_line()\n p1, p2, p3, p4 = self.box1.ports()\n\n head.pos = 110, 50\n port = self.tool.find_port(line, head, self.box1)\n self.assertEquals(p1, port)\n\n head.pos = 140, 60\n port = self.tool.find_port(line, head, self.box1)\n self.assertEquals(p2, port)\n\n head.pos = 110, 95\n port = self.tool.find_port(line, head, self.box1)\n self.assertEquals(p3, port)\n\n head.pos = 100, 55\n port = self.tool.find_port(line, head, self.box1)\n self.assertEquals(p4, port)",
"def find_net_for_port(self, port, pin):\n\n # Get the port\n assert port in self.ports, (self.name, self.instance, (port, pin))\n port = self.ports[port]\n\n # Unconnected\n if pin not in port.connections:\n return None\n\n # Get the pin connection\n conn = port.connections[pin]\n\n # The connection refers to a net directly\n if isinstance(conn, str):\n return conn\n\n # Get driving block\n block = self.get_neighboring_block(conn.driver)\n assert block is not None, (self.instance, conn.driver)\n\n # Recurse\n return block.find_net_for_port(conn.port, conn.pin)",
"def find_block(int_svip, comp_block):\n print(\"-\" * 20 + \" find_block started\")\n bsz = comp_block\n outsz = 0\n bsdict = {}\n bsdict [0] = bsz\n # Build the dictionary of the host networks\n while outsz < 255:\n outsz = outsz + bsz\n bsdict[outsz] = (outsz + bsz) -1\n #print(outsz)\n \n # Determine the upper and lower bounds of the host network\n for key in bsdict.keys():\n if int_svip >= key and int_svip <= bsdict[key]:\n block_start = key\n block_end = bsdict[key]\n\n #print(\"Block start is {}\\nBlock end is {}\".format(block_start, block_end))\n return block_start, block_end",
"def _find_connection_element(self, var1, var2):\n cn1, cn2 = var1.component.name, var2.component.name\n cnames = set([cn1, cn2])\n for conn in getattr(self.model, u'connection', []):\n mc = conn.map_components\n if set([mc.component_1, mc.component_2]) == cnames:\n break\n else:\n conn = None\n if conn:\n swap = conn.map_components.component_1 == cn2\n else:\n swap = False\n return conn, swap",
"def get_node_from_hint(self, hint='blocked'):\n self.cond.acquire()\n node = None\n try:\n if hint == 'blocked':\n node = self.nodes[self.next_index]\n self.next_index = (self.next_index + 1) % len(self.nodes)\n\n elif hint == 'strided':\n previous_node = self.nodes[self.next_index - 1]\n start_index = self.next_index\n # Search for next node\n while (self.nodes[self.next_index] == previous_node):\n # Skip next\n self.next_index = (self.next_index + 1) % len(self.nodes)\n \n if self.next_index == start_index:\n # All entries in nodefile is identical\n break\n\n # Select node\n node = self.nodes[self.next_index]\n self.next_index = (self.next_index + 1) % len(self.nodes)\n \n #elif hint == 'auto':\n # raise InfoException(\"The auto hint is not implemented yet. Use 'blocked', 'strided' or 'local'.\")\n\n elif hint == 'local':\n node = 'localhost'\n\n else:\n raise InfoException(\"ClusterProcess does not support hint='\"+hint+\"'. Please replace with 'strided', 'blocked' or 'local'.\")\n finally:\n self.cond.release()\n\n return node",
"def find_unused_connection(self, node):\r\n value = None\r\n for index, c in enumerate(node.connections):\r\n if not c.visited:\r\n if value is None:\r\n value = index\r\n if c.value == 'RUNG':\r\n return index\r\n return value",
"def _get_port_details(self, port_name):\n # TODO: Need to add a check that the port was recorded\n component_class = self.celltype.model.component_class\n port = None\n for name in (port_name, port_name + '__cell'):\n try:\n port = component_class.send_port(name)\n except NineMLNameError:\n try:\n port = component_class.state_variable(name)\n except NineMLNameError:\n pass\n if port is None:\n raise Pype9UsageError(\n \"Unknown port or state-variable '{}' for '{}' \"\n \"component array (available '{}').\".format(\n port_name, self.name, \"', '\".join(chain(\n component_class.send_port_names,\n component_class.sub_component(\n 'cell').send_port_names))))\n if isinstance(port, StateVariable):\n communicates = 'analog'\n else:\n communicates = port.communicates\n return communicates, port.name",
"def next_component(self, index):\n for component_index in range(index + 1, len(self.address)):\n if self.address[self.__keys[component_index]].value:\n return self.__keys[component_index]\n return None",
"def __getitem__(self, qubit):\n for oneq_state in self.states:\n if oneq_state.qubit == qubit:\n return oneq_state\n raise IndexError()",
"def findConnectedSpinSystem(spinSystem, delta=-1):\n\n spinSystemB = None\n \n nLinks = getSeqSpinSystemLinks(spinSystem, delta=delta)\n if nLinks:\n nLink = nLinks[0]\n if spinSystem is nLink.parent:\n spinSystemB = nLink.possibility\n \n else:\n spinSystemB = nLink.parent\n\n return spinSystemB",
"def __getitem__(self, item: tp.Union[str, Port, Block]):\n from blox.core.port import Port\n from blox.core.block import Block\n\n # For block it returns the block state\n if isinstance(item, Block):\n return self._block_states[item]\n\n # For ports its returns the port value (if any)\n elif isinstance(item, Port):\n return self._block_states[item.block].ports[item]\n\n # For strings (item should be string) it returns the value of the global parameter\n else:\n if not isinstance(item, str):\n raise TypeError('Global parameter names must be strings')\n return self._meta[item]",
"def findSolution(initialState, goalState):\n initialState = convertToRow(initialState)\n goalState = convertToRow(goalState)\n closedNode = set()\n nodeList = Queue()\n allNode = Queue()\n count = 0\n root = Node(initialState, None, count)\n count += 1\n if isSolvable(initialState):\n nodeList.put(root)\n while(nodeList.qsize() != 0):\n currNode = nodeList.get()\n if currNode.key == goalState:\n allNode.put(currNode)\n return 1, currNode, closedNode, allNode\n elif isOpen(currNode, closedNode):\n allNode.put(currNode)\n closedNode.add(currNode.key)\n if isSolvable(currNode.key):\n count = getChild(currNode, nodeList, closedNode, count)\n else: \n continue\n else:\n continue\n else:\n return 0, None, closedNode, allNode\n return 0, None, closedNode, allNode",
"def find_read_number_block(self):\n\t\tnode = self.find_read_number_block_link()\n\t\tif node is not None:\n\t\t\treturn node\n\n\t\tnode = self.find_read_number_block_fixed_raw()\n\t\tif node is not None:\n\t\t\treturn node\n\n\t\t# Couldn't find the node, bail out.\n\t\tself.hdf_internal_error(\"unknown HDF5 structure: can't find read block item\")",
"def find(self, CableLength=None, ChainTopology=None, ConnectRetries=None, DeviceType=None, ErrorDescription=None, ErrorState=None, Hostname=None, Ip=None, IsMaster=None, IxnBuildNumber=None, IxosBuildNumber=None, LicenseErrors=None, MasterDevice=None, OsType=None, ProtocolBuildNumber=None, SequenceId=None, State=None):\n # type: (int, str, int, str, str, str, str, str, bool, str, str, List[str], str, str, str, int, str) -> Locations\n return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))",
"def nmap_get_state():\n # Only checking port 22, but could check others if needeed\n ip_str = str(self.ip)\n nm = nmap.PortScanner()\n nm.scan(ip_str, arguments='-sn')\n # Get host state if it is reachable \n if ip_str in nm.all_hosts():\n self.stored_state = nm[ip_str].state()\n return self.stored_state\n\n # Otherwise unreachable\n self.stored_state = None\n\n return \"Unreachable\"",
"def find_by_status(self, host, state):",
"def test_get_node_state_smartfail(self):\n pass",
"def get_state():\n\tif node.id < 0:\n\t\treactor.callLater(0, get_state)\n\t\treturn\n\t\n\tnode.send(node.id, generate_start_graph, None)\n\tnode.target_filename = target_filename\n\tnode.roots = {}\n\tif DO_PROBLEMS:\n\t\ttarget_msg = msg_get_dirty_connections\n\telse:\n\t\ttarget_msg = msg_get_connections\n\tfor i in range(0, node.id):\n\t\tnode.send(i, target_msg, node.id)\n\tnode.send(node.id, wait_full_state, 0)",
"def __init__(self, start_t: float, end_t: float, num_time_blocks: int):\n self._num_time_blocks: int = num_time_blocks\n self._num_states: Optional[int] = None\n self._nlps: Dict[int, InteriorPointInterface] = dict() # keys are the time block index (passed into the build_model_for_time_block method\n self._link_forward_matrices: Dict[int, coo_matrix] = dict() # these get multiplied by the primal vars of the corresponding time block\n self._link_backward_matrices: Dict[int, coo_matrix] = dict() # these get multiplied by the primal vars of the corresponding time block\n self._link_forward_coupling_matrices: Dict[int, coo_matrix] = dict() # these get multiplied by the coupling variables\n self._link_backward_coupling_matrices: Dict[int, coo_matrix] = dict() # these get multiplied by the coupling variables\n\n self._primals_lb: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._primals_ub: BlockVector = BlockVector(self._num_time_blocks + 1)\n\n self._ineq_lb: BlockVector = BlockVector(self._num_time_blocks)\n self._ineq_ub: BlockVector = BlockVector(self._num_time_blocks)\n\n self._init_primals: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._primals: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._delta_primals: BlockVector = BlockVector(self._num_time_blocks + 1)\n\n self._init_slacks: BlockVector = BlockVector(self._num_time_blocks)\n self._slacks: BlockVector = BlockVector(self._num_time_blocks)\n self._delta_slacks: BlockVector = BlockVector(self._num_time_blocks)\n\n self._init_duals_eq: BlockVector = BlockVector(self._num_time_blocks)\n self._duals_eq: BlockVector = BlockVector(self._num_time_blocks)\n self._delta_duals_eq: BlockVector = BlockVector(self._num_time_blocks)\n\n self._init_duals_ineq: BlockVector = BlockVector(self._num_time_blocks)\n self._duals_ineq: BlockVector = BlockVector(self._num_time_blocks)\n self._delta_duals_ineq: BlockVector = BlockVector(self._num_time_blocks)\n\n self._init_duals_primals_lb: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._duals_primals_lb: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._delta_duals_primals_lb: BlockVector = BlockVector(self._num_time_blocks + 1)\n\n self._init_duals_primals_ub: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._duals_primals_ub: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._delta_duals_primals_ub: BlockVector = BlockVector(self._num_time_blocks + 1)\n\n self._init_duals_slacks_lb: BlockVector = BlockVector(self._num_time_blocks)\n self._duals_slacks_lb: BlockVector = BlockVector(self._num_time_blocks)\n self._delta_duals_slacks_lb: BlockVector = BlockVector(self._num_time_blocks)\n\n self._init_duals_slacks_ub: BlockVector = BlockVector(self._num_time_blocks)\n self._duals_slacks_ub: BlockVector = BlockVector(self._num_time_blocks)\n self._delta_duals_slacks_ub: BlockVector = BlockVector(self._num_time_blocks)\n\n self._eq_resid: BlockVector = BlockVector(self._num_time_blocks)\n self._ineq_resid: BlockVector = BlockVector(self._num_time_blocks)\n self._grad_objective: BlockVector = BlockVector(self._num_time_blocks + 1)\n self._jac_eq: BlockMatrix = BlockMatrix(nbrows=self._num_time_blocks, nbcols=self._num_time_blocks + 1)\n self._jac_ineq: BlockMatrix = BlockMatrix(nbrows=self._num_time_blocks, nbcols=self._num_time_blocks + 1)\n self._kkt: BlockMatrix = BlockMatrix(nbrows=num_time_blocks + 1, nbcols=num_time_blocks + 1)\n self._rhs: BlockVector = BlockVector(nblocks=num_time_blocks + 1)\n\n 
self._setup(start_t=start_t, end_t=end_t)\n self._setup_block_vectors()\n self._setup_jacs()\n self._setup_kkt_and_rhs_structure()",
"def get_state(self):\n return PLANET_STATES[self.state][0]",
"def get_state(self):\n return PLANET_STATES[self.state][0]",
"def block_state_index(palette, **blockstate):\n try:\n return palette.index(blockstate)\n except ValueError:\n palette.append(blockstate)\n return len(palette)-1",
"def _get_next_unconnected_pin(self) -> Pin:\n for pin in self.pins:\n if not pin._isconnected():\n return pin\n\n raise ValueError(f\"{self.__class__.__name__} has no unconnected pins.\")",
"def lookup_socket(self, address): # TODO: optimize me\n\n net_tuple = self.read_nodestate(0)\n for item in net_tuple:\n discovered_address = item[1]\n if address == discovered_address:\n return item[0]",
"def get_Port_Struct_from_Port_v2(prfnbr, checkPort):\n try: \n portLink = ael.PortfolioLink.select('member_prfnbr=%i' % prfnbr)\n match = get_Port_Struct(portLink, checkPort)\n except:\n match = 0\n return match",
"def find_start(cend, canchor, name, plevel):\n col = 1\n for bank in plevel:\n pend, panchor, pstart_names = bank\n if name == pend:\n if not panchor and not canchor:\n weight = 10.0\n else:\n weight = 1.0\n return col, weight\n elif cend == pend and name == pstart_names:\n return col, 5.0\n col += 1\n raise Exception(\"connecting layer not found!\")",
"def _read_device_state():\n \n try:\n _debug_print(\"Connecting to bus...\")\n i2c_bus = smbus.SMBus(_bus_id)\n\n current_state = i2c_bus.read_byte(_device_addr) & 0x0F\n\n return int(current_state)\n\n except:\n print(\"Error: There was a problem reading from the device\")\n # Best to re-raise as we can't recover from this\n raise"
] |
[
"0.5097411",
"0.49286756",
"0.49020544",
"0.4896261",
"0.48728776",
"0.4848658",
"0.48388606",
"0.48070934",
"0.47842085",
"0.4696942",
"0.46295482",
"0.46090135",
"0.4582102",
"0.4582032",
"0.4542554",
"0.45188886",
"0.45160478",
"0.45076382",
"0.45029464",
"0.45028275",
"0.44820297",
"0.4469051",
"0.44466236",
"0.44466236",
"0.44436562",
"0.44319454",
"0.44144836",
"0.44090146",
"0.43962377",
"0.4394565"
] |
0.7329799
|
0
|
Create a Pandas DataFrame that contains a list of user-defined attributes from a set of Blocks.
|
def generate_table(blocks, attributes, heading=None, exception=True):
if heading is None:
heading = attributes
st = DataFrame(columns=heading)
row = [None] * len(attributes) # not a big deal but save time on realloc
for key, s in blocks.items():
for i, a in enumerate(attributes):
j = None
if isinstance(a, (list, tuple)):
# if a is list or tuple, assume index supplied
try:
assert len(a) > 1
except AssertionError:
                    _log.error(f"An index must be supplied for attribute {a[0]}")
                    raise AssertionError(
                        f"An index must be supplied for attribute {a[0]}"
)
j = a[1:]
a = a[0]
v = getattr(s, a, None)
if j is not None and v is not None:
try:
v = v[j]
except KeyError:
if not exception:
v = None
else:
_log.error(f"{j} is not a valid index of {a}")
raise KeyError(f"{j} is not a valid index of {a}")
try:
v = value(v, exception=False)
except TypeError:
if not exception:
v = None
else:
_log.error(f"Cannot calculate value of {a} (may be subscriptable)")
raise TypeError(
f"Cannot calculate value of {a} (may be subscriptable)"
)
except ZeroDivisionError:
v = None
row[i] = v
st.loc[key] = row
return st
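
A short, hedged usage sketch; the FakeBlock class is a stand-in invented for illustration, and the call assumes the same module context as the snippet above (pandas DataFrame, Pyomo's value, and a module logger _log).

class FakeBlock:
    def __init__(self, temperature, flow):
        self.temperature = temperature      # plain scalar attribute
        self.flow = {(0.0,): flow}          # "indexed" attribute, keyed like an indexed component

blocks = {"inlet": FakeBlock(300.0, 1.5), "outlet": FakeBlock(350.0, 1.5)}
df = generate_table(
    blocks,
    attributes=["temperature", ("flow", 0.0)],  # plain name, and (name, index) pair
    heading=["T [K]", "F [mol/s]"],
)
print(df)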
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def start_dataframe(block_zones, block_guid, block_name, block_pt, block_layer):\n # make an empty list to populate with block objects\n blocks = []\n # connect gh wires to python block classes\n for i, z in enumerate(block_zones):\n b = Block()\n b.guid = block_guid[i]\n b.name = block_name[i]\n b.point = block_pt[i]\n b.layer = block_layer[i]\n b.phase = find_phase(b.layer)\n b.zones = z\n b.x = b.point[0]\n b.y = b.point[1]\n b.z = b.point[2]\n b.floor = find_floor(str(z))\n b.elevation = find_elevation(str(z))\n b.swing_drop = get_drop(str(z))\n b.priority = find_priority(str(z))\n b.access = get_access(str(z))\n b.survey = get_survey(str(z))\n\n # populate list of blocks\n blocks.append(b)\n\n # turn the above list to a pandas dataframe\n df = pd.DataFrame([vars(f) for f in blocks])\n\n # append a columns to df to track drop sort order\n df[\"drop_sort\"] = df.swing_drop.apply(lambda x: nat_sort(x, df))\n\n # further specify dataframe sort order\n df = df.sort_values([\"access\", \"survey\", \"elevation\", \"floor\", \"drop_sort\", \"y\", \"x\"],\n ascending=[False, True, True, False, True, False, True])\n df.reset_index(inplace=True, drop=True)\n df[\"new_order\"] = df.index + 1\n\n # append columns\n df[\"instance\"] = df.groupby(\"name\").cumcount() + 1\n df[\"sample\"] = 0\n df.loc[df.instance == 1, \"sample\"] = 1\n\n # create the survey name/bumper sticker name\n df[\"survey_name\"] = df.swing_drop + \"-\" + df.floor.map(str) + \"-\" + df.name + \"[\" + df.new_order.map(str) + \"]\"\n # df[\"survey_name\"] = f\"{df.swing_drop}-{df.floor.map(str)-{df.name}[{df.new_order.map(str)}]}\"\n return df",
"def get_attributes(units, properties=[\"p_set\", \"q_set\"]):\n df = pd.DataFrame()\n for unit in units.items():\n for prop in properties:\n df.at[unit[0], prop] = getattr(unit[1], prop)\n return df",
"def to_dataframe(self, attrs_as_columns=False):\n\n # Set up empty dict for dataframe\n ds = {}\n\n # Add every key containing a list into the dict\n keys = [k for k in self.dict.keys()]\n for key in keys:\n if isinstance(self.dict[key], list):\n ds[key] = self.dict[key]\n else:\n if attrs_as_columns:\n ds[key] = self.dict[key]\n\n # Convert entire dict to a DataFrame\n ds = pd.DataFrame(ds)\n\n # Return dataset\n return ds",
"def _make_body_cells_df(body_cells_response):\n body_cells_df = util.make_dataframe(body_cells_response)\n if not \"attributes.type\" in body_cells_df.columns.to_list():\n body_cells_df[\"attributes.type\"] = None\n body_cells_df[\"attributes.text\"] = None\n body_cells_df = body_cells_df[\n [\"text\", \"column_index_begin\", \"column_index_end\", \"row_index_begin\", \"row_index_end\", \"cell_id\",\n \"column_header_ids\", \"column_header_texts\", \"row_header_ids\", \"row_header_texts\",\n \"attributes.text\", \"attributes.type\"]]\n return body_cells_df",
"def construct_df():\n iterable = [['approach', 'contact', 'retract', 'pause'], ['force', 'height']]\n index = pd.MultiIndex.from_product(iterable, names=['segment', 'channel'])\n return pd.DataFrame(columns=index)",
"def get_block_stats(stats, centroids):\n stats_columns = [\"left\", \"top\", \"width\", \"height\", \"area\"]\n block_stats = pd.DataFrame(stats, columns=stats_columns)\n block_stats[\"centroid_x\"], block_stats[\"centroid_y\"] = centroids[:,\n 0], centroids[:, 1]\n # Ignore the label 0 since it is the background\n block_stats.drop(0, inplace=True)\n return block_stats",
"def create_sample_dataframe():\n ax_readings = []\n ay_readings = []\n az_readings = []\n mx_readings = []\n my_readings = []\n mz_readings = []\n gx_readings = []\n gy_readings = []\n gz_readings = []\n activity_list = [LABELS_NAMES[0] for _ in range(SEGMENT_TIME_SIZE)]\n\n\n for _ in range(SEGMENT_TIME_SIZE):\n ax_readings.append(random.uniform(-10,10))\n ay_readings.append(random.uniform(-10,10))\n az_readings.append(random.uniform(-10,10))\n mx_readings.append(random.uniform(-10,10))\n my_readings.append(random.uniform(-10,10))\n mz_readings.append(random.uniform(-10,10))\n gx_readings.append(random.uniform(-10,10))\n gy_readings.append(random.uniform(-10,10))\n gz_readings.append(random.uniform(-10,10))\n\n data_dict = {\n COLUMN_NAMES[0]: activity_list, COLUMN_NAMES[1]: ax_readings,\n COLUMN_NAMES[2]: ay_readings, COLUMN_NAMES[3]: az_readings,\n COLUMN_NAMES[4]: gx_readings, COLUMN_NAMES[5]: gy_readings,\n COLUMN_NAMES[6]: gz_readings, COLUMN_NAMES[7]: mx_readings,\n COLUMN_NAMES[8]: my_readings, COLUMN_NAMES[9]: mz_readings\n }\n\n df = pd.DataFrame(data=data_dict)\n return df",
"def construct_data_frame(self) -> pd.DataFrame:\n data_frame = self.base_data_frame[\n [self.name_col, self.description_col]\n ].reset_index()\n data_frame.columns = [\"label_encoder\", \"name\", \"description\"]\n\n return data_frame.set_index(\"label_encoder\")",
"def _build_rows_struct(self):\n struct = []\n # Step 1 - See docstring for details\n for row in itertools.izip_longest(*self.columns, fillvalue=(\"\",)):\n # Step 2\n row_list = [dict(itertools.izip((\"v\",\"f\",\"p\"), item))\n for item in row]\n # Step 3\n struct.append({\n \"c\": row_list\n })\n return struct",
"def make_frames(blocks, level):\n\tassert level in ('word', 'sentence', 'block'), \\\n\t'level parameter must be one of: \\'word\\', \\'sentence\\', \\'block\\''\n\t\n\tif level == 'block':\n\t\treturn pd.DataFrame(blocks, columns = ['block',])\n\telse:\n\t\tframes = []\n\t\tfor b in blocks:\n\t\t\tblob = tb.TextBlob(b.decode('utf-8','ignore'))\n\t\t\tif level == 'sentence':\n\t\t\t\tsentences = [str(i) for i in blob.sentences]\n\t\t\t\tframes.append(pd.DataFrame(sentences, columns = ['sentence']))\n\t\t\telse:\n\t\t\t\twords = list(blob.words)\n\t\t\t\tframes.append(pd.DataFrame(words, columns = ['word']))\n\t\treturn frames",
"def df(self) -> \"pandas.DataFrame\":\n titles = []\n comments = []\n alternative_codes = []\n for cat in self.values():\n titles.append(cat.title)\n comments.append(cat.comment)\n alternative_codes.append(cat.codes[1:])\n return pandas.DataFrame(\n index=list(self.keys()),\n data={\n \"title\": titles,\n \"comment\": comments,\n \"alternative_codes\": alternative_codes,\n },\n )",
"def create_frames_from_data(self, data, blocks):\n item_dict = {}\n for (item_name, data) in data.items():\n item_dict[item_name] = MeasurementFrame(\n name=data['name'],\n pixel_pose_x=data['pixel_pose_x'],\n pixel_pose_y=data['pixel_pose_y'],\n pixel_pose_theta=data['pixel_pose_theta'],\n block=blocks.get(data['block']))\n item_dict[item_name].save()\n return item_dict",
"def data_frame(records: list) -> pandas.DataFrame:\n return pandas.DataFrame(records, columns=lciafmt_cols)",
"def _createModelFromData(self, headers_blocks, data_blocks):\n if self._project.experimentsCount() > 1:\n print(\"Currently, only 1 measured datablock is supported. Given: \", self._project.experimentsCount())\n experiment_id = self._project.experimentsIds()[0] # only 1st measured datablock is currently taken into account\n headers, data = headers_blocks[experiment_id], data_blocks[experiment_id]\n row_count = len(data)\n column_count = len(data[0])\n # set headers\n headerModel = QStandardItemModel(1, column_count)\n for column_index in range(column_count):\n index = headerModel.index(0, column_index)\n value = headers[column_index]\n headerModel.setData(index, value, Qt.DisplayRole) #Qt.WhatsThisRole\n # set model data\n dataModel = QStandardItemModel(row_count, column_count)\n for row_index in range(row_count):\n for column_index in range(column_count):\n index = dataModel.index(row_index, column_index)\n value = data[row_index][column_index]\n dataModel.setData(index, value, Qt.DisplayRole)\n return headerModel, dataModel",
"def _inv_dict_list_to_dataframe(inv: list) -> pd.DataFrame:\n columns = ['item_id', 'color_id', 'name', 'itemtype', 'category_id', 'quantity', ]\n df = pd.DataFrame(inv, columns=columns)\n # Create the element_id column and set as index\n df['element_id'] = df[['item_id', 'color_id']].apply(tuple, axis=1)\n df.set_index('element_id', inplace=True)\n return df",
"def so_attributes(self):\n try:\n self.channels\n except AttributeError:\n # create if doesn't exist\n self.channels = [x[0] for x in self.data.columns]\n \n dfs = ['sofiltEEG', 'spsofiltEEG']\n [setattr(self, df, pd.DataFrame(index=self.data.index)) for df in dfs]\n self.so_events = {}\n self.so_rejects = {}",
"def request_df(request_list, row_template, col_headers):\n \n rows = [row for request_rows in request_list for row in request_rows.rows]\n outer = []\n \n for i in range(len(rows)):\n inner = []\n \n for j in range(len(row_template)):\n \n inner.append(getattr(rows[i].duals[j], row_template[j]))\n \n outer.append(inner)\n \n return pd.DataFrame(outer, columns=col_headers)",
"def build_dataframe() -> pd.DataFrame:\n df = pd.DataFrame(\n np.random.randint(0, 1000, size=(1000, 6)), columns=list(\"ABCDEF\")\n )\n\n return df",
"def parse_ether(n_blocks):\n df = pd.DataFrame()\n page_number_max = int(n_blocks / 25) # 25 block per page\n for dd in range(1, page_number_max + 1):\n cols, temp, time = parse_one_day_ether(dd)\n temp.columns = cols.values\n temp['Age'] = time\n df = pd.concat([df, temp])\n # Remove duplicates, Ethereum blocks can be faster than scrapping\n df = df.drop_duplicates(df.columns[0])\n df = df.reset_index(drop=True)\n # Cleaning\n df[df.columns[8]] = df[df.columns[8]].apply(lambda x: float(x[:x.find(' E')]))\n df[df.columns[7]] = df[df.columns[7]].apply(\n lambda x: float(x[:x.find(' G')].replace(',', '')) if not '-' in x else 0)\n df[df.columns[2]] = df[df.columns[2]].apply(float)\n df = df.rename(columns={'txn': columns[1],\n 'Age': columns[4]})\n df[columns[3]] = df['Reward'] - 3\n df[columns[2]] = df[columns[3]] / df[columns[1]]\n df[columns[4]] = pd.to_datetime(df[columns[4]], format=\"%b-%d-%Y %I:%M:%S %p\")\n return df",
"def cif_df(cif_object) -> DataFrame:\n if cif_object is None:\n return DataFrame()\n row_list = cif_object.row_list\n attr_list = cif_object.attribute_list\n return DataFrame(data=row_list, columns=attr_list)",
"def make_dataset(self, df, **kwargs):\n\t\treturn df",
"def get_abu_musa_dataset():\n ds = AttrDict()\n classes = [\n '__background__', \n 'Underground Shelter',\n 'Communication Tower',\n 'Dense Structures',\n 'Vehicle',\n 'Cargo container',\n 'Ship',\n 'Swimming pool',\n 'Sports field',\n 'Storage Tank',\n 'Standalone Building',\n 'Defensive Earthworks']\n \n ds.classes = {i: name for i, name in enumerate(classes)}\n return ds",
"def generate_database_from_metadatas(metadata_dict, stimulus_dict):\n # Create empty database from template class\n indexes = sorted(metadata_dict.keys())\n database = pd.DataFrame(index=indexes, columns=['Number', 'Metadata', 'Tracking', 'Registration', 'Stimuli'])\n\n # Fill in metadata from the dictionary\n for sessname, metadata in sorted(metadata_dict.items()):\n database['Metadata'][sessname] = metadata\n database['Number'][sessname] = metadata['number']\n\n for sessname, stimulus in sorted(stimulus_dict.items()):\n database['Stimuli'][sessname] = stimulus\n\n print(colored('Database initialized.','yellow'))\n return database",
"def create_regressor_attributes(df, attribute, list_of_prev_t_instants) :\n \n list_of_prev_t_instants.sort()\n start = list_of_prev_t_instants[-1] \n end = len(df)\n df['datetime'] = df.index\n df.reset_index(drop=True)\n\n df_copy = df[start:end]\n df_copy.reset_index(inplace=True, drop=True)\n\n for attribute in attribute :\n foobar = pd.DataFrame()\n\n for prev_t in list_of_prev_t_instants :\n new_col = pd.DataFrame(df[attribute].iloc[(start - prev_t) : (end - prev_t)])\n new_col.reset_index(drop=True, inplace=True)\n new_col.rename(columns={attribute : '{}_(t-{})'.format(attribute, prev_t)}, inplace=True)\n foobar = pd.concat([foobar, new_col], sort=False, axis=1)\n\n df_copy = pd.concat([df_copy, foobar], sort=False, axis=1)\n \n df_copy.set_index(['datetime'], drop=True, inplace=True)\n return df_copy",
"def to_frame(self) -> pd.DataFrame:\n df = pd.DataFrame(data={\n 'Name': [p.name for p in self],\n 'Description': [p.desc for p in self],\n 'Value': [p.value for p in self],\n 'Hyper-Space': [p.hyper_space for p in self]\n }, columns=['Name', 'Description', 'Value', 'Hyper-Space'])\n return df",
"def transactions_df():\n return pd.DataFrame(\n {\n \"user_id\": [1, 1, 1, 2, 2, 2, 3, 3, 3],\n \"item_id\": [11, 22, 22, 11, 22, 33, 33, 33, 44],\n \"amount\": [10, 20, 30, 40, 50, 60, 70, 80, 90],\n }\n )",
"def make_label_sets(df: pd.DataFrame) -> LabelSets:\n dominant_pore = LabelSet(label_map={'IP': 0, 'VUG': 1, 'MO': 2, 'IX': 3, 'WF': 4, 'WP': 4, 'WF-WP': 4},\n class_names=['IP', 'VUG', 'MO', 'IX', 'WF-WP'],\n sample_labels={idx: label for idx, label in\n df[['Sample', 'Macro_Dominant_type']].values})\n\n dunham = LabelSet(label_map={'rDol': 0, 'B': 1, 'FL': 2, 'G': 3, 'G-P': 4, 'P': 5, 'P-G': 4, 'G-P,P-G': 4},\n class_names=['rDol', 'B', 'FL', 'G', 'G-P,P-G', 'P'],\n sample_labels={idx: label for idx, label in df[['Sample', 'Dunham_class']].values})\n\n lucia = LabelSet(label_map={'0': 0, '1': 1, '2': 2},\n class_names=['0', '1', '2'],\n sample_labels={idx: label for idx, label in df[['Sample', 'Lucia_class']].values})\n return LabelSets(sets={'DominantPore': dominant_pore,\n 'Dunham': dunham,\n 'Lucia': lucia})",
"def get_df(self):\n data = self.load_data()\n userID, itemID = self.get_user_and_item_ids(data)\n rating = data[:, 1]\n data_np = np.stack((userID, itemID, rating), axis=-1)\n df = pd.DataFrame(data_np)\n df.columns = [\"userID\", \"itemID\", \"rating\"]\n return df",
"def taxa_data_frame(self):\n cols = list(self._taxa.keys())\n cols.remove(\"uid\")\n cols.remove(\"object\")\n df = DataFrame(self._taxa, columns=cols, index=self._taxa[\"uid\"])\n df.index.name = \"uid\"\n\n return df",
"def generate_report_data(self):\n if not (self.course_block_structure or self.course_blocks):\n return []\n\n time_on_asset_column_name = getattr(\n settings,\n 'OPR_GOOGLE_BIGQUERY_TIME_ON_ASSET_DAILY_COLUMN_NAME',\n '',\n )\n bigquery_data = list(self.get_google_bigquery_data())\n\n if not bigquery_data:\n return []\n\n course_blocks = list(self.course_blocks)\n user_data = []\n\n for user in self.users:\n user_course_cohort = get_course_cohort(user=user, course_key=self.course_key)\n user_course_teams = get_course_teams(membership__user=user, course_id=self.course_key)\n block_data = []\n chapter_name = ''\n chapter_position = 0\n sequential_name = ''\n sequential_position = 0\n vertical_name = ''\n vertical_position = 0\n\n for course_block in course_blocks:\n bigquery_item = {}\n\n for item_data in bigquery_data:\n if (item_data.get('username', '') == user.username and\n course_block.block_id in item_data.get('module_id', '')):\n bigquery_item = item_data\n break\n\n if course_block.block_type == 'chapter':\n chapter_name = self.course_block_structure.get_xblock_field(\n course_block,\n 'display_name',\n ) or ''\n sequential_position = 0\n chapter_position += 1\n elif course_block.block_type == 'sequential':\n sequential_name = self.course_block_structure.get_xblock_field(\n course_block,\n 'display_name',\n ) or ''\n sequential_position += 1\n elif course_block.block_type == 'vertical':\n vertical_name = self.course_block_structure.get_xblock_field(\n course_block,\n 'display_name',\n ) or ''\n # The vertical position must be only incremental.\n vertical_position += 1\n\n block_data.append({\n 'average_time_spent': bigquery_item.get(\n time_on_asset_column_name,\n 0,\n ) if bigquery_item else 0,\n 'chapter_name': chapter_name,\n 'chapter_position': chapter_position,\n 'sequential_name': sequential_name,\n 'sequential_position': sequential_position,\n 'vertical_name': vertical_name,\n 'vertical_position': vertical_position,\n })\n\n user_data.append({\n 'username': user.username,\n 'user_cohort': user_course_cohort.name if user_course_cohort else '',\n 'user_teams': user_course_teams[0].name if user_course_teams else '',\n 'blocks': block_data,\n })\n\n return user_data"
] |
[
"0.6213763",
"0.60232276",
"0.5933897",
"0.58126754",
"0.57580143",
"0.5599309",
"0.5351924",
"0.5347785",
"0.53098786",
"0.52822083",
"0.52670205",
"0.5263557",
"0.52503467",
"0.52472496",
"0.52292097",
"0.52067655",
"0.51953876",
"0.518646",
"0.51731384",
"0.5167735",
"0.51672274",
"0.5166475",
"0.5159573",
"0.51582396",
"0.51430094",
"0.5142395",
"0.5132176",
"0.51274997",
"0.5127331",
"0.5053275"
] |
0.68994987
|
0
|
Imports jammer info from [jammers.csv]; it will perform classification and picture (URL) fetching and returns a list of Jammer objects with propagated fields. A hardcoded rule assumes the jammer has a ticket if it comes from the jammers.csv file.
|
def import_jammers(csvfile, fieldnames=None):
parsed_jammers = []
if fieldnames is None:
# Read fieldnames from first line of csvfile.
jammers = csv.DictReader(csvfile)
else:
# Fieldnames provided
# Skip header line/fieldnames line
jammers = csv.DictReader(csvfile, fieldnames)
next(jammers)
for jammer in jammers:
if hasattr(csvfile, "name") and csvfile.name == "jammers.csv":
# These jammers has registered to the jam site.
jammer["ticket"] = True
# Put it in object yo.
jammer = Jammer(**jammer)
parsed_jammers.append(jammer)
return parsed_jammers
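
A minimal usage sketch (invented for illustration): the CSV columns below and the presence of a Jammer class accepting those fields are assumptions, not taken from the original record.

import io

csv_text = "name,email\nAda,ada@example.com\nLin,lin@example.com\n"

# Field names taken from the first line of the file-like object.
jammers = import_jammers(io.StringIO(csv_text))

# Explicit field names: the parser itself skips the header row.
# Note: io.StringIO has no .name attribute, so the hardcoded jammers.csv
# ticket rule does not fire for either call here.
jammers = import_jammers(io.StringIO(csv_text), fieldnames=["name", "email"])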
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def import_registered_jammers(filename=\"jammerskillz.csv\"):\n\tfrom utils import gf_fieldnames\n\treturn import_jammers(filename, fieldnames=gf_fieldnames())",
"def import_from_file(jamsite, source='jammers.csv', fieldnames=None):\n\t# import jammers.csv\n\twith open(source) as csvfile:\n\t\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )",
"def import_from_url(jamsite, url, fieldnames=None):\n\t# import csv, from the webz.\n\tcsvfile = fetch_csv_from_url(url)\n\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )",
"def load_campers():\n\n for row in open(\"static/campers.csv\"):\n row = row.rstrip()\n\n email, password, first_name, last_name, camper_photo, camper_photo_url = row.split(\",\")\n\n camper = Camper(\n camper_email=email,\n password=password,\n first_name=first_name,\n last_name=last_name,\n camper_photo=camper_photo,\n camper_photo_url=camper_photo_url)\n\n db.session.add(camper)\n\n db.session.commit()",
"def load_meetings():\n\n print \"Importing meetings...\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate retailers\n Meeting.query.delete()\n\n # Read CSV file\n with open(\"seed_data/meetings.csv\") as source_file:\n example_data = list(csv.reader(source_file))\n\n # skip header row for populating db\n for list_item in example_data[1:]:\n meeting = Meeting(meeting_title=list_item[1],\n meeting_time=list_item[2],\n attendees=list_item[3],\n length=list_item[4],\n topic_id=list_item[5])\n\n # Add the current retailer to the session\n db.session.add(meeting)\n\n # Commit the db.session changes to the database\n db.session.commit()",
"def add_talks_from_csv(self, fname):\r\n plugin = self.plugman.get_plugin_by_name(\"CSV Importer\", \"Importer\")\r\n importer = plugin.plugin_object\r\n presentations = importer.get_presentations(fname)\r\n\r\n if presentations:\r\n for presentation in presentations:\r\n talk = Presentation(presentation[\"Title\"],\r\n presentation[\"Speaker\"],\r\n presentation[\"Abstract\"], # Description\r\n presentation[\"Level\"],\r\n presentation[\"Event\"],\r\n presentation[\"Room\"],\r\n presentation[\"Time\"],\r\n presentation[\"Time\"])\r\n self.insert_presentation(talk)\r\n\r\n else:\r\n log.info(\"CSV: No data found.\")",
"def process(self):\n parser = csv.reader(self.reader,delimiter=self.delimiter_DIC[self.delim])\n firstRec = True\n for fields in parser:\n if firstRec:\n fieldNames = fields\n firstRec = False\n else:\n self.dicts.append({})\n for i,f in enumerate(fields):\n try:\n self.dicts[-1][fieldNames[i]] = f\n except:\n import pdb\n pdb.set_trace()\n if self.eng is \"spectrumMill\":\n for i,row in enumerate(self.dicts):\n fileSM = row[self.engine[self.eng][0]]\n acNoSM = row[self.engine[self.eng][1]]\n masSM=row[self.engine[self.eng][2]]\n chrgeSM=row[self.engine[self.eng][3]]\n preAmSM=row[self.engine[self.eng][4]].replace('(','').replace(')','')\n pepSM=row[self.engine[self.eng][5]]\n nAmSM=row[self.engine[self.eng][6]].replace('(','').replace(')','')\n modSM=row[self.engine[self.eng][7]].split('\\s')+row[self.engine[self.eng][8]].split('\\s')\n modLis = [mod.strip() for mod in modSM if mod!=' ']\n modSM = ';'.join(modLis)\n scoreSM=row[self.engine[self.eng][9]]\n descrimentSM=row[self.engine[self.eng][10]]\n if modSM !='':\n modPepInHupaFormat=self.modTermDic.spectrumMill(preAmSM,pepSM,nAmSM,modSM,self.eng)\n parsedData=acNoSM+'\\t'+masSM+'\\t'+chrgeSM+'\\t'+modPepInHupaFormat+'\\t'+scoreSM+'\\n'\n data = self.mapCaller(parsedData,self.eng)\n #print >>self.writer,data\n else:\n parsedData=acNoSM+'\\t'+masSM+'\\t'+chrgeSM+'\\t'+preAmSM+'.'+pepSM+'.'+nAmSM+'\\t'+'-'+'\\t'+scoreSM+'\\n'\n data = self.mapCaller(parsedData,self.eng)\n #print >>self.writer,data\n\n if self.eng is \"mascot\":\n \"\"\"\n In Mascot, under every gi (protein) corresponding peptide information will be there\n \"\"\"\n giFound=True\n for i,row in enumerate(self.dicts):\n if row[self.engine[self.eng][0]]!='':\n giAsKey = row[self.engine[self.eng][0]]\n giFound=False\n if giFound==False:\n massM=row[self.engine[self.eng][1]]\n chargeM=row[self.engine[self.eng][2]]\n preAmM=row[self.engine[self.eng][3]]\n pepM=row[self.engine[self.eng][4]]\n nAmM = row[self.engine[self.eng][5]]\n modM=row[self.engine[self.eng][6]]\n modSiteM=row[self.engine[self.eng][7]]\n scoreM=row[self.engine[self.eng][8]]\n evalM=row[self.engine[self.eng][9]]\n if modM !='':\n \"\"\"\n modificationFormat from modification.py creates a MASTER_UNIMOD dictionary \n Where all modifications of unimod would be available. \n At same time formatMod function in modificationFormat class would convert modification format \"\"\"\n modPepInHupaFormat=self.modTermDic.mascot(preAmM,pepM,nAmM,modSiteM,modM,self.eng)\n parsedData=giAsKey+'\\t'+massM+'\\t'+chargeM+'\\t'+modPepInHupaFormat+'\\t'+scoreM+'\\n'\n data=self.mapCaller(parsedData,self.eng)\n #print >>self.writer,data\n else:\n \n parsedData=giAsKey+'\\t'+massM+'\\t'+chargeM+'\\t'+preAmM+'.'+pepM+'.'+nAmM+'\\t'+'-'+'\\t'+scoreM+'\\n'\n data=self.mapCaller(parsedData,self.eng)\n #print >>self.writer,data\n\n if self.eng is \"inspect\":\n \"\"\"\n InSpect does not have mass information in TSV file\n So we need to fetch it from spectrum file (this is not yet done)\n \"\"\"\n for i,row in enumerate(self.dicts):\n data = row[self.engine[self.eng][0]]+'\\t'+row[self.engine[self.eng][1]]+'\\t'+row[self.engine[self.eng][2]]+'\\t'+row[self.engine[self.eng][3]]+'\\t'+row[self.engine[self.eng][4]]+'\\t'+row[self.engine[self.eng][5]]+'\\n'\n #return data\n #data = self.mapCaller(data)\n #self.writer.write(data)\n\n if self.eng is \"omssa\":\n #OMSSA csv doesnot contain start and last residue of the peptide, instead contains position. 
So start and last residue need to fetch from protein sequence\n for i,row in enumerate(self.dicts):\n giO = row[self.engine[self.eng][6]]\n massO= row[self.engine[self.eng][5]]\n chargeO=row[self.engine[self.eng][8]]\n preAmO = row[self.engine[self.eng][1]]#position in protein\n pepO = row[self.engine[self.eng][2]]\n nextAmO = row[self.engine[self.eng][3]] #position in protein\n scoreO= row[self.engine[self.eng][4]]\n modO=row[self.engine[self.eng][7]]\n if modO !='':\n #parsedData=giO+'#'+massO+'#'+chargeO+'#'+preAmO+'.'+pepO+'.'+nextAmO+'#'+modO+'#'+scoreO\n modPepInHupaFormat=self.modTermDic.omssa(preAmO,pepO,nextAmO,modO,self.eng)\n parsedData=giO+'\\t'+massO+'\\t'+chargeO+'\\t'+modPepInHupaFormat+'\\t'+scoreO+'\\n'\n self.mapCaller(parsedData,self.eng)\n else:\n parsedData=giO+'\\t'+massO+'\\t'+chargeO+'\\t'+preAmO+'.'+pepO+'.'+nextAmO+'\\t'+'-'+'\\t'+scoreO+'\\n'\n self.mapCaller(parsedData,self.eng)",
"def main():\n\n csv_file = \"shortlist.csv\"\n team_count = 0\n participant_count = 0\n\n\n #Delete all existing teams and participants from the database.\n Team.objects.all().delete()\n Participant.objects.all().delete()\n\n with open(csv_file) as f:\n reader = csv.reader(f)\n data = [row for row in reader]\n\n for item in data:\n if item[0]:\n team_count += 1\n\n t = Team.objects.create(\n name=item[0].strip(),\n idea=item[30].strip()\n )\n\n no_of_p = int(item[1])\n print item[1]\n participant_count += no_of_p\n\n p1 = Participant.objects.create(\n participant_id=get_pid(),\n name=item[2].strip() + \" \" + item[3].strip(),\n gender=item[4].strip(),\n college=item[7].strip(),\n email=item[5].strip(),\n phone=str(item[6]),\n team=t\n )\n\n p2 = Participant.objects.create(\n participant_id=get_pid(),\n name=item[11].strip() + \" \" +item[12].strip(),\n gender=item[13].strip(),\n college=item[16].strip(),\n email=item[14].strip(),\n phone=str(item[15]),\n team=t\n )\n\n if no_of_p == 3:\n p3 = Participant.objects.create(\n participant_id=get_pid(),\n name=item[20].strip() + \" \" +item[21].strip(),\n college=item[25].strip(),\n gender=item[22].strip(),\n email=item[23].strip(),\n phone=str(item[24]),\n team=t\n )\n\n print \"{} teams and {} participants imported.\".format(team_count,\n participant_count)",
"def getRatingsFromJudge(self, judge):\n\n judgesExcelLogger.info(\"getRatingsFromJudge: Attempting to get ratings from Judge '%s'\", judge)\n try:\n fileToUse = self.judgeToFileName[judge]\n os.chdir(self.path) # Change to set's directory context\n judgeRatings = []\n with open(fileToUse, encoding=\"utf-8-sig\") as judgeFile:\n for line in judgeFile:\n if line.startswith('['):\n parsedRating = self.getSimpleRating(line)\n judgeRatings.append(parsedRating)\n judgeFile.close()\n # print(judgeRatings)\n return judgeRatings\n except:\n judgesExcelLogger.warning(\"getRatingsFromJudge: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))",
"def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH + filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, types, index))",
"def getJudgeRatings(self):\n judgeNotesLogger.info(\"getJudgeRatings: Parsing Judge Notes File\")\n try:\n os.chdir(self.fileDir) # Change to batch directory context\n with open(self.notesFile, encoding=\"utf-8-sig\") as judgeFile:\n for line in judgeFile:\n if line.startswith('['):\n self.getRatingWithInfo(line)\n judgeFile.close()\n self.numJudgedFiles = len(self.judgedSongList)\n except:\n judgeNotesLogger.warning(\"getJudgeRatings: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))",
"def load_ratings():\n res = {}\n with open(RATINGS_PATH, newline='', encoding=RATINGS_ENCRYPTION) as csvfile:\n spamreader = csv.reader(csvfile)\n for i, row in enumerate(spamreader):\n if i:\n title = row[3]\n res[title] = imdbData(row)\n return res",
"def extractionTitlePrincipals(cur, conn):\n fh = open(pathTitlePrincipals)\n reader = csv.reader(fh, delimiter = '\\t')\n firstLine = True\n idActor_list = []\n idJugar = 1\n for row in reader:\n if firstLine : firstLine = False # Read header\n else :\n if (row[3]=='actor' or row[3]=='actress'): #only record actors\n idTitulo = int(row[0][2:])\n idActor = int(row[2][2:])\n idActor_list.append(idActor)\n idJugar +=1\n # print(jugarInsert.format(idJugar, idTitulo, idActor))\n # REGISTER DATA IN JUGAR TABLE\n cur.execute(jugarInsert.format(idJugar, idTitulo, idActor))\n conn.commit()\n return idActor_list",
"def load_ratings():\n\n print \"Importing ratings...\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate retailers\n Rating.query.delete()\n\n # Read CSV file\n with open(\"seed_data/ratings.csv\") as source_file:\n example_data = list(csv.reader(source_file))\n\n # skip header row for populating db\n for list_item in example_data[1:]:\n rating = Rating(meeting_id=list_item[1],\n score=list_item[2])\n\n # Add the current retailer to the session\n db.session.add(rating)\n\n # Commit the db.session changes to the database\n db.session.commit()",
"def load(cls):\n\n # Loop through meds and build patient med lists:\n meds = csv.reader(open(MEDS_FILE,'U'),dialect='excel-tab')\n header = next(meds)\n for med in meds:\n cls(dict(zip(header,med))) # Create a med instance (saved in Med.meds)",
"def load_lenders():\n\n for row in open(\"static/lenders.csv\"):\n row = row.rstrip()\n email, password, first_name, last_name, lender_photo, lender_photo_url = row.split(\",\")\n\n lender = Lender(\n lender_email=email,\n password=password,\n first_name=first_name,\n last_name=last_name,\n lender_photo=lender_photo,\n lender_photo_url=lender_photo_url)\n\n db.session.add(lender)\n\n db.session.commit()",
"def candidates_import_from_sample_file():\n # Load saved json from local file\n logger.info(\"Loading CandidateCampaigns from local file\")\n\n with open(\"candidate/import_data/candidates_sample.json\") as json_data:\n structured_json = json.load(json_data)\n\n return candidates_import_from_structured_json(structured_json)",
"def candidates_import_from_sample_file():\n # Load saved json from local file\n logger.info(\"Loading CandidateCampaigns from local file\")\n\n with open(\"candidate/import_data/candidate_campaigns_sample.json\") as json_data:\n structured_json = json.load(json_data)\n\n return candidates_import_from_structured_json(structured_json)",
"def import_party_results(election, file, mimetype):\n\n errors = []\n parties = {}\n party_results = {}\n party_totals = {}\n panachage_results = {}\n panachage_headers = None\n\n # The party results file has one party per year per line (but only\n # panachage results in the year of the election)\n if file and mimetype:\n csv, error = load_csv(\n file, mimetype, expected_headers=ELECTION_PARTY_HEADERS)\n if error:\n errors.append(error)\n else:\n panachage_headers = parse_panachage_headers(csv)\n for line in csv.lines:\n line_errors = []\n parse_party_result(\n line, line_errors,\n party_results, party_totals, parties,\n election.date.year\n )\n parse_panachage_results(\n line, line_errors,\n panachage_results, panachage_headers,\n election.date.year\n )\n if line_errors:\n errors.extend(\n FileImportError(error=err, line=line.rownumber)\n for err in line_errors\n )\n\n if panachage_headers:\n for list_id in panachage_headers.values():\n if not list_id == '999' and list_id not in parties.keys():\n errors.append(FileImportError(\n _(\"Panachage results ids and id not consistent\")))\n break\n\n if errors:\n return errors\n\n session = object_session(election)\n for result in election.party_results:\n session.delete(result)\n for result in election.panachage_results:\n session.delete(result)\n\n for result in party_results.values():\n election.party_results.append(result)\n\n for target in panachage_results:\n if target in parties:\n for source, votes in panachage_results[target].items():\n if source in parties or source == '999':\n election.panachage_results.append(\n PanachageResult(\n owner=election.id,\n id=uuid4(),\n source=parties.get(source, ''),\n target=parties[target],\n votes=votes\n )\n )\n\n return",
"def import_captains(input_csv=\"../2012_ROOMS_Captain_email_sample.csv\"):\n reader = csv.DictReader(open(input_csv))\n for s in reader:\n def clean_s(k):\n return s[k].replace('\\n', ' ').replace('\\xe2', \"'\").replace('\\x80', \"'\").replace('\\x99', '').replace('\\xc3', '').replace('\\x95', '').encode('ascii', 'replace')\n\n key = s.get('key')\n email = clean_s(\"Email\")\n rooms_id = clean_s(\"ROOMS Captain ID\")\n # name = \"%s %s\" % (clean_s(\"First Name\"),\n # clean_s(\"Last Name\"))\n name = clean_s(\"Name\")\n captain = None\n if key:\n captain = models.Captain.get_by_id(int(key))\n if captain:\n logging.info('got captain from key %s', key)\n if not captain:\n captain = models.Captain.all().filter('rooms_id =', rooms_id).get()\n if captain:\n logging.info('got captain from rooms_id %s', rooms_id)\n if not captain:\n captain = models.Captain.all().filter('email =', email).get()\n if captain:\n logging.info('got captain from email %s', email)\n if not captain:\n logging.info('creating captain key %s name %s email %s rooms_id %s',\n key, name, email, rooms_id)\n captain = models.Captain(name=name, email=email, rooms_id=rooms_id)\n\n # Over-write these values, assume volunteer database is more up to\n # date.\n captain.name = name\n captain.email = email\n captain.rooms_id = rooms_id\n # captain.phone1 = clean_s(\"Preferred Phone\") or None\n # captain.phone_mobile = clean_s(\"Phone mobile\")\n # captain.phone_work = clean_s(\"Phone work\")\n # captain.phone_home = clean_s(\"Phone home\")\n # captain.phone_fax = clean_s(\"Phones Fax::number\")\n # captain.phone_other = clean_s(\"Phones Other::number\")\n captain.put()\n\n number = s[\"Site ID\"]\n site = models.NewSite.all().filter('number =', number).get()\n if not site:\n logging.error('site %s does not exist, skipping', number)\n continue\n\n # In input type is like \"Volunteer Captain\" but in model it's\n # \"Volunteer\"\n input_type = s[\"Captain Type\"]\n for t in models.SiteCaptain.type.choices:\n if t in input_type:\n break\n\n query = models.SiteCaptain.all()\n query.filter('site =', site).filter('captain =', captain)\n sitecaptain = query.get()\n if sitecaptain is None:\n logging.info('Creating new SiteCaptain mapping %s to %s',\n site.number, captain.name)\n sitecaptain = models.SiteCaptain(site=site, captain=captain, type=t)\n else:\n logging.info('Found existing SiteCaptain')\n sitecaptain.type = t\n sitecaptain.put()",
"def csvObj():\n CSV_URL = \"http://unitedstates.sunlightfoundation.com/legislators/legislators.csv\"\n s = requests.get(CSV_URL) # Download the csv using requests.\n reader = csv.DictReader(s.text.splitlines(), lineterminator=\"\\n\") # Use the dictreader to make a dictionary with the attribute name paired with the rows value for that attribute.\n name2twitter_id = {}\n for row in reader:\n if (row['in_office'] == \"1\" and row['twitter_id'] != \"\"):\n name = row['firstname'] + \" \" # Construct the name.\n if (row['middlename'] != \"\"): # Not all names have middle names.\n name += row['middlename'] + \" \"\n name += row['lastname']\n name2twitter_id[name] = row['twitter_id'] # Assign the name to their handle.\n del name2twitter_id[\"Tim Murphy\"] # This representative does not have an active twitter handle. \n name2twitter_id[\"Gregory W. Meeks\"] = \"RepGregoryMeeks\" # Insert this representatives twitter handle manually.\n return name2twitter_id",
"def parse(file_name, user, agenda_type, db):\n db.execute('''select section from roll where onyen=%(onyen)s''', dict(onyen=user))\n row = db.fetchone()\n section = None if row is None else row.section\n\n # Get Recitation zoom\n db.execute(\"\"\"select url from zoom where type='recitation'\"\"\")\n row = db.fetchone()\n recitation_zoom_url = row.url if row else None\n\n # Get lecture zoom\n lecture_zoom_urls = []\n if section in ['001', '003']:\n db.execute(\"\"\"select url from zoom where type='lecture' and section='001'\"\"\")\n lecture_zoom_urls.append(db.fetchone().url)\n if section in ['002', '003']:\n db.execute(\"\"\"select url from zoom where type='lecture' and section='002'\"\"\")\n lecture_zoom_urls.append(db.fetchone().url)\n\n # Get checklist information\n checklist_info = get_checklist_info(db, user, agenda_type)\n\n if agenda_type == 'la':\n first_day_of_class = date(2021, 1, 12)\n else:\n first_day_of_class = date(2021, 1, 19)\n last_day_of_classes = date(2021, 5, 5)\n today = date.today()\n with open(file_name, \"rt\") as fp:\n agenda = fp.read().split(\"\\n| \")\n day = first_day_of_class\n result = []\n for one_days_details in agenda:\n lines = one_days_details.split(\"\\n\")\n title = lines[0]\n output_lines = []\n for line in lines[1:]:\n if line.startswith(\"S \"):\n line = slide_line(line)\n elif line.startswith(\"#\"):\n line = comment_line(line, user)\n elif line.startswith(\"Z\"):\n line = zoom_line(line, day, section, lecture_zoom_urls, recitation_zoom_url)\n elif line.startswith(\"CL\"):\n line = checklist_line(line, day, checklist_info)\n output_lines.append(line)\n when = set_when(day, today)\n\n result.append(\n {\"date\": day, \"title\": title, \"when\": when,\n \"body\": renderMarkdown(renderTemplate(\"\\n\".join(output_lines)))})\n day = increment_day(day, last_day_of_classes, result, agenda_type)\n return result",
"def load_explainer(self):\n explainer_path = os.path.join(self.model_path, \"explainer.dill\")\n csv_path = os.path.join(self.model_path, self.__csv_path)\n if os.path.isfile(explainer_path):\n with open(explainer_path, \"rb\") as f:\n self.__explainer = dill.load(f)\n elif os.path.isfile(csv_path):\n print(\"[WARN] Making new explainer!\")\n self.__explainer = make_explainer(\n pd.read_csv(csv_path),\n self.FEATURES\n )\n with open(explainer_path, \"wb\") as f:\n dill.dump(self.__explainer, f)\n else:\n print(\"[WARN] Explainer not found!\")",
"def gene_list_reader():\n \n relPath = \"data/genes_met_modelling_human.csv\"\n \n geneL = []\n with file_importer(relPath, encodeS = \"utf-8-sig\") as inpF:\n for inpLine in inpF:\n inpI = inpLine.strip(\"\\n'\").split(\".\")[0]\n if inpI not in geneL: geneL.append(inpI)\n \n return geneL",
"def load_meds_data(raw=False):\n _df_meds = pd.read_csv('data/meps_meds.csv', index_col=0)\n\n if raw:\n return _df_meds\n\n rxNickname = (_df_meds['rxName']\n .str.upper()\n .str.extract('([A-Z]{4,})', expand=False)\n .str.strip()\n )\n rxNickname.name = 'rxNickname'\n\n f = lambda df: pd.Series({\n 'numPrescriptions': len(df),\n 'originalNDCs': df['rxNDC'].unique()\n })\n\n df_meds = _df_meds.groupby(['id', rxNickname]).apply(f)\n\n assert df_meds.index.is_unique\n \n return df_meds",
"def solution_reader(smart_wijk, results_path = 'Results/best_brabo_solution.csv'):\n with open(results_path, 'r') as f:\n best_reader = csv.reader(f)\n for i, row in enumerate(best_reader):\n\n # for some reason the csv contains empty lists?\n if len(row) == 0:\n continue\n if i == 1:\n # print(row[1])\n row[1] = row[1].replace(\"'\", '\"')\n parsed_data = json.loads(row[1])\n # print(len(row))\n\n best_houses = parsed_data['DATA']\n\n for connectionz in best_houses:\n smart_wijk.connect(connectionz['connected_to'], connectionz['position'])\n\n smart_wijk.prettify()\n print(smart_wijk.calc_cost())\n smart_wijk.cap_left()",
"def load_mentions(sc, file):\n mentions_df = sc.read.csv(config.GDELT_PATH + file, sep=\"\\t\",\n header=False, schema=config.MENTIONS_SCHEMA, mode=\"DROPMALFORMED\")\n mentions_df = mentions_df .select(\"GLOBALEVENTID\", \"EventTimeDate\", \"MentionTimeDate\", \"MentionSourceName\", \"MentionIdentifier\") \\\n .withColumn(\"EventTimeDate\", F.to_timestamp(mentions_df.EventTimeDate, \"yyyyMMddHHmmss\")) \\\n .withColumn(\"MentionTimeDate\", F.to_timestamp(mentions_df.MentionTimeDate, \"yyyyMMddHHmmss\"))\n return mentions_df",
"def _do_action_import_movie_info(self):\n self._run_express_job(\n \"org.kiji.tutorial.load.MovieInfoImporter\",\n options=\"--movie-info ml-100k/u.item\"\n )\n self._scan_table(\"movies\")",
"def load_skills():\n\n Skill.query.delete()\n\n # get all the qualifications text from postings\n postings = db.session.query(Posting.qualifications).all()\n # combine qualifications into a list\n all_skills = []\n with open('filler.txt') as filler:\n del_words = filler.read()\n for post in postings:\n words = post.qualifications.lower().split()\n # iterate through a list of those skills\n for word in words:\n word = word.strip(\"-()/\\,.:;* 1234567890\")\n # check to see if that word isn't in our filler document\n # if not, add it to the table\n if word not in del_words and word not in all_skills:\n all_skills.append(word)\n skill = Skill(skill=word)\n db.session.add(skill)\n db.session.commit()",
"def testMinivan():\n # Assign the csv file from the web to getFile veriable.\n getFile = \"http://icarus.cs.weber.edu/~hvalle/cs3030/data/minivanTest.csv\"\n # open the csv file \n with urlopen(getFile) as testFile:\n # Declare and Store the output in a record list.\n record = []\n # Loop over each line in the testFile\n for line in testFile:\n # Decode the line and replace spaces, strip them and split them with comma.\n lineRows = line.decode('utf-8').replace(\" \", \"\").strip().split(\",\")\n # Add what is in lineRows variable to record list\n record.append(lineRows)\n # Start the counter \n counter = 0\n # Loop over each element in the record list\n for el in record:\n # Check if the counter is greater than 0\n if counter > 0:\n # Print record list.\n print(\"Reading Record {0}:\".format(counter))\n # Call doors_lockers function from minivan.py and assign each el to the corrsponding variable..\n doors_lockers(el)\n # Pring an empty line.\n print()\n # Increase the counter by 1\n counter += 1"
] |
[
"0.6269612",
"0.5713424",
"0.545809",
"0.5050449",
"0.50021917",
"0.49669176",
"0.49323994",
"0.4921784",
"0.48439816",
"0.48355928",
"0.48049712",
"0.47829503",
"0.4769548",
"0.4752347",
"0.47450462",
"0.47308967",
"0.4725612",
"0.46987265",
"0.4671122",
"0.4665979",
"0.46559274",
"0.46463588",
"0.4641364",
"0.4633405",
"0.46074086",
"0.46064344",
"0.45977828",
"0.45941678",
"0.4591899",
"0.45819482"
] |
0.6562913
|
0
|
Fetches the gforms results page as csv from a published csv link. Imports it into jamsite.jammers.
|
def import_from_url(jamsite, url, fieldnames=None):
# import csv, from the webz.
csvfile = fetch_csv_from_url(url)
jamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )
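
The function above relies on a fetch_csv_from_url helper that is not shown in this record. Below is a minimal sketch of such a helper, assuming it downloads the published CSV with requests and wraps the text in io.StringIO so import_jammers can read it like a local file; the name is reused from the snippet above, but the implementation is an assumption, not the original code.

import io
import requests

def fetch_csv_from_url(url):
	# Assumed helper: download the published CSV text and return a
	# file-like object that csv.DictReader can iterate over line by line.
	response = requests.get(url)
	response.raise_for_status()
	return io.StringIO(response.text)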
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def download_report():\n entities = get_names()\n save_csv(entities)",
"def get_csv(request, cur_course_user, assessment_id):\n assessment = shortcuts.get_object_or_404(models.Assessment, pk=assessment_id)\n\n # Create the HttpResponse object with the appropriate CSV header.\n response = http.HttpResponse(content_type='text/csv')\n\n filename = \"%s-scores.csv\" % assessment.name\n # Replace spaces in the assessment name with dashes and convert to lower case\n filename = filename.replace(' ', '-').lower()\n\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n\n question_parts = assessment.get_prefetched_question_parts().order_by('-question_number')\n num_questions = assessment.get_num_questions()\n\n fieldnames=['Last Name', 'First Name', 'ID', 'Email', 'Total Score']\n if hasattr(assessment, 'homework'):\n fieldnames.append('Finalized?')\n fieldnames.append('Submission time')\n fieldnames.append('Late days')\n\n for i in range(num_questions):\n fieldnames.append('Question %d' % (i + 1))\n\n writer = csv.DictWriter(response, fieldnames=fieldnames)\n\n submissions = assessment.get_prefetched_submissions().order_by('course_user__user__last_name',\n 'course_user__user__first_name')\n\n writer.writeheader()\n\n for submission in submissions:\n for course_user in submission.group_members.all():\n user = course_user.user\n score = submission.points if submission.graded else 'ungraded'\n\n row = {\n 'Last Name': user.last_name,\n 'First Name': user.first_name,\n 'ID': user.student_id,\n 'Email': user.email,\n 'Total Score': score\n }\n\n if hasattr(assessment, 'homework'):\n cur_timezone = pytz.timezone(assessment.course.get_timezone_string())\n local_time = timezone.localtime(submission.time, timezone=cur_timezone)\n row['Submission time'] = local_time.strftime('%m/%d/%Y %I:%M %p')\n\n diff = submission.time - submission.assessment.homework.soft_deadline\n late_days = diff.total_seconds() / 24.0 / 60.0 / 60.0\n late_days = max(0, math.ceil(late_days))\n row['Late days'] = late_days\n\n row['Finalized?'] = 'Yes' if submission.is_finalized() else 'No'\n\n for i in range(num_questions):\n if submission.is_question_graded(i + 1):\n row['Question %d' % (i + 1)] = submission.get_question_points(i + 1)\n else:\n row['Question %d' % (i + 1)] = 'ungraded'\n writer.writerow(row)\n\n return response",
"def _csv_download(page):\n # gc = gspread.login(page.timetable.google_user, page.timetable.google_passwd)\n gc = googleoauth.authenticate_google_docs()\n csv_file = gc.open('WebValley2019')\n\n # gsession = gss.Client(page.timetable.google_user, page.timetable.google_passwd)\n # ss = gss.Spreadsheet(page.timetable.spreadsheet)\n # csv_file = gsession.download(ss, gid=page.timetable.spreadsheet_gid)\n # read = csv_file.read()\n read = csv_file.worksheet('TIMETABLE').get_all_values()\n # print \"csv\", read\n return read",
"def handlehtmlsearch_csv(querystring, keywordstring, searchlimit, searchname, cache, smartconstrain):\n fulltitle = os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname + 'Full.csv')\n contitle = os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname + 'Condensed.csv')\n\n if wcexists:\n if not os.path.exists(os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname)):\n os.makedirs(os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname))\n\n with open(fulltitle, 'wt') as csvFull, open(contitle, 'wt') as csvCon:\n fwriter = csv.writer(csvFull, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n cwriter = csv.writer(csvCon, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n keywords, mpsearch, wokresults, keyresults = handlehtmlsearch_wok(querystring, keywordstring, searchlimit,\n cache, smartconstrain)\n\n conheader = ['Material', 'Publications', 'Space Group', 'Calculated Band Gap']\n for n in keywords:\n conheader.append(n)\n cwriter.writerow(conheader)\n\n linenum = 0\n\n for i in range(len(wokresults)):\n searchdata = wokresults[i]\n\n if wcexists:\n wc = searchWoKTools.generateabstractwc(searchdata)\n imgpath = os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname,\n searchdata[0]['pretty_formula'] + '.png')\n wc.to_file(imgpath)\n\n fwriter.writerow([searchdata[0]['pretty_formula'],\n str(searchdata[0]['numResults']) + ' publications',\n str(searchdata[0]['spacegroup']) + ' spacegroup',\n str(searchdata[0]['band_gap']) + ' band gap',\n searchdata[0]['searchURL'],\n '=HYPERLINK(\"' + imgpath + '\",\"Word Cloud\")'])\n linenum += 1\n\n conline = [\n '=HYPERLINK(\"[' + fulltitle + ']' + searchname + 'Full' + '!A' + str(linenum) + '\",\"' +\n searchdata[0]['pretty_formula'] + '\")',\n\n str(searchdata[0]['numResults']),\n str(searchdata[0]['spacegroup']),\n str(searchdata[0]['band_gap'])]\n\n fwriter.writerow([])\n linenum += 1\n\n for key in keyresults[i].keys():\n keyrow = []\n conkeynum = 0\n for n in range(len(keyresults[i][key])):\n paper = keyresults[i][key][n]\n if paper != 0:\n cellstring = '=HYPERLINK(\"' + searchdata[1][n]['DOIlink'] + '\",\"' + key + '(' + str(\n paper) + ')\")'\n keyrow.append(cellstring)\n conkeynum += 1\n if keyrow:\n fwriter.writerow(keyrow)\n linenum += 1\n if conkeynum != 0:\n constring = '=HYPERLINK(\"[' + fulltitle + ']' + searchname + 'Full' + '!A' + str(\n linenum) + '\",\"' + str(conkeynum) + '\")'\n conline.append(constring)\n else:\n conline.append('')\n\n cwriter.writerow(conline)\n\n fwriter.writerow([])\n fwriter.writerow([])\n linenum += 2\n\n return json.dumps([os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname)])",
"def exportEvaluation(self,results,url):\n profbox()\n if not os.path.exists(url):\n open(url, 'w').close()\n myfile = open(url, 'a')\n\n wr = csv.writer(myfile)\n r = numpy.array(results)\n if len(r.shape) == 1:\n wr.writerow(results)\n else:\n wr.writerows(results)",
"def sites_csv():\n import io\n import csv\n\n dest = io.StringIO()\n dest.write('\\ufeff')\n writer = csv.writer(dest, quoting=csv.QUOTE_MINIMAL)\n\n with Config() as config:\n with db.Connection(config) as con:\n writer.writerow(con.fieldnames)\n writer.writerows(con.read_entries())\n\n output = flask.make_response(dest.getvalue())\n output.headers[\"Content-Disposition\"] = \"attachment; filename=spatialcitizenscience.csv\"\n output.headers[\"Content-type\"] = \"text/csv\"\n return output",
"def downloadData(url):\n response = urllib2.urlopen(url)\n html = response.read()\n localfile = open('hitdata.csv', 'wb')\n localfile.write(html)\n localfile.close()",
"def exportEvaluation(self, results, url):\r\n # research\r\n profprint()\r\n if not os.path.exists(url):\r\n print \"creating new results file: \",url\r\n open(url, 'w').close()\r\n myfile = open(url, 'a')\r\n\r\n wr = csv.writer(myfile)\r\n r = numpy.array(results)\r\n if len(r.shape) == 1:\r\n wr.writerow(results)\r\n else:\r\n wr.writerows(results)",
"def print_csv():\n # read lines, and make the first a link\n show_played = request.args.get('showPlayed', 'true') == 'true'\n show_out_of_office = request.args.get('showOutOfOffice', 'true') == 'true'\n songs = database.load_songs(include_played=show_played, include_out_of_office=show_out_of_office)\n entries = [_convert_first_href(str(x)) for x in songs]\n header_line = \"YouTube Link,Played,Song Name,Added by\\n\"\n return \"%s%s\" % (header_line, \"\\n\".join(entries))",
"def loop_csv(input_csv_path, output_csv_path):\n counter = 0\n with open(input_csv_path, 'rb') as read_csvfile:\n projectsreader = csv.DictReader(\n read_csvfile, delimiter=',', quotechar='\"')\n\n with open(output_csv_path, 'w') as write_csvfile:\n fieldnames = ['acronym', 'title', 'projectUrl', 'foundProjectUrl1',\n 'foundProjectUrl2', 'foundProjectUrl3',\n 'foundProjectUrl4', 'foundProjectUrl5',\n 'foundProjectUrl6', 'foundProjectUrl7',\n 'foundProjectUrl8', 'foundProjectUrl9',\n 'foundProjectUrl10']\n writer = csv.DictWriter(write_csvfile, fieldnames=fieldnames)\n # writer.writeheader() # this method only available at python 2.7\n for row in projectsreader:\n if counter == 100:\n time.sleep(86400) # sleep 1 day\n counter = 0\n\n res = query_google_cse(\n row['acronym'] + \" \" + row['title'] +\n \" project -site:cordis.europa.eu -site:ec.europa.eu\")\n\n # save response to file\n with open('responses_gcse.json', 'w') as outfile:\n json.dump(res, outfile)\n\n # a query response may not have 10 results, so we have to check\n # for that\n results = []\n result_size = res['queries']['request'][0]['totalResults']\n\n print \"INFO: RESULT SIZE %s\" % result_size\n for i in range(10):\n if i < int(result_size):\n results.append(res['items'][i]['link'])\n else:\n results.append('')\n\n # print \"Control Print: \" + res['items'][0]['link']\n print \"INFO: First Result: \" + results[0]\n writer.writerow({\n 'acronym': row['acronym'],\n 'title': row['title'],\n 'projectUrl': row['projectUrl'],\n 'foundProjectUrl1': results[0],\n 'foundProjectUrl2': results[1],\n 'foundProjectUrl3': results[2],\n 'foundProjectUrl4': results[3],\n 'foundProjectUrl5': results[4],\n 'foundProjectUrl6': results[5],\n 'foundProjectUrl7': results[6],\n 'foundProjectUrl8': results[7],\n 'foundProjectUrl9': results[8],\n 'foundProjectUrl10': results[9],\n })\n sys.stdout.flush()\n time.sleep(2)\n counter += 1",
"def ingest_file(input, fields, advanced_operators, output, delimiter=',', quotechar='\"'):\n with open(input, 'rb') as csv_file:\n reader = csv.DictReader(csv_file)\n\n with open(output, 'a') as write_csvfile:\n fieldnames = ['acronym', 'title', 'projectUrl',\n 'foundProjectUrl1', 'foundProjectUrl2',\n 'foundProjectUrl3', 'foundProjectUrl4',\n 'foundProjectUrl5', 'foundProjectUrl6',\n 'foundProjectUrl7', 'foundProjectUrl8',\n 'foundProjectUrl9', 'foundProjectUrl10']\n\n writer = csv.DictWriter(write_csvfile, fieldnames=fieldnames)\n writer.writeheader() # this method only available at python 2.7\n\n search_engine = SearchWeb()\n\n # iterate reader\n for row in reader:\n query_string = str(concatenate(row, fields))\n\n response = search_engine.search(query_string, advanced_operators)\n\n projectsUrl = []\n results_size = len(response)\n\n # TODO print with logger\n print \"INFO: RESULT SIZE - %s\" % results_size\n\n for i in range(10):\n if i < results_size:\n projectsUrl.append(response[i]['Url'])\n else:\n projectsUrl.append('')\n\n # TODO print with logger\n print \"INFO: FIRST RESULT - %s\" % projectsUrl[0]\n writer.writerow(dict(acronym=row['acronym'], title=row['title'], projectUrl=row['projectUrl'],\n foundProjectUrl1=projectsUrl[0], foundProjectUrl2=projectsUrl[1],\n foundProjectUrl3=projectsUrl[2], foundProjectUrl4=projectsUrl[3],\n foundProjectUrl5=projectsUrl[4], foundProjectUrl6=projectsUrl[5],\n foundProjectUrl7=projectsUrl[6], foundProjectUrl8=projectsUrl[7],\n foundProjectUrl9=projectsUrl[8], foundProjectUrl10=projectsUrl[9]))",
"def download_examples(request):\n\n file_required = request.GET.get('token',None)\n path = ''\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n if file_required == 'reports':\n path = os.path.join(workpath, './static/examples/report.csv')\n\n elif file_required == 'concepts':\n path = os.path.join(workpath, './static/examples/concept.csv')\n\n elif file_required == 'labels':\n path = os.path.join(workpath, './static/examples/labels.csv')\n\n elif file_required == 'pubmed':\n path = os.path.join(workpath, './static/examples/pubmed.csv')\n\n content = open(path,'r')\n return HttpResponse(content, content_type='text/csv')",
"def citations(request, format='csv'):\n \n filtered = False # make sure we're filtering by something\n records = Record.objects.distinct() #.order_by('gabi_acc_number')\n \n \n # accession number\n if request.GET.get('gabi_acc_number'):\n filtered = True\n records = records.filter(gabi_acc_number=request.GET.get('gabi_acc_number').upper())\n \n # species AND bentity\n if request.GET.get('species'):\n filtered = True\n if request.GET.get('species'): \n records = records.filter(valid_species_name_id=request.GET.get('species').capitalize())\n if request.GET.get('bentity_id'):\n records = records.filter(bentity_id=request.GET.get('bentity_id').upper())\n \n # lat and lon\n if request.GET.get('lat') and request.GET.get('lon'):\n filtered = True\n if request.GET.get('lat'):\n records = records.filter(lat=request.GET.get('lat'))\n if request.GET.get('lon'):\n records = records.filter(lon=request.GET.get('lon'))\n \n # status\n if request.GET.get('status'):\n records = records.filter(status=request.GET.get('status')[0].upper())\n \n \n # error message if the user didn't supply an argument to filter the records\n if not filtered: \n return errorResponse(\"Please supply at least one these argument-combinations: 'gabi_acc_number', ('species' and 'bentity_id'), or ('lat' and 'lon').\", format, {'records': []})\n \n \n # fetch all the bentitites at once, so we don't have to hit the database once for each record\n records = records.prefetch_related('bentity') \n \n output_objects = [{\n 'gabi_acc_number': r.gabi_acc_number,\n 'species': r.valid_species_name_id,\n 'bentity_id': r.bentity_id,\n 'bentity_name': r.bentity.bentity,\n 'status': r.status,\n 'type_of_data': r.type_of_data,\n 'lat': r.lat,\n 'lon': r.lon, \n 'citation': r.citation,\n } for r in records]\n \n \n \n if format == 'csv':\n return CSVResponse(output_objects, ('gabi_acc_number', 'species', 'bentity_id', 'bentity_name', 'lat', 'lon', 'status', 'type_of_data', 'citation'))\n \n else:\n return JSONResponse({'records': output_objects})",
"def downloadData(url):\r\n\r\n data = urllib2.urlopen(url)\r\n csvdata = data.read()",
"def download_to_csv(self, search_results, filename):\n\n current_page = search_results\n\n with open(filename, \"w\") as csvfile:\n fieldnames = [\"id\", \"name\", \"name_abbreviation\", \"decision_date\", \"court_id\", \"court_name\", \"court_slug\",\n \"judges\", \"attorneys\", \"citations\", \"url\", \"head\", \"body\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n while True:\n for case in current_page[\"results\"]:\n case_data = {\n \"id\": case[\"id\"],\n \"name\": case[\"name\"],\n \"name_abbreviation\": case[\"name_abbreviation\"],\n \"decision_date\": case[\"decision_date\"],\n \"court_id\": case[\"court\"][\"id\"],\n \"court_name\": case[\"court\"][\"name\"],\n \"court_slug\": case[\"court\"][\"slug\"],\n \"judges\": str(case[\"casebody\"][\"data\"][\"judges\"]),\n \"attorneys\": str(case[\"casebody\"][\"data\"][\"attorneys\"]),\n \"citations\": str(case[\"citations\"]),\n \"url\": case[\"url\"],\n \"head\": case[\"casebody\"][\"data\"][\"head_matter\"],\n \"body\": case[\"casebody\"][\"data\"][\"opinions\"][0][\"text\"]\n }\n writer.writerow(case_data)\n\n try:\n next_result = self._request(current_page[\"next\"])\n current_page = next_result.json()\n\n except:\n break\n\n print(\"Downloaded \" + str(search_results[\"count\"]) + \" court cases to file \" + filename + \".\")",
"def download_ground_truths(request):\n\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n path1 = os.path.join(workpath, './static/temp/temp.csv')\n path2 = os.path.join(workpath, './static/BioC/temp_files/to_download.csv')\n if os.path.exists(path1):\n os.remove(path1)\n if os.path.exists(path2):\n os.remove(path2)\n username = request.session['username']\n inst = request.GET.get('institute',None)\n if inst == '':\n inst = None\n else:\n inst = str(inst)\n use = request.GET.get('usec',None)\n if use == '':\n use = None\n else:\n use = str(use)\n report_type = request.GET.get('report_type',None)\n if report_type == '':\n report_type = None\n annotation_mode = request.GET.get('mode',None)\n if annotation_mode == '':\n annotation_mode = None\n lang = request.GET.get('lang',None)\n if lang == '':\n lang = None\n else:\n lang = str(lang)\n batch = request.GET.get('batch','') # added 22/10/2021\n if batch == '' or batch == 'all':\n batch = None\n else:\n batch = int(batch)\n\n all = request.GET.get('all_gt',None)\n action = request.GET.get('action',None)\n format = request.GET.get('format',None)\n json_resp = {}\n json_resp['ground_truth'] = []\n if format == 'json' or all =='all' :\n json_resp = create_json_to_download(report_type,action,username,use,annotation_mode,inst,lang,all,batch)\n return JsonResponse(json_resp)\n\n elif format == 'csv':\n response = HttpResponse(content_type='text/csv')\n resp = create_csv_to_download(report_type,annotation_mode,username,use,inst,lang,action,response,batch)\n return resp\n\n elif format == 'biocxml':\n json_keys_to_display = request.session['fields']\n json_keys_to_ann = request.session['fields_to_ann']\n if report_type == 'pubmed':\n json_keys_to_display = ['year','authors','volume','journal']\n json_keys_to_ann = ['title','abstract']\n json_keys = json_keys_to_display + json_keys_to_ann\n resp = generate_bioc(json_keys,json_keys_to_ann,username,action,lang,use,inst,'xml',annotation_mode,report_type,batch)\n return HttpResponse(resp,content_type='application/xml')\n\n elif format == 'biocjson':\n json_keys_to_display = request.session['fields']\n json_keys_to_ann = request.session['fields_to_ann']\n json_keys = json_keys_to_display + json_keys_to_ann\n resp = generate_bioc(json_keys,json_keys_to_ann,username,action,lang,use,inst,'json',annotation_mode,report_type,batch)\n return HttpResponse(resp,content_type='application/xml')",
"def download_mltpl_courts(self, search_results, filename):\n\n court_index = 0\n row_count = 0\n search_results = search_results\n current_uri = search_results[court_index]\n\n downloaded_count = 0\n\n with open(filename, \"w\", encoding='utf-8') as csvfile:\n fieldnames = [\"id\", \"name\", \"name_abbreviation\", \"decision_date\", \"court_id\", \"court_name\",\n \"court_slug\", \"judges\", \"attorneys\", \"citations\", \"url\", \"head\", \"body\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n current_page = self._request(current_uri).json()\n\n while True:\n try:\n if current_page[\"count\"] > 0:\n for case in current_page[\"results\"]:\n case_data = {\n \"id\": case[\"id\"],\n \"name\": case[\"name\"],\n \"name_abbreviation\": case[\"name_abbreviation\"],\n \"decision_date\": case[\"decision_date\"],\n \"court_id\": case[\"court\"][\"id\"],\n \"court_name\": case[\"court\"][\"name\"],\n \"court_slug\": case[\"court\"][\"slug\"],\n \"judges\": str(case[\"casebody\"][\"data\"][\"judges\"]),\n \"attorneys\": str(case[\"casebody\"][\"data\"][\"attorneys\"]),\n \"citations\": str(case[\"citations\"]),\n \"url\": case[\"url\"],\n \"head\": case[\"casebody\"][\"data\"][\"head_matter\"],\n \"body\": case[\"casebody\"][\"data\"][\"opinions\"][0][\"text\"]\n }\n writer.writerow(case_data)\n\n downloaded_count = downloaded_count + len(current_page[\"results\"])\n next_result = self._request(current_page[\"next\"])\n current_page = next_result.json()\n\n except:\n if len(current_page[\"results\"]) != 0:\n print(\"Downloaded \" + current_page[\"results\"][0][\"court\"]['slug'] + \" (\" +\n str(downloaded_count) + \" total rows)\")\n court_index = court_index + 1\n row_count = row_count + current_page[\"count\"]\n if (court_index + 1 <= len(search_results)):\n current_uri = search_results[court_index]\n current_page = self._request(current_uri).json()\n\n else:\n break\n\n print(\"Downloaded \" + str(downloaded_count) + \" court cases to file \" + filename + \".\")",
"def csv_import():\n activities = current_user.get_supervised_activities()\n if activities == []:\n flash(\"Fonction non autorisée.\", \"error\")\n return redirect(url_for(\"event.index\"))\n\n choices = [(str(a.id), a.name) for a in activities]\n form = CSVForm(choices)\n\n if not form.is_submitted():\n form.description.data = current_app.config[\"DESCRIPTION_TEMPLATE\"]\n\n failed = []\n if form.validate_on_submit():\n activity_type = ActivityType.query.get(form.type.data)\n\n file = form.csv_file.data\n processed, failed = process_stream(\n file.stream, activity_type, form.description.data\n )\n\n flash(\n f\"Importation de {processed-len(failed)} éléments sur {processed}\",\n \"message\",\n )\n\n return render_template(\n \"import_csv.html\",\n form=form,\n failed=failed,\n title=\"Création d'event par CSV\",\n )",
"def read_google(url, **kwargs):\n if url[-1] != '/':\n url += '/'\n return pd.read_csv(url + 'export?gid=0&format=csv', **kwargs)",
"def scrapper(request):\n bq_create_table()\n df = loop_req()\n csv = df.to_csv()\n upload_bucket(csv)\n return csv",
"def downloadResponse(request, formcode=None):\n if formcode !=None:\n response = HttpResponse(content_type='text/csv')\n responses = Response.objects.filter(form_id=formcode)\n writer = csv.writer(response)\n writer.writerow(['User', 'Submit Date', 'Answer1', 'Answer2', 'Answer3'])\n for r in responses:\n user = User.objects.get(id=r.user_id)\n writer.writerow([user, r.submitDate, r.answer1 ,r.answer2 , r.answer3])\n\n response['Content-Disposition'] = 'attachment; filename=\"response.csv\"'\n return response \n return render(request, 'download.html')",
"def csvdata():\n return render_template(\"data.html\")",
"def dwn_rel_sup_csv(request):\n i = int(request.GET.get('i'))\n \n return FileResponse(open('temp/relation_support_datasets/relation_support_dataset_{}_{}.csv'.format(i, request.user.username),'rb'))",
"def download_global_csv(output_dir: str):\n for filename, url_path in CSVS_TO_READ:\n url = urljoin(GITHUB_BASE_URL, url_path)\n path = os.path.join(output_dir, filename)\n df = pd.read_csv(url)\n df.to_csv(path)",
"def downloading_csv(self, url_address):\n cache.clear()\n url = URL(url_address)\n f = open(self.cur_quotes_csvfile, 'wb') # save as test.gif\n f.write(url.download())\n f.close()",
"def dlCsvReport(self):\r\n requestElems = {'xf': 'csv'}\r\n requestElems.update(self.getReportConfig())\r\n \r\n csvdata = self.sendRequest(self.reportFormURL, self.fileOpener,\r\n requestElems, 'POST').read()\r\n\r\n self.writeExportFile('csv', csvdata)",
"def get(self, request, **_kwargs):\n self.check_access()\n response = HttpResponse(content_type='text/csv')\n filename = \"team-membership_{}_{}_{}.csv\".format(\n self.course.id.org, self.course.id.course, self.course.id.run\n )\n response['Content-Disposition'] = f'attachment; filename=\"{filename}\"'\n load_team_membership_csv(self.course, response)\n return response",
"def csv_download_view(request):\n logging.info(\" CSV file download is working\")\n now = datetime.now()\n timestamp = now.strftime(\"%Y_%m_%d\")\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"results_' + \\\n GLOBAL_VARIABLE.get_host_name()+'_'+timestamp+'.csv\"'\n\n writer = csv.writer(response)\n list_of_cd = list(GLOBAL_VARIABLE.get_current_data())\n\n for i in range(10):\n rows = [sub_list[i] for sub_list in list_of_cd]\n writer.writerow(rows)\n\n return response",
"def _get_csv_data():\n temp_dir = './catalog/vendor_caches'\n if not os.path.exists(temp_dir):\n os.mkdir(temp_dir)\n \n csv_output_dict = {}\n \n print 'Downloading csv file...'\n br = utils.create_browser(SLEEP_MIN, SLEEP_MAX)\n \n if TESTRUN: print 'Load Login Page'\n br.open(\"https://www.broderbros.com/cgi-bin/online/webbro/bro-index.w\")\n# br.response().read()\n try:\n # Fill login form\n br.select_form(name = 'frmLogin')\n frm = br.form\n \n ctrl = frm.find_control('userName')\n ctrl.value = USERNAME\n ctrl = frm.find_control('password')\n ctrl.value = PASSWORD\n \n # Submit login form\n if TESTRUN: print 'Submit Login Form'\n \n br.select_form(name = 'frmLogin')\n br.submit()\n except:\n print \"Login form does not exist, please check URL, downloaded html or site is down\"\n return None\n \n # Simulate js for setting cookies\n utmn = str(int(random.random()*4294967295))\n utmu = str(int(time.time()/1000))\n utm1 = \"__utm1=\"+utmn+\".\"+utmu+\"; path=/; expires=Sun, 18 Jan 2038 00:00:00 GMT\"\n utm2 = \"__utm2=\"+utmu+\"; path=/; expires=Sun, 18 Jan 2038 00:00:00 GMT\"\n utm3 = \"__utm3=\"+utmu+\"; path=/;\"\n br.set_cookie(utm1)\n br.set_cookie(utm2)\n br.set_cookie(utm3)\n \n if TESTRUN: print 'Downloading and extracting CSV'\n try:\n tar_url = \"https://www.broderbros.com/cgi-bin/download/webshr/prod-info-view.w?f=bro-AllStyles_R06.tar.gz\"\n br.retrieve(tar_url, os.path.join(temp_dir, \"bro-AllStyles_R06.tar.gz\"))\n tar = tarfile.open(os.path.join(temp_dir, \"bro-AllStyles_R06.tar.gz\"))\n #~ member = tar.getmember('/usr/dbx/ai/AllStyles4/bro/items_R06.csv') # get file info \n for member in tar.getmembers():\n member.name = member.name.split('/')[-1] # strip directory from filename\n tar.extractall(os.path.join(temp_dir, 'bro-AllStyles_R06'))\n tar.close()\n except:\n print \"Issue in downloading CSV\"\n return None\n \n #reader = csv.reader(open('data/bro-AllStyles_R06/items_R06.csv', 'rb'))\n \n f_object = open(os.path.join(temp_dir, 'bro-AllStyles_R06/items_R06.csv'), 'rb')\n reader = csv.reader(f_object)\n \n for row in reader:\n item_id = row[7].lower()\n if csv_output_dict.has_key(item_id):\n if TESTRUN:print \"item id already in dictionary so excluding it.\"\n pass\n else:\n mill = row[23]\n item_url = 'https://www.broderbros.com/cgi-bin/online/webshr/prod-detail.w?sr='+str(item_id)\n browser = utils.create_browser(SLEEP_MIN, SLEEP_MAX)\n browser.set_handle_redirect(False)\n \n try:\n #~ browser.open_novisit(item_url)\n temp_dict = {}\n temp_dict['id'] = item_id.lower()\n temp_dict['brand'] = mill.lower()\n temp_dict['url'] = item_url\n csv_output_dict[item_id] = temp_dict\n if TESTRUN:\n print temp_dict\n print '+'*78\n except:\n pass\n f_object.close()\n shutil.rmtree(os.path.join(temp_dir, \"bro-AllStyles_R06\"))\n \n os.remove(os.path.join(temp_dir, \"bro-AllStyles_R06.tar.gz\"))\n return csv_output_dict",
"def get_results():\n # store info in a dictionary {name -> shortname}\n res = {}\n session = requests.Session()\n handle_url('http://www.gocomics.com/features', session, res)\n handle_url('http://www.gocomics.com/explore/editorial_list', session, res)\n handle_url('http://www.gocomics.com/explore/sherpa_list', session, res)\n save_result(res, json_file)"
] |
[
"0.6201149",
"0.607644",
"0.6065895",
"0.6027334",
"0.6022655",
"0.6019954",
"0.5921036",
"0.58984226",
"0.58627075",
"0.58585614",
"0.5844853",
"0.5842384",
"0.58340067",
"0.5828979",
"0.57914275",
"0.57760054",
"0.57621425",
"0.57086945",
"0.5707585",
"0.5690205",
"0.56767404",
"0.56727576",
"0.5670345",
"0.56537724",
"0.5649873",
"0.56418747",
"0.5625504",
"0.5620954",
"0.561983",
"0.56006753"
] |
0.6346724
|
0
|
Imports from jammers.csv and mergeinserts into jamsite.jammers.
|
def import_from_file(jamsite, source='jammers.csv', fieldnames=None):
# import jammers.csv
with open(source) as csvfile:
jamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )
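
A hypothetical call sketch for the function above; the jamsite object and the column names are illustrative assumptions (any object exposing mergeinsert(), as in the snippet, would do).

# Hypothetical usage: fieldnames is optional and, when given, supplies the
# column names explicitly instead of reading them from the CSV header row.
import_from_file(jamsite, source='jammers.csv',
	fieldnames=['name', 'email', 'skills'])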
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def import_registered_jammers(filename=\"jammerskillz.csv\"):\n\tfrom utils import gf_fieldnames\n\treturn import_jammers(filename, fieldnames=gf_fieldnames())",
"def import_jammers(csvfile, fieldnames=None):\n\tparsed_jammers = []\n\tif fieldnames is None:\n\t\t# Read fieldnames from first line of csvfile.\n\t\tjammers = csv.DictReader(csvfile) \n\telse:\n\t\t# Fieldnames provided\n\t\t# Skip header line/fieldnames line\n\t\tjammers = csv.DictReader(csvfile, fieldnames)\n\t\tnext(jammers)\n\n\tfor jammer in jammers:\n\t\tif hasattr(csvfile, \"name\") and csvfile.name == \"jammers.csv\":\n\t\t\t# These jammers has registered to the jam site. \n\t\t\tjammer[\"ticket\"] = True\n\t\t# Put it in object yo.\n\t\tjammer = Jammer(**jammer)\n\t\tparsed_jammers.append(jammer)\n\treturn parsed_jammers",
"def import_from_url(jamsite, url, fieldnames=None):\n\t# import csv, from the webz.\n\tcsvfile = fetch_csv_from_url(url)\n\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )",
"def load_subjects_to_db():\n try:\n with open(configuration.get_file_location(\"materias.csv\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\";\")\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n logging.info(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n subject = MateriaClass.Materia(row[1], row[0])\n logging.info(subject.print())\n sql = connectSQLite.save_subject(subject)\n for row in sql.fetchall():\n logging.info(row)\n sql = connectSQLite.get_db().close()\n except Exception as error:\n logging.info(\"FALSE, exception ocurred\")\n print(error)\n # line_count += 1\n # print(f'Processed {line_count} lines.')",
"def populate_from_samples():\n\n # Tags\n try:\n for row in get_csv_data('samples/tags.csv'):\n tag = Tag(name=row['Name'], desc=row['Description'])\n db_session.add(tag)\n finally:\n db_session.commit()\n\n # Organizations\n try:\n for row in get_csv_data('samples/organizations.csv'):\n org = Organization(desc=row['Name'])\n db_session.add(org)\n finally:\n db_session.commit()\n\n # Departments\n try: \n for row in get_csv_data('samples/departments.csv'):\n org = db_session.query(Organization).filter_by(desc=row['Organization']).one()\n dpt = Department(desc=row['Department'], org=org)\n\n db_session.add(dpt)\n finally:\n db_session.commit()\n\n # Application types\n try:\n for row in get_csv_data('samples/apptypes.csv'):\n apptype = AppType(desc=row['Name'])\n db_session.add(apptype)\n finally:\n db_session.commit()\n\n # Applications\n try:\n for row in get_csv_data('samples/applications.csv'):\n apptype = db_session.query(AppType).filter_by(desc=row['AppType']).one()\n dpt = db_session.query(Department).join(Organization).\\\n filter(Department.desc==row['Department']).\\\n filter(Organization.desc==row['Organization']).\\\n one()\n\n app = App(desc=row['Application'], \n app_type=apptype, \n department=dpt,\n version=row['Version'],\n environment=row['Environment'],\n platform=row['Platform']\n )\n\n db_session.add(app)\n finally:\n db_session.commit()\n\n # Connections and Headers\n try:\n for row in get_csv_data('samples/connections.csv'):\n conn = Connection(conn_type=row['Type'], url=row['URL'], port=row['Port'], answer=row['Answer'])\n header = Header(conn_id=conn.id, header=row['Header'], value=row['Value'], conn=conn)\n\n db_session.add(conn)\n db_session.add(header)\n finally:\n db_session.commit()",
"def doImport(self,textFile):\n self.loadText(textFile)\n self.getBooks()\n #self.copyBooks()\n self.genLibData()\n self.genLibCells()\n self.sortRecords()",
"def importer():\n\n #Lager liste der eg legg transaksjonar som blir henta og ikkje laga:\n get_list = []\n\n #Gjer txt-fila i mappen om til csv-fil\n file_fixer()\n\n with open(out_path) as file:\n reader = csv.reader(file)\n r_0 = next(reader)\n r_0.append(\"type\")\n r_0.append('amount')\n r_0.append('category')\n r_0.append('account')\n r_0.append('project')\n\n\n for row in reader:\n #Legger til dei fire kollonenne (amount, account, subaacount, project), tomme.\n row.append(\"\")\n row.append(\"\")\n\n #Omformatterer rader:\n row = format_fix(row)\n row.append(\"\")\n row.append(\"\")\n row.append(\"\")\n print(row)\n\n\n try:\n obj, created = Transaction.objects.get_or_create(\n date=row[0],\n transaction_type=row[1],\n description=row[2],\n amount=row[3]\n )\n\n except Transaction.MultipleObjectsReturned:\n continue\n\n if not created:\n get_list.append(obj.pk)\n\n return get_list",
"def populate(self):\n\n self.create_index()\n self.check_type()\n self.create_mapping()\n\n f = open(self.csv_file, 'rU')\n\n # Read the first line for all the headers\n headers = f.readline().split(',')\n\n # Read the rest of the document\n rows = f.readlines()\n added_counter = 0\n\n actions = []\n for row in rows:\n fields = row.split(',')\n obj = {}\n for header in headers:\n # we call lower-case here because we were originally using\n # analyzed strings in elasticsearch (and they were\n # automatically converted). Code was built based on that so it's\n # easiest to convert for now\n try:\n obj[header.replace('\\n', '')] = float(fields[\n headers.index(header)].replace('\\n', '').lower())\n except ValueError:\n obj[header.replace('\\n', '')] = fields[\n headers.index(header)].replace('\\n', '').lower()\n # check afterwards to replace empty strings with None (which json.dumps hopefully writes to null)\n if obj[header.replace('\\n', '')] == '':\n obj[header.replace('\\n', '')] = None\n try:\n item = {\n '_index': self.es_main_index,\n '_type': self.es_main_type,\n '_source': obj\n }\n\n actions.append(item)\n\n added_counter += 1\n print('%s new records added' % added_counter,\n end='\\r')\n sys.stdout.flush()\n\n if added_counter % self.chunk_size == 0:\n helpers.bulk(self.es, actions)\n actions = []\n\n except ConnectionError:\n print('There was a connection error. Check your Elastic' +\n ' Search setting and make sure Elastic Search is ' +\n 'running.')\n return False\n\n # add the remaining items\n if actions:\n helpers.bulk(self.es, actions)\n\n print('The update is completed. %s new records were added.' %\n added_counter)",
"def update_subjects_to_db():\n with open(configuration.get_file_location(\"materias.csv\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\";\")\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n logging.info(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n subject = MateriaClass.Materia(row[1], row[0])\n logging.info(subject.print())\n connectSQLite.update_subject(subject)\n # line_count += 1\n # print(f'Processed {line_count} lines.')",
"def importFile(self):\n\n ## Backing up old CSV and JSON files before beginning import operations\n if os.path.isfile(\"text_files/customers.csv\") and os.path.isfile(\"text_files/customers.json\"):\n print(\"\\nCreating a backup of the existing customer .csv and .json files before overwriting\")\n shutil.copy2(\"text_files/customers.csv\", \"text_files/customers.csv.backup\" + str(time.time()))\n shutil.copy2(\"text_files/customers.json\", \"text_files/customers.json.backup\" + str(time.time()))\n\n ## Importing the text file for cleaning then converting to CSV\n input_file = open(\"text_files/customer_export.txt\", \"r\")\n output_file = open(\"text_files/customers.csv\", \"w\")\n\n ## A loop to clean and write the customer_export txt file to a CSV\n for line in input_file:\n clean_text = \"\"\n check_line = line.replace(\"#\", \"\").replace(\",,\",\"\").split(\"|\")\n for line in check_line:\n if line != check_line[10]:\n clean_text += line + \",\"\n elif line == check_line[10]:\n clean_text += line + \"\\n\"\n output_file.write(clean_text)\n\n ## Closing TXT file and CSV file after formatting\n input_file.close()\n output_file.close()\n\n ## Opening the cleaned CSV file for conversion to Json\n with open('text_files/customers.csv') as clean_csv:\n ## Converting CSV file to Json\n converted = csv.DictReader(clean_csv)\n rows = list(converted)\n\n ## Writing converted CSV to Json file\n with open('text_files/customers.json', 'w') as convert:\n json.dump(rows, convert)\n\n ## Deleting all data currently in database before importing new file\n db_connection.executeQuery(\"DELETE FROM CRM;DBCC CHECKIDENT ('CRM', RESEED, 0) DELETE FROM Mailings; DBCC CHECKIDENT ('Mailings', RESEED, 0) COMMIT\") \n\n ## Loading the newly created Json file\n with open(\"text_files/customers.json\") as customers_json:\n customers = json.load(customers_json)\n\n ## A loop to add the contents of the Json file to the database \n print(\"Writing imported file to database please wait...\")\n for key in customers:\n db_connection.executeQuery(\"INSERT INTO dbo.CRM (f_name, l_name, company, address, city, county, state, zip, primary_phone, secondary_phone, email_address) VALUES ('\" + key[\"first_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"last_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"company_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"address\"] + \"', '\" + key[\"city\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"county\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"state\"] + \"', '\" + str(key[\"zip\"]) + \"', '\" + key[\"phone1\"] + \"', '\" + key[\"phone2\"] + \"' , '\" + key[\"email\"] + \"'); COMMIT\")\n db_connection.executeQuery(\"INSERT INTO dbo.Mailings (name, company, address) VALUES ('\" + key[\"first_name\"].replace(\"\\'\", \"\\'\\'\") + \" \" + key[\"last_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"company_name\"].replace(\"\\'\", \"\\'\\'\") + \"','\" + key[\"address\"] + \" \" + key[\"city\"] + \" \" + key[\"county\"] + \" \" + key[\"state\"] + \" \" + str(key[\"zip\"]) + \"'); COMMIT\") \n\n print(\"\\nFinished writing to file. Returning to main menu...\")",
"def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()",
"def convert_csv_to_alfed(self) -> None:\n global output_path, file_name\n self.parse_command_line_args()\n self.validate_command_line_args()\n\n for _, _, files in walk(self.args.input):\n for output_file in files:\n if output_file.endswith(\".csv\"):\n file_name, _ = path.splitext(output_file)\n output_path = \"\"\n output_path = path.join(self.args.output, file_name)\n\n try:\n mkdir(output_path)\n print(f\"Creating folder {output_path}...\")\n except OSError:\n print(f\"Creation of directory {output_path} failed\")\n\n with open(path.join(self.args.input, output_file), \"rt\") as csv_file:\n reader = DictReader(csv_file, fieldnames=self.args.fieldorder)\n\n for row in reader:\n uid = str(uuid.uuid1()).upper()\n row[\"content\"] = self.replace_embedded_snipptes(row[\"content\"], self.args.lplaceholder,\n self.args.rplaceholder, self.args.changeplaceholders)\n output = dumps(\n {\n \"alfredsnippet\": {\n \"snippet\": row['content'],\n \"uid\": uid,\n \"name\": row['name'],\n \"keyword\": row['abbreviation']\n }\n },\n sort_keys=False, indent=4,\n separators=(',', ': ')\n )\n\n output_file = f\"{row['name']}_[{uid}].json\"\n target = path.join(output_path, output_file)\n f = open(target, \"w\")\n f.write(output)\n f.close()\n print(f\"Writing file {target}...\")\n else:\n self.error_msg(\"The files in the input folder are not with extension '*.csv'\")\n\n subprocess.call(\n [\n 'ditto',\n '--norsrc',\n '-ck',\n output_path,\n self.args.output + \"/\" + file_name + \".alfredsnippets\"\n ]\n )\n print(f\"{self.args.output}/{file_name}.alfredsnippets was created\")\n self.created_folders.append(file_name)\n\n self.remove_temp_folders()",
"def expand_source_data():\n\n file = csv_file('exercise.csv')\n add_to_csv_file = generate_csv.BuildCsvFile(100000, file)\n add_to_csv_file.add_rows()",
"def load_skills():\n\n Skill.query.delete()\n\n # get all the qualifications text from postings\n postings = db.session.query(Posting.qualifications).all()\n # combine qualifications into a list\n all_skills = []\n with open('filler.txt') as filler:\n del_words = filler.read()\n for post in postings:\n words = post.qualifications.lower().split()\n # iterate through a list of those skills\n for word in words:\n word = word.strip(\"-()/\\,.:;* 1234567890\")\n # check to see if that word isn't in our filler document\n # if not, add it to the table\n if word not in del_words and word not in all_skills:\n all_skills.append(word)\n skill = Skill(skill=word)\n db.session.add(skill)\n db.session.commit()",
"def insert_from_csv_reader(self, reader):\n\n conn = sqlite3.connect(self.db_name)\n c = conn.cursor()\n for installation_key in reader.installations:\n insert_query = \"INSERT INTO Installation(Id, Name, Address, PostalCode, City, Latitude, Longitude) \" \\\n \"VALUES(?, ?, ?, ?, ?, ?, ?) \"\n install = reader.installations[installation_key]\n c.execute(insert_query, (\n install.id, install.name, install.address, install.postal_code, install.city, install.latitude,\n install.longitude))\n\n for activity_key in reader.activities:\n insert_query = \"INSERT INTO Activity(Id, Name) VALUES(?, ?)\"\n act = reader.activities[activity_key]\n c.execute(insert_query, (act.id, act.name))\n\n for equipment_key in reader.equipments:\n insert_query = \"INSERT INTO Equipment(Id, Name, IdInstallation) VALUES(?, ?, ?)\"\n equip = reader.equipments[equipment_key]\n c.execute(insert_query, (equip.id, equip.name, equip.installation.id))\n\n # We use the activities references present on the equipment to create a linking table\n for act in equip.activities:\n insert_query = \"INSERT INTO EquipmentActivity(IdEquipment, IdActivity) VALUES(?, ?)\"\n c.execute(insert_query, (equip.id, act.id))\n\n conn.commit()\n conn.close()",
"def main():\n langs = []\n\n with open(\"sql/07_populate.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in data folder\\n\\n\")\n\n langs = write_lang_city(sql)\n write_groups_diets(sql, langs)\n\n with open(\"sql/10_populate_test_data.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in moc_data folder\\n\\n\")\n write_test_data(sql)\n # This command sets postgis coordinates based on latitude and longitude\n sql.write(\"UPDATE restaurant SET geo_location = ST_POINT(latitude, longitude);\\n\")\n sql.close()",
"def process_bibliography(conn: Connection, path: Path) -> None:\n sql = \"INSERT OR IGNORE INTO Bibliography (key, text) VALUES (?, ?)\"\n run_sql_on_csv(conn, path, sql, (str, str))",
"def import_smses():\n log.debug('importing sms..')\n\n smses = []\n smses.extend(parse(os.path.join('data', 'inbox.csv')))\n smses.extend(parse(os.path.join('data', 'sent.csv')))\n\n contacts = link_contact_with_sms(smses)\n for contact in contacts:\n contact.sort_smses()\n return smses",
"def load_campers():\n\n for row in open(\"static/campers.csv\"):\n row = row.rstrip()\n\n email, password, first_name, last_name, camper_photo, camper_photo_url = row.split(\",\")\n\n camper = Camper(\n camper_email=email,\n password=password,\n first_name=first_name,\n last_name=last_name,\n camper_photo=camper_photo,\n camper_photo_url=camper_photo_url)\n\n db.session.add(camper)\n\n db.session.commit()",
"def merge_csv_initial(output_filename, path):\n\n prefix = ['ParticipantID',\n 'igtb.datatime',\n 'igtb.timezone']\n\n names = ['irb',\n 'itp',\n 'ocb',\n 'inter.deviance',\n 'org.deviance',\n 'shipley.abs',\n 'shipley.vocab',\n 'neuroticism',\n 'conscientiousness',\n 'extraversion',\n 'agreeableness',\n 'openness',\n 'pos.affect',\n 'neg.affect',\n 'stai.trait',\n 'audit',\n 'gats.quantity',\n 'ipaq',\n 'psqi',\n 'gats.status']\n\n\n \n\n #b = np.loadtxt(path + names[0] + '.csv', delimiter=\",\", skiprows=1, usecols=(0, 1, 2), dtype=object)\n #a = np.array(b, dtype=object)\n\n for i,n in enumerate(names):\n file = path + n + '.csv'\n if(i==0):\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,1,2,3]) \n df_all = df\n else:\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,3]) \n df_all=pd.concat([df_all,df],axis=1)\n \n df_all=df_all.reset_index() \n a = df_all.as_matrix()\n\n # column_format = '%20s %10s %10s %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f'\n # column_format = '%20s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s'\n column_format = '%20s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s'\n names_string = ','.join(prefix + names)\n\n print(a.shape)\n\n np.savetxt(output_filename, a, delimiter=\",\", fmt=column_format, comments='', header=names_string)\n\n return output_filename",
"def populate_hateword_data():\n with open(\"./data/hate-speech-lexicons/refined_ngram_dict.csv\") as f:\n lst = [row.split(',', 1)[0] for row in f]\n lst = lst[1:]\n\n lst = [{\n 'word': word,\n 'category': [],\n 'similar_to': []\n } for word in lst]\n\n try:\n db = mongo_client.MongoClient(config.MONGO_URI).twitter\n db.hateword.delete_many({})\n result = db.hateword.insert_many(lst)\n print(\"Completed populating\", len(result.inserted_ids), \"hate words\")\n except pymongo.errors.BulkWriteError as e:\n print(e.details)",
"def _importInDjango(self):\n\n with open(settings.DATA_PATH, 'r', encoding='latin-1') as csv_file:\n reader = csv.DictReader(csv_file, delimiter=';')\n for raw in reader:\n\n # Créer ou mettre à jour la division\n division, created = Division.objects.get_or_create(\n nom=raw['Division']\n )\n if created:\n self.stdout.write(\n 'Divion {} ajoutée'.format(division.nom)\n )\n\n # Créer ou mettre à jour les équipes\n equipeDom, created = Equipe.objects.get_or_create(\n nom=raw['Equipe 1'],\n division=division\n )\n if created:\n self.stdout.write(\n 'Equipe {} ajoutée'.format(equipeDom.nom)\n )\n\n equipeExt, created = Equipe.objects.get_or_create(\n nom=raw['Equipe 2'],\n division=division\n )\n if created:\n self.stdout.write(\n 'Equipe {} ajoutée'.format(equipeExt.nom)\n )\n\n # Créer ou mettre à jour la rencontre\n scoreDom = 0 if raw['Score 1'] == '' else int(raw['Score 1'])\n scoreExt = 0 if raw['Score 2'] == '' else int(raw['Score 2'])\n forfaitDom = True if raw['Forfait 1'] == 'true' else False\n forfaitExt = True if raw['Forfait 2'] == 'true' else False\n date = datetime.datetime.strptime(raw['Date de rencontre'], '%d/%m/%Y')\n heure = datetime.datetime.strptime(raw['Heure'], '%H:%M')\n rencontre, created = Rencontre.objects.update_or_create(\n numero=int(raw['N° de match']),\n equipeDom=equipeDom,\n equipeExt=equipeExt,\n defaults={\n 'date': date,\n 'heure': heure,\n 'scoreDom': scoreDom,\n 'scoreExt': scoreExt,\n 'forfaitDom': forfaitDom,\n 'forfaitExt': forfaitExt,\n }\n )\n if created:\n self.stdout.write(\n 'Rencontre {} / {} ajoutée'.format(\n rencontre.equipeDom,\n rencontre.equipeExt\n )\n )",
"def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")",
"def read_csv_and_insert_families_sql(self, a_columns):\n \n csv_reader = csv.DictReader(open('%s/tbl_families.csv' %(self._root_dir)))\n \n nb_rows = 0\n \n lookup_dict = Lookup(LCSVRoddExtractor.FAMILIES_MAPPER)\n \n # for each line of data create an insert line\n\n insert_line = \"INSERT INTO %s.%s (%s) VALUES (%s)\"\n \n \n columns = self._create_sql_columns(a_columns)\n \n #file = open(\"/tmp/insert_products.sql\",\"w+\")\n\n for row in csv_reader:\n cpt_keys = 0\n values = \"\"\n \n for elem in a_columns:\n \n #get list of matching keys\n key = lookup_dict.get_key(elem)\n \n if not key:\n raise Exception(\"Error: %s as no matching keys in %s\" %(elem, LCSVRoddExtractor.FAMILIES_MAPPER))\n \n val = row.get(key[0], None)\n \n # and elem == \"resources_1\"\n if nb_rows == 200 and (\"%\" in val):\n print(\"This is the break\")\n \n \n val = \"%s\" % ( \"'%s'\" % (val) if val else \"NULL\")\n \n # add in values\n if cpt_keys == 0:\n values += \"%s\" % ( val )\n else:\n values += \", %s\" % ( val )\n \n \n cpt_keys += 1\n \n insert = insert_line % (\"RODD\", \"families\", columns, values)\n \n #print('[r%d]:insert = %s\\n' %(nb_rows, insert) )\n #file.write(\"%s;\\n\" %(insert))\n self._conn.execute(\"%s;\" %(insert))\n \n nb_rows += 1",
"def initialize_import(self, speakers, token_headers, subannotations=None):\n directory = self.config.temporary_directory('csv')\n for s in speakers:\n for k, v in token_headers.items():\n path = os.path.join(directory, '{}_{}.csv'.format(re.sub(r'\\W', '_', s), k))\n with open(path, 'w', newline='', encoding='utf8') as f:\n w = csv.DictWriter(f, v, delimiter=',')\n w.writeheader()\n if subannotations is not None:\n for k, v in subannotations.items():\n for sub in v:\n path = os.path.join(directory, '{}_{}_{}.csv'.format(re.sub(r'\\W', '_', s), k, sub))\n with open(path, 'w', newline='', encoding='utf8') as f:\n header = ['id', 'begin', 'end', 'annotation_id', 'label']\n w = csv.DictWriter(f, header, delimiter=',')\n w.writeheader()\n\n def _corpus_index(tx):\n tx.run('CREATE CONSTRAINT ON (node:Corpus) ASSERT node.name IS UNIQUE')\n\n def _discourse_index(tx):\n tx.run('CREATE INDEX ON :Discourse(name)')\n\n def _speaker_index(tx):\n tx.run('CREATE INDEX ON :Speaker(name)')\n\n def _corpus_create(tx, corpus_name):\n tx.run('MERGE (n:Corpus {name: $corpus_name}) return n', corpus_name=corpus_name)\n\n with self.graph_driver.session() as session:\n try:\n session.write_transaction(_corpus_index)\n except neo4j.exceptions.ClientError as e:\n if e.code != 'Neo.ClientError.Schema.EquivalentSchemaRuleAlreadyExists':\n raise\n try:\n session.write_transaction(_discourse_index)\n except neo4j.exceptions.ClientError as e:\n if e.code != 'Neo.ClientError.Schema.EquivalentSchemaRuleAlreadyExists':\n raise\n try:\n session.write_transaction(_speaker_index)\n except neo4j.exceptions.ClientError as e:\n if e.code != 'Neo.ClientError.Schema.EquivalentSchemaRuleAlreadyExists':\n raise\n session.write_transaction(_corpus_create, self.corpus_name)",
"def run(self):\n lineage_csv_gz = self.input_files_local[0][0]\n output_db = self.output_files_local()[0]\n log.write(f\"input: {lineage_csv_gz} output: {output_db}\")\n\n with IdSeqDictForUpdate(output_db, IdSeqDictValue.VALUE_TYPE_ARRAY) as lineage_dict:\n batch_list = {}\n with gzip.open(lineage_csv_gz, \"rt\") as gzf:\n for line in gzf:\n fields = line.rstrip().split(\",\")\n taxid = fields[0]\n species, genus, family = fields[-1:-4:-1]\n batch_list[taxid] = [species, genus, family]\n if len(batch_list) >= BATCH_INSERT_SIZE:\n lineage_dict.batch_inserts(batch_list.items())\n batch_list = {}\n lineage_dict.batch_inserts(batch_list.items())",
"def load_records():\n\n with open('seed_data/records.csv', 'rb') as csvfile:\n data = csv.reader(csvfile)\n for row in data:\n record_id, user_id, common_name, date_time, latitude, longitude, notes, seen, num_birds = row\n\n record = Record(record_id=record_id, user_id=user_id, common_name=common_name,\n date_time=date_time, latitude=latitude, longitude=longitude, \n notes=notes, seen=seen, num_birds=num_birds)\n\n db.session.add(record)\n\n db.session.commit()",
"def import_csv(item):\n (f_csv, f_csv_out, target_column, merge_columns) = item\n has_checked_keys = False\n\n if not merge_columns:\n raise ValueError(\"merge_columns must not be empty\")\n\n with open(f_csv_out, \"w\") as FOUT:\n CSV_HANDLE = None\n total_rows = 0\n\n for row in csv_iterator(f_csv):\n\n output = {\"_ref\": next(_ref_counter)}\n\n if not has_checked_keys:\n for key in merge_columns:\n if key not in row.keys():\n msg = \"Column **{}** not in csv file {}\"\n raise KeyError(msg.format(key, f_csv))\n has_checked_keys = True\n\n if target_column in row.keys():\n msg = \"Generated column **{}** already in csv file {}\"\n raise KeyError(msg.format(target_column, f_csv))\n\n text = []\n for key in merge_columns:\n val = row[key].strip()\n if not val:\n continue\n if val[-1] not in \".?!,\":\n val += \".\"\n text.append(val)\n\n output[target_column] = \"\\n\".join(text).strip()\n\n if CSV_HANDLE is None:\n CSV_HANDLE = csv.DictWriter(FOUT, sorted(output.keys()))\n CSV_HANDLE.writeheader()\n\n CSV_HANDLE.writerow(output)\n total_rows += 1\n\n logger.info(\"Imported {}, {} entries\".format(f_csv, total_rows))",
"def insertdata():\n import models \n from models import Ngrams\n from models import Phrases\n allphrases = {}\n phrase_index= {}\n # Reading 100000 questions for this project. Original data was 7GB \n # and very large to process.\n r = engine.execute('select * from questions where id < 100000')\n data = r.fetchall()\n for row in data:\n answer = row[4]\n # Tokenizing answer\n ans = answer.split()\n for i in range(len(ans)):\n # Running inner loop to generate trigrams\n for j in range(i+1, len(ans)+1):\n phrase = \" \".join(ans[i:j])\n # Getting only 3 grams instead of all ngrams\n if len(phrase.split()) < 4:\n print row[0]\n lemmaphrase = lemmatize(ans[i:j])\n ng = Ngrams(row[0],phrase, lemmaphrase)\n db_session.add(ng)\n phrase = phrase.lower()\n if phrase not in allphrases:\n allphrases[phrase] = [phrase.lower()]\n phrase_index[phrase] = newPhraseInfo(phrase)\n phrase_index[phrase][\"count\"] += 1\n phrase_index[phrase][\"ids\"].add(str(row[0]))\n db_session.commit()",
"def import_sites(input_csv=\"../2012_ROOMS_site_info_sample.csv\"):\n reader = csv.DictReader(open(input_csv))\n for s in reader:\n number = s[\"Site ID\"]\n site = models.NewSite.all().filter('number =', number).get()\n if site:\n logging.info('site %s exists, skipping', number)\n continue\n else:\n site = models.NewSite(number=number)\n site.program = PROGRAM\n site.budget = int(s[\"Budgeted Cost in Campaign\"]) if s[\"Budgeted Cost in Campaign\"] else 0\n\n # Because Python 2.x csv module only reads ascii.\n def clean_s(k):\n return s[k].replace('\\n', ' ').replace('\\xe2', \"'\").replace('\\x80', \"'\").replace('\\x99', '').replace('\\xc3', '').replace('\\x95', '').replace('\\xb1', '').encode('ascii', 'replace')\n\n site.name = clean_s(\"Repair Application: Applicant's Name\")\n site.street_number = clean_s(\"Street Address\")\n site.city_state_zip = \"%s CA, %s\" % (\n clean_s(\"Repair Application: Recipient's City\"), \n clean_s(\"Repair Application: Recipient's Zip Code\"))\n site.applicant = clean_s(\"Repair Application: Applicant's Name\")\n site.applicant_home_phone = clean_s(\"Repair Application: Applicant Home Phone\")\n site.applicant_work_phone = clean_s(\"Repair Application: Applicant Work Phone\")\n site.applicant_mobile_phone = clean_s(\"Repair Application: Applicant Mobile Phone\")\n site.sponsor = clean_s(\"(Sponsor) Campaign Description\")\n site.rrp_test = clean_s(\"Repair Application: RRP Test Results\")\n site.rrp_level = clean_s(\"Repair Application: RRP Result Notes\")\n # site.roof = clean_s(\"Roof?\")\n site.jurisdiction = clean_s(\"Jurisdiction\")\n site.announcement_subject = clean_s(\"Announcement Subject\")\n site.announcement_body = clean_s(\"Announcement Body\")\n site.put()\n logging.info('put site %s', number)"
] |
[
"0.65137184",
"0.643069",
"0.60201544",
"0.5691",
"0.56182426",
"0.5514427",
"0.55112034",
"0.54825675",
"0.5477282",
"0.5451876",
"0.53347766",
"0.5326418",
"0.53094995",
"0.5309341",
"0.5289789",
"0.5280948",
"0.52332306",
"0.5228012",
"0.5208589",
"0.52083606",
"0.5201785",
"0.51814455",
"0.5152316",
"0.514598",
"0.51386666",
"0.5128695",
"0.5089342",
"0.507016",
"0.5063333",
"0.5062016"
] |
0.77328527
|
0
|
Input a google form mapped fieldnames file,
|
def gf_fieldnames(fn="forms-fields.txt"):
if fn is None:
return None
with open(fn) as f:
fieldnames = [fieldname.split(":")[0] for fieldname in f]
return fieldnames
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def populate_PDF_with_field_names(csv_file_name, pdf_file_name):\n field_mapping = get_field_list(csv_file_name)\n print \"UPDATING FORM VALUES\"\n update_form_values(pdf_file_name, 'out-' + pdf_file_name) # enumerate & fill the fields with their own names\n update_form_values(pdf_file_name, 'output-' + pdf_file_name, field_mapping) # update the form fields",
"def get_form_fields(infile):\n infile = PdfFileReader(open(infile, 'rb'))\n fields = _getFields(infile)\n return OrderedDict((k, v.get('/V', '')) for k, v in fields.items())",
"def read_field(file_name):\n\n f = open(file_name, 'r', encoding='utf-8', errors='ignore')\n data = dict()\n row = 1\n for i in f:\n n = 1\n i = i.strip('\\n')\n for symb in i:\n data[(row, n)] = symb\n n += 1\n row += 1\n return data",
"def get_field_list(filename):\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n field_mapping = {}\n for row in reader:\n field_mapping[row[0]] = row[1]\n return field_mapping",
"def load_field(self, filename,unmask=True,timeslice=None,fieldname=None,\n check_for_grid_info=False,grid_info=None,grid_type='HD',\n **grid_kwargs):\n\n print(\"Reading input from {0}\".format(filename))\n grid = gd.makeGrid(grid_type,**grid_kwargs)\n return np.loadtxt(filename,np.float64).reshape(grid.get_grid_dimensions())",
"def load_field(self,filename,unmask=True,timeslice=None,fieldname=None,\n check_for_grid_info=False,grid_info=None,grid_type='HD',\n **grid_kwargs):\n\n if not check_for_grid_info:\n grid = gd.makeGrid(grid_type,**grid_kwargs)\n print(\"Reading input from {0}\".format(filename))\n with netCDF4.Dataset(filename,mode='r',format='NETCDF4') as dataset:\n if check_for_grid_info:\n latitudes = None\n longitudes = None\n for latitude_names in ['lat','y']:\n fields = dataset.get_variables_by_attributes(name=latitude_names)\n if len(fields) >= 1:\n break\n if len(fields) == 1:\n latitudes = fields[0][:]\n for longitude_names in ['lon','long','x']:\n fields = dataset.get_variables_by_attributes(name=longitude_names)\n if len(fields) >= 1:\n break\n if len(fields) == 1:\n longitudes = fields[0][:]\n elif len(fields) > 1:\n raise RuntimeError(\"File {0} contains\"\n \" multiple longitude fields\".format(filename))\n elif len(fields) > 1:\n raise RuntimeError(\"File {0} contains\"\n \" multiple latitude fields\".format(filename))\n if longitudes is not None:\n grid = gd.makeGrid('LatLong',nlat=len(latitudes),nlong=len(longitudes))\n grid.set_latitude_points(np.asarray(latitudes))\n grid.set_longitude_points(np.asarray(longitudes))\n grid_info.append(grid)\n else:\n grid = gd.makeGrid(grid_type,**grid_kwargs)\n fields = None\n if fieldname is None:\n potential_field_names = ['Topo','topo','field_value','orog','z','ICEM',\n 'DEPTO','usurf','bats','slm','FDIR','lsmask',\n 'lake_field','river_flow',\n 'basin_catchment_numbers','rdirs','lsm',\n \"cumulative_flow\",\"catchments\",\n \"cumulative_flow_to_ocean\",\"acc\",\"catch\",\"rdir\"]\n else:\n potential_field_names = [fieldname]\n for potential_field_name in potential_field_names:\n fields = dataset.get_variables_by_attributes(name=potential_field_name)\n if len(fields) >= 1:\n break\n if len(fields) == 1:\n if timeslice is not None:\n field_slice = fields[0][timeslice,:,:]\n else:\n field_slice = fields[0][:]\n if grid_type==\"generic_1d\":\n if unmask:\n return np.asarray(field_slice)\n else:\n return np.asanyarray(field_slice)\n else:\n if unmask:\n return np.asarray(field_slice.reshape(grid.get_grid_dimensions()))\n else:\n return np.asanyarray(field_slice.reshape(grid.get_grid_dimensions()))\n elif len(fields) > 1:\n raise RuntimeError('File {0} contains multiple fields'.format(filename))\n else:\n raise RuntimeError('Field not found in file {0}'.format(filename))",
"def import_form(input_file):\n\n headers = {\n 'content-type': 'application/json',\n }\n\n data = input_file\n\n url = 'https://reactome.org/AnalysisService/import/form'\n\n try:\n response = requests.post(url=url, headers=headers, data=data)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n return response.json()\n else:\n print(\"Status code returned a value of %s\" % response.status_code)",
"def write_field(self,filename,field,griddescfile=None,fieldname=None):\n\n pass",
"def individual_formfields():\n # Instantiate Consent Tracker\n consent = s3db.auth_Consent(processing_types = VOL_CONSENT_OPTIONS)\n\n formfields = [utable.first_name,\n utable.last_name,\n Field(\"addr_L3\",\n label = T(\"Location\"),\n requires = IS_IN_SET(districts_and_uk),\n ),\n Field(\"addr_street\",\n label = T(\"Street Address\"),\n ),\n Field(\"addr_postcode\",\n label = T(\"Postcode\"),\n ),\n Field(\"mobile\",\n label = T(\"Contact Number (Preferred)\"),\n requires = IS_PHONE_NUMBER_MULTI(),\n comment = DIV(_class = \"tooltip\",\n _title = \"%s|%s\" % (T(\"Contact Number (Preferred)\"),\n T(\"Ideally a Mobile Number, so that we can send you Text Messages.\")),\n ),\n ),\n Field(\"home\",\n label = T(\"Contact Number (Secondary)\"),\n requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI()),\n ),\n utable.email,\n utable[passfield],\n # Password Verification Field\n Field(\"password_two\", \"password\",\n label = auth_messages.verify_password,\n requires = IS_EXPR(\"value==%s\" % \\\n repr(request.vars.get(passfield)),\n error_message = auth_messages.mismatched_password,\n ),\n ),\n\n # Skills\n s3db.hrm_multi_skill_id(empty = False,\n label = T(\"Volunteer Offer\"),\n ),\n Field(\"skills_details\",\n label = T(\"Please specify details\"),\n ),\n Field(\"certificates\", \"list:string\",\n label = T(\"Qualifications\"),\n requires = IS_IN_SET(certificates, multiple=True),\n widget = S3MultiSelectWidget(header=\"\",\n selectedList=3),\n ),\n Field(\"experience\",\n label = T(\"Skills and Experience\"),\n widget = lambda f, v: \\\n s3_comments_widget(f, v, _placeholder = \"e.g. Co-ordination, Event Management, PCV qualified.\")\n ),\n Field(\"resources\",\n label = T(\"Offers of Resources\"),\n widget = lambda f, v: \\\n s3_comments_widget(f, v, _placeholder = \"e.g. Minibus.\")\n ),\n Field(\"where_operate\", \"list:string\",\n label = T(\"Where would you be willing to volunteer?\"),\n requires = IS_IN_SET(districts, multiple=True),\n widget = S3MultiSelectWidget(header=\"\",\n selectedList=3),\n ),\n Field(\"travel\", \"integer\",\n label = T(\"Willing to Travel?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"slots\", \"list:string\",\n label = T(\"Times\"),\n requires = IS_IN_SET(slots, multiple=True),\n widget = S3MultiSelectWidget(header=\"\",\n selectedList=3),\n ),\n Field(\"significant_physical\", \"integer\",\n label = T(\"That require significant physical activity (including lifting and carrying) and may involve being outdoors (e.g. clean up of affected properties)\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"some_physical\", \"integer\",\n label = T(\"That require some physical activity and may involve being outdoors (e.g. door knocking)\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"little_physical\", \"integer\",\n label = T(\"That require little physical activity and are based indoors (e.g. 
preparing refreshments)\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"health_details\",\n label = T(\"If you wish, you can give us some further information on any fitness, medical or mobility issues that might limit the kind of activities you are able to volunteer for; this will help us to suggest suitable opportunities for you\"),\n ),\n Field(\"faith_requirements\", \"integer\",\n label = T(\"Do you have any faith requirements that you would like help with if you are coming to Support Cumbria?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"faith_requirements_details\",\n label = T(\"If Yes please outline\"),\n ),\n Field(\"emergency_contact_name\",\n label = T(\"Contact Name\"),\n requires = IS_NOT_EMPTY(),\n ),\n Field(\"emergency_contact_number\",\n label = T(\"Contact Number\"),\n requires = IS_PHONE_NUMBER_MULTI(),\n ),\n Field(\"emergency_contact_relationship\",\n label = T(\"Relationship\"),\n requires = IS_NOT_EMPTY(),\n ),\n Field(\"workplace\", \"integer\",\n label = T(\"Are you volunteering under your workplace volunteering scheme?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"workplace_details\",\n label = T(\"If yes please name your employer\"),\n ),\n Field(\"dbs\", \"integer\",\n label = T(\"Are you DBS checked?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n #Field(\"convictions\", \"integer\",\n # label = T(\"Do you have any unspent convictions?\"),\n # comment = T(\"Please tick 'Yes' if you have any convictions that are not yet spent under the Rehabilitation of Offenders Act 1974. The term 'convictions' is used to refer to any sentence or disposal issued by a court. If all your convictions are spent, you can tick 'No'. If you're not sure if your convictions are unspent or spent, you can use a tool available at www.disclosurecalculator.org.uk and read guidance at hub.unlock.org.uk/roa\"),\n # requires = IS_IN_SET({0: T(\"No\"),\n # 1: T(\"Yes\"),\n # }),\n # widget = lambda f, v: \\\n # SQLFORM.widgets.radio.widget(f, v,\n # style=\"divs\"),\n # ),\n # Consent (GDPR + FOC)\n Field(\"consent\",\n label = T(\"Consent\"),\n widget = consent.widget,\n ),\n ]\n\n required_fields = [\"first_name\",\n \"last_name\",\n \"addr_L3\",\n \"addr_street\",\n \"addr_postcode\",\n \"mobile\",\n \"emergency_contact\",\n \"where_operate\",\n ]\n\n return formfields, required_fields",
"def fill_by_name(self, fields, prefix=\"\"):\n self.fill({'[name=\"%s%s\"]' % (prefix, k): v for k, v in fields.items()})",
"def _read_form_strings(vba):\n\n try:\n r = []\n skip_strings = [\"Tahoma\", \"Tahomaz\"]\n for (subfilename, stream_path, form_string) in vba.extract_form_strings():\n\n # Skip default strings.\n if (form_string in skip_strings):\n continue\n # Skip unprintable strings.\n if (not all((ord(c) > 31 and ord(c) < 127) for c in form_string)):\n continue\n\n # Save the stream name.\n stream_name = stream_path.replace(\"Macros/\", \"\")\n if (\"/\" in stream_name):\n stream_name = stream_name[:stream_name.index(\"/\")]\n\n # Save the stream name and form string.\n r.append((stream_name, form_string))\n\n # Done.\n return r\n\n except Exception as e:\n log.error(\"Cannot read form strings. \" + str(e))\n return []",
"def load_field(self, filename,unmask=True,timeslice=None,\n fieldname=None,check_for_grid_info=False,\n grid_info=None,grid_type='HD',**grid_kwargs):\n\n grid = gd.makeGrid(grid_type,**grid_kwargs)\n print(\"Reading input from {0}\".format(filename))\n mgnr = f2py_mg.f2py_manager(path.join(fortran_source_path,\n \"mod_topo_io.f90\"), func_name=\"read_topo\")\n data = mgnr.run_current_function_or_subroutine(filename,*grid.get_grid_dimensions())\n #rotate the array 90 clockwise (i.e. 270 degrees anticlockwise); also flip\n #to match symmetry of other loading methods\n return np.fliplr(np.rot90(data,k=3))",
"def load_field(self,filename,unmask=True,timeslice=None,\n fieldname=None,check_for_grid_info=False,\n grid_info=None,grid_type='HD',**grid_kwargs):\n\n grid = gd.makeGrid(grid_type,**grid_kwargs)\n with scipyio.FortranFile(filename,mode='r') as f: #@UndefinedVariable:\n print(\"Reading input from {0}\".format(filename))\n return f.read_record(self.data_type).reshape(grid.get_grid_dimensions())",
"def form_File(request):\n schema = schemaish.Structure()\n schema.add('myFile', schemaish.File())\n form = formish.Form(schema, 'form')\n return form",
"def load_field(self,filename,unmask=True,timeslice=None,\n fieldname=None,check_for_grid_info=False,\n grid_info=None,grid_type='HD',**grid_kwargs):\n\n pass",
"def read_inputs_field_types():\n inputs = yaml.load(\n open(os.path.join(os.path.dirname(__file__), 'inputs.yml')).read())\n\n for db in inputs.keys():\n inputs[db]['fieldnames'] = [field['name']for field in inputs[db]['fields']]\n return inputs",
"def createFieldMapping(sgidPoints):\n # Create field mappings\n sgidFMs = arcpy.FieldMappings()\n\n # Perform some field renaming\n mapPairs = [\n ('State', 'State'),\n ('City', 'Inc_Muni'),\n ('CountyID', 'County'),\n ('ZipCode', 'Zip_Code'),\n ('PrefixDir', 'StN_PreDir'),\n ('StreetName', 'StreetName'),\n ('StreetType', 'StN_PosTyp'),\n ('SuffixDir', 'StN_PosDir'),\n ('AddNum', 'Add_Number'),\n ('LandmarkName', 'landmkName'),\n ('Building', 'Building'),\n ('UnitType', 'Unit'),\n ('AddSource', 'AddAuth'),\n ('AddSystem', 'UniqWithin'),\n ('LoadDate', 'LastUpdate')]\n\n for p in mapPairs:\n print p\n sgidFMs.addFieldMap(getRenameFieldMap(sgidPoints, p[0], p[1]))\n\n return sgidFMs",
"def _getFormFile(self, filename):\n namespace = {}\n global_namespace = {\n 'sys': sys,\n }\n folder, file = os.path.split(filename)\n form_name = os.path.splitext(file)[0]\n with open(os.path.join(OUTPUT_FOLDER, folder, '{}.py'.format(form_name)), 'r') as f:\n text = f.read()\n exec(text, global_namespace, namespace)\n #\n return namespace[form_name], namespace['FormControls_{}'.format(form_name)]",
"def find_all_field(smali_file_list):\n for smali_file in smali_file_list: # For each file\n for smali_line in u.open_file_input(smali_file): # For each line\n if re.search(r'^([ ]*?)\\.field', smali_line) is not None: # If this line contains a field definition\n field_name = get_match_line(smali_line)\n if field_name is not None:\n yield field_name # Return the field name\n else:\n print smali_line, # Print back the line unchanged",
"def get_fields_by_names(form, field_names):\n fields = []\n for field_name in field_names.split(','):\n field = get_field_by_name(form, field_name)\n if field:\n fields.append(field)\n return fields",
"def file_name_request(self):\n self.file_name = input(\"What is the name of the input file?\\n>>>\")",
"def __init__(self, *args, **kwargs):\n step_numeral, step_name = kwargs.pop('step', (None, None))\n corp_app = kwargs.pop('corp_app', '')\n file_path = kwargs.pop('file_path', '')\n\n super(CSVForm, self).__init__(*args, **kwargs)\n\n if step_numeral == 1:\n \"\"\"\n Basic Form: Application & File Uploader\n \"\"\"\n self.fields['corp_app'] = forms.ModelChoiceField(\n label=_('Corp Application'), queryset=CorpApp.objects.all())\n\n self.fields['update_option'] = forms.CharField(\n widget=forms.RadioSelect(\n choices=(('skip', 'Skip'),\n ('update', 'Update Blank Fields'),\n ('override', 'Override All Fields'),)),\n initial='skip',\n label=_('Select an Option for the Existing Records:')\n )\n\n self.fields['csv'] = forms.FileField(label=_(\"CSV File\"))\n\n if step_numeral == 2:\n \"\"\"\n Basic Form + Mapping Fields\n \"\"\"\n\n # file to make field-mapping form\n csv = csv_to_dict(file_path)\n\n # choices list\n choices = csv[0].keys()\n\n # make tuples; sort tuples (case-insensitive)\n choice_tuples = [(c, c) for c in csv[0].keys()]\n\n # insert blank option\n choice_tuples.insert(0, ('', ''))\n choice_tuples = sorted(choice_tuples, key=lambda c: c[0].lower())\n\n app_fields = CorpField.objects.filter(corp_app=corp_app)\n required_fields = ['name', 'corporate_membership_type']\n for field in app_fields:\n if field.field_type not in ['section_break', 'page_break']:\n if field.field_name:\n field_key = field.field_name\n else:\n field_key = \"field_%s\" % field.id\n is_required = False\n if field_key in required_fields:\n is_required = True\n self.fields[field_key] = ChoiceField(**{\n 'label': field.label,\n 'choices': choice_tuples,\n 'required': is_required,\n })\n for choice in choices:\n if (field.label).lower() == choice.lower() or \\\n field_key.lower() == choice.lower():\n self.fields[field_key].initial = choice\n\n extra_fields = (('secret_code', 'Secret Code'),\n ('join_dt', 'Join Date'),\n ('renew_dt', 'Renew Date'),\n ('expiration_dt', 'Expiration Date'),\n ('approved', 'Approved'),\n ('dues_rep', 'Dues Representative'),\n ('status', 'Status'),\n ('status_detail', 'Status Detail'))\n # corp_memb_field_names = [smart_str(field.name)\n # for field in CorporateMembership._meta.fields]\n for key, label in extra_fields:\n if key not in self.fields.keys():\n self.fields[key] = ChoiceField(**{\n 'label': label,\n 'choices': choice_tuples,\n 'required': False,\n })\n for choice in choices:\n if label.lower() == choice.lower() or \\\n key.lower() == choice.lower():\n self.fields[key].initial = choice",
"def __obtain_csv_fieldnames__(self, csvfile):\n self.__fieldnames__ = csvfile.readline()\n self.__obtain_csv_delimiter__(self.__fieldnames__)\n self.__fieldnames__ = self.__remove_break_line__(self.__fieldnames__)\n self.__fieldnames__ = self.__split_for_delimiter__(self.__fieldnames__)",
"def parse_uinput_mapping(name, mapping):\n axes, buttons, mouse, mouse_options = {}, {}, {}, {}\n description = \"ds4drv custom mapping ({0})\".format(name)\n\n for key, attr in mapping.items():\n key = key.upper()\n if key.startswith(\"BTN_\") or key.startswith(\"KEY_\"):\n buttons[key] = attr\n elif key.startswith(\"ABS_\"):\n axes[key] = attr\n elif key.startswith(\"REL_\"):\n mouse[key] = attr\n elif key.startswith(\"MOUSE_\"):\n mouse_options[key] = attr\n\n create_mapping(name, description, axes=axes, buttons=buttons,\n mouse=mouse, mouse_options=mouse_options)",
"def get_field_mapping_filename(field_name: str, config_location: str) -> str:\n return os.path.join(config_location, field_name + \".csv\")",
"def input_template(template, fields):\n editor = os.environ.get('EDITOR', '/usr/bin/vim')\n with tempfile.NamedTemporaryFile('w+t') as ofile:\n ofile.write(template % fields)\n ofile.flush()\n user_command = '%s %s' % (editor, ofile.name)\n if os.system(user_command) != 0:\n raise Error('Error acquiring user input (command was %r).' % user_command)\n with open(ofile.name, 'r') as ifile:\n filled_template = ifile.read()\n\n fields = dict(parse_template(filled_template))\n return fields",
"def parse_spider_fname(mystr, *fieldvals):\n\t# helper functions and classes\n\tdef rm_stack_char(mystr):\n\t\t\"Helper function to remove a stack character if it exists\"\n\t\tstackloc = mystr.find(\"@\")\n\t\tif stackloc != -1: \n\t\t\t# there's an '@' somewhere\n\t\t\tif len(mystr) - 1 == stackloc:\n\t\t\t\t# It's at the end of the string\n\t\t\t\treturn mystr[:-1]\n\t\t\telse:\n\t\t\t\t# '@' not at the end, so it's an error\n\t\t\t\traise ValueError, \"Invalid format: misplaced '@'.\"\n\t\telse:\n\t\t\t# no '@' at all\n\t\t\treturn mystr\n\tclass Fieldloc:\n\t\t\"Helper class to store description of a field\"\n\t\tdef __init__(self, begin, end):\n\t\t\tself.begin = begin\n\t\t\tself.end = end\n\t\tdef count(self):\n\t\t\t\"Size of the field (including braces)\"\n\t\t\treturn self.end - self.begin + 1\n\tdef find_fields(mystr):\n\t\t\"Helper function to identify and validate fields in a string\"\n\t\tfields = []\n\t\tloc = 0\n\t\twhile True:\n\t\t\tbegin = mystr.find('{', loc)\n\t\t\tif begin == -1: break\n\t\t\tend = mystr.find('}', begin)\n\t\t\tfield = Fieldloc(begin, end)\n\t\t\t# check validity\n\t\t\tasterisks = mystr[begin+1:end]\n\t\t\tif asterisks.strip(\"*\") != \"\":\n\t\t\t raise ValueError, \"Malformed {*...*} field: %s\" % \\\n\t\t\t\tmystr[begin:end+1]\n\t\t\tfields.append(Fieldloc(begin, end))\n\t\t\tloc = end\n\t\treturn fields\n\t# remove leading whitespace\n\tmystr.strip()\n\t# remove stack character (if it exists)\n\tmystr = rm_stack_char(mystr)\n\t# locate fields to replace\n\tfields = find_fields(mystr)\n\tif len(fields) != len(fieldvals):\n\t\t# wrong number of fields?\n\t\traise ValueError, \"Number of field values provided differs from\" \\\n\t\t\t\"the number of {*...*} fields.\"\n\tnewstrfrags = []\n\tloc = 0\n\tfor i, field in enumerate(fields):\n\t\t# text before the field\n\t\tnewstrfrags.append(mystr[loc:field.begin])\n\t\t# replace the field with the field value\n\t\tfieldsize = field.count() - 2\n\t\tfielddesc = \"%0\" + str(fieldsize) + \"d\"\n\t\tnewstrfrags.append(fielddesc % fieldvals[i])\n\t\tloc = field.end + 1\n\tnewstrfrags.append(mystr[loc:])\n\treturn \"\".join(newstrfrags)",
"def write_fields(self, filename, fields,griddescfile=None,fieldnames=None):\n\n nlat,nlong = fields[0].get_grid().get_grid_dimensions()\n if fieldnames is None:\n fieldnames = ['field_value']*len(fields)\n print(\"Writing output to {0}\".format(filename))\n if griddescfile is not None:\n output_filename=filename\n filename=path.splitext(filename)[0] + '_temp' + path.splitext(filename)[1]\n with netCDF4.Dataset(filename,mode='w',format='NETCDF4') as dataset:\n dataset.createDimension(\"latitude\",nlat)\n dataset.createDimension(\"longitude\",nlong)\n for field,fieldname in zip(fields,fieldnames):\n data_was_bool = False\n if field.get_data().dtype == np.bool_:\n field.set_data(field.get_data().astype(np.int32))\n data_was_bool=True\n field_values = dataset.createVariable(fieldname,field.get_data().dtype,\n ('latitude','longitude'))\n field_values[:,:] = field.get_data()\n if data_was_bool:\n field.set_data(field.get_data().astype(np.bool_))\n if griddescfile is not None:\n cdo_instance = cdo.Cdo()\n cdo_instance.setgrid(griddescfile,input=filename,output=output_filename)\n os.remove(filename)",
"def import_data(self, form):\n error_msg = \"\"\n \n try:\n # Set insert order\n columns = \"organization, contact, email, phone, data_url, \\\n project_name_short, project_name, project_description, timeline_start, timeline_finish, project_funder,\\\n data_target, location_description, site_count, data_collector, data_type, data_format, data_policies, \\\n keyword, other, location, shp_file\"\n \n # Gather submitted for values\n values = []\n # Source data\n values.append( '\"%s\"' % form.getvalue('organization') )\n values.append( '\"%s\"' % form.getvalue('contact') )\n values.append( '\"%s\"' % form.getvalue('email') )\n if form.getvalue('phone'):\n values.append( form.getvalue('phone') )\n else:\n values.append('NULL')\n values.append( '\"%s\"' % form.getvalue('source') )\n # Project data\n if len(form.getvalue('labelShort')) > 0:\n values.append( '\"%s\"' % form.getvalue('labelShort') )\n else:\n values.append( '\"%s\"' % form.getvalue('label') )\n values.append( '\"%s\"' % form.getvalue('label') )\n values.append( '\"%s\"' % form.getvalue('description') ) \n values.append( \"STR_TO_DATE('\"+ form.getvalue('timelineStart') +\"', '%m/%d/%Y')\" )\n values.append( \"STR_TO_DATE('\"+ form.getvalue('timelineFinish') +\"', '%m/%d/%Y')\" )\n values.append( '\"%s\"' % form.getvalue('funder') )\n # Meta data\n values.append( '\"%s\"' % form.getvalue('target') )\n values.append( '\"%s\"' % form.getvalue('locdescription') )\n values.append( form.getvalue('numsites') )\n values.append( '\"%s\"' % form.getvalue('collector') )\n values.append( '\"%s\"' % form.getvalue('datatype') )\n values.append( '\"%s\"' % form.getvalue('dataformat') )\n values.append( '\"%s\"' % form.getvalue('policies') )\n # Other Data\n values.append( '\"%s\"' % \" \".join(pattern.sub(' ', form.getvalue('keyword')).split()) )\n values.append( '\"%s\"' % form.getvalue('other') )\n # Shape file data \n zip_shp_file = form['shp_file'].file\n zip_shp_file_name = form.getvalue('shp_file') \n # Latitude/Longitude data\n lat = form.getvalue('lat')\n lng = form.getvalue('lng')\n \n # Build MySQL Geometry syntax\n locations = []\n json_data = \"\"\n if zip_shp_file_name:\n # Extract all files from compressed shapefile\n zip_shp_file_contents = zip_shp_file.read()\n with ZipFile(StringIO(zip_shp_file_contents), 'r') as zip_sf:\n temp_dir = mkdtemp(dir=self.base_dir+\"/tmp/\")\n zip_sf.extractall(path=temp_dir)\n path_to_shapefile = self.find_shapefile(temp_dir)\n \n #json_data = {'message':'DEBUG::Temp Dir:'+temp_dir}\n #self.return_message = json.dumps(json_data);\n #return\n \n # Set POLYGON GEOMETRY from shp file\n polygons,errors,warnings = self.set_poly_geo(path_to_shapefile[0]) \n \n # Regardless of errors process polygons\n for polygon in polygons:\n # Re-map polygon coordinates with spaces between lat and lng\n for idx, val in enumerate(polygon):\n # Reverse values so that latitude is first, then longitude\n val.reverse()\n polygon[idx] = \" \".join( map( str, val) )\n locations.append(\"GeomFromText('POLYGON((%s))')\" % (\",\".join(polygon)))\n \n # Send errors, if any\n errors_warnings = errors + warnings\n html_errors = \"<br>\".join(errors_warnings)\n json_data = {'message':html_errors}\n self.return_message = json.dumps(json_data);\n \n # If there are errors, warnings are OK, then return without inserting\n if len(errors) > 0: \n return\n elif lat and lng:\n # Set MySQL NULL value for shp contents\n zip_shp_file_contents = \"NULL\"\n # Set POINT GEOMETRY from latitude and longitude\n 
locations.append(\"GeomFromText('POINT(\"+lat+\" \"+lng+\")')\") \n else:\n json_data = {'message':'ERROR:: No Shape File nor Coordinates were found.'}\n self.return_message = json.dumps(json_data);\n return\n \n # For each location insert details into DB\n count = 0\n if len(locations) < 1:\n json_data = {'message':'ERROR:: Coordinates were not found.'}\n self.return_message = json.dumps(json_data);\n return\n \n for location in locations:\n if not location:\n json_data = {'message':'ERROR:: Empty location.'} \n self.return_message = json.dumps(json_data);\n return\n \n # Init reusable list to append location and shapefile\n locs_shps = []\n count = count+1\n \n # Build MySQL insert query\n locs_shps.append(location)\n locs_shps.append( '\"%s\"' % self.db.escape_string(zip_shp_file_contents) )\n \n insert_query = \"INSERT INTO calswim.GeoData (\"+columns+\") VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);\"\n insert_values = tuple(values+locs_shps)\n insert_query_with_values = insert_query % insert_values \n self.cursor.execute(insert_query_with_values)\n if json_data == \"\":\n json_data = {'message':'Data import successful'} \n \n # Commit queries\n self.db.commit()\n \n select_query = \"SELECT LAST_INSERT_ID() as id\"\n self.cursor.execute(select_query)\n row = self.cursor.fetchone()\n \n data_file = form['data_file']\n if data_file.filename:\n data_file_name = os.path.basename(data_file.filename) \n \n download_dir = self.base_dir +\"/downloads/\"+ str(row[0]) +\"/\" \n if not os.path.exists(download_dir):\n os.makedirs(download_dir)\n \n data_save_file = open(download_dir+data_file_name, \"w\")\n data_save_file.write(data_file.file.read())\n data_save_file.close\n \n update_query = \"\"\"UPDATE calswim.GeoData SET data_url=\"%(PATH)s\" WHERE gd_id=%(ID)s\"\"\" % {'PATH':\"/downloads/\"+ str(row[0]) +\"/\"+ data_file_name, 'ID':row[0]}\n self.cursor.execute(update_query) \n \n # Return JavaScript boolean to view \n self.return_message = json.dumps(json_data)\n except:\n e = sys.exc_info()[1]\n #json_data = {'message': error_msg+\" \"+str(e)}\n json_data = {'message': \"ERROR:: Please try again.\"} \n self.return_message = json.dumps(json_data)\n print >> self.errors, \"ERROR:: \"+error_msg+\" \"+str(e)\n \n # Delete temp files\n try:\n shutil.rmtree(temp_dir) # delete directory\n except:\n e = sys.exc_info()[1]\n print >> self.errors,\"ERROR:: \"+error_msg+\" \"+str(e)\n # Close DB connections \n self.cursor.close()",
"def get_fields():\n fields = []\n with open(\"rules\", \"r\") as f:\n for line in f:\n field, ranges = line.strip().split(\": \")\n r1, r2 = ranges.split(\" or \")\n range1 = get_range(r1)\n range2 = get_range(r2)\n fields.append(Field(field, range1, range2))\n return fields"
] |
[
"0.61632854",
"0.548621",
"0.5437756",
"0.54099333",
"0.538648",
"0.51360255",
"0.5062522",
"0.50576115",
"0.5052346",
"0.5046612",
"0.50351626",
"0.5026276",
"0.50158596",
"0.5007784",
"0.49943054",
"0.4981942",
"0.4961241",
"0.49573717",
"0.4890512",
"0.48849797",
"0.48737633",
"0.48713467",
"0.4828983",
"0.48008075",
"0.4778221",
"0.4762295",
"0.47393158",
"0.47302896",
"0.4721308",
"0.47098103"
] |
0.6461564
|
0
|
Remove all files from the list of files that are not mentioned in a changes file.
|
def remove_cruft_files(cls, files):
valid_files = []
for changes_file in files:
if cls.is_changes(changes_file):
LOG.debug("Checking: {c}".format(c=changes_file))
try:
with mini_buildd.misc.open_utf8(changes_file) as cf:
for fd in debian.deb822.Changes(cf).get("Files", []):
valid_files.append(fd["name"])
LOG.debug("Valid: {c}".format(c=fd["name"]))
valid_files.append(os.path.basename(changes_file))
except BaseException as e:
mini_buildd.config.log_exception(LOG, "Invalid changes file: {f}".format(f=changes_file), e, logging.WARNING)
for f in files:
if os.path.basename(f) not in valid_files:
# Be sure to never ever fail, just because cruft removal fails (instead log accordingly)
try:
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
LOG.warning("Cruft file (not in any changes file) removed: {f}".format(f=f))
except BaseException as e:
mini_buildd.config.log_exception(LOG, "Can't remove cruft from incoming: {f}".format(f=f), e, logging.CRITICAL)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def remove_files(self, files: Set[str]) -> None:\n for f in files:\n src = os.path.join(self.get_directory(), f)\n os.remove(src)",
"def remove_files(files):\n for file_name in files:\n os.remove(file_name)",
"def decide_files_to_delete(files: list) -> Set:\n files_to_keep = decide_files_to_keep(files)\n file_set = set(files)\n # using set theory: files_to_delete = files - files_to_keep\n return file_set.difference(files_to_keep)",
"def clean_data():\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)",
"def _remove_files(self):\n if hasattr(self, 'files'):\n for file in self.files:\n if os.path.exists(file):\n os.remove(file)\n\n self._remove_changes()\n self._remove_temporary_files()",
"def remove_files(file_list):\n###############################################################################\n for fpath in file_list:\n if os.path.exists(fpath):\n os.remove(fpath)\n # End if\n # End for",
"def git_removed_files(self):\n\n etc_tracked = self.repo.tracked_files('etc-tmp')\n for rpath in etc_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.etc_commits.removed.rpaths.append(rpath)\n self.etc_commits.removed.commit()\n\n master_tracked = self.repo.tracked_files('master-tmp')\n for rpath in master_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.master_commits.removed.rpaths.append(rpath)\n self.master_commits.removed.commit()",
"def remove_unused_files(self):\n\n response_list = self.client.api_call(\n f'files.list?'\n f'count=1000&'\n )\n assert response_list['ok']\n\n for file in [\n f for f in response_list['files']\n if not f['channels'] and not f['groups'] and not f['ims']\n ]:\n response_delete = self.client.api_call(\n f'files.delete?'\n f'file={file[\"id\"]}'\n )\n assert response_delete['ok']",
"def remove(args):\n files = []\n for path in args.files:\n if os.path.isdir(path):\n ft = filetree(path)\n files.extend(ft.filelist())\n else:\n files.append(path)\n for path in files:\n relpath = os.path.normpath(os.path.relpath(path, args.base))\n if relpath in args.cache:\n del args.cache[args.cache.index(relpath)]\n if args.delete and os.path.exists(path):\n os.remove(path)\n args.update = True\n return",
"def clean(vendor):\n remove_all(\n path\n for path in vendor.glob('*')\n if path.basename() != 'vendored.txt'\n )",
"def clean_files_for(file):\n for f in [file, f\"{file}.json\", f\"{file}.lock\"]:\n if os.path.isfile(f):\n os.remove(f)",
"def remove_files(files):\n for file in files:\n if os.path.exists(file):\n if file.startswith(\"./\") or file.startswith(\".\\\\\"):\n file = file[2:]\n if os.path.isdir(file):\n rmtree(file)\n else:\n os.unlink(file)",
"def _remove_changes(self):\n if os.path.exists(self.changes_file):\n os.remove(self.changes_file)",
"def clean(files):\n\tfor file in files:\n\t\ttry:\n\t\t\tos.remove(file)\n\t\texcept Exception as e:\n\t\t\tprint(e)",
"def delete_b_files(intermediate_files: List[File]) -> None:\n for f in intermediate_files:\n f.remove()",
"def remove_matching_files(\n removal_ids: Set[str],\n directory: str,\n ):\n for file_name in os.listdir(directory):\n file_id, _ = os.path.splitext(file_name)\n if file_id in removal_ids:\n os.remove(os.path.join(directory, file_name))",
"def remove_extra_files(self):\n\n for f in self._extra_files:\n if os.path.isfile(f):\n os.remove(f)",
"def remover_files():\n directory = os.getcwd()\n for file_name in glob.glob((\"{}/tmp/*\").format(directory)):\n remove(file_name)",
"def _deleteFiles(self, fileList):\n import os\n import glob\n\n for ent in fileList:\n # for fil in glob.glob(os.path.join(self._outputDir_, ent)):\n for fil in glob.glob(ent):\n try:\n if os.path.exists(fil):\n os.remove(fil)\n except OSError as e:\n self._reporter.writeError(\"Failed to delete '\" + fil + \"' : \" + e.strerror)\n raise",
"def pop_files(local_list: List[str], drop_files: List[str]) -> List[str]:\n files: List[str] = [f for f in local_list if f not in drop_files]\n return files",
"def clean():\n for f in [f for f in os.listdir() if f.endswith(\".part\")]:\n os.remove(f)",
"def cleanFiles(a_file_list):\n for entry in a_file_list:\n cmd = 'sudo rm ' + entry\n os.system(cmd)",
"def remove(self,filelist):\n\n self.ws.execute('svn remove %s' % (' '.join(filelist)))",
"def _purge_except_yaml(self, list_files): \n yaml_files = []\n for filepath in list_files:\n if filepath.endswith(\".yaml\"):\n yaml_files.append(filepath)\n\n return yaml_files",
"def ExcludeFiles(filters, files):\n if not filters:\n return files\n match = set()\n for file_filter in filters:\n excludes = set(fnmatch.filter(files, file_filter))\n match |= excludes\n return [name for name in files if name not in match]",
"def clean_files(self):\n self.filenames.clear()",
"def remove_hidden_files(files):\n hidden = []\n for f in files:\n if f.startswith(\".\"):\n hidden.append(f)\n\n for h in hidden:\n files.remove(h)",
"def clean_files(ftype, remove=False):\n import os\n files = os.listdir()\n found_files = [f for f in files if ftype in f]\n if remove:\n for ff in found_files:\n os.remove(ff)\n print(\"Removed {}\".format(ff))\n else:\n return found_files",
"def _identify_files_to_remove(self, job_result_filepaths, params):\r\n return []",
"def _identify_files_to_remove(self, job_result_filepaths, params):\r\n return []"
] |
[
"0.73587847",
"0.7211431",
"0.7047009",
"0.6971956",
"0.6960391",
"0.69041646",
"0.6855135",
"0.6853807",
"0.684943",
"0.6742613",
"0.67363465",
"0.6628714",
"0.66005105",
"0.6550325",
"0.65438914",
"0.65044606",
"0.64617664",
"0.64221704",
"0.6410761",
"0.63926953",
"0.63668877",
"0.636552",
"0.63518023",
"0.63395613",
"0.6300935",
"0.6286882",
"0.62831056",
"0.62759155",
"0.62720436",
"0.62720436"
] |
0.7371116
|
0
|
Remove cruft files from incoming.
|
def remove_cruft(cls):
cls.remove_cruft_files(["{p}/{f}".format(p=mini_buildd.config.INCOMING_DIR, f=f) for f in os.listdir(mini_buildd.config.INCOMING_DIR)])
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def remove_cruft_files(cls, files):\n valid_files = []\n for changes_file in files:\n if cls.is_changes(changes_file):\n LOG.debug(\"Checking: {c}\".format(c=changes_file))\n try:\n with mini_buildd.misc.open_utf8(changes_file) as cf:\n for fd in debian.deb822.Changes(cf).get(\"Files\", []):\n valid_files.append(fd[\"name\"])\n LOG.debug(\"Valid: {c}\".format(c=fd[\"name\"]))\n\n valid_files.append(os.path.basename(changes_file))\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Invalid changes file: {f}\".format(f=changes_file), e, logging.WARNING)\n\n for f in files:\n if os.path.basename(f) not in valid_files:\n # Be sure to never ever fail, just because cruft removal fails (instead log accordingly)\n try:\n if os.path.isdir(f):\n shutil.rmtree(f)\n else:\n os.remove(f)\n LOG.warning(\"Cruft file (not in any changes file) removed: {f}\".format(f=f))\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Can't remove cruft from incoming: {f}\".format(f=f), e, logging.CRITICAL)",
"def clean(vendor):\n remove_all(\n path\n for path in vendor.glob('*')\n if path.basename() != 'vendored.txt'\n )",
"def _clean_files(self):\n if self.delfiles & 1:\n ProcUtils.remove(self.okm)\n if self.delfiles & 2:\n ProcUtils.remove(self.hkm)\n if self.delfiles & 4:\n ProcUtils.remove(self.qkm)\n if self.delfiles & 8:\n ProcUtils.remove(self.obc)\n\n if self.log is False:\n ProcUtils.remove(self.pcf_file)\n base = os.path.basename(self.okm)\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogReport', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogStatus', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogUser', base])))",
"def clean_files(self):\n self.filenames.clear()",
"def _clean_input_dir():\n for existing_file in os.listdir(join(input_dir, 'analysis')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'analysis', existing_file))",
"def _clean_input_dir():\n for existing_file in os.listdir(join(input_dir, 'fitting')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'fitting', existing_file))",
"def clean(self):\n files = ['CHG', 'CHGCAR', 'POSCAR', 'INCAR', 'CONTCAR',\n 'DOSCAR', 'EIGENVAL', 'IBZKPT', 'KPOINTS', 'OSZICAR',\n 'OUTCAR', 'PCDAT', 'POTCAR', 'vasprun.xml',\n 'WAVECAR', 'XDATCAR', 'PROCAR', 'ase-sort.dat',\n 'LOCPOT', 'AECCAR0', 'AECCAR1', 'AECCAR2',\n 'WAVECAR.GTO', 'vasp.out', 'vasp.err']\n for f in files:\n try:\n os.remove(f)\n except OSError:\n pass",
"def clean_data():\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)",
"def clean_data(self, path, exclude_msgtypes=None):",
"def scrub():\n\n\tlocal(\"rm -fr dist build\")\n\tlocal(\"find . -name \\\"*.pyc\\\" -exec rm '{}' ';'\")",
"def remove_unused_files(self):\n\n response_list = self.client.api_call(\n f'files.list?'\n f'count=1000&'\n )\n assert response_list['ok']\n\n for file in [\n f for f in response_list['files']\n if not f['channels'] and not f['groups'] and not f['ims']\n ]:\n response_delete = self.client.api_call(\n f'files.delete?'\n f'file={file[\"id\"]}'\n )\n assert response_delete['ok']",
"def clean():\n for f in [f for f in os.listdir() if f.endswith(\".part\")]:\n os.remove(f)",
"def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())",
"def remove_extra_files(self):\n\n for f in self._extra_files:\n if os.path.isfile(f):\n os.remove(f)",
"def dev_clean():\n clean_files(\"csv\", True)\n clean_files(\"jsontxt\", True)",
"def cleanup() -> None:\n\n for fname in glob(os.path.join(tdir, 'alexandria.*')):\n if os.path.splitext(fname)[1] not in {'.c', '.h'}:\n os.unlink(fname)",
"def clean_flatbuffer_binaries():\n for element in FLATBUFFERS_CONVERSION_DATA:\n for json in element.input_files:\n path = processed_json_path(json)\n if os.path.isfile(path):\n os.remove(path)",
"def clean_file(file_to_clean):\n\n logging.info(f'Cleaning file = {file_to_clean}')\n new_file_name = (file_to_clean.split('/')[-1]).split('.')[0]\n text = open(f\"{new_file_name}\", \"w\")\n lecture = open(f\"{file_to_clean}\", \"r\")\n for line in lecture:\n if not ('[' in line):\n line = line.replace('#', '')\n text.write(line)\n text.close()\n lecture.close()\n logging.info(f'File = {file_to_clean} Cleaned')\n synthesis_file(new_file_name)",
"def clean():\n clean_files()",
"def clean_files_for(file):\n for f in [file, f\"{file}.json\", f\"{file}.lock\"]:\n if os.path.isfile(f):\n os.remove(f)",
"def clean_files(ftype, remove=False):\n import os\n files = os.listdir()\n found_files = [f for f in files if ftype in f]\n if remove:\n for ff in found_files:\n os.remove(ff)\n print(\"Removed {}\".format(ff))\n else:\n return found_files",
"def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... Done.')",
"def clean(files):\n\tfor file in files:\n\t\ttry:\n\t\t\tos.remove(file)\n\t\texcept Exception as e:\n\t\t\tprint(e)",
"def test_upload_area_cleanup(self):\n vis2_uvid='urn:mrn:stm:service:instance:furuno:vis2'\n p = Path('import')\n files = list(p.glob('**/urn:mrn:s124:*'))\n for item in files:\n print(item)\n os.remove(str(item))\n pass",
"def cleanup(e):\n for f in e.files:\n try:\n if os.path.isfile(f):\n os.remove(f)\n except OSError:\n continue\n\n return",
"def clean():\n for dirpath, dirnames, filenames in os.walk('.'):\n for filename in filenames:\n if filename.endswith('.pyc') or filename.endswith('.pyo'):\n full_pathname = os.path.join(dirpath, filename)\n click.echo('Removing {}'.format(full_pathname))\n os.remove(full_pathname)",
"def _remove_files(self):\n if hasattr(self, 'files'):\n for file in self.files:\n if os.path.exists(file):\n os.remove(file)\n\n self._remove_changes()\n self._remove_temporary_files()",
"def withdraw(self):\n files = self._file_list\n for f in files:\n remove(str(f))\n self._file_list = []\n self._filename = \"\"",
"def removeFilenameFilter(call, args=(), kwargs={}, nodeClass='*'):",
"def _cleanUp(self):\r\n limit = datetime.now() - timedelta(seconds=self._timeout)\r\n\r\n toClean = [msg for msg in self._incompleteMsgs if msg.older(limit)]\r\n\r\n if toClean:\r\n for msg in toClean:\r\n self._incompleteMsgs.remove(msg)\r\n\r\n log.msg('{0} incomplete messages have been dropped '\r\n 'from assembler.'.format(len(toClean)))\r\n\r\n toClean = [uri for uri, (_, timestamp) in self._binaries.iteritems()\r\n if timestamp < limit]\r\n\r\n if toClean:\r\n for uri in toClean:\r\n del self._binaries[uri]\r\n\r\n log.msg('{0} unused binaries have been dropped '\r\n 'from assembler.'.format(len(toClean)))"
] |
[
"0.7069261",
"0.65022355",
"0.6483339",
"0.6470212",
"0.63693684",
"0.63030815",
"0.62388057",
"0.6237357",
"0.61482847",
"0.61369854",
"0.6103959",
"0.6088915",
"0.60184264",
"0.5994813",
"0.5988947",
"0.5985742",
"0.59550464",
"0.593584",
"0.5918022",
"0.5881333",
"0.5867643",
"0.5837925",
"0.5817305",
"0.5789348",
"0.5753837",
"0.5736902",
"0.5735758",
"0.57278526",
"0.56854665",
"0.5658378"
] |
0.7153117
|
0
|
Requeue all existing changes in incoming. We must feed the user uploads first, so the daemon does not get any yet-unknown build results (hence the sorting).
|
def requeue_changes(cls, queue):
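    # Sort key: user uploads (no "*mini-buildd-build*" match) sort as 0 and are fed
    # first; build results sort as 1 and are re-queued last, per the docstring above.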
for c in sorted(cls.get_changes(), key=lambda c: 1 if fnmatch.fnmatch(c, "*mini-buildd-build*") else 0):
LOG.info("Incoming: Re-queuing: {c}".format(c=c))
queue.put(c)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def queueStatusAll():",
"def _process_incoming_queue_messages(self):\n while self._queue.qsize():\n msg = self._queue.get()\n if msg == MAP_UPDATE:\n self._clear_measurement_progress_label()\n self._presenter.update_map(self.chosen_value.get())",
"def _flush_enqueued(self):\n\n msgs = self.RPC.query.all()\n for msg in msgs:\n if msg.enqueued:\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n msg.delete()",
"def flushMsgs(self):\n\n self.queue = self.pre_queue[:]\n self.pre_queue = []",
"def run():\n ftpd_thread = mini_buildd.misc.run_as_thread(\n mini_buildd.ftpd.run,\n name=\"ftpd\",\n bind=get().model.ftpd_bind,\n queue=get().incoming_queue)\n\n builder_thread = mini_buildd.misc.run_as_thread(\n mini_buildd.builder.run,\n name=\"builder\",\n daemon_=get())\n\n while True:\n event = get().incoming_queue.get()\n if event == \"SHUTDOWN\":\n break\n\n try:\n LOG.info(\"Status: {0} active packages, {1} changes waiting in incoming.\".\n format(len(get().packages), get().incoming_queue.qsize()))\n\n changes = None\n changes = mini_buildd.changes.Changes(event)\n\n if changes.type == changes.TYPE_BREQ:\n # Build request: builder\n\n def queue_buildrequest(event):\n \"\"\"Queue in extra thread so we don't block here in case builder is busy.\"\"\"\n get().build_queue.put(event)\n mini_buildd.misc.run_as_thread(queue_buildrequest, name=\"build queuer\", daemon=True, event=event)\n\n else:\n # User upload or build result: packager\n mini_buildd.packager.run(\n daemon=get(),\n changes=changes)\n\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Invalid changes file\", e)\n\n # Try to notify\n try:\n with mini_buildd.misc.open_utf8(event, \"r\") as body:\n subject = \"INVALID CHANGES: {c}: {e}\".format(c=event, e=e)\n get().model.mbd_notify(subject, body.read())\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Invalid changes notify failed\", e)\n\n # Try to clean up\n try:\n if changes:\n changes.remove()\n else:\n os.remove(event)\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Invalid changes cleanup failed\", e)\n\n finally:\n get().incoming_queue.task_done()\n\n get().build_queue.put(\"SHUTDOWN\")\n mini_buildd.ftpd.shutdown()\n builder_thread.join()\n ftpd_thread.join()\n\n # keyrings.close() is not called implicitly; this leaves tmp files around.\n # There should be a nicer way, really...\n try:\n get().keyrings.close()\n except BaseException:\n pass",
"def refresh_queue(self):\n state = self.get_state()\n return state.refresh_queue()",
"def dump_queue(self):\n self.set_polling_many(self.queue)\n self.queue = []",
"def collect(self):\n while True:\n if not self._queue.empty():\n message = self._queue.get()\n self.working_on = message['job_type']\n else:\n break\n logging.info(\"Popped off message: {}\\n\".format(str(message)))\n\n if message['job_type'] == 'STOP':\n break\n\n if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':\n raise ValueError('{} is not a recognized task type'.format(message['job_type']))\n pass\n\n # Query all repos with repo url of given task\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'\n \"\"\".format(message['given']['github_url']))\n repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])\n\n try:\n if message['models'][0] == 'pull_requests':\n self.pull_requests_model(message, repo_id)\n elif message['models'][0] == 'pull_request_commits':\n self.pull_request_commits_model(message, repo_id)\n elif message['models'][0] == 'pull_request_files':\n self.pull_requests_graphql(message, repo_id)\n except Exception as e:\n register_task_failure(self, message, repo_id, e)\n pass",
"def process_todo_q(self):\n self.logger.debug(\"==> %s files to process\" % len(self.todo_queue))\n\n while len(self.todo_queue) > 0:\n if len(self.active_queue) == 0:\n # add job to [active] queue...\n self.active_queue.append(self.todo_queue.pop(0))\n job_id = self.active_queue[0][\"id\"]\n # ...log his 'id'...\n self.logger.info(\"[active/%s] processing file '%s'\"\n % (job_id,\n self.active_queue[0][\"objects_filename\"]))\n # ...and process it\n has_config, cfg_file = self._check_object_config()\n if has_config:\n self.logger.debug(\"[active/%s] config file '%s' is present\"\n % (job_id,\n cfg_file))\n self._set_target_symlinks()\n self._run_operations()\n else:\n self.logger.error(\"[active/%s] config file '%s' is absent\"\n % (job_id,\n cfg_file))\n self._send_alert(\"the configuration file is absent '%s'\" %\n cfg_file)\n\n # remove the job from the [active] queue\n self.active_queue = []\n else:\n raise ProfileProcessingError(\"only one job is permitted \\\n in [active] queue\")\n\n self.logger.info(\"all files has been processed\")",
"def update_on_demand_queue(cfg):\n\n # temp storage of all sprites to update\n update_list = list()\n\n while len(update_queue.update_queue) > 0:\n next_sprite = update_queue.update_queue.pop()\n update_list.append(next_sprite)\n #print(\"[update_on_demand_queue] Found in on demand queue:\", next_sprite.name)\n\n #print(\"[update_on_demand_queue] Updating on demand queue with contents:\", update_list)\n\n for s in update_list:\n s.update()",
"def trigger_update(self):\n update_thread = Thread(target=self.process_queued_msg)\n update_thread.setDaemon(True)\n update_thread.start()",
"def get_queued_commits(self, author_filter=None):\n with ChDir(self.directory):\n logger.debug('Verify Dir Has an upstream branch: {}'.format(self.directory))\n run_command('{} rev-parse --quiet @{{u}}..'.format(self.git_cmd), subprocess.PIPE)\n cmd_args = \"{} rev-list\".format(self.git_cmd)\n if author_filter:\n for author in (get_git_config_user_email(), get_git_config_user_name()):\n cmd_args += ' --author=\"{}\" '.format(author)\n cmd_args += ' @{u}..'\n logger.debug('Running command: {}'.format(cmd_args))\n self.queued_commits = run_command(cmd_args, subprocess.PIPE).splitlines()",
"def finish_stager_tasks(self):\n\n update_files = {}\n messages = []\n while not self.finished_queue.empty():\n file = self.finished_queue.get()\n update_files[file['content_id']] = {'status': ContentStatus.AVAILABLE,\n 'pfn_size': file['pfn_size'],\n 'pfn': file['pfn']}\n msg = {'event_type': 'FILE_AVAILABLE',\n 'payload': {'scope': file['scope'],\n 'name': file['name'],\n 'startEvent': file['min_id'],\n 'lastEvent': file['max_id'],\n 'pfn': file['pfn']},\n 'created_at': date_to_str(datetime.datetime.utcnow())}\n messages.append(msg)\n\n self.logger.info('Got %s staged outputs' % len(update_files))\n update_contents_by_id(update_files)\n\n if self.send_messaging:\n for msg in messages:\n self.messaging_queue.put(msg)",
"def collect(self):\n while True:\n if not self._queue.empty():\n message = self._queue.get()\n self.working_on = message['job_type']\n else:\n break\n logging.info(\"Popped off message: {}\\n\".format(str(message)))\n\n if message['job_type'] == 'STOP':\n break\n\n if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':\n raise ValueError('{} is not a recognized task type'.format(message['job_type']))\n pass\n\n \"\"\" Query all repos with repo url of given task \"\"\"\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'\n \"\"\".format(message['given']['git_url']))\n repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])\n\n try:\n if message['models'][0] == 'badges':\n self.badges_model(message, repo_id)\n except Exception as e:\n register_task_failure(self, logging, message, repo_id, e)\n pass",
"def queue_buildrequest(event):\n get().build_queue.put(event)",
"def updateCache(self):\n for root, dirs, files in os.walk(cachedFilesPath):\n for file in files:\n if file.endswith(cachedFileExtensionSuffix):\n path = os.getcwd()+'/'+cachedFilesPath+file\n with open(path, mode='r') as f:\n payload_json = f.read()\n payload_obj=jsonpickle.decode(payload_json)\n r= self.upload(payload_obj)\n if isinstance(r, types.NoneType):\n #do nothing\n print(\"\")\n else:\n if r.status_code == 200 :\n #uploaded!\n if cacheArhive:\n #move it to archive\n dst=os.getcwd()+'/'+cachedArchivePath+file\n shutil.move(path, dst)\n print(\"archived log: \", file)\n else:\n #delete it\n os.remove(path)",
"def queue (self):\n\n with self.__t.steps():\n import exception\n from lib import schema\n from lib import common\n from lib import process\n from lib import data\n from sqlalchemy import and_\n import json\n from collections import OrderedDict\n\n with schema.select(\"process_queue\", schema.table.process_queue.status==None) as select:\n for queued in select.limit(1000).all():\n blocked = False\n if len(queued.depend) > 0:\n for depend_id in queued.depend:\n depend = schema.select_one(\"series\", schema.table.series.id==depend_id)\n match_tags = json.dumps(OrderedDict(sorted(data.decode_tags(depend.tags).items())))\n if depend and schema.select_one(\"process_queue\", schema.table.process_queue.tags==match_tags):\n blocked = True\n break # queued dependencies\n if not blocked:\n queued.status = \"queued\"\n schema.save(queued)\n run.apply_async([queued.tags]) #queue process\n self.__t.ok()\n self.apply_async(queue=\"control\", countdown=30) #queue next",
"def drain_call_queue(self):\n pass",
"def queuing(self):\n try:\n src_dir = self.config[\"source\"][\"directory\"]\n # get the filter pattern\n src_obj_filter = self._source_filter()\n self.logger.debug(\"==> filter is '%s'\" % src_obj_filter)\n\n # put valid files into [todo] queue\n # items into queue are limited by the 'todo_queue_limit' parameter\n self.logger.debug(\"==> todo queue limit is set to '%s'\" %\n self.todo_queue_limit)\n for f in sorted_ls(src_dir):\n if len(self.todo_queue) < self.todo_queue_limit:\n is_object_file = re.match(r'%s' % src_obj_filter, f)\n if is_object_file:\n self.source_object_files += 1\n if is_object_file.group(1) > self.state_timestamp:\n item = {\"id\": is_object_file.group(1),\n \"objects_filename\": os.path.join(src_dir, f)}\n self.todo_queue.append(item)\n\n return len(self.todo_queue)\n\n except KeyError:\n raise ProfileError(\"no value found for source.directory\")",
"def sync(self):\n for subscription in self.getSubscriptionList():\n #user_id = subscription.getZopeUser()\n #uf = self.getPortalObject().acl_users\n #user = uf.getUserById(user_id).__of__(uf)\n #newSecurityManager(None, user)\n subscription.activate(activity='SQLQueue',\n tag=subscription.getId(),\n priority=ACTIVITY_PRIORITY\n ).SubSync(subscription.getPath())",
"def tidyUp():\n\n global queue\n queue.put(0)\n\n pass",
"def tidyUp():\n\n global queue\n queue.put(0)\n\n pass",
"def reset_queueing(self):\n self._num_queued = 0",
"def resubmit(self):\n self.keep_data = True\n ManagedJob.submit(self)",
"def process_queued_msg(self):\n try:\n while not self.queue.empty():\n port, tbl = self.queue.get()\n reveived_port = self.switches[port.neighbor_switch_dpid].ports[port.neighbor_port_no]\n self.tbl.update_by_neighbor(reveived_port, port, tbl)\n self.deploy_routing_table()\n except:\n pass",
"def RequeueWorkItems(instance_id):\n query = db.Query(run_log.RunLog)\n query.filter('status =', enum.CASE_STATUS.IN_PROGRESS)\n query.filter('client_id =', instance_id)\n\n logs = []\n for log in query:\n logs.append(log)\n\n # Ensure that the work item can be retried.\n if log.retry_count > 0:\n log.retry_count -= 1\n log.status = enum.CASE_STATUS.QUEUED\n log.client_id = ''\n log.priority -= 1\n else:\n log.status = enum.CASE_STATUS.UNKNOWN_ERROR\n\n db.put(logs)",
"def process_queue_fast(self):\n while self.queue:\n self.queue.popleft()()",
"def clearQueueAll():",
"def push(self):\n\n self.start = time.time()\n self.log.info('Uploading {} files to database...'\n ''.format(len(self.filenames)))\n i = 0\n\n # Loop over a portion of files and upload them\n if self.n_files != -1:\n files = self.filenames[0:self.n_files]\n else:\n files = self.filenames\n\n for i, f in enumerate(files):\n\n # If were not debugging script allow exceptions and report them\n # later\n if not self.debug:\n try:\n self._push_one(f, **self.meta)\n\n except Exception as e:\n self.log.error('Error with {}'.format(f))\n self.log.error(e)\n self.errors.append((f, e))\n\n else:\n self._push_one(f, **self.meta)\n\n self.session.close()\n\n # Log the ending errors\n self.report(i + 1)",
"def drain_call_queue(self):\n if len(self.call_queue) == 0:\n return\n self.apply(lambda x: x)"
] |
[
"0.63072103",
"0.59501857",
"0.5914015",
"0.58714503",
"0.58414",
"0.582209",
"0.5728848",
"0.57213223",
"0.56732166",
"0.5665568",
"0.56222856",
"0.56147325",
"0.5610322",
"0.5587205",
"0.558216",
"0.55685467",
"0.55618864",
"0.5524419",
"0.5512161",
"0.5472448",
"0.54688776",
"0.54688776",
"0.5457412",
"0.54485077",
"0.54248106",
"0.54171336",
"0.5415021",
"0.54043204",
"0.53819305",
"0.5369082"
] |
0.7815794
|
0
|
Check if the testbed is dualtor.
|
def is_dualtor(tbinfo):
return "dualtor" in tbinfo["topo"]["name"]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def skip_dualtor(tbinfo):\n pytest_require(\"dualtor\" not in tbinfo[\"topo\"][\"name\"], \"Skip 'test_tagged_arp' over dualtor.\")",
"def test_case(self) -> bool:\n return pulumi.get(self, \"test_case\")",
"def spec(self) -> bool:\n\t\treturn True",
"def in_test_mode(mode: str) -> bool:\n return mode == TEST",
"def test_turtle(self):\n assert not inspection.is_fixture_method(DummyTestCase.turtle_method)",
"def DualMode(self) -> bool:",
"def is_test(self):\r\n return self.has_label('tests')",
"def test_secondary(self):\n st = ServiceTicketFactory()\n self.assertFalse(st.is_primary())",
"def testable(self):\n return False",
"def is_strobe(self):\n if self._driver is None and not self._strobers:\n raise ValueError(\n 'internal %s is not driven by anything' % self._name)\n return bool(self._strobers)",
"def check_stability(self):",
"def testControlEnvironment(video1, video2):\n try:\n control.main(video1, video2, Verbose=True, Testing=True)\n return True\n except ValueError:\n return False",
"def test_if(self):",
"def match(self):\n return 'test' in self.name",
"def _isnotsuite(test):\n try:\n iter(test)\n except TypeError:\n return True\n return False",
"def testable(self):\n\t\treturn True",
"def is_inequality(self): \n return False",
"def test_tas_fix():\n assert Tas is BaseTas",
"def test_equal(scraper):\n\n assert scraper.is_compatible_with(punters_client.__version__) is True",
"def get_do_comparison(self):\n\n return self._doComparison and not Test.skip_comparison",
"def test_setup_not_is_ledger_tx(self):\n # setup\n self.strategy._is_ledger_tx = False\n\n # before\n assert not self.strategy.is_searching\n\n # operation\n self.search_behaviour.setup()\n\n # after\n assert self.strategy.is_searching",
"def test_server_administrator():\n if is_server_administrator():\n return True\n raise False",
"def test_verify_state_of_a_device():",
"def test_subsystems(self):\n pass",
"def has_test(args):\n return (args.test_set or args.test_source or args.test_dataset or\n args.test_stdin or args.test_datasets)",
"def test_is_scripting_mode():\n\n assert application_services.is_scripting_mode() is False",
"def is_test_only_dependency(self):\n return self._is_test_only_dependency",
"def is_expected_for_this_test(obj):\n if obj['test-name'] != test_name:\n return False\n if not fnmatch.fnmatch(config_filename, obj['configuration-filename']):\n return False\n expected_variant = obj.get('variant', None)\n if expected_variant == \"*\":\n return True\n for k in expected_variant:\n if not k in variant:\n return False\n if expected_variant[k] != variant[k]:\n return False\n return True",
"def test_proper(self):\n\n self.assertTrue(self.cs.isProper)\n self.assertFalse(self.cs.isDegenerate)",
"def test_theft_and_stealing(self):"
] |
[
"0.6514536",
"0.60643315",
"0.60052663",
"0.592934",
"0.5915355",
"0.5871197",
"0.5859092",
"0.5826975",
"0.5814629",
"0.5802817",
"0.5687041",
"0.5671905",
"0.5647522",
"0.56311524",
"0.563024",
"0.562746",
"0.5623227",
"0.56203735",
"0.55674803",
"0.55630165",
"0.5551409",
"0.55225533",
"0.55144185",
"0.5505287",
"0.55003464",
"0.5494481",
"0.5494124",
"0.548721",
"0.5486257",
"0.5462532"
] |
0.6489944
|
1
|
Given a timedelta object, returns a float representing milliseconds
|
def ms_from_timedelta(td):
return (td.seconds * 1000) + (td.microseconds / 1000.0)
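# Worked example of the conversion above (standard library only). Note that this
# helper ignores td.days, so it is intended for sub-day durations:
from datetime import timedelta
assert ms_from_timedelta(timedelta(seconds=1, microseconds=500)) == 1000.5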
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __timedelta_millis(td):\n return int(round(td.total_seconds(), 3) * 1000)",
"def timedeltaToFloat(self,time_d):\n time_d_min = time_d / timedelta(minutes=1)\n time_d_s = time_d / timedelta(seconds=1)\n time_d_ms = time_d / timedelta(milliseconds=1)\n\n return (time_d_min * 60 + time_d_s + time_d_ms * 0.001)",
"def millis(start_time):\n dt = datetime.now() - start_time\n ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0\n return ms",
"def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000",
"def _convert_to_timedelta(time_diff):\n return timedelta(microseconds=time_diff / _NANO_TO_MICRO)",
"def millis(): \n return int(round(monotonic.monotonic() * C.MILLISECONDS))",
"def _time_ms(self, dt):\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=pytz.utc)\n return int((dt - self._EPOCH).total_seconds() * 1000)",
"def get_millis(seconds):\n return seconds * 10 ** 3",
"def millis(): \r\n return int(round(monotonic.monotonic() * C.MILLISECONDS))",
"def convert_time_diff_to_total_milliseconds(sample):\n return (sample.total_seconds() * 1000.0) + (sample.microseconds / 1000.0)",
"def get_time_ms():\n return int(round(time.time() * 1000))",
"def get_duration(days=0, hours=0, minutes=0, seconds=0, millis=0):\n\n duration = 0.0\n\n duration += float(days) * 24 * 60 * 60\n duration += float(hours) * 60 * 60\n duration += float(minutes) * 60\n duration += float(seconds)\n duration += millis / 1000.0\n\n return duration",
"def millis() -> int:",
"def millis(self):\n return self._micros // 1000",
"def millis():\n return int(round(time() * 1000))",
"def calculate_time_ms(self, jiffies):\n\n return int((jiffies * 1000.0) / self._jiffies_per_sec)",
"def test_milliseconds():\n assert_equal(datetime.timedelta(seconds=0.01), convert_delta(\"10ms\"))",
"def get_time_delta(start):\n\tend = datetime.now()\n\tduration = ((end-start).microseconds)/1000\n\treturn duration",
"def millisecond():\n return int(round(time.time() * 1000))",
"def _time_delta_from_info(info):\n now = datetime.datetime.now()\n then = info.start_time\n return str(now.replace(microsecond=0) - then.replace(microsecond=0))",
"def _STEPS2TIME(step):\n return step/1000.",
"def _unit_ms(self):\n return (self.time_base / 1000.0) / 60.0",
"def __float_to_time(float_value):\n time_ms = int(float_value*24*60*60*1e3)\n return (datetime.datetime.min + datetime.timedelta(milliseconds=time_ms)).time()",
"def units_to_msec(units, resolution):\n time_ms = units * float(resolution) / 1000\n return time_ms",
"def timedelta_to_duration(obj: \"timedelta\") -> \"Duration\":\n d = Duration()\n d.seconds = int(obj.total_seconds())\n d.nanos = obj.microseconds * 1000\n return d",
"def duration_in_millis(self):\n return self._duration_in_millis",
"def time_millis():\n\n return int(time.time() * 1000)",
"def time_difference(time1: Time, time2: Time) -> float:\n dsec = time1.sec - time2.sec\n dnanosec = time1.nanosec - time2.nanosec\n dt = dsec + dnanosec/(10**9)\n return dt",
"def as_millis(self):\n return int(ntplib.ntp_to_system_time(self.start) * 1000), int(ntplib.ntp_to_system_time(self.stop) * 1000)",
"def datetime_to_epoch_microseconds(obj: \"datetime\") -> float:\n td = datetime_to_epoch_timedelta(obj)\n return (td.days * 86400 + td.seconds) * 10**6 + td.microseconds"
] |
[
"0.79984653",
"0.7154051",
"0.69038457",
"0.68848103",
"0.67910194",
"0.6784229",
"0.6774616",
"0.67619616",
"0.6695946",
"0.6629579",
"0.6577556",
"0.65353703",
"0.6470385",
"0.6467742",
"0.6434467",
"0.6433005",
"0.6432159",
"0.6419568",
"0.63950807",
"0.638573",
"0.6364025",
"0.63196814",
"0.6283646",
"0.62590563",
"0.62206036",
"0.6203286",
"0.61937624",
"0.6190622",
"0.61889976",
"0.6153603"
] |
0.8328362
|
0
|
Couchdbkit < 0.6.0 changes feed listener
|
def old_changes(self):
from couchdbkit import Consumer
c = Consumer(self.couch_db, backend='gevent')
while True:
try:
c.wait(self.parsing_processor, since=self.since, filter=self.couch_filter,
heartbeat=WAIT_HEARTBEAT, feed='continuous', timeout=30000, **self.extra_args)
except Exception, ex:
pillow_logging.exception("Exception in form listener: %s, sleeping and restarting" % ex)
gevent.sleep(RETRY_INTERVAL)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def MyDataChangedCallback(self, inRefcon):\r\n pass",
"def new_changes(self):\n with ChangesStream(self.couch_db, feed='continuous', heartbeat=True, since=self.since,\n filter=self.couch_filter, **self.extra_args) as st:\n for c in st:\n self.processor(c)",
"def onUpdated(self):",
"def getChanges():",
"def watch(self):",
"def GetChangesSample():\n client = CreateClient()\n changes = client.GetChanges()\n for change in changes.entry:\n print change.title.text, change.changestamp.value",
"def change_list(ctx, start, count):\n data = ctx.obj.get_all_changes(start=start, count=count)\n output_json_data(data)",
"def db_change_callback(self, table, key, action, value, topic=None):\n if self.USE_CACHE:\n # Update cache\n if action == 'create' or action == 'set':\n if table == 'lport':\n self.cache_logical_port_by_port_id[key] = self.nb_api.get(l2.LogicalPort(id=key))\n if table == 'lrouter':\n self.cache_logical_router_by_dpid[key] = self.nb_api.get(l3.LogicalRouter(id=key))\n if action == 'del':\n if table == 'lport':\n # default if key does not exists is None\n self.cache_logical_port_by_port_id.pop(key, None)\n if table == 'lrouter':\n self.cache_logical_router_by_dpid.pop(key, None)\n\n print(\"L3 App: Received Update for table {} and key {} action {}\".format(table, key, action))\n if action == 'set':\n if table == 'lport':\n if self.USE_CACHE:\n updated_port = self.cache_logical_port_by_port_id[key]\n else:\n updated_port = self.nb_api.get(l2.LogicalPort(id=key))\n\n if len(updated_port.ips) is not 0:\n for ip in updated_port.ips:\n # new ip discovered\n # install route on every datapath\n # only update the other datapaths\n for dpid, datapath in self.cache_datapath_by_dpid.iteritems():\n out_port, new_src_mac, new_dst_mac = self.get_next_hop(dpid, ip)\n if out_port is None:\n continue\n out_port_id = \"{}:{}\".format(dpid, out_port)\n lout_port = self.nb_api.get(l2.LogicalPort(id=out_port_id))\n if ip in lout_port.ips:\n continue\n # else add new ip and install flow\n lout_port.ips.append(ip)\n self.nb_api.update(lout_port)\n # install flow\n print \"L3 IP via pubsub: installing flow on {}: out_port: {} src_mac:\" \\\n \" {} dst_mac: {}, ip: {}\".format(datapath.id, out_port, new_src_mac, new_dst_mac, ip)\n self.add_flow_gateway_for_ip(datapath, int(out_port), ip, new_src_mac, new_dst_mac)",
"def on_change(key):\n pass",
"def check_updates(self):\n self.db.__connect__()\n self.ZULIP_SERVICE_TOPIC_MAP = self.db.get_topics()\n self.db.__disconnect__()",
"def on_refresh(self):\n pass",
"def _notify_update(self, cuds_object):",
"def changeAdded(change):",
"def listen_for_new_updates(event):\n\n if event.retval:\n news_indicator.create_and_update_menu(event.retval)\n if NewsIndicator.notifications:\n show_notifications(event.scheduled_run_time)\n Gtk.main()",
"def db_changed(self):\n self.dbstate.db.connect('person-add', self.update)\n self.dbstate.db.connect('person-delete', self.update)\n self.dbstate.db.connect('person-update', self.update)\n self.dbstate.db.connect('family-add', self.update)\n self.dbstate.db.connect('family-delete', self.update)\n self.dbstate.db.connect('family-update', self.update)",
"def watch(self, callback):\n raise NotImplementedError",
"def on_watch(self, payload):\n pass",
"def get_changefeed(connection, table, operation, *, prefeed=None):\n\n return MongoDBChangeFeed(table, operation, prefeed=prefeed,\n connection=connection)",
"def request_changes(self):\n self._check_if_open()\n data = {\"request-changes\": True}\n return self.post(\"request-changes\", data)",
"def notifyObservers(self):",
"def subscribeToChanges(self, callback):\n return self._change_subscriptions.subscribe(callback)",
"def events(self):",
"def run(self, event, db):\n pass",
"def watch(self):\n all_rss_feeds = [feed for feed in models.RSSFeed.query.all()]\n\n for rss_feed in all_rss_feeds:\n rss_feed.aggregate()",
"def changed(self, *args, **kwargs): # real signature unknown\n pass",
"def view_update(self, context):\n\n for collection in self._watch_list:\n collection_name = get_collection_name(collection)\n collection_set = set(collection)\n tracking_set = self._tracking_sets[collection_name]\n\n # Check for new items\n add_set = collection_set - tracking_set\n self.add_delta[collection_name] = add_set\n tracking_set |= add_set\n\n # Check for removed items\n remove_set = tracking_set - collection_set\n self.remove_delta[collection_name] = remove_set\n tracking_set -= remove_set\n\n # Check for updates\n update_set = {item for item in collection if item.is_updated}\n self.update_delta[collection_name] = update_set",
"def run_changefeed(conn, table, last_ts):\n while True:\n try:\n # XXX: hack to force reconnection, in case the connection\n # is lost while waiting on the cursor. See #1154.\n conn._conn = None\n namespace = conn.dbname + '.' + table\n query = conn.query().local.oplog.rs.find(\n {'ns': namespace, 'ts': {'$gt': last_ts}},\n {'o._id': False},\n cursor_type=pymongo.CursorType.TAILABLE_AWAIT\n )\n cursor = conn.run(query)\n logging.debug('Tailing oplog at %s/%s', namespace, last_ts)\n while cursor.alive:\n try:\n record = cursor.next()\n yield record\n last_ts = record['ts']\n except StopIteration:\n if _FEED_STOP:\n return\n except (BackendError, pymongo.errors.ConnectionFailure):\n logger.exception('Lost connection while tailing oplog, retrying')\n time.sleep(1)",
"def dbtrace_filter_on_change(new_state):\n\n pass",
"def _notify_read(self, cuds_object):",
"def getChanges(self, query = {}, docIds = []):\n if not docIds:\n return self.client.get(self.name +\"/_changes\", query).getBodyData()\n\n query = query or {}\n # ensure query filter\n if \"filter\" not in query:\n query[\"filter\"] = \"_doc_ids\"\n\n return self.client.post(self.name +\"/_changes\", query,\n {\"doc_ids\": docIds}).getBodyData()"
] |
[
"0.62532365",
"0.6226378",
"0.61087376",
"0.59061956",
"0.580392",
"0.5717122",
"0.5684762",
"0.5652663",
"0.5625966",
"0.55350727",
"0.5533221",
"0.54960304",
"0.5480702",
"0.5476974",
"0.546286",
"0.5462046",
"0.5432287",
"0.5430175",
"0.5393917",
"0.5377997",
"0.5354258",
"0.53528875",
"0.5348897",
"0.531724",
"0.5305978",
"0.5285313",
"0.52447176",
"0.52175564",
"0.5215927",
"0.5207325"
] |
0.6658043
|
0
|
Processor that also parses the change to JSON; only needed for pre-0.6.0 couchdbkit, as the change is passed as a string
|
def parsing_processor(self, change):
self.processor(simplejson.loads(change))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def changeFromMessage(message):\n retval = message['payload']['change']\n retval['revision'] = retval['rev']\n retval['properties'] = dict((k,v) for (k,v,s) in retval['properties'])\n retval['files'] = [f['name'] for f in retval['files']]\n if not retval['files']:\n retval['files'] = ['dummy']\n retval['user'] = retval['who']\n del retval['rev']\n del retval['who']\n del retval['number']\n del retval['at']\n return retval",
"def test_logentry_change_message_not_json(self):\n logentry = LogEntry(change_message=\"non-JSON string\")\n self.assertEqual(logentry.get_change_message(), logentry.change_message)",
"def patch_data():\n return json.loads('{\"success\":true, \"message\":\"Field of data updated (but not really)\" }')",
"def changeset(self, revision):\r\n url = self.base_url + 'changesets/%s/' % (revision)\r\n return json.loads(self.bb.load_url(url))",
"def json_diff_str(diff):\r\n\r\n res = []\r\n flat_diff_from, flat_diff_to = diff\r\n flat_diff_from = remove_none_values(flat_diff_from)\r\n flat_diff_to = remove_none_values(flat_diff_to)\r\n common_keys, _, _, from_keys, to_keys = c_keys(flat_diff_from, flat_diff_to)\r\n for key in from_keys:\r\n res.append(\"- {0}: {1}\".format(key, flat_diff_from[key]))\r\n for key in common_keys:\r\n res.append(\"{0}: {1} -> {2}\".format(key, flat_diff_from[key], \\\r\n flat_diff_to[key]))\r\n for key in to_keys:\r\n res.append(\"+ {0}: {1}\".format(key, flat_diff_to[key]))\r\n return res",
"def _get_delta_json_string(self):\n return json.dumps(self.delta_fp_string_dict.delta_dict)",
"def call_change_log(input_filter):\n try:\n if input_filter is None:\n latest = _find_latest()\n service_endpoint = _find_filter(\"change_log\")\n else:\n keyword = input_filter.split(\" \")[0]\n if \"release\" == keyword or \"build\" == keyword:\n service_endpoint = _find_filter(input_filter.split(\";\")[2])\n else:\n service_endpoint = _find_filter(keyword)\n\n rel_build = input_filter.replace(\"_\", \".\").split(\" \")[1].split(\";\")\n\n if \"build\" == keyword:\n latest_rel = rel_build[1]\n latest_bui = rel_build[0]\n else:\n latest_rel = rel_build[0]\n latest_bui = rel_build[1]\n\n latest = {\"latest_val\": latest_rel + \"_\" + latest_bui,\n \"second_latest_val\": latest_rel + \"_\" + str(int(latest_bui)-1)}\n\n latest_query = latest[\"second_latest_val\"] + \"..\" + latest[\"latest_val\"]\n data = _call_rest_api(service_endpoint + \"/\" + latest_query, None)\n except Exception as e:\n logger.error(str(e))\n data = {\"success\": \"\", \"data\": {}, \"error\": {\"Message\": str(e)}}\n data = jsonify(data)\n return data",
"def generic_single_field_json_change_handler(old_value, new_value, changed_field, base_fqn=None):\n\n if base_fqn is None:\n return \"%s changed from \\\"%s\\\" to \\\"%s\\\"\\n\" % (changed_field, old_value.to_json(), new_value.to_json())\n else:\n return \"%s.%s changed from \\\"%s\\\" to \\\"%s\\\"\\n\" % (base_fqn, changed_field, old_value.to_json(), new_value.to_json())",
"def _fix_old_syntax(tree):\n for key in list(tree.keys()):\n if 'object' in list(tree[key].keys()):\n # if no name is present and the object name is the old syntax we\n # need to be creative and pull the object name and use it\n if 'name' not in list(tree[key].keys()) and \\\n tree[key]['object'].find(':') >= 0:\n tree[key]['name'] = tree[key]['object'].replace(':', '_')\n\n # strip the old syntax from the object name\n tree[key]['object'] = tree[key]['object'].split(':')[0]\n\n # for the remaining syntax we will replace ':' with '_'\n for line in tree[key]:\n try:\n tree[key][line] = tree[key][line].replace(':', '_')\n except AttributeError:\n # If we've hit a dict, recurse.\n if isinstance(tree[key][line], dict):\n # Since dicts are mutable, and tree[key][line]\n # is a dict, this should work just fine for\n # updating in place.\n _fix_old_syntax(tree={line: tree[key][line]})\n else:\n raise TypeError(\"Something weird is going on.\")\n\n # if we are working with fuses let's set the mean replace time to 1\n # hour if not specified. Then we aviod a warning!\n if tree[key]['object'] == 'fuse' \\\n and 'mean_replacement_time' not in list(tree[key].keys()):\n tree[key]['mean_replacement_time'] = 3600.0\n\n # # FNCS is not able to handle names that include \"-\" so we will\n # # replace that with \"_\".\n # for prop in RENAME:\n # try:\n # # Attempt to fix the property.\n # tree[key][prop] = tree[key][prop].replace('-', '_')\n # except KeyError:\n # # Property isn't present - move along.\n # pass\n\n # No return, as we're modifying in place.\n return None",
"def GetChange(host, change):\n return FetchUrlJson(host, _GetChangePath(change))",
"def change_representation_compressed(\n change: Dict[str, Any], link: str, filepath: str, current_depth: int, max_depth: int\n) -> Optional[Any]:\n change_elements: List[Any] = [\n E.B(change[\"type\"]),\n E.SPAN(\" \"),\n E.A(change[\"name\"], href=link),\n E.B(\" changed lines: \"),\n E.SPAN(str(change[\"changed_lines\"])),\n ]\n\n if change[\"total_lines\"]:\n change_elements.extend([E.SPAN(\"/\"), E.SPAN(str(change[\"total_lines\"]))])\n\n if change[\"children\"]:\n change_elements.extend([E.BR()])\n child_elements = []\n for child in change[\"children\"]:\n child_element = render_change_as_html(\n child, filepath, current_depth + 1, max_depth, True\n )\n if child_element is not None:\n child_elements.append(child_element)\n change_elements.append(E.UL(*child_elements))\n\n return change_elements",
"def test_to_json(self):\n\n self.parser.parse()\n json_string = self.parser.to_json()\n \n self.assertTrue(isinstance(json_string, str))",
"def _process_json(self, json_content):\n if self._ns_sqlcon.connection is None:\n LOG.error(f'failed to open connection to DB')\n return\n entries = [entry for entry in json_content]\n LOG.info('started updating DB')\n num_of_entries = len(entries)\n for x in range(num_of_entries):\n entry = entries[x]\n try:\n self._ns_sqlcon.update_plugins_table(entry['_source'])\n except AttributeError:\n LOG.exception(f'malformed entry: {entry}')\n if x % 2000 != 0:\n continue\n LOG.info(f'Updated {x} records')\n\n LOG.info(f'Updated {num_of_entries} records')\n try:\n LOG.info('Commit started')\n self._ns_sqlcon.session.commit()\n LOG.info('Commit finished')\n except sqlalchemy.exc.IntegrityError:\n LOG.exception('failed committing updates to DB')\n self._ns_sqlcon.session.rollback()\n\n LOG.info('Finished updating DB')",
"def json_friendly(self):",
"def _getAlterToFormat(cls, alter):\n if alter == '':\n alter = ['', '']\n if isinstance(alter, str): # nothing to do if it is dict\n alter = ['', alter]\n return alter",
"def postprocess(self, json_string):\n is_compressing, is_hash, compressed, spaces = False, False, [], 0\n for row in json_string.split(\"\\n\"):\n if is_compressing:\n if (row[:spaces + 5] == \" \" * (spaces + 4) +\n (\"\\\"\" if is_hash else \"{\")):\n compressed.append(row.rstrip())\n elif (len(row) > spaces and row[:spaces] == \" \" * spaces and\n re.match(\"[\\]\\}],?\", row[spaces:].rstrip())):\n compressed.append(row.rstrip())\n is_compressing = False\n else:\n compressed[-1] += \" \" + row.strip()\n else:\n compressed.append(row.rstrip())\n if any(a in row for a in [\"edges\", \"nodes\"]):\n # Fix to handle issues that arise with empty lists\n if \"[]\" in row:\n continue\n spaces = sum(1 for _ in takewhile(str.isspace, row))\n is_compressing, is_hash = True, \"{\" in row\n return \"\\n\".join(compressed)",
"def json_dates_hook(dict):\n try:\n dict['data'] = dateparser.parse(dict['data'])\n return dict\n except KeyError:\n return dict",
"def addChange(change):",
"def addChange(change):",
"def test_update_to_non_json():\n starting_db = create_db(STARTING_DB_INPUT)\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n starting_db,\n \"some_uid\",\n \"this isn't json :(\"\n )",
"def _json(self, data):\n if len(data) == 0:\n return \"\"\n if self.meta:\n data['meta_history'] = [{'prog': __prog__,\n 'release': __release__,\n 'author': __author__,\n 'date': __now__},]\n return json.dumps(data) + \"\\n\"",
"def data_dict_update(self, change):\n self.data_dict = change['value']",
"def process_json(json_str):\n\tjson_str = json_str.replace(\"\\n\", \" \")\n\tjson_str = json_str.replace(\"\\t\", \" \")\n\t\n\twhile json_str.find(\" \") > -1:\n\t\tjson_str = json_str.replace(\" \", \" \")\n\t\n\treturn json_str",
"def find_json(data):\n if data.startswith(\"------------------------------ \\n\"\n \"QUERY PLAN DESCRIPTION: \\n\"\n \"------------------------------\"):\n # Vertica-like\n data = data.split(\"JSON format:\\n\")[1].split(\"End JSON format\")[0]\n return data",
"def do(self, input: Union[str, bytes], options: Dict[str, Union[str, bool]] = {}): # type: ignore\n result = super().do(input, \"-\", dict(**options, to=\"json\"))\n return json.loads(result)",
"def stringify_change(change):\n key = change.key\n a = change.a or '<null>'\n b = change.b or '<null>'\n return '{}: {} => {}'.format(key, a, b)",
"def update_json(self):\n self.set_version_to_default()\n self.remove_null_fields()\n self.remove_unnecessary_keys()\n self.set_fromVersion(from_version=self.from_version)",
"def generic_list_json_change_handler(old_value, new_value, changed_field):\n\n removed_names = [x.to_json() for x in old_value if x not in new_value and x != '']\n added_names = [x.to_json() for x in new_value if x not in old_value and x != '']\n\n message = \"\"\n if len(added_names) > 0:\n message += \"Added to %s: %s. \" % (changed_field, unicode(', '.join(added_names)))\n if len(removed_names) > 0:\n message += \"Removed from %s: %s. \" % (changed_field, unicode(', '.join(removed_names)))\n\n return message",
"def _convert_states_v28_dict_to_v29_dict(cls, draft_change_list):\n return draft_change_list",
"def _prepare_body_update_trunk(prop_diff):\n return {'trunk': prop_diff}"
] |
[
"0.5822516",
"0.5435952",
"0.540422",
"0.52699476",
"0.52576315",
"0.5242274",
"0.52018917",
"0.5112058",
"0.5104661",
"0.5075216",
"0.506897",
"0.50613344",
"0.5037064",
"0.50290513",
"0.5022374",
"0.5008275",
"0.5005664",
"0.49938402",
"0.49938402",
"0.49796298",
"0.4958498",
"0.49499446",
"0.49281514",
"0.4880081",
"0.48704395",
"0.48686597",
"0.48651275",
"0.4848078",
"0.4836435",
"0.4800229"
] |
0.70396775
|
0
|
Set more optimized settings for fast reindexing
|
def set_index_reindex_settings(self):
return self.update_settings(INDEX_REINDEX_SETTINGS)
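# INDEX_REINDEX_SETTINGS is defined elsewhere in the source and not shown here. As a
# hedged sketch, a typical Elasticsearch settings payload for fast reindexing pauses
# refresh and drops replicas for the duration of the bulk load (the real constant
# may differ):
#
#     INDEX_REINDEX_SETTINGS = {
#         "index": {
#             "refresh_interval": "-1",
#             "number_of_replicas": 0,
#         }
#     }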
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def setSpellerCacheSize(self, value):\n self.setIntegerOption(17, value)",
"def cacheOptionsForBuild(self):",
"def test_change_default_throttling_settings_http_with_overwrite_throttled():",
"def test_change_default_throttling_settings_http_with_overwrite_not_throttled():",
"def _optimise(self):\n pass",
"def initialize(self, *args, **kwargs): \n super().initialize(*args, **kwargs)\n self.updates_per_optimize = 1",
"def test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_50():",
"def enable_index_update_feature(settings):\n settings.FEATURES[INDEX_UPDATES] = True",
"def set_settings(self, settings):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"PUT\", \"/1/indexes/%s/settings\" % self.url_index_name, self.client.timeout, settings)",
"def test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_account_quota():",
"def reindex(self):",
"def reindex(self):",
"def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_50():",
"def settings():\n raise NotImplementedError # pragma: nocoverage",
"def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_account_quota():",
"def edit_settings(self):\n return 1 << 3",
"def _SetupIndexes(self, _open=open):\n pass",
"def propose_optimize():\n pass",
"def update_settings( what_to_do, settings_inst ):\n from settings import smart_update\n from _settings import settings\n\n smart_update(settings_inst, settings)\n # ok, we want to have parallel\n if what_to_do == \"wikis_to_huge_math\":\n settings_inst[\"input\"] = settings_inst[\"wiki\"][\"xml\"]\n # there are too few so each process should take only 1\n settings_inst[\"parallel\"][\"chunksize\"] = 1",
"def options_set(self):\n\n global OPTIONS\n OPTIONS.append(config.ENABLE(self.threaded))\n OPTIONS.append(config.ENABLE(self.datasaver))\n OPTIONS.append(self.language)",
"def update_index_settings(client):\n index = get_aliased_index(client)\n mapping = ANNOTATION_MAPPING\n\n _update_index_analysis(client.conn, index, ANALYSIS_SETTINGS)\n _update_index_mappings(client.conn, index, client.mapping_type, mapping)",
"def optimize_parameters(self):\n pass",
"def optimize_parameters(self):\n pass",
"def optimize_parameters(self):\n pass",
"def optim_initialize(self, rank=0):\n super().optim_initialize(rank)\n self.optimizer.pytorch_step = self.optimizer.step\n self.optimizer.step = self.save_and_step",
"def __init__(self, simplify_col_names=True, use_tqdm=True):\n self._tq = tqdm if use_tqdm else faketqdm\n self.indices = self.search_for_indices()\n self.simplify_col_names = simplify_col_names",
"def base_settings():\n return \"\"\"\n iota = True\n rho = False\n omega = True\n chi = True\n pini = False\n\n emr = 0\n constrain_omega = 1\n iota.at_specific = 0\n iota.min = 0.0001\n iota.age_cnt = 2\n iota.time_cnt = 2\n omega.at_specific = 1\n omega.min = 0.0001\n omega.age_cnt = 0\n omega.time_cnt = 0\n chi.at_specific = 0\n chi.min = 0.0001\n chi.age_cnt = 1\n chi.time_cnt = 2\n drill_start = 0\n drill_end = -1\n re.iota = all\n re.omega = all\n re.chi = all\n study.0 = False\n study.11 = True\n study.11.at_specific = 0\n study.11.age_cnt = 1\n study.11.time_cnt = 1\n study.11.covtype = rate_value\n study.11.rate = chi\n study.1604 = True\n study.1604.at_specific = 0\n study.1604.age_cnt = 1\n study.1604.time_cnt = 1\n study.1604.covtype = meas_std\n country.156 = True\n country.156.at_specific = 0\n country.156.age_cnt = 1\n country.156.time_cnt = 1\n country.156.covtype = rate_value\n country.156.rate = iota\n country.1998 = True\n country.1998.at_specific = 0\n country.1998.age_cnt = 1\n country.1998.time_cnt = 1\n country.1998.covtype = meas_std\n job_idx = 0\n \"\"\"",
"def setIndexMode(self, mode):\n self.indexMode = mode",
"def init_run_opt(self,value=1):\n self.run_opt={}\n self.run_opt['param']=value\n self.run_opt['analyzer']=value\n self.run_opt['compilation']=value\n self.run_opt['event']=value\n self.run_opt['dir']=value\n self.run_opt['launch']=value\n self.run_opt['control']=value\n self.run_opt['collect']=value\n self.run_opt['plot']=value \n self.run_opt['madweight_main']=value\n self.run_opt['relaunch']=0 #only for bugging case... -> desactivate\n self.run_opt['refine']=0 #only for bugging case... -> desactivate\n self.run_opt['clean']=0 #dangerous... -> desactivate\n self.control_opt()",
"def add_mode_index(self) -> None:"
] |
[
"0.57740206",
"0.569045",
"0.56603867",
"0.564941",
"0.5621489",
"0.55744123",
"0.5519884",
"0.54922324",
"0.54647976",
"0.53976315",
"0.5357913",
"0.5357913",
"0.5356065",
"0.5311139",
"0.5212538",
"0.5199254",
"0.51861364",
"0.51667863",
"0.5117706",
"0.5116156",
"0.51149994",
"0.51147944",
"0.51147944",
"0.51147944",
"0.50907",
"0.50670326",
"0.50640196",
"0.5051482",
"0.5038559",
"0.5012808"
] |
0.65560335
|
0
|
Using the HEAD 404/200 result API for document existence. Returns True if 200 (exists)
|
def doc_exists(self, doc_id):
es = self.get_es()
doc_path = self.get_doc_path(doc_id)
head_result = es.head(doc_path)
return head_result
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def exists(path):\n r = requests.head(path)\n # print(r.status_code)\n return r.status_code == requests.codes.ok",
"def doc_exists(self, doc_dict):\n es = self.get_es()\n head_result = es.head(self.get_doc_path_typed(doc_dict))\n return head_result",
"def is_exist(self, status_code):\n if status_code == 200:\n return True\n return False",
"def index_exists(self, index):\n req = requests.head(\n urljoin(self.base_url, '{0}'.format(index)),\n verify=self.verify_certs)\n return req.status_code == 200",
"def document_exists(self, document_id):\n document_id = document_id.strip()\n if not document_id:\n return False\n\n connection = self.__get_database_connection()\n response = self.__make_request(connection,\n '/%s/%s' % (self.database_name, document_id),\n method='HEAD')\n return bool(response)",
"def exists_adv(path):\n # TODO: use selenium\n r = requests.head(path)\n # print(r.status_code)\n return r.status_code == requests.codes.ok",
"def _url_exists(url):\n h = httplib2.Http()\n try:\n resp = h.request(url, 'HEAD')\n if resp[0].status == 200:\n return True\n except (httplib2.RelativeURIError, httplib2.ServerNotFoundError):\n return False",
"def url_exist(url:str) -> bool:\r\n with closing(requests.head(url, allow_redirects=True)) as r:\r\n return r.ok",
"def exists(self, url):\n url = urlparse.urlparse(url)\n connection = httplib.HTTPConnection(url.hostname, timeout=CONNECTION_TIMEOUT)\n \n try:\n connection.request(\"HEAD\", url.geturl())\n response = connection.getresponse()\n except:\n return False\n \n if str(response.status)[0] not in [\"2\", \"3\"]:\n return False\n \n connection.close()\n return True",
"def check_response_update_nonexistent(response: HTTPResponse) -> bool: # pylint: disable=invalid-name\n return response.status_code == 404",
"def do_HEAD(self):\n self.do_GET(True)",
"def test_HEAD(self):\n if not self.url:\n return\n response = self.client.head(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_200_OK,\n status.HTTP_401_UNAUTHORIZED])",
"def file_exist(file_url):\n try:\n response = requests.head(file_url)\n if 200 <= response.status_code < 300:\n return True\n return False\n except ConnectionError:\n return False",
"def _verify_url_exists(url, use_head=False):\n # (str, bool) -> bool\n try:\n if use_head:\n resp = requests.head(url)\n else:\n resp = requests.get(url)\n except requests.exceptions.ConnectionError:\n return False\n\n return resp.status_code in [200, 302]",
"def test_url_existence(self):\n self.assertEquals(self.response.status_code, 200)",
"def test_exists(self):\n d = self._examineOrSelect()\n self._response(b'* 3 EXISTS')\n self.assertEqual(\n self.successResultOf(d),\n {'READ-WRITE': False, 'EXISTS': 3})",
"def document_exists(self, docid):\n raise NotImplementedError",
"def document_exists(collection, field, value):\n #print(\"Collection: \", collection, \" Field: \", field, \" Value: \", value)\n try:\n cursor = database[collection].find_one({field: value})\n if cursor is None:\n return {'status': 1, 'data': 0}\n return {'status': 1, 'data': cursor}\n except Exception as e:\n return {'status': -1, 'data': 'document_exists ' + str(e)}",
"def test_exists(photo_name):\n try:\n con = httplib.HTTPConnection(AWS_MS_PHOTOS)\n print 'url is %s and name is %s' % (AWS_MS_PHOTOS, photo_name)\n con.request('HEAD', '/%s' % photo_name)\n res = con.getresponse()\n size = int(res.getheader('content-length'))\n print 'size back %s' % size\n if size > 0: \n return True\n else:\n return False\n except:\n logging.error(\"Had a bad fall validating photo %s\" % photo_name, exc_info=True)\n return False",
"def _does_not_exist():\n response_payload = dict(\n message=\"Recipe does not exist!\"\n )\n response_payload = jsonify(response_payload)\n return make_response(response_payload, 404)",
"async def has(path: str) -> bool:\n _ = path.strip('/').split('/')\n bucket = _[0]\n key = '/'.join(_[1:])\n async with _create_client() as client:\n try:\n await client.head_object(Bucket=bucket, Key=key)\n return True\n except ClientError:\n return False",
"def exists(self, uri):\n osaka.utils.LOGGER.debug(\"Does URI {0} exist?\".format(uri))\n try:\n path = osaka.utils.get_uri_path(uri)\n tmp = self.webdav.exists(path)\n osaka.utils.LOGGER.debug(\"Does URI {0} exist? {1}\".format(uri, tmp))\n return tmp\n except Exception:\n pass\n osaka.utils.LOGGER.debug(\"Failed to check existence using HEAD\")\n try:\n text = self.httpHandler.get(re.compile(\"^dav\").sub(\"http\", uri), text=True)\n if re.search(r\"\\s*(?:<!DOCTYPE)|(?:<!doctype)\", text):\n raise osaka.utils.OsakaException(\"Unauthorized, redirected to login\")\n osaka.utils.LOGGER.debug(\"Does URI {0} exist? {1}\".format(uri, True))\n return True\n except Exception as e:\n if \"404 Client Error:\" in str(e):\n osaka.utils.LOGGER.debug(\"Does URI {0} exist? {1}\".format(uri, False))\n return False\n raise",
"def _check_idx(self, url):\n if not url.endswith('.idx'):\n url += '.idx'\n return requests.head(url).ok",
"def do_HEAD(self):\n self.send_response(200)",
"def __check(s3client, key, bucket_name):\n try:\n s3client.head_object(Bucket=bucket_name, Key=key)\n except ClientError as e:\n return int(e.response['Error']['Code']) != 404\n return True",
"def ResourceExists(self, name):\n pass",
"def exists(self):\n print \"exists called: \", self.path\n d = self._get()\n d.addCallback(self._handleResponse)\n\n return d",
"def url_was_found(url=\"localhost:5000/health\"):\n res = requests.get(url).json()\n\n if res['status_code'] == 200:\n return True\n elif res['status_code'] == 404:\n return False\n else:\n raise UnexpectedResponseError(\"Expected 200 OK or 404, got {}.\\n\".format(res['status']), \"Full response : {}\".format(res))",
"def exists( self, path ):\n res = self.__checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n successful = {}\n failed = {}\n serviceClient = RPCClient( self.url )\n for url in urls:\n gLogger.debug( \"DIPStorage.exists: Determining existence of %s.\" % url )\n res = serviceClient.exists( url )\n if res['OK']:\n successful[url] = res['Value']\n else:\n failed[url] = res['Message']\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )",
"def object_exists(uri):\n bucket_name, key = split_uri(uri)\n return get_client().object_exists(bucket_name, key)"
] |
[
"0.7441819",
"0.6859171",
"0.67155325",
"0.66828007",
"0.6660155",
"0.64963514",
"0.6493887",
"0.64617324",
"0.63922584",
"0.6353348",
"0.63439393",
"0.63398975",
"0.63322216",
"0.6331907",
"0.6277079",
"0.6238249",
"0.6191304",
"0.6183627",
"0.61783135",
"0.6140467",
"0.60920423",
"0.6090753",
"0.6085632",
"0.6084722",
"0.6047655",
"0.60294735",
"0.60200614",
"0.6000476",
"0.5985626",
"0.59721094"
] |
0.70652413
|
1
|
Generator function for bulk changes. Note that each individual change item goes through the pillowtop pathway individually when loading the bulk item; short of change_transport, it's identical. It would be slightly more efficient if the couch load could be done in bulk for the actual documents, but that's not quite possible without gutting the existing pillowtop API
|
def bulk_builder(self, changes):
for change in changes:
try:
t = self.change_trigger(change)
if t is not None:
tr = self.change_transform(t)
if tr is not None:
self.change_transport(tr)
yield {
"index": {"_index": self.es_index, "_type": self.es_type,
"_id": tr['_id']}}
yield tr
except Exception, ex:
pillow_logging.error("Error on change: %s, %s" % (change['id'], ex))
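# Hedged sketch (not part of the original API; "pillow" stands for an instance of
# this class) of how the interleaved action/source dicts yielded above map onto the
# newline-delimited body expected by the Elasticsearch _bulk endpoint:
#
#     import simplejson
#     payload = "\n".join(simplejson.dumps(row) for row in pillow.bulk_builder(changes)) + "\n"
#     # POST payload to the _bulk endpoint, or hand the generator to a bulk helper.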
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def step070() -> None:\n logger.logMessage('Begin: elasticsearch bulk update')\n client = es.Elasticsearch(hostlist)\n\n def generate():\n with open(renumFile,'r') as f:\n line = f.readline().rstrip()\n while line != '':\n fields = line.split(';')\n oper = { '_index': fields[3], \n '_op_type': 'update',\n '_id': fields[2].rstrip(),\n '_type': 'doc',\n '_source:': {'doc': {'tsa': fields[0]}}}\n \n yield oper\n line = f.readline().rstrip()\n result = eshelp.bulk(client,generate())\n logger.logMessage('Bulk result: {0}'.format(result))\n logger.logMessage('End : elasticsearch bulk update')",
"def change_list(ctx, start, count):\n data = ctx.obj.get_all_changes(start=start, count=count)\n output_json_data(data)",
"def get_changes(self, type_of_change=None):\n if type_of_change is None or type_of_change == TypesOfChanges.ALL:\n generator = self._get_all_types()\n elif type_of_change == TypesOfChanges.ADDED:\n generator = self._get_added()\n elif type_of_change == TypesOfChanges.REMOVED:\n generator = self._get_removed()\n elif type_of_change == TypesOfChanges.CHANGED:\n generator = self._get_changes()\n else:\n raise ValueError(f\"doesn't support other types {type_of_change}\")\n\n for i in safe_iterator(generator, (FileNotFoundError, EnvironmentError)):\n yield i",
"def all_changes(db):\n changes = db['changes']\n\n for c in changes.find():\n yield Change(c)",
"def filter_changes(tasks, entity, action):\n\n for task in tasks:\n _entity = get_entity(task[\"id\"])\n if _entity == entity and task.get(\"task\") == action:\n yield task",
"def stream_changes(self) -> Iterator[Change]:\n raise NotImplementedError",
"def process_changes_chunk(self, changes):\n self.bootstrap_if_needed()\n # break up changes by domain\n changes_by_domain = defaultdict(list)\n for change in changes:\n if is_couch_change_for_sql_domain(change):\n continue\n # skip if no domain or no UCR tables in the domain\n if change.metadata.domain and change.metadata.domain in self.table_manager.relevant_domains:\n changes_by_domain[change.metadata.domain].append(change)\n\n retry_changes = set()\n change_exceptions = []\n for domain, changes_chunk in changes_by_domain.items():\n with WarmShutdown():\n failed, exceptions = self._process_chunk_for_domain(domain, changes_chunk)\n retry_changes.update(failed)\n change_exceptions.extend(exceptions)\n\n return retry_changes, change_exceptions",
"def get_changegenerator_for_node(self, node):\n if len(node) == 1 and not (node == (0,)):\n return None\n if node == (1,0):\n return self.proposals.get_whitespace_changes\n elif node == (1,1):\n return self.proposals.get_cvsheader_changes\n elif node == (1,2):\n return self.proposals.get_unmodified_changes\n elif node == (2,0):\n return self.proposals.get_used_changes\n elif node == (2,1):\n return self.proposals.get_zapped_changes\n elif node == (2,2):\n return self.proposals.get_undecided_changes\n elif node == (0,):\n return self.proposals.get_all_changes\n elif len(node) == 2 and node[0] == 0:\n file = self.treestore[node][0]\n return lambda: self.proposals.get_file_changes(file)\n return lambda: []",
"def change():",
"async def change_feed_filter():\n\n conn = await connection()\n feed = await r.table('share_assets').has_fields('revoked_on').changes().run(conn)\n# feed = await r.table(db_config['revoke_table']).has_fields('revoked_on').changes().run(conn)\n\n while (await feed.fetch_next()):\n change = await feed.next()\n c_id = change['new_val']['id']\n revoke_date = datetime.datetime.fromtimestamp(change['new_val']['revoked_on'], tz)\n revoked_flag_new = change['new_val']['revoked_flag']\n try:\n\n revoked_flag_old = change['old_val']['revoked_flag']\n if (revoked_flag_new == 0 and revoked_flag_old == 1):\n task_res = revoke_certi_task.apply_async((c_id,change['new_val']['revoked_on']),eta=revoke_date)\n await task_status_logging(task_res.id, revoke_date)\n\n except KeyError:\n task_res = revoke_certi_task.apply_async((c_id,change['new_val']['revoked_on']),eta=revoke_date)\n await task_status_logging(task_res.id, revoke_date)",
"def getChanges():",
"def _enrich_changes(changes: List[Dict[str, Any]], root_url: str) -> None:\n for change in changes:\n change[\"link\"] = f\"{root_url}#L{change['line']}\"\n if change.get(\"children\") is not None:\n _enrich_changes(change[\"children\"], root_url)",
"def processor(self, change, do_set_checkpoint=True):\n self.changes_seen += 1\n if self.changes_seen % CHECKPOINT_FREQUENCY == 0 and do_set_checkpoint:\n pillow_logging.info(\n \"(%s) setting checkpoint: %s\" % (self.get_checkpoint_doc_name(), change['seq']))\n self.set_checkpoint(change)\n\n try:\n t = self.change_trigger(change)\n if t is not None:\n tr = self.change_transform(t)\n if tr is not None:\n self.change_transport(tr)\n except Exception, ex:\n pillow_logging.exception(\"Error on change: %s, %s\" % (change['id'], ex))\n gevent.sleep(RETRY_INTERVAL)",
"def _apply_incremental_change(\n self, change: TextDocumentContentChangeEvent_Type1\n ) -> None:\n lines = self.lines\n text = change.text\n change_range = change.range\n\n range = range_from_utf16(lines, change_range) # type: ignore\n start_line = range.start.line\n start_col = range.start.character\n end_line = range.end.line\n end_col = range.end.character\n\n # Check for an edit occurring at the very end of the file\n if start_line == len(lines):\n self._source = self.source + text\n return\n\n new = io.StringIO()\n\n # Iterate over the existing document until we hit the edit range,\n # at which point we write the new text, then loop until we hit\n # the end of the range and continue writing.\n for i, line in enumerate(lines):\n if i < start_line:\n new.write(line)\n continue\n\n if i > end_line:\n new.write(line)\n continue\n\n if i == start_line:\n new.write(line[:start_col])\n new.write(text)\n\n if i == end_line:\n new.write(line[end_col:])\n\n self._source = new.getvalue()",
"def new_changes(self):\n with ChangesStream(self.couch_db, feed='continuous', heartbeat=True, since=self.since,\n filter=self.couch_filter, **self.extra_args) as st:\n for c in st:\n self.processor(c)",
"def _items_updated(self, change):\n if self.root:\n # The whole list changed.\n if change['type'] == 'update':\n added = set(change['value']) - set(change['oldvalue'])\n removed = set(change['oldvalue']) - set(change['value'])\n for item in removed:\n self._item_removed(item)\n for item in added:\n self._item_added(item)\n\n # An operation has been performed on the list.\n elif change['type'] == 'container':\n op = change['operation']\n\n # itemren have been added\n if op in ('__iadd__', 'append', 'extend', 'insert'):\n if 'item' in change:\n self._item_added(change['item'])\n if 'items' in change:\n for item in change['items']:\n self._item_added(item)\n\n # itemren have been removed.\n elif op in ('__delitem__', 'remove', 'pop'):\n if 'item' in change:\n self._item_removed(change['item'])\n if 'items' in change:\n for item in change['items']:\n self._item_removed(item)\n\n # One item was replaced.\n elif op in ('__setitem__'):\n old = change['olditem']\n if isinstance(old, list):\n for item in old:\n self._item_removed(item)\n else:\n self._item_removed(old)\n\n new = change['newitem']\n if isinstance(new, list):\n for item in new:\n self._item_added(item)\n else:\n self._item_added(new)\n\n self._recompute_indexes()",
"def divide_changes_to_group(self, chg_rows):\n id_group = []\n type_now = None\n for idx, chg in enumerate(chg_rows):\n # Start of the current group\n if type_now is None:\n type_now = chg[self.DMLCOLNAME]\n id_group.append(chg[self.IDCOLNAME])\n\n # Dump when we are at the end of the changes\n if idx == len(chg_rows) - 1:\n yield type_now, id_group\n return\n # update type cannot be grouped\n elif type_now == self.DML_TYPE_UPDATE:\n yield type_now, id_group\n type_now = None\n id_group = []\n # The next change is a different type, dump what we have now\n elif chg_rows[idx + 1][self.DMLCOLNAME] != type_now:\n yield type_now, id_group\n type_now = None\n id_group = []\n # Reach the max group size, let's submit the query for now\n elif len(id_group) >= self.replay_group_size:\n yield type_now, id_group\n type_now = None\n id_group = []\n # The next element will be the same as what we are now\n else:\n continue",
"def processor(self, change, do_set_checkpoint=True):\n self.changes_seen += 1\n if self.changes_seen % CHECKPOINT_FREQUENCY == 0 and do_set_checkpoint:\n pillow_logging.info(\n \"(%s) setting checkpoint: %s\" % (self.get_checkpoint_doc_name(), change['seq']))\n self.set_checkpoint(change)\n\n try:\n t = self.change_trigger(change)\n if t is not None:\n tr = self.change_transform(t)\n if tr is not None:\n self.change_transport(tr)\n except Exception, ex:\n pillow_logging.error(\"Error on change: %s, %s\" % (change['id'], ex))",
"def enumerate_transfers_for_update(self):\n transfer_batches = self.single_robot_transfer_batches_for_update()\n for transfer_batch in transfer_batches:\n for transfer in transfer_batch.transfers:\n yield transfer",
"def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_update_action(self):\n pass",
"def bulk_process(self):\n\n def actions():\n try:\n task = self.queue.get(block=False, timeout=None)\n\n if task['action'] == 'index':\n yield {\n '_op_type': 'index',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n elif task['action'] == 'delete':\n yield {\n '_op_type': 'delete',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n else:\n raise NotImplementedError\n\n except Empty:\n pass\n\n for success, info in streaming_bulk(self.es_client, actions()):\n if success:\n self.queue.task_done()",
"def test_stress_change_trigger(self):\n for _i in range(config.nstress):\n self.test_change_trigger()",
"def make_change(cost_of_item, amount_paid, denominations):\n\n\tdef data_quality_check(denominations, **kwargs):\n\t\tcomplete = True\t\n\t\t\n\t\tfor key, value in kwargs.items():\n\t\t\tif not isinstance(value, int):\n\t\t\t\tcomplete = False\n\t\t\t\tprint('The {} is not an integer'.format(key))\n\t\t\telse:\n\t\t\t\tif value < 1:\n\t\t\t\t\tcomplete = False\n\t\t\t\t\tprint('The {} is less than $1'.format(key))\n\t\t\n\t\tdenom = list(set(denominations)) # get out any duplicates\n\t\tif not all(isinstance(x, int) for x in denom):\n\t\t\tcomplete = False\n\t\t\tprint('At least one of the elements in the list is not an integer')\n\t\tif any(i < 1 for i in denom):\n\t\t\tcomplete = False\n\t\t\tprint('At least one of the elements in the list is less than 1')\n\t\tif complete:\n\t\t\tdenominations = sorted(denom, reverse=True) # sort list in reverse order\n\t\treturn complete, denominations\n\n\n\tdef amount_of_change(cost_of_item, amount_paid):\n\t\tprint('Cost of the item is ${}'.format(cost_of_item))\n\t\tprint('Amount paid is ${}'.format(amount_paid))\n\t\treturn amount_paid - cost_of_item\n\n\n\tdef denomination_of_bills(change, denominations):\n\t\tbills_to_return = []\n\t\twhile change > 0:\n\t\t\t# set start = 0. Then change it to the count value every time a bill is chosen\n\t\t\t# so that the next loop does not start at the beginning of the list. This shortens\n\t\t\t# each succesive loop.\n\t\t\tstart = 0\n\t\t\tfor count, bill in enumerate(denominations, start):\n\t\t\t\tif bill <= change:\n\t\t\t\t\tbills_to_return.append(bill)\n\t\t\t\t\tchange -= bill\n\t\t\t\t\tstart = count\n\t\t\t\t\tbreak\n\t\treturn bills_to_return\n\t\n\t##### Start the code #####\n\t## put items into a dictionary so that I can experiment with passing kwargs.\n\t\n\tinfo_dict = {'cost of item': cost_of_item, 'amount paid' : amount_paid}\n\tcomplete, denominations = data_quality_check(denominations, **info_dict)\n\tif not complete:\n\t\treturn\n\n\tchange = amount_of_change(cost_of_item, amount_paid)\n\tif change < 0:\n\t\tprint(\"You haven't paid enough for the item.\")\n\telif change == 0:\n\t\tprint('You paid the exact amount and get no change')\n\telse:\n\t\tprint('The change is ${}'.format(change))\n\t\tbills = denomination_of_bills(change, denominations)\n\t\tif len(bills) == 1:\n\t\t\tprint('The denomination of the change is ${}'.format(''.join(str(x) for x in bills)))\n\t\telse:\n\t\t\tprint('The denominations of the change are ${}'.format(', $'.join(str(x) for x in bills)))",
"def update(self, did, rev, changing_fields):\n\n composite_fields = ['urls', 'acl', 'metadata', 'urls_metadata']\n\n with self.session as session:\n query = session.query(IndexRecord).filter(IndexRecord.did == did)\n\n try:\n record = query.one()\n except NoResultFound:\n raise NoRecordFound('no record found')\n except MultipleResultsFound:\n raise MultipleRecordsFound('multiple records found')\n\n if rev != record.rev:\n raise RevisionMismatch('revision mismatch')\n\n # Some operations are dependant on other operations. For example\n # urls has to be updated before urls_metadata because of schema\n # constraints.\n if 'urls' in changing_fields:\n for url in record.urls:\n session.delete(url)\n\n record.urls = [\n IndexRecordUrl(did=record.did, url=url)\n for url in changing_fields['urls']\n ]\n\n if 'acl' in changing_fields:\n for ace in record.acl:\n session.delete(ace)\n\n record.acl = [\n IndexRecordACE(did=record.did, ace=ace)\n for ace in changing_fields['acl']\n ]\n\n if 'metadata' in changing_fields:\n for md_record in record.index_metadata:\n session.delete(md_record)\n\n record.index_metadata = [\n IndexRecordMetadata(\n did=record.did,\n key=m_key,\n value=m_value\n )\n for m_key, m_value in changing_fields['metadata'].items()]\n\n if 'urls_metadata' in changing_fields:\n for url in record.urls:\n for url_metadata in url.url_metadata:\n session.delete(url_metadata)\n\n create_urls_metadata(\n changing_fields['urls_metadata'],\n record,\n session,\n )\n\n for key, value in changing_fields.items():\n if key not in composite_fields:\n # No special logic needed for other updates.\n # ie file_name, version, etc\n setattr(record, key, value)\n\n record.rev = str(uuid.uuid4())[:8]\n\n session.add(record)\n\n return record.did, record.baseid, record.rev",
"async def aiter_changes(query, value_type, conn = None):\n feed = await _run_query(query, conn)\n mapper = value_type.dbval_to_pyval\n return ChangesAsyncMap(feed, mapper)",
"def test_bulk_flow(self):\n lengths = []\n before = Actor2Actor.objects.count()\n for i in range(2):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_a2a)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.a2a_url, data)\n assert res.status_code == status.HTTP_201_CREATED\n lengths.append(Actor2Actor.objects.count())\n # check that 2nd loop does not create additional products\n # but updates them\n assert lengths[0] == lengths[1]\n new = lengths[0] - before\n # check if new fraction-flow per material per new flow was created\n assert FractionFlow.objects.count() == \\\n new * self.composition.fractions.count()\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_a2a_error)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.a2a_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_a2a_self_ref)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.a2a_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST",
"def link_list_change(sv):\r\n for name in sv.Object_list: # use list of objects\r\n nod=sv.Object[name]\r\n if nod.isfunction: # either a \"dict\" or a user defined function \r\n chg=Change+Obr+name+Cbr\r\n if find_tree(sv, (Change, (name, None, None), None)): # look for change(list)\r\n add_object(sv, chg) # create change(list) object\r\n clau=((Plus, (chg, None, None), (str(Change_time)+\"s\", None, None)),(Faux, None, None))\r\n if not clau in sv.Object[chg].clauses: # clause to reset change\r\n sv.Object[chg].clauses+=[clau]\r\n for block in nod.arguments:\r\n clau=((Change, (name+Obr+block+Cbr, None, None), None),(Vrai, None, None)) # link change\r\n if not clau in sv.Object[chg].clauses:\r\n sv.Object[chg].clauses+=[clau]",
"def addChange(change):",
"def addChange(change):",
"def read_profile_change(self):\n from itertools import repeat\n\n self.ID_NUM_CHANGE_DES = kpi_from_db_config.ID_NUM_CHANGE_DES\n self.ID_NUM_CHANGE_AVATAR = kpi_from_db_config.ID_NUM_CHANGE_AVATAR\n self.ID_NUM_CHANGE_LOC = kpi_from_db_config.ID_NUM_CHANGE_LOC\n self.ID_NUM_CHANGE_NAME = kpi_from_db_config.ID_NUM_CHANGE_NAME\n self.ID_NUM_CHANGE_URL = kpi_from_db_config.ID_NUM_CHANGE_URL\n self.ID_NUM_CHANGE_ANY = kpi_from_db_config.ID_NUM_CHANGE_ANY\n\n list_id = [self.ID_NUM_CHANGE_DES, \n self.ID_NUM_CHANGE_AVATAR, \n self.ID_NUM_CHANGE_LOC, \n self.ID_NUM_CHANGE_NAME, \n self.ID_NUM_CHANGE_URL, \n self.ID_NUM_CHANGE_ANY]\n list_result = [[] for i in repeat(None,len(list_id))]\n\n for i in range(len(list_id)):\n self.cursor.execute('''\n SELECT value\n FROM public.kpi_report\n WHERE id = %s\n ORDER BY created_at DESC\n LIMIT 2\n ''', [list_id[i]])\n rows_count = self.cursor.rowcount\n \n if (rows_count == 2): # 2 is LIMIT from the query\n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n elif (rows_count == 1): # Change rows_count > 0 and rows_count < Number of limit\n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n list_result[i] = list_result[i] + [0] \n else:\n list_result[i] = [0] * 2\n\n\n# print \"TESTING .... {}\".format(list_result)\n return list_result"
] |
[
"0.5816827",
"0.5687157",
"0.56473476",
"0.5625761",
"0.5434474",
"0.53927106",
"0.5390141",
"0.53204507",
"0.5286606",
"0.52333415",
"0.51879084",
"0.5183084",
"0.51375544",
"0.51019603",
"0.5101592",
"0.50638294",
"0.5050282",
"0.50388134",
"0.49663755",
"0.49251467",
"0.48931143",
"0.48764765",
"0.48744822",
"0.48566172",
"0.4850075",
"0.48499915",
"0.48424527",
"0.4832233",
"0.4832233",
"0.48230708"
] |
0.6802877
|
0
|
A naive means of verifying that the alias of the current pillow iteration is matched. If we go fancier with routing and multi-index aliases due to index splitting, this will need to be revisited.
|
def check_alias(self):
es = self.get_es()
aliased_indexes = es[self.es_alias].get('_aliases')
return aliased_indexes.keys()
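As a rough illustration only, the same check can be expressed directly against the Elasticsearch REST API with requests; the cluster URL and alias name are assumptions, and the GET _alias response is keyed by index name.
import requests

def indices_for_alias(base_url, alias):
    # Return the indices an alias currently points at, e.g. the single
    # aliased index a pillow expects. base_url and alias are placeholders.
    resp = requests.get("%s/_alias/%s" % (base_url, alias))
    resp.raise_for_status()
    # Response shape: {"my_index_v3": {"aliases": {"my_alias": {}}}, ...}
    return list(resp.json().keys())

# Hypothetical usage: indices_for_alias("http://localhost:9200", "hqcases")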
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def alias_exists(self, alias):\n req = requests.head(\n urljoin(self.base_url, '_alias/{0}'.format(alias)),\n verify=self.verify_certs)\n return req.status_code == 200",
"def is_aliasable(self, *args):\n return _ida_hexrays.vdloc_t_is_aliasable(self, *args)",
"def altloc_match(self, other: AtomKey) -> bool:\n ...",
"def resolveAlias(self, alias):",
"def test_alias():\n assert getattr(config, \"_is_affirmative\", None) is not None",
"def assume_alias(self):\n\n es = self.get_es()\n if es.head(self.es_alias):\n #remove all existing aliases - this is destructive and could be harmful, but for current\n #uses, it is legal - in a more delicate routing arrangement, a configuration file of\n # some sort should be in use.\n alias_indices = es[self.es_alias].get('_status')['indices'].keys()\n\n remove_actions = [{\"remove\": {\"index\": x, \"alias\": self.es_alias}} for x in\n alias_indices]\n remove_data = {\"actions\": remove_actions}\n es.post('_aliases', data=remove_data)\n #now reapply HEAD/master index\n es.post('_aliases', data={\"actions\": [{\"add\":\n {\"index\": self.es_index,\n \"alias\": self.es_alias}}]})",
"def test_useless_alias():\n with pytest.raises(ValueError, match='duplicate'):\n alias('name', ('name',))",
"def is_alias(cls, alias_string):\n return ':=' in alias_string",
"def is_aliasable(self, *args):\n return _ida_hexrays.lvar_t_is_aliasable(self, *args)",
"def _find_alias(line, aliases):\n for alias in aliases:\n if _soft_in(line, alias):\n return _soft_idx(line, alias)",
"def test_sample_aliases(self):\n self.app = self.make_app(argv = ['report', 'project_status', 'J.Doe_00_03', '--debug'],extensions=['scilifelab.pm.ext.ext_couchdb'])\n handler.register(DeliveryReportController)\n self._run_app()\n data = ast.literal_eval(self.app._output_data['debug'].getvalue())\n # # This should fail since P003_101_index6 != 3_index6\n # self.assertEqual(len(data['table']), 2)\n\n self.app = self.make_app(argv = ['report', 'project_status', 'J.Doe_00_03', '--sample_alias', \"{'P003_101_index6':'3_index6'}\", '--debug'],extensions=['scilifelab.pm.ext.ext_couchdb'])\n handler.register(DeliveryReportController)\n self._run_app()\n data = ast.literal_eval(self.app._output_data['debug'].getvalue())\n samples = [x[0] for x in data['table']]\n self.assertIn(\"3_index6\", samples)",
"def test_raises_for_missing_alias():\n with pytest.raises(AttributeError):\n alias('new_alias', ('first', 'second'))(_HasAliasedProp)",
"def test_aliases_helper(self):\n self.t.config(\"alias.foo\", \"bar\")\n code, out, err = self.t(\"_aliases\")\n self.assertIn(\"foo\", out)",
"def test_raises_for_existing_alias():\n with pytest.raises(AttributeError):\n alias('existing', ('first', 'second'))(_HasAliasedProp)",
"def get_alias(self):",
"def test_output_alias_generation(self):\n print(\"WARNING: There is a random element to test_output_alias_generation\\n\\\n so it is likely to occasionally fail, nonetheless if the alias_generation\\n\\\n method is working correctly failures will be very rare (testing at alpha=0.01\\n\\\n implies we should expect a Type I error about 1% of the time).\")\n\n # Construct a ProbDistribution\n words = get_words('../corpus/thus.txt')\n word_dist = sample2dist(words)\n VA_words = VoseAlias(word_dist)\n\n # Generate sample and calculate the number of observations for a randomly selected word\n word = random.choice(list(VA_words.dist))\n\n n = 1000\n\n t = 0\n for i in range(n):\n if VA_words.alias_generation() == word:\n t += 1\n\n # Compute the p-value\n p_original = VA_words.dist[word]\n\n p_low = math.fsum([self.dbinom(x, n, p_original)\n for x in range(t, n+1)])\n p_high = math.fsum([self.dbinom(x, n, p_original) for x in range(t+1)])\n\n p = 2*min(p_low, p_high)\n\n # Do not accept H_0 if p <= alpha\n alpha = 0.01\n self.assertGreater(p, alpha)",
"def test_aliases(self):\n field = self.base_field\n self.assertFalse(field.get('aliases'))\n self.assertEqual([], SchemaField(field).aliases)\n field['aliases'] = []\n self.assertEqual([], SchemaField(field).aliases)\n field['aliases'] = ['alias1', 'Alias2']\n sch = SchemaField(field)\n self.assertEqual(field['aliases'], sch.aliases)\n # test some related method\n self.assertTrue(sch.has_alias('alias1'))\n self.assertTrue(sch.has_alias('Alias2'))\n self.assertFalse(sch.has_alias('alias2'))\n self.assertTrue(sch.has_alias('alias2', icase=True))\n self.assertFalse(sch.has_alias(field['name']))\n self.assertTrue(sch.has_name_or_alias(field['name'], 'aaaa'))\n self.assertFalse(sch.has_name_or_alias(field['name'].lower(), 'aaaa'))\n self.assertTrue(sch.has_name_or_alias(field['name'].lower(), 'aaaa', icase=True))\n self.assertFalse(sch.has_name_or_alias('aaaa', 'alias2'))\n self.assertTrue(sch.has_name_or_alias('aaaa', 'alias2', icase=True))",
"def _lint_references_and_aliases(\n self, aliases, references, col_aliases, using_cols, parent_select\n ):\n # Are any of the aliases the same?\n for a1, a2 in itertools.combinations(aliases, 2):\n # Compare the strings\n if a1[0] == a2[0] and a1[0]:\n # If there are any, then the rest of the code\n # won't make sense so just return here.\n return [\n LintResult(\n # Reference the element, not the string.\n anchor=a2[1],\n description=(\n \"Duplicate table alias {0!r}. Table \"\n \"aliases should be unique.\"\n ).format(a2[0]),\n )\n ]\n return None",
"def testAliases(self):\n Options().parseOptions(\n [\n \"--maildirdbmdomain\",\n \"example.com=example.com\",\n \"--aliases\",\n self.aliasFilename,\n ]\n )",
"def fuzzy_match(self, other):\n magic, fuzzy = False, False\n try:\n magic = self.alias == other.magic\n except AttributeError:\n pass\n\n if '.' in self.alias:\n major = self.alias.split('.')[0]\n fuzzy = major == other.alias \n return magic or fuzzy",
"def find_alias(self, alias):\n if hasattr(self, '_logger'):\n self._logger.debug(alias)\n self.check_alias(alias, True)\n path = self.alias_path_map[alias]\n path = self._absolute_path(path)\n self.check_path(path, True)\n nid = self.path_nid_map[path]\n return self.get_node_wrapper(nid)",
"def getAliases(self):",
"def test_gen_destination_for_alias_invalid_alias(self):\n self.assertIsNone(db.gen_destination_for_alias(self.dbm, \"twitter\"))",
"def test_override_cache_aliasing(self):\n i, j, k, l = dimify('i j k l')\n a = symbol(name='a', dimensions=(i, j, k, l), value=2.,\n mode='indexed').base.function\n a1 = symbol(name='a', dimensions=(i, j, k, l), value=3.,\n mode='indexed').base.function\n a2 = symbol(name='a', dimensions=(i, j, k, l), value=4.,\n mode='indexed').base.function\n eqn = Eq(a, a+3)\n op = Operator(eqn)\n op()\n op(a=a1)\n op(a=a2)\n shape = [d.size for d in [i, j, k, l]]\n\n assert(np.allclose(a.data, np.zeros(shape) + 5))\n assert(np.allclose(a1.data, np.zeros(shape) + 6))\n assert(np.allclose(a2.data, np.zeros(shape) + 7))",
"def is_alias(email):\n return MAIL_ALIAS_REGEX.match(strip_domains(email))",
"def _find_anonymized_match(self, request):\n request.url = _anonymize_url(request.url, ip_lookup)\n return _BASE_FIND_MATCH(self, request)",
"def isalias(tokens, x, alias):\n\n prior = Token.get(tokens, x - 1)\n token = tokens[x]\n\n # True if prior token is not a separator, grouping token or distinct token and current token is either a column token or quoted token\n return (\n alias\n and x > 0\n and not Token.isseparator(prior)\n and not Token.isgroupstart(prior)\n and not Token.isdistinct(prior)\n and (Token.iscolumn(token) or Token.isquoted(token))\n )",
"def test_TEB_aliases(self):\n teb = TEBpage(mocked=True)\n assert teb is not None\n\n # see if aliases exist\n found = teb.brewery_by_alias(\"TEB\")\n assert found == \"Twin Elephant\"\n\n found = teb.brewery_by_alias(\"Twin Elephant Brewing\")\n assert found == \"Twin Elephant\"\n\n found = teb.brewery_by_alias(\"Twin Elephant\")\n assert found == \"Twin Elephant\"",
"def with_alias(self):\n return self.node.alias",
"def test_alias_with_implicit_complex_filter(self):\n\n # Setup alias with simple filter string\n self.t.config(\"alias.hometoday\", \"project:Home and due:today minimal\")\n\n # Setup tasks for projects Home and Work\n self.t(\"add project:Home due:today Home urgent task\")\n self.t(\"add project:Home Home task\")\n self.t(\"add project:Work due:today Work task\")\n\n # Check that hometoday command outputs the \"Home urgent task\"\n code, out, err = self.t(\"hometoday\")\n self.assertIn(\"Home urgent task\", out,\n msg=\"task hometoday -> project:Home and due:today minimal > \"\n \"Home urgent task\")\n\n # It should not output \"Home task\", as that one is not due:today\n self.assertNotIn(\"Home task\", out,\n msg=\"task hometoday -> project:Home and due:today minimal > \"\n \"Home task\")\n\n # It should not output \"Work task\" either, it has entirely wrong\n # project\n self.assertNotIn(\"Work task\", out,\n msg=\"task hometoday -> project:Home and due:today minimal > \"\n \"Work task\")"
] |
[
"0.6139494",
"0.5927287",
"0.58235115",
"0.580604",
"0.580179",
"0.5758179",
"0.5729632",
"0.56602716",
"0.5649573",
"0.5600212",
"0.5569809",
"0.5567751",
"0.5552736",
"0.5540553",
"0.55247414",
"0.54951197",
"0.5476063",
"0.54035634",
"0.5327102",
"0.5275424",
"0.52657884",
"0.5252317",
"0.52491176",
"0.51975363",
"0.51706564",
"0.5167607",
"0.5164827",
"0.5141003",
"0.5128905",
"0.5121821"
] |
0.6047442
|
1
|
For this instance, have the index that this pillow represents receive the alias itself. This presents a management issue later if we route out additional indexes/aliases, so we will want to automate this carefully. But for now, it's 1 alias to 1 index. Routing will need a refactor anyway.
|
def assume_alias(self):
es = self.get_es()
if es.head(self.es_alias):
#remove all existing aliases - this is destructive and could be harmful, but for current
#uses, it is legal - in a more delicate routing arrangement, a configuration file of
# some sort should be in use.
alias_indices = es[self.es_alias].get('_status')['indices'].keys()
remove_actions = [{"remove": {"index": x, "alias": self.es_alias}} for x in
alias_indices]
remove_data = {"actions": remove_actions}
es.post('_aliases', data=remove_data)
#now reapply HEAD/master index
es.post('_aliases', data={"actions": [{"add":
{"index": self.es_index,
"alias": self.es_alias}}]})
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_alias(self, alias, index):\n if index >= len(self._datasets):\n raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))\n self._aliases[alias] = index",
"def get_indexes_from_alias(self, alias):\n return requests.get(\n urljoin(self.base_url, '*/_alias/{0}'.format(alias)),\n verify=self.verify_certs).json()",
"def get_aliased_index(client):\n try:\n result = client.conn.indices.get_alias(name=client.index)\n except elasticsearch.exceptions.NotFoundError: # no alias with that name\n return None\n if len(result) > 1:\n raise RuntimeError(\n \"We don't support managing aliases that \"\n \"point to multiple indices at the moment!\"\n )\n return list(result.keys())[0]",
"def append(self):\n target_index = get_index_from_alias(self.alias_name)\n if not target_index:\n self.replace()\n else:\n self.index_all(target_index)",
"def get_alias(self):",
"def get_by_alias(self, alias):\n with self.session as session:\n try:\n record = (\n session.query(IndexRecord)\n .filter(IndexRecord.aliases.any(name=alias)).one()\n )\n except NoResultFound:\n raise NoRecordFound('no record found')\n except MultipleResultsFound:\n raise MultipleRecordsFound('multiple records found')\n return record.to_document_dict()",
"def get_index_from_alias(alias_name, index_client=None):\n index_client = index_client or indices_client()\n if not index_client.exists_alias(name=alias_name):\n return None\n return list(index_client.get_alias(name=alias_name).keys())[0]",
"def getAliases(self):",
"def alias(self):\n return self._alias",
"def alias(self):\n return self._alias",
"def alias(self):\n\n return self._alias",
"def add_alias(self, alias):\n if alias != self.name:\n self.alias = alias",
"def with_alias(self):\n return self.node.alias",
"def replace(self):\n with zero_downtime_index(self.alias_name, self.index_config()) as target_index:\n self.index_all(target_index)",
"def resolveAlias(self, alias):",
"def update_aliased_index(client, new_target):\n old_target = get_aliased_index(client)\n if old_target is None:\n raise RuntimeError(\n \"Cannot update aliased index for index that is not already aliased.\"\n )\n\n client.conn.indices.update_aliases(\n body={\n \"actions\": [\n {\"add\": {\"index\": new_target, \"alias\": client.index}},\n {\"remove\": {\"index\": old_target, \"alias\": client.index}},\n ]\n }\n )",
"def reindex_subcomponent_taxa():\n pass",
"def set_default_by_alias(self, alias):\n if alias not in self._aliases:\n raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias))\n\n self._default_index = self._aliases[alias]",
"def getAlias(self):\n pass;",
"def check_alias(self):\n es = self.get_es()\n aliased_indexes = es[self.es_alias].get('_aliases')\n return aliased_indexes.keys()",
"def as_(self, alias):\n return AliasedQuery(self, alias)",
"def index_item(self, item):\n index_name = self.settings['ELASTICSEARCH_INDEX']\n index_suffix_format = self.settings.get(\n 'ELASTICSEARCH_INDEX_DATE_FORMAT', None)\n\n if index_suffix_format:\n index_name += \"-\" + datetime.strftime(datetime.now(),\n index_suffix_format)\n\n if isinstance(item, DocumentItem):\n index_action = {\n '_index': index_name,\n '_type': self.settings['ELASTICSEARCH_TYPE'],\n '_id': hashlib.sha1(item['url']).hexdigest(),\n '_source': dict(item)\n }\n elif isinstance(item, LinkItem):\n index_action = {\n \"_op_type\": \"update\",\n \"_index\": index_name,\n \"_type\": self.settings['ELASTICSEARCH_TYPE'],\n \"_id\": hashlib.sha1(item['target']).hexdigest(),\n \"script\": {\n \"inline\": \"\"\"ctx._source.anchors = ctx._source.anchors ?\n (ctx._source.anchors + [anchor]).unique{it}\n : [anchor]\"\"\",\n \"params\" : {\n \"anchor\" : item[\"anchor\"]\n }\n },\n \"upsert\": {\n \"anchors\": [item[\"anchor\"]],\n \"url\": item['target'],\n \"domain\": urlparse(item['target']).hostname,\n \"updated_on\": datetime.now().strftime(\n \"%Y-%m-%dT%H:%M:%S\")\n }\n }\n elif isinstance(item, AuthorityItem):\n index_action = {\n \"_op_type\": \"update\",\n \"_index\": index_name,\n \"_type\": self.settings['ELASTICSEARCH_TYPE'],\n \"_id\": item['url'],\n \"doc\": {\n \"authority\": item['score']\n }\n }\n else:\n return\n\n self.items_buffer.append(index_action)\n\n if len(self.items_buffer) >= \\\n self.settings.get('ELASTICSEARCH_BUFFER_LENGTH', 500):\n self.send_items()\n self.items_buffer = []",
"def _resolve_index(self, cls):\n # If we have just a string, it's a simple index\n if isinstance(self.index, basestring):\n return self._resolve_name(cls, self.index)\n\n # Otherwise it must be an iterable\n for i in xrange(len(self.index)):\n # Of 2-tuples\n pair = self.index[i]\n if len(pair) != 2:\n raise TypeError(\"Invalid index: {!r}\".format(self.index))\n # Where the first is the key, and the second the direction\n self.index[i] = (self._resolve_name(cls, pair[0]), pair[1])\n\n return self.index",
"def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()",
"def contribute_to_class(self, cls):\n if self.db_index:\n new_index = (self.name,)\n if new_index not in cls._meta.indexes:\n cls._meta.indexes = tuple(list(cls._meta.indexes) + [new_index])",
"def index_together(self):\n return self._index_together",
"def get_aliases_for_did(self, did):\n with self.session as session:\n query = (\n session.query(IndexRecordAlias)\n .filter(IndexRecordAlias.did == did)\n )\n return [i.name for i in query]",
"def get_index(self, *args, **dargs):\n pass",
"def index(self):\n if hasattr(self, '_m_index'):\n return self._m_index if hasattr(self, '_m_index') else None\n\n self._m_index = (self.index_separate if self.is_index_separate else self.index_in_tag)\n return self._m_index if hasattr(self, '_m_index') else None",
"def alias(ctx, search, backend):\n projects = ctx.obj['projects_db'].search(search, active_only=True)\n projects = sorted(projects, key=lambda project: project.name)\n\n if len(projects) == 0:\n ctx.obj['view'].msg(\n \"No active project matches your search string '%s'.\" %\n ''.join(search)\n )\n return\n\n ctx.obj['view'].projects_list(projects, True)\n\n try:\n number = ctx.obj['view'].select_project(projects)\n except CancelException:\n return\n\n project = projects[number]\n ctx.obj['view'].project_with_activities(project, numbered_activities=True)\n\n try:\n number = ctx.obj['view'].select_activity(project.activities)\n except CancelException:\n return\n\n retry = True\n while retry:\n try:\n alias = ctx.obj['view'].select_alias()\n except CancelException:\n return\n\n if alias in aliases_database:\n mapping = aliases_database[alias]\n overwrite = ctx.obj['view'].overwrite_alias(alias, mapping)\n\n if not overwrite:\n return\n elif overwrite:\n retry = False\n # User chose \"retry\"\n else:\n retry = True\n else:\n retry = False\n\n activity = project.activities[number]\n mapping = Mapping(mapping=(project.id, activity.id),\n backend=project.backend)\n ctx.obj['settings'].add_alias(alias, mapping)\n ctx.obj['settings'].write_config()\n\n ctx.obj['view'].alias_added(alias, (project.id, activity.id))"
] |
[
"0.6676081",
"0.66249245",
"0.6387008",
"0.62786865",
"0.62055886",
"0.61112547",
"0.60653275",
"0.6054649",
"0.60334975",
"0.60334975",
"0.59681004",
"0.5964024",
"0.5911201",
"0.5893658",
"0.5892665",
"0.58747756",
"0.58730376",
"0.5786041",
"0.5774079",
"0.5764505",
"0.5752084",
"0.57300353",
"0.57230985",
"0.5589385",
"0.5573186",
"0.5561246",
"0.55425113",
"0.5536724",
"0.55233544",
"0.55163795"
] |
0.7307731
|
0
|
Verify whether the server has indexed this type. We can assume at startup that the mapping from the server is loaded, so the in-memory view will be up to date.
|
def type_exists(self, doc_dict, server=False):
es = self.get_es()
type_string = self.get_type_string(doc_dict)
if server and self.online:
type_path = "%(index)s/%(type_string)s" % (
{
'index': self.es_index,
'type_string': type_string
})
head_result = es.head(type_path)
self.seen_types[type_string] = head_result
return head_result
else:
return type_string in self.seen_types
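A minimal sketch of the same memoized HEAD check using requests; note that mapping types were removed in Elasticsearch 7+, so the index/type URL is only meaningful against the older clusters this code targets, and all names below are placeholders.
import requests

_seen_types = {}

def type_exists(base_url, index, doc_type, check_server=False):
    # HEAD the index/type path and memoize the answer, mirroring the
    # server vs. in-memory split above. Only valid for pre-7.x mapping types.
    key = "%s/%s" % (index, doc_type)
    if check_server:
        _seen_types[key] = requests.head("%s/%s" % (base_url, key)).status_code == 200
        return _seen_types[key]
    return key in _seen_types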
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def has_index(self):\n return self.index is not None",
"def check_type(self):\n pass\n\n indice = client.IndicesClient(self.es)\n print(self.es_main_index)\n if indice.exists_type(index=self.es_main_index,\n doc_type=self.es_main_type):\n print('Scenario %s already exists, deleting the current one'\n % self.es_main_type)\n indice.delete_mapping(index=self.es_main_index,\n doc_type=self.es_main_type)\n\n print('Waiting for 10 seconds to ensure the current type is ' +\n 'deleted.')\n time.sleep(10)\n\n return",
"def checkMap(self):\n return True",
"def does_exist(self, index):\n if index in self.map:\n return True\n return False",
"def is_indexed(self):\n return self._index is not UnindexedComponent_set",
"def __contains__(self, record):\n with self.session as session:\n query = session.query(IndexRecord)\n query = query.filter(IndexRecord.did == record)\n\n return query.exists()",
"def is_indexed(self, url):\n query = self.con.execute(\"select rowid from urllist where url='%s'\" % url).fetchone()\n if query is not None:\n # Check if it actually has been crawled\n crawled = self.con.execute('select * from wordlocation where urlid=%d'\n % query[0]).fetchone()\n if crawled is not None:\n return True\n return False",
"def index_exists(self, index):\n req = requests.head(\n urljoin(self.base_url, '{0}'.format(index)),\n verify=self.verify_certs)\n return req.status_code == 200",
"def has(self, index):\n raise NotImplementedError()",
"def is_indexed(self):\r\n return self._indexed",
"def IndexExists(self, arg0: 'unsigned long long') -> \"bool\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF3GQEULLULLBBT_IndexExists(self, arg0)",
"def IndexExists(self, arg0: 'unsigned long long') -> \"bool\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF2GQEULLULLBBT_IndexExists(self, arg0)",
"def data_loaded_check(self):\n return True",
"def isKnown(self, index):\n return self._items.has_key(index)",
"async def _exists(self, key):\n return key in SimpleMemoryBackend._cache",
"def __contains__(self, item):\n return item in self._index_map",
"def __load_index(self):\n import os\n if not os.path.exists(self.__dir):\n filename=os.path.join(MY_STORE,self.__dir,INTERNAL_DB_FILE)\n else:\n filename=os.path.join(self.__dir,INTERNAL_DB_FILE)\n try:\n self.__handle = open(filename,self.__mode)\n except IOError, e:\n print 'Cannot create status file. Ensure you have permission to write'\n return False\n\n fcntl.flock(self.__handle.fileno(), fcntl.LOCK_EX)\n internal_db = dbm.open(filename, 'c', 0644 )\n self.__storage = shelve.Shelf(internal_db)\n return True",
"def has_index(self):\n\n if self._check_idx and self._index:\n return self._check_idx",
"def is_loaded(self):\n return self.known_stations != {}",
"def is_index(self, key):\n if key not in self:\n return False\n match = key.base.label if self[key].is_tensor else key\n for i in self.extract(key, readby=True):\n for e in retrieve_indexed(i):\n if any(match in idx.free_symbols for idx in e.indices):\n return True\n return False",
"def exist(self):",
"def _check_idx(self, url):\n if not url.endswith('.idx'):\n url += '.idx'\n return requests.head(url).ok",
"def _checkIndex(self, index):\n # OPT: lets not reuse isKnown, to don't incure 1 more function\n # call\n if not self._items.has_key(index):\n raise KeyError, \\\n \"%s of %s has no key '%s' registered\" \\\n % (self.__class__.__name__,\n self.__owner.__class__.__name__,\n index)",
"def exists(self):\n try:\n self.world.find(self.ehandle)\n except KeyError:\n return False\n else:\n return True",
"def __len__(self):\n return len(self._mapping)",
"def exists(self):\n return True",
"def exists(self):\n return True",
"def __contains__(self, key):\n return key in self._mappings.keys()",
"def test_indexable(self):\n # verify ----------------------\n try:\n self.collection[0]\n except TypeError:\n msg = \"'Collection' object does not support indexing\"\n self.fail(msg)\n except IndexError:\n pass",
"def _check_indexes(cls, document: dict) -> bool:\n criteria = [\n field_name\n for field_name in cls._get_index_fields(IndexType.Other, document, \"\")\n ]\n unique_criteria = [\n field_name\n for field_name in cls._get_index_fields(IndexType.Unique, document, \"\")\n ]\n index_name = f\"idx{cls.__collection_name__}\"\n unique_index_name = f\"uidx{cls.__collection_name__}\"\n indexes = cls.__collection__.list_indexes()\n cls.logger.debug(f\"Checking existing indexes: {indexes}\")\n indexes = {\n index[\"name\"]: index[\"key\"].keys()\n for index in indexes\n if \"name\" in index and \"key\" in index\n }\n return (\n (criteria and index_name not in indexes)\n or (not criteria and index_name in indexes)\n or (criteria and index_name in indexes and criteria != indexes[index_name])\n or (unique_criteria and unique_index_name not in indexes)\n or (not unique_criteria and unique_index_name in indexes)\n or (\n unique_criteria\n and unique_index_name in indexes\n and unique_criteria != indexes[unique_index_name]\n )\n )"
] |
[
"0.63144267",
"0.6157267",
"0.60441124",
"0.6003592",
"0.59744084",
"0.58695626",
"0.5794515",
"0.57553786",
"0.5698403",
"0.5676481",
"0.56584233",
"0.5648552",
"0.56066954",
"0.560276",
"0.55641127",
"0.55522156",
"0.5533276",
"0.55121136",
"0.55082977",
"0.5486367",
"0.5466071",
"0.5462879",
"0.5431242",
"0.54277074",
"0.53968024",
"0.5392012",
"0.5392012",
"0.53851324",
"0.53541106",
"0.53452474"
] |
0.67840266
|
0
|
Performs a DNS lookup on an IP address or hostname.
|
def handle_dns(bot, ievent):
if not ievent.args:
ievent.missing('<host | ip>')
else:
is_a = None
result = None
# If we support IPv6 ...
if socket.has_ipv6:
# ... then check if this is an IPv6 ip
try:
socket.inet_pton(socket.AF_INET6, ievent.args[0])
is_a = 'ipv6'
except socket.error:
pass
# Ah not an IPv6 ip ...
if not is_a:
# ... maybe IPv4 ?
try:
socket.inet_pton(socket.AF_INET, ievent.args[0])
is_a = 'ipv4'
except socket.error:
pass
# Not an ip, must be a hostname then
if not is_a:
is_a = 'host'
# If it was an ip ...
if is_a in ['ipv4', 'ipv6']:
try:
# ... try to resolve it
result = socket.gethostbyaddr(ievent.args[0])
if result[1]:
result = 'primary: %s, aliases: %s' % \
(result[0], ', '.join(result[1]))
else:
result = result[0]
ievent.reply('%s ip %s resolves to %s' % \
(is_a, ievent.args[0], result))
except Exception, e:
ievent.reply('could not resolve %s address %s: %s' % \
(is_a, ievent.args[0], e[1]))
# Oh it's a host, lets resolve that
elif is_a == 'host':
try:
result = []
for info in socket.getaddrinfo(ievent.args[0], None):
if info[0] in [socket.AF_INET, socket.AF_INET6] and \
info[1] == socket.SOCK_STREAM:
ip = info[4][0]
if not ip in result:
result.append(ip)
if not result:
ievent.reply('could not resolve hostname %s: not found' % \
ievent.args[0])
else:
ievent.reply('%s resolves to: %s' % (ievent.args[0], \
', '.join(result)))
except Exception, e:
ievent.reply('could not resolve hostname %s: %s' % \
(ievent.args[0], e[1]))
else:
ievent.reply('lookup failed, no valid data found')
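The core resolution logic above boils down to a few standard-library calls; a condensed Python 3 sketch follows with the bot/IRC plumbing stripped out, so only the socket behaviour is shown.
import socket

def resolve(target):
    # Decide whether target is an IPv4/IPv6 literal using inet_pton.
    is_ip = False
    for family in (socket.AF_INET6, socket.AF_INET):
        try:
            socket.inet_pton(family, target)
            is_ip = True
            break
        except OSError:
            continue
    if is_ip:
        # Reverse lookup: primary name plus any aliases (raises if no PTR record).
        primary, aliases, _ = socket.gethostbyaddr(target)
        suffix = (" (aliases: %s)" % ", ".join(aliases)) if aliases else ""
        return "%s resolves to %s%s" % (target, primary, suffix)
    # Forward lookup: collect the unique addresses behind the hostname.
    addrs = sorted({info[4][0] for info in socket.getaddrinfo(target, None)
                    if info[1] == socket.SOCK_STREAM})
    return "%s resolves to: %s" % (target, ", ".join(addrs))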
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def nslookup(name, dnsserver='', prevent_unqualified_dns=True):\n # if it looks like an IP address, don't try to resolve it\n if digitsAndDots.match(name):\n return (0, \"OK\")\n if name != \"localhost\" and prevent_unqualified_dns:\n name = name + \".\" # prevent unqualified DNS lookups\n\n # TODO: we really want to call something along the lines of\n # google2/io/safe_gethostbyname, this will require some python trickery.\n\n # If dnsserver is an empty string, then mkarg() will escape it with\n # quotes and the host call will try to use \"''\" as a dns server and fail\n # So call mkarg only if actually given a non-empty-string dnsserver\n if not dnsserver:\n dnsserver = ''\n if dnsserver != '':\n dnsserver = commands.mkarg(dnsserver)\n\n executestring = commands.mkarg(\n \"host -t a %s %s 2>/dev/null | grep has\\ address | wc -l\"\n % (commands.mkarg(name), dnsserver))\n\n (stat, out) = commands.getstatusoutput('alarm 5 sh -c %s' % executestring)\n if stat != 0:\n return (1, \"TIMEOUT\") # E.g. DNS server does not respond\n\n if int(out) == 0:\n return (2, \"cannot resolve\")\n\n return (0, \"OK\")",
"def dns_lookup(self, hostname, aux):\n\n resolver = Resolver()\n\n # If the host doesn't have the A record (IPv4),\n # trying to find its AAAA record (IPv6).\n try:\n addr = resolver.query(hostname, \"A\")[0] # <---+\n ver = 4 # |\n except Exception as e: # From the dnspython lib. --------+\n try: # |\n addr = resolver.query(hostname, \"AAAA\")[0] # <---+\n ver = 6\n except Exception as e:\n addr = ver = aux._ERR_PREFIX\n\n return (addr, ver)",
"def dnslookup(url) -> 'text': \n try:\n hn = socket.gethostbyaddr(url)[0] \n except socket.error as msg: \n hn = 'nohost'\n return hn",
"def gethostbyname(self, hostname, dnsserv='192.112.36.4'):\n ipaddrlist = []\n cnames = []\n temp = []\n if(self.caching):\n rcache = RecordCache(self.ttl)\n rcord = rcache.lookup(hostname, Type.ANY, Class.IN)\n if(rcord):\n for rec in rcord:\n if rec.type_ == Type.A:\n arec = rec.rdata\n ipaddrlist.append(arec.address)\n elif rec.type_ == Type.CNAME:\n crec = rec.rdata\n cnames.append(crec.cname)\n if ipaddrlist:\n return hostname, cnames, ipaddrlist\n elif cnames:\n return self.gethostbyname(cnames[0], dnsserv)\n \n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(self.timeout)\n\n # Create and send query\n question = Question(Name(str(hostname)), Type.A, Class.IN)\n header = Header(9001, 0, 1, 0, 0, 0)\n header.qr = 0\n header.opcode = 0\n header.rd = 1\n query = Message(header, [question])\n sock.sendto(query.to_bytes(), (str(dnsserv), 53))\n\n # Receive response\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n print(\"Number of answers: \" +str(len(response.answers)))\n print(\"Number of authorities: \" + str(len(response.authorities)))\n print(\"Number of additionals: \" + str(len(response.additionals)))\n\n # Get data\n aliaslist = cnames\n ipaddrlist = []\n dnslist = []\n \n while response.answers:\n for answer in response.answers:\n if answer.type_ == Type.A:\n print(\"found A RR\")\n if(self.caching):\n rcache.add_record(answer)\n ipaddrlist.append(answer.rdata.address)\n if answer.type_ == Type.CNAME:\n aliaslist.append(answer.rdata.cname)\n if answer.type_ == Type.NS:\n dnslist.append(answer.rdata.nsdname)\n if ipaddrlist:\n return hostname, aliaslist, ipaddrlist\n elif aliaslist:\n question = Question(Name(aliaslist[0]), Type.A, Class.IN)\n query = Message(header, [question])\n sock.sendto(query.to_bytes(), (dnsserv, 53))\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n elif dnslist:\n nsname = dnslist.pop()\n maybe_dnsserv = self.getnsaddr(nsname, response.additionals)\n if maybe_dnsserv:\n dnsserv = maybe_dnsserv\n else:\n pass\n sock.sendto(query.to_bytes(), (dnsserv, 53))\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n else:\n break\n\n if response.authorities:\n for authority in response.authorities:\n if authority.type_ != Type.NS:\n pass\n dnslist.append(authority.rdata.nsdname)\n while dnslist:\n nsname = dnslist.pop()\n maybe_next_dnsserv = self.getnsaddr(nsname, response.additionals)\n if maybe_next_dnsserv:\n next_dns_serv = maybe_next_dnsserv\n else:\n pass\n (hname, aliasl, ipaddrl) = self.gethostbyname(hostname, nsname)\n if ipaddrl:\n return hname, aliasl, ipaddrl",
"def google_dns_resolver(target):\n url = f\"https://dns.google/resolve?name={target}&type=A\"\n \n r = requests.get(url=url)\n if r.status_code != 200:\n return None\n else:\n for result in json.loads(r.text)[\"Answer\"]:\n try:\n if not (\n ipaddress.IPv4Address(result[\"data\"]).is_private\n or ipaddress.IPv4Address(result[\"data\"]).is_loopback\n or ipaddress.IPv4Address(result[\"data\"]).is_link_local\n ):\n return result[\"data\"]\n else:\n continue\n except ipaddress.AddressValueError:\n continue\n # if the loop terminates without any result return None\n return None",
"def google_dns_resolver(target):\n url = f\"https://dns.google/resolve?name={target}&type=A\"\n \n r = requests.get(url=url)\n if r.status_code != 200:\n return None\n else:\n for result in json.loads(r.text)[\"Answer\"]:\n try:\n if not (\n ipaddress.IPv4Address(result[\"data\"]).is_private\n or ipaddress.IPv4Address(result[\"data\"]).is_loopback\n or ipaddress.IPv4Address(result[\"data\"]).is_link_local\n ):\n return result[\"data\"]\n else:\n continue\n except ipaddress.AddressValueError:\n continue\n # if the loop terminates without any result return None\n return None",
"def hostname_lookup(hostname):\n try:\n # The {host} must be resolved to an IP address; if this fails, this\n # will throw a socket.gaierror.\n host_address = gethostbyname(hostname)\n\n # Reset {host} to the resolved address.\n LOG.debug(\n 'Resolved hostname %s to IP address %s.', hostname, host_address\n )\n return host_address\n\n except gaierror:\n # The {host}-as-hostname did not resolve to an IP address.\n LOG.debug('Could not resolve hostname %s to an IP address.', hostname)\n return hostname",
"async def reverse_lookup(resolver, ip):\n result = (ip, \"\")\n allowed_chars = \"abcdefghijklmnopqrstuvwxyz0123456789-.\"\n log.info(\"Requesting PTR record for %s.\", ip)\n try:\n resp = await resolver.gethostbyaddr(ip)\n # Make sure records comply to NetBox and DNS expected format\n if all([bool(c.lower() in allowed_chars) for c in resp.name]):\n result = (ip, resp.name.lower())\n log.debug(\"PTR record for %s is '%s'.\", ip, result[1])\n else:\n log.debug(\n \"Invalid characters detected in PTR record '%s'. Nulling.\",\n resp.name\n )\n except aiodns.error.DNSError as err:\n log.info(\"Unable to find record for %s: %s\", ip, err.args[1])\n return result",
"def gethostbyname(self, hostname):\n \n timeout = 2 # the time waited for a response\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(timeout)\n \n hostname = hostname.rstrip(\".\") #framework can't handle \"\" or anything ending with a dot\n\n aliases = []\n addresses = []\n\n #1. See if the answer is in local information, and if so return it to the client.\n if self.caching:\n for alias in self.cache.lookup(hostname, Type.CNAME, Class.IN):\n aliases.append(alias.rdata.data)\n for address in self.cache.lookup(hostname, Type.A, Class.IN):\n addresses.append(address.rdata.data)\n \n if aliases != []:\n return hostname, aliases, addresses\n \n #2. Find the best servers to ask.\n if self.caching:\n nameservers = _cache_get_ns_ip(hostname)\n else:\n nameservers = self.root_servers\n\n #3. Send them queries until one returns a response.\n # Create and send query\n\n #4. Analyze the response, either:\n\n # a. if the response answers the question or contains a name\n # error, cache the data as well as returning it back to\n # the client.\n\n # b. if the response contains a better delegation to other\n # servers, cache the delegation information, and go to\n # step 2.\n\n # c. if the response shows a CNAME and that is not the\n # answer itself, cache the CNAME, change the SNAME to the\n # canonical name in the CNAME RR and go to step 1.\n\n # d. if the response shows a servers failure or other\n # bizarre contents, delete the server from the SLIST and\n # go back to step 3.\n \n return self.get_hostname_helper(sock, nameservers, hostname)",
"def _lv_dns_lookup(name):\n if dns is None:\n return _lv_pydns_lookup(name)\n resp = dns.resolver.query(name, \"srv\")\n if resp.response.flags & dns.flags.TC:\n resp = dns.resolver.query(name, \"srv\", tcp=True)\n return [(a.priority, a.weight, a.port, a.target.to_text(True)) for a in resp]",
"def get_dns_info(self, name_or_ip) :\n self._logger.debug(\"get_dns_info: entering with name_or_ip=%s\" % \\\n (name_or_ip))\n if not is_name(name_or_ip) : # check for matching ipaddress\n for hostname in afs.CONFIG.hosts :\n if name_or_ip in afs.CONFIG.hosts[hostname] :\n self._logger.debug(\"%s is hard-mapped to (%s,%s)\" % \\\n (name_or_ip, [hostname,],afs.CONFIG.hosts[hostname]))\n self._logger.debug(\"returning %s\" % ({ \"names\" : [hostname, ], \\\n \"ipaddrs\" : afs.CONFIG.hosts[hostname] }) )\n return { \"names\" : [hostname, ], \\\n \"ipaddrs\" : afs.CONFIG.hosts[hostname] }\n\n # is a hostname\n \n # hard-mapped, primary Hostname given \n if name_or_ip in afs.CONFIG.hosts.keys() :\n self._logger.debug(\"%s is hard-mapped to (%s,%s)\" % ( name_or_ip, \\\n [name_or_ip, ], afs.CONFIG.hosts[name_or_ip]))\n self._logger.debug(\"returning %s\" % ({\"names\" : [name_or_ip,], \"ipaddrs\" : \\\n afs.CONFIG.hosts[name_or_ip] }) )\n return {\"names\" : [name_or_ip,], \"ipaddrs\" : \\\n afs.CONFIG.hosts[name_or_ip] }\n\n \n # memory_cache \n if name_or_ip in self.memory_cache[\"dns_info\"] :\n self._logger.debug(\"%s in localcache hard-mapped (%s)\" % \\\n (name_or_ip,self.memory_cache[\"dns_info\"][name_or_ip] ))\n self._logger.debug(\"returning %s\" % (self.memory_cache[\"dns_info\"][name_or_ip]))\n return self.memory_cache[\"dns_info\"][name_or_ip]\n \n for srv in self.memory_cache[\"dns_info\"] :\n if name_or_ip in self.memory_cache[\"dns_info\"][srv][\"names\"] :\n self._logger.debug(\"%s is hard-mapped to %s\" % (name_or_ip, \\\n self.memory_cache[\"dns_info\"][srv] ))\n self._logger.debug(\"returning %s\" % (self.memory_cache[\"dns_info\"][srv]) )\n return self.memory_cache[\"dns_info\"][srv]\n\n # lookup from OS\n \n try : \n dns_info = socket.gethostbyaddr(name_or_ip)\n servernames = [dns_info[0]] + dns_info[1]\n ipaddrs = dns_info[2]\n except socket.gaierror :\n if is_name(name_or_ip) :\n raise LookupUtilError(\"Cannot resolve %s\" % name_or_ip)\n else :\n self._logger.warn(\"Cannot resolve %s\" % name_or_ip)\n self._logger.debug(\"returning %s\" % ({\"names\": [], \"ipaddrs\" : [name_or_ip,]}) )\n return {\"names\": [], \"ipaddrs\" : [name_or_ip,]}\n\n\n self._logger.debug(\"%s resolves to %s\" % (name_or_ip, dns_info)) \n # check if resolved ip-address matches (if hostalias was used)\n for hostname in afs.CONFIG.hosts :\n for ipaddr in ipaddrs :\n # ignore IP if we're asked to do so.\n if ipaddr in afs.CONFIG.ignoreIPList : continue\n if ipaddr in afs.CONFIG.hosts[hostname] :\n self._logger.debug(\"%s is hard-mapped to (%s,%s)\" % \\\n (ipaddrs, [hostname,],afs.CONFIG.hosts[hostname]))\n # add this hostalias to list in memory_cache\n if self.memory_cache[\"dns_info\"].has_key(hostname) :\n self.memory_cache[\"dns_info\"][hostname][\"names\"] = \\\n [hostname, ]\n self.memory_cache[\"dns_info\"][hostname][\"ipaddrs\"] = \\\n afs.CONFIG.hosts[hostname]\n else :\n self.memory_cache[\"dns_info\"][hostname] = { \\\n \"names\" : [hostname,], \\\n \"ipaddrs\" : afs.CONFIG.hosts[hostname]}\n self._logger.debug(\"memory_cache = %s\" % \\\n (self.memory_cache))\n ipaddrs = []\n self._logger.debug(\"returning %s\" % ({ \"names\" : [hostname], \"ipaddrs\" : \\\n afs.CONFIG.hosts[hostname] }) )\n return { \"names\" : [hostname], \"ipaddrs\" : \\\n afs.CONFIG.hosts[hostname] }\n\n if \"nxdomain\" in servernames[0] : \n raise LookupUtilError(\"cannot resolve DNS-entry %s\" % name_or_ip)\n # fill up localcache\n self.memory_cache[\"dns_info\"][servernames[0]] = { \\\n \"names\" : servernames, 
\"ipaddrs\" : ipaddrs }\n self._logger.debug(\"memory_cache = %s\" % (self.memory_cache))\n self._logger.debug(\"returning %s\" % ({\"names\": servernames, \"ipaddrs\" : ipaddrs}) )\n return {\"names\": servernames, \"ipaddrs\" : ipaddrs}",
"def nslookup(host, server=None, index=None, sourcetype=\"nslookup\",\n source=\"nslookup_search_command\", logger=None):\n\n result = collections.OrderedDict()\n\n if host is None or host.strip() == \"\":\n raise ValueError(\"The host cannot be none or empty\")\n\n # Add the hostname we are querying for\n result['query'] = host\n\n # See if this is an IP address. If so, do a reverse lookup.\n try:\n IPAddress(host)\n\n addr = reversename.from_address(host)\n\n if len(resolver.query(addr, \"PTR\")) > 0:\n result['host'] = str(resolver.query(addr, \"PTR\")[0])\n\n # If this isn't an IP address, handle it as a DNS name.\n except ValueError:\n # Make a resolver\n custom_resolver = resolver.Resolver()\n\n if server is not None:\n custom_resolver.nameservers = [server]\n\n # Log the server used\n result['server'] = custom_resolver.nameservers\n\n # NS records\n try:\n answers = custom_resolver.query(host, 'NS')\n\n ns_records = []\n\n for answer in answers:\n ns_records.append(str(answer))\n\n if len(ns_records) > 0:\n result['ns'] = ns_records\n\n except resolver.NoAnswer:\n pass\n\n # A\n try:\n answers = custom_resolver.query(host, 'A')\n\n a_records = []\n\n for answer in answers:\n a_records.append(str(answer))\n\n if len(a_records) > 0:\n result['a'] = a_records\n except resolver.NoAnswer:\n pass\n\n # AAAA\n try:\n answers = custom_resolver.query(host, 'AAAA')\n\n aaaa_records = []\n\n for answer in answers:\n aaaa_records.append(str(answer))\n\n if len(aaaa_records) > 0:\n result['aaaa'] = aaaa_records\n\n except resolver.NoAnswer:\n pass\n\n # MX\n try:\n answers = custom_resolver.query(host, 'MX')\n\n mx_records = []\n\n for answer in answers:\n mx_records.append(str(answer))\n\n if len(mx_records) > 0:\n result['mx'] = mx_records\n\n except resolver.NoAnswer:\n pass\n\n # Write the event as a stash new file\n if index is not None:\n writer = StashNewWriter(index=index, source_name=source, sourcetype=sourcetype,\n file_extension=\".stash_output\")\n\n # Log the data\n if logger:\n logger.debug(\"Wrote stash file=%s\", writer.write_event(result))\n\n return result",
"def test_lookupAddress(self):\n d = client.lookupAddress(self.hostname)\n d.addCallback(self.checkResult, dns.A)\n return d",
"def public_ip_dns(resolv, nameservers, rdatatype, server, responsetype):\n for ns in nameservers:\n try:\n answer = resolv.query(ns, rdatatype)\n nameserver = answer[0].to_text()\n except Exception as e:\n print(e)\n continue\n resolve_public_ip(nameserver, server, responsetype)",
"def _lv_pydns_lookup(name):\n if not DNS.defaults[\"server\"]:\n DNS.DiscoverNameServers()\n req = DNS.Request(name=name, qtype=\"srv\", protocol=\"udp\")\n for retries_left in [3, 2, 1, 0]:\n try:\n response = req.req()\n if response and response.header[\"tc\"]:\n # truncated, rerun with tcp\n req = DNS.Request(name=name, qtype=\"srv\", protocol=\"tcp\")\n continue\n break\n except DNS.Base.DNSError:\n if not retries_left:\n raise\n time.sleep(1) # retry after sleeping a second\n if not response or not response.answers:\n return []\n result = []\n for a in response.answers:\n if a[\"typename\"].lower() != \"srv\":\n continue\n if isinstance(a[\"data\"], list):\n result.extend(a[\"data\"])\n else:\n result.append(a[\"data\"])\n return result",
"def resolve_hostname(self, hostname, type = \"a\"):\r\n\r\n import netius.clients\r\n\r\n future = self.build_future()\r\n\r\n def handler(response):\r\n if not response:\r\n raise errors.RuntimeError(\"Timeout in resolution\")\r\n if not response.answers:\r\n raise errors.RuntimeError(\"Unable to resolve name\")\r\n\r\n answer = response.answers[0]\r\n address = answer[4]\r\n\r\n future.set_result(address)\r\n\r\n netius.clients.DNSClient.query_s(\r\n hostname,\r\n type = type,\r\n callback = handler\r\n )\r\n\r\n return future",
"def custom_dns_resolver(hostname, type='A'):\n nameservers = globals.config.service.initial_dns\n custom_resolver = dns.resolver.Resolver()\n custom_resolver.nameservers = nameservers\n answer = custom_resolver.query(hostname, type)\n\n return str(random.choice(answer))",
"def test_answerless(self):\n servers = {\n ('1.1.2.3', 53): {\n ('example.com', A): {\n },\n },\n }\n resolver = self._getResolver(servers)\n d = resolver.lookupAddress('example.com')\n return self.assertFailure(d, ResolverError)",
"def myip_resolver(arg=None):\n # Return type if no argument for use in Lister.\n if arg is None:\n return 'dns'\n output = get_output(cmd=arg.split(\" \"))\n # Clean up output\n result = str(output[0]).replace('\"', '')\n try:\n interface = ipaddress.ip_interface(result)\n except TypeError:\n interface = None\n return interface",
"def ip_lookup(ip):\n # Create the required data dictionary for Host/Reputation\n api_data = {\n 'host': ip\n }\n response = http_request(endpoint=HOST_REPUTE_API, data=api_data)\n\n if response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return response",
"def get_hostname_helper(self, sock, nameservers, hostname):\n hostname = hostname.rstrip(\".\") #framework can't handle \"\" or anything ending with a dot\n\n aliases = []\n addresses = []\n\n #1. See if the answer is in local information, and if so return it to the client.\n if self.caching:\n for alias in self.cache.lookup(hostname, Type.CNAME, Class.IN):\n aliases.append(alias.rdata.data)\n for address in self.cache.lookup(hostname, Type.A, Class.IN):\n addresses.append(address.rdata.data)\n \n if aliases != []:\n return hostname, aliases, addresses\n\n #3. Send them queries until one returns a response.\n # Create and send query\n \n while True:\n response = self._get_single_A(sock, nameservers, hostname)\n \n if response == None:\n break\n \n for answer in [ans for ans in response.answers + response.additionals if ans.type_ == Type.A and ans.name == hostname]:\n addresses.append(answer.rdata.data)\n for additional in [ans for ans in response.additionals if ans.type_ == Type.CNAME and ans.name == hostname]:\n aliases.append(additional.rdata.data)\n if addresses != []:\n break\n \n ns2 = nameservers\n nameservers, remaining = self.extract_ip(response.authorities, response.additionals, hostname)\n if nameservers == []:\n nameservers = self.get_ns(sock, ns2, remaining)\n return hostname, aliases, addresses",
"def resolv(hostname):\n\n ips = list()\n\n # Create resolver object\n res = resolver.Resolver()\n\n # Choose the correct DNS servers\n # Blue DNS servers\n if hostname.startswith('b-'):\n res.nameservers = ['172.16.2.10', '172.16.2.11']\n # Green DNS servers\n elif hostname.startswith('g-'):\n res.nameservers = ['10.0.2.10', '10.0.2.11']\n # Default to white DNS servers\n else:\n res.nameservers = ['194.47.252.134', '194.47.252.135']\n\n # Query\n try:\n query = res.query(hostname)\n for answer in query:\n ips.append(answer.address)\n except resolver.NXDOMAIN:\n raise CouldNotResolv\n\n # Return query result\n return ips",
"def _get_IP_addresses(hostname):\n try:\n answers, auth, addit = yield DNSclient.lookupAddress(hostname)\n except Exception as exc: # Too many different DNS failures to catch...\n log.exception('DNS Resolution failure: %r for name: %r', exc, hostname)\n returnValue([])\n\n returnValue(\n [answer.payload.dottedQuad()\n for answer in answers if answer.type == dns.A])",
"def lookup_ip(ikey, skey, host, ip):\n response = client.call_json_api(\n ikey, skey, host, 'GET', '/verify/v1/lookup/ip.json',\n ip=[ip])\n return response",
"def resolve_hostname(request, hostname):\n try:\n ipaddress = usm_wrapper_utils.resolve_hostname(hostname)\n except Exception, e:\n log.exception(e)\n return Response(\n {'message': 'Error while resolving hostname'}, status=417)\n\n return Response({'IP_Address': ipaddress}, status=200)",
"def host_ip(hostname: str) -> str:\n try:\n return socket.gethostbyname(hostname)\n except socket.gaierror:\n return \"No record found.\"",
"def dns_entry(self, msg):\n if msg['message'].find('Calling getaddrinfo') > -1:\n match = re.search(r'Calling getaddrinfo for host \\[(?P<host>[^\\]]+)\\]', msg['message'])\n if match:\n hostname = match.groupdict().get('host')\n if hostname not in self.dns:\n self.dns[hostname] = {'start': msg['timestamp']}\n elif msg['message'].find('lookup completed for host') > -1:\n match = re.search(r'lookup completed for host \\[(?P<host>[^\\]]+)\\]', msg['message'])\n if match:\n hostname = match.groupdict().get('host')\n if hostname in self.dns and 'end' not in self.dns[hostname]:\n self.dns[hostname]['end'] = msg['timestamp']",
"def recursive_dns_lookup(target_name, qtype, root_servers_list):\n\n # Base case\n if not root_servers_list:\n return None\n\n # Create dns query based on the target_name (website)\n # and qtype (queue type: CNAME, A, AAAA, or MX)\n dns_query = dns.message.make_query(target_name, qtype)\n\n for server in root_servers_list:\n # Doing a try catch to check if the dns server times out,\n # if it does then we continue and try another server\n try:\n query_response = dns.query.udp(dns_query, server, 3)\n except dns.exception.Timeout:\n continue\n # If there's an answer in the response\n if query_response.answer:\n # Search through the response.answer for possible answers\n for response_answers in query_response.answer:\n #print(\"response_answers: \", response_answers)\n for response_answer in response_answers:\n #print(\"Response_answer\", response_answer)\n target_name = str(response_answer)[:-1] # Removes the period at the end\n #print(\"Target_name\", target_name)\n # If we don't get the reponse we're after then\n # continue searching through the root_servers\n if response_answer.rdtype != qtype:\n if response_answer.rdtype == 5:\n return recursive_dns_lookup(target_name, qtype, ROOT_SERVERS)\n else:\n # Return the answer we wanted\n return query_response\n else: # If there isn't an answer in the response then we check additional\n\n # If we do have something in additional then get the stuff inside\n if query_response.additional:\n ip_addresses = []\n for response_additional in query_response.additional:\n #print(\"response_additional: \", response_additional)\n # Convert to string then send to function for parsing the address out\n response_additional_str = str(response_additional)\n\n #print(\"function get_address resp:\", resp)\n resp_elements = response_additional_str.split()\n #print(\"function get_address resp_elements:\", resp_elements)\n ip_address = []\n for resp_element in resp_elements:\n #print(\"function get_address resp_element:\", resp_element)\n if resp_element != 'A':\n continue\n else:\n #print(\"function get_address resp_element = A:\", resp_element)\n #print(\"function get_address address:\", resp_elements[-1])\n ip_address.append(resp_elements[-1])\n ip_addresses += ip_address\n\n return recursive_dns_lookup(target_name, qtype, ip_addresses)",
"def test_detectCanonicalNameLoop(self):\n servers = {\n ('1.1.2.3', 53): {\n ('example.com', A): {\n 'answers': [('example.com', Record_CNAME('example.net')),\n ('example.net', Record_CNAME('example.com'))],\n },\n },\n }\n resolver = self._getResolver(servers)\n d = resolver.lookupAddress('example.com')\n return self.assertFailure(d, ResolverError)",
"def __resolve_domain(self, domain=''):\n _ip = []\n if self.__is_ip_address(domain):\n # print hostname + \" is IP address\"\n _ip.append(domain)\n return _ip\n r = dns.resolver.get_default_resolver()\n r.nameservers = ['8.8.8.8']\n #answers = dns.resolver.query(hostname, 'A')\n try:\n answers = r.query(domain, 'A')\n for rdata in answers:\n # print rdata.address\n _ip.append(rdata.address)\n except dns.resolver.NoAnswer:\n print \"no answer\"\n\n if domain.find(\"www.\") != 0:\n domain = \"www.\" + domain\n # print \"querying \" + hostname\n try:\n answers = dns.resolver.query(domain, 'A')\n for rdata in answers:\n # print rdata.address\n _ip.append(rdata.address)\n except dns.resolver.NoAnswer:\n print \"no answer\"\n # print(\"processed %s, it has %d ips.\" % (hostname, len(_ip)))\n\n return list(set(_ip))"
] |
[
"0.758345",
"0.72457",
"0.69946975",
"0.6803022",
"0.6664075",
"0.6664075",
"0.6659345",
"0.6629895",
"0.66242015",
"0.6593789",
"0.6536833",
"0.65359706",
"0.6534513",
"0.65007156",
"0.64688385",
"0.6463831",
"0.64384496",
"0.6414392",
"0.63627326",
"0.6354092",
"0.6319608",
"0.6311362",
"0.6308639",
"0.6260362",
"0.6247705",
"0.6234416",
"0.6233492",
"0.6232558",
"0.62157667",
"0.6158041"
] |
0.73357594
|
1
|
Discovers the resource structure and attributes.
|
def get_inventory(self, context):
        # See below some example code demonstrating how to return the resource structure
        # and attributes. In real life, if the actual values are not static, this code
        # would be preceded by SNMP or other calls that retrieve the actual resource information.
'''
# Add sub resources details
        sub_resources = [AutoLoadResource(model='Generic Chassis', name='Chassis 1', relative_address='1'),
                         AutoLoadResource(model='Generic Module', name='Module 1', relative_address='1/1'),
                         AutoLoadResource(model='Generic Port', name='Port 1', relative_address='1/1/1'),
                         AutoLoadResource(model='Generic Port', name='Port 2', relative_address='1/1/2'),
                         AutoLoadResource(model='Generic Power Port', name='Power Port', relative_address='1/PP1')]
attributes = [ AutoLoadAttribute(relative_address='', attribute_name='Location', attribute_value='Santa Clara Lab'),
AutoLoadAttribute('', 'Model', 'Catalyst 3850'),
AutoLoadAttribute('', 'Vendor', 'Cisco'),
AutoLoadAttribute('1', 'Serial Number', 'JAE053002JD'),
AutoLoadAttribute('1', 'Model', 'WS-X4232-GB-RJ'),
AutoLoadAttribute('1/1', 'Model', 'WS-X4233-GB-EJ'),
AutoLoadAttribute('1/1', 'Serial Number', 'RVE056702UD'),
AutoLoadAttribute('1/1/1', 'MAC Address', 'fe80::e10c:f055:f7f1:bb7t16'),
AutoLoadAttribute('1/1/1', 'IPv4 Address', '192.168.10.7'),
AutoLoadAttribute('1/1/2', 'MAC Address', 'te67::e40c:g755:f55y:gh7w36'),
AutoLoadAttribute('1/1/2', 'IPv4 Address', '192.168.10.9'),
AutoLoadAttribute('1/PP1', 'Model', 'WS-X4232-GB-RJ'),
AutoLoadAttribute('1/PP1', 'Port Description', 'Power'),
AutoLoadAttribute('1/PP1', 'Serial Number', 'RVE056702UD')]
return AutoLoadDetails(sub_resources,attributes)
'''
self._log(context, 'Begin autoload')
resources = []
attributes = []
attributes.append(AutoLoadAttribute('', 'replication_address', self.get_replication_address(context)))
attributes.append(AutoLoadAttribute('', 'connection_key', self.get_connection_key(context)))
networks = self._get_newtork_interfaces(context)
self._log(context, 'got networks')
controllers = self._get_controllers(context)
self._log(context, 'got controllers')
ports = self._get_ports(context)
model = None
for controller in controllers:
self._log(context, 'Processing ctrlt: ' + controller['name'] + ':' + controller['model'])
resources.append(AutoLoadResource(model='Generic Storage Controller', name=controller['name'],
relative_address=controller['name']))
if model is None:
model = controller['model']
attributes.append(AutoLoadAttribute('', 'Model', model))
for network in networks:
self._log(context, 'Processing netwk: ' + network['name'] + ':' + str(network['address']))
            net_name = network['name']
            controller = net_name.split('.')[0]
            # vir0/vir1 interfaces are exported as root-level address attributes
            # rather than modeled as ports.
            if 'vir0' in controller or 'vir1' in controller:
                attributes.append(AutoLoadAttribute('', str(controller + '_address'), str(network['address'])))
                continue
            # Skip any other virtual interfaces and networks without the management service.
            if 'vir' in controller:
                continue
            if 'management' not in network['services']:
                continue
resources.append(AutoLoadResource(model='Storage Network Port', name=net_name,
relative_address=controller.upper() + '/' + str(network['address'])))
        for port in ports:
            # Ports that expose an IQN are modeled as iSCSI ports; ports that expose
            # a WWN (the elif branch below) are modeled as FC ports.
            if port['iqn'] is not None:
port_name = port['name']
controller = port_name.split('.')[0]
resources.append(AutoLoadResource(model='iSCSI Storage Port', name=port['name'],
relative_address=controller + '/' + port['portal']))
attributes.append(AutoLoadAttribute(controller + '/' + port['portal'], 'iqn', port['iqn']))
elif port['wwn'] is not None:
port_name = port['name']
controller = port_name.split('.')[0]
resources.append(AutoLoadResource(model='FC Storage Port', name=port['name'],
relative_address=controller + '/' + port['name'].split('.')[1]))
attributes.append(AutoLoadAttribute(controller + '/' + port['name'].split('.')[1], 'wwn', port['wwn']))
return AutoLoadDetails(resources, attributes)
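# --- illustrative sketch, separate from the driver method above -----------------
# Minimal, self-contained example of the port-to-resource mapping performed in
# get_inventory. The namedtuple classes below are stand-ins for the CloudShell
# AutoLoadResource / AutoLoadAttribute / AutoLoadDetails types, used only so this
# runs without the real API; the sample port dict is hypothetical.
from collections import namedtuple

AutoLoadResource = namedtuple('AutoLoadResource', 'model name relative_address')
AutoLoadAttribute = namedtuple('AutoLoadAttribute', 'relative_address attribute_name attribute_value')
AutoLoadDetails = namedtuple('AutoLoadDetails', 'resources attributes')

def build_iscsi_port_entries(port):
    # Same addressing scheme as above: <controller>/<portal>
    controller = port['name'].split('.')[0]
    address = controller + '/' + port['portal']
    resource = AutoLoadResource(model='iSCSI Storage Port', name=port['name'],
                                relative_address=address)
    attribute = AutoLoadAttribute(address, 'iqn', port['iqn'])
    return resource, attribute

sample_port = {'name': 'CT0.ETH4', 'portal': '10.0.0.5:3260', 'iqn': 'iqn.2010-06.com.example:ct0'}
res, attr = build_iscsi_port_entries(sample_port)
details = AutoLoadDetails(resources=[res], attributes=[attr])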
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _read_resource_metadata(self):\n self.rmeta_path = os.path.join(self.bag_content_path, self.res_meta_path)\n if not os.path.exists(self.rmeta_path):\n raise GenericResourceMeta.ResourceMetaException(\n \"Resource metadata {0} does not exist\".format(self.rmeta_path))\n if not os.access(self.rmeta_path, os.R_OK):\n raise GenericResourceMeta.ResourceMetaException(\n \"Unable to read resource metadata {0}\".format(self.rmeta_path))\n\n # Parse metadata using RDFLib\n self._rmeta_graph = Graph()\n self._rmeta_graph.parse(self.rmeta_path)\n\n res_uri = URIRef(self.root_uri)\n\n # Make sure that the resource ID in the resource metadata matches that of the resource map.\n rmeta_id = None\n for s, p, o in self._rmeta_graph.triples((res_uri, None, None)):\n # Resource identifier literal is represented as\n # 'http://example.com/resource/c9616269b5094c51b71632e8d1d02c0d', we want the part\n # after the final '/', 'or c9616269b5094c51b71632e8d1d02c0d'\n rmeta_id_part = str(s).rpartition('/')\n if rmeta_id_part[1] == '':\n # Should not be possible if a triple matched\n msg = 'Resource metadata does not contain a resource ID.'\n raise HsDeserializationException(msg)\n rmeta_id = rmeta_id_part[-1]\n if rmeta_id != self.id:\n msg = (\"Resource metadata resource ID {0} does not match \"\n \"resource map resource ID {1}.\").format(rmeta_id, self.id)\n raise HsDeserializationException(msg)\n logger.debug(\"Resource ID from resource map {0}\".format(rmeta_id))\n break\n\n if not rmeta_id:\n msg = (\"Resource metadata does not contain a resource ID \"\n \"that matches resource map resource ID {0}.\").format(self.id)\n raise HsDeserializationException(msg)\n\n # Also parse using SAX so that we can capture certain metadata elements\n # in the same order in which they appear in the RDF+XML serialization.\n SAX_parse_results = GenericResourceSAXHandler()\n xml.sax.parse(self.rmeta_path, SAX_parse_results)\n\n hsterms = rdflib.namespace.Namespace('https://www.hydroshare.org/terms/')\n\n # Warn if title does not match that from resource map\n title_lit = self._rmeta_graph.value(res_uri, rdflib.namespace.DC.title)\n if title_lit is not None:\n title = str(title_lit)\n if title != self.title:\n msg = \"Title from resource metadata {0} \"\n msg += \"does not match title from resource map {1}, using {2} \"\n msg += \"(this may be okay if the resource metadata is being updated).\"\n msg = msg.format(title, self.title, title)\n self.title = title\n logger.warning(msg)\n\n # Get abstract\n for s, p, o in self._rmeta_graph.triples((None, rdflib.namespace.DCTERMS.abstract, None)):\n self.abstract = o\n if self.abstract:\n logger.debug(\"\\t\\tAbstract: {0}\".format(self.abstract))\n\n # Get creators\n for s, p, o in self._rmeta_graph.triples((None, rdflib.namespace.DC.creator, None)):\n creator = GenericResourceMeta.ResourceCreator()\n creator.set_uri(o)\n # Get order\n order_lit = self._rmeta_graph.value(o, hsterms.creatorOrder)\n if order_lit is None:\n msg = \"Order for creator {0} was not found.\".format(o)\n raise GenericResourceMeta.ResourceMetaException(msg)\n creator.order = int(str(order_lit))\n # Get name\n name_lit = self._rmeta_graph.value(o, hsterms.name)\n if name_lit is None:\n msg = \"Name for creator {0} was not found.\".format(o)\n raise GenericResourceMeta.ResourceMetaException(msg)\n creator.name = str(name_lit)\n # Get email\n email_lit = self._rmeta_graph.value(o, hsterms.email)\n if email_lit is not None:\n creator.email = str(email_lit)\n # Get organization\n org_lit = self._rmeta_graph.value(o, 
hsterms.organization)\n if org_lit is not None:\n creator.organization = str(org_lit)\n # Get address\n addy_lit = self._rmeta_graph.value(o, hsterms.address)\n if addy_lit is not None:\n creator.address = str(addy_lit)\n # Get phone\n phone_lit = self._rmeta_graph.value(o, hsterms.phone)\n if phone_lit is not None:\n phone_raw = str(phone_lit).split(':')\n if len(phone_raw) > 1:\n creator.phone = phone_raw[1]\n else:\n creator.phone = phone_raw[0]\n # Get homepage\n homepage_lit = self._rmeta_graph.value(o, hsterms.homepage)\n if homepage_lit is not None:\n creator.homepage = str(homepage_lit)\n\n self.add_creator(creator)\n\n for c in self.get_creators():\n logger.debug(\"\\t\\tCreator: {0}\".format(str(c)))\n\n # Get contributors\n if SAX_parse_results:\n # Use contributors from SAX parser\n self.contributors = list(SAX_parse_results.contributors)\n else:\n # Get contributors from RDF\n for s, p, o in self._rmeta_graph.triples((None, rdflib.namespace.DC.contributor, None)):\n contributor = GenericResourceMeta.ResourceContributor()\n contributor.set_uri(o)\n # Get name\n name_lit = self._rmeta_graph.value(o, hsterms.name)\n if name_lit is None:\n msg = \"Name for contributor {0} was not found.\".format(o)\n raise GenericResourceMeta.ResourceMetaException(msg)\n contributor.name = str(name_lit)\n # Get email\n email_lit = self._rmeta_graph.value(o, hsterms.email)\n if email_lit is not None:\n contributor.email = str(email_lit)\n # Get organization\n org_lit = self._rmeta_graph.value(o, hsterms.organization)\n if org_lit is not None:\n contributor.organization = str(org_lit)\n # Get address\n addy_lit = self._rmeta_graph.value(o, hsterms.address)\n if addy_lit is not None:\n contributor.address = str(addy_lit)\n # Get phone\n phone_lit = self._rmeta_graph.value(o, hsterms.phone)\n if phone_lit is not None:\n phone_raw = str(phone_lit).split(':')\n if len(phone_raw) > 1:\n contributor.phone = phone_raw[1]\n else:\n contributor.phone = phone_raw[0]\n # Get homepage\n homepage_lit = self._rmeta_graph.value(o, hsterms.homepage)\n if homepage_lit is not None:\n contributor.homepage = str(homepage_lit)\n\n self.contributors.append(contributor)\n\n for c in self.contributors:\n logger.debug(\"\\t\\tContributor: {0}\".format(str(c)))\n\n # Get creation date\n for s, p, o in self._rmeta_graph.triples((None, None, rdflib.namespace.DCTERMS.created)):\n created_lit = self._rmeta_graph.value(s, rdflib.namespace.RDF.value)\n if created_lit is None:\n msg = \"Resource metadata {0} does not contain a creation date.\".format(self.rmeta_path)\n raise GenericResourceMeta.ResourceMetaException(msg)\n try:\n self.creation_date = hs_date_to_datetime(str(created_lit))\n except HsDateException:\n try:\n self.creation_date = hs_date_to_datetime_iso(str(created_lit))\n except HsDateException as e:\n msg = \"Unable to parse creation date {0}, error: {1}\".format(str(created_lit),\n str(e))\n raise GenericResourceMeta.ResourceMetaException(msg)\n\n logger.debug(\"\\t\\tCreation date: {0}\".format(str(self.creation_date)))\n\n # Get modification date\n for s, p, o in self._rmeta_graph.triples((None, None, rdflib.namespace.DCTERMS.modified)):\n modified_lit = self._rmeta_graph.value(s, rdflib.namespace.RDF.value)\n if modified_lit is None:\n msg = \"Resource metadata {0} does not contain a modification date.\".format(self.rmeta_path)\n raise GenericResourceMeta.ResourceMetaException(msg)\n try:\n self.modification_date = hs_date_to_datetime(str(modified_lit))\n except HsDateException:\n try:\n self.modification_date = 
hs_date_to_datetime_iso(str(modified_lit))\n except HsDateException as e:\n msg = \"Unable to parse modification date {0}, error: {1}\".format(str(modified_lit),\n str(e))\n raise GenericResourceMeta.ResourceMetaException(msg)\n\n logger.debug(\"\\t\\tModification date: {0}\".format(str(self.modification_date)))\n\n # Get rights\n resource_rights = None\n for s, p, o in self._rmeta_graph.triples((None, rdflib.namespace.DC.rights, None)):\n resource_rights = GenericResourceMeta.ResourceRights()\n # License URI\n rights_uri = self._rmeta_graph.value(o, hsterms.URL)\n if rights_uri is None:\n msg = \"Resource metadata {0} does not contain rights URI.\".format(self.rmeta_path)\n raise GenericResourceMeta.ResourceMetaException(msg)\n resource_rights.uri = str(rights_uri)\n # Rights statement\n rights_stmt_lit = self._rmeta_graph.value(o, hsterms.rightsStatement)\n if rights_stmt_lit is None:\n msg = \"Resource metadata {0} does not contain rights statement.\".format(self.rmeta_path)\n raise GenericResourceMeta.ResourceMetaException(msg)\n resource_rights.statement = str(rights_stmt_lit)\n\n if resource_rights is None:\n msg = \"Resource metadata {0} does not contain rights.\".format(self.rmeta_path)\n raise GenericResourceMeta.ResourceMetaException(msg)\n\n self.rights = resource_rights\n\n logger.debug(\"\\t\\tRights: {0}\".format(self.rights))\n\n # Get keywords\n if SAX_parse_results:\n # Use keywords from SAX parser\n self.keywords = list(SAX_parse_results.subjects)\n else:\n # Get keywords from RDF\n for s, p, o in self._rmeta_graph.triples((None, rdflib.namespace.DC.subject, None)):\n self.keywords.append(str(o))\n\n logger.debug(\"\\t\\tKeywords: {0}\".format(str(self.keywords)))\n\n # Get language\n lang_lit = self._rmeta_graph.value(res_uri, rdflib.namespace.DC.language)\n if lang_lit is None:\n self.language = 'eng'\n else:\n self.language = str(lang_lit)\n\n logger.debug(\"\\t\\tLanguage: {0}\".format(self.language))\n\n # Get coverage (box)\n for s, p, o in self._rmeta_graph.triples((None, None, rdflib.namespace.DCTERMS.box)):\n coverage_lit = self._rmeta_graph.value(s, rdflib.namespace.RDF.value)\n if coverage_lit is None:\n msg = \"Coverage value not found for {0}.\".format(o)\n raise GenericResourceMeta.ResourceMetaException(msg)\n coverage = GenericResourceMeta.ResourceCoverageBox(str(coverage_lit))\n self.coverages.append(coverage)\n\n # Get coverage (point)\n for s, p, o in self._rmeta_graph.triples((None, None, rdflib.namespace.DCTERMS.point)):\n coverage_lit = self._rmeta_graph.value(s, rdflib.namespace.RDF.value)\n if coverage_lit is None:\n msg = \"Coverage value not found for {0}.\".format(o)\n raise GenericResourceMeta.ResourceMetaException(msg)\n coverage = GenericResourceMeta.ResourceCoveragePoint(str(coverage_lit))\n self.coverages.append(coverage)\n\n # Get coverage (period)\n for s, p, o in self._rmeta_graph.triples((None, None, rdflib.namespace.DCTERMS.period)):\n coverage_lit = self._rmeta_graph.value(s, rdflib.namespace.RDF.value)\n if coverage_lit is None:\n msg = \"Coverage value not found for {0}.\".format(o)\n raise GenericResourceMeta.ResourceMetaException(msg)\n coverage = GenericResourceMeta.ResourceCoveragePeriod(str(coverage_lit))\n self.coverages.append(coverage)\n\n logger.debug(\"\\t\\tCoverages: \")\n for c in self.coverages:\n logger.debug(\"\\t\\t\\t{0}\".format(str(c)))\n\n # Get relations\n for s, p, o in self._rmeta_graph.triples((None, rdflib.namespace.DC.relation, None)):\n for pred, obj in self._rmeta_graph.predicate_objects(o):\n relation = 
GenericResourceMeta.ResourceRelation(obj, pred)\n self.relations.append(relation)\n\n logger.debug(\"\\t\\tRelations: \")\n for r in self.relations:\n logger.debug(\"\\t\\t\\t{0}\".format(str(r)))",
"def find_resources(self):\n path = self.get_path('resources')\n\n if not path:\n rsoup = self.soup\n else:\n rsoup = BeautifulSoup(open(path))\n\n self.parse_resources(rsoup)",
"def _read_info_resources(self, **kwargs):\n info = {'keypairs': {},\n 'flavors': {},\n 'user_quotas': [],\n 'project_quotas': []}\n\n for keypair in self.get_keypair_list():\n info['keypairs'][keypair.id] = self.convert(keypair)\n\n for flavor in self.get_flavor_list():\n info['flavors'][flavor.id] = self.convert(flavor)\n\n if self.config.migrate.migrate_quotas:\n self._read_info_quotas(info)\n\n return info",
"def getResourceAttributes(self, authenticationToken, guid):\r\n pass",
"def getResource(self, authenticationToken, guid, withData, withRecognition, withAttributes, withAlternateData):\r\n pass",
"def _fetch(self):\n self._data = self._get(self.url)\n\n if self._data['released_errata'] is not None:\n self._released_errata = Erratum(errata_id=self._data[\n 'released_errata']['id'])\n\n for errata_dict in self._data['all_errata']:\n errata = Erratum(errata_id=errata_dict['id'])\n self._all_errata.append(errata)\n\n self._signed_rpms = self._data.get('rpms_signed')\n\n for et_file in self._data['files']:\n self._files.append(et_file['path'])",
"def discover(self):\n pass",
"def _init(self):\n # TODO: recalc self.path from <self._file_path>, to fix correct file system case\n # On windows this would lead to correct URLs\n self.provider._count_get_resource_inst_init += 1\n tableName, primKey = self.provider._split_path(self.path)\n\n display_type = \"Unknown\"\n displayTypeComment = \"\"\n contentType = \"text/html\"\n\n # _logger.debug(\"getInfoDict(%s), nc=%s\" % (path, self.connectCount))\n if tableName is None:\n display_type = \"Database\"\n elif primKey is None: # \"database\" and table name\n display_type = \"Database Table\"\n else:\n contentType = \"text/csv\"\n if primKey == \"_ENTIRE_CONTENTS\":\n display_type = \"Database Table Contents\"\n displayTypeComment = \"CSV Representation of Table Contents\"\n else:\n display_type = \"Database Record\"\n displayTypeComment = \"Attributes available as properties\"\n\n # Avoid calling is_collection, since it would call isExisting -> _init_connection\n is_collection = primKey is None\n\n self._cache = {\n \"content_length\": None,\n \"contentType\": contentType,\n \"created\": time.time(),\n \"display_name\": self.name,\n \"etag\": hashlib.md5().update(self.path).hexdigest(),\n # \"etag\": md5.new(self.path).hexdigest(),\n \"modified\": None,\n \"support_ranges\": False,\n \"display_info\": {\"type\": display_type, \"typeComment\": displayTypeComment},\n }\n\n # Some resource-only infos:\n if not is_collection:\n self._cache[\"modified\"] = time.time()\n _logger.debug(\"---> _init, nc=%s\" % self.provider._count_initConnection)",
"def resources(self):",
"def resources():\n check_resources()",
"def test_resource_parser(self):\n Repository = CTSCapitainsLocalResolver([\"./tests/testing_data/farsiLit\"])\n self.assertEqual(\n Repository.inventory[\"urn:cts:farsiLit:hafez\"].urn, URN(\"urn:cts:farsiLit:hafez\"),\n \"Hafez is found\"\n )\n self.assertEqual(\n len(Repository.inventory[\"urn:cts:farsiLit:hafez\"].works), 1,\n \"Hafez has one child\"\n )\n self.assertEqual(\n Repository.inventory[\"urn:cts:farsiLit:hafez.divan\"].urn, URN(\"urn:cts:farsiLit:hafez.divan\"),\n \"Divan is found\"\n )\n self.assertEqual(\n len(Repository.inventory[\"urn:cts:farsiLit:hafez.divan\"].texts), 3,\n \"Divan has 3 children\"\n )",
"def Initialize(self, fallthroughs_map, parsed_args=None):\n params = {}\n\n # Returns a function that can be used to parse each attribute, which will be\n # used only if the resource parser does not receive a fully qualified\n # resource name.\n def LazyGet(name):\n f = lambda: deps_lib.Get(name, fallthroughs_map, parsed_args=parsed_args)\n return f\n\n for attribute in self.attributes:\n params[self.ParamName(attribute.name)] = LazyGet(attribute.name)\n self._resources.RegisterApiByName(self._collection_info.api_name,\n self._collection_info.api_version)\n try:\n return self._resources.Parse(\n deps_lib.Get(\n self.anchor.name, fallthroughs_map, parsed_args=parsed_args),\n collection=self.collection,\n params=params)\n except deps_lib.AttributeNotFoundError as e:\n raise InitializationError(\n 'The [{}] resource is not properly specified.\\n'\n '{}'.format(self.name, six.text_type(e)))",
"def discover(self):\n\n # Get the Huge Page configuration\n self.get_hugepages()\n\n # Get the device configuration\n self.get_devices_per_node()\n\n # Get the CPU configuration\n self.get_cpu()\n\n # Get the current grub cmdline\n self.get_grub()",
"def _extract_resource(resource: Optional[dict],\n allowed_vals: tuple[tuple[str, ...]],\n exc: Type[exception.CinderException],\n resource_name: str,\n props: tuple[str] = ('status',)) -> Optional[str]:\n\n resource_id = None\n if resource:\n for prop, allowed_states in zip(props, allowed_vals):\n if resource[prop] not in allowed_states:\n msg = _(\"Originating %(res)s %(prop)s must be one of \"\n \"'%(vals)s' values\")\n msg = msg % {'res': resource_name,\n 'prop': prop,\n 'vals': ', '.join(allowed_states)}\n # TODO(harlowja): what happens if the status changes after\n # this initial resource status check occurs??? Seems like\n # someone could delete the resource after this check passes\n # but before the volume is officially created?\n raise exc(reason=msg)\n resource_id = resource['id']\n return resource_id",
"def test_get_attributes(self):\n pass",
"async def discover(self):\n raise NotImplementedError(\"this is a base class\")",
"def __getattr__(self, attr):\n actual_resource = getattr(self.swagger_client, attr)\n if attr in [\"Authorization\", \"Effects\", \"Identify\", \"Info\",\n \"PanelLayout\", \"State\"]:\n return WrappedResource(actual_resource, attr)\n else:\n return actual_resource",
"def process_resource_api(self, resources, resource, api, context):\n pass",
"def read(self):\n self.attributes = self.call('READ', expect=error.OK)",
"def get_inventory(self, context):\n # See below some example code demonstrating how to return the resource structure and attributes\n # In real life, this code will be preceded by SNMP/other calls to the resource details and will not be static\n # run 'shellfoundry generate' in order to create classes that represent your data model\n\n '''\n resource = LanforgeResource.create_from_context(context)\n resource.vendor = 'specify the shell vendor'\n resource.model = 'specify the shell model'\n\n port1 = ResourcePort('Port 1')\n port1.ipv4_address = '192.168.10.7'\n resource.add_sub_resource('1', port1)\n\n return resource.create_autoload_details()\n '''\n return AutoLoadDetails([], [])",
"def autoload_resource(session: CloudShellAPISession, test_helpers: TgTestHelpers, dut: List[str]) -> Iterable[ResourceInfo]:\n address, controller_address, controller_port = dut\n attributes = [\n AttributeNameValue(f\"{IXIA_CHASSIS_MODEL}.Controller Address\", controller_address),\n AttributeNameValue(f\"{IXIA_CHASSIS_MODEL}.Controller TCP Port\", controller_port),\n AttributeNameValue(f\"{IXIA_CHASSIS_MODEL}.User\", \"admin\"),\n AttributeNameValue(f\"{IXIA_CHASSIS_MODEL}.Password\", \"admin\"),\n ]\n resource = test_helpers.create_autoload_resource(IXIA_CHASSIS_MODEL, \"tests/test-ixia\", address, attributes)\n yield resource\n session.DeleteResource(resource.Name)",
"def __init__(self, specs, resources, properties=None):\n if not properties:\n properties = {}\n self.init_collections()\n self.properties = properties\n self.set_paths(specs, resources)\n self.parse_paths()\n self.find_resources()",
"def on_get_resource(self, req, resp, **params):\n instance = self.get_object(**params)\n resp.json(**instance.as_resource)",
"def _get_information(self):\n pass",
"def get_resource(self):\n raise errors.Unimplemented()",
"def getResourceRecognition(self, authenticationToken, guid):\r\n pass",
"def GetResourceSample():\n client = CreateClient()\n for e1 in client.GetResources(limit=5).entry:\n e2 = client.GetResource(e1)\n print 'Refetched: ', e2.title.text, e2.resource_id.text",
"def getResource(self):\n pass;",
"def parse_resource(self, skip_deprecated=False):\n self.resource = parse_resource(self, skip_deprecated=skip_deprecated)\n if self.resource:\n self.authenticate()\n resource = getattr(self.v2, self.resource)\n if is_control_resource(self.resource):\n # control resources are special endpoints that you can only\n # do an HTTP GET to, and which return plain JSON metadata\n # examples are `/api/v2/ping/`, `/api/v2/config/`, etc...\n if self.help:\n self.subparsers[self.resource].print_help()\n raise SystemExit()\n self.method = 'get'\n response = getattr(resource, self.method)()\n else:\n response = self.parse_action(resource)\n\n _filter = self.get_config('filter')\n\n # human format for metrics, settings is special\n if (\n self.resource in ('metrics', 'settings') and\n self.get_config('format') == 'human'\n ):\n response.json = {\n 'count': len(response.json),\n 'results': [\n {'key': k, 'value': v}\n for k, v in response.json.items()\n ]\n }\n _filter = 'key, value'\n\n if (\n self.get_config('format') == 'human' and\n _filter == '.' and\n self.resource in UNIQUENESS_RULES\n ):\n _filter = ', '.join(UNIQUENESS_RULES[self.resource])\n\n formatted = format_response(\n response,\n fmt=self.get_config('format'),\n filter=_filter,\n changed=self.original_action in (\n 'modify', 'create', 'associate', 'disassociate'\n )\n )\n if formatted:\n print(utils.to_str(formatted), file=self.stdout)\n if hasattr(response, 'rc'):\n raise SystemExit(response.rc)\n else:\n self.parser.print_help()",
"def parse_resources(self, soup):\n for res in soup.find_all('res'):\n if 'customlangpack' in res['id'].lower():\n self.find_langpack_path(res)\n else:\n rid = remove_xml(res['id'])\n self.resources[rid] = path_format(self.properties.substitute(res['src']))"
] |
[
"0.5671755",
"0.5449949",
"0.53734154",
"0.5339723",
"0.5314795",
"0.529846",
"0.5258291",
"0.5246755",
"0.52241033",
"0.5214886",
"0.5209261",
"0.51976424",
"0.51858026",
"0.5176893",
"0.51756376",
"0.51583856",
"0.51576287",
"0.5146784",
"0.51284975",
"0.5121302",
"0.5113203",
"0.50924915",
"0.5066923",
"0.5062909",
"0.5062217",
"0.5052292",
"0.50359917",
"0.50240386",
"0.50146943",
"0.50012636"
] |
0.58067125
|
0
|
Write decorated test log for all levels
|
def log_all_levels_decorated(logger_instance):
for log_level in list(new_loglevel_dict.keys()) + list(standard_loglevel_dict.keys()):
getattr(logger_instance, log_level.lower())('test ' + log_level, decorated=True)
getattr(logger_instance, "info")("", decorated=True)
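# --- illustrative sketch, assumptions only ----------------------------------------
# The helper above relies on project-specific pieces that are not shown here: two
# level dictionaries (new_loglevel_dict / standard_loglevel_dict) and a logger whose
# level-named methods accept a `decorated` keyword. The stand-ins below are
# hypothetical and exist only so the helper can be exercised in isolation.
import logging

new_loglevel_dict = {}  # any custom levels would be registered here
standard_loglevel_dict = {'INFO': logging.INFO, 'WARNING': logging.WARNING}

class DecoratedLogger:
    """Minimal wrapper whose level methods accept decorated=True."""

    def __init__(self, name):
        self._logger = logging.getLogger(name)

    def _emit(self, level, msg, decorated=False):
        # Wrap the message in a simple banner when decoration is requested.
        self._logger.log(level, ('*** %s ***' % msg) if decorated else msg)

    def info(self, msg, decorated=False):
        self._emit(logging.INFO, msg, decorated)

    def warning(self, msg, decorated=False):
        self._emit(logging.WARNING, msg, decorated)

# Example call: log_all_levels_decorated(DecoratedLogger(__name__))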
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_default():\n logger = logging.getLogger(__name__)\n log_all_levels(logger)\n log_all_levels_decorated(logger)\n log_all_levels_loop(logger)\n return logger",
"def testLoggerLevels(self):\n logging = Logger()\n for level in range(len(logging.LEVELS)):\n testString = \"Test logging level\"\n logging.setLogLevel(level)\n logging.log(level, testString)",
"def log_all_levels(logger_instance):\n for log_level in list(new_loglevel_dict.keys()) + list(standard_loglevel_dict.keys()):\n getattr(logger_instance, log_level.lower())('test ' + log_level)",
"def test_case(self):\n log.e('error日志')\n log.d('debug日志')\n log.i('info日志')\n log.w('warning日志')",
"def test_set_single_logger_level(self):\n pass",
"def test_log_extra_custom_logger(caplog, test_df):\n caplog.clear()\n\n logger_name = \"my_custom_logger\"\n\n my_logger = logging.getLogger(logger_name)\n\n @log_step_extra(len, print_fn=my_logger.info)\n def do_nothing(df, *args, **kwargs):\n return df\n\n with caplog.at_level(logging.INFO):\n test_df.pipe(do_nothing)\n\n assert logger_name in caplog.text",
"def test(self):\n self.info(\"LOGGING: Testing log messages\")\n self.debug(\"This is a debugging message\")\n self.info(\"This is an informational message\")\n self.warning(\"This is a warning message\")\n self.error(\"This is an error message\")\n self.critical(\"This is a critical message\")\n self.info(\"LOGGING: Testing log messages COMPLETE\")\n return",
"def test_log_custom_logger(caplog, test_df):\n caplog.clear()\n\n logger_name = \"my_custom_logger\"\n\n my_logger = logging.getLogger(logger_name)\n\n @log_step(print_fn=my_logger.info)\n def do_nothing(df, *args, **kwargs):\n return df\n\n with caplog.at_level(logging.INFO):\n test_df.pipe(do_nothing)\n\n assert logger_name in caplog.text",
"def test_setup_logging_info(self) -> None:\n # set the log level high to ensure they are properly being change by setup_logging\n self.f_logger.setLevel(LogLevels.CRITICAL)\n self.boto3_logger.setLevel(LogLevels.CRITICAL)\n self.botocore_logger.setLevel(LogLevels.CRITICAL)\n\n setup_logging()\n\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.CRITICAL))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.ERROR))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.SUCCESS))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.WARNING))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.NOTICE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.INFO))\n self.assertFalse(self.f_logger.isEnabledFor(LogLevels.VERBOSE))\n self.assertFalse(self.f_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertFalse(self.boto3_logger.isEnabledFor(LogLevels.INFO))\n self.assertFalse(self.botocore_logger.isEnabledFor(LogLevels.INFO))",
"def log_all_levels_loop(logger_instance):\n for log_level in list(new_loglevel_dict.keys()) + list(standard_loglevel_dict.keys()):\n for i in range(3):\n logger_instance.loop_counter(\"test\", i, getattr(logging, log_level.upper()))\n getattr(logger_instance, log_level.lower())(\"\\n\\n\")",
"def test_logging(self):\n self._verify_logging()",
"def test_setup_logging_verbose(self) -> None:\n # set the log level high to ensure they are properly being change by setup_logging\n self.f_logger.setLevel(LogLevels.CRITICAL)\n self.boto3_logger.setLevel(LogLevels.CRITICAL)\n self.botocore_logger.setLevel(LogLevels.CRITICAL)\n\n setup_logging(LogLevels.VERBOSE)\n\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.CRITICAL))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.ERROR))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.SUCCESS))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.WARNING))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.NOTICE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.INFO))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.VERBOSE))\n self.assertFalse(self.f_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertFalse(self.boto3_logger.isEnabledFor(LogLevels.INFO))\n self.assertFalse(self.botocore_logger.isEnabledFor(LogLevels.INFO))",
"def pytest_logger_stdoutloggers(self, item):",
"def runTest(self):\n import logging\n lg_name = expector.logger_name\n lg = logging.getLogger(lg_name)\n start_level = logging.getLevelName('DEBUG_9')\n end_level = logging.getLevelName('CRITICAL_0')\n for lvl in range(start_level, end_level):\n lg.log(lvl, 'MATCH-START %s %d(%s) MATCH-END',\n lg_name, lvl, logging.getLevelName(lvl))",
"def standard_logging(dir=None, f_ending=None, TEST_MODE=False, modules=None, **kw):\n global _sys_stdout_assigned\n backup = sys.stdout\n if dir is None:\n dpath = kw.get('dpath')\n if dpath is None:\n dir = DEFAULT_ROOT_DIR\n else:\n dir = os.path.join(dpath,'logs','other')\n if TEST_MODE:\n dir += '_TM'\n os.makedirs(dir, exist_ok=True)\n\n\n if TEST_MODE and isinstance(f_ending, str) and not f_ending.endswith('_TM'):\n f_ending += '_TM'\n \n params = {'dir': dir,\n 'lvl': 'DEBUG',\n 'use_FonsFormatter':True,\n 'pair_with_fwriters': [sys.stdout],\n 'f_ending': f_ending,\n 'lvl_handlers': ['WARNING', {'lvl': 'INFO', 'use_rotating': True}],\n 'use_rotating':True,\n 'maxBytes':5*1024*1024,\n 'backupCount':5}\n \n params.update({k:v for k,v in kw.items() if k in params})\n\n #--\n writer = setup_logging(**params)\n #--\n \n if _sys_stdout_assigned: pass\n elif any(x in params['pair_with_fwriters'] for x in ('sys.stdout',sys.stdout)):\n sys.stdout = writer\n _sys_stdout_assigned = True\n \n standard_5 = _init_standard_5(TEST_MODE)\n \n if modules is not None:\n for mod in modules:\n add_module(mod)\n \n multi_module_logging(_modules, standard_5._fields, standard_5)\n \n _globals.update({\n #'queue': queue,\n 'TEST_MODE': TEST_MODE,\n 'level': params['lvl'],\n 'use_FonsFormatter': params['use_FonsFormatter'],\n 'fmt': kw.get('fmt')})\n \n return standard_5",
"def test_level_unknown(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=999)), \"**test**\")",
"def test_level_debug(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=logging.DEBUG)), \":detective: **test**\")",
"def test_level_warning(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=logging.WARNING)), \":warning: **test**\")",
"def test_set_subsystem_logger_level(self):\n pass",
"def test_02_log_something(self):\n logger = get_logger(self)\n logger.info('Info in test_02')\n logger.debug('Debug in test_02')\n logger.warn('Warn in test_02')\n logfiles = glob.glob(os.path.join(self.LOG_FOLDER,\n '{}*.log'.format(self.scenario)))\n assert logfiles\n print(logfiles)\n for logfile in logfiles:\n with open(logfile) as f:\n for line in f:\n print(line.strip())",
"def test_add_file_handler():\n change_cwd()\n logger = logging.getLogger(__name__)\n file_handler = logging.FileHandler('logging.log')\n file_handler.setLevel(logging.DEBUG)\n logger.addHandler(file_handler)\n log_all_levels(logger)\n log_all_levels_decorated(logger)\n log_all_levels_loop(logger)\n return logger",
"def test_basicConfig():\n logging_config = {\n 'format': '%(asctime)s [%(levelname)s]: %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S',\n 'level': 5, # info (see https://docs.python.org/3/library/logging.html#logging-levels)\n }\n logging.basicConfig(**logging_config)\n logging.setGitConfig({\"auto_commit\": False})\n logger = logging.getLogger(__name__)\n log_all_levels(logger)\n log_all_levels_decorated(logger)\n log_all_levels_loop(logger)\n return logger",
"def pytest_logger_fileloggers(self, item):",
"def test_level_info(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=logging.INFO)), \":speech_balloon: **test**\")",
"def setUp(self):\n # Disable log messages to silence expected warnings\n cfdm.log_level(\"DISABLE\")\n # Note: to enable all messages for given methods, lines or calls (those\n # without a 'verbose' option to do the same) e.g. to debug them, wrap\n # them (for methods, start-to-end internally) as follows:\n # cfdm.log_level('DEBUG')\n # < ... test code ... >\n # cfdm.log_level('DISABLE')",
"def logger_test():\n test_logger = Logger(True)\n test_dir = r'{}/logger_test'.format(os.getcwd())\n header = ['x', 'y', 'z']\n test_logger.new('test', header)\n for i in range(10):\n data = np.random.random((3,))\n test_logger.add('test', data)\n test_logger.save('test', test_dir)",
"def print_all(self,**kwargs):\n print(\"self.logger = {}\".format(self.logger))\n print(\"self.logger.setLevel = {}\".format(kwargs[\"logger_level\"]))\n if kwargs[\"console_log\"]==True:\n print(\"console setlevel {}\".format(kwargs[\"console_stream_level\"]))\n print(\"console formatter {}\".format(kwargs[\"console_format\"]))\n if kwargs[\"file_log\"]==True:\n print(\"file path {}\".format(kwargs[\"file_path\"]))\n print(\"file_format {}\".format(kwargs[\"file_format\"]))\n print(\"file stream level {}\".format(kwargs[\"file_stream_level\"]))",
"def test_setup_logging_debug(self) -> None:\n # set the log level high to ensure they are properly being change by setup_logging\n self.f_logger.setLevel(LogLevels.CRITICAL)\n self.boto3_logger.setLevel(LogLevels.CRITICAL)\n self.botocore_logger.setLevel(LogLevels.CRITICAL)\n\n with self.assertLogs(self.f_logger, LogLevels.DEBUG) as setup_ctx:\n setup_logging(LogLevels.DEBUG)\n\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.CRITICAL))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.ERROR))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.SUCCESS))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.WARNING))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.NOTICE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.INFO))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.VERBOSE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertTrue(self.boto3_logger.isEnabledFor(LogLevels.DEBUG))\n self.assertTrue(self.botocore_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertEqual(setup_ctx.output,\n [f'DEBUG:f-cli:Initalized logging for f-cli version {__version__}'])",
"def configure_test_logs(caplog: pytest.LogCaptureFixture) -> None:\n # Fix up SQLAlchemy's logging so that it uses the same log level as everything else.\n # By default, SQLAlchemy's logging is slightly unusual: it hides messages below\n # WARNING, even if you pass --log-level=DEBUG to pytest on the command line.\n # See: https://docs.sqlalchemy.org/en/14/core/engines.html#configuring-logging\n caplog.set_level(\"NOTSET\", logger=\"sqlalchemy\")",
"def test_level_critical(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=logging.CRITICAL)), \":bangbang: **test**\")"
] |
[
"0.7253009",
"0.70702046",
"0.68437916",
"0.68053335",
"0.6804448",
"0.6685715",
"0.66668046",
"0.6588776",
"0.6572676",
"0.65395325",
"0.6526354",
"0.6512286",
"0.650486",
"0.6426932",
"0.640954",
"0.6406446",
"0.64059454",
"0.6372152",
"0.6348167",
"0.63254714",
"0.63050705",
"0.62892234",
"0.62797904",
"0.6254493",
"0.6244984",
"0.6193715",
"0.6098149",
"0.6096096",
"0.6091752",
"0.6081232"
] |
0.7710551
|
0
|
Write test log for loops on all levels
|
def log_all_levels_loop(logger_instance):
for log_level in list(new_loglevel_dict.keys()) + list(standard_loglevel_dict.keys()):
for i in range(3):
logger_instance.loop_counter("test", i, getattr(logging, log_level.upper()))
getattr(logger_instance, log_level.lower())("\n\n")
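# --- illustrative sketch, assumption only -------------------------------------------
# The helper above additionally assumes the custom logger exposes a
# loop_counter(label, iteration, level) method. A hypothetical stand-in showing the
# expected call shape:
import logging

class LoopCounterLogger:
    def __init__(self, name):
        self._logger = logging.getLogger(name)

    def loop_counter(self, label, iteration, level):
        # One progress line per iteration, emitted at the requested numeric level.
        self._logger.log(level, '%s iteration %d', label, iteration)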
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def runTest(self):\n import logging\n lg_name = expector.logger_name\n lg = logging.getLogger(lg_name)\n start_level = logging.getLevelName('DEBUG_9')\n end_level = logging.getLevelName('CRITICAL_0')\n for lvl in range(start_level, end_level):\n lg.log(lvl, 'MATCH-START %s %d(%s) MATCH-END',\n lg_name, lvl, logging.getLevelName(lvl))",
"def testLoggerLevels(self):\n logging = Logger()\n for level in range(len(logging.LEVELS)):\n testString = \"Test logging level\"\n logging.setLogLevel(level)\n logging.log(level, testString)",
"def test_02_log_something(self):\n logger = get_logger(self)\n logger.info('Info in test_02')\n logger.debug('Debug in test_02')\n logger.warn('Warn in test_02')\n logfiles = glob.glob(os.path.join(self.LOG_FOLDER,\n '{}*.log'.format(self.scenario)))\n assert logfiles\n print(logfiles)\n for logfile in logfiles:\n with open(logfile) as f:\n for line in f:\n print(line.strip())",
"def test_case(self):\n log.e('error日志')\n log.d('debug日志')\n log.i('info日志')\n log.w('warning日志')",
"def test(self):\n self.info(\"LOGGING: Testing log messages\")\n self.debug(\"This is a debugging message\")\n self.info(\"This is an informational message\")\n self.warning(\"This is a warning message\")\n self.error(\"This is an error message\")\n self.critical(\"This is a critical message\")\n self.info(\"LOGGING: Testing log messages COMPLETE\")\n return",
"def log_all_levels(logger_instance):\n for log_level in list(new_loglevel_dict.keys()) + list(standard_loglevel_dict.keys()):\n getattr(logger_instance, log_level.lower())('test ' + log_level)",
"def logger_test():\n test_logger = Logger(True)\n test_dir = r'{}/logger_test'.format(os.getcwd())\n header = ['x', 'y', 'z']\n test_logger.new('test', header)\n for i in range(10):\n data = np.random.random((3,))\n test_logger.add('test', data)\n test_logger.save('test', test_dir)",
"def test_default():\n logger = logging.getLogger(__name__)\n log_all_levels(logger)\n log_all_levels_decorated(logger)\n log_all_levels_loop(logger)\n return logger",
"def test_log_level_tab(self):\n LoggerSetup.setup(10)\n\n logging.info(\"%sInfo deep 2\", LoggerSetup.get_log_deep(2))\n logging.info(\"%sInfo deep 3 with - as char\", LoggerSetup.get_log_deep(3, '-'))\n\n self.assertEqual(True, LoggerSetup.is_setup_loaded())\n\n LoggerSetup.shutdown()",
"def output_logs(flag_depth, use_json, all_flag):\n use_json = \"--json\" if use_json else \"\"\n all_flag = \"--all\" if all_flag else \"\"\n flags = \" \".join([\"--archive\", \"--repo\", \"--backup\", \"--bucket\"][: flag_depth + 1])\n self.log.info(\"---\")\n self.log.info(f\"Testing Flags: {flags} {use_json} {all_flag}\")\n self.log.info(\"---\")",
"def test_level_unknown(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=999)), \"**test**\")",
"def test_level_debug(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=logging.DEBUG)), \":detective: **test**\")",
"def log_all_levels_decorated(logger_instance):\n for log_level in list(new_loglevel_dict.keys()) + list(standard_loglevel_dict.keys()):\n getattr(logger_instance, log_level.lower())('test ' + log_level, decorated=True)\n getattr(logger_instance, \"info\")(\"\", decorated=True)",
"def test_set_single_logger_level(self):\n pass",
"def test_print_each(self):\n logger = Logger(each=3, nb_epochs=100)\n logger.__print_function__ = mock.Mock()\n logger.write = mock.Mock()\n for i in range(100):\n i += 1\n logger.epoch(i, lambda: {\"train_lemma\": (0+i, 1+i, 2+i)})\n\n self.assertEqual(logger.__print_function__.called, True, \"Calling to print should have been done\")\n self.assertEqual(logger.write.called, False, \"File has not been set and should not be called\")\n self.assertEqual(len(logger.__print_function__.call_args_list), 35*4, \"There should be 35 time the printing\")\n expected = [\n mock.call(\"::: Train Scores (lemma) :::\"),\n mock.call('+\\tall acc:', 1),\n mock.call('+\\tkno acc:', 2),\n mock.call('+\\tunk acc:', 3)\n ] + [\n call\n for i in range(1, 34)\n for call in [\n mock.call(\"::: Train Scores (lemma) :::\"),\n mock.call('+\\tall acc:', 0+i*3),\n mock.call('+\\tkno acc:', 1+i*3),\n mock.call('+\\tunk acc:', 2+i*3)\n ]\n ] + [\n mock.call(\"::: Train Scores (lemma) :::\"),\n mock.call('+\\tall acc:', 100),\n mock.call('+\\tkno acc:', 101),\n mock.call('+\\tunk acc:', 102)\n ]\n self.assertEqual(\n logger.__print_function__.call_args_list, expected,\n \"It should print the first, each third (except the first) log and the last one\"\n )",
"def test_logging(self):\n self._verify_logging()",
"def pytest_logger_stdoutloggers(self, item):",
"def log_example(var):\n\n log.info('example code started')\n log.debug('calling settings')\n test_settings()\n log2.error('there is no error this is example ')\n log2.info('finished')",
"def test_setup_logging_verbose(self) -> None:\n # set the log level high to ensure they are properly being change by setup_logging\n self.f_logger.setLevel(LogLevels.CRITICAL)\n self.boto3_logger.setLevel(LogLevels.CRITICAL)\n self.botocore_logger.setLevel(LogLevels.CRITICAL)\n\n setup_logging(LogLevels.VERBOSE)\n\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.CRITICAL))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.ERROR))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.SUCCESS))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.WARNING))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.NOTICE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.INFO))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.VERBOSE))\n self.assertFalse(self.f_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertFalse(self.boto3_logger.isEnabledFor(LogLevels.INFO))\n self.assertFalse(self.botocore_logger.isEnabledFor(LogLevels.INFO))",
"def test_004_log(self):\n __test = chess_storage.ChessStorage()\n __test_data = list(range(consts.TEST_LIST_LENGHT))\n __dir_game_saves = os.path.dirname(__file__)\n __dir_game_saves = os.path.join(__dir_game_saves, 'games')\n __test_filename = consts.TEST_FILENAME\n __test_logname = __test_filename + \"_log.txt\"\n __dir_game_log = os.path.join(__dir_game_saves, \"log\")\n __dir_game_log = os.path.join(__dir_game_log, __test_logname)\n #test list\n __log_test = __test.log(__test_filename, __test_data, True)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"\\nLine (0 bis \" +str(consts.TEST_LIST_LENGHT-1) +\")\")\n print(__log_game.read())\n __log_test = __test.log(__test_filename, __test_data, False)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"FILE_EXIST\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"Still Line (0 bis \" +str(consts.TEST_LIST_LENGHT-1) +\")\")\n print(__log_game.read())\n __log_test = __test.log(__test_filename, __test_data, True)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"Line (0 bis \" +str(consts.TEST_LIST_LENGHT-1) +\") two times\")\n print(__log_game.read())\n os.remove(__dir_game_log)\n self.assertFalse(os.path.isfile(__dir_game_log))\n #test string\n __log_test = __test.log(__test_filename, \"__test_data\", True)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"\\nOne Line:\")\n print(__log_game.read())\n __log_test = __test.log(__test_filename, \"__test_data\", False)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"FILE_EXIST\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"Still one Line:\")\n print(__log_game.read())\n __log_test = __test.log(__test_filename, \"__test_data\", True)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"Two Lines\")\n print(__log_game.read())",
"def write_test_log(t, output_dir):\n if t.log_to_file is not None and hasattr(t, \"stop_time\"):\n filename = type(t).__name__ + \"-\" + time.strftime(\"%Y%m%d-%H%M%S\") + \".txt\"\n testtime = t.stop_time - t.start_time\n with open(os.path.join(output_dir, filename), \"w\") as log:\n log.write(\"\\t=======================================================\")\n log.write(f\"\\n\\tTest case ID: {type(t).__name__}\")\n log.write(f\"\\n\\tTest case Description: {type(t).__doc__}\")\n log.write(\"\\n\\t=======================================================\\n\")\n log.write(t.log_to_file)\n log.write(\"\\n\\t=======================================================\")\n log.write(f\"\\n\\t{type(t).__name__} test result: {t.result_grade}\")\n log.write(f\"\\n\\tTotal test time: {testtime} seconds\")\n log.write(\"\\n\\t=======================================================\")",
"def _log_fixtures(self, item, item_type, parent_item_id):\n if not item.tags:\n return\n for tag in item.tags:\n if not tag.startswith(\"fixture.\"):\n continue\n msg = f\"Using of '{tag[len('fixture.'):]}' fixture\"\n if self._cfg.log_layout is not LogLayout.SCENARIO:\n self._step_id = self._rp.start_test_item(\n name=msg,\n start_time=timestamp(),\n item_type=item_type,\n parent_item_id=parent_item_id,\n has_stats=False\n if self._cfg.log_layout is LogLayout.NESTED\n else True,\n )\n self._rp.finish_test_item(self._step_id, timestamp(), \"PASSED\")\n continue\n self._rp.log(\n timestamp(),\n msg,\n level=\"INFO\",\n item_id=parent_item_id,\n )",
"def pytest_logger_fileloggers(self, item):",
"def test_level_info(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=logging.INFO)), \":speech_balloon: **test**\")",
"def log(self, level, *msg_elements):\r\n self.report.log(self._threadlocal.current_workunit, level, *msg_elements)",
"def test_main():\n info(\"hi\")\n debug(\"shouldn't appear\")\n set_level(DEBUG)\n debug(\"should appear\")\n folder = \"/tmp/testlogging\"\n if os.path.exists(folder):\n shutil.rmtree(folder)\n configure(folder=folder)\n logkv(\"a\", 3)\n logkv(\"b\", 2.5)\n dumpkvs()\n logkv(\"b\", -2.5)\n logkv(\"a\", 5.5)\n dumpkvs()\n info(\"^^^ should see a = 5.5\")\n logkv_mean(\"b\", -22.5)\n logkv_mean(\"b\", -44.4)\n logkv(\"a\", 5.5)\n dumpkvs()\n with ScopedConfigure(None, None):\n info(\"^^^ should see b = 33.3\")\n\n with ScopedConfigure(\"/tmp/test-logger/\", [\"json\"]):\n logkv(\"b\", -2.5)\n dumpkvs()\n\n reset()\n logkv(\"a\", \"longasslongasslongasslongasslongasslongassvalue\")\n dumpkvs()\n warn(\"hey\")\n error(\"oh\")\n logkvs({\"test\": 1})",
"def tone_down_logger():\n for level in (logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG):\n level_name = logging.getLevelName(level)\n logging.addLevelName(level, level_name.capitalize())",
"def tone_down_logger():\n for level in (logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG):\n level_name = logging.getLevelName(level)\n logging.addLevelName(level, level_name.capitalize())",
"def PrintLogs(self) -> None:\n assert self.Finished()\n for f, stream_name in (\n (self.stdout, \"STDOUT\"), (self.stderr, \"STDERR\")):\n f.flush()\n f.seek(0)\n # Since we collected binary data, we have to write binary data.\n encoded = (stream_name.encode(), str(self).encode())\n sys.stdout.buffer.write(b\"BEGIN %s of test %s\\n\" % encoded)\n sys.stdout.buffer.write(f.read())\n sys.stdout.buffer.write(b\"END %s of test %s\\n\" % encoded)\n sys.stdout.buffer.flush()",
"def test_006_log_append(self):\n __test = chess_storage.ChessStorage()\n __test_data = list(range(consts.TEST_LIST_LENGHT))\n __dir_game_saves = os.path.dirname(__file__)\n __dir_game_saves = os.path.join(__dir_game_saves, 'games')\n __dir_game_log = os.path.join(__dir_game_saves, \"log\")\n __test_logname = consts.TEST_FILENAME + \"_log.txt\"\n __dir_game_logfile = os.path.join(__dir_game_log, __test_logname)\n # pylint: disable = protected-access\n __log_test = __test._ChessStorage__log_append(__dir_game_logfile, __test_data)\n # pylint: enable = protected-access\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])"
] |
[
"0.7322604",
"0.7109084",
"0.694877",
"0.6849797",
"0.67564356",
"0.6494106",
"0.64357036",
"0.63690066",
"0.63465595",
"0.6284087",
"0.62071955",
"0.61847913",
"0.6177005",
"0.6165315",
"0.6150953",
"0.6119336",
"0.6119231",
"0.61017275",
"0.607864",
"0.6051501",
"0.5957064",
"0.5952155",
"0.59468925",
"0.59440744",
"0.5923525",
"0.59213424",
"0.5919645",
"0.5919645",
"0.5913751",
"0.5906812"
] |
0.7543858
|
0
|
Load a subset of the floorplan dataset.
|
def load_floorplan(self, dataset_dir, subset):
# Set class set based on provided config default classes
class_set = self.config.CLASSES
# Add Classes to inner collection
for idx, c in enumerate(class_set):
self.add_class("floorplan", idx + 1, c)
# Train or validation dataset?
assert subset in ["train", "val", "test"]
dataset_dir = os.path.join(dataset_dir, subset)
if (self.config.MODE == "Combined"):
            # Note: Annotations are created using VIA Tool 2.0
            # (no 'Combined' annotation file is wired up for the 'test' subset)
            if subset == "train":
annotations = json.load(
open(os.path.join(dataset_dir, "deakin_bc_train.json")))
elif subset == "val":
annotations = json.load(
open(os.path.join(dataset_dir, "deakin_bc_val.json")))
# don't need the dict keys
annotations = list(annotations['_via_img_metadata'].values())
# The VIA tool saves images in the JSON even if they don't have any
# annotations. Skip unannotated images.
annotations = [a for a in annotations if a['regions']]
elif (self.config.MODE == "Separate"):
annotations = dict()
# Note: Annotations are created using VIA Tool 2.0
for annotation_file in os.listdir(dataset_dir):
if (annotation_file.endswith('.json')):
json_file = json.load(
open(os.path.join(dataset_dir, annotation_file), mode='r'))
annotations[annotation_file] = json_file['_via_img_metadata'][json_file['_via_image_id_list'][0]]
# Add images
        # Note: In 'Separate' mode, only regions whose Class is in the class set above
        # are loaded; 'Combined' mode keeps every annotated region.
if (self.config.MODE == "Combined"):
for a in annotations:
if type(a['regions']) is dict:
polygons = [r for r in a['regions'].values()]
else:
polygons = [r for r in a['regions']]
# load_mask() needs the image size to convert polygons to masks.
# We must read the image to get the size since VIA doesn't include it in JSON
image_path = os.path.join(dataset_dir, a['filename'])
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
self.add_image(
"floorplan",
image_id=a['filename'],
path=image_path,
width=width, height=height,
polygons=polygons)
elif (self.config.MODE == "Separate"):
for json_filename, contents in annotations.items():
if type(contents['regions']) is dict:
                    polygons = [r for r in contents['regions'].values()
                                if r['region_attributes']['Class'] in class_set]
else:
polygons = [r for r in contents['regions']
if r['region_attributes']['Class'] in class_set]
# load_mask() needs the image size to convert polygons to masks.
# We must read the image to get the size since VIA doesn't include it in JSON
image_path = os.path.join(dataset_dir, contents['filename'])
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
self.add_image(
"floorplan",
image_id=contents['filename'],
path=image_path,
width=width, height=height,
polygons=polygons)
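
For reference, a minimal standalone sketch (the annotation_path argument and helper name are hypothetical) of how a VIA 2.0 project export can be flattened into per-image region lists, mirroring the 'Combined' branch above:

import json

def collect_via_regions(annotation_path):
    """Return {filename: [region, ...]} from a VIA 2.0 export, skipping unannotated images."""
    with open(annotation_path) as f:
        project = json.load(f)
    # VIA keeps per-image metadata under '_via_img_metadata'; the dict keys are not needed.
    regions_by_file = {}
    for img in project['_via_img_metadata'].values():
        regions = img.get('regions') or []
        # Older VIA exports store regions as a dict rather than a list.
        if isinstance(regions, dict):
            regions = list(regions.values())
        if regions:
            regions_by_file[img['filename']] = regions
    return regions_by_file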
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_data(subset: str):\n df_train = pd.read_csv(f\"{DATA_PATH}/train_durations_per_speaker.csv\")\n df_test = pd.read_csv(f\"{DATA_PATH}/val_durations_per_speaker.csv\")\n df_global = pd.read_csv(f\"{DATA_PATH}/global_durations_per_speaker.csv\")\n if (subset == \"train\"):\n df = df_train\n elif (subset == \"val\"):\n df = df_test\n else:\n df = df_global\n return df",
"def load_dataset(self, subset):\n assert subset in ('train', 'val')\n\n # Add classes\n for id, name in self.class_mapper.items():\n self.add_class('nodule', id, name)\n\n # Add images\n self.df = self.df_all[self.df_all['subset'] == subset]\n\n image_ids = set()\n for row in self.df.itertuples():\n image_id = (row.seriesuid, row.coordZ)\n path = os.path.join(cur_dir, 'data', 'train', '{}_{}.npy'.format(row.seriesuid, row.coordZ))\n if image_id in image_ids:\n continue\n self.add_image(\"nodule\", image_id=image_id, path=path)\n image_ids.add(image_id)",
"def load_subset_data(data_path, subset_name, timesteps):\n\n selected_subset_paths = subset_paths(os.path.join(data_path, subset_name))\n selected_subset_arrays = subset_arrays(selected_subset_paths)\n\n load_selected_timesteps = lambda x: np.load(x)\n\n if timesteps is not None:\n selected_subset_timesteps = load_selected_timesteps(timesteps)\n else:\n selected_subset_timesteps = np.array(range(int(np.sum(selected_subset_arrays[\"seq_lens\"]))))\n\n return selected_subset_arrays, selected_subset_timesteps",
"def set_data_subset(self, subset):\n self.data_subset = subset",
"def __init__(self, root=ROOT, pivots=PIVOTS, path=29, row=32,\n size=128, length=1000, subset='train', time=True,\n transform=None, target_transform=None):\n self.root = os.path.expanduser(root)\n self.pivots = os.path.expanduser(pivots)\n self.path = path\n self.row = row\n self.size = size\n self.length = length\n self.subset = subset\n self.time = time\n self.transform = transform\n self.target_transform = target_transform\n\n # Load the data\n data = []\n for band in [1, 2, 3, 4, 5, 7]:\n # Find a list of all matching files\n filename = '*_{:03d}{:03d}_*_sr_band{}_clipped.tif'\n filename = filename.format(path, row, band)\n files = glob.glob(os.path.join(self.root, filename))\n\n # Convert to LandsatProductIdentifier objects and sort by timestamp\n files = list(map(LandsatProductIdentifier, files))\n files.sort()\n\n # Only keep data from DOY 91-270, during growing season\n files = list(filter(lambda x: x.doy > 90 and x.doy <= 270, files))\n\n # Read each array\n band_data = []\n for file in files:\n ds = gdal.Open(file.filename)\n ar = ds.ReadAsArray().astype('float32')\n band_data.append(ar)\n data.append(band_data)\n\n self.data = np.array(data)\n\n # Load the segmentation\n filename = self.pivots.format(path, row)\n ds = gdal.Open(filename)\n ar = ds.ReadAsArray()\n ar = ar != -1.0\n\n self.dataset = ds\n self.segmentation = ar.astype('float32')",
"def __init__(self, subset=-1, verbose=False, month='02', year='2019'):\n\n if not os.path.exists(f'/tmp/gosat{year}{month}.tsv'):\n if verbose:\n print(\"Cache not found, downloading data...\")\n try:\n with open(f'/tmp/gosat{year}{month}.tsv', 'w') as data:\n r = requests.get(f'https://www.eorc.jaxa.jp/GOSAT/GPCG/download/data-g2-{year}{month}.txt')\n if verbose:\n print(\"Downloaded data\")\n lines = r.text.split('\\n')[11:subset]\n for l in lines:\n l = '\\t'.join(l.split()) + \"\\n\"\n data.write(l)\n except:\n os.remove(f'/tmp/gosat{year}{month}.tsv')\n raise ConnectionError(\"You need an internet connection to download the data\")\n \n df = pd.read_csv(f'/tmp/gosat{year}{month}.tsv', '\\t')\n if verbose:\n print(\"Dataset loaded\")\n self.df = df",
"def run_get_15k_row_subset():\n get_15k_row_subset('politics_30_months_comments_cleaned_standardized_vader_flair.csv',\n 'politics_30_months_comments_cleaned_standardized_vader_flair_15k.csv')",
"def load_table(dataset:str, local:bool=False, cut_lat_max:float=57.,\n cut_lat_min:float=-70., cut_lon_max:float=57.,\n cut_lon_min:float=-70.,time_cut=None,\n cut_DT:tuple=None):\n\n # Which flavor? \n if dataset[0:3] == 'llc':\n if dataset == 'llc_match':\n s3_file = s3_llc_match_table_file\n elif dataset == 'llc_uniform':\n s3_file = s3_llc_uniform_table_file\n else:\n raise IOError(\"Bad llc dataset!\")\n if local:\n tbl_file = os.path.join(os.getenv('SST_OOD'),\n 'LLC', 'Tables', os.path.basename(s3_file))\n else:\n tbl_file = s3_file\n elif dataset == 'viirs':\n if local:\n tbl_file = os.path.join(os.getenv('SST_OOD'),\n 'VIIRS', 'Tables', os.path.basename(s3_viirs_table_file))\n else:\n tbl_file = s3_viirs_table_file\n elif dataset == 'modis_all':\n tbl_file = s3_modis_table_file\n else:\n raise IOError(\"Bad Dataset\")\n\n # Load\n tbl = ulmo_io.load_main_table(tbl_file)\n\n # DT\n tbl['DT'] = tbl.T90 - tbl.T10\n\n # Cut?\n if cut_lat_max is not None:\n tbl = tbl[tbl.lat < cut_lat_max].copy()\n\n if cut_lat_min is not None:\n tbl = tbl[tbl.lat > cut_lat_min].copy()\n\n if cut_lon_max is not None:\n tbl = tbl[tbl.lon < cut_lon_max].copy()\n\n if cut_lon_min is not None:\n tbl = tbl[tbl.lon > cut_lon_min].copy()\n \n if cut_DT is not None:\n tbl.DT = tbl.T90.values - tbl.T10.values\n tbl = tbl[(tbl.DT < cut_DT[1]) & (tbl.DT >= cut_DT[0])].copy()\n\n if time_cut == 'head':\n cutt = (tbl.datetime >= pandas.Timestamp(2012,2,1)) & (\n tbl.datetime< pandas.Timestamp(2016,1,31))\n tbl = tbl[cutt].copy()\n elif time_cut == 'tail':\n cutt = (tbl.datetime >= pandas.Timestamp(2017,1,1)) & (\n tbl.datetime < pandas.Timestamp(2020,12,31))\n tbl = tbl[cutt].copy()\n\n # Expunge Nan\n finite = np.isfinite(tbl.LL)\n tbl = tbl[finite]\n tbl.reset_index(drop=True, inplace=True)\n\n # Return\n return tbl",
"def loadSubset(self, loadsubset):\n libxml2mod.xmlParserSetLoadSubset(self._o, loadsubset)",
"def test_full_dataset_from_file(full_dataset):\n train_dummy = \"justo. Praesent luctus. Curabitur egestas nunc sed libero. Proin sed\"\n val_dummy = \"malesuada. Integer id magna et ipsum cursus vestibulum. Mauris magna.\"\n\n assert full_dataset.train[0][0] == train_dummy\n assert full_dataset.train[0][1] == '6'\n\n assert full_dataset.val[0][0] == val_dummy\n assert full_dataset.val[0][1] == '8'\n\n assert full_dataset[0][0] == train_dummy\n assert full_dataset[100][0] == val_dummy",
"def load_simplefloor(env):\n return _oh.load_scene(env,None,'simpleFloor.env.xml',True)",
"def _get_floorplans(self, url):\n \n try:\n jdict = self._load_json(url)\n floorplans_groups = jdict['props']['homeDetails']['floorPlans']['floorPlanGroups']\n address_data = list(self._get_address(jdict))\n rental_data = []\n \n # different floorplans, e.g. studio, 1 bedroom 1 bathroom etc.\n for floorplans in floorplans_groups:\n plans = floorplans['plans']\n for section in plans:\n # this is the header \n section_data = self._get_section_data(section)\n rental_data.append(address_data+section_data+[url])\n units = section['units']\n # these are all the units under that header \n for unit in units:\n unit_data = self._get_section_data(unit)\n rental_data.append(address_data+unit_data+[url])\n return rental_data\n except:\n return None",
"def load_atlas(atlas=\"DK\", portion=\"all\"):\n if atlas == \"DK\":\n filepath = get_file_path(\"atlases/DK/dk_all.csv\")\n dk = pd.read_csv(filepath)\n\n if portion == \"all\":\n indices = dk.index\n\n else:\n\n cortical_df = dk[dk[\"Cortex\"] == \"cortical\"]\n subcortical_df = dk[dk[\"Cortex\"] == \"subcortical\"]\n\n cort_L_idx = cortical_df[cortical_df[\"Hemisphere\"] == \"Left\"].index\n cort_R_idx = cortical_df[cortical_df[\"Hemisphere\"] == \"Right\"].index\n\n subcort_L_idx = subcortical_df[subcortical_df[\"Hemisphere\"] == \"Left\"].index\n subcort_R_idx = subcortical_df[\n subcortical_df[\"Hemisphere\"] == \"Right\"\n ].index\n\n if portion == \"LR\":\n indices = list(cort_L_idx) + list(cort_R_idx)\n\n elif portion == \"RL\":\n indices = list(cort_R_idx) + list(cort_L_idx)\n\n elif portion == \"LRLR\":\n indices = list(cort_L_idx) + list(cort_R_idx)\n indices += list(subcort_L_idx) + list(subcort_R_idx)\n\n elif portion == \"LRRL\":\n indices = list(cort_L_idx) + list(cort_R_idx)\n indices += list(subcort_R_idx) + list(subcort_L_idx)\n\n elif portion == \"RLLR\":\n indices = list(cort_R_idx) + list(cort_L_idx)\n indices += list(subcort_L_idx) + list(subcort_R_idx)\n else:\n raise NameError(\"Atlas option not found.\")\n\n return dk.loc[indices].set_index(\"Name\")",
"def load_data(self,split='train'):\n return load_arrow_data(self.config,split)",
"def get_floor_plan(port_id):\n url = 'https://api.archisketch.com/v1/public/projects/'\n response = requests.get(url + port_id + '/detail')\n response = response.json()['project']\n floor_plan = response['floorplans'][0]\n return floor_plan",
"def reprogramming(\n subset: str = ReprogrammingSubset.FULL.s,\n path: Union[str, Path] = \"datasets/reprogramming.h5ad\",\n **kwargs: Any,\n) -> AnnData:\n subset = ReprogrammingSubset(subset)\n adata = _load_dataset_from_url(path, *_datasets[\"reprogramming\"], **kwargs)\n\n if subset == ReprogrammingSubset.FULL:\n return adata\n if subset == ReprogrammingSubset.K48:\n return adata[~adata.obs[\"cluster\"].isnull()].copy()\n if subset == ReprogrammingSubset.K85:\n return adata[~adata.obs[\"timecourse\"].isnull()].copy()\n\n raise NotImplementedError(\n f\"Subsetting option `{subset.s!r}` is not yet implemented.\"\n )",
"def load_dummy(path, subset=\"all\", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False):\n\n data = bunch.Bunch()\n if subset in ('train', 'test'):\n data[subset] = load_files(\"{0}/{1}\".format(path, subset), charset=\"latin1\", load_content=True, random_state=rnd)\n elif subset == \"all\":\n data[\"train\"] = load_files(\"{0}/{1}\".format(path, \"train\"), charset=\"latin1\", load_content=True,\n random_state=rnd)\n data[\"test\"] = load_files(\"{0}/{1}\".format(path, \"test\"), charset=\"latin1\", load_content=True, random_state=rnd)\n else:\n raise ValueError(\n \"subset can only be 'train', 'test' or 'all', got '%s'\" % subset)\n if not raw:\n data = process_data(data, fix_k, min_size, vct)\n return data",
"def load_data_file_with_spec_demand_data(filename):\n src_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname\n (\n os.path.abspath(\n __file__)))))\n input_data_path = os.path.join(src_path, 'data', 'BaseData',\n 'Specific_Demand_Data', filename)\n\n dataset = np.genfromtxt(input_data_path, delimiter='\\t', skip_header=1)\n return dataset",
"def load_data(self,split='train'):\n raise NotImplementedError",
"def _load( self, i ):\n if ir.config.verbosity_level >= 2: print(\"[observation] Lazy loading raster\")\n self._raster_data[i] = raster_cube( self._raster_files, line=self._line_info['description'][i], keep_null=self._keep_null )",
"def load_data(filen, model):\n mass_sel = select_bin(model.fit_var, *model.fit_range)\n selections = [mass_sel]\n for var, bounds in model.get_load_vars():\n selections.append(\n select_bin(var, *[float(v) for v in bounds.split(',')]))\n\n load_vars = ['{costh,phi}_HX_fold'] + collect_requirements(selections)\n\n return apply_selections(get_dataframe(filen, columns=load_vars),\n selections)",
"def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[CITIES[city]])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Day of Week'] = df['Start Time'].dt.dayofweek\n\n # get the subset of data where the month matches the one chosen\n if month != 0:\n df = df[df['Month'] == month]\n \n # get the subset of data where the day of the week matches the one chosen\n if day != 7:\n df = df[df['Day of Week'] == day]\n \n return df",
"def load_sub_data(gt_id,\n target_horizon,\n target_date_obj,\n submission_date,\n experiment,\n margin_in_days,\n regen=True,\n print_missing_cols=True):\n # Name of cache directory and file with submission date-specific results\n submission_cache_dir = os.path.join('results', experiment,\n '{}'.format(submission_date),\n '{}_{}'.format(gt_id, target_horizon))\n sub_data_file = os.path.join(\n submission_cache_dir,\n \"sub_data-margin{}-{}_{}-{}.h5\".format(margin_in_days, gt_id,\n target_horizon, submission_date))\n\n # ---------------\n # Check if subdata matrix already exists, otherwise regenerate it\n # ---------------\n if regen or not os.path.isfile(sub_data_file):\n print \"Creating sub_data\"\n create_sub_data(gt_id, target_horizon, submission_date,\n experiment, margin_in_days)\n print \"\"\n\n # ---------------\n # Read saved subset features from disk\n # ---------------\n print \"Reading saved subset features from \"+sub_data_file\n t = time.time()\n sub_data = pd.read_hdf(sub_data_file)\n print \"Elapsed: {}s\".format(time.time() - t)\n\n # print any data missing in target_date\n print_missing_cols_func(sub_data, target_date_obj, print_missing_cols)\n\n return sub_data",
"def import_dataset(self):\n\n if not self.layers_loaded and not self.data_type == \"table\":\n self.set_project_srid()\n\n if self.service == \"WFS\":\n uri = (\n \"pagingEnabled='true' \"\n \"preferCoordinatesForWfsT11='false' \"\n \"restrictToRequestBBOX='1' \"\n \"typename='{0}:{4}-{5}' \"\n \"url='https://{0}/services;key={1}/{2}/{4}-{5}' \"\n \"version='{3}'\"\n ).format(\n self.domain,\n self.api_key_instance.get_api_key(self.domain),\n self.service.lower(),\n self.service_versions[self.service.lower()],\n self.data_type,\n self.object_id,\n )\n\n layer = QgsVectorLayer(uri, self.layer_title, self.service.upper())\n\n elif self.service == \"WMTS\":\n if self.domain == \"basemaps.linz.govt.nz\":\n if self.selected_crs == \"EPSG:2193\":\n tms = \"NZTM2000Quad\"\n elif self.selected_crs == \"EPSG:3857\":\n tms = \"WebMercatorQuad\"\n else:\n self.iface.messageBar().pushMessage(\n \"Error\",\n \"\"\"The LINZ Basemaps WMTS has returned an unexpected coordinate system.\"\"\",\n level=Qgis.Critical,\n )\n return\n uri = (\n \"contextualWMSLegend=0\"\n \"&crs={1}\" # e.g. EPSG:2193\n \"&dpiMode=7&featureCount=10\"\n \"&format=image/webp\"\n \"&layers={2}\"\n \"&styles=default\"\n \"&tileMatrixSet={4}\" # e.g. NZTM2000Quad\n \"&url=https://{0}/v1/tiles/aerial/WMTSCapabilities.xml?api={3}\"\n ).format(\n self.domain,\n self.selected_crs,\n self.object_id,\n self.api_key_instance.get_api_key(self.domain),\n tms,\n )\n else:\n uri = (\n \"SmoothPixmapTransform=1\"\n \"&contextualWMSLegend=0\"\n \"&crs={1}&dpiMode=7&format=image/png\"\n \"&layers={2}-{3}&styles=style%3Dauto&tileMatrixSet={1}\"\n \"&url=https://{0}/services;\"\n \"key={4}/{5}/{6}/{2}/{3}/\"\n \"WMTSCapabilities.xml\"\n ).format(\n self.domain,\n self.selected_crs,\n self.data_type,\n self.object_id,\n self.api_key_instance.get_api_key(self.domain),\n self.service.lower(),\n self.service_versions[self.service.lower()],\n )\n layer = QgsRasterLayer(uri, self.layer_title, \"wms\")\n else:\n pass # ERROR not supported\n\n QgsProject.instance().addMapLayer(layer)\n self.layers_loaded = True\n self.dlg.close()",
"def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset",
"def load_cityscapes(self, dataset_dir, subset):\n self.class_labels = {\n 'unlabeled':0,\n 'ego vehicle':1, \n 'rectification border':2,\n 'out of roi':3, \n 'static':4, \n 'dynamic':5, \n 'ground':6, \n 'road':7, \n 'sidewalk':8, \n 'parking':9, \n 'rail track':10, \n 'building':11, \n 'wall':12, \n 'fence':13, \n 'guard rail':14, \n 'bridge':15, \n 'tunnel':16, \n 'pole':17, \n 'polegroup':18, \n 'traffic light':19, \n 'traffic sign':20, \n 'vegetation':21, \n 'terrain':22, \n 'sky':23, \n 'person':24, \n 'rider':25, \n 'car':26, \n 'truck':27, \n 'bus':28, \n 'caravan':29, \n 'trailer':30, \n 'train':31, \n 'motorcycle':32, \n 'bicycle':33, \n 'license plate':34}\n \n annotation_dir = dataset_dir + 'gtFine_trainvaltest/' + subset + '_all.json'\n self.image_info = json.load(open(annotation_dir, 'r'))\n \n # Add classes\n for i in range(len(self.class_labels)):\n self.add_class(\"cityscape\", i, list(self.class_labels.keys())[i])",
"def load(self):\n\n\t\t# Check if dataset location exists\n\t\tif (not os.path.exists(self._location)):\n\t\t\tprint(\"Error: Dataset location does not exist\")\n\t\t\treturn\n\n\t\tself._frames = sorted(glob.glob(self._location + '/frame*.jpg'))\n\n\t\t# Check if startFrame is beyond dataset bounds\n\t\tif (self._startFrame > len(self._frames)):\n\t\t\tprint(\"Error: Start frame is beyond dataset bounds\")\n\t\t\treturn\n\n\t\t# Check if endFrame is beyond dataset bounds\n\t\tif (self._endFrame > len(self._frames)):\n\t\t\tprint(\"Warning: End frame beyond dataset bounds, setting to end of dataset\")\n\n\t\t# Start buffering threads\n\t\tself._bufferThreadStopped = False\n\t\tself._bufferingThread.start()",
"def load_data(city, month, day):\n ##task0.4 now the user choices are all ready and saved, start preparing your data \n ## first, load the data of the selected city using the pd.read_csv command for the user specified city \n df = pd.read_csv(CITY_DATA[city])\n ## read the column start time as a date, to start extracting information from it\n df['Start Time']=pd.to_datetime(df['Start Time'])\n ##define a new column\"month\" as a function from start time, which reads the month from the date\n df['month'] = df['Start Time'].dt.month\n ## extract the day name from the start time, append a new column\"day_of_week\" to the dataframe \n df['day_of_week'] = df['Start Time'].dt.day_name()\n ## extract the hour of the start time and save to a new variable\"hour\" \n df['hour'] = df['Start Time'].dt.hour\n \n ## now, the panel data is complete with variables of interest created\n ## time to consider user filters on month and day!!\n ## for the month, if user choice is all, no changes to dataset\n ## if user specifies a month,filter the dataset column\"month\" with the selected value\n if month !=\"all\":\n df=(df[df['month']==int(month)])\n \n ## for the day column, if user doesnt choose a specific day and select\"all\", no change to df\n ## if the user chooses a day, filter the \"day_of_week\" column for the specified day\n if day !=\"All\":\n df=(df[df['day_of_week']==day])\n \n ## print the first five rows of data to confirm filters are applied\n ## return the data frame to start calculating statistics\n #print(df.head())\n return df",
"def loadtrainData_undersampling():\n train = []\n fileIn = open(PATH + 'traindata_Subtask4.txt')\n for line in fileIn.readlines():\n lineArr = line.strip().split()\n train.append([float(lineArr[i]) for i in range(len(lineArr))])\n\n pos = []\n neg = []\n for i in train:\n if i[-1] == 1.0:\n pos.append(i)\n else:\n neg.append(i)\n slice1 = random.sample(neg, len(pos))\n data = pos + slice1\n train_x = []\n train_y = []\n y = []\n for line in data:\n train_x.append([float(line[i]) for i in range(len(line) - 1)])\n y.append([int(line[-1])])\n for i in range(len(y)):\n train_y.append(y[i][0])\n return np.mat(train_x), np.mat(train_y).transpose()",
"def test_load_selections(self, selection):\n selection.load_selections([SAMPLE_ROI])\n rows, cols = np.column_stack(self.roi_coords)\n for pixel in self.image_set._roi_data[rows, cols]:\n assert np.array_equal(\n pixel, [255.0, 0.0, 0.0, 255.]\n )\n for pixel in self.subset._roi_data[rows, cols]:\n assert np.array_equal(\n pixel, [0.0, 100.0, 0.0, 255.]\n )"
] |
[
"0.661039",
"0.62231714",
"0.6147798",
"0.583328",
"0.5583259",
"0.5560036",
"0.54643214",
"0.540946",
"0.5382919",
"0.5382749",
"0.5364376",
"0.53627944",
"0.5359254",
"0.5358604",
"0.534547",
"0.53237855",
"0.53235096",
"0.531234",
"0.5311516",
"0.5301634",
"0.53001165",
"0.52961886",
"0.5245369",
"0.5231524",
"0.52247167",
"0.5218656",
"0.5215286",
"0.5214228",
"0.5211936",
"0.5210243"
] |
0.71230924
|
0
|
Generate instance masks for shapes of the given image ID.
|
def load_mask(self, image_id):
info = self.image_info[image_id]
mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
dtype=np.uint8)
shapes = info['polygons']
for i, p in enumerate(info['polygons']):
shape = p['shape_attributes']['name']
mask[:, :, i:i + 1] = self.draw_shape(mask[:, :, i:i + 1].copy(),
shape, p, 1)
# Map class names to class IDs.
if (self.config.MODE == "Combined"):
            class_ids = np.array([self.class_names.index(s['region_attributes']['element_type'])
                                  if 'element_type' in s['region_attributes'].keys()
                                  else self.class_names.index('door') for s in shapes])
        elif (self.config.MODE == "Separate"):
            class_ids = np.array([self.class_names.index(s['region_attributes']['Class'])
                                  if 'Class' in s['region_attributes'].keys()
                                  else self.class_names.index('Door (Curve)') for s in shapes])
return mask, class_ids.astype(np.int32)
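
The draw_shape helper used above is not defined in this excerpt. As an illustrative alternative, here is a minimal sketch (assuming plain VIA polygon regions carrying all_points_x/all_points_y shape attributes) that rasterizes per-instance masks directly with skimage:

import numpy as np
import skimage.draw

def polygons_to_mask(polygons, height, width):
    """Rasterize polygon regions into a [height, width, N] uint8 stack, one channel per instance."""
    mask = np.zeros([height, width, len(polygons)], dtype=np.uint8)
    for i, p in enumerate(polygons):
        attrs = p['shape_attributes']
        # skimage.draw.polygon takes row (y) then column (x) coordinates; shape= clips to bounds.
        rr, cc = skimage.draw.polygon(attrs['all_points_y'], attrs['all_points_x'],
                                      shape=(height, width))
        mask[rr, cc, i] = 1
    return mask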
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_mask(self, image_id):\n\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"pcb\":\n return super(self.__class__, self).load_mask(image_id)\n\n # convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n \n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n \n for i, p in enumerate(info[\"polygons\"]):\n # get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # return mask, and array of class IDs of each instance.\n # since we have one class ID only, we return an array of 1s\n return mask.astype(np.bool), info[\"class_ids\"]",
"def load_mask(self, image_id):\n image_info = self.image_info[image_id]\n annotations = image_info['annotations']\n instance_masks = []\n class_ids = []\n \n for annotation in annotations:\n class_id = annotation['category_id']\n mask = Image.new('1', (image_info['width'], image_info['height']))\n mask_draw = ImageDraw.ImageDraw(mask, '1')\n for segmentation in annotation['segmentation']:\n mask_draw.polygon(segmentation, fill=1)\n bool_array = np.array(mask) > 0\n instance_masks.append(bool_array)\n class_ids.append(class_id)\n\n mask = np.dstack(instance_masks)\n class_ids = np.array(class_ids, dtype=np.int32)\n \n return mask, class_ids",
"def load_mask(self, image_id):\n # If not a pedestrian dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"pedestrian\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)",
"def load_mask(self, image_id):\n # If not a vesicle dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"vesicle\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n rr, cc = skimage.draw.polygon(p[1], p[0])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)",
"def load_mask(self, image_id):\n # If not homeobject dataset, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != 'homeobject':\n print(\n \"Warn: \\'{}\\' label not found. Processing with parent load_mask.\".format(image_info[\"source\"]))\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n class_ids = image_info['class_ids']\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])], dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n\n # modify dirt mask if it resides outside of image boundary\n rr[rr > mask.shape[0] - 1] = mask.shape[0] - 1\n cc[cc > mask.shape[1] - 1] = mask.shape[1] - 1\n\n mask[rr, cc, i] = 1\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n class_ids = np.array(class_ids, dtype=np.int32)\n # return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)\n return mask, class_ids",
"def load_mask(self, image_id):\n # TODO: build dict **self.image_info** in this form\n # self.image_info.keys() = ['objects', 'imgWidth', 'imgHeight']\n # objects is a list which contains label and polygon (same as annotations form below)\n # imgHeight and imgWidth are numbers (usually 1024, 2048)\n annotations = self.image_info[image_id][\"objects\"]\n # annotations form: [{'label': label, 'polygon': [[x1,y1], [x2,y2] ...]}, ...]\n height = self.image_info[image_id]['imgHeight']\n width = self.image_info[image_id]['imgWidth']\n instance_masks = []\n class_ids = []\n for ann in annotations:\n m = self.annToMask(ann, height, width)\n \n label_tmp = ann['label']\n if ( not label_tmp in list(self.class_labels.keys()) ) and label_tmp.endswith('group'):\n label_tmp = label_tmp[:-len('group')]\n \n class_id = self.class_labels[label_tmp]\n instance_masks.append(m)\n class_ids.append(class_id)\n \n mask = np.stack(instance_masks, axis=2)\n class_ids = np.array(class_ids)\n \n return mask, class_ids",
"def load_mask(self, image_id):\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"glomerulus\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n# logger.info(\"mask {}\".format(image_id))\n if info[\"mask\"] is None:\n craters = info['craters']\n count = len(craters)\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n for i, dims in enumerate(craters):\n mask[:, :, i:i+1] = self.draw_shape(mask[:, :, i:i+1].copy(),\n \"circle\", dims, 1)\n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count-2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(s) for s in info[\"shapes\"]])\n info[\"mask\"] = mask.astype(np.bool)\n info[\"class_ids\"] = class_ids.astype(np.int32)\n else:\n mask, class_ids = info[\"mask\"], info[\"class_ids\"]\n return mask, class_ids",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n # Get mask directory from image path\n mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), \"masks\")\n\n # Read mask files from .png image\n mask = []\n # for f in next(os.walk(mask_dir))[2]:\n m = skimage.io.imread(os.path.join(mask_dir, info['id']+'.png')).astype(np.bool)\n mask.append(m)\n # print(mask)\n mask = np.stack(mask, axis=-1)\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID, we return an array of ones\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)",
"def load_mask(self, image_id):\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"self_annotation\":\n super(CarsAndVehiclesDataset, self).load_mask(image_id)\n\n instance_masks = []\n class_ids = []\n annotations = self.image_info[image_id][\"annotations\"]\n # Build mask of shape [height, width, instance_count] and list\n # of class IDs that correspond to each channel of the mask.\n for annotation in annotations:\n class_id = self.map_source_class_id(\"self_annotation.{}\".format(annotation[\"category_id\"]))\n\n if class_id:\n m = self.annToMask(annotation, image_info[\"height\"], image_info[\"width\"])\n\n # Some objects are so small that they're less than 1 pixel area\n # and end up rounded out. Skip those objects.\n if m.max() < 1:\n continue\n # Is it a crowd? If so, use a negative class ID\n if annotation[\"iscrowd\"]:\n # Use negative class ID for crowds\n class_id *= -1\n # For crowd masks, annToMask() sometimes returns a mask\n # smaller than the given dimensions. If so, resize it.\n if m.shape[0] != image_info[\"height\"] or m.shape[1] != image_info[\"width\"]:\n m = np.ones(image_info[\"height\"], image_info[\"width\"], dtype=bool)\n instance_masks.append(m)\n class_ids.append(class_id)\n\n # Pack instance masks into an array\n if class_ids:\n mask = np.stack(instance_masks, axis=2).astype(np.bool)\n class_ids = np.array(class_ids, dtype=np.int32)\n return mask, class_ids\n else:\n # Call super class to return an empty mask\n return super(CarsAndVehiclesDataset, self).load_mask(image_id)",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n image_name = str(info['id']).zfill(6) + \"_10.png\"\n gt_image = imageio.imread(\"./training/instance/\" + image_name)\n instance_gt = np.array(gt_image) % 256\n semantic_gt = np.array(gt_image) // 256\n instance_gt = cv2.resize(instance_gt, (self.width, self.height), interpolation=cv2.INTER_NEAREST)\n semantic_gt = cv2.resize(semantic_gt, (self.width, self.height), interpolation=cv2.INTER_NEAREST)\n labels = [26, 24]\n masks = []\n class_ids = []\n for l in labels:\n mask_sem = (semantic_gt == [l]).astype(np.int_) * 255\n mask_ins = instance_gt & mask_sem\n num_ins = np.max(mask_ins)\n if(num_ins > 30):\n print(\"WARNING: num ins %d for label l %d\" % (num_ins, l))\n\n for i in range(1, num_ins + 1):\n mask_obj = (mask_ins == [i]).astype(np.int_) * 255\n masks.append(mask_obj)\n if l == 24:\n class_ids.append(2)\n else:\n class_ids.append(1)\n masks = np.array(masks)\n masks = np.moveaxis(masks, 0, -1)\n class_ids = np.array(class_ids)\n return masks, class_ids",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n num_cards = info['cards']\n # count = len(num_cards)\n count = 1 # there will only ever be 1 card per image (for simplicity) TODO: do multiple documents?\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n # for i, _ in enumerate(info['cards']):\n mask[:, :, 0] = self.draw_quadrilateral(mask[:, :, 0].copy(), info['cornerpoints'], 1)\n\n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count-2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n # class_ids = np.array([self.class_names.index(s[0]) for s in num_categories])\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n # Get mask directory from image path\n mask_dir = os.path.join(\n os.path.dirname(os.path.dirname(info['path'])), 'masks')\n\n # Read mask files from .png image\n masks = []\n for file in next(os.walk(mask_dir))[2]:\n if file.endswith('.png'):\n mask = imread(os.path.join(mask_dir, file),\n as_gray=True).astype(np.bool)\n masks.append(mask)\n masks = np.stack(masks, axis=-1)\n # Return masks, and array of class IDs of each instance. Since we have\n # one class ID, we return an array of ones\n return masks, np.ones([masks.shape[-1]], dtype=np.int32)",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n ships = info['ships']\n count = len(ships)\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n for i, (ship, dims) in enumerate(info['ships']):\n mask[:, :, i:i + 1] = self.draw_mask(mask[:, :, i:i + 1].copy(),\n ship, dims)\n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count - 2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(\n occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(s[0]) for s in ships])\n return mask, class_ids.astype(np.int32)",
"def load_mask(self, image_id):\n global iter_num\n print(\"image_id\",image_id)\n info = self.image_info[image_id]\n count = 1 # number of object\n img = Image.open(info['mask_path'])\n num_obj = self.get_obj_index(img)\n mask = np.zeros([info['height'], info['width'], num_obj], dtype=np.uint8)\n mask = self.draw_mask(num_obj, mask, img,image_id)\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count - 2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n \n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n labels = []\n labels = self.from_yaml_get_class(image_id)\n labels_form = []\n for i in range(len(labels)):\n if labels[i].find(\"bird\") != -1:\n # print \"bird\"\n labels_form.append(\"bird\")\n class_ids = np.array([self.class_names.index(s) for s in labels_form])\n return mask, class_ids.astype(np.int32)",
"def get_object_mask(self, image_id):\n image_info = self.image_meta[image_id]\n active_class_info = image_info['active_class_info']\n object_cnt = len(active_class_info)\n mask = np.zeros([image_info['height'], image_info['width'], object_cnt], dtype=np.uint8)\n for i, (object_, _, dims) in enumerate(active_class_info):\n mask[:, :, i:i + 1] = self.draw_object_shape(mask[:, :, i:i + 1].copy(), object_, 1, dims)\n \n # Handle occlusions, when two objects intersect, we should ensure that the intersection mask is\n # given to only only object.\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n # print(occlusion)\n \n for i in range(object_cnt - 2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n return mask.astype(np.bool)",
"def load_mask_custom(self, image_id, image_shape):\n info = self.image_info[image_id]\n filePaths = info['maskPaths']\n classes = info['maskClasses']\n \n masks = []\n class_ids = []\n if(len(image_shape)==3):\n image_shape = image_shape[:2]\n \n # 1 filePath -- 1 class \n for i, filePath in enumerate(filePaths):\n \n if filePath.endswith(\".png\"):\n mask = cv2.imread(filePath, 0)\n mask = np.asarray(mask, dtype = \"uint8\")\n \n masks.append(mask)\n class_ids.append(classes[i])\n \n if len(masks)==0 :\n masks.append(np.zeros(image_shape, dtype = \"uint8\"))\n class_ids.append(0)\n \n image = np.stack(masks, axis=2)\n class_ids = np.array(class_ids, dtype=np.int32)\n return image, class_ids",
"def _load_mask(self, image_id):\n\n mask_pattern = os.path.join(self.directory, image_id, \"masks/*.png\")\n ic = ImageCollection(mask_pattern)\n\n mask = np.zeros(self.imsize, dtype='uint8')\n for lbl, indiv_mask in enumerate(ic):\n mask += ((\n 1 + lbl) * self._process(indiv_mask, True).astype('uint8'))\n\n return mask",
"def load_mask(self, image_id):\n # Override this function to load a mask from your dataset.\n # Otherwise, it returns an empty mask.\n mask = np.empty([0, 0, 0])\n class_ids = np.empty([0], np.int32)\n return mask, class_ids",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n mask_paths = glob.glob(info['path'].replace('images', 'masks').replace('.png', '*.png'))\n masks = []\n class_ids = []\n for mask_path in mask_paths:\n# print(mask_path)\n mask = cv2.imread(mask_path,cv2.IMREAD_GRAYSCALE) \n masks.append(mask)\n if 'normal' in mask_path:\n class_ids.append(0)\n if 'benign' in mask_path:\n class_ids.append(1)\n if 'malignant' in mask_path:\n class_ids.append(2)\n masks = np.moveaxis(masks,0,-1)\n class_ids = np.array(class_ids)\n return masks, class_ids",
"def load_mask(self, image_id):\n\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"dsb\":\n return super(self.__class__, self).load_mask(image_id)\n\n path = image_info[\"dir\"]\n\n mascara = next(os.walk(path + '/masks/'))[2]\n masc = skimage.io.imread(path + '/masks/' + mascara[0])\n height, width = masc.shape\n\n mask = np.zeros((height, width, len(mascara)), dtype=np.uint8)\n\n for i, mask_file in enumerate(mascara):\n mask[:,:,i] = skimage.io.imread(path + '/masks/' + mask_file)\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n label_path = info['path']\n\n # 读取json文件\n with open(os.path.join(self.DATA_ROOT_DIR, label_path), encoding='utf-8') as json_file:\n labelmeJson = json.load(json_file)\n height = labelmeJson['imageHeight']\n width = labelmeJson['imageWidth']\n shapes = labelmeJson['shapes']\n\n count = len(shapes)\n mask = np.zeros([height, width, count], dtype=np.uint8)\n\n for i, shape in enumerate(shapes):\n mask[:, :, i] = self.shape_to_mask(mask.shape, shape['points'], shape['shape_type'])\n\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(shape['label']) if shape['label'] in self.class_names else self.class_names.index('undefined') for shape in shapes])\n #print('class_ids:', class_ids)\n #input()\n return mask.astype(np.bool), class_ids.astype(np.int32)",
"def load_mask(self, image_id):\n # Override this function to load a mask from your dataset.\n # Otherwise, it returns an empty mask.\n logging.warning(\"You are using the default load_mask(), maybe you need to define your own one.\")\n mask = np.empty([0, 0, 0])\n class_ids = np.empty([0], np.int32)\n return mask, class_ids",
"def load_mask(self, image_id):\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"face\":\n return super(self.__class__, self).load_mask(image_id)\n info = self.image_info[image_id]\n mask = np.zeros([info['height'], info['width'], len(info['boundingbox'])], dtype=np.uint8)\n for i, p in enumerate(info['boundingbox'].values()):\n rr, cc = skimage.draw.polygon(p['y'], p['x'])\n mask[rr, cc, i] = 1\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)",
"def load_mask(self, image_id):\r\n mask_path = self.mask_path[self.ids[image_id]]\r\n file_pattern = os.path.join(mask_path, \"*.png\")\r\n info = self.image_info[image_id]\r\n mask_files = glob.glob(file_pattern)\r\n #mask_tmp = cv2.imread(mask_files[0])\r\n mask_new = np.zeros([info['height'], info['width'], mask_files.__len__()+1], dtype=np.uint8) # one more for background\r\n count = 1\r\n mask_total = 0\r\n for i in mask_files:\r\n mask = cv2.imread(i)\r\n mask = mask[:, :, 1] / 255.0\r\n #mask = ski_transform.resize(mask, (info['height'], info['width']), mode='reflect')\r\n mask_new[:, :, count] = (mask)\r\n mask_total = mask_total + (mask>0) * count\r\n count = count + 1\r\n # Map class names to class IDs.\r\n class_ids = np.ones(count) # one more fore background\r\n #add Background\r\n class_ids[0] = 0; # Background\r\n mask_new[:, :, 0] = np.invert(mask_total.astype(np.bool))\r\n # End add Background\r\n\r\n return mask_new, class_ids.astype(np.int32)",
"def load_mask(self, image_id):\n # load image infos\n \n info = self.image_info[image_id]\n patch_path = info['path']\n height = info['height']\n width = info['width']\n mag_path = os.path.join(patch_path,\"mag\")\n tissue_path = os.path.join(patch_path,\"tissue\")\n \n # collect mask names\n \n mag_mask_list = os.listdir(mag_path)\n tissue_mask_list = os.listdir(tissue_path)\n \n classes = []\n masks = []\n \n # append masks and ids in list\n \n if mag_mask_list:\n for filename in mag_mask_list:\n a = os.path.join(mag_path,filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(2)\n \n if tissue_mask_list:\n for filename in tissue_mask_list:\n a = os.path.join(tissue_path,filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(1)\n \n return np.stack(masks,axis=2), np.asarray(classes).astype(int)",
"def load_mask(self, image_id):\r\n info = self.image_info[image_id]\r\n mask = self.masks[image_id]\r\n count = int(mask.max())\r\n mask_new = np.zeros([info['height'], info['width'], count+1], dtype=np.uint8) # one more for background\r\n for i in range(count+1):\r\n #mask_new[:, :, i:i+1] = (mask == i).transpose(1, 2, 0)\r\n mask_new[:, :, i:i + 1] = (mask==i).reshape(mask.shape[0], mask.shape[1], -1)\r\n # mask_new[:, :, i:i+1] = (mask==i).transpose(1,2,0)\r\n # Map class names to class IDs.\r\n class_ids = np.ones(count+1) # one more fore background\r\n\r\n #add Background\r\n #class_ids[count] = 0 # add Background\r\n #mask_new[:, :, count:count + 1] = (mask == 0).transpose(1, 2, 0)\r\n #class_ids[count] = 0 # add Background\r\n class_ids[0] = 0 # add Background\r\n # End add Background\r\n\r\n return mask_new, class_ids.astype(np.int32)",
"def load_mask(self, image_id):\r\n info = self.image_info[image_id]\r\n mask = tifffile.imread(self.mask_path[self.ids[image_id]])\r\n\r\n if np.unique(mask).__len__() > 1:\r\n count = np.unique(mask).__len__()-1 # one less because of 0\r\n\r\n mask_new = np.zeros([info['height'], info['width'], count], dtype=np.uint8) # one more for background\r\n running = 0\r\n for i in np.unique(mask): #range(1, count):\r\n if ((i > 0) & ((mask == i).sum() > 0)):\r\n mask_new[:, :, running] = (mask == i)\r\n running = running + 1\r\n # Map class names to class IDs.\r\n class_ids = np.ones(count)\r\n else:\r\n mask_new = np.zeros([info['height'], info['width'], 1], dtype=np.uint8)\r\n class_ids = np.zeros([1])\r\n return mask_new, class_ids.astype(np.int32)",
"def load_mask(self, image_id):\n # If not a vesicle dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"vesicle\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert 16 bit mask to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask_path = info['mask_path']\n mask = cv.imread(mask_path, cv.IMREAD_GRAYSCALE + cv.IMREAD_ANYDEPTH)\n bin_mask = get_bin_mask(mask)\n n_instance = bin_mask.shape[-1]\n return bin_mask, np.ones([n_instance], dtype=np.int32)",
"def load_mask(self, image_id):\n # If not a COCO image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"coco\":\n return super(ExtendedCocoDataset, self).load_mask(image_id, common.COCO_NUM_CLASSES) # NOTE: this calls ActivityDataset.load_mask()\n\n instance_masks = []\n class_ids = []\n annotations = self.image_info[image_id][\"annotations\"]\n # Build mask of shape [height, width, instance_count] and list\n # of class IDs that correspond to each channel of the mask.\n for annotation in annotations:\n class_id = self.map_source_class_id(\n \"coco.{}\".format(annotation['category_id']))\n if class_id:\n m = self.annToMask(annotation, image_info[\"height\"],\n image_info[\"width\"])\n # Some objects are so small that they're less than 1 pixel area\n # and end up rounded out. Skip those objects.\n if m.max() < 1:\n continue\n # Is it a crowd? If so, use a negative class ID.\n if annotation['iscrowd']:\n # Use negative class ID for crowds\n class_id *= -1\n # For crowd masks, annToMask() sometimes returns a mask\n # smaller than the given dimensions. If so, resize it.\n if m.shape[0] != image_info[\"height\"] or m.shape[1] != image_info[\"width\"]:\n m = np.ones([image_info[\"height\"], image_info[\"width\"]], dtype=bool)\n instance_masks.append(m)\n class_ids.append(class_id)\n\n # Pack instance masks into an array\n if class_ids:\n mask = np.stack(instance_masks, axis=2).astype(np.bool)\n class_ids = np.array(class_ids, dtype=np.int32)\n return mask, class_ids\n else:\n # Call super class to return an empty mask\n return super(CocoDataset, self).load_mask(image_id)"
] |
[
"0.7636923",
"0.742737",
"0.7384672",
"0.7339207",
"0.72988886",
"0.7290323",
"0.71908504",
"0.713616",
"0.713167",
"0.7113493",
"0.70892924",
"0.70513177",
"0.7007302",
"0.7003303",
"0.70003724",
"0.69821656",
"0.68691427",
"0.6793878",
"0.6793428",
"0.6769815",
"0.6765007",
"0.6760266",
"0.67576915",
"0.6706683",
"0.6636101",
"0.66207206",
"0.6612627",
"0.6577259",
"0.64755005",
"0.64205027"
] |
0.7464017
|
1
|
Route to redirect /v to /version permanently (301).
|
def redirect_version():
return redirect(url_for("base_blueprint.version"), code=301)
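
A minimal self-contained sketch (the app and blueprint wiring below are hypothetical, not taken from the entry above) showing how the permanent redirect can be verified with Flask's test client:

from flask import Flask, Blueprint, redirect, url_for

base_blueprint = Blueprint("base_blueprint", __name__)

@base_blueprint.route("/version")
def version():
    return "version page"

@base_blueprint.route("/v")
def redirect_version():
    return redirect(url_for("base_blueprint.version"), code=301)

app = Flask(__name__)
app.register_blueprint(base_blueprint)

with app.test_client() as client:
    resp = client.get("/v")
    assert resp.status_code == 301                      # permanent redirect
    assert resp.headers["Location"].endswith("/version")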
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def redir_index():\n return redirect(url_for(\"index\"), code=301)",
"def redirect_source():\n return redirect(url_for(\"base_blueprint.source\"), code=301)",
"def redirect_old_featured(page):\r\n return redirect(url_for('.index', page=page), 301)",
"def redirect_view(request):\n path = request.GET.get(\"to\") or \"/\"\n return redirect(path if path.startswith(\"/\") else f\"/{path}\", permanent=True)",
"def redirect(to):\r\n def _redirect(environ, start_response):\r\n args, kwargs = environ['wsgiorg.routing_args']\r\n start_response('301 MOVED PERMANENTLY',\r\n [('Location', to.format(*args, **kwargs))])\r\n return []\r\n return _redirect",
"def redirect(self):\n new_url = self.server.url + options.script_alias + '/'\n self.send_response(301, \"Moved (redirection follows)\")\n self.send_header(\"Content-type\", \"text/html\")\n self.send_header(\"Location\", new_url)\n self.end_headers()\n self.wfile.write(\"\"\"<html>\n<head>\n<meta http-equiv=\"refresh\" content=\"1; URL=%s\">\n</head>\n<body>\n<h1>Redirection to <a href=\"%s\">ViewVC</a></h1>\nWait a second. You will be automatically redirected to <b>ViewVC</b>.\nIf this doesn't work, please click on the link above.\n</body>\n</html>\n\"\"\" % tuple([new_url]*2))",
"def index_file():\n return redirect(\"/\")",
"def set_version(v):\n old = get_version()\n sys.stderr.write('%s -> %s\\n' % (old, v))\n with open(INIT, 'r+') as f:\n text = f.read()\n text = pattern.sub(\"__version__ = %r\" % v, text)\n f.seek(0)\n f.truncate()\n f.write(text)",
"def redirect(url, status=None):\n raise cherrypy.HTTPRedirect(url, status)",
"def redirect(url):",
"def catch_all(path):\n return redirect('/', code=302)",
"def dispatch(self, req, *args):\n builder = nova.api.openstack.views.versions.get_view_builder(req)\n if req.path in ('/', ''):\n return builder.build_versions(VERSIONS)\n else:\n return builder.build_version(VERSIONS['v1.0'])",
"def _send_301(self, new_url):\n try:\n self.send_response(301)\n self.send_header('Location', new_url)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n except UnicodeEncodeError:\n self._send_internal_server_error()",
"def index():\n return redirect('/client/index.html')",
"def redirect_to_original_url(query_short_url):\n db_url = Url.query.filter_by(short_url=query_short_url).first_or_404()\n db_url.views += 1\n db.session.commit()\n return redirect(db_url.original_url)",
"def set_up_asset_versioning(app):\n def callback(app, error, endpoint, values):\n if endpoint == \"staticv\":\n filename = values[\"filename\"]\n fpath = path.join(app.static_folder, filename)\n try:\n mtime = path.getmtime(fpath)\n except OSError:\n return url_for(\"static\", filename=filename)\n cache = app._hash_cache.get(fpath)\n if cache and cache[0] == mtime:\n hashstr = cache[1]\n else:\n with open(fpath, \"rb\") as fp:\n hashstr = md5(fp.read()).hexdigest()\n app._hash_cache[fpath] = (mtime, hashstr)\n return url_for(\"static\", filename=filename, v=hashstr)\n raise error\n\n old_get_max_age = app.get_send_file_max_age\n\n def extend_max_age(filename):\n if \"v\" in request.args:\n return 60 * 60 * 24 * 365 # 1 year\n return old_get_max_age(filename)\n\n app._hash_cache = {}\n app.url_build_error_handlers.append(lambda a, b, c: callback(app, a, b, c))\n app.get_send_file_max_age = extend_max_age",
"def web_index():\n\n try:\n auth_check()\n except Exception as e:\n return flask.redirect(str(e))\n\n db_update_archives()\n\n return flask.redirect('videos')",
"def redirect_permanently(self, location):\n self.status = 301\n self.set_header('Location', location)",
"def route_view(request, code):\n try:\n instance = get_object_or_404(ShortUrl, url_code=code)\n return redirect(instance.long_url, permanent=True)\n except Http404:\n return redirect('/', permanent=True)",
"def versioned(filename, version, force_version=False, full_path=True):\n if not '.' in filename:\n return None\n\n if USE_VERSIONING or force_version:\n dotindex = filename.rindex('.')\n filename = u'%s.%s%s' % (filename[:dotindex], version, filename[dotindex:])\n\n if full_path:\n return static(filename)\n\n return filename",
"def process_request(self, req):\n args = {'method': req.method, 'path': req.path, 'accept': req.accept}\n LOG.debug(\"Determining version of request: %(method)s %(path)s \"\n \"Accept: %(accept)s\", args)\n\n # If the request is for /versions, just return the versions container\n if req.path_info_peek() == \"versions\":\n return self.versions_app.index(req, explicit=True)\n\n accept = str(req.accept)\n if accept.startswith(self.vnd_mime_type):\n LOG.debug(\"Using media-type versioning\")\n token_loc = len(self.vnd_mime_type)\n req_version = accept[token_loc:]\n else:\n LOG.debug(\"Using url versioning\")\n # Remove version in url so it doesn't conflict later\n req_version = self._pop_path_info(req)\n\n try:\n version = self._match_version_string(req_version)\n except ValueError:\n LOG.debug(\"Unknown version. Returning version choices.\")\n return self.versions_app\n\n req.environ['api.version'] = version\n req.path_info = ''.join(('/v', str(version), req.path_info))\n LOG.debug(\"Matched version: v%d\", version)\n LOG.debug('new path %s', req.path_info)\n return None",
"def redirect_old_published(page): # pragma: no cover\r\n category = db.session.query(model.category.Category).first()\r\n return redirect(url_for('.app_cat_index', category=category.short_name, page=page), 301)",
"def redoc_redirect(request: HttpRequest) -> HttpResponse:\n return HttpResponse('Use /api/v2/docs/ instead', status=410)",
"def swagger_redirect(request: HttpRequest) -> HttpResponse:\n return HttpResponse('Use /api/v2/docs/ instead', status=410)",
"def switch_to_version(self, version):\n self.current_version = version\n self.save()",
"def redirect_old_draft(page):\r\n return redirect(url_for('.draft', page=page), 301)",
"def v1(text):\n return {'message': 'All v1 routes are deprecated'}",
"def increment_version(path):\n pattern = r\"v(?P<version>\\d{3})\"\n regex = re.compile(pattern)\n match = regex.search(path)\n if not match:\n raise ValueError(\"%s does not contain a version number\" % path)\n version = match.group(\"version\")\n version = \"v\" + str(int(version) + 1).zfill(3)\n return regex.sub(version, path)",
"def menu_shorturl_redirect(request, b36_int, model=MenuItem):\n url = _redirect_implementation(request=request, model=model,\n b36_encoded_pk=b36_int)\n return redirect(url, permanent=False)",
"def index(self, request):\n versions = []\n for key, data in VERSIONS.items():\n v = BaseVersion(\n data[\"id\"],\n data[\"status\"],\n request.application_url,\n data[\"updated\"])\n versions.append(v)\n return wsgi.Result(VersionsDataView(versions))"
] |
[
"0.60801035",
"0.56187874",
"0.5600564",
"0.5590633",
"0.5558089",
"0.55440605",
"0.5326738",
"0.529449",
"0.5254541",
"0.52042747",
"0.51776284",
"0.5129523",
"0.51130855",
"0.51113045",
"0.5070587",
"0.5069953",
"0.50527155",
"0.5035664",
"0.500536",
"0.49580628",
"0.49280378",
"0.49221554",
"0.49099606",
"0.48951125",
"0.48856026",
"0.48672053",
"0.48608184",
"0.48565432",
"0.47990254",
"0.47778252"
] |
0.7418174
|
0
|
Route to return the current version of the application.
|
def version():
from app import get_version
return render_template("version.html", version=get_version())
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_application_version(self):\n return self.connector.request('GET', '/app/version')",
"def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)",
"def redirect_version():\n return redirect(url_for(\"base_blueprint.version\"), code=301)",
"def dispatch(self, req, *args):\n builder = nova.api.openstack.views.versions.get_view_builder(req)\n if req.path in ('/', ''):\n return builder.build_versions(VERSIONS)\n else:\n return builder.build_version(VERSIONS['v1.0'])",
"def show(self, request):\n # data = VERSIONS[request.url_version]\n LOG.debug('request: %s ' % request)\n data = VERSIONS['1.0']\n LOG.debug('data: %s ' % data)\n LOG.debug(\"conf: sections:%s\" % conf.sections())\n v = Version(data[\"id\"], data[\"status\"],\n request.application_url, data[\"updated\"])\n return wsgi.Result(VersionDataView(v))",
"def version():\n response = make_response('{\"version\" : %s }' % app.config.get('VERSION'), 200)\n response.content_type = \"application/json\"\n return response",
"def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)",
"def get_version():\n click.echo(get_current_version_number())",
"def version(self):\n\n data = {\"action\" : \"version\"}\n return rpc_request(self.uri, data)",
"def get_version(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/version\").json()",
"def version_get():\n try:\n return json_response.success({'version': version.local_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200",
"def get_version(self):\n res = requests.get(self.base_url + '/version')\n\n return res",
"def app_version(self) -> str:\n return pulumi.get(self, \"app_version\")",
"def version(self, app, args):\n app.put('\\n\\n%s\\n' % _version_str)",
"def get_version(self):\n return self.__make_api_call('get/version')",
"def get_api_version(self):\n return self.connector.request('GET', '/app/webapiVersion')",
"def GetVersion(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def get_current_version(self):\n raise NotImplementedError(\"get_current_version is not implemented\")",
"def get_current_version(self) -> str:\n raise NotImplementedError()",
"def get_version():\n return about.get_version()",
"def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''",
"def print_app_version(app_name):\n print_file('{}/current/version.txt'.format(get_app_basedir(app_name)))",
"def Hello(self):\n version = '1.5.3'\n print 'returned version number', version\n return version",
"def version(self) -> 'outputs.VersionResponse':\n return pulumi.get(self, \"version\")",
"def version(ctx):\n print(VERSION)",
"def version(self):\n\n if self.running() is True:\n return APIConsumer.get(\"/version\").content\n else:\n return None",
"def get_version(self):\n\n r = self._create_operation_request(self, method=\"GET\")\n root_info = send_session_request(self._session, r).json()\n return root_info[\"currentVersion\"]",
"def get_version(self):\n url = '{}/v2/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''",
"def get_current_version(self):\n current_version = self.get_version(self.get_module_and_path(self._main_dir))\n return current_version",
"def get_version():\n version = current_app.config.get(\"APP_FULL_VERSION\", \"\")\n if len(version) == 0: \n version = current_app.config.get(\"APP_VERSION\", \"\")\n return jsonify({\n \"version\": version,\n \"app_id\": current_app.config.get(\"APP_ID\", \"\"),\n \"commit\": current_app.config.get(\"APP_COMMIT\", \"\"),\n \"date\": current_app.config.get(\"APP_COMMIT_DATE\", \"\"),\n \"timestamp\": current_app.config.get(\"APP_COMMIT_DATE_EPOCH\", 0),\n \"branch\": current_app.config.get(\"APP_COMMIT_BRANCH\", \"\"),\n \"author\": current_app.config.get(\"APP_COMMIT_AUTHOR\", \"\"),\n \"contact_url\": current_app.config.get(\"APP_CONTACT_URL\", \"\"),\n \"contact_email\": current_app.config.get(\"APP_CONTACT_EMAIL\", \"\"),\n })"
] |
[
"0.7166603",
"0.6996313",
"0.6883298",
"0.6768739",
"0.67376894",
"0.6720483",
"0.66486543",
"0.6620883",
"0.6592513",
"0.65706766",
"0.655283",
"0.65500826",
"0.6521532",
"0.65004563",
"0.6468458",
"0.64239985",
"0.6387191",
"0.6363297",
"0.6339282",
"0.63187337",
"0.63084704",
"0.62780833",
"0.6273281",
"0.6272395",
"0.626818",
"0.6246189",
"0.62274027",
"0.6215477",
"0.6199037",
"0.61920255"
] |
0.774868
|
0
|
Redirects /s to /source permanently (301).
|
def redirect_source():
return redirect(url_for("base_blueprint.source"), code=301)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _send_301(self, new_url):\n try:\n self.send_response(301)\n self.send_header('Location', new_url)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n except UnicodeEncodeError:\n self._send_internal_server_error()",
"def redirect_permanently(self, location):\n self.status = 301\n self.set_header('Location', location)",
"def redirect(to):\r\n def _redirect(environ, start_response):\r\n args, kwargs = environ['wsgiorg.routing_args']\r\n start_response('301 MOVED PERMANENTLY',\r\n [('Location', to.format(*args, **kwargs))])\r\n return []\r\n return _redirect",
"def redirect(self):\n new_url = self.server.url + options.script_alias + '/'\n self.send_response(301, \"Moved (redirection follows)\")\n self.send_header(\"Content-type\", \"text/html\")\n self.send_header(\"Location\", new_url)\n self.end_headers()\n self.wfile.write(\"\"\"<html>\n<head>\n<meta http-equiv=\"refresh\" content=\"1; URL=%s\">\n</head>\n<body>\n<h1>Redirection to <a href=\"%s\">ViewVC</a></h1>\nWait a second. You will be automatically redirected to <b>ViewVC</b>.\nIf this doesn't work, please click on the link above.\n</body>\n</html>\n\"\"\" % tuple([new_url]*2))",
"def redir_index():\n return redirect(url_for(\"index\"), code=301)",
"def ssl_redirect():\n if request.get_header('X-Forwarded-Proto', 'http') != 'https':\n redirect(request.url.replace('http://', 'https://', 1), code=301)",
"def test_forward(self):\n short_url = ShortURL.objects.create(url='http://example.com')\n response = self.client.get('/%s'%(short_url.key))\n self.assertEqual(response.status_code, 301)",
"def redirect(url):",
"def redirect_dest(fallback):\n dest = request.args.get('next')\n try:\n if dest.startswith('/') or dest.startswith(request.host_url):\n return redirect(dest)\n dest_url = url_for(dest)\n except:\n return redirect(fallback)\n return redirect(dest_url)",
"def resolveRedirect(self, useHEAD=False):\n conn = self.getConnection()\n try:\n if useHEAD:\n conn.request('HEAD', '%s%s' % (self.path, self.query), None,\n self.header)\n else:\n conn.request('GET', '%s%s' % (self.path, self.query), None,\n self.header)\n self.response = conn.getresponse()\n # read the server's encoding, in case we need it later\n self.readEncodingFromResponse(self.response)\n except httplib.BadStatusLine:\n # Some servers don't seem to handle HEAD requests properly,\n # e.g. http://www.radiorus.ru/ which is running on a very old\n # Apache server. Using GET instead works on these (but it uses\n # more bandwidth).\n if useHEAD:\n return self.resolveRedirect(useHEAD=False)\n else:\n raise\n if self.response.status >= 300 and self.response.status <= 399:\n # to debug, print response.getheaders()\n redirTarget = self.response.getheader('Location')\n if redirTarget:\n try:\n redirTarget.encode('ascii')\n except UnicodeError:\n redirTarget = redirTarget.decode(\n self.getEncodingUsedByServer())\n if redirTarget.startswith('http://') or \\\n redirTarget.startswith('https://'):\n self.changeUrl(redirTarget)\n return True\n elif redirTarget.startswith('/'):\n self.changeUrl(u'%s://%s%s'\n % (self.protocol, self.host, redirTarget))\n return True\n else: # redirect to relative position\n # cut off filename\n directory = self.path[:self.path.rindex('/') + 1]\n # handle redirect to parent directory\n while redirTarget.startswith('../'):\n redirTarget = redirTarget[3:]\n # some servers redirect to .. although we are already\n # in the root directory; ignore this.\n if directory != '/':\n # change /foo/bar/ to /foo/\n directory = directory[:-1]\n directory = directory[:directory.rindex('/') + 1]\n self.changeUrl('%s://%s%s%s'\n % (self.protocol, self.host, directory,\n redirTarget))\n return True\n else:\n return False # not a redirect",
"def head301(\n self, custom_headers=None, raw=False, **operation_config):\n # Construct URL\n url = '/http/redirect/301'\n\n # Construct parameters\n query_parameters = {}\n\n # Construct headers\n header_parameters = {}\n header_parameters['Content-Type'] = 'application/json; charset=utf-8'\n if custom_headers:\n header_parameters.update(custom_headers)\n\n # Construct and send request\n request = self._client.head(url, query_parameters)\n response = self._client.send(request, header_parameters, **operation_config)\n\n if response.status_code not in [200, 301]:\n raise models.ErrorException(self._deserialize, response)\n\n if raw:\n client_raw_response = ClientRawResponse(None, response)\n client_raw_response.add_headers({\n 'Location': 'str',\n })\n return client_raw_response",
"def register_redirect(self, src, dst):\n #TODO: check for cycles\n assert src not in self.redirects\n self.redirects[src] = dst",
"def redirect_view(request):\n path = request.GET.get(\"to\") or \"/\"\n return redirect(path if path.startswith(\"/\") else f\"/{path}\", permanent=True)",
"def _redirect(self, url):\n logger.debug('Redirecting to URL %s', url)\n segments = urllib.parse.urlparse(url)\n\n host = segments.netloc\n if host != self._host:\n self.new_connection(host)\n\n relurl = urllib.parse.urlunparse(('', '') + segments[2:])\n try:\n self._raw_get(relurl)\n except http.client.HTTPException as e:\n logger.debug('Got exception: %s.', e)\n raise DDGConnectionError(\"Failed to get '%s'.\" % url)",
"def redirect(url, status=None):\n raise cherrypy.HTTPRedirect(url, status)",
"def alias(aliasPath, sourcePath):\n sourcePath = sourcePath.split(\"/\")\n aliasPath = aliasPath.split(\"/\")\n\n def rewriter(request):\n if request.postpath[: len(aliasPath)] == aliasPath:\n after = request.postpath[len(aliasPath) :]\n request.postpath = sourcePath + after\n request.path = \"/\" + \"/\".join(request.prepath + request.postpath)\n\n return rewriter",
"def redirect(target):\n return {\n 'status': '302',\n 'statusDescription': 'Found',\n 'headers': {\n 'location': [{\n 'key': 'Location',\n 'value': target\n }]\n }\n }",
"def forward_to(id):\n\n db = init_connection_engine()\n\n if id == 'short_URL':\n return redirect(url_for('index'))\n else:\n # Looking up the URL by its ID in the DB.\n try:\n # Using a with statement ensures that the connection is always released\n # back into the pool at the end of statement (even if an error occurs).\n with db.connect() as conn:\n lookup_url = \"SELECT url_data FROM url_list WHERE url_id='\" + id + \"';\"\n target_url = conn.execute(lookup_url).fetchone()\n # If target URL is not found.\n if not target_url:\n flash('Not found')\n return redirect(url_for('index'))\n # If something goes wrong.\n except:\n flash('Something went wrong')\n return redirect(url_for('index'))\n\n return redirect(target_url[0])",
"def redirect(self, location, status):\n url = ''\n # location string could contain either an abolute path or a relative one.\n # Also relative address could begin with /, i.e. from the root directory\n # on the same server, or be related to current path.\n # Therefore we split location for 3 parts:\n # 1) a host with a protocol http(s)://site.com\n # 2) the rest of the link (including first / if it presents)\n # 3) beginning / if it presents (as a flag)\n redirect_re = re.compile('^(https?://[^/]+)?((/)?(?:.*))$', re.I)\n matches = redirect_re.match(location)\n if matches.group(1): # if there is a host in the location\n url = location # the path is absolute, redirect there\n elif matches.group(3): # there is beginning /\n # the path is related to the root directory of the same server\n # add a path to the host\n url = '{}://{}{}'.format(self.url.protocol, self.url.host, matches.group(2))\n else: # the path is related to current directory on the server\n # get current path from the request\n path = self.url.request.rsplit('/', 1)[0] + '/'\n # add a new path to current path with the host\n url = '{}://{}{}'.format(self.url.protocol, self.url.host, path + matches.group(2))\n return TaskRedirect(self.url.host, status, URL(url))",
"def redirect_to_original_url(query_short_url):\n db_url = Url.query.filter_by(short_url=query_short_url).first_or_404()\n db_url.views += 1\n db.session.commit()\n return redirect(db_url.original_url)",
"def redirect_old_featured(page):\r\n return redirect(url_for('.index', page=page), 301)",
"def redirect(self, url):\n self.setResponseCode(responsecode.FOUND)\n self.setHeader(\"location\", url)",
"def redirect(cls, dest, code = 302):\r\n dest = cls.format_output_url(dest)\r\n c.response.headers['Location'] = dest\r\n c.response.status_code = code\r\n return c.response",
"def redirect(self, location):\n self.status=302\n headers=self.headers\n headers['status']='302 Moved Temporarily'\n headers['location']=location\n return location",
"def _map_source(source):\n for pattern, replacement in \\\n settings.REFINERY_FILE_SOURCE_MAP.iteritems():\n translated_source = re.sub(pattern, replacement, source)\n if translated_source != source:\n return translated_source\n return source",
"def _add_redirects(self):\n with open(REDIRECTS_FILE) as mapping_fd:\n reader = csv.reader(mapping_fd)\n for row in reader:\n if not row or row[0].strip().startswith(\"#\"):\n continue\n\n html_path = os.path.join(BUILD_PATH, \"html\")\n path = os.path.join(html_path, *row[0].split(\"/\")) + \".html\"\n\n if not self.include_api and (\n os.path.join(html_path, \"reference\") in path\n or os.path.join(html_path, \"generated\") in path\n ):\n continue\n\n try:\n title = self._get_page_title(row[1])\n except Exception:\n # the file can be an ipynb and not an rst, or docutils\n # may not be able to read the rst because it has some\n # sphinx specific stuff\n title = \"this page\"\n\n with open(path, \"w\") as moved_page_fd:\n html = f\"\"\"\\\n<html>\n <head>\n <meta http-equiv=\"refresh\" content=\"0;URL={row[1]}.html\"/>\n </head>\n <body>\n <p>\n The page has been moved to <a href=\"{row[1]}.html\">{title}</a>\n </p>\n </body>\n<html>\"\"\"\n\n moved_page_fd.write(html)",
"def _add_redirects(self):\n with open(REDIRECTS_FILE, encoding=\"utf-8\") as mapping_fd:\n reader = csv.reader(mapping_fd)\n for row in reader:\n if not row or row[0].strip().startswith(\"#\"):\n continue\n\n html_path = os.path.join(BUILD_PATH, \"html\")\n path = os.path.join(html_path, *row[0].split(\"/\")) + \".html\"\n\n if not self.include_api and (\n os.path.join(html_path, \"reference\") in path\n or os.path.join(html_path, \"generated\") in path\n ):\n continue\n\n try:\n title = self._get_page_title(row[1])\n except Exception:\n # the file can be an ipynb and not an rst, or docutils\n # may not be able to read the rst because it has some\n # sphinx specific stuff\n title = \"this page\"\n\n with open(path, \"w\", encoding=\"utf-8\") as moved_page_fd:\n html = f\"\"\"\\\n<html>\n <head>\n <meta http-equiv=\"refresh\" content=\"0;URL={row[1]}.html\"/>\n </head>\n <body>\n <p>\n The page has been moved to <a href=\"{row[1]}.html\">{title}</a>\n </p>\n </body>\n<html>\"\"\"\n\n moved_page_fd.write(html)",
"def unshorten_redirect(self, hashed):\n link_data = self.get_link_data(hashed)\n if link_data is None:\n abort(404, 'Shortened URL not found')\n else:\n self.link_db[hashed]['lookups'] += 1\n\n full_link = link_data['full_link']\n\n redirect(full_link)\n self.link_db.sync()",
"def test_document_based_redirection(base_url):\n url = base_url + \"/en-US/docs/concat\"\n resp = request(\"get\", url)\n assert resp.status_code == 301\n assert (\n resp.headers[\"Location\"]\n == \"/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/concat\"\n )",
"def get301(\n self, custom_headers=None, raw=False, **operation_config):\n # Construct URL\n url = '/http/redirect/301'\n\n # Construct parameters\n query_parameters = {}\n\n # Construct headers\n header_parameters = {}\n header_parameters['Content-Type'] = 'application/json; charset=utf-8'\n if custom_headers:\n header_parameters.update(custom_headers)\n\n # Construct and send request\n request = self._client.get(url, query_parameters)\n response = self._client.send(request, header_parameters, **operation_config)\n\n if response.status_code not in [200, 301]:\n raise models.ErrorException(self._deserialize, response)\n\n if raw:\n client_raw_response = ClientRawResponse(None, response)\n client_raw_response.add_headers({\n 'Location': 'str',\n })\n return client_raw_response"
] |
[
"0.61149055",
"0.5877363",
"0.58217674",
"0.574811",
"0.5609715",
"0.55710393",
"0.55684286",
"0.556559",
"0.55389315",
"0.54649377",
"0.5393228",
"0.52729505",
"0.5256892",
"0.5242376",
"0.523995",
"0.52105653",
"0.5163181",
"0.51342165",
"0.5124434",
"0.5098271",
"0.5082799",
"0.5058754",
"0.5043193",
"0.5018647",
"0.5003466",
"0.5001034",
"0.499548",
"0.497693",
"0.4965507",
"0.49638128"
] |
0.7261589
|
0
|
Redirects to the last menus' URL.
|
def source():
return redirect(get_last_menus_url())
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def root_redirect():\r\n return redirect(url_for(\"display_top\"))",
"def get_redirect_url(self):\n return reverse('accounts:home')",
"def redirect(url):",
"def get(self, request):\n return redirect('start:home')",
"def get(self, request):\n return redirect('start:home')",
"def get(self, request):\n return redirect('start:home')",
"def home_page():\n return redirect(url_for(_DEFAULT_ROUTE, _external=True))",
"def second_page():\n return redirect(url_for('index'))",
"def index():\n return redirect(url_for('second_page'))",
"def entry_page():\n return redirect(url_for('index'))",
"def __goToLastPage(self):\n try:\n self.currenturi = self.__baseuri + self.soup.find('li', \\\n 'lia-component-pagesnumbered').findAll('a', text=re.compile\\\n ('^\\d+$'))[-1].parent['href'].split(';')[0]\n self.__setSoupForCurrentUri()\n except:\n log.info(self.log_msg('Last page cannot find from the given page no \\\n for url %s'%self.task.instance_data['uri']))",
"def redirectPage() -> Response:\n # pass in the function name\n return redirect(url_for('view.loadMainPage'))",
"def post(self) :\n self.redirect('/admin')",
"def go_home(self):\r\n if self.home_url is not None:\r\n self.set_url(self.home_url)",
"def goto_menu(self, *args):\n self.manager.current = 'Main Menu'\n self.reset()\n self.manager.reset()",
"def root1(request):\n\ttemplate = 'main'\n\treturn redirect(template)",
"def get(self):\n self.redirect('/admin')",
"def get(self):\n self.redirect('/admin')",
"def menu_spe_homepage(self, event=None):\n self.link('http://pythonide.stani.be')",
"def news0_redirect(request):\n return redirect('news:news', start_id='0')",
"def homepage():\n return redirect('index.html')",
"def toLanding():\n return redirect(url_for('landingurl'))",
"def homepage():\n return redirect(\"/posts\")",
"def redirect_to_next(self, request):\n\n if 'next' in request.GET:\n next_page = request.GET['next']\n return HttpResponseRedirect(next_page)\n else:\n return redirect('index')",
"def redirect_url(default='front_page'):\n return request.args.get('next') or \\\n request.referrer or \\\n url_for(default)",
"def get_redirect_url(self, *args, **kwargs):\n if \"next\" in self.request.POST:\n return self.request.POST.get(\"next\")\n return reverse(\"my_reservations\")",
"def links(n):\n return redirect(url_for(\"link_page\", n=n, offset=0))",
"def home(request):\n return redirect('commprod/')",
"def post_login(self, came_from='/'):\n if not request.identity:\n login_counter = request.environ['repoze.who.logins'] + 1\n redirect('/login', came_from=came_from, __logins=login_counter)\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n redirect(came_from)",
"def goToAdmin(request):\n\n\ttemplate = '../admin'\n\treturn HttpResponseRedirect(template)"
] |
[
"0.6405678",
"0.63813686",
"0.6303314",
"0.6293801",
"0.6293801",
"0.6293801",
"0.62527364",
"0.62424713",
"0.61899626",
"0.6161606",
"0.615013",
"0.61412543",
"0.60826886",
"0.60195106",
"0.6013808",
"0.6008119",
"0.598344",
"0.598344",
"0.5967124",
"0.5961747",
"0.5948852",
"0.59417814",
"0.59301466",
"0.5927521",
"0.59009045",
"0.59005207",
"0.5897042",
"0.58715373",
"0.58715326",
"0.58569723"
] |
0.77138567
|
0
|
Finds all MP3s below the given path and returns a list of all the file paths.
|
import fnmatch
import os


def findMP3s(path=None):
    # default to the local .\songs folder when no path is given
    lib_path = path if path else r'.\songs'
    all_songs = []
    # os.walk yields (root, dirnames, filenames) for each directory under lib_path
    for rt, dirs, files in os.walk(lib_path):
        for fp in files:
            if fnmatch.fnmatch(fp, '*.mp3'):
                all_songs.append(os.path.join(rt, fp))
    print('found {} songs'.format(len(all_songs)))
    return all_songs
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_mp3_files(path):\n for dirname, dirnames, filenames in sorted(os.walk(path)):\n for filename in filenames:\n filepath = os.path.join(dirname, filename)\n if is_mp3_file(filepath):\n yield filepath",
"def list_directory(path):\n files = []\n for f in listdir(path):\n if isfile(join(path, f)) and f.endswith('.mp3'):\n files.append(f)\n return files",
"def getMusic(self, path):\n music =[]\n files = [ f for f in listdir(path) if isfile(join(path,f))]\n for f in files:\n m = join(path,f)\n #if not f.endswith('.mp3'):\n # files.remove(f)\n if f.endswith('.mp3'):\n music.append(m)\n return music",
"def find_files(path: str) -> List[str]:\n files = sorted(list(glob.glob(os.path.join(path, \"*.wav\"))))\n\n print(\"files: %d files found\" % len(files))\n return files",
"def get_file_paths():\n audio_folder = MY_DIR + '/assets'\n\n audio_addy_list = []\n for file in os.scandir(audio_folder):\n audio_addy_list.append(file.path)\n\n return audio_addy_list",
"def get_songs(path):\r\n song_list = []\r\n genre_paths = glob.glob(path + '/*')\r\n for genre_path in genre_paths:\r\n artist_paths = glob.glob(genre_path + '/*')\r\n for artist_path in artist_paths:\r\n album_paths = glob.glob(artist_path + '/*')\r\n for album_path in album_paths:\r\n lyrics_paths = glob.glob(album_path + '/*.txt')\r\n for lyrics_path in lyrics_paths:\r\n song = {}\r\n song[\"genre\"] = genre_path.replace(path + '/', '')\r\n song[\"artist\"] = artist_path.replace(genre_path + '/', '')\r\n song[\"album\"] = album_path.replace(artist_path + '/', '')\r\n song[\"lyrics\"] = open(lyrics_path).read()\r\n song[\"name\"] = lyrics_path[:-4].replace(album_path + '/', '')\r\n song[\"x\"] = 0\r\n song[\"y\"] = 0\r\n song_list.append(song)\r\n return song_list",
"def create_many_from_mp3_dir(cls, path_to_mp3_dir):\n songs = []\n path_to_mp3_dir = os.path.abspath(path_to_mp3_dir)\n dirty_mp3_names = os.listdir(path_to_mp3_dir)\n clean_mp3_paths = [\n os.path.join(path_to_mp3_dir, mp3_path) for\n mp3_path in dirty_mp3_names if\n mp3_path.lower().endswith(\".mp3\")\n ]\n\n if not clean_mp3_paths:\n raise EnvironmentError(\"No mp3's found in: %s\" % path_to_mp3_dir)\n\n for mp3_path in clean_mp3_paths:\n songs.append(cls.create_from_mp3_path(mp3_path))\n\n return songs",
"def GetSongFilenames():\n\n\t## Loop through each directory\n\tsong_files = []\n\tfor root, dirs, fnames in os.walk(\"_data\\\\fma_small\\\\\"):\n\t\t\n\t\t## Skip the first level\n\t\tif root == \"_data\\\\fma_small\\\\\":\n\t\t\tcontinue\n\n\t\t## Otherwise collect the files, appending\n\t\t## the root path.\n\t\tsong_files += [root+\"\\\\\"+f for f in fnames]\n\n\treturn song_files",
"def volume_paths(path):\n files = (os.path.join(path, f) for f in sorted(os.listdir(path)))\n return [f for f in files if os.path.isdir(f) or f.endswith('.zip')]",
"def get_music_files(pth: pathlib.Path) -> typing.List[mutagen.FileType]:\n file_names = [os.path.join(pth, f) for f in os.listdir(pth)]\n files = [mutagen.File(f) for f in file_names if os.path.isfile(f)]\n return [f for f in files if f is not None]",
"def load_music_files():\n # Make a list of music files, right now it is done by collection all files\n # below the current folder whose extension starts with mp3/wav \n print('Loading music files...')\n for path, dirs, files in os.walk('.'):\n for file_ in files:\n file_path = os.path.relpath(os.path.join(path, file_))\n url_path = os.path.join(*[quote(part) for part in os.path.split(file_path)]) \n ext = os.path.splitext(file_)[1].lower()\n name = os.path.splitext(file_)[0].lower()\n key = ''.join(name.split()) # unique key - no spaces\n audio_file = None\n if ext.startswith('.mp3'):\n audio = MP3(file_path) \n audio_file = AudioFile(url_path, audio.info.length, name, key) \n if audio_file:\n music_files.append(audio_file)\n print('Found:', music_files[-1])",
"def get_files(path: str, extension: str = '.wav') -> List[Path]:\n\n return list(Path(path).expanduser().resolve().rglob(f'*{extension}'))",
"def getMediaFiles(path):\n fileList = getMediaFileList(path)\n # dirList = getDirectoryList(path)\n\n # results = map(getMediaFiles, dirList)\n\n # for result in results:\n # fileList = fileList + result\n\n return fileList",
"def find(query):\n acc = []\n for root, dirs, files in os.walk(query, topdown=False):\n for name in files:\n acc += [os.path.join(root, name)]\n return acc",
"def get_file_list(\n self,\n file_regex = r'.*'):\n s3Contents = []\n #Use list_objects_v2 via kwargs since there could be\n #more than 1000 objects (single return limit)\n kwargs = {'Bucket': self.bucket, 'Prefix':self.key}\n while True:\n try:\n resp = self.s3.list_objects_v2(**kwargs)\n except:\n resp = None\n self.logger.error('Unable to reach s3 bucket')\n sys.exit(1)\n if resp.get(\"Contents\"):\n try:\n f_regex = re.compile(file_regex)\n #python 3.8+ required for walrus operator\n s3Contents += [f['Key'] for f in resp['Contents'] if (match := re.search(f_regex, f['Key']))]\n except Exception as e:\n self.logger.exception(e)\n self.logger.error('failed to filter s3 folder. Bucket: %s and location: %s',\n self.bucket,\n self.key)\n sys.exit(1)\n try:\n kwargs['ContinuationToken'] = resp['NextContinuationToken']\n except KeyError:\n break\n if not s3Contents:\n self.logger.warning(\n 'No files were returned from s3 bucket: %s and location: %s filtering by %s',\n self.bucket,\n self.key,\n file_regex)\n return s3Contents",
"def find_files(suffix, path):\n returnList = []\n find_file_recursive(suffix, path, returnList)\n return returnList",
"def collect_files(path, audio_files):\n\n for entry in os.scandir(path):\n if entry.is_dir():\n collect_files(entry.path, audio_files)\n if entry.is_file() and (entry.path.endswith(\".flac\") or entry.path.endswith(\".wav\")):\n audio_files.append(entry.path)",
"def test_s3uri_find_all_files(s3_test_path):\n prefix = os.path.join(s3_test_path, \"test_s3uri_find_all_files\")\n all_files = make_files_in_dir(prefix, make_local_empty_dir_d_a=False)\n\n all_files_found = S3URI(prefix).find_all_files()\n assert sorted(all_files_found) == sorted(all_files)\n for file in all_files:\n assert S3URI(file).exists",
"def list_keys(bucket, path, suffix=None):\n\t# Apparently there is no easy way of doing this except to loop over the result\n\t# chek the parameters delimiter='', marker=''\n\t# then the list returns boto.s3.prefix.Prefix objects on matches\n\tfiles = []\n\tpath = path.strip('/')\n\tfor key in bucket.list(path):\n\t\trelative_path = key.name.replace(path, '').lstrip('/')\n\t\tif not relative_path:\n\t\t\t# Empty\n\t\t\tcontinue\n\t\tif '/' in relative_path.strip('/'):\n\t\t\t# Skip sub-folders\n\t\t\tcontinue\n\n\t\tif not suffix or relative_path.endswith(suffix):\n\t\t\tfiles.append(relative_path)\n\treturn files",
"def get_midi_paths(root):\n\treturn [os.path.join(dirpath, filename) \n\t\t\tfor dirpath, _, filenames in os.walk(root) \n\t\t\tfor filename in filenames \n\t\t\tif filename.endswith(\".mid\")]",
"def list_files_local(path: str) -> List[str]:\n files = os.listdir(path)\n files = [f.split('.')[0] for f in files if 'pkl' in f]\n return files",
"def get_filepaths(directory):\n file_paths = [] # List which will store all of the full filepaths.\n\n # Walk the tree.\n\n for root, directories, files in os.walk(directory):\n\n for filename in files:\n if filename.endswith('.wav'):\n # Join the two strings in order to form the full filepath.\n filepath = os.path.join(root, filename)\n file_paths.append(filepath) # Add it to the list.\n # pdb.set_trace()\n file_paths.sort()\n return file_paths",
"def get_filepaths(keyword, directory):\n \n matches = []\n filenames_total = []\n \n for root, dirnames, filenames in os.walk(directory):\n for filename in filenames:\n if keyword in filename:\n matches.append(root + '/' + filename)\n filenames_total.append(filename)\n return matches, filenames_total",
"def get_paths(file_path):\n return glob(path.join(file_path, '*'))",
"def get_file_list(path: str) -> list:\n\treturn [f for f in listdir(path) if isfile(join(path, f))]",
"def readPlayerImageFiles(self):\n currentPath = os.path.dirname(os.path.abspath(__file__))\n listOfFileNames=[]\n for i in os.listdir(currentPath):\n if re.match(\"player\\_\\d+\",i): #i.endswith(\".gif\")\n listOfFileNames.append(currentPath+'/'+i)\n return listOfFileNames",
"def find_fastq_files(directory):\n\n filepaths = []\n for dirpath, dirnames, filenames in os.walk(directory):\n for filename in filenames:\n if filename.endswith('.fastq'):\n filepaths.append(os.path.join(dirpath, filename))\n return filepaths",
"def find(self, path_list):\n import fnmatch\n path_list2 = []\n for pattern in path_list:\n for root, _, filenames in os.walk('.'):\n for filename in fnmatch.filter(filenames, pattern):\n path_list2.append(os.path.join(root, filename))\n return path_list2",
"def get_paths(input_folder: str) -> list[str]:\n\n return [f for f in os.listdir(input_folder) if f[-4:] == '.txt' and f[:3] != 'top']",
"def fetch_all_files(bucket_name, suffix, level=None, keyword=None):\n s3 = boto3.resource(\"s3\")\n s3_bucket = s3.Bucket(bucket_name)\n files = []\n for obj in s3_bucket.objects.all():\n key = obj.key\n if key.endswith(suffix):\n if keyword:\n if keyword in key:\n files.append(key)\n else:\n files.append(key)\n\n if level:\n res = []\n for file in files:\n file2list = file.split(\"/\")\n if len(file2list) == level + 1:\n res.append(file)\n return res\n else:\n return files"
] |
[
"0.7776858",
"0.7220829",
"0.72117263",
"0.68320405",
"0.678996",
"0.67430526",
"0.671247",
"0.664901",
"0.66283673",
"0.65808403",
"0.6493454",
"0.6273654",
"0.617928",
"0.61612785",
"0.61595064",
"0.61321527",
"0.60744894",
"0.60734063",
"0.60670584",
"0.60611314",
"0.6056712",
"0.6003642",
"0.59934103",
"0.59849924",
"0.5983075",
"0.5972896",
"0.5969637",
"0.5944826",
"0.59430367",
"0.5942738"
] |
0.86981595
|
0
|
Extract metadata from options and populate a post node.
|
def _update_post_node(node, options, arguments):
node["date"] = arguments[0] if arguments else None
node["tags"] = options.get("tags", [])
node["author"] = options.get("author", [])
node["category"] = options.get("category", [])
node["location"] = options.get("location", [])
node["language"] = options.get("language", [])
node["redirect"] = options.get("redirect", [])
node["title"] = options.get("title", None)
node["image"] = options.get("image", None)
node["excerpt"] = options.get("excerpt", None)
node["exclude"] = "exclude" in options
node["nocomments"] = "nocomments" in options
node["external_link"] = options.get("external_link", [])
return node
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_meta(self, post, *args, **kwargs):\n\t\tsuper(Command, self).add_meta(post, *args, **kwargs)\n\t\tpost.gen_description = False\n\t\tpost.description = description_from_content(post)\n\t\tpost.save()",
"def post(self):\n args = parser.parse_args()",
"def handle_post(self, node):\n\t\tattr = node.attributes\n\t\t# By default we always want to chroot, unless\n\t\t# otherwise specified\n\t\tif attr.getNamedItem((None, 'chroot')):\n\t\t\tchroot = attr.getNamedItem((None, 'chroot')).value\n\t\telse:\n\t\t\tchroot = 'yes'\n\n\t\t# By default, the interpreter is always /bin/sh, unless\n\t\t# otherwise specified.\n\t\tif attr.getNamedItem((None, 'interpreter')):\n\t\t\tinterpreter = attr.getNamedItem((None,\n\t\t\t\t'interpreter')).value\n\t\telse:\n\t\t\tinterpreter = '/bin/sh'\n\n\t\t# The args that are supplied are for the command that\n\t\t# you want to run, and not to the installer section.\n\t\tif attr.getNamedItem((None, 'arg')):\n\t\t\targ = attr.getNamedItem((None, 'arg')).value\n\t\telse:\n\t\t\targ = ''\n\n\t\tlist = []\n\t\tif chroot == 'yes':\n\t\t\tlist.append(\"cat > /a/tmp/post_section_%d << '__eof__'\"\n\t\t\t\t\t% self.finish_section)\n\t\t\tlist.append(\"#!%s\" % interpreter)\n\t\t\tlist.append(self.getChildText(node))\n\t\t\tlist.append(\"__eof__\")\n\t\t\tlist.append(\"chmod a+rx /a/tmp/post_section_%d\"\n\t\t\t\t\t% self.finish_section)\n\t\t\tlist.append(\"chroot /a /tmp/post_section_%d %s\"\n\t\t\t\t\t% (self.finish_section, arg))\n\t\telse:\n\t\t\tif interpreter is not '/bin/sh':\n\t\t\t\tlist.append(\"cat > /tmp/post_section_%d \"\n\t\t\t\t\t\"<< '__eof__'\"\n\t\t\t\t\t% self.finish_section)\n\t\t\t\tlist.append(\"#!%s\" % interpreter)\n\t\t\t\tlist.append(self.getChildText(node))\n\t\t\t\tlist.append(\"__eof__\")\n\t\t\t\tlist.append(\"chmod a+rx /tmp/post_section_%d\"\n\t\t\t\t\t% self.finish_section)\n\t\t\t\tlist.append(\"%s /tmp/post_section_%d\"\n\t\t\t\t\t% (interpreter, self.finish_section))\n\t\t\t\n\t\t\telse:\n\t\t\t\tlist.append(self.getChildText(node))\n\n\t\tself.finish_section = self.finish_section+1\n\t\tself.ks['finish'] += list",
"def post(self):\n # Grab all the specified information\n post = dict([(key, getattr(self, key)) for key in self.info_names])\n\n # Add additional metadata\n if self.extraneous:\n post.update(self.extraneous)\n\n return post",
"def run_package(m):\n\n if m.args.upload:\n doc = find_fs_package_from_dir(m.args.source)\n else:\n doc = find_csv_package(m)\n\n url, user, password = get_site_config(m.args.site_name)\n wp = Client(url, user, password)\n\n post = get_or_new_post(m, wp, doc)\n\n assert post is not None\n\n if m.args.upload:\n upload_to_wordpress(wp, post, doc)\n\n content = html(doc, m.args.template)\n\n post.excerpt = doc['Root'].get_value('Root.Description') or content[:200]\n\n post_tags = list(set(\n [t.value for t in doc['Root'].find('Root.Tag')] +\n [t.value for t in doc['Root'].find('Root.Group')] +\n [doc['Root'].get_value('Root.Origin')] +\n list(split_groups_tags(m.args.group)) +\n list(split_groups_tags(m.args.tag))\n ))\n\n post.terms_names = {\n 'post_tag': post_tags,\n 'category': ['Dataset'] + list(split_groups_tags(m.args.group))\n }\n\n post.title = doc.get_value('Root.Title')\n post.slug = slugify(doc.nonver_name)\n post.content = content\n\n if m.args.publish:\n post.post_status = 'publish'\n\n try:\n if m.args.no_op:\n r = {}\n else:\n r = wp.call(EditPost(post.id, post))\n except Fault as e:\n\n if 'taxonomies' in e.faultString:\n err((\"User {} does not have permissions to add terms to taxonomies. \"\n \"Terms are: {}\").format(user, post.terms_names))\n\n raise\n\n return r",
"def parse_post_content(self, response):\n post = Post()\n post['title'] = response.xpath('//h2/a/text()')[0].extract()\n post['image_url'] = response.xpath(\"//div[@class='cont group']//img/@src\")[0].extract()\n yield post",
"def get_post(self):\n\t\tself.post = graph.get_object(POST_ID)",
"def post(self):\n data = request.get_json()\n dbops.post_meta(data)\n return None, 204",
"def register_post_parser(self, fct, cfg, ctx):\n self.post_parsers.append((fct, cfg, ctx))",
"def handle_new_post(post_data, user_agent, remote_addr):\n \n for required in POST_REQUIRED_PARAMS:\n if required not in post_data:\n return None, None\n\n try:\n value = int(string_from_interwebs(post_data.getfirst(\"code\", \"\")))\n except ValueError:\n return None, None\n \n if value != 98098098098:\n return None, None\n\n # not yet safe to use.\n location = post_data.getfirst(\"location\", \"\")\n tags = string_from_interwebs(post_data.getfirst(\"tags\")) \n author = post_data.getfirst(\"author\")\n \n split_tags = [string_from_interwebs(tag).strip().lower() for tag in tags.split(\",\")] # temporary\n \n if len(split_tags) > 3:\n return None, None\n \n author_id = string_from_interwebs(author).strip()\n \n with Connection('localhost', 27017) as connection:\n reply_to = string_from_interwebs(post_data.getfirst(\"reply_to\"))\n \n if not verify_author(author_id, connection):\n return None, None\n\n if not verify_post(reply_to, connection):\n return None, None\n\n # if reply then it's verified.\n # XXX: I need to make a standard object structure for this, so that I don't \n # have to update separate things.\n\n post = {\"viewed\" : 0,\n \"comments\" : 0,\n \"flagged\" : 0,\n \"disliked\" : 0,\n \"enjoyed\" : 0,\n \"num_replies\" : 0,\n \"num_reposts\" : 0,\n \"content-type\" : \"image\", # need to pull this from the mime lookup\n \"file\" : \"placeholder\",\n \"user_agent\" : user_agent,\n \"remote_addr\" : remote_addr,\n \"created\" : datetime.utcnow(),\n \"location\" : string_from_interwebs(location).strip(),\n \"author\" : ObjectId(author_id),\n \"reply_to\" : ObjectId(reply_to),\n \"tags\" : split_tags}\n\n update_post(reply_to, connection)\n\n return post_data.getfirst(\"data\"), post",
"def __init__(self, rpc, post):\n super(PostModule, self).__init__(rpc, 'post', post)\n self._action = self._info.get('default_action', \"\")",
"def _fillPost(self, useRawHTML, old_data=None):\n # Initialize empty dictionnary ct (aka content)\n # to be sent through self.server.metaWeblog.newPost()\n ct = {}\n\n # if no old_data, create a fake one\n if old_data == None:\n old_data = { 'title': None\n , 'mt_keywords': None\n , 'formatted_text': BALISE\n , 'mt_excerpt': None\n , 'description': None}\n\n def updateField(prompt, string=None):\n if (string == None) or (string == \"\") :\n return raw_input(prompt)\n else :\n r = raw_input(prompt + \" [default:\" + string + \"]\\n\")\n if r == \"\" :\n return string\n else :\n return r\n\n # Now get information\n ct['title'] = updateField( \"Title?\\n\", old_data['title'])\n ct['mt_keywords'] = updateField(\n \"Tags? (comma separated lists)?\\n\",\n old_data['mt_keywords'])\n\n # Categories are not included in the struct \"ct\"\n # see _setCategorie()\n\n # Get excerpt/content\n # Method0: external XML file\n if useRawHTML:\n with open( useRawHTML, 'rb') as f:\n doc = xml.dom.minidom.parse(f)\n # Parse our XHTML file\n text = doc.getElementsByTagName(\"body\")[0].toxml()\n #text = text.decode() # convert bytes to string\n text = text.replace(\"<body>\", \"\").replace(\"</body>\", \"\")\n ct['mt_excerpt'], ct['description'] = split_excerpt( text)\n\n # Method1: custom editor\n elif self.editor :\n prev_data = old_data['formatted_text']\n data = self._externalEditor( wrap_with_template(prev_data) )\n ct['mt_excerpt'], ct['description'] = split_excerpt( data)\n\n # Method2: input\n else :\n ct['mt_excerpt'] = updateField(\n \"Excerpt? (beware of xHTML tags !)\\n\",\n old_data['mt_excerpt'])\n ct['description'] = updateField(\n \"Main content? (beware of xHTML tags !)\\n\",\n old_data['description'])\n\n # Process the rest of the attributes (comments, pings, ...)\n def set_boolean( prompt, default):\n if default == True:\n return raw_input(prompt + \"[Y|n]\") != \"n\"\n else:\n return raw_input(prompt + \"[y|N]\") != \"y\"\n\n ct['mt_allow_comments'] = set_boolean( \"Allow comments ?\"\n , self.auto_comments)\n ct['mt_allow_pings'] = set_boolean( \"Allow pings ?\"\n , self.auto_pings)\n publish = set_boolean( \"Publish ?\" , self.auto_publish)\n\n return ct, publish",
"def run_get_post(m):\n\n doc = get_doc(m)\n assert doc is not None\n\n wp = get_wp(m)\n\n post = find_post(wp, doc.identifier)\n\n if post:\n post.content = \"…content elided…\"\n from pprint import pprint\n pprint(post.struct)\n return\n else:\n warn(f\"Didn't find post for identifier {doc.identifier}\")\n return",
"def deliver_post(data, access=None):\n\n schema = get_post_schema(data)\n return deliver_fields(schema, data, access)",
"def post(self, post):\n\n self._post = post",
"def post(self, post):\n\n self._post = post",
"def remotePostCreate(host, post):\n post = post.get('posts')[0]\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('origin')\n count = post.get('count')\n comments = remoteCommentList(post)\n source = \"{}/api/posts/{}\".format(DOMAIN, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin, 'count': count,\n 'source': source}\n return post_dict",
"def update_post(prev_data, data, db_conn):\n\n schema = get_post_schema(data)\n post_kind = prev_data['kind']\n if post_kind is 'post' or post_kind is 'proposal':\n data = pick(data, ('body',))\n elif post_kind is 'vote':\n data = pick(data, ('body', 'response',))\n data, errors = update_document(schema, prev_data, data, db_conn)\n if not errors:\n add_post_to_es(data, db_conn)\n return data, errors",
"def handle_post(cls, **kwargs):\n raise NotImplementedError",
"def parse_post(post: Dict[str, Any],\n image_retriever: str = \"pretrained\",\n image_basedir: Optional[str] = \"documentIntent_emnlp19/resnet18_feat\") -> Dict[str, Any]:\n id = post['id']\n label_intent = post['intent']\n label_semiotic = post['semiotic']\n label_contextual = post['contextual']\n caption = post['caption']\n\n if image_retriever == \"url\":\n image = post['url']\n raise NotImplementedError(\"Currently cannot download an image from {}\".format(image))\n elif image_retriever == \"pretrained\":\n image_path = Path(image_basedir) / \"{}.npy\".format(id)\n image = np.load(image_path)\n elif image_retriever == \"ignored\":\n image = None\n else:\n raise NotImplementedError(\"image_retriever method doesn't exist\")\n\n output_dict = {\n 'id': id,\n 'label': {\n 'intent': label_intent,\n 'semiotic': label_semiotic,\n 'contextual': label_contextual,\n },\n 'caption': caption,\n 'image': image,\n }\n\n return output_dict",
"def update_post_format(post):\n\n post_dict = {\n \"title\": post[1],\n \"genre\": get_genre(post[0]),\n \"content\": post[2],\n \"repeater_link\": get_links(post[3], post[4]),\n }\n \n return post_dict",
"def parse_options(self, extra):\n options = super().parse_options(extra)\n self.target_image = options.pop(\"target\")\n\n return options",
"def fetchPostMortemPack(self):\n assert False, \"Deriving class must implement\"",
"def mutate(self, info, **arguments):\n title = arguments.get('title')\n text = arguments.get('text')\n\n # Get author id from `relay` id and find author on database\n author_id = from_global_id(arguments.get('author_id'))[1]\n author = UserModel.objects.get(pk=author_id)\n\n # Get post id from `relay` id and find it on database\n post_id = from_global_id(arguments.pop('id'))[1]\n post = PostModel.objects.get(pk=post_id)\n\n if not author:\n print(\"=\" * 100)\n print(\"schema.py@41\")\n print(\"We should return an error here\")\n print(\"=\" * 100)\n return CreatePostMutation(success=False)\n\n post = PostModel.objects.create(title=title, author=author, text=text)\n return CreatePostMutation(success=True, post=post)",
"def _process_kwargs(name, d, definition, nodes):\n # get node class\n module_root = d.get(\"plugin\", \"podpac\")\n node_string = \"%s.%s\" % (module_root, d[\"node\"])\n module_name, node_name = node_string.rsplit(\".\", 1)\n try:\n module = importlib.import_module(module_name)\n except ImportError:\n raise ValueError(\"Invalid definition for node '%s': no module found '%s'\" % (name, module_name))\n try:\n node_class = getattr(module, node_name)\n except AttributeError:\n raise ValueError(\n \"Invalid definition for node '%s': class '%s' not found in module '%s'\" % (name, node_name, module_name)\n )\n\n kwargs = {}\n for k, v in d.get(\"attrs\", {}).items():\n kwargs[k] = v\n\n for k, v in d.get(\"inputs\", {}).items():\n kwargs[k] = _lookup_input(nodes, name, v, definition)\n\n for k, v in d.get(\"lookup_attrs\", {}).items():\n kwargs[k] = _lookup_attr(nodes, name, v)\n\n if \"style\" in d:\n style_class = getattr(node_class, \"style\", Style)\n if isinstance(style_class, tl.TraitType):\n # Now we actually have to look through the class to see\n # if there is a custom initializer for style\n for attr in dir(node_class):\n atr = getattr(node_class, attr)\n if not isinstance(atr, tl.traitlets.DefaultHandler) or atr.trait_name != \"style\":\n continue\n try:\n style_class = atr(node_class)\n except Exception as e:\n # print (\"couldn't make style from class\", e)\n try:\n style_class = atr(node_class())\n except:\n # print (\"couldn't make style from class instance\", e)\n style_class = style_class.klass\n try:\n kwargs[\"style\"] = style_class.from_definition(d[\"style\"])\n except Exception as e:\n kwargs[\"style\"] = Style.from_definition(d[\"style\"])\n # print (\"couldn't make style from inferred style class\", e)\n\n for k in d:\n if k not in [\"node\", \"inputs\", \"attrs\", \"lookup_attrs\", \"plugin\", \"style\"]:\n raise ValueError(\"Invalid definition for node '%s': unexpected property '%s'\" % (name, k))\n\n nodes[name] = node_class(**kwargs)",
"def insert_new_post(post_arg_set):\n api, post_data, acct_data, page_id, config = post_arg_set\n\n try:\n post_id = post_data['id'] if post_data.has_key('id') else None\n\n except Exception as e:\n log.error( e )\n\n else:\n\n # parse date\n if post_data.has_key('created_time') and post_data['created_time'] is not None: \n dt = datetime.strptime(post_data['created_time'], FB_DATE_FORMAT)\n date_time = tz_adj(dt, config)\n time_bucket = round_datetime(date_time, config)\n raw_timestamp = int(date_time.strftime(\"%s\"))\n \n else:\n time_bucket = None\n raw_timestamp = None\n \n # extract message so we can find links within the msg if not in url\n article_urls = [get_fb_link(post_data, config, unshorten=True)]\n message = post_data['message'].encode('utf-8') if post_data.has_key('message') else None\n message_urls = get_message_urls(article_urls, message, config)\n\n # detect article links, unshorten and parse\n article_urls = [\n parse_url(unshorten_link(url, config)) \\\n for url in article_urls + message_urls\n if url is not None\n ]\n\n article_urls = [url for url in article_urls if is_article(url, config)]\n\n if article_urls:\n for article_url in set(article_urls):\n\n # sluggify url\n article_slug = sluggify(article_url)\n\n # format data\n post_value = {\n 'article_slug': article_slug,\n 'article_url': article_url,\n 'time_bucket': time_bucket,\n 'fb_post_created': raw_timestamp,\n 'raw_timestamp': raw_timestamp,\n 'fb_raw_link' : get_fb_link(post_data, config=config),\n 'fb_page_id': page_id,\n 'fb_post_id': post_id,\n 'fb_page_likes': acct_data['likes'] if acct_data.has_key('likes') else None,\n 'fb_page_talking_about': acct_data['talking_about_count'] if acct_data.has_key('talking_about_count') else None,\n 'fb_type': post_data['type'] if post_data.has_key('type') else None,\n 'fb_status_type': post_data['status_type'] if post_data.has_key('status_type') else None,\n 'fb_message': message\n }\n \n # always insert insights data\n if is_insights(page_id, config):\n \n log.info( \"INSIGHTS\\tAdding data from %s re: %s\" % (page_id, article_slug) )\n\n # fetch data\n insights_value = get_insights_data(api, page_id, post_id)\n\n # create datasource name\n data_source = \"facebook_insights_%s\" % page_id \n \n # upsert url\n upsert_url(article_url, article_slug, data_source, config)\n\n # insert id\n db.sadd('facebook_post_ids', post_id)\n\n # format time bucket\n current_time_bucket = gen_time_bucket(config)\n insights_value['time_bucket'] = current_time_bucket\n post_value.pop('time_bucket', None)\n \n value = json.dumps({\n data_source : dict(post_value.items() + insights_value.items())\n })\n\n # upload data to redis\n db.zadd(article_slug, current_time_bucket, value) \n \n # only insert new posts\n if not db.sismember('facebook_post_ids', post_id):\n \n log.info( \"FACEBOOK\\tNew post %s\\t%s\" % (post_id, article_url) )\n \n # insert id\n db.sadd('facebook_post_ids', post_id) \n \n # upsert url\n data_source = \"facebook_%s\" % page_id\n upsert_url(article_url, article_slug, data_source, config)\n\n value = json.dumps( {data_source : post_value} )\n\n\n # upload data to redis\n db.zadd(article_slug, time_bucket, value)",
"def _post_parser_init(self, args):\n self.host = args.host if args.host else 'localhost'\n self.protocol = args.protocol\n self.id = args.id\n self.server = DerivaServer(self.protocol,\n args.host,\n credentials=DerivaCatalogCLI._get_credential(self.host,\n token=args.token,\n oauth2_token=args.oauth2_token))",
"def process_posts(app, doctree):\n env = app.builder.env\n if not hasattr(env, \"ablog_posts\"):\n env.ablog_posts = {}\n post_nodes = list(doctree.findall(PostNode))\n if not post_nodes:\n return\n post_date_format = app.config[\"post_date_format\"]\n should_auto_orphan = app.config[\"post_auto_orphan\"]\n docname = env.docname\n if should_auto_orphan:\n # mark the post as 'orphan' so that\n # \"document isn't included in any toctree\" warning is not issued\n # We do not simply assign to should_auto_orphan because if auto-orphan\n # is false, we still want to respect the per-post :rst:dir`orphan` setting\n app.env.metadata[docname][\"orphan\"] = True\n blog = Blog(app)\n auto_excerpt = blog.post_auto_excerpt\n multi_post = len(post_nodes) > 1 or blog.post_always_section\n for order, node in enumerate(post_nodes, start=1):\n if node[\"excerpt\"] is None:\n node[\"excerpt\"] = auto_excerpt\n if multi_post:\n # section title, and first few paragraphs of the section of post\n # are used when there are more than 1 posts\n section = node\n while True:\n if isinstance(section, nodes.section):\n break\n section = node.parent\n else:\n section = doctree\n # get updates here, in the section that post belongs to\n # Might there be orphan updates?\n update_dates = _get_update_dates(section, docname, post_date_format)\n # Making sure that post has a title because all post titles\n # are needed when resolving post lists in documents\n title = node[\"title\"] or _get_section_title(section)\n # creating a summary here, before references are resolved\n excerpt = []\n if node.children:\n if node[\"exclude\"]:\n node.replace_self([])\n else:\n node.replace_self(node.children)\n for child in node.children:\n excerpt.append(child.deepcopy())\n elif node[\"excerpt\"]:\n count = 0\n for nod in section.findall(nodes.paragraph):\n excerpt.append(nod.deepcopy())\n count += 1\n if count >= (node[\"excerpt\"] or 0):\n break\n node.replace_self([])\n else:\n node.replace_self([])\n nimg = node[\"image\"] or blog.post_auto_image\n if nimg:\n for img, nod in enumerate(section.findall(nodes.image), start=1):\n if img == nimg:\n excerpt.append(nod.deepcopy())\n break\n date = node[\"date\"]\n if date:\n try:\n date = datetime.strptime(date, post_date_format)\n except ValueError:\n if date_parser:\n try:\n date = date_parser(date)\n except ValueError:\n raise ValueError(\"invalid post date in: \" + docname)\n else:\n raise ValueError(\n f\"invalid post date ({date}) in \" + docname + f\". Expected format: {post_date_format}\"\n )\n else:\n date = None\n # if docname ends with `index` use folder name to reference the document\n # a potential problem here is that there may be files/folders with the\n # same name, so issuing a warning when that's the case may be a good idea\n folder, label = os.path.split(docname)\n if label == \"index\":\n folder, label = os.path.split(folder)\n if not label:\n label = slugify(title)\n section_name = \"\"\n if multi_post and section.parent is not doctree:\n section_name = section.attributes[\"ids\"][0]\n label += \"-\" + section_name\n else:\n # create a reference for the post\n # if it is posting the document\n # ! 
this does not work for sections\n app.env.domains[\"std\"].data[\"labels\"][label] = (docname, label, title)\n app.env.domains[\"std\"].data[\"anonlabels\"][label] = (docname, label)\n if section.parent is doctree:\n section_copy = section[0].deepcopy()\n else:\n section_copy = section.deepcopy()\n # multiple posting may result having post nodes\n for nn in section_copy.findall(PostNode):\n if nn[\"exclude\"]:\n nn.replace_self([])\n else:\n nn.replace_self(node.children)\n postinfo = {\n \"docname\": docname,\n \"section\": section_name,\n \"order\": order,\n \"date\": date,\n \"update\": max(update_dates + [date]),\n \"title\": title,\n \"excerpt\": excerpt,\n \"tags\": node[\"tags\"],\n \"author\": node[\"author\"],\n \"category\": node[\"category\"],\n \"location\": node[\"location\"],\n \"language\": node[\"language\"],\n \"redirect\": node[\"redirect\"],\n \"nocomments\": node[\"nocomments\"],\n \"image\": node[\"image\"],\n \"exclude\": node[\"exclude\"],\n \"external_link\": node[\"external_link\"],\n \"doctree\": section_copy,\n }\n if docname not in env.ablog_posts:\n env.ablog_posts[docname] = []\n env.ablog_posts[docname].append(postinfo)\n # instantiate catalogs and collections here\n # so that references are created and no warnings are issued\n if app.builder.format == \"html\":\n stdlabel = env.domains[\"std\"].data[\"labels\"] # NOQA\n else:\n if hasattr(env, \"intersphinx_inventory\"):\n stdlabel = env.intersphinx_inventory.setdefault(\"std:label\", {}) # NOQA\n baseurl = getattr(env.config, \"blog_baseurl\").rstrip(\"/\") + \"/\" # NOQA\n project, version = env.config.project, str(env.config.version) # NOQA\n for key in [\"tags\", \"author\", \"category\", \"location\", \"language\"]:\n catalog = blog.catalogs[key]\n for label in postinfo[key]:\n coll = catalog[label] # NOQA\n if postinfo[\"date\"]:\n coll = blog.archive[postinfo[\"date\"].year] # NOQA",
"def proto_post(self, proto_category):\n return baker.make(\n Post,\n content=(\n \"Aliquip excepteur qui mollit labore nulla et culpa \"\n \"minim et commodo reprehenderit consequat sint.\"\n ),\n categories=proto_category,\n make_m2m=True,\n _create_files=True,\n )",
"def proto_post(self, proto_category):\n return baker.make(\n Post,\n content=\"Aute non ex nostrud amet ipsum.\",\n categories=proto_category,\n make_m2m=True,\n _create_files=True,\n )"
] |
[
"0.59166276",
"0.5812724",
"0.56457675",
"0.56069916",
"0.526757",
"0.52343196",
"0.52183485",
"0.5210904",
"0.5157758",
"0.51434153",
"0.50863826",
"0.5043521",
"0.50420105",
"0.50243163",
"0.5016184",
"0.5016184",
"0.5013102",
"0.4995355",
"0.49839917",
"0.49028623",
"0.48933217",
"0.48860025",
"0.4883046",
"0.4860644",
"0.48491418",
"0.48468468",
"0.48359197",
"0.48262367",
"0.47836033",
"0.47553685"
] |
0.68622595
|
0
|
Return section title as text.
|
def _get_section_title(section):
for title in section.findall(nodes.title):
return title.astext()
raise Exception("Missing title")
# A problem with the following is that title may contain pending
# references, e.g. :ref:`tag-tips`
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def title(self):\n return self.header",
"def get_section_title(\n header_element: lxml.html.Html.Element,\n anchor_characters: str | None = None,\n) -> str:\n return clean_title_text(header_element.text_content())",
"def get_title():",
"def get_title(self) -> str:\n pass",
"def title(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"title\")",
"def title(self) -> str:\n return pulumi.get(self, \"title\")",
"def title(self) -> str:\n return pulumi.get(self, \"title\")",
"def title(self) -> str:\n return pulumi.get(self, \"title\")",
"def title(self):\n return ' '.join(self._title)",
"def short_title(self):\n if hasattr(self, \"title\"):\n return self.title\n else:\n return \"\"",
"def get_title(self):\n return self.title",
"def get_title(self):\n return self.title",
"def get_title(self):\n return self.title",
"def title(self):\n return self['title']",
"def title(self) -> str:\r\n return self._title",
"def get_title(self):\n return self._get_title_()",
"def _section_titles(self):\r\n chapter_css = 'nav > div.chapter > h3 > a'\r\n return self.q(css=chapter_css).map(lambda el: el.text.strip()).results",
"def title(self):\n return self.container['title']",
"def title(self):\n return self.get(self._names[\"title\"])",
"def title(self):\n return self.get(self._names[\"title\"])",
"def sectionName(self) -> str:\n return(self._sectionName)",
"def GetTitle(self):\n return str(self.title)",
"def get_title(self):\n\n return self.title",
"def title(self) -> str:\n\t\t# pylint: disable=unsubscriptable-object\n\t\treturn self.value[1]",
"def title_string(self):\n return ' '.join(self.title).replace(' - ', '')",
"def title(self):\n return self._title",
"def title(self):\n return self._title",
"def title(self):\n return self._title",
"def title(self):\n return self._title",
"def title(self):\n return self._title"
] |
[
"0.72328174",
"0.7200083",
"0.7151541",
"0.7150657",
"0.70747584",
"0.70035803",
"0.70035803",
"0.70035803",
"0.6951649",
"0.6951533",
"0.6945576",
"0.6945576",
"0.6945576",
"0.6924524",
"0.69163346",
"0.6911535",
"0.6908612",
"0.6906137",
"0.6904355",
"0.6904355",
"0.6900102",
"0.6891076",
"0.6885366",
"0.6876793",
"0.68552834",
"0.6850815",
"0.6850815",
"0.6850815",
"0.6850815",
"0.6850815"
] |
0.7722091
|
0
|
Return list of dates of updates found in the section.
|
def _get_update_dates(section, docname, post_date_format):
update_nodes = list(section.findall(UpdateNode))
update_dates = []
for update_node in update_nodes:
try:
update = datetime.strptime(update_node["date"], post_date_format)
except ValueError:
if date_parser:
try:
update = date_parser(update_node["date"])
except ValueError:
raise ValueError("invalid post date in: " + docname)
else:
raise ValueError(
f"invalid post date ({update_node['date']}) in "
+ docname
+ f". Expected format: {post_date_format}"
)
# Insert a new title element which contains the `Updated on {date}` logic.
substitute = nodes.title("", _("Updated on ") + update.strftime(post_date_format))
update_node.insert(0, substitute)
update_node["classes"] = ["note", "update"]
update_dates.append(update)
return update_dates
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def extract_last_update_dates(self, feed):\n parsed_feed = parse_feed(feed)\n\n if not parsed_feed:\n return []\n\n dates = [\n (publication.metadata.identifier, publication.metadata.modified)\n for publication in self._get_publications(parsed_feed)\n if publication.metadata.modified\n ]\n\n return dates",
"def updates(self):\n return self._get_page('updates').json()",
"def get_dates(self):\r\n return self.__dates",
"def dates(self):\n #{{{ function to return start and end times for a station\n return self.wfdates.keys()",
"def _find_updates(since: str):\n updates = _find_update_docs_since(since)\n newest_update_per_location = _find_newest_update_by_location(updates)\n logger.info(f\"Found {len(newest_update_per_location)} updates since {since}\")\n yield from _gen_updates_to_notify(newest_update_per_location)",
"def getListModifiedDates(self):\n return _libsbml.ModelHistory_getListModifiedDates(self)",
"def _find_update_docs_since(since: str):\n delta = since_to_delta(since)\n earliest_dt = datetime.now(timezone.utc) - delta\n query = get_db().collection_group(\"updates\").where(\"date\", \">\", earliest_dt)\n return (doc.to_dict() for doc in query.stream())",
"def get_updates():\n global PREVIOUS_NEWEST_STR, UPDATED, WIKIDATA_WB_API, WIKIDATA_WB_PARAMS\n r = requests.get(url=WIKIDATA_WB_API, params=WIKIDATA_WB_PARAMS)\n root = etree.fromstring(r.text)\n seen = 0\n updates = []\n oldest_str = None\n newest_str = None\n for entry in root.iterchildren('{http://www.w3.org/2005/Atom}entry'):\n # print(etree.tostring(entry))\n q = entry.find('{http://www.w3.org/2005/Atom}title').text\n updated_str = entry.find('{http://www.w3.org/2005/Atom}updated').text\n if newest_str is None or updated_str > newest_str:\n newest_str = updated_str\n if oldest_str is None or updated_str < oldest_str:\n oldest_str = updated_str\n updated = dateutil.parser.parse(updated_str)\n if not re.match(r'''Q\\d+$''', q):\n # This is not an updated entity, ignore\n pass\n elif q in UPDATED and UPDATED[q] >= updated:\n # print(\"See %s update already\" % (q))\n seen += 1\n else:\n updates.append(q)\n # print(\"Got %s (updated at %s)\" % (q, updated))\n UPDATED[q] = updated\n print(\"%s: Got %d updates (ignored %d already seen)\" % (datetime.now(), len(updates), seen))\n if oldest_str > PREVIOUS_NEWEST_STR:\n print(\"WARNING: Gap between feed dates from %s to %s\" % (PREVIOUS_NEWEST_STR, oldest_str))\n PREVIOUS_NEWEST_STR = newest_str\n return updates",
"def __getHoldingsUpdate(self, dirPath=None):\n retD = {}\n dirPath = dirPath if dirPath else self.__sandboxPath\n try:\n updateTypeList = [\"added\", \"modified\", \"obsolete\"]\n contentTypeList = [\"entries\", \"mr\", \"cs\", \"sf\", \"nef\", \"nmr-str\"]\n contentNameD = {\n \"entries\": \"coordinates\",\n \"mr\": \"NMR restraints\",\n \"cs\": \"NMR chemical shifts\",\n \"sf\": \"structure factors\",\n \"nef\": \"Combined NMR data (NEF)\",\n \"nmr-str\": \"Combined NMR data (NMR-STAR)\",\n }\n #\n for updateType in updateTypeList:\n uD = {}\n for contentType in contentTypeList:\n fp = os.path.join(dirPath, \"update-lists\", updateType + \"-\" + contentType)\n if not self.__mU.exists(fp):\n continue\n entryIdL = self.__mU.doImport(fp, \"list\")\n #\n for entryId in entryIdL:\n entryId = entryId.strip().upper()\n uD.setdefault(entryId, []).append(contentNameD[contentType])\n for entryId in uD:\n uType = \"removed\" if updateType == \"obsolete\" else updateType\n # retD[entryId] = {\"update_id\": updateId, \"entry_id\": entryId, \"update_type\": uType, \"repository_content_types\": uD[entryId]}\n retD[entryId] = {\"update_type\": uType, \"repository_content_types\": uD[entryId]}\n return retD\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n return retD",
"def get_updates(self) -> dict:\n url = self.URL + \"getUpdates\"\n js = self.get_json_from_url(url)\n return js",
"def get_updates(cls, date, team):\n return cls.query(\n cls.date == date,\n cls.team == team.lower()\n ).order(-cls.name).fetch(100)",
"def _get_dates():\n remote = os.path.join(BASE_URL, RSS_FEED)\n local = os.path.join(TMP, RSS_FEED)\n u..(remote, local)\n\n with open(local) as f:\n return PUB_DATE.findall(f.read())",
"def getUpdates(self):\n # execute the query\n ret = self._er.execQuery(self)\n\n if ret and ret.get(\"recentActivity\") and ret[\"recentActivity\"].get(\"articles\"):\n # return the latest articles\n return ret[\"recentActivity\"][\"articles\"][\"activity\"]\n # or empty\n return []",
"def getUpdates(self):\n # execute the query\n ret = self._er.execQuery(self)\n\n if ret and ret.get(\"recentActivity\") and ret[\"recentActivity\"].get(\"events\"):\n # return the updated information\n return ret[\"recentActivity\"][\"events\"]\n # or empty\n return {}",
"def dates(self):\n pass",
"def feed_entries(self):\n date_format = \"%Y-%m-%dT%H:%M:%SZ\"\n entries = self.mapper.list_entries(limit=10)\n if entries:\n updated = max([e.updated for e in entries]).strftime(date_format)\n else:\n updated = datetime.utcnow().strftime(date_format)\n return {\"entries\": entries, \"updated\": updated}",
"def Dates(self):\n data = self.DictData()\n dates = [ row[ \"Date\"] for row in data ]\n return dates",
"def get_updates(self, *args, **kwargs):\n\n updates_data = api.get_updates(\n *args,\n api_key=self.__creds.api_key_v2, \n **kwargs)\n return [en.Update(creds=self.__creds, **update_data) for update_data in updates_data]",
"def get_dates(self):\n now = datetime.now()\n if now.month > 6 and now.month < 9:\n now = datetime(now.year, 6, 1)\n\n term = ReadingsTerm()\n out = list(term.get_year_interval(now)) + [now.month]\n return out",
"def get_course_update_items(course_updates, provided_index=0):\n def _course_info_content(html_parsed):\n \"\"\"\n Constructs the HTML for the course info update, not including the header.\n \"\"\"\n if len(html_parsed) == 1:\n # could enforce that update[0].tag == 'h2'\n content = html_parsed[0].tail\n else:\n content = html_parsed[0].tail if html_parsed[0].tail is not None else \"\"\n content += \"\\n\".join([html.tostring(ele).decode('utf-8') for ele in html_parsed[1:]])\n return content\n\n if course_updates and getattr(course_updates, \"items\", None):\n if provided_index and 0 < provided_index <= len(course_updates.items):\n return course_updates.items[provided_index - 1]\n else:\n # return list in reversed order (old format: [4,3,2,1]) for compatibility\n return list(reversed(course_updates.items))\n\n course_update_items = []\n if course_updates:\n # old method to get course updates\n # purely to handle free formed updates not done via editor. Actually kills them, but at least doesn't break.\n try:\n course_html_parsed = html.fromstring(course_updates.data)\n except (etree.XMLSyntaxError, etree.ParserError):\n log.error(\"Cannot parse: \" + course_updates.data) # lint-amnesty, pylint: disable=logging-not-lazy\n escaped = escape(course_updates.data)\n # xss-lint: disable=python-concat-html\n course_html_parsed = html.fromstring(\"<ol><li>\" + escaped + \"</li></ol>\")\n\n # confirm that root is <ol>, iterate over <li>, pull out <h2> subs and then rest of val\n if course_html_parsed.tag == 'ol':\n # 0 is the newest\n for index, update in enumerate(course_html_parsed):\n if len(update) > 0:\n content = _course_info_content(update)\n # make the id on the client be 1..len w/ 1 being the oldest and len being the newest\n computed_id = len(course_html_parsed) - index\n payload = {\n \"id\": computed_id,\n \"date\": update.findtext(\"h2\"),\n \"content\": content\n }\n if provided_index == 0:\n course_update_items.append(payload)\n elif provided_index == computed_id:\n return payload\n\n return course_update_items",
"def find_listing_date(soup, dates):\r\n pdate = soup.find_all(name = 'div', class_='pdate')\r\n for i in pdate:\r\n # input(i)\r\n text = i.get_text()\r\n date = text.split(':')\r\n date = date[2:3]\r\n\r\n dates.append(date)\r\n\r\n return dates",
"def calibration_dates(self):\n return sorted(self._calibration_dict.keys())",
"def get_course_update_items(course_updates, provided_id=None):\r\n if course_updates and getattr(course_updates, \"items\", None):\r\n provided_id = _get_index(provided_id)\r\n if provided_id and 0 < provided_id <= len(course_updates.items):\r\n return course_updates.items[provided_id - 1]\r\n\r\n # return list in reversed order (old format: [4,3,2,1]) for compatibility\r\n return list(reversed(course_updates.items))\r\n else:\r\n # old method to get course updates\r\n # purely to handle free formed updates not done via editor. Actually kills them, but at least doesn't break.\r\n try:\r\n course_html_parsed = html.fromstring(course_updates.data)\r\n except (etree.XMLSyntaxError, etree.ParserError):\r\n log.error(\"Cannot parse: \" + course_updates.data)\r\n escaped = django.utils.html.escape(course_updates.data)\r\n course_html_parsed = html.fromstring(\"<ol><li>\" + escaped + \"</li></ol>\")\r\n\r\n # confirm that root is <ol>, iterate over <li>, pull out <h2> subs and then rest of val\r\n course_update_items = []\r\n provided_id = _get_index(provided_id)\r\n if course_html_parsed.tag == 'ol':\r\n # 0 is the newest\r\n for index, update in enumerate(course_html_parsed):\r\n if len(update) > 0:\r\n content = _course_info_content(update)\r\n # make the id on the client be 1..len w/ 1 being the oldest and len being the newest\r\n computed_id = len(course_html_parsed) - index\r\n payload = {\r\n \"id\": computed_id,\r\n \"date\": update.findtext(\"h2\"),\r\n \"content\": content\r\n }\r\n if provided_id == 0:\r\n course_update_items.append(payload)\r\n elif provided_id == computed_id:\r\n return payload\r\n\r\n return course_update_items",
"def getPurchaseDates(self):\n\t\treturn self.dateList",
"def get_dates(soup):\n return [res.get_text() for res in soup.find_all('a', attrs={'href':re.compile('/events/')})]",
"def _get_updates(self, patch):\n updates = {}\n for p in patch:\n attribute = p['path'] if p['path'][0] != '/' else p['path'][1:]\n updates[attribute] = p['value']\n return updates",
"def list_dates(product):\n\n if product == 'analysis_assim':\n files = _list_files(product)\n dates = []\n for f in files:\n date = _date_from_filename(f)\n dates.append(date)\n dates = list(set(dates)) # Get unique dates\n else:\n template = (HS_DATA_EXPLORER_URI + 'files_explorer/get-folder-contents'\n '/?selection_path=%2Fprojects%2Fwater%2Fnwm%2Fdata%2F{0}'\n '%3Ffolder&query_type=filesystem')\n if 'long_range' in product:\n product = 'long_range'\n uri = template.format(product)\n response = urlopen(uri).read()\n dates = re.findall(r'\\>([0-9]+)\\<', response)\n return sorted(dates)",
"def updates(self):\r\n return list(self.state_updates)",
"def _find_latest_updates():\n query = (\n get_db()\n .collection_group(\"updates\")\n .order_by(\"date\", direction=firestore.Query.DESCENDING)\n )\n for doc_ref in query.stream():\n doc = doc_ref.to_dict()\n location = load_location(doc[\"location\"])\n yield {**location, **doc}",
"def DateUpdated(self, default=None):\n return self.data.get('metadata', {}).get('_updated', default)"
] |
[
"0.67136776",
"0.6575803",
"0.5979425",
"0.5853244",
"0.5843556",
"0.58385",
"0.5817554",
"0.5813687",
"0.5786492",
"0.5741242",
"0.57165074",
"0.5714437",
"0.5712907",
"0.5687153",
"0.5668193",
"0.5646013",
"0.5619637",
"0.56141734",
"0.5610867",
"0.5599167",
"0.5577074",
"0.5576776",
"0.55669063",
"0.552842",
"0.55193573",
"0.54996866",
"0.5473523",
"0.544797",
"0.54363096",
"0.5428158"
] |
0.7661961
|
0
|